repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
cpieloth/BackBacker | setup.py | 1 | 2142 | #!/usr/bin/env python3
"""
A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
import setup_commands
__author__ = 'Christof Pieloth'
install_requires = [
'requests==2.20.*'
]
setup(
cmdclass=dict(setup_commands.custom_commands),
name=setup_commands.project_name,
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=setup_commands.version,
description='BackBacker is a light backup tool '
'with a "declarative" job file based on simple commands with arguments.',
author='Christof Pieloth',
url='https://github.com/cpieloth',
# Choose a license: https://choosealicense.com
license='GPLv3',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: End Users/Desktop',
'Intended Audience :: System Administrators',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
],
packages=find_packages(exclude=['build*', 'docs', 'tests*', 'tools*', 'venv*']),
install_requires=install_requires,
test_suite='tests',
include_package_data=True,
entry_points={
'console_scripts': [
'{} = {}.{}:main'.format(setup_commands.api_name, setup_commands.api_name, setup_commands.api_name)
],
},
)
| gpl-3.0 | -569,968,029,977,721,900 | 27.56 | 111 | 0.65733 | false |
udoklein/blinkenlight | Experiments/Removing_Flicker/glowing_bounce/glowing_bounce.py | 1 | 3310 | #!/usr/bin/python
#
# www.blinkenlight.net
#
# Copyright 2011 Udo Klein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
import math
# output common start of program
print """#include <MsTimer2.h>
#include <avr/io.h>
#include <avr/pgmspace.h>
const uint8_t ports = 3;
const uint8_t brightness_levels = 32;
const uint8_t period = 39;
uint8_t const pov_pattern[ports*brightness_levels*period] PROGMEM = {"""
def cursor(phase): return phase if phase<period/2 else period-phase
def distance_to_cursor(LED, phase): return abs(LED-cursor(phase))
def brightness_by_distance(distance): return [32, 8, 1, 0, 0][distance] if distance<5 else 0
def brightness(LED, phase): return brightness_by_distance(distance_to_cursor(LED, phase))
def LEDstate(LED, phase, cycle): return 1 if cycle < brightness(LED, phase) else 0
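# Note on the helpers above: cursor() bounces a position back and forth across the strip once
# per period, brightness_by_distance() gives a glowing tail around that cursor (32, 8, 1, then
# off), and LEDstate() turns the brightness value into an on/off bit for each of the 32 duty
# cycles, so brighter LEDs are lit during more cycles -- a simple software-PWM scheme.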
period = 39
cycles = 32
LEDs = 20
# for each colum in the picture output 3 bytes
# that will be copied into the LED output ports
for phase in range(0, period):
for cycle in range(0, cycles):
line = " 0b"
for LED in range(0, LEDs):
if LED==6 or LED==12:
line = line+", 0b"
line = line + str(LEDstate(LED, phase, cycle))
# add a comment that makes it easier to see
# the intended meaning of the 3 bytes
line = line + (", // phase {0:>2}, cycle {1:>2}".format(phase, cycle))
print line
print """};
"""
##uint32_t duration(uint8_t pos) {
#return (sqrt(((float) 20-pos)/20)-sqrt(((float) 19-pos)/20))*500;
line = "uint16_t ms_delay[period] = {"
for phase in range(0, period):
cursor = phase if 2*phase < period else period-phase
delay = int(60*(math.sqrt(cursor+2)-math.sqrt(cursor+1)))
line += "{0:>3},".format(delay)
print line+"};"
print """
volatile uint16_t base_index = 0;
void iterate() {
static uint8_t index = 0;
static uint16_t counter = 0;
if (counter < ms_delay[index]) {
++counter;
} else {
counter = 0;
base_index = index*(ports*brightness_levels);
++index;
if (index == period) {
index = 0;
}
}
}
void setup() {
DDRD = 0b11111111; // set digital 0- 7 to output
DDRB = 0b00111111; // set digital 8-13 to output
DDRC = 0b00111111; // set digital 14-19 to output (coincidences with analog 0-5)
MsTimer2::set(2, iterate);
MsTimer2::start();
}
void loop() {
static uint16_t index;
cli();
index = base_index;
sei();
for (uint8_t cycle=0; cycle<brightness_levels; ++cycle) {
PORTC = pgm_read_byte(pov_pattern+(index++));
PORTB = pgm_read_byte(pov_pattern+(index++));
PORTD = pgm_read_byte(pov_pattern+(index++));
}
}
"""
| gpl-3.0 | -5,319,239,773,648,759,000 | 28.035088 | 92 | 0.642598 | false |
jwlin/web-crawler-tutorial | ch9/bot_house.py | 1 | 1795 | from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
if __name__ == '__main__':
url = 'http://www.bot.com.tw/house/default.aspx'
try:
driver = webdriver.Chrome(executable_path='chromedriver.exe')
        # The webdriver executable can also be PhantomJS
# driver = webdriver.PhantomJS('phantomjs.exe')
driver.maximize_window()
driver.set_page_load_timeout(60)
driver.get(url)
        # Locate the date input fields and enter the dates
element = driver.find_element_by_id('fromdate_TextBox')
element.send_keys('1010101')
element = driver.find_element_by_id('todate_TextBox')
element.send_keys('1060101')
        # Locate the field containing the drop-down menu and click it
element = driver.find_element_by_id('purpose_DDL')
element.click()
        # Browse the menu options and click the matching one
for option in element.find_elements_by_tag_name('option'):
if option.text == '其他':
option.click()
        # Click the submit button
element = driver.find_element_by_id('Submit_Button').click()
        # Wait for the target table to appear
element = WebDriverWait(driver, 5).until(
expected_conditions.presence_of_element_located((By.ID, 'House_GridView'))
)
        # page_source returns the page document as the browser currently sees it
soup = BeautifulSoup(driver.page_source, 'html5lib')
table = soup.find(id='House_GridView')
for row in table.find_all('tr'):
print([s for s in row.stripped_strings])
finally:
        driver.quit()  # Close the browser and end the webdriver process
| mit | -2,260,111,691,379,267,300 | 33.659574 | 86 | 0.631676 | false |
im85288/script.tvguide.fullscreen | ReloadAddonFolders.py | 1 | 3020 | import os
import re
import xbmc
import xbmcgui
import xbmcaddon
import xbmcvfs
from rpc import RPC
ADDON = xbmcaddon.Addon(id='script.tvguide.fullscreen')
file_name = 'special://profile/addon_data/script.tvguide.fullscreen/folders.list'
f = xbmcvfs.File(file_name)
items = f.read().splitlines()
f.close()
unique = set(items)
file_name = 'special://profile/addon_data/script.tvguide.fullscreen/addons.ini'
if int(ADDON.getSetting('addons.ini.type')) == 1:
customFile = str(ADDON.getSetting('addons.ini.file'))
if os.path.exists(customFile) and os.access(customFile,os.W_OK):
file_name = customFile
plugins = {}
logos = {}
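# Walk every saved folder through Kodi's JSON-RPC Files.GetDirectory call and collect, per
# source plugin, the playable file links and their thumbnails. Paths that are not served by a
# plugin:// source are skipped.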
for path in unique:
try:
response = RPC.files.get_directory(media="files", directory=path, properties=["thumbnail"])
except:
continue
files = response["files"]
dirs = dict([[f["label"], f["file"]] for f in files if f["filetype"] == "directory"])
links = dict([[f["label"], f["file"]] for f in files if f["filetype"] == "file"])
thumbnails = dict([[f["file"], f["thumbnail"]] for f in files if f["filetype"] == "file"])
match = re.match(r"plugin://(.*?)/",path)
if match:
plugin = match.group(1)
else:
continue
if plugin not in plugins:
plugins[plugin] = {}
if plugin not in logos:
logos[plugin] = {}
streams = plugins[plugin]
for label in links:
file = links[label]
streams[label] = file
thumbs = logos[plugin]
for file in thumbnails:
thumb = thumbnails[file]
thumbs[file] = thumb
f = xbmcvfs.File(file_name,'wb')
write_str = "# WARNING Make a copy of this file.\n# It will be overwritten on the next folder add.\n\n"
f.write(write_str.encode("utf8"))
for addonId in sorted(plugins):
write_str = "[%s]\n" % (addonId)
f.write(write_str)
addonStreams = plugins[addonId]
for name in sorted(addonStreams):
stream = addonStreams[name]
if name.startswith(' '):
continue
name = re.sub(r'[:=]',' ',name)
name = re.sub(r'\[.*?\]','',name)
if not name: #TODO names in brackets
continue
if name.startswith(' '):
continue
if not stream:
stream = 'nothing'
write_str = "%s=%s\n" % (name,stream)
f.write(write_str.encode("utf8"))
f.close()
file_name = 'special://profile/addon_data/script.tvguide.fullscreen/icons.ini'
f = xbmcvfs.File(file_name,'wb')
write_str = "# WARNING Make a copy of this file.\n# It will be overwritten on the next folder add.\n\n"
f.write(write_str.encode("utf8"))
for addonId in sorted(logos):
write_str = "[%s]\n" % (addonId)
f.write(write_str)
addonLogos = logos[addonId]
for file in sorted(addonLogos):
logo = addonLogos[file]
if logo:
write_str = "%s|%s\n" % (file,logo)
f.write(write_str.encode("utf8"))
f.close()
dialog = xbmcgui.Dialog()
dialog.notification("TV Guide Fullscreen","Done: Reload Addon Folders",sound=False)
| gpl-2.0 | -7,479,762,841,913,802,000 | 31.12766 | 103 | 0.621523 | false |
brunosantos/Bsan-kodi-repo | plugin.video.kodi/menuLateral.py | 1 | 2906 | import re
import xbmc
import xbmcgui
from utils import openfile
from gravador import iniciagravador
from servidores import request_servidores
class menulateral(xbmcgui.WindowXMLDialog):
def __init__( self, *args, **kwargs ):
xbmcgui.WindowXML.__init__(self)
self.finalurl = kwargs[ "finalurl" ]
self.siglacanal = kwargs[ "siglacanal" ]
self.name = kwargs[ "name" ]
self.directo = kwargs[ "directo" ]
def onInit(self):
self.updateChannelList()
def onAction(self, action):
if action.getId() in [9, 10, 92, 117]:
self.close()
return
def onClick(self, controlId):
if controlId == 4001:
self.close()
request_servidores('','[B]%s[/B]' %(self.name))
elif controlId == 40010:
self.close()
iniciagravador(self.finalurl,self.siglacanal,self.name,self.directo)
elif controlId == 203:
#xbmc.executebuiltin("XBMC.PlayerControl(stop)")
self.close()
elif controlId == 6000:
listControl = self.getControl(6000)
item = listControl.getSelectedItem()
nomecanal=item.getProperty('chname')
self.close()
request_servidores('',nomecanal)
#else:
# self.buttonClicked = controlId
# self.close()
def onFocus(self, controlId):
pass
def updateChannelList(self):
idx=-1
listControl = self.getControl(6000)
listControl.reset()
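        # The saved channel list ('canaison') stores names wrapped in [B]...[/B] markup; the
        # lines below strip the opening brackets and extract each name with a regex before
        # rebuilding the list control.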
canaison=openfile('canaison')
canaison=canaison.replace('[','')
lista=re.compile('B](.+?)/B]').findall(canaison)
for nomecanal in lista:
idx=int(idx+1)
if idx==0: idxaux=' '
else:
idxaux='%4s.' % (idx)
item = xbmcgui.ListItem(idxaux + ' %s' % (nomecanal), iconImage = '')
item.setProperty('idx', str(idx))
item.setProperty('chname', '[B]' + nomecanal + '[/B]')
listControl.addItem(item)
def updateListItem(self, idx, item):
channel = self.channelList[idx]
item.setLabel('%3d. %s' % (idx+1, channel.title))
item.setProperty('idx', str(idx))
def swapChannels(self, fromIdx, toIdx):
if self.swapInProgress: return
self.swapInProgress = True
c = self.channelList[fromIdx]
self.channelList[fromIdx] = self.channelList[toIdx]
self.channelList[toIdx] = c
# recalculate weight
for idx, channel in enumerate(self.channelList):
channel.weight = idx
listControl = self.getControl(6000)
self.updateListItem(fromIdx, listControl.getListItem(fromIdx))
self.updateListItem(toIdx, listControl.getListItem(toIdx))
listControl.selectItem(toIdx)
xbmc.sleep(50)
self.swapInProgress = False
| gpl-2.0 | -8,815,631,443,938,725,000 | 30.247312 | 85 | 0.575705 | false |
stvstnfrd/edx-platform | common/lib/xmodule/xmodule/library_content_module.py | 1 | 29619 | # -*- coding: utf-8 -*-
"""
LibraryContent: The XBlock used to include blocks from a library in a course.
"""
import json
import logging
import random
from copy import copy
from gettext import ngettext
import six
import bleach
from lazy import lazy
from lxml import etree
from opaque_keys.edx.locator import LibraryLocator
from pkg_resources import resource_string
from six import text_type
from six.moves import zip
from web_fragments.fragment import Fragment
from webob import Response
from xblock.completable import XBlockCompletionMode
from xblock.core import XBlock
from xblock.fields import Integer, List, Scope, String
from capa.responsetypes import registry
from xmodule.mako_module import MakoTemplateBlockBase
from xmodule.studio_editable import StudioEditableBlock
from xmodule.util.xmodule_django import add_webpack_to_fragment
from xmodule.validation import StudioValidation, StudioValidationMessage
from xmodule.xml_module import XmlMixin
from xmodule.x_module import (
HTMLSnippet,
ResourceTemplates,
shim_xmodule_js,
STUDENT_VIEW,
XModuleMixin,
XModuleDescriptorToXBlockMixin,
XModuleToXBlockMixin,
)
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
logger = logging.getLogger(__name__)
ANY_CAPA_TYPE_VALUE = 'any'
def _get_human_name(problem_class):
"""
Get the human-friendly name for a problem type.
"""
return getattr(problem_class, 'human_name', problem_class.__name__)
def _get_capa_types():
"""
Gets capa types tags and labels
"""
capa_types = {tag: _get_human_name(registry.get_class_for_tag(tag)) for tag in registry.registered_tags()}
return [{'value': ANY_CAPA_TYPE_VALUE, 'display_name': _('Any Type')}] + sorted([
{'value': capa_type, 'display_name': caption}
for capa_type, caption in capa_types.items()
], key=lambda item: item.get('display_name'))
@XBlock.wants('library_tools') # Only needed in studio
@XBlock.wants('studio_user_permissions') # Only available in studio
@XBlock.wants('user')
class LibraryContentBlock(
MakoTemplateBlockBase,
XmlMixin,
XModuleDescriptorToXBlockMixin,
XModuleToXBlockMixin,
HTMLSnippet,
ResourceTemplates,
XModuleMixin,
StudioEditableBlock,
):
"""
An XBlock whose children are chosen dynamically from a content library.
Can be used to create randomized assessments among other things.
Note: technically, all matching blocks from the content library are added
as children of this block, but only a subset of those children are shown to
any particular student.
"""
# pylint: disable=abstract-method
has_children = True
has_author_view = True
resources_dir = 'assets/library_content'
preview_view_js = {
'js': [],
'xmodule_js': resource_string(__name__, 'js/src/xmodule.js'),
}
preview_view_css = {
'scss': [],
}
mako_template = 'widgets/metadata-edit.html'
studio_js_module_name = "VerticalDescriptor"
studio_view_js = {
'js': [
resource_string(__name__, 'js/src/vertical/edit.js'),
],
'xmodule_js': resource_string(__name__, 'js/src/xmodule.js'),
}
studio_view_css = {
'scss': [],
}
show_in_read_only_mode = True
completion_mode = XBlockCompletionMode.AGGREGATOR
display_name = String(
display_name=_("Display Name"),
help=_("The display name for this component."),
default="Randomized Content Block",
scope=Scope.settings,
)
source_library_id = String(
display_name=_("Library"),
help=_("Select the library from which you want to draw content."),
scope=Scope.settings,
values_provider=lambda instance: instance.source_library_values(),
)
source_library_version = String(
# This is a hidden field that stores the version of source_library when we last pulled content from it
display_name=_("Library Version"),
scope=Scope.settings,
)
mode = String(
display_name=_("Mode"),
help=_("Determines how content is drawn from the library"),
default="random",
values=[
{"display_name": _("Choose n at random"), "value": "random"}
# Future addition: Choose a new random set of n every time the student refreshes the block, for self tests
# Future addition: manually selected blocks
],
scope=Scope.settings,
)
max_count = Integer(
display_name=_("Count"),
help=_("Enter the number of components to display to each student."),
default=1,
scope=Scope.settings,
)
capa_type = String(
display_name=_("Problem Type"),
help=_('Choose a problem type to fetch from the library. If "Any Type" is selected no filtering is applied.'),
default=ANY_CAPA_TYPE_VALUE,
values=_get_capa_types(),
scope=Scope.settings,
)
selected = List(
# This is a list of (block_type, block_id) tuples used to record
# which random/first set of matching blocks was selected per user
default=[],
scope=Scope.user_state,
)
@property
def source_library_key(self):
"""
Convenience method to get the library ID as a LibraryLocator and not just a string
"""
return LibraryLocator.from_string(self.source_library_id)
@classmethod
def make_selection(cls, selected, children, max_count, mode):
"""
Dynamically selects block_ids indicating which of the possible children are displayed to the current user.
Arguments:
selected - list of (block_type, block_id) tuples assigned to this student
children - children of this block
max_count - number of components to display to each student
mode - how content is drawn from the library
Returns:
A dict containing the following keys:
'selected' (set) of (block_type, block_id) tuples assigned to this student
'invalid' (set) of dropped (block_type, block_id) tuples that are no longer valid
'overlimit' (set) of dropped (block_type, block_id) tuples that were previously selected
'added' (set) of newly added (block_type, block_id) tuples
"""
rand = random.Random()
selected_keys = set(tuple(k) for k in selected) # set of (block_type, block_id) tuples assigned to this student
# Determine which of our children we will show:
valid_block_keys = set((c.block_type, c.block_id) for c in children)
# Remove any selected blocks that are no longer valid:
invalid_block_keys = (selected_keys - valid_block_keys)
if invalid_block_keys:
selected_keys -= invalid_block_keys
# If max_count has been decreased, we may have to drop some previously selected blocks:
overlimit_block_keys = set()
if len(selected_keys) > max_count:
num_to_remove = len(selected_keys) - max_count
overlimit_block_keys = set(rand.sample(selected_keys, num_to_remove))
selected_keys -= overlimit_block_keys
# Do we have enough blocks now?
num_to_add = max_count - len(selected_keys)
added_block_keys = None
if num_to_add > 0:
# We need to select [more] blocks to display to this user:
pool = valid_block_keys - selected_keys
if mode == "random":
num_to_add = min(len(pool), num_to_add)
added_block_keys = set(rand.sample(pool, num_to_add))
# We now have the correct n random children to show for this user.
else:
raise NotImplementedError("Unsupported mode.")
selected_keys |= added_block_keys
if any((invalid_block_keys, overlimit_block_keys, added_block_keys)):
selected = list(selected_keys)
random.shuffle(selected)
return {
'selected': selected,
'invalid': invalid_block_keys,
'overlimit': overlimit_block_keys,
'added': added_block_keys,
}
def _publish_event(self, event_name, result, **kwargs):
"""
Helper method to publish an event for analytics purposes
"""
event_data = {
"location": six.text_type(self.location),
"result": result,
"previous_count": getattr(self, "_last_event_result_count", len(self.selected)),
"max_count": self.max_count,
}
event_data.update(kwargs)
self.runtime.publish(self, "edx.librarycontentblock.content.{}".format(event_name), event_data)
self._last_event_result_count = len(result) # pylint: disable=attribute-defined-outside-init
@classmethod
def publish_selected_children_events(cls, block_keys, format_block_keys, publish_event):
"""
Helper method for publishing events when children blocks are
selected/updated for a user. This helper is also used by
the ContentLibraryTransformer.
Arguments:
block_keys -
A dict describing which events to publish (add or
remove), see `make_selection` above for format details.
format_block_keys -
A function to convert block keys to the format expected
by publish_event. Must have the signature:
[(block_type, block_id)] -> T
Where T is a collection of block keys as accepted by
`publish_event`.
publish_event -
Function that handles the actual publishing. Must have
the signature:
<'removed'|'assigned'> -> result:T -> removed:T -> reason:str -> None
Where T is a collection of block_keys as returned by
`format_block_keys`.
"""
if block_keys['invalid']:
# reason "invalid" means deleted from library or a different library is now being used.
publish_event(
"removed",
result=format_block_keys(block_keys['selected']),
removed=format_block_keys(block_keys['invalid']),
reason="invalid"
)
if block_keys['overlimit']:
publish_event(
"removed",
result=format_block_keys(block_keys['selected']),
removed=format_block_keys(block_keys['overlimit']),
reason="overlimit"
)
if block_keys['added']:
publish_event(
"assigned",
result=format_block_keys(block_keys['selected']),
added=format_block_keys(block_keys['added'])
)
def selected_children(self):
"""
Returns a list() of block_ids indicating which of the possible children
have been selected to display to the current user.
This reads and updates the "selected" field, which has user_state scope.
Note: the return value (self.selected) contains block_ids. To get
actual BlockUsageLocators, it is necessary to use self.children,
because the block_ids alone do not specify the block type.
"""
block_keys = self.make_selection(self.selected, self.children, self.max_count, "random") # pylint: disable=no-member
# Publish events for analytics purposes:
lib_tools = self.runtime.service(self, 'library_tools')
format_block_keys = lambda keys: lib_tools.create_block_analytics_summary(self.location.course_key, keys)
self.publish_selected_children_events(
block_keys,
format_block_keys,
self._publish_event,
)
if any(block_keys[changed] for changed in ('invalid', 'overlimit', 'added')):
# Save our selections to the user state, to ensure consistency:
selected = block_keys['selected']
self.selected = selected # TODO: this doesn't save from the LMS "Progress" page.
return self.selected
def _get_selected_child_blocks(self):
"""
Generator returning XBlock instances of the children selected for the
current user.
"""
for block_type, block_id in self.selected_children():
yield self.runtime.get_block(self.location.course_key.make_usage_key(block_type, block_id))
def student_view(self, context): # lint-amnesty, pylint: disable=missing-function-docstring
fragment = Fragment()
contents = []
child_context = {} if not context else copy(context)
for child in self._get_selected_child_blocks():
if child is None:
# TODO: Fix the underlying issue in TNL-7424
# This shouldn't be happening, but does for an as-of-now
# unknown reason. Until we address the underlying issue,
# let's at least log the error explicitly, ignore the
# exception, and prevent the page from resulting in a
# 500-response.
logger.error('Skipping display for child block that is None')
continue
for displayable in child.displayable_items():
rendered_child = displayable.render(STUDENT_VIEW, child_context)
fragment.add_fragment_resources(rendered_child)
contents.append({
'id': text_type(displayable.location),
'content': rendered_child.content,
})
fragment.add_content(self.system.render_template('vert_module.html', {
'items': contents,
'xblock_context': context,
'show_bookmark_button': False,
'watched_completable_blocks': set(),
'completion_delay_ms': None,
}))
return fragment
def author_view(self, context):
"""
Renders the Studio views.
Normal studio view: If block is properly configured, displays library status summary
Studio container view: displays a preview of all possible children.
"""
fragment = Fragment()
root_xblock = context.get('root_xblock')
is_root = root_xblock and root_xblock.location == self.location
if is_root:
# User has clicked the "View" link. Show a preview of all possible children:
if self.children: # pylint: disable=no-member
fragment.add_content(self.system.render_template("library-block-author-preview-header.html", {
'max_count': self.max_count,
'display_name': self.display_name or self.url_name,
}))
context['can_edit_visibility'] = False
context['can_move'] = False
self.render_children(context, fragment, can_reorder=False, can_add=False)
# else: When shown on a unit page, don't show any sort of preview -
# just the status of this block in the validation area.
# The following JS is used to make the "Update now" button work on the unit page and the container view:
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/library_content_edit.js'))
fragment.initialize_js('LibraryContentAuthorView')
return fragment
def studio_view(self, _context):
"""
Return the studio view.
"""
fragment = Fragment(
self.system.render_template(self.mako_template, self.get_context())
)
add_webpack_to_fragment(fragment, 'LibraryContentBlockStudio')
shim_xmodule_js(fragment, self.studio_js_module_name)
return fragment
def get_child_descriptors(self):
"""
Return only the subset of our children relevant to the current student.
"""
return list(self._get_selected_child_blocks())
@property
def non_editable_metadata_fields(self):
non_editable_fields = super().non_editable_metadata_fields
# The only supported mode is currently 'random'.
# Add the mode field to non_editable_metadata_fields so that it doesn't
# render in the edit form.
non_editable_fields.extend([
LibraryContentBlock.mode,
LibraryContentBlock.source_library_version,
])
return non_editable_fields
@lazy
def tools(self):
"""
Grab the library tools service or raise an error.
"""
return self.runtime.service(self, 'library_tools')
def get_user_id(self):
"""
Get the ID of the current user.
"""
user_service = self.runtime.service(self, 'user')
if user_service:
# May be None when creating bok choy test fixtures
user_id = user_service.get_current_user().opt_attrs.get('edx-platform.user_id', None)
else:
user_id = None
return user_id
@XBlock.handler
def refresh_children(self, request=None, suffix=None): # lint-amnesty, pylint: disable=unused-argument
"""
Refresh children:
This method is to be used when any of the libraries that this block
references have been updated. It will re-fetch all matching blocks from
the libraries, and copy them as children of this block. The children
will be given new block_ids, but the definition ID used should be the
exact same definition ID used in the library.
This method will update this block's 'source_library_id' field to store
the version number of the libraries used, so we easily determine if
this block is up to date or not.
"""
user_perms = self.runtime.service(self, 'studio_user_permissions')
if not self.tools:
return Response("Library Tools unavailable in current runtime.", status=400)
self.tools.update_children(self, user_perms)
return Response()
# Copy over any overridden settings the course author may have applied to the blocks.
def _copy_overrides(self, store, user_id, source, dest):
"""
Copy any overrides the user has made on blocks in this library.
"""
for field in six.itervalues(source.fields):
if field.scope == Scope.settings and field.is_set_on(source):
setattr(dest, field.name, field.read_from(source))
if source.has_children:
source_children = [self.runtime.get_block(source_key) for source_key in source.children]
dest_children = [self.runtime.get_block(dest_key) for dest_key in dest.children]
for source_child, dest_child in zip(source_children, dest_children):
self._copy_overrides(store, user_id, source_child, dest_child)
store.update_item(dest, user_id)
def studio_post_duplicate(self, store, source_block):
"""
Used by the studio after basic duplication of a source block. We handle the children
ourselves, because we have to properly reference the library upstream and set the overrides.
Otherwise we'll end up losing data on the next refresh.
"""
# The first task will be to refresh our copy of the library to generate the children.
# We must do this at the currently set version of the library block. Otherwise we may not have
# exactly the same children-- someone may be duplicating an out of date block, after all.
user_id = self.get_user_id()
user_perms = self.runtime.service(self, 'studio_user_permissions')
if not self.tools:
raise RuntimeError("Library tools unavailable, duplication will not be sane!")
self.tools.update_children(self, user_perms, version=self.source_library_version)
self._copy_overrides(store, user_id, source_block, self)
# Children have been handled.
return True
def _validate_library_version(self, validation, lib_tools, version, library_key):
"""
Validates library version
"""
latest_version = lib_tools.get_library_version(library_key)
if latest_version is not None:
if version is None or version != six.text_type(latest_version):
validation.set_summary(
StudioValidationMessage(
StudioValidationMessage.WARNING,
_(u'This component is out of date. The library has new content.'),
# TODO: change this to action_runtime_event='...' once the unit page supports that feature.
# See https://openedx.atlassian.net/browse/TNL-993
action_class='library-update-btn',
# Translators: {refresh_icon} placeholder is substituted to "↻" (without double quotes)
action_label=_(u"{refresh_icon} Update now.").format(refresh_icon=u"↻")
)
)
return False
else:
validation.set_summary(
StudioValidationMessage(
StudioValidationMessage.ERROR,
_(u'Library is invalid, corrupt, or has been deleted.'),
action_class='edit-button',
action_label=_(u"Edit Library List.")
)
)
return False
return True
def _set_validation_error_if_empty(self, validation, summary):
""" Helper method to only set validation summary if it's empty """
if validation.empty:
validation.set_summary(summary)
def validate(self):
"""
Validates the state of this Library Content Module Instance. This
is the override of the general XBlock method, and it will also ask
its superclass to validate.
"""
validation = super().validate()
if not isinstance(validation, StudioValidation):
validation = StudioValidation.copy(validation)
library_tools = self.runtime.service(self, "library_tools")
if not (library_tools and library_tools.can_use_library_content(self)):
validation.set_summary(
StudioValidationMessage(
StudioValidationMessage.ERROR,
_(
u"This course does not support content libraries. "
u"Contact your system administrator for more information."
)
)
)
return validation
if not self.source_library_id:
validation.set_summary(
StudioValidationMessage(
StudioValidationMessage.NOT_CONFIGURED,
_(u"A library has not yet been selected."),
action_class='edit-button',
action_label=_(u"Select a Library.")
)
)
return validation
lib_tools = self.runtime.service(self, 'library_tools')
self._validate_library_version(validation, lib_tools, self.source_library_version, self.source_library_key)
# Note: we assume refresh_children() has been called
# since the last time fields like source_library_id or capa_types were changed.
matching_children_count = len(self.children) # pylint: disable=no-member
if matching_children_count == 0:
self._set_validation_error_if_empty(
validation,
StudioValidationMessage(
StudioValidationMessage.WARNING,
_(u'There are no matching problem types in the specified libraries.'),
action_class='edit-button',
action_label=_(u"Select another problem type.")
)
)
if matching_children_count < self.max_count:
self._set_validation_error_if_empty(
validation,
StudioValidationMessage(
StudioValidationMessage.WARNING,
(
ngettext(
u'The specified library is configured to fetch {count} problem, ',
u'The specified library is configured to fetch {count} problems, ',
self.max_count
) +
ngettext(
u'but there is only {actual} matching problem.',
u'but there are only {actual} matching problems.',
matching_children_count
)
).format(count=self.max_count, actual=matching_children_count),
action_class='edit-button',
action_label=_(u"Edit the library configuration.")
)
)
return validation
def source_library_values(self):
"""
Return a list of possible values for self.source_library_id
"""
lib_tools = self.runtime.service(self, 'library_tools')
user_perms = self.runtime.service(self, 'studio_user_permissions')
all_libraries = [
(key, bleach.clean(name)) for key, name in lib_tools.list_available_libraries()
if user_perms.can_read(key) or self.source_library_id == six.text_type(key)
]
all_libraries.sort(key=lambda entry: entry[1]) # Sort by name
if self.source_library_id and self.source_library_key not in [entry[0] for entry in all_libraries]:
all_libraries.append((self.source_library_id, _(u"Invalid Library")))
all_libraries = [(u"", _("No Library Selected"))] + all_libraries
values = [{"display_name": name, "value": six.text_type(key)} for key, name in all_libraries]
return values
def editor_saved(self, user, old_metadata, old_content): # lint-amnesty, pylint: disable=unused-argument
"""
If source_library_id or capa_type has been edited, refresh_children automatically.
"""
old_source_library_id = old_metadata.get('source_library_id', [])
if (old_source_library_id != self.source_library_id or
old_metadata.get('capa_type', ANY_CAPA_TYPE_VALUE) != self.capa_type):
try:
self.refresh_children()
except ValueError:
pass # The validation area will display an error message, no need to do anything now.
def has_dynamic_children(self):
"""
Inform the runtime that our children vary per-user.
See get_child_descriptors() above
"""
return True
def get_content_titles(self):
"""
Returns list of friendly titles for our selected children only; without
        this, all possible children's titles would be seen in the sequence bar in
the LMS.
This overwrites the get_content_titles method included in x_module by default.
"""
titles = []
for child in self.get_child_descriptors():
titles.extend(child.get_content_titles())
return titles
@classmethod
def definition_from_xml(cls, xml_object, system):
children = [
system.process_xml(etree.tostring(child)).scope_ids.usage_id
for child in xml_object.getchildren()
]
definition = {
attr_name: json.loads(attr_value)
for attr_name, attr_value in xml_object.attrib.items()
}
return definition, children
def definition_to_xml(self, resource_fs):
""" Exports Library Content Module to XML """
xml_object = etree.Element('library_content')
for child in self.get_children():
self.runtime.add_block_as_child_node(child, xml_object)
# Set node attributes based on our fields.
for field_name, field in six.iteritems(self.fields):
if field_name in ('children', 'parent', 'content'):
continue
if field.is_set_on(self):
xml_object.set(field_name, six.text_type(field.read_from(self)))
return xml_object
class LibrarySummary(object):
"""
A library summary object which contains the fields required for library listing on studio.
"""
def __init__(self, library_locator, display_name):
"""
Initialize LibrarySummary
Arguments:
library_locator (LibraryLocator): LibraryLocator object of the library.
display_name (unicode): display name of the library.
"""
self.display_name = display_name if display_name else _(u"Empty")
self.id = library_locator # pylint: disable=invalid-name
self.location = library_locator.make_usage_key('library', 'library')
@property
def display_org_with_default(self):
"""
Org display names are not implemented. This just provides API compatibility with CourseDescriptor.
Always returns the raw 'org' field from the key.
"""
return self.location.library_key.org
@property
def display_number_with_default(self):
"""
Display numbers are not implemented. This just provides API compatibility with CourseDescriptor.
Always returns the raw 'library' field from the key.
"""
return self.location.library_key.library
| agpl-3.0 | -1,642,454,611,555,085,800 | 39.679945 | 125 | 0.60932 | false |
google/fruit | tests/meta/test_proof_trees.py | 2 | 2261 | #!/usr/bin/env python3
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
from fruit_test_common import *
COMMON_DEFINITIONS = '''
#define IN_FRUIT_CPP_FILE 1
#include "meta/common.h"
#include <fruit/impl/meta/metaprogramming.h>
#include <fruit/impl/meta/proof_trees.h>
#include <fruit/impl/meta/proof_tree_comparison.h>
#include <vector>
struct A1 {};
struct B1 {};
struct C1 {};
struct D1 {};
struct X1 {};
struct Y1 {};
using A = Type<A1>;
using B = Type<B1>;
using C = Type<C1>;
using D = Type<D1>;
using X = Type<X1>;
using Y = Type<Y1>;
using Proof1 = Pair<X, ToSet<A, B>>;
using Proof1b = Pair<X, ToSet<B, A>>;
using Proof2 = Pair<Y, ToSet<B, C>>;
'''
class TestProofTrees(parameterized.TestCase):
def test_IsProofTreeEqualTo(self):
source = '''
int main() {
AssertNotSameProof(Pair<X, ToSet<A>>, Pair<X, ToSet<B>>);
AssertNotSameProof(Proof1, Proof2);
AssertSameProof(Proof1, Proof1b);
}
'''
expect_success(
COMMON_DEFINITIONS,
source,
locals())
def test_IsForestEqualTo(self):
source = '''
int main() {
AssertSameForest(Vector<>, Vector<>);
AssertNotSameForest(Vector<Proof1>, Vector<Proof2>);
AssertSameForest(Vector<Proof1, Proof2>, Vector<Proof2, Proof1b>);
}
'''
expect_success(
COMMON_DEFINITIONS,
source,
locals())
if __name__ == '__main__':
absltest.main()
| apache-2.0 | -1,283,456,006,779,043,300 | 28.75 | 82 | 0.59487 | false |
NicovincX2/Python-3.5 | Statistiques/Algorithme du gradient stochastique/sgd_comparison.py | 1 | 1671 | # -*- coding: utf-8 -*-
import os
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
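# xx is the fraction of the data used for training. For every classifier, the loop below
# averages the test error over `rounds` random train/test splits at each training proportion
# and plots test error against training-set size.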
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
os.system("pause")
| gpl-3.0 | -6,448,479,962,336,297,000 | 29.381818 | 79 | 0.609216 | false |
ForeverWintr/ImageClassipy | clouds/tests/testArena.py | 1 | 3227 | import unittest
import shutil
import tempfile
import os
import mock
import json
import subprocess
import sys
import signal
from queue import Queue
import numpy as np
from clouds.obj.subject import Subject
from clouds.obj.arena import Arena
from clouds.obj.classifier import Classifier
from clouds.tests import util
from clouds.tests.util import abortableSim
from clouds.util import enqueue
TESTDATA = './data'
class testArena(unittest.TestCase):
@classmethod
def setUpClass(cls):
np.seterr(all='raise', under='warn')
cls.workspace = tempfile.mkdtemp(prefix="testSim_")
cls.storedClassifier = os.path.join(cls.workspace, 'sc')
shutil.copytree(os.path.join(TESTDATA, 'xorClassifier'), cls.storedClassifier)
cls.xors = {x[0]: x[1] for x in util.createXors(cls.workspace)}
cls.sim = None
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.workspace)
def setUp(self):
c = Classifier.loadFromDir(self.storedClassifier)
#Use mock to replace the long running method randomClassifier with one that just returns
#our xor classifier.
with mock.patch.object(Arena, 'randomClassifier', return_value=c) as m:
self.sim = Arena(workingDir=os.path.join(self.workspace, 'sim'),
images=self.xors)
self.sim.spawnSubjects(1)
def tearDown(self):
shutil.rmtree(self.sim.workingDir)
self.sim = None
def testSimulate(self):
"""
Test a run of the xor problem.
"""
self.sim.simulate(numWorkers=1, reportEpochs=10)
self.assertAlmostEqual(self.sim.subjects[0].fitness, 100)
def testAbort(self):
"""
Assert that a simulation can be aborted, and its subjects will be saved.
"""
#mock pybrain's trainuntilconvergence to sleep a while
jsonImages = json.dumps(self.xors)
#assert that classifier has no trained epochs initially
c = Classifier.loadFromDir(os.path.join(self.workspace, 'sim', 'Subject_0', 'classifier'))
self.assertEqual(c.epochsTrained, 0)
with subprocess.Popen([sys.executable, abortableSim.__file__, self.sim.workingDir, '1',
jsonImages], stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
simOut = Queue()
with enqueue(proc.stdout, simOut):
#wait for the first print statement
self.assertIn("Waiting for interrupt signal", simOut.get().decode())
#send abort
proc.send_signal(signal.SIGINT)
#Give the process some time to exit
try:
proc.wait(timeout=5)
except subprocess.TimeoutExpired:
self.fail("Simulation failed to exit within 5 seconds")
#assert that the exit code was zero
self.assertEqual(proc.poll(), 0, "Simulation failed to exit cleanly")
#check that the classifier was modified
c = Classifier.loadFromDir(os.path.join(self.workspace, 'sim', 'Subject_0', 'classifier'))
self.assertEqual(c.epochsTrained, 1)
pass
| mit | 6,760,544,701,868,459,000 | 33.329787 | 98 | 0.633716 | false |
ResolveWang/algrithm_qa | 分类代表题目/深搜、广搜和回溯/地牢逃脱.py | 1 | 2952 | """
Link: https://www.nowcoder.com/questionTerminal/0385945b7d834a99bc0010e67f892e38
Source: Nowcoder

Given a dungeon of n rows and m columns, where '.' marks a passable cell and 'X' marks an
impassable obstacle, Niuniu starts at position (x0, y0) and explores the dungeon. Unlike most
games, every move must use one of a set of given step offsets, and no move may leave the
dungeon or land on an obstacle. The exit may be at any passable cell. Niuniu wants to know,
in the worst case, how many moves he needs to leave the dungeon.

Input description:
Each input contains one test case. The first line has two integers n and m (1 <= n, m <= 50),
the height and width of the dungeon. The next n lines, each of m characters, describe the
dungeon, which contains at least two '.'. The following line has two integers x0, y0, the
starting position (0 <= x0 < n, 0 <= y0 < m; the top-left corner is (0, 0) and the start is
always '.'). The next line has one integer k (0 < k <= 50), the number of legal step offsets,
followed by k lines each holding two integers dx, dy, the row and column offset of one
allowed move (-50 <= dx, dy <= 50).

Output description:
Print one number: the number of moves needed to leave the dungeon in the worst case, or -1 if
it can never be left. In the test case below Niuniu can move up, down, left and right across
all passable cells; an exit placed in the bottom-right corner needs the most moves, namely 3.

Example 1
Input
3 3
...
...
...
0 1
4
1 0
0 1
-1 0
0 -1
Output
3
"""
import sys
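# Approach (as implemented below): breadth-first search from the start cell using the given
# step offsets. The answer is the largest BFS depth among reachable cells, since the worst
# case places the exit at the farthest reachable passable cell. If some passable cell cannot
# be reached at all, the exit could be there, so -1 is printed instead.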
class Solution:
def get_rs(self, mat, x, y, cons):
pos = 0
for i in mat:
for j in i:
if j == '.':
pos += 1
visited = set()
queue = list()
queue.append((x, y, 0))
visited.add((x, y))
step = 0
while queue:
cur_x, cur_y, level = queue.pop(0)
step = max([level, step])
for direct in cons:
next_x = cur_x + direct[0]
next_y = cur_y + direct[1]
if 0 <= next_x < len(mat) and 0 <= next_y < len(mat[0]) and mat[next_x][next_y] != 'X' and (next_x, next_y) not in visited:
queue.append((next_x, next_y, level+1))
visited.add((next_x, next_y))
if len(visited) != pos:
print(-1)
return
print(step)
if __name__ == '__main__':
n, m = map(int, sys.stdin.readline().split())
matrix = list()
for _ in range(n):
matrix.append(sys.stdin.readline().strip())
a, b = map(int, sys.stdin.readline().split())
k = int(sys.stdin.readline())
constraint = list()
for _ in range(k):
constraint.append(list(map(int, sys.stdin.readline().split())))
solution = Solution()
solution.get_rs(matrix, a, b, constraint) | mit | 1,234,208,552,566,871,300 | 27.287671 | 281 | 0.5625 | false |
boudewijnrempt/kura | kuralib/lng_strm.py | 1 | 7895 | from dbobj.dbobj import dbRecord, dbTable
from dbobj.dbexceptions import dbRecordNotFoundException
from dbobj.dbtypes import *
from string import lstrip, rstrip
import re
True = 1
False = 0
try:
import kuraapp
from lng_elmt import lng_element, lng_elements
from lng_sttg import lng_stream_tag
import docbook
except ImportError:
import kuralib.kuraapp
from kuralib.lng_elmt import lng_element, lng_elements
from kuralib.lng_sttg import lng_stream_tag
import kuralib.docbook
class lng_stream(dbRecord):
def __init__(self, app, **args):
if args.has_key("fields"):
dbRecord.__init__(self, app, "lng_stream", args["fields"])
else:
dbRecord.__init__(self, app, "lng_stream", args)
self.tags=[]
self.elements=[]
self.parseSets=[]
def __hash__(self):
return id(self)
def __cmp__(self, other):
if other == None:
return -1
if self.seqnr < other.seqnr:
return -1
elif self.seqnr == other.seqnr:
return 0
else:
return 1
def translation(self):
for tag in self.app.getObjects("lng_stream_tag",
streamnr = self.streamnr):
if tag.tag=="TR" or tag.tag=="TRS" or tag.tag=="TRANS":
return tag.getDescription()
return "-"
def note(self):
if (self.tags==[] or self.tags==None):
self.getTags()
for tag in self.tags:
if tag.tag=="NOTE":
return tag.getDescription()
def getTag(self, tag):
try:
tagRecord=self.app.getObject("lng_stream_tag",
streamnr=self.streamnr,
tag=tag)
except dbRecordNotFoundException, error:
tagRecord=error.queryRec
return tagRecord
def getTags(self):
self.tags = self.getChildren("lng_stream_tag")
return self.tags
def getElements(self, all = False):
if self.streamnr <> None:
els = self.app.getObjects("lng_element",
streamnr = self.streamnr)
if all:
return els
result = []
for el in els:
if not el.parent_elementnr:
result.append(el)
result.sort()
return result
def getLink(self):
return """<!--@query = kuraapp.app.getObject("lng_stream" , streamnr=%i)
result = query.asDocbook(asExample=1,simple=1) -->""" % self.streamnr
def asDocbook(self, asExample = False, simple=True):
"""
Returns a unicode string containing a stream marked up
as docbook.
Streams can be used in running text (asExample == false)
or in example sentences in the body of the grammar (asExample == true).
In the latter case, you need to encase the result in an
<example><title></title>stream</example> block. There will be a footnote
that points to the source text for this example.
"""
if simple == 1:
return self.simpleDocBook(asExample)
elif simple == 2:
return self.fopDocBook()
else:
return self.tableDocBook(asExample)
def __pad(self, s, l):
if len(s) == l:
return s
if len(s) > l:
raise "Cannot pad %s to length %i" % (s, l)
return s + (" " * (l - len(s)))
def getInterlinearLines(self, elements):
result = []
l1 = ""
l2 = ""
totlen = 0
for e in elements:
t = e.text
g = e.translation()
l = max(len(t), len(g))
totlen = totlen + l
if totlen > 65:
result.append(l1)
result.append(l2)
result.append("")
l1 = ""
l2 = ""
totlen = l
l1 = l1 + self.__pad(t, l + 1)
l2 = l2 + self.__pad(g, l + 1)
result.append(l1)
result.append(l2)
return "\n".join(result)
def fopDocBook(self):
doc = []
els = self.getElements()
doc.append("""<informaltable pgwide="0" frame="none"><tgroup cols="1">
<tbody><row>""")
s1 = []
s2 = []
for el in els:
s1.append(el.text)
s2.append(el.translation())
doc.append("<entry>%s</entry></row><row>" % "\t".join(s1))
doc.append("<entry>%s</entry>" % "\t".join(s2))
doc.append("</row><row><entry><emphasis>%s</emphasis></entry></row></tbody></tgroup></informaltable>" % docbook.filter(self.translation()))
return "\n".join(doc)
def simpleDocBook(self, asExample):
doc = []
els = self.getElements()
if asExample:
doc.append("""<para><programlisting>%s</programlisting>""" % (self.getInterlinearLines(els)))
else:
doc.append("""<para id="stream_%i"><programlisting>%s</programlisting>""" % (self.streamnr,
self.getInterlinearLines(els)))
note = self.note()
if note:
doc.append(u"%s<footnote><para>%s</para></footnote>" %
(docbook.filter(self.translation()), docbook.filter(note)))
else:
doc.append(docbook.filter(self.translation()))
if asExample:
doc.append("""<footnote><para><link linkend="text_%i">%s, line %i.</link></para></footnote>""" %
(self.textnr,
self.title,
self.seqnr + 1))
doc.append("</para>")
return "\n".join(doc)
def tableDocBook(self, asExample):
doc = []
els = self.getElements()
if not asExample:
doc.append(u"""<para id="stream_%i"><informaltable colsep="0" frame="none" rowsep="0">""" % self.streamnr)
else:
doc.append(u"""<para><informaltable colsep="0" frame="none" rowsep="0">""")
doc.append(u"""<tgroup align="left" cols="%i">
<tbody valign="top">""" % len(els))
doc.append(u"""<row>""")
w = 0
for e in els:
if e.parent_elementnr == 0:
w = w + len(e)
if w > 60:
doc.append("</row><row>")
w = 0
doc.append(e.asDocbook())
note = self.note()
if note:
doc.append(u"</row></tbody></tgroup></informaltable>%s<footnote><para>%s</para></footnote>" %
(docbook.filter(self.translation()), docbook.filter(note)))
else:
doc.append(u"</row></tbody></tgroup></informaltable>%s" % docbook.filter(self.translation()))
if asExample:
doc.append("""<footnote><para><link linkend="text_%i">%s, line %i.</link></para></footnote>""" %
(self.textnr,
self.title,
self.seqnr + 1))
doc.append("</para>")
return u"\n".join(doc)
class lng_streams(dbTable):
"""
Collection class for all stream data.
"""
def __init__(self, app):
dbTable.__init__(self, app, table="lng_stream", recObj=lng_stream)
def select(self, queryRec, orderBy = None):
dbTable.select(self, queryRec, orderBy = None)
self.rows.sort()
def insert(self, textnr, languagenr, streamTexts=[]):
seqnr=0
for streamText in streamTexts:
if streamText <> "":
stream=lng_stream( self.app
, textnr=textnr
, languagenr=languagenr
, seqnr=seqnr
, text=rstrip(lstrip(streamText))
, usernr=kuraapp.app.fieldDefaults["usernr"]
)
stream.insert(checkIntegrity=FALSE)
stream.splitText()
seqnr=seqnr + 1
def export(self, query, format, simple = True):
doc = []
if format == "docbook":
doc.append('')
self.select(query, "order by textnr, seqnr")
languagenr = -1
section = -1
for r in self.rows:
doc.append(r.asDocbook(True, simple))
return u"\n".join(doc)
else:
raise NotImplementedError("Export in format %s not yet implemented" % format)
__copyright__="""
copyright : (C) 2002 by Boudewijn Rempt
see copyright notice for license
email : [email protected]
"""
__revision__="""$Revision: 1.24 $"""[11:-2]
| bsd-2-clause | -1,322,527,140,797,956,400 | 29.019011 | 143 | 0.561748 | false |
pixyj/feel | django-server/feel/codequiz/migrations/0005_auto_20151223_1339.py | 1 | 1605 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-23 13:39
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('codequiz', '0004_auto_20151222_1810'),
]
operations = [
migrations.CreateModel(
name='CodeQuizAttempt',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('user_key', models.CharField(db_index=True, max_length=40)),
('state', models.IntegerField(choices=[(0, 'NOT_EVALUATED'), (1, 'EVALUATING'), (2, 'EVALUATED'), (3, 'EVALUATION_FAILED')], default=0)),
('code', models.TextField()),
('result', models.BooleanField(default=False)),
('test_cases_results', django.contrib.postgres.fields.jsonb.JSONField()),
('created_at', models.DateTimeField()),
('codequiz', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='codequiz.CodeQuiz')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='codequizattempt',
unique_together=set([('codequiz', 'user_key', 'code')]),
),
]
| mit | 8,521,793,478,062,373,000 | 41.236842 | 153 | 0.616199 | false |
tperrier/mwachx | utils/models.py | 1 | 2826 | # Python Imports
# Django Imports
from django.db import models
from django.core.exceptions import ObjectDoesNotExist
from django.utils import timezone
# Create your models here.
class TimeStampedModel(models.Model):
"""
An abstract base class for storing created/modified metadata with a model
"""
# The date and time this message was created or modified
created = models.DateTimeField(default=timezone.now, editable=False)
modified = models.DateTimeField(auto_now=True)
def created_str(self, format='%Y-%m-%d %H:%M'):
return self.created.strftime(format)
@property
def created_date(self):
return self.created.date()
@property
def modifed_date(self):
return self.modified.date()
class Meta:
abstract = True
ordering = ['-created']
class BaseQuerySet(models.QuerySet):
"""
A simple query set that adds some utility functions for getting objects
with default values.
"""
def get_or_none(self,**kwargs):
return self.get_or_default(None,**kwargs)
def get_or_default(self,default=None,**kwargs):
try:
return self.get(**kwargs)
except ObjectDoesNotExist:
return default
class ForUserQuerySet(BaseQuerySet):
NO_SMS_STATUS = ('stopped','other','sae','quit')
NOT_ACTIVE_STATUS = NO_SMS_STATUS + ('completed',)
participant_field = 'participant'
def for_user(self,user, superuser=False):
if superuser and user.is_superuser:
return self.all()
# Get facility or return no participants if there is no facility
try:
facility = user.practitioner.facility
except (ObjectDoesNotExist) as e:
return self.none()
return self.by_facility(facility)
def by_facility(self,facility):
return self.filter(self._participant_Q(facility=facility))
def active_users(self):
''' Filter queryset based on active users who should receive SMS messages.'''
q = self._participant_Q(status__in=ForUserQuerySet.NO_SMS_STATUS)
return self.exclude(q)
def pregnant(self):
q = self._participant_Q(status__in=('pregnant','over'))
return self.filter(q)
def post_partum(self):
q = self._participant_Q(status__in=('post','ccc'))
return self.filter(q)
def _participant_Q(self,**kwargs):
''' Return a Q object with participant_field appended
Example: participant_Q(study_group='two-way',is_validated=False)
returns: Q(participant__study_group='two-way',participant__is_validated=False)
'''
prefix = self.participant_field+'__' if self.participant_field is not None else ''
kwargs = {prefix+key:value for key,value in kwargs.items()}
return models.Q(**kwargs)
| apache-2.0 | -5,306,527,070,767,118,000 | 30.4 | 90 | 0.651097 | false |
muendelezaji/workload-automation | wlauto/workloads/reader/__init__.py | 1 | 7324 | # Copyright 2014-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import logging
import re
import time
from wlauto import AndroidUiAutoBenchmark, Parameter
from wlauto.exceptions import DeviceError
from wlauto.exceptions import NotFoundError
__version__ = '0.1.0'
class Reader(AndroidUiAutoBenchmark):
activity = 'com.adobe.reader.AdobeReader'
name = 'reader'
package = 'com.adobe.reader'
view = [package+'/com.adobe.reader.help.AROnboardingHelpActivity',
package+'/com.adobe.reader.viewer.ARSplitPaneActivity',
package+'/com.adobe.reader.viewer.ARViewerActivity']
description = """
The Adobe Reader workflow carries out the following typical productivity tasks using
Workload-Automation.
Test description:
1. Open a local file on the device. The following steps are instrumented:
1. Select the local files list menu
2. Select the search button
2. Search for a specific file from within the list
3. Open the selected file
2. Gestures test - measurements of fps, jank and other frame statistics, via dumpsys, are
captured for the following swipe and pinch gestures:
1. Swipe down across the central 50% of the screen in 200 x 5ms steps
2. Swipe up across the central 50% of the screen in 200 x 5ms steps
3. Swipe right from the edge of the screen in 50 x 5ms steps
4. Swipe left from the edge of the screen in 50 x 5ms steps
5. Pinch out 50% in 100 x 5ms steps
6. Pinch In 50% in 100 x 5ms steps
3. Repeat the open file step 1.
4. Search test - a test measuring the time taken to search a large 100+ page mixed content
document for specific strings.
1. Search document_name for first_search_word
2. Search document_name for second_search_word
"""
parameters = [
Parameter('dumpsys_enabled', kind=bool, default=True,
description="""
If ``True``, dumpsys captures will be carried out during the
test run. The output is piped to log files which are then
pulled from the phone.
"""),
Parameter('email', kind=str, default="[email protected]",
description="""
Email account used to register with Adobe online services.
"""),
Parameter('password', kind=str, default="password",
description="""
Password for Adobe online services.
"""),
Parameter('document_name', kind=str, default="Getting_Started.pdf",
description="""
The document name to use for the Gesture and Search test.
Note: spaces must be replaced with underscores in the document name.
"""),
Parameter('first_search_word', kind=str, default="read",
description="""
The first test string to use for the word search test.
Note: Accepts single words only.
"""),
Parameter('second_search_word', kind=str, default="the",
description="""
The second test string to use for the word search test.
Note: Accepts single words only.
"""),
]
instrumentation_log = ''.join([name, '_instrumentation.log'])
def validate(self):
super(Reader, self).validate()
self.output_file = os.path.join(self.device.working_directory, self.instrumentation_log)
self.uiauto_params['package'] = self.package
self.uiauto_params['output_dir'] = self.device.working_directory
self.uiauto_params['output_file'] = self.output_file
self.uiauto_params['email'] = self.email
self.uiauto_params['password'] = self.password
self.uiauto_params['dumpsys_enabled'] = self.dumpsys_enabled
self.uiauto_params['filename'] = self.document_name
self.uiauto_params['first_search_word'] = self.first_search_word
self.uiauto_params['second_search_word'] = self.second_search_word
def initialize(self, context):
super(Reader, self).initialize(context)
if not self.device.is_network_connected():
raise DeviceError('Network is not connected for device {}'.format(self.device.name))
self.reader_local_dir = self.device.path.join(self.device.external_storage_directory,
'Android/data/com.adobe.reader/files/')
# Check for workload dependencies before proceeding
pdf_files = [entry for entry in os.listdir(self.dependencies_directory) if entry.endswith(".pdf")]
if not len(pdf_files):
raise NotFoundError("Cannot find {} file(s) in {}".format('pdf', self.dependencies_directory))
else:
for entry in pdf_files:
self.device.push_file(os.path.join(self.dependencies_directory, entry),
os.path.join(self.reader_local_dir, entry),
timeout=300)
def update_result(self, context):
super(Reader, self).update_result(context)
self.device.pull_file(self.output_file, context.output_directory)
result_file = os.path.join(context.output_directory, self.instrumentation_log)
with open(result_file, 'r') as wfh:
regex = re.compile(r'(?P<key>\w+)\s+(?P<value1>\d+)\s+(?P<value2>\d+)\s+(?P<value3>\d+)')
for line in wfh:
match = regex.search(line)
if match:
context.result.add_metric((match.group('key') + "_start"),
match.group('value1'), units='ms')
context.result.add_metric((match.group('key') + "_finish"),
match.group('value2'), units='ms')
context.result.add_metric((match.group('key') + "_duration"),
match.group('value3'), units='ms')
def teardown(self, context):
super(Reader, self).teardown(context)
for entry in self.device.listdir(self.device.working_directory):
if entry.endswith(".log"):
self.device.pull_file(os.path.join(self.device.working_directory, entry), context.output_directory)
self.device.delete_file(os.path.join(self.device.working_directory, entry))
def finalize(self, context):
super(Reader, self).finalize(context)
for entry in self.device.listdir(self.reader_local_dir):
if entry.endswith(".pdf"):
self.device.delete_file(os.path.join(self.reader_local_dir, entry))
| apache-2.0 | 5,356,216,809,189,931,000 | 44.775 | 115 | 0.61237 | false |
ddu7/PyLC | 023Merge k Sorted Lists.py | 1 | 1824 | # -*- coding: utf-8 -*-
# Merge k sorted linked lists and return it as one sorted list. Analyze and describe its complexity.
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
def __repr__(self):
if self:
return "{} -> {}".format(self.val, repr(self.next))
# This solution uses the heapq module: by keeping the heap invariant and repeatedly popping the smallest value, the lists are merged in sorted order.
import heapq
class Solution(object):
def mergeKLists(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
dummy = ListNode(0)
current = dummy
heap = []
        # First, push each already-sorted list onto the heap; every heap entry is a (value, list node) tuple.
for sorted_list in lists:
if sorted_list:
heapq.heappush(heap, (sorted_list.val, sorted_list))
        # Pop the smallest entry each time, hand its node to smallest, and attach it to the result via current.
while heap:
smallest = heapq.heappop(heap)[1]
current.next = smallest
current = current.next
            # Debugging shows that each pop attaches the whole remaining chain of the popped list,
            # but once current.next is reassigned, only the nodes linked from dummy matter;
            # advancing current is what actually extends the merged list hanging off dummy.
            # If the popped list still has nodes left, push its remainder (minus the head) back onto the heap.
if smallest.next:
heapq.heappush(heap, (smallest.next.val, smallest.next))
return dummy.next
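# Note added for clarity (not part of the original solution): the opening comment asks
# for a complexity analysis. A hedged sketch: with k lists and N nodes in total, every
# node is pushed onto and popped from a heap that never holds more than k entries, so
# the running time is O(N log k) and the extra space is O(k) for the heap.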
list1 = ListNode(1)
list1.next = ListNode(3)
list2 = ListNode(2)
list2.next = ListNode(4)
print Solution().mergeKLists([list1, list2])
| mit | -7,032,749,693,135,191,000 | 30.574468 | 100 | 0.608491 | false |
Mariusz1970/enigma2 | lib/python/Tools/Profile.py | 1 | 1710 | # the implementation here is a bit crappy.
import time
from Directories import resolveFilename, SCOPE_CONFIG
from boxbranding import getBoxType
boxtype = getBoxType()
PERCENTAGE_START = 50
PERCENTAGE_END = 100
profile_start = time.time()
profile_data = {}
total_time = 1
profile_file = None
try:
f = open(resolveFilename(SCOPE_CONFIG, "profile"), "r")
profile_old = f.readlines()
f.close()
t = None
for line in profile_old:
(t, id) = line[:-1].split('\t')
t = float(t)
total_time = t
profile_data[id] = t
except:
print "no profile data available"
try:
profile_file = open(resolveFilename(SCOPE_CONFIG, "profile"), "w")
except IOError:
print "WARNING: couldn't open profile file!"
def profile(id):
now = time.time() - profile_start
if profile_file:
profile_file.write("%7.3f\t%s\n" % (now, id))
if id in profile_data:
t = profile_data[id]
if total_time:
perc = t * (PERCENTAGE_END - PERCENTAGE_START) / total_time + PERCENTAGE_START
else:
perc = PERCENTAGE_START
try:
if boxtype == "odinm7" or boxtype == "odinm6" or boxtype == "xp1000s":
f = open("/dev/dbox/oled0", "w")
f.write("%d" % perc)
elif boxtype == "gb800se" or boxtype == "gb800solo":
f = open("/dev/dbox/oled0", "w")
f.write("%d \n" % perc)
elif boxtype == "gb800seplus":
f = open("/dev/mcu", "w")
f.write("%d \n" % perc)
elif boxtype == "ebox5000":
f = open("/proc/progress", "w")
f.write("%d" % perc)
else:
f = open("/proc/progress", "w")
f.write("%d \n" % perc)
f.close()
except IOError:
pass
def profile_final():
global profile_file
if profile_file is not None:
profile_file.close()
profile_file = None
| gpl-2.0 | 2,877,156,768,245,591,000 | 23.084507 | 82 | 0.627485 | false |
weka511/bioinformatics | DDEG.py | 1 | 1236 | # Copyright (C) 2017 Greenweaves Software Pty Ltd
# This is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs. If not, see <http://www.gnu.org/licenses/>
from graphs import ddeg
#n=5
#m=4
#A=[[1, 2],
#[2, 3],
#[4, 3],
#[2, 4]]
#print(ddeg(n,m,A))
if __name__=='__main__':
import timeit
start_time = timeit.default_timer()
with open('c:/Users/Weka/Downloads/rosalind_ddeg.txt') as f:
A=[]
for line in f:
text=line.strip()
pair=text.split(' ')
print (pair)
A.append((int(pair[0]),int(pair[1])))
(n,m)=A[0]
print (ddeg(n,m,A[1:]))
print ('Elapsed: {0} seconds'.format(timeit.default_timer() - start_time)) | gpl-3.0 | 1,793,962,163,063,936,000 | 28.452381 | 82 | 0.642395 | false |
nginx/unit | test/unit/applications/lang/go.py | 1 | 2247 | import os
import subprocess
from unit.applications.proto import TestApplicationProto
from unit.option import option
class TestApplicationGo(TestApplicationProto):
def prepare_env(self, script, name, static=False):
if not os.path.exists(option.temp_dir + '/go'):
os.mkdir(option.temp_dir + '/go')
env = os.environ.copy()
env['GOPATH'] = option.current_dir + '/build/go'
env['GOCACHE'] = option.cache_dir + '/go'
env['GO111MODULE'] = 'auto'
if static:
args = [
'go',
'build',
'-tags',
'netgo',
'-ldflags',
'-extldflags "-static"',
'-o',
option.temp_dir + '/go/' + name,
option.test_dir + '/go/' + script + '/' + name + '.go',
]
else:
args = [
'go',
'build',
'-o',
option.temp_dir + '/go/' + name,
option.test_dir + '/go/' + script + '/' + name + '.go',
]
if option.detailed:
print("\n$ GOPATH=" + env['GOPATH'] + " " + " ".join(args))
try:
process = subprocess.Popen(args, env=env)
process.communicate()
except KeyboardInterrupt:
raise
except:
return None
return process
def load(self, script, name='app', **kwargs):
static_build = False
wdir = option.test_dir + "/go/" + script
executable = option.temp_dir + "/go/" + name
if 'isolation' in kwargs and 'rootfs' in kwargs['isolation']:
wdir = "/go/"
executable = "/go/" + name
static_build = True
self.prepare_env(script, name, static=static_build)
conf = {
"listeners": {"*:7080": {"pass": "applications/" + script}},
"applications": {
script: {
"type": "external",
"processes": {"spare": 0},
"working_directory": wdir,
"executable": executable,
},
},
}
self._load_conf(conf, **kwargs)
| apache-2.0 | -5,466,656,760,982,017,000 | 27.443038 | 72 | 0.449933 | false |
Carpetsmoker/qutebrowser | tests/unit/javascript/position_caret/test_position_caret.py | 1 | 3598 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for position_caret.js."""
import pytest
# FIXME:qtwebengine Make these tests use the tab API
pytest.importorskip('PyQt5.QtWebKit')
from PyQt5.QtCore import Qt
from PyQt5.QtWebKit import QWebSettings
from PyQt5.QtWebKitWidgets import QWebPage
@pytest.fixture(autouse=True)
def enable_caret_browsing(qapp):
"""Fixture to enable caret browsing globally."""
settings = QWebSettings.globalSettings()
old_value = settings.testAttribute(QWebSettings.CaretBrowsingEnabled)
settings.setAttribute(QWebSettings.CaretBrowsingEnabled, True)
yield
settings.setAttribute(QWebSettings.CaretBrowsingEnabled, old_value)
class CaretTester:
"""Helper class (for the caret_tester fixture) for asserts.
Attributes:
js: The js_tester fixture.
"""
def __init__(self, js_tester):
self.js = js_tester
def check(self):
"""Check whether the caret is before the MARKER text."""
self.js.run_file('position_caret.js')
self.js.webview.triggerPageAction(QWebPage.SelectNextWord)
assert self.js.webview.selectedText().rstrip() == "MARKER"
def check_scrolled(self):
"""Check if the page is scrolled down."""
frame = self.js.webview.page().mainFrame()
minimum = frame.scrollBarMinimum(Qt.Vertical)
value = frame.scrollBarValue(Qt.Vertical)
assert value > minimum
@pytest.fixture
def caret_tester(js_tester_webkit):
"""Helper fixture to test caret browsing positions."""
caret_tester = CaretTester(js_tester_webkit)
# Showing webview here is necessary for test_scrolled_down_img to
# succeed in some cases, see #1988
caret_tester.js.webview.show()
return caret_tester
@pytest.mark.integration
def test_simple(caret_tester):
"""Test with a simple (one-line) HTML text."""
caret_tester.js.load('position_caret/simple.html')
caret_tester.check()
@pytest.mark.integration
def test_scrolled_down(caret_tester):
"""Test with multiple text blocks with the viewport scrolled down."""
caret_tester.js.load('position_caret/scrolled_down.html')
caret_tester.js.scroll_anchor('anchor')
caret_tester.check_scrolled()
caret_tester.check()
@pytest.mark.integration
@pytest.mark.parametrize('style', ['visibility: hidden', 'display: none'])
def test_invisible(caret_tester, style):
"""Test with hidden text elements."""
caret_tester.js.load('position_caret/invisible.html', style=style)
caret_tester.check()
@pytest.mark.integration
def test_scrolled_down_img(caret_tester):
"""Test with an image at the top with the viewport scrolled down."""
caret_tester.js.load('position_caret/scrolled_down_img.html')
caret_tester.js.scroll_anchor('anchor')
caret_tester.check_scrolled()
caret_tester.check()
| gpl-3.0 | -5,318,296,828,640,175,000 | 32.626168 | 74 | 0.72179 | false |
jcastillocano/python-route53 | tests/test_basic.py | 1 | 10078 | import unittest
import route53
from route53.exceptions import AlreadyDeletedError
from route53.transport import BaseTransport
from tests.utils import get_route53_connection
import datetime
import os
class BaseTestCase(unittest.TestCase):
"""
A base unit test class that has some generally useful stuff for the
various test cases.
"""
CONNECTION_OPTIONS = {}
test_zone_name = 'route53-unittest-zone.com.'
def setUp(self):
self.conn = get_route53_connection(**self.CONNECTION_OPTIONS)
self.submittedAt = datetime.datetime.now()
class DummyTransport(route53.transport.BaseTransport):
def __init__(self, *args, **kwargs):
super(DummyTransport, self).__init__(*args, **kwargs)
self.response = []
def set_response(self, response):
self.response.append(response)
def set_response_from_file(self, response_file, **kwargs):
response_path = os.path.join(os.path.dirname(__file__), 'responses', response_file)
self.response.append(open(response_path, 'r').read() % kwargs)
def _send_get_request(self, path, params, headers):
#print "\n-- GET Method --\n - path: %s\n - params: %s\n - headers: %s" % (path, params, headers)
return self.response.pop(0)
def _send_post_request(self, path, params, headers):
#print "\n-- POST Method --\n - path: %s\n - params: %s\n - headers: %s" % (path, params, headers)
return self.response.pop(0)
def _send_delete_request(self, path, headers):
#print "\n-- DELETE Method --\n - path: %s\n - headers: %s" % (path, headers)
return self.response.pop(0)
class BaseTransportTestCase(unittest.TestCase):
"""
Tests for the various HTTP transports.
"""
def test_hmac_signing(self):
"""
Makes sure our HMAC signing methods are matching expected output
for a pre-determined key/value.
"""
conn = route53.connect(
aws_access_key_id='BLAHBLAH',
aws_secret_access_key='wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY'
)
trans = BaseTransport(conn)
signed = trans._hmac_sign_string('Thu, 14 Aug 2008 17:08:48 GMT')
self.assertEquals(signed, 'PjAJ6buiV6l4WyzmmuwtKE59NJXVg5Dr3Sn4PCMZ0Yk=')
class HostedZoneTestCase(BaseTestCase):
"""
Tests for manipulating hosted zones.
"""
CONNECTION_OPTIONS = {'transport_class':DummyTransport}
def test_sequence(self):
"""
Runs through a sequence of calls to test hosted zones.
"""
self.conn._transport.set_response_from_file('CreateHostedZoneResponse.xml', SubmittedAt=self.submittedAt.strftime('%Y-%m-%dT%H:%M:%SZ'))
# Create a new hosted zone.
new_zone, change_info = self.conn.create_hosted_zone(
self.test_zone_name, comment='A comment here.'
)
# Make sure the change info came through.
self.assertIsInstance(change_info, dict)
self.conn._transport.set_response_from_file('ListHostedZonesResponse.xml')
self.conn._transport.set_response_from_file('GetHostedZoneResponse.xml')
# Now get a list of all zones. Look for the one we just created.
found_match = False
for zone in self.conn.list_hosted_zones():
if zone.name == new_zone.name:
found_match = True
# ListHostedZones doesn't return nameservers.
# We lazy load them in this case. Initially, the nameservers
# are empty.
self.assertEqual(zone._nameservers, [])
# This should return the nameservers
self.assertNotEqual(zone.nameservers, [])
# This should now be populated.
self.assertNotEqual(zone._nameservers, [])
break
# If a match wasn't found, we're not happy.
self.assertTrue(found_match)
self.conn._transport.set_response_from_file('GetHostedZoneResponse.xml')
# Now attempt to retrieve the newly created HostedZone.
zone = self.conn.get_hosted_zone_by_id(new_zone.id)
# Its nameservers should be populated.
self.assertNotEqual([], zone.nameservers)
self.conn._transport.set_response_from_file('DeleteHostedZoneResponse.xml', SubmittedAt=self.submittedAt.strftime('%Y-%m-%dT%H:%M:%SZ'))
zone.delete()
# Trying to delete a second time raises an exception.
self.assertRaises(AlreadyDeletedError, zone.delete)
# Attempting to add a record set to an already deleted zone does the same.
self.assertRaises(AlreadyDeletedError,
zone.create_a_record,
'test.' + self.test_zone_name,
['8.8.8.8']
)
class ResourceRecordSetTestCase(BaseTestCase):
"""
Tests related to RRSets. Deletions are tested in the cleanUp() method,
on the base class, more or less.
"""
CONNECTION_OPTIONS = {'transport_class':DummyTransport}
def test_create_rrset(self):
"""
Tests creation of various record sets.
"""
self.conn._transport.set_response_from_file('CreateHostedZoneResponse.xml', SubmittedAt=self.submittedAt.strftime('%Y-%m-%dT%H:%M:%SZ'))
new_zone, change_info = self.conn.create_hosted_zone(
self.test_zone_name
)
self.assertIsInstance(change_info, dict)
self.assertEqual(change_info['request_status'], 'INSYNC')
self.assertEqual(change_info['request_submitted_at'].year, self.submittedAt.year)
self.assertEqual(change_info['request_id'], '/change/unique identifier for the change batch request')
self.assertIsInstance(new_zone, route53.hosted_zone.HostedZone)
self.conn._transport.set_response_from_file('GetChangeResponse.xml', SubmittedAt=self.submittedAt.strftime('%Y-%m-%dT%H:%M:%SZ'))
new_record, change_info = new_zone.create_a_record(
name='test.route53-unittest-zone.com.',
values=['8.8.8.8'],
ttl=40,
# weight=10
)
self.assertIsInstance(change_info, dict)
self.assertEqual(change_info['request_status'], 'PENDING')
self.assertEqual(change_info['request_submitted_at'].year, self.submittedAt.year)
self.assertEqual(change_info['request_id'], 'unique identifier for the change batch request')
self.assertIsInstance(new_record, route53.hosted_zone.AResourceRecordSet)
# Initial values should equal current values.
for key, val in new_record._initial_vals.items():
self.assertEqual(getattr(new_record, key), val)
def test_change_existing_rrset(self):
"""
Tests changing an existing record set.
"""
self.conn._transport.set_response_from_file('CreateHostedZoneResponse.xml', SubmittedAt = self.submittedAt.strftime('%Y-%m-%dT%H:%M:%SZ'))
new_zone, change_info = self.conn.create_hosted_zone(
self.test_zone_name
)
self.conn._transport.set_response_from_file('GetChangeResponse.xml', SubmittedAt = self.submittedAt.strftime('%Y-%m-%dT%H:%M:%SZ'))
new_record, change_info = new_zone.create_a_record(
name='test.route53-unittest-zone.com.',
values=['8.8.8.8'],
)
self.assertIsInstance(change_info, dict)
self.assertEqual(change_info['request_status'], 'PENDING')
self.assertEqual(change_info['request_submitted_at'].year, self.submittedAt.year)
self.assertEqual(change_info['request_id'], 'unique identifier for the change batch request')
self.assertIsInstance(new_record, route53.hosted_zone.AResourceRecordSet)
new_record.values = ['8.8.8.7']
self.conn._transport.set_response_from_file('GetChangeResponse.xml', SubmittedAt = self.submittedAt.strftime('%Y-%m-%dT%H:%M:%SZ'))
new_record.save()
# Initial values should equal current values after the save.
for key, val in new_record._initial_vals.items():
self.assertEqual(getattr(new_record, key), val)
class HealthTestTestCase(BaseTestCase):
"""
Tests for manipulating health check.
"""
CONNECTION_OPTIONS = {'transport_class':DummyTransport}
def test_list_health_checks(self):
self.conn._transport.set_response_from_file('ListHealthChecksResponse.xml')
for health_check in self.conn.list_health_checks():
self.assertEqual(health_check.id, 'Test Health Check ID')
def test_create_health_check(self):
self.conn._transport.set_response_from_file('CreateHealthCheckResponse.xml')
# Create a new health check.
ipaddress = '1.2.3.4'
port = 80
type = 'HTTP'
resource_path = '/health_check'
fqdn = 'www.tuguu.com'
search_string = 'alive'
new_health_check = self.conn.create_health_check(
ipaddress, port, type, resource_path, fqdn, search_string
)
self.assertIsInstance(new_health_check, route53.health_check.HealthCheck)
def test_get_health_check(self):
self.conn._transport.set_response_from_file('GetHealthCheckResponse.xml')
health_check_id = 'Test Health Check'
new_health_check = self.conn.get_health_check_by_id(health_check_id)
self.assertEqual(new_health_check.id, health_check_id)
self.assertIsInstance(new_health_check, route53.health_check.HealthCheck)
def test_delete_health_check(self):
self.conn._transport.set_response_from_file('GetHealthCheckResponse.xml')
self.conn._transport.set_response_from_file('DeleteHealthCheckResponse.xml', SubmittedAt = self.submittedAt.strftime('%Y-%m-%dT%H:%M:%SZ'))
health_check_id = 'Test Health Check'
new_health_check = self.conn.get_health_check_by_id(health_check_id)
self.assertEqual(new_health_check.id, health_check_id)
self.assertIsInstance(new_health_check, route53.health_check.HealthCheck)
new_health_check.delete()
self.assertRaises(AlreadyDeletedError, new_health_check.delete)
| mit | -1,747,630,431,028,824,800 | 41.885106 | 147 | 0.650724 | false |
rueckstiess/dopamine | scripts/fa_test2.py | 1 | 1408 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from random import shuffle
from dopamine.fapprox import *
X_data, Y_data = np.meshgrid(np.arange(-1, 1, 0.3), np.arange(-1, 1, 0.3))
Z_data = np.sin(5*X_data) * np.cos(Y_data) + np.random.normal(0, 0.2, X_data.shape)
plt.ion()
# ax.plot_wireframe(X_data, Y_data, Z_data, cmap=plt.cm.jet, antialiased=True)
models = ['Linear', 'RBF', 'KNN', 'PyBrainNN', 'LWPRFA']
X_model, Y_model = np.meshgrid(np.arange(-1.1, 1.1, 0.05), np.arange(-1.1, 1.1, 0.05))
for mClass in models:
reps = 1
if mClass == 'LWPRFA':
reps = 20
# plot data points
fig = plt.figure()
ax = axes3d.Axes3D(fig)
ax.scatter(X_data.flatten(), Y_data.flatten(), Z_data.flatten(), 'o')
model = eval(mClass + '(2, 1)')
# train model on data
for _ in range(reps):
data3 = zip(X_data.flatten(), Y_data.flatten(), Z_data.flatten())
shuffle(data3)
for x, y, z in data3:
model.update(np.array([x, y]), np.array([z]))
model.train()
# plot results
Z_model = np.array([model.predict(np.array([x,y]))[0] for x,y in zip(X_model.flatten(), Y_model.flatten())])
Z_model = Z_model.reshape(X_model.shape)
ax.plot_wireframe(X_model, Y_model, Z_model, cmap=plt.cm.jet, antialiased=True)
plt.title(mClass)
plt.gcf().canvas.draw()
plt.show() | gpl-3.0 | 4,167,445,729,330,403,300 | 30.311111 | 112 | 0.612926 | false |
jokebill/imagewall | loadwall.py | 1 | 2391 | #!/usr/bin/env python
import os,sys
import cut_image
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-c","--center",
metavar="X,Y",
type=str,
default="0,0",
help="Center offset in percentage")
parser.add_option("-r","--ratio",
metavar="X:Y",
type=str,
default="16:9",
help="Image ratio")
parser.add_option("-z","--zoom",
metavar="RATE",
type=float,
default=1.0,
help="Zoom rate")
parser.add_option("-s","--slices",
metavar="NxM",
type=str,
default="4x3",
help="Number of slices in row, column")
parser.add_option("-b","--boarder",
metavar="PX,PY",
type=str,
default="0.04,0.08",
help="Boarder width in percentage")
parser.add_option("-o","--output",
metavar="DIR",
type=str,
default="./output",
help="Output folder"
)
parser.add_option("--resolution",
metavar="pX,pY",
type=str,
default="1600,1200",
help="Slice resolution")
parser.add_option("--slaves",
metavar="FILE",
type=str,
default="slaves",
help="A file with all slaves' hostnames/ips")
parser.add_option("--remote",
metavar="PATH",
type=str,
default="~/wallpapers/wallpaper01.jpg",
help="File path and name for the remote picture tile"
)
options, args = parser.parse_args()
outdir = options.output
imgfile = args[0]
# clear output dir
import shutil
if os.path.exists(outdir):
shutil.rmtree(outdir)
os.makedirs(outdir)
cut_image.split_image(options, imgfile)
slaves=list()
if os.path.exists(options.slaves):
with open(options.slaves) as f:
for line in f:
lstr=line.strip()
            if lstr and lstr[0] != "#":
slaves.append(line.strip())
else:
raise Exception("Cannot find slave definition file")
localfiles = os.listdir(outdir)
localfiles.sort()
if len(slaves)<=len(localfiles):
raise Exception("Not enough slave nodes to dispatch tiles")
from subprocess import Popen
pipes=list()
for i,lf in enumerate(localfiles):
ip=slaves[i]
lfp = os.path.join(outdir,lf)
cmdstr=['scp', lfp, ip+":"+options.remote]
#print ' '.join(cmdstr)
p=Popen(cmdstr)
pipes.append(p)
exit_codes = [ p.wait() for p in pipes ]
sys.exit(max(exit_codes))
| mit | 4,022,014,293,897,294,000 | 25.274725 | 63 | 0.59724 | false |
pypa/warehouse | warehouse/migrations/versions/b00323b3efd8_uploaded_via_field_for_release_.py | 1 | 1091 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
uploaded_via field for Release and Files
Revision ID: b00323b3efd8
Revises: f2a453c96ded
Create Date: 2018-07-25 17:29:01.995083
"""
import sqlalchemy as sa
from alembic import op
revision = "b00323b3efd8"
down_revision = "f2a453c96ded"
def upgrade():
op.add_column("release_files", sa.Column("uploaded_via", sa.Text(), nullable=True))
op.add_column("releases", sa.Column("uploaded_via", sa.Text(), nullable=True))
def downgrade():
op.drop_column("releases", "uploaded_via")
op.drop_column("release_files", "uploaded_via")
| apache-2.0 | 6,722,949,270,996,561,000 | 30.171429 | 87 | 0.740605 | false |
sourcepole/kadas-albireo | python/__init__.py | 1 | 3151 | # -*- coding: utf-8 -*-
"""
***************************************************************************
__init__.py
---------------------
Date : January 2007
Copyright : (C) 2007 by Martin Dobias
Email : wonder dot sk at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Dobias'
__date__ = 'January 2007'
__copyright__ = '(C) 2007, Martin Dobias'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt5 import sip
try:
apis = ["QDate", "QDateTime", "QString", "QTextStream", "QTime", "QUrl", "QVariant"]
for api in apis:
sip.setapi(api, 2)
except ValueError:
# API has already been set so we can't set it again.
pass
from qgis.core import QgsFeature, QgsGeometry
try:
# Add a __nonzero__ method onto QPyNullVariant so we can check for null values easier.
# >>> value = QPyNullVariant("int")
# >>> if value:
# >>> print "Not a null value"
from types import MethodType
from PyQt4.QtCore import QPyNullVariant
def __nonzero__(self):
return False
def __repr__(self):
return 'NULL'
def __eq__(self, other):
return isinstance(other, QPyNullVariant) or other is None
def __ne__(self, other):
return not isinstance(other, QPyNullVariant) and other is not None
QPyNullVariant.__nonzero__ = MethodType(__nonzero__, None, QPyNullVariant)
QPyNullVariant.__repr__ = MethodType(__repr__, None, QPyNullVariant)
QPyNullVariant.__eq__= MethodType(__eq__, None, QPyNullVariant)
QPyNullVariant.__ne__= MethodType(__ne__, None, QPyNullVariant)
# define a dummy QPyNullVariant instance NULL in qgis.core
# this is mainly used to compare against
# so one can write if feat['attr'] == NULL:
from qgis import core
core.NULL = QPyNullVariant( int )
except ImportError:
pass
def mapping_feature(feature):
geom = feature.geometry()
properties = {}
fields = [field.name() for field in feature.fields()]
properties = dict(zip(fields, feature.attributes()))
return {'type' : 'Feature',
'properties' : properties,
'geometry' : geom.__geo_interface__}
def mapping_geometry(geometry):
geo = geometry.exportToGeoJSON()
# We have to use eval because exportToGeoJSON() gives us
# back a string that looks like a dictionary.
return eval(geo)
QgsFeature.__geo_interface__ = property(mapping_feature)
QgsGeometry.__geo_interface__ = property(mapping_geometry)
| gpl-2.0 | -3,245,017,442,672,318,500 | 34.806818 | 90 | 0.555062 | false |
APPIAN-PET/APPIAN | src/mnc2nii.py | 1 | 1555 | import os
import re
import nibabel as nib
import numpy as np
from glob import glob
from sys import argv
dirname=argv[1]
print(dirname)
### COLLECT ALL MNCs
all_fls = []
for dirs, things, fls in os.walk(dirname):
if len(fls) > 0:
for fl in fls:
all_fls.append(os.path.join(dirs,fl))
all_mncs = [x for x in all_fls if '.mnc' in x]
print('%s .mnc and .mnc.gz files found'%(len(all_mncs)))
### SEARCH TO SEE IF NIFTI VERSIONS ALREADY EXIST
already_done = []
for mnc in all_mncs:
print(mnc)
flnm = re.sub('.mnc.gz', '', re.sub('.mnc', '', mnc))
print(flnm)
ni = glob('%s.ni*'%flnm)
if len(ni) > 0:
already_done.append(mnc)
print('%s mncs already have a nifti version. Skipping these files...'%(len(already_done)))
[all_mncs.remove(x) for x in already_done]
print('the following files will be converted:')
[print(x) for x in all_mncs]
### TRANSFORM FILES
for mnc in all_mncs:
flnm = re.sub('.mnc', '', re.sub('.mnc.gz', '', mnc))
if mnc[-1] == 'z':
new_nm = '%s.nii.gz'%flnm
else:
new_nm = '%s.nii.gz'%flnm
print(new_nm)
img = nib.load(mnc)
data = img.get_data()
affine =img.affine
if len(data.shape) == 4 :
out = np.zeros( [ data.shape[1], data.shape[2], data.shape[3], data.shape[0] ] )
for t in range(data.shape[0]) :
out[:,:,:,t] = data[t,:,:,:]
else : out = data
nifti = nib.Nifti1Image(out, affine)
nifti.to_filename(new_nm)
print('converted %s to %s'%(mnc,new_nm))
#if ans:
# os.remove(mnc)
| mit | 4,713,257,596,897,754,000 | 24.916667 | 90 | 0.587781 | false |
softak/webfaction_demo | apps/cart/migrations/0008_auto__add_field_socialtag_paid.py | 1 | 9611 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SocialTag.paid'
db.add_column('cart_socialtag', 'paid', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=2, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'SocialTag.paid'
db.delete_column('cart_socialtag', 'paid')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cart.personaltag': {
'Meta': {'unique_together': "(('user', 'item', 'transaction'),)", 'object_name': 'PersonalTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'personal_tags'", 'to': "orm['stores.Item']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {}),
'transaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'personal_tags'", 'null': 'True', 'to': "orm['cart.Transaction']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'personal_tags'", 'to': "orm['auth.User']"})
},
'cart.socialbuy': {
'Meta': {'object_name': 'SocialBuy'},
'finish_date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'store': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'social_buys'", 'to': "orm['stores.Store']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'social_buys'", 'to': "orm['auth.User']"})
},
'cart.socialtag': {
'Meta': {'unique_together': "(('user', 'item', 'buy', 'transaction'),)", 'object_name': 'SocialTag'},
'buy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tags'", 'to': "orm['cart.SocialBuy']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'social_tags'", 'to': "orm['stores.Item']"}),
'paid': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {}),
'transaction': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'social_tags'", 'null': 'True', 'to': "orm['cart.Transaction']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'social_tags'", 'to': "orm['auth.User']"})
},
'cart.transaction': {
'Meta': {'object_name': 'Transaction'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pay_key': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'payment_details': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'stores.category': {
'Meta': {'object_name': 'Category'},
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marker': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'stores.item': {
'Meta': {'object_name': 'Item'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'discount': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_out_of_stock': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'store': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['stores.Store']"})
},
'stores.shoppingregion': {
'Meta': {'object_name': 'ShoppingRegion'},
'center': ('django.contrib.gis.db.models.fields.PointField', [], {'spatial_index': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'zoom': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'stores.store': {
'Meta': {'object_name': 'Store'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stores'", 'to': "orm['stores.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'paypal_email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
'paypal_is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['stores.ShoppingRegion']", 'null': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'store'", 'unique': 'True', 'to': "orm['auth.User']"})
}
}
complete_apps = ['cart']
| bsd-3-clause | -8,394,348,503,170,072,000 | 72.366412 | 182 | 0.551035 | false |
alimanfoo/numcodecs | numcodecs/tests/test_checksum32.py | 1 | 1611 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import itertools
import numpy as np
import pytest
from numcodecs.checksum32 import CRC32, Adler32
from numcodecs.tests.common import (check_encode_decode, check_config, check_repr,
check_backwards_compatibility,
check_err_encode_object_buffer)
# mix of dtypes: integer, float, bool, string
# mix of shapes: 1D, 2D, 3D
# mix of orders: C, F
arrays = [
np.arange(1000, dtype='i4'),
np.linspace(1000, 1001, 1000, dtype='f8'),
np.random.normal(loc=1000, scale=1, size=(100, 10)),
np.random.randint(0, 2, size=1000, dtype=bool).reshape(100, 10, order='F'),
np.random.choice([b'a', b'bb', b'ccc'], size=1000).reshape(10, 10, 10)
]
codecs = [CRC32(), Adler32()]
def test_encode_decode():
for codec, arr in itertools.product(codecs, arrays):
check_encode_decode(arr, codec)
def test_errors():
for codec, arr in itertools.product(codecs, arrays):
enc = codec.encode(arr)
with pytest.raises(RuntimeError):
codec.decode(enc[:-1])
def test_config():
for codec in codecs:
check_config(codec)
def test_repr():
check_repr("CRC32()")
check_repr("Adler32()")
def test_backwards_compatibility():
check_backwards_compatibility(CRC32.codec_id, arrays, [CRC32()])
check_backwards_compatibility(Adler32.codec_id, arrays, [Adler32()])
def test_err_encode_object_buffer():
check_err_encode_object_buffer(CRC32())
check_err_encode_object_buffer(Adler32())
| mit | -160,882,542,628,057,300 | 26.305085 | 82 | 0.646182 | false |
openstack/python-heatclient | heatclient/common/deployment_utils.py | 1 | 5226 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
import six
from six.moves.urllib import parse as urlparse
from swiftclient import client as sc
from swiftclient import utils as swiftclient_utils
from heatclient._i18n import _
from heatclient import exc
from heatclient.v1 import software_configs
def build_derived_config_params(action, source, name, input_values,
server_id, signal_transport, signal_id=None):
if isinstance(source, software_configs.SoftwareConfig):
source = source.to_dict()
input_values = input_values or {}
inputs = copy.deepcopy(source.get('inputs')) or []
for inp in inputs:
input_key = inp['name']
inp['value'] = input_values.pop(input_key, inp.get('default'))
# for any input values that do not have a declared input, add
# a derived declared input so that they can be used as config
# inputs
for inpk, inpv in input_values.items():
inputs.append({
'name': inpk,
'type': 'String',
'value': inpv
})
inputs.extend([{
'name': 'deploy_server_id',
'description': _('ID of the server being deployed to'),
'type': 'String',
'value': server_id
}, {
'name': 'deploy_action',
'description': _('Name of the current action being deployed'),
'type': 'String',
'value': action
}, {
'name': 'deploy_signal_transport',
'description': _('How the server should signal to heat with '
'the deployment output values.'),
'type': 'String',
'value': signal_transport
}])
if signal_transport == 'TEMP_URL_SIGNAL':
inputs.append({
'name': 'deploy_signal_id',
'description': _('ID of signal to use for signaling '
'output values'),
'type': 'String',
'value': signal_id
})
inputs.append({
'name': 'deploy_signal_verb',
'description': _('HTTP verb to use for signaling '
'output values'),
'type': 'String',
'value': 'PUT'
})
elif signal_transport != 'NO_SIGNAL':
raise exc.CommandError(
_('Unsupported signal transport %s') % signal_transport)
return {
'group': source.get('group') or 'Heat::Ungrouped',
'config': source.get('config') or '',
'options': source.get('options') or {},
'inputs': inputs,
'outputs': source.get('outputs') or [],
'name': name
}
def create_temp_url(swift_client, name, timeout, container=None):
container = container or '%(name)s-%(uuid)s' % {
'name': name, 'uuid': uuid.uuid4()}
object_name = str(uuid.uuid4())
swift_client.put_container(container)
key_header = 'x-account-meta-temp-url-key'
if key_header not in swift_client.head_account():
swift_client.post_account({
key_header: six.text_type(uuid.uuid4())[:32]})
key = swift_client.head_account()[key_header]
project_path = swift_client.url.split('/')[-1]
path = '/v1/%s/%s/%s' % (project_path, container, object_name)
timeout_secs = timeout * 60
tempurl = swiftclient_utils.generate_temp_url(path, timeout_secs, key,
'PUT')
sw_url = urlparse.urlparse(swift_client.url)
put_url = '%s://%s%s' % (sw_url.scheme, sw_url.netloc, tempurl)
swift_client.put_object(container, object_name, '')
return put_url
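# Note added for illustration (not part of the original module): the returned URL is
# what ends up in the deploy_signal_id input above; a hedged sketch of how a deployed
# server would typically use it (the requests call is an assumption, not heatclient API):
#     requests.put(put_url, data=json.dumps({'deploy_stdout': '...'}))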
def build_signal_id(hc, args):
if args.signal_transport != 'TEMP_URL_SIGNAL':
return
if args.os_no_client_auth:
raise exc.CommandError(_(
'Cannot use --os-no-client-auth, auth required to create '
'a Swift TempURL.'))
swift_client = create_swift_client(
hc.http_client.auth, hc.http_client.session, args)
return create_temp_url(swift_client, args.name, args.timeout)
def create_swift_client(auth, session, args):
auth_token = auth.get_token(session)
endpoint = auth.get_endpoint(session,
service_type='object-store',
region_name=args.os_region_name)
project_name = args.os_project_name or args.os_tenant_name
swift_args = {
'auth_version': '2.0',
'tenant_name': project_name,
'user': args.os_username,
'key': None,
'authurl': None,
'preauthtoken': auth_token,
'preauthurl': endpoint,
'cacert': args.os_cacert,
'insecure': args.insecure
}
return sc.Connection(**swift_args)
| apache-2.0 | -2,019,147,268,681,793,300 | 33.609272 | 78 | 0.58917 | false |
51reboot/actual_09_homework | 10/jinderui/cmdb/user/views.py | 1 | 6941 | #encoding: utf-8
import sys,os
reload(sys)
sys.setdefaultencoding( "utf-8" )
from flask import Flask
from flask import request
from flask import redirect
from flask import render_template
from flask import session
from flask import flash
from functools import wraps
import json
from user import app
import logan
from asset import Asset
import time
from models import User
#app = Flask(__name__)
# app.secret_key = os.urandom(32)
#app.secret_key = "jinnery"
# Decorator built from a closure: it returns the inner wrapper (which holds the real logic) and is used to check whether a login session exists.
def login_required(func):
@wraps(func)
def wrapper():
if session.get('user') is None:
return redirect('/')
        rt = func() # call the wrapped view and return its result; the return must come after the if check, otherwise the guard above would never run
return rt
    return wrapper # return the function object itself, not the result of calling it
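# Note added for clarity (not part of the original file): as written, wrapper() accepts
# no arguments, so the decorator only fits zero-argument view functions. A more general
# sketch (an assumption, not the author's code) would forward them:
#     def wrapper(*args, **kwargs):
#         if session.get('user') is None:
#             return redirect('/')
#         return func(*args, **kwargs)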
# Login page entry point
@app.route('/')
def index():
# user_agent = request.headers.get('User-Agent')
return render_template('login.html')
# Opening the home page redirects to the login page; after a successful login the user is sent to the users page.
# During login, the external User model reads the database to check the username and password (True passes, False does not).
@app.route('/login/',methods=['POST','GET'])
def login():
flash('欢迎')
params = request.args if request.method == 'GET' else request.form
username = params.get('user','')
password = params.get('password','')
    if User.validate_login(username=username,password=password): # check the username and password against the database
        session['user'] = username # store the user in the session so other pages cannot be opened without logging in
return redirect('/users/')
else:
        return render_template('login.html',error='用户名或密码错误') # on failure, render the template again and pass the error message to login.html via the context
# List user information
@app.route('/users/',methods=['POST','GET'])
@login_required # the decorator checks the session; without one the request is redirected to the login page
def users():
user_list = User.get_users()
    return render_template('users.html',user_list=user_list) # render the template; users.html loops over user_list to display every user
@app.route('/commituser/',methods=['POST','GET'])
def commituser():
#userid = request.form.get('id')
username = request.form.get('username')
age = request.form.get('age')
password = request.form.get('password')
_is_ok, _error = User.validate_add_user(username, password, age)
    if _is_ok: # the external User.validate_add_user check passed, so the user can be created
User.add_user(username,password,age)
return json.dumps({'is_ok':_is_ok,'error':_error})
@app.route('/user/create/',methods=['POST','GET'])
def user_create():
return render_template('user_create.html')
# Delete a user: take the id from the request parameters and remove the matching user
@app.route('/deluser/',methods=['POST'])
def deluser():
flash('删除用户成功')
delid = request.form.get('userid')
    User.del_user(delid) # User.del_user runs the SQL that removes the selected user
return redirect('/users/')
@app.route('/changeuser/',methods=['POST','GET'])
@login_required
def changeuser():
userid = request.form.get('userid')
updateage = request.form.get('age')
    # the validation helper returns a tuple (is_ok, error)
_is_ok, _error = User.validate_user(updateage)
    # only apply the change when validation succeeded
if _is_ok:
User.change_user(userid,updateage)
return json.dumps({'is_ok':_is_ok,'error':_error,'mess':'update'})
@app.route('/user/update/',methods=['POST','GET'])
def user_update():
return render_template('user_update.html')
# Log out and clear the session
@app.route('/logout/')
def logout():
session.clear()
return redirect('/')
# File upload
@app.route('/upfile/')
def upfile():
return render_template('upfile.html')
# Log display
@app.route('/logs/',methods=['POST','GET'])
def logs():
_upfile = request.files.get('upfile')
if _upfile:
_upfile.save('/tmp/logs')
logan.execute_commit_sql('/tmp/logs')
    topn = request.args.get('topn',10) # read topn from the GET parameters; defaults to 10 when absent
    _rt_list = logan.fetch_accesslog(topn) # fetch the requested rows from the database
    return render_template('logs.html',rt_list=_rt_list) # render the template that displays the logs
# Validate the data POSTed by the front-end JS and return a result for it to act on
@app.route('/changepassword/',methods=['POST','GET'])
def changepassword():
userid = request.form.get('userid')
manager_password = request.form.get('manager-password')
user_password = request.form.get('user-password')
_is_ok,_error = User.validate_charge_user_password(userid,user_password,session['user'],manager_password)
if _is_ok:
User.charge_user_password(userid,user_password)
return json.dumps({'is_ok':_is_ok,'error':_error, 'mess':'update'})
@app.route('/user/changepassword/',methods=['POST','GET'])
def user_changepassword():
return render_template('user_changepwd.html')
@app.route('/assets/',methods=['GET','POST'])
def assets():
_assets = Asset.get_list()
_idcs = Asset.get_idcs_list()
return render_template('assets.html',assets=_assets,idcs=_idcs)
@app.route('/asset/create/',methods=['POST','GET'])
@login_required
def create_asset():
_assets = Asset.get_list()
_idcs = Asset.get_idcs_list()
return render_template('asset_create.html',idcs=_idcs)
@app.route('/asset/add/',methods=['GET','POST'])
def add_assets():
_sn = request.form.get('sn')
_warranty = request.form.get('warranty')
_cpu = request.form.get('cpu')
_ram = request.form.get('ram')
_disk = request.form.get('disk')
_ip = request.form.get('ip')
_idc_id = request.form.get('idc_id')
print _idc_id
_is_ok, _error = Asset.validate_create(sn=_sn, warranty=_warranty, cpu=_cpu,ram=_ram,disk=_disk,ip=_ip)
if _is_ok:
print 'in .....'
Asset.create(sn=_sn, warranty=_warranty, cpu=_cpu,ram=_ram,disk=_disk,ip=_ip,idc_id=_idc_id)
return json.dumps({'is_ok':_is_ok,'error':_error})
@app.route('/asset/update/',methods=['GET','POST'])
def update_assets():
_idcs = Asset.get_idcs_list()
_assets = Asset.get_list()
return render_template('asset_update.html',idcs=_idcs,assets=_assets)
@app.route('/asset/addupdate/',methods=['GET','POST'])
def addupdate_assets():
_id = request.form.get('id')
print 'i am id--%s' % _id
_sn = request.form.get('sn')
_warranty = request.form.get('warranty')
_cpu = request.form.get('cpu')
_ram = request.form.get('ram')
_disk = request.form.get('disk')
_idc_id = request.form.get('idc_id')
_is_ok, _error = Asset.validate_update(sn=_sn,warranty=_warranty,cpu=_cpu,ram=_ram,disk=_disk)
if _is_ok:
print 'rrr'
Asset.update(idc_id=_idc_id,warranty=_warranty,cpu=_cpu,ram=_ram,disk=_disk,id=_id)
return json.dumps({'is_ok':_is_ok,'error':_error})
@app.route('/asset/delete/',methods=['GET','POST'])
def delete_assets():
id = request.args.get('id')
Asset.delete(id)
return redirect('/assets/')
#if __name__ == '__main__':
# app.run(host='0.0.0.0',port=8888,debug=True)
| mit | -8,779,404,798,102,596,000 | 26.207965 | 106 | 0.693771 | false |
ismaelgaudioso/pihome | Views/baseView.py | 1 | 1139 | from Events.draw import *
from Events.quitcleanup import *
from Libraries.Buttons import *
from config import *
import pygame
class BaseView:
"""..."""
def __init__(self, evManager):
self.evManager = evManager
self.evManager.RegisterListener(self)
size = (Config.DEFAULT_WIDTH, Config.DEFAULT_HEIGHT)
self.window = pygame.display.set_mode( size )
#mode = pygame.NOFRAME
#self.window = pygame.display.set_mode(size, mode)
self.background = pygame.Surface( size )
self.drawBackground()
#self.backSprites = pygame.sprite.RenderUpdates()
#self.frontSprites = pygame.sprite.RenderUpdates()
def emptyScreen(self):
self.window.fill(Config.BLACK_COLOR)
def drawBackground(self):
self.background.fill( Config.DEFAULT_BACKGROUND_COLOR )
self.window.blit( self.background, (0,0) )
pygame.display.flip()
def Notify(self, event):
if isinstance( event, DrawEvent ):
# Draw Background
self.drawBackground()
if isinstance( event, QuitCleanupEvent ):
pygame.quit() | mit | -468,260,044,115,288,700 | 25.511628 | 64 | 0.640913 | false |
ros2/ci | ros2_batch_job/windows_batch/__init__.py | 1 | 4135 | # Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from ..batch_job import BatchJob
from ..util import info
from ..util import warn
class WindowsBatchJob(BatchJob):
def __init__(self, args):
self.args = args
# The BatchJob constructor will set self.run and self.python
BatchJob.__init__(self, python_interpreter=args.python_interpreter)
def pre(self):
pass
def post(self):
pass
def show_env(self):
# Show the env
self.run(['set'], shell=True)
# Show what pip has
self.run([self.python, '-m', 'pip', 'freeze', '--all'])
def setup_env(self):
# Try to find the connext env file and source it
connext_env_file = None
if ('rmw_connext_cpp' not in self.args.ignore_rmw or
'rmw_connextdds' not in self.args.ignore_rmw):
pf = os.environ.get('ProgramFiles', "C:\\Program Files\\")
connext_env_file = os.path.join(
pf, 'rti_connext_dds-5.3.1', 'resource', 'scripts', 'rtisetenv_x64Win64VS2017.bat')
if not os.path.exists(connext_env_file):
warn("Asked to use Connext but the RTI env was not found at '{0}'".format(
connext_env_file))
connext_env_file = None
# Try to find the OpenSplice env file
opensplice_env_file = None
if 'rmw_opensplice_cpp' not in self.args.ignore_rmw:
default_home = os.path.join(
os.path.abspath(os.sep), 'dev', 'opensplice', 'HDE', 'x86_64.win64')
ospl_home = os.environ.get('OSPL_HOME', default_home)
opensplice_env_file = os.path.join(ospl_home, 'release.bat')
if not os.path.exists(opensplice_env_file):
warn("Asked to use OpenSplice but the env file was not found at '{0}'".format(
opensplice_env_file))
opensplice_env_file = None
# Generate the env file
if os.path.exists('env.bat'):
os.remove('env.bat')
with open('env.bat', 'w') as f:
f.write("@echo off" + os.linesep)
assert self.args.visual_studio_version is not None
f.write(
'call '
'"C:\\Program Files (x86)\\Microsoft Visual Studio\\%s\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat" ' %
self.args.visual_studio_version + 'x86_amd64' + os.linesep)
if connext_env_file is not None:
f.write('call "%s"%s' % (connext_env_file, os.linesep))
if opensplice_env_file is not None:
f.write('call "%s"%s' % (opensplice_env_file, os.linesep))
f.write("%*" + os.linesep)
f.write("if %ERRORLEVEL% NEQ 0 exit /b %ERRORLEVEL%" + os.linesep)
# Show the result
info("Contents of 'env.bat':")
with open('env.bat', 'r') as f:
print(f.read(), end='')
current_run = self.run
def with_vendors(cmd, **kwargs):
# Ensure shell is on since we're using &&
kwargs['shell'] = True
# Use the env file to call the commands
# ensure that quoted arguments are passed through as quoted arguments
cmd = ['env.bat'] + [
'"%s"' % c if (' ' in c or '|' in c) and not (c.startswith('"') and c.endswith('"')) else c
for c in cmd]
# Pass along to the original runner
return current_run(cmd, **kwargs)
# Push the custom runner
self.push_run(with_vendors)
| apache-2.0 | -4,589,281,492,062,788,000 | 39.940594 | 123 | 0.58283 | false |
tensorflow/model-analysis | tensorflow_model_analysis/evaluators/analysis_table_evaluator_test.py | 1 | 3340 | # Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for analysis_table_evaluator."""
from __future__ import division
from __future__ import print_function
import apache_beam as beam
from apache_beam.testing import util
import tensorflow as tf
from tensorflow_model_analysis import constants
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.evaluators import analysis_table_evaluator
class AnalysisTableEvaluatorTest(testutil.TensorflowModelAnalysisTest):
def testIncludeFilter(self):
with beam.Pipeline() as pipeline:
got = (
pipeline
| 'Create' >> beam.Create([{
'a': 1,
'b': 2
}])
| 'EvaluateExtracts' >>
analysis_table_evaluator.EvaluateExtracts(include=['a']))
def check_result(got):
try:
self.assertEqual(got, [{'a': 1}])
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(got[constants.ANALYSIS_KEY], check_result)
def testExcludeFilter(self):
with beam.Pipeline() as pipeline:
got = (
pipeline
| 'Create' >> beam.Create([{
'a': 1,
'b': 2
}])
| 'EvaluateExtracts' >>
analysis_table_evaluator.EvaluateExtracts(exclude=['a']))
def check_result(got):
try:
self.assertEqual(got, [{'b': 2}])
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(got[constants.ANALYSIS_KEY], check_result)
def testNoIncludeOrExcludeFilters(self):
with beam.Pipeline() as pipeline:
got = (
pipeline
| 'Create' >> beam.Create([{
constants.INPUT_KEY: 'input',
'other': 2
}])
| 'EvaluateExtracts' >> analysis_table_evaluator.EvaluateExtracts())
def check_result(got):
try:
self.assertEqual(got, [{'other': 2}])
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(got[constants.ANALYSIS_KEY], check_result)
def testEmptyExcludeFilters(self):
with beam.Pipeline() as pipeline:
got = (
pipeline
| 'Create' >> beam.Create([{
constants.INPUT_KEY: 'input',
'other': 2
}])
| 'EvaluateExtracts' >>
analysis_table_evaluator.EvaluateExtracts(exclude=[]))
def check_result(got):
try:
self.assertEqual(got, [{constants.INPUT_KEY: 'input', 'other': 2}])
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(got[constants.ANALYSIS_KEY], check_result)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | -1,047,125,563,961,782,500 | 30.214953 | 78 | 0.626048 | false |
SEC-i/ecoControl | server/forecasting/optimizing/auto_optimization.py | 1 | 6269 | """
This module contains the algorithm for optimizing the costs of energy systems.
"""
from datetime import datetime
from scipy.optimize import fmin_l_bfgs_b
import calendar
import cProfile
import copy
from collections import namedtuple
import numpy as np
from numpy import array
from server.devices.base import BaseEnvironment
from server.functions import get_configuration
import multiprocessing
from multiprocessing.process import Process
import os
from server.settings import BASE_DIR
from csv import writer
import dateutil
DEFAULT_FORECAST_INTERVAL = 1 * 3600.0
"""The interval for how long one auto_optimize will forecast and for how long one specific workload is set.
Note that this constant also represents a compromise: shorter intervals can adjust to quick changes,
e.g. changes in electricity demand, while longer intervals can incorporate more forecasts but won't be able
to adjust quickly.
An interval of one hour led to good results in our tests.
"""
def auto_optimize(forecast):
""" Tries to optimize the cost and sets the ``cu.overwrite_workload``
The method forecasts from ``env.now`` with different cu workloads and finds the one with the
lowest cost. The length of the forecast is :attr:`DEFAULT_FORECAST_INTERVAL`.
:param forecast: the forecast to be optimized
"""
    optimized_config = find_optimal_config(forecast.env.now, forecast)
cu = forecast.getCU()
cu.overwrite_workload = float(optimized_config["cu_overwrite_workload"])
print "optimization round at time: ",datetime.fromtimestamp(forecast.env.now),":", optimized_config
def find_optimal_config(initial_time, forecast):
""" ``Internal Method`` Main method, which optimizes the costs by running a global
approximation for the best configuration and then running a local minimization
method on this approximation"""
prices = {}
prices["gas_costs"] = get_configuration('gas_costs')
prices["electrical_costs"] = get_configuration('electrical_costs')
rewards = {}
rewards["thermal_revenues"] = get_configuration('thermal_revenues')
rewards["warmwater_revenues"] = get_configuration('warmwater_revenues')
rewards["electrical_revenues"] = get_configuration('electrical_revenues')
rewards["feed_in_reward"] = get_configuration('feed_in_reward')
arguments = (initial_time, forecast, prices, rewards)
#find initial approximation for parameters
results = []
for cu_load in range(0,100,10):
config = [cu_load,]
cost = estimate_cost(config, *arguments)
results.append(BilanceResult(cost, config))
boundaries = [(0.0,100.0)]
#take parameters with lowest cost
initial_parameters = min(results,key=lambda result: result.cost).params
parameters = fmin_l_bfgs_b(estimate_cost, x0 = array(initial_parameters),
args = arguments, bounds = boundaries,
approx_grad = True, factr=10**4, iprint=0,
epsilon=1, maxfun =50)
cu_workload, = parameters[0]
return {"cu_overwrite_workload":cu_workload}
def estimate_cost(params, *args):
"""``Internal Method`` copies the devices and environment, forwards it and returns the costs.
:param list params: parameter to be optimized (CU.workload for now)
:param args: (initial_time, forecast, prices, rewards)
"""
(initial_time, forecast, prices, rewards) = args
copied_devices = copy.deepcopy(forecast.devices)
cu = copied_devices.cu
cu.overwrite_workload = params[0]
simplified_forecast(cu.env, initial_time, copied_devices)
return total_costs(copied_devices, prices, rewards)
def simplified_forecast(env, initial_time, devices):
"""runs the forward loop only executing the step function"""
forward = DEFAULT_FORECAST_INTERVAL
while forward > 0:
for device in devices:
device.step()
env.now += env.step_size
forward -= env.step_size
def total_costs(devices, prices, rewards):
"""``Internal Method`` Returns the cost of a forecast run. The function uses the prices which are stored
in the db deviceconfiguration. It is also constrained by boundaries, f.e. the heatstorage should
never go below min temperature.
:param devices: The devices after the forecast
:param dict prices, rewards: Cached prices and rewards
"""
d = devices
cu,plb,ec,pm,tc,hs = d.cu,d.plb,d.ec,d.pm,d.tc,d.hs
#maintenance_costs = cu.power_on_count
gas_costs = (cu.total_gas_consumption + plb.total_gas_consumption) * prices["gas_costs"]
own_el_consumption = ec.total_consumption - pm.fed_in_electricity - pm.total_purchased
electric_rewards = pm.fed_in_electricity * rewards["feed_in_reward"] + own_el_consumption * rewards["electrical_revenues"]
electric_costs = pm.total_purchased * prices["electrical_costs"]
thermal_rewards = tc.total_consumed * rewards["thermal_revenues"]
final_cost = electric_costs-electric_rewards + gas_costs - thermal_rewards
temp = hs.get_temperature()
above_penalty = abs(min(hs.config["critical_temperature"] - temp, 0) * 1000)
below_penalty = abs(max(hs.config["min_temperature"] - temp, 0) * 1000)
small_penalties = (temp > hs.config["target_temperature"]+5) * 15 + (temp < hs.config["target_temperature"]-5) * 5
return final_cost + above_penalty + below_penalty + small_penalties
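# Editor's note (illustrative, with hypothetical numbers): the penalty terms above act
# as soft constraints on the heat storage. For example, if critical_temperature were 90
# and the storage reached 95 degrees, the cost would grow by abs(min(90 - 95, 0) * 1000)
# = 5000, which dwarfs typical energy costs and pushes the optimizer back into the
# feasible temperature band.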
class BilanceResult(object):
""" wrapper for storing a optimization result"""
def __init__(self, cost, params):
self.params = params
self.cost = cost
####################################
######### multiprocess map #########
####################################
def multiprocess_map(target,params, *args):
mgr = multiprocessing.Manager()
dict_threadsafe = mgr.dict()
jobs = [Process(target=target_wrapper, args=(target,param,index,dict_threadsafe,args)) for index, param in enumerate(params)]
for job in jobs: job.start()
for job in jobs: job.join()
return dict_threadsafe.values()
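# Illustrative usage (editor's addition, hypothetical call): multiprocess_map evaluates
# `target` once per parameter set in its own process and collects BilanceResult objects
# via a Manager dict, e.g.
#   results = multiprocess_map(estimate_cost, [[0.0], [50.0], [100.0]],
#                              initial_time, forecast, prices, rewards)
# Because results are gathered from a shared dict, their order is not guaranteed.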
def target_wrapper(target, params, index, dict_threadsafe, args):
dict_threadsafe[index] = BilanceResult(target(params, *args),params)
| mit | -52,483,128,942,778,310 | 37.22561 | 129 | 0.684001 | false |
googleads/google-ads-python | google/ads/googleads/v6/enums/types/dsa_page_feed_criterion_field.py | 1 | 1199 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v6.enums",
marshal="google.ads.googleads.v6",
manifest={"DsaPageFeedCriterionFieldEnum",},
)
class DsaPageFeedCriterionFieldEnum(proto.Message):
r"""Values for Dynamic Search Ad Page Feed criterion fields."""
class DsaPageFeedCriterionField(proto.Enum):
r"""Possible values for Dynamic Search Ad Page Feed criterion
fields.
"""
UNSPECIFIED = 0
UNKNOWN = 1
PAGE_URL = 2
LABEL = 3
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -4,596,275,378,087,262,700 | 28.243902 | 74 | 0.698082 | false |
CDNoyes/EDL-Py | EntryGuidance/HeadingAlignment.py | 1 | 3276 | """ Heading Alignment Controllers """
from numpy import sin, cos, arcsin, arccos, sqrt, pi, radians
import numpy as np
from scipy.optimize import minimize_scalar
from scipy.integrate import trapz
from MPC import constant
from functools import partial
def desiredHeading(lon_current, lat_current, lon_target, lat_target):
# delta = 2*arcsin( sqrt( sin(0.5*(lat_current-lat_target))**2 + cos(lat_current)*cos(lat_target)*sin(0.5*(lon_current-lon_target))**2 ) )
# heading = pi/2.0 - np.sign(lon_target-lon_current)*arccos( (sin(lat_target)-sin(lat_current)*cos(delta))/(sin(delta)*cos(lat_current)))
if np.abs(lon_target-lon_current) < 1e-5:
if lat_target-lat_current > 0:
PHI = 0
else:
PHI = pi
else:
d = arccos(cos(lat_current)*cos(lat_target)*cos(lon_current-lon_target) + sin(lat_current)*sin(lat_target))
PHI = np.sign(lon_target-lon_current)*arccos( (sin(lat_target)-sin(lat_current)*cos(d))/(cos(lat_current)*sin(d)) )
heading = pi/2-PHI
return heading
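# Illustrative check (editor's addition): with the convention above (PHI is the bearing
# from north, heading = pi/2 - PHI), a target due east of the current position gives a
# heading of ~0 rad, e.g. desiredHeading(0.0, 0.0, radians(1.0), 0.0) ~= 0.0, while a
# target due north, e.g. desiredHeading(0.0, 0.0, 0.0, radians(1.0)), gives ~pi/2.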
def controller(control_options, control_bounds, get_heading, **kwargs):
''' Model predictive controller for heading alignment '''
if kwargs['rangeToGo'] < 0:
return 0
else:
sol = optimize(kwargs['current_state'], control_options, control_bounds, kwargs['aero_ratios'], get_heading)
return sol.x
def optimize(current_state, control_options, control_bounds, aero_ratios, get_heading):
''' Optimization routine used in MPC form of heading alignment controller '''
from Simulation import Simulation, NMPCSim
sim = Simulation(output=False, find_transitions=False, **NMPCSim(control_options))
guess = [-pi/2]
sol = minimize_scalar(cost, method='Bounded', bounds=control_bounds, args=(sim, current_state, aero_ratios, get_heading))
return sol
def cost(u, sim, state, ratios, get_heading):
''' Cost function used in MPC optimization '''
controls = [partial(constant,value=u)]
output = sim.run(state, controls, AeroRatios=ratios)
time = output[:,0]
vel = output[:,7]
heading = radians(output[:,9])
heading_desired = [get_heading(lon,lat) for lon,lat in radians(output[:,5:7])]
integrand = (heading-heading_desired)**2
return trapz(integrand, time)
def bank(rtg, cr):
""" Range to go - positive in undershoot position
Crossrange to target
"""
return np.arctan2(cr, rtg)
def test_desiredHeading():
import matplotlib.pyplot as plt
lon = np.linspace(-1,1,101)
lat = np.linspace(-1,1,101)
# psi = np.array([desiredHeading(Lon,Lat,0,0) for Lon in lon for Lat in lat])
# xx,yy = np.meshgrid(np.degrees(lon),np.degrees(lat))
psi = np.array([bank(1.1-Lon,-Lat) for Lon in lon for Lat in lat])
psi.shape = (len(lon),len(lat))
xx,yy = np.meshgrid((1.1-lon)*3397,lat*3397)
# plt.scatter(lon, lat, color=psi, alpha=0.5)
CT = plt.contour(xx,yy, np.degrees(psi.T),18)
# CT = plt.contourf(xx,yy, np.degrees(psi.T),18)
plt.clabel(CT, fontsize=10)
plt.xlabel('Longitude (deg)')
plt.ylabel('Latitude (deg)')
plt.show()
if __name__ == "__main__":
test_desiredHeading()
| gpl-3.0 | 498,352,842,672,781,600 | 33.608696 | 142 | 0.639194 | false |
forseti-security/forseti-security | google/cloud/forseti/notifier/notifiers/base_email_notification.py | 1 | 1410 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base email notifier to perform notifications"""
import abc
from future.utils import with_metaclass
from google.cloud.forseti.notifier.notifiers import base_notification
from google.cloud.forseti.common.util import logger
LOGGER = logger.get_logger(__name__)
class BaseEmailNotification(with_metaclass(abc.ABCMeta,
base_notification.BaseNotification)):
"""Base email notifier."""
@abc.abstractmethod
def _send(self, **kwargs):
"""Send notifications.
Args:
**kwargs: Arbitrary keyword arguments.
"""
pass
@abc.abstractmethod
def _compose(self, **kwargs):
"""Compose notifications.
Args:
**kwargs: Arbitrary keyword arguments.
"""
pass
| apache-2.0 | 2,863,507,899,064,744,400 | 29.652174 | 80 | 0.685816 | false |
Lancey6/redwind | redwind/plugins/instagram.py | 1 | 7607 | from .. import hooks
from .. import util
from ..extensions import db
from ..models import Post, Setting, get_settings, Context
from ..tasks import get_queue, async_app_context
from flask.ext.login import login_required
from flask import (
request, redirect, url_for, Blueprint, current_app,
)
import requests
import urllib
import datetime
PERMALINK_RE = util.INSTAGRAM_RE
instagram = Blueprint('instagram', __name__)
def register(app):
app.register_blueprint(instagram)
hooks.register('create-context', create_context)
hooks.register('post-saved', send_to_instagram)
@instagram.route('/authorize_instagram')
@login_required
def authorize_instagram():
redirect_uri = url_for('.authorize_instagram', _external=True)
code = request.args.get('code')
if not code:
# redirect to instagram authorization page
params = {
'client_id': get_settings().instagram_client_id,
'redirect_uri': redirect_uri,
'response_type': 'code',
'scope': 'likes comments',
}
return redirect('https://api.instagram.com/oauth/authorize/?'
+ urllib.parse.urlencode(params))
params = {
'client_id': get_settings().instagram_client_id,
'client_secret': get_settings().instagram_client_secret,
'grant_type': 'authorization_code',
'redirect_uri': redirect_uri,
'code': code,
}
result = requests.post(
'https://api.instagram.com/oauth/access_token', data=params)
current_app.logger.debug('received result %s', result)
payload = result.json()
access_token = payload.get('access_token')
Setting.query.get('instagram_access_token').value = access_token
db.session.commit()
return redirect(url_for('admin.edit_settings'))
def create_context(url):
m = PERMALINK_RE.match(url)
if not m:
current_app.logger.debug('url is not an instagram media url %s', url)
return
r = ig_get('https://api.instagram.com/v1/media/shortcode/' + m.group(1))
if r.status_code // 2 != 100:
current_app.logger.warn(
"failed to fetch instagram media with shortcode %s %s %s",
m.group(1), r, r.content)
return
blob = r.json()
author = blob.get('data', {}).get('user', {})
author_name = author.get('full_name')
author_image = author.get('profile_picture')
author_url = author.get('website')
created_time = blob.get('data', {}).get('created_time')
caption_text = (blob.get('data', {}).get('caption') or {}).get('text')
images = blob.get('data', {}).get('images', {})
image = images.get('standard_resolution').get('url')
if created_time:
published = datetime.datetime.fromtimestamp(int(created_time))
content = ''
if caption_text:
content += '<p>' + caption_text + '</p>'
if image:
content += '<img src="' + image + '"/>'
context = Context()
context.url = context.permalink = url
context.author_name = author_name
context.author_image = author_image
context.author_url = author_url
context.published = published
context.title = None
context.content = content
context.content_plain = caption_text
current_app.logger.debug('created instagram context %s', context)
return context
def send_to_instagram(post, args):
"""Share a like or comment to Instagram without user-input.
"""
if 'instagram' in args.getlist('syndicate-to'):
if not is_instagram_authorized():
return False, 'Current user is not authorized for instagram'
current_app.logger.debug(
"queueing post to instagram {}".format(post.id))
get_queue().enqueue(do_send_to_instagram, post.id, current_app.config)
return True, 'Success'
def do_send_to_instagram(post_id, app_config):
with async_app_context(app_config):
current_app.logger.debug('posting to instagram %d', post_id)
post = Post.load_by_id(post_id)
in_reply_to, repost_of, like_of \
= util.posse_post_discovery(post, PERMALINK_RE)
# likes are the only thing we can POSSE to instagram unfortunately
if like_of:
m = PERMALINK_RE.match(like_of)
shortcode = m.group(1)
r = ig_get('https://api.instagram.com/v1/media/shortcode/'
+ m.group(1))
if r.status_code // 2 != 100:
current_app.logger.warn(
"failed to fetch instagram media %s %s", r, r.content)
return None
media_id = r.json().get('data', {}).get('id')
if not media_id:
current_app.logger.warn(
'could not find media id for shortcode %s', shortcode)
return None
r = ig_get('https://api.instagram.com/v1/users/self')
my_username = r.json().get('data', {}).get('username')
r = ig_post('https://api.instagram.com/v1/media/'
+ media_id + '/likes')
if r.status_code // 2 != 100:
current_app.logger.warn(
"failed to POST like for instagram id %s", media_id)
return None
like_url = like_of + '#liked-by-' + my_username
post.add_syndication_url(like_url)
db.session.commit()
return like_url
if in_reply_to:
comment_text = format_markdown_for_instagram(post.content)
comment_url = post_comment(in_reply_to, comment_text)
if comment_url:
post.add_syndication_url(comment_url)
db.session.commit()
return comment_url
def format_markdown_for_instagram(data):
return util.format_as_text(util.markdown_filter(data))
def post_comment(permalink, comment_text):
if ('INSTAGRAM_USERNAME' not in current_app.config
or 'INSTAGRAM_PASSWORD' not in current_app.config):
return
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import selenium.webdriver.support.ui as ui
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
dc = dict(DesiredCapabilities.PHANTOMJS)
dc['ssl-protocol'] = 'any'
browser = webdriver.PhantomJS(desired_capabilities=dc)
wait = ui.WebDriverWait(browser, 10) # timeout after 10 seconds
browser.get('https://instagram.com/accounts/login/')
un = browser.find_element_by_id('lfFieldInputUsername')
un.send_keys(current_app.config['INSTAGRAM_USERNAME']
+ Keys.TAB
+ current_app.config['INSTAGRAM_PASSWORD'])
un.submit()
wait.until(lambda b: b.current_url == 'https://instagram.com/')
browser.get(permalink)
inp = browser.find_element_by_tag_name('input')
inp.send_keys(comment_text)
inp.submit()
# workaround for https://github.com/SeleniumHQ/selenium/issues/767
browser.service.process.terminate()
browser.quit()
return (permalink + '#comment-by-'
+ current_app.config['INSTAGRAM_USERNAME']
+ '-' + datetime.datetime.now().isoformat())
def ig_get(url):
return requests.get(url, params={
'access_token': get_settings().instagram_access_token,
})
def ig_post(url):
return requests.post(url, data={
'access_token': get_settings().instagram_access_token,
})
def is_instagram_authorized():
return (hasattr(get_settings(), 'instagram_access_token')
and get_settings().instagram_access_token)
| bsd-2-clause | -4,872,800,979,763,446,000 | 31.233051 | 82 | 0.615486 | false |
arantius/readability-api | main.py | 1 | 3537 | #!/usr/bin/env python
"""App Engine request handler for Readability API project.
--------------------------------------------------------------------------------
Readability API - Clean up pages and feeds to be readable.
Copyright (C) 2010 Anthony Lieuallen
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# First, munge sys.path to put us first!
import sys
for i, path in enumerate(sys.path):
if 'readability-api' in path:
del sys.path[i]
sys.path.insert(0, path)
from email import utils as email_utils # pylint: disable-msg=E0611,C6202,C6204
import re
import time
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import clean
import feed
import models
import util
class MainPage(webapp.RequestHandler):
request = None
response = None
def get(self):
self.response.headers['Content-Type'] = 'text/html'
self.response.out.write(util.RenderTemplate('main.html'))
class CleanPage(webapp.RequestHandler):
request = None
response = None
def get(self):
url = self.request.get('url') or self.request.get('link')
if url:
output = clean.Clean(url)
else:
output = 'Provide "url" parameter!'
self.response.headers['Content-Type'] = 'text/html; charset=UTF-8'
self.response.headers['Cache-Control'] = 'max-age=3600'
self.response.headers['Expires'] = email_utils.formatdate(
timeval=time.time() + 3600, usegmt=True)
self.response.out.write(output)
class CleanFeed(webapp.RequestHandler):
request = None
response = None
def get(self):
url = self.request.get('url') or self.request.get('link')
include_original = self.request.get('include', None) == 'True'
if not url:
self.response.headers['Content-Type'] = 'text/plain; charset=UTF-8'
self.response.out.write('Provide "url" parameter!')
return
else:
url = re.sub(r'\?at=[^?&]+', '', url)
feed_entity = models.Feed.get_by_key_name(url)
if not feed_entity:
feed_entity = feed.CreateFeed(url)
self.response.headers['Content-Type'] = 'application/atom+xml; charset=UTF-8'
self.response.out.write(feed.PrintFeed(feed_entity, include_original))
class StatsPage(webapp.RequestHandler):
request = None
response = None
def get(self):
types = ('direct_google_docs', 'direct_youtube',
'direct_pdf', 'direct_image', 'error', 'feed', 'content')
stats = [(type, memcache.get('cleaned_%s' % type)) for type in types]
self.response.headers['Content-Type'] = 'text/html'
self.response.out.write(util.RenderTemplate('stats.html', {'stats': stats}))
application = webapp.WSGIApplication(
[('/', MainPage),
('/stats', StatsPage),
('/page', CleanPage),
('/feed', CleanFeed),
('/clean', CleanPage), # legacy
],
debug=util.IS_DEV_APPSERVER)
def main():
run_wsgi_app(application)
if __name__ == '__main__':
main()
| gpl-3.0 | -1,945,832,401,685,141,800 | 27.991803 | 81 | 0.675997 | false |
jnnk/pyethereum | pyethereum/blocks.py | 1 | 32057 | import time
import rlp
import trie
import db
import utils
import processblock
import transactions
import logging
import copy
import sys
from repoze.lru import lru_cache
# logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
INITIAL_DIFFICULTY = 2 ** 17
GENESIS_PREVHASH = '\00' * 32
GENESIS_COINBASE = "0" * 40
GENESIS_NONCE = utils.sha3(chr(42))
GENESIS_GAS_LIMIT = 10 ** 6
MIN_GAS_LIMIT = 125000
GASLIMIT_EMA_FACTOR = 1024
BLOCK_REWARD = 1500 * utils.denoms.finney
UNCLE_REWARD = 15 * BLOCK_REWARD / 16
NEPHEW_REWARD = BLOCK_REWARD / 32
BLOCK_DIFF_FACTOR = 1024
GENESIS_MIN_GAS_PRICE = 0
BLKLIM_FACTOR_NOM = 6
BLKLIM_FACTOR_DEN = 5
DIFF_ADJUSTMENT_CUTOFF = 5
RECORDING = 1
NONE = 0
VERIFYING = -1
GENESIS_INITIAL_ALLOC = \
{"51ba59315b3a95761d0863b05ccc7a7f54703d99": 2 ** 200, # (G)
"e6716f9544a56c530d868e4bfbacb172315bdead": 2 ** 200, # (J)
"b9c015918bdaba24b4ff057a92a3873d6eb201be": 2 ** 200, # (V)
"1a26338f0d905e295fccb71fa9ea849ffa12aaf4": 2 ** 200, # (A)
"2ef47100e0787b915105fd5e3f4ff6752079d5cb": 2 ** 200, # (M)
"cd2a3d9f938e13cd947ec05abc7fe734df8dd826": 2 ** 200, # (R)
"6c386a4b26f73c802f34673f7248bb118f97424a": 2 ** 200, # (HH)
"e4157b34ea9615cfbde6b4fda419828124b70c78": 2 ** 200, # (CH)
}
block_structure = [
["prevhash", "bin", "\00" * 32],
["uncles_hash", "bin", utils.sha3(rlp.encode([]))],
["coinbase", "addr", GENESIS_COINBASE],
["state_root", "trie_root", trie.BLANK_ROOT],
["tx_list_root", "trie_root", trie.BLANK_ROOT],
["difficulty", "int", INITIAL_DIFFICULTY],
["number", "int", 0],
["min_gas_price", "int", GENESIS_MIN_GAS_PRICE],
["gas_limit", "int", GENESIS_GAS_LIMIT],
["gas_used", "int", 0],
["timestamp", "int", 0],
["extra_data", "bin", ""],
["nonce", "bin", ""],
]
block_structure_rev = {}
for i, (name, typ, default) in enumerate(block_structure):
block_structure_rev[name] = [i, typ, default]
acct_structure = [
["nonce", "int", 0],
["balance", "int", 0],
["storage", "trie_root", trie.BLANK_ROOT],
["code", "hash", ""],
]
acct_structure_rev = {}
for i, (name, typ, default) in enumerate(acct_structure):
acct_structure_rev[name] = [i, typ, default]
def calc_difficulty(parent, timestamp):
offset = parent.difficulty / BLOCK_DIFF_FACTOR
sign = 1 if timestamp - parent.timestamp < DIFF_ADJUSTMENT_CUTOFF else -1
return parent.difficulty + offset * sign
def calc_gaslimit(parent):
prior_contribution = parent.gas_limit * (GASLIMIT_EMA_FACTOR - 1)
new_contribution = parent.gas_used * BLKLIM_FACTOR_NOM / BLKLIM_FACTOR_DEN
gl = (prior_contribution + new_contribution) / GASLIMIT_EMA_FACTOR
return max(gl, MIN_GAS_LIMIT)
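# Editor's note (illustrative arithmetic, not in the original source): with the
# constants above, a parent at INITIAL_DIFFICULTY = 2**17 = 131072 adjusts by
# 131072 / 1024 = 128 per block: +128 if the child's timestamp is less than
# DIFF_ADJUSTMENT_CUTOFF (5 s) after the parent's, -128 otherwise. The gas limit is
# an exponential moving average: a parent with gas_limit 10**6 and gas_used 0 gives
# (10**6 * 1023 + 0) / 1024 = 999023, floored at MIN_GAS_LIMIT.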
class UnknownParentException(Exception):
pass
class TransientBlock(object):
"""
    Read-only, non-persisted, not-validated representation of a block
"""
def __init__(self, rlpdata):
self.rlpdata = rlpdata
self.header_args, transaction_list, uncles = rlp.decode(rlpdata)
self.hash = utils.sha3(rlp.encode(self.header_args))
self.transaction_list = transaction_list # rlp encoded transactions
self.uncles = uncles
for i, (name, typ, default) in enumerate(block_structure):
setattr(self, name, utils.decoders[typ](self.header_args[i]))
def __repr__(self):
return '<TransientBlock(#%d %s %s)>' %\
(self.number, self.hash.encode('hex')[
:4], self.prevhash.encode('hex')[:4])
def check_header_pow(header):
assert len(header[-1]) == 32
rlp_Hn = rlp.encode(header[:-1])
nonce = header[-1]
diff = utils.decoders['int'](header[block_structure_rev['difficulty'][0]])
h = utils.sha3(utils.sha3(rlp_Hn) + nonce)
return utils.big_endian_to_int(h) < 2 ** 256 / diff
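# --- Illustrative sketch (editor's addition): a naive proof-of-work search built on
# the same condition as check_header_pow above. This is only a demonstration; it is
# not the client's real mining code and would be far too slow in practice.
def _sketch_mine(header_rlp_without_nonce, difficulty, max_tries=100000):
    target = 2 ** 256 / difficulty
    for i in range(max_tries):
        nonce = utils.sha3(str(i))  # any 32-byte candidate nonce
        h = utils.sha3(utils.sha3(header_rlp_without_nonce) + nonce)
        if utils.big_endian_to_int(h) < target:
            return nonce
    return None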
class Block(object):
def __init__(self,
prevhash='\00' * 32,
uncles_hash=block_structure_rev['uncles_hash'][2],
coinbase=block_structure_rev['coinbase'][2],
state_root=trie.BLANK_ROOT,
tx_list_root=trie.BLANK_ROOT,
difficulty=block_structure_rev['difficulty'][2],
number=0,
min_gas_price=block_structure_rev['min_gas_price'][2],
gas_limit=block_structure_rev['gas_limit'][2],
gas_used=0, timestamp=0, extra_data='', nonce='',
transaction_list=[],
uncles=[],
header=None):
self.prevhash = prevhash
self.uncles_hash = uncles_hash
self.coinbase = coinbase
self.difficulty = difficulty
self.number = number
self.min_gas_price = min_gas_price
self.gas_limit = gas_limit
self.gas_used = gas_used
self.timestamp = timestamp
self.extra_data = extra_data
self.nonce = nonce
self.uncles = uncles
self.suicides = []
self.postqueue = []
self.caches = {
'balance': {},
'nonce': {},
'code': {},
'all': {}
}
self.journal = []
self.transactions = trie.Trie(utils.get_db_path(), tx_list_root)
self.transaction_count = 0
self.state = trie.Trie(utils.get_db_path(), state_root)
self.proof_mode = None
self.proof_nodes = []
# If transaction_list is None, then it's a block header imported for
# SPV purposes
if transaction_list is not None:
# support init with transactions only if state is known
assert self.state.root_hash_valid()
for tx_lst_serialized, state_root, gas_used_encoded \
in transaction_list:
self._add_transaction_to_list(
tx_lst_serialized, state_root, gas_used_encoded)
if tx_list_root != self.transactions.root_hash:
raise Exception("Transaction list root hash does not match!")
if not self.is_genesis() and self.nonce and\
not check_header_pow(header or self.list_header()):
raise Exception("PoW check failed")
# make sure we are all on the same db
assert self.state.db.db == self.transactions.db.db
# use de/encoders to check type and validity
for name, typ, d in block_structure:
v = getattr(self, name)
assert utils.decoders[typ](utils.encoders[typ](v)) == v
# Basic consistency verifications
if not self.state.root_hash_valid():
raise Exception(
"State Merkle root not found in database! %r" % self)
if not self.transactions.root_hash_valid():
raise Exception(
"Transactions root not found in database! %r" % self)
if len(self.extra_data) > 1024:
raise Exception("Extra data cannot exceed 1024 bytes")
if self.coinbase == '':
raise Exception("Coinbase cannot be empty address")
def validate_uncles(self):
if utils.sha3(rlp.encode(self.uncles)) != self.uncles_hash:
return False
# Check uncle validity
ancestor_chain = [self]
# Uncle can have a block from 2-7 blocks ago as its parent
for i in [1, 2, 3, 4, 5, 6, 7]:
if ancestor_chain[-1].number > 0:
ancestor_chain.append(ancestor_chain[-1].get_parent())
ineligible = []
# Uncles of this block cannot be direct ancestors and cannot also
# be uncles included 1-6 blocks ago
for ancestor in ancestor_chain[1:]:
ineligible.extend(ancestor.uncles)
ineligible.extend([b.list_header() for b in ancestor_chain])
eligible_ancestor_hashes = [x.hash for x in ancestor_chain[2:]]
for uncle in self.uncles:
if not check_header_pow(uncle):
sys.stderr.write('1\n\n')
return False
# uncle's parent cannot be the block's own parent
prevhash = uncle[block_structure_rev['prevhash'][0]]
if prevhash not in eligible_ancestor_hashes:
logger.debug("%r: Uncle does not have a valid ancestor", self)
sys.stderr.write('2 ' + prevhash.encode('hex') + ' ' + str(map(lambda x: x.encode('hex'), eligible_ancestor_hashes)) + '\n\n')
return False
if uncle in ineligible:
sys.stderr.write('3\n\n')
logger.debug("%r: Duplicate uncle %r", self, utils.sha3(rlp.encode(uncle)).encode('hex'))
return False
ineligible.append(uncle)
return True
def is_genesis(self):
return self.prevhash == GENESIS_PREVHASH and \
self.nonce == GENESIS_NONCE
def check_proof_of_work(self, nonce):
H = self.list_header()
H[-1] = nonce
return check_header_pow(H)
@classmethod
def deserialize_header(cls, header_data):
if isinstance(header_data, (str, unicode)):
header_data = rlp.decode(header_data)
assert len(header_data) == len(block_structure)
kargs = {}
# Deserialize all properties
for i, (name, typ, default) in enumerate(block_structure):
kargs[name] = utils.decoders[typ](header_data[i])
return kargs
@classmethod
def deserialize(cls, rlpdata):
header_args, transaction_list, uncles = rlp.decode(rlpdata)
kargs = cls.deserialize_header(header_args)
kargs['header'] = header_args
kargs['transaction_list'] = transaction_list
kargs['uncles'] = uncles
# if we don't have the state we need to replay transactions
_db = db.DB(utils.get_db_path())
if len(kargs['state_root']) == 32 and kargs['state_root'] in _db:
return Block(**kargs)
elif kargs['prevhash'] == GENESIS_PREVHASH:
return Block(**kargs)
else: # no state, need to replay
try:
parent = get_block(kargs['prevhash'])
except KeyError:
raise UnknownParentException(kargs['prevhash'].encode('hex'))
return parent.deserialize_child(rlpdata)
@classmethod
def init_from_header(cls, rlpdata):
kargs = cls.deserialize_header(rlpdata)
kargs['transaction_list'] = None
kargs['uncles'] = None
return Block(**kargs)
def deserialize_child(self, rlpdata):
"""
deserialization w/ replaying transactions
"""
header_args, transaction_list, uncles = rlp.decode(rlpdata)
assert len(header_args) == len(block_structure)
kargs = dict(transaction_list=transaction_list, uncles=uncles)
# Deserialize all properties
for i, (name, typ, default) in enumerate(block_structure):
kargs[name] = utils.decoders[typ](header_args[i])
block = Block.init_from_parent(self, kargs['coinbase'],
extra_data=kargs['extra_data'],
timestamp=kargs['timestamp'],
uncles=uncles)
# replay transactions
for tx_lst_serialized, _state_root, _gas_used_encoded in \
transaction_list:
tx = transactions.Transaction.create(tx_lst_serialized)
# logger.debug('state:\n%s', utils.dump_state(block.state))
# logger.debug('applying %r', tx)
success, output = processblock.apply_transaction(block, tx)
#block.add_transaction_to_list(tx) # < this is done by processblock
# logger.debug('state:\n%s', utils.dump_state(block.state))
logger.debug('d %s %s', _gas_used_encoded, block.gas_used)
assert utils.decode_int(_gas_used_encoded) == block.gas_used, \
"Gas mismatch (ours %d, theirs %d) on block: %r" % \
(block.gas_used, _gas_used_encoded, block.to_dict(False, True, True))
assert _state_root == block.state.root_hash, \
"State root mismatch (ours %r theirs %r) on block: %r" % \
(block.state.root_hash.encode('hex'),
_state_root.encode('hex'),
block.to_dict(False, True, True))
block.finalize()
block.uncles_hash = kargs['uncles_hash']
block.nonce = kargs['nonce']
block.min_gas_price = kargs['min_gas_price']
# checks
assert block.prevhash == self.hash
assert block.gas_used == kargs['gas_used']
assert block.gas_limit == kargs['gas_limit']
assert block.timestamp == kargs['timestamp']
assert block.difficulty == kargs['difficulty']
assert block.number == kargs['number']
assert block.extra_data == kargs['extra_data']
assert utils.sha3(rlp.encode(block.uncles)) == kargs['uncles_hash']
assert block.tx_list_root == kargs['tx_list_root']
assert block.state.root_hash == kargs['state_root'], (block.state.root_hash, kargs['state_root'])
return block
@classmethod
def hex_deserialize(cls, hexrlpdata):
return cls.deserialize(hexrlpdata.decode('hex'))
def mk_blank_acct(self):
if not hasattr(self, '_blank_acct'):
codehash = ''
self.state.db.put(codehash, '')
self._blank_acct = [utils.encode_int(0),
utils.encode_int(0),
trie.BLANK_ROOT,
codehash]
return self._blank_acct[:]
def get_acct(self, address):
if len(address) == 40:
address = address.decode('hex')
acct = rlp.decode(self.state.get(address)) or self.mk_blank_acct()
return tuple(utils.decoders[t](acct[i])
for i, (n, t, d) in enumerate(acct_structure))
# _get_acct_item(bin or hex, int) -> bin
def _get_acct_item(self, address, param):
''' get account item
:param address: account address, can be binary or hex string
:param param: parameter to get
'''
if param != 'storage' and address in self.caches[param]:
return self.caches[param][address]
return self.get_acct(address)[acct_structure_rev[param][0]]
# _set_acct_item(bin or hex, int, bin)
def _set_acct_item(self, address, param, value):
''' set account item
:param address: account address, can be binary or hex string
:param param: parameter to set
:param value: new value
'''
# logger.debug('set acct %r %r %d', address, param, value)
self.set_and_journal(param, address, value)
self.set_and_journal('all', address, True)
def set_and_journal(self, cache, index, value):
prev = self.caches[cache].get(index, None)
if prev != value:
self.journal.append([cache, index, prev, value])
self.caches[cache][index] = value
# _delta_item(bin or hex, int, int) -> success/fail
def _delta_item(self, address, param, value):
''' add value to account item
:param address: account address, can be binary or hex string
:param param: parameter to increase/decrease
:param value: can be positive or negative
'''
value = self._get_acct_item(address, param) + value
if value < 0:
return False
self._set_acct_item(address, param, value)
return True
def _add_transaction_to_list(self, tx_lst_serialized,
state_root, gas_used_encoded):
# adds encoded data # FIXME: the constructor should get objects
assert isinstance(tx_lst_serialized, list)
data = [tx_lst_serialized, state_root, gas_used_encoded]
self.transactions.update(
rlp.encode(utils.encode_int(self.transaction_count)),
rlp.encode(data))
self.transaction_count += 1
def add_transaction_to_list(self, tx):
tx_lst_serialized = rlp.decode(tx.serialize())
self._add_transaction_to_list(tx_lst_serialized,
self.state_root,
utils.encode_int(self.gas_used))
def _list_transactions(self):
# returns [[tx_lst_serialized, state_root, gas_used_encoded],...]
txlist = []
for i in range(self.transaction_count):
txlist.append(self.get_transaction(i))
return txlist
def get_transaction(self, num):
# returns [tx_lst_serialized, state_root, gas_used_encoded]
return rlp.decode(self.transactions.get(rlp.encode(utils.encode_int(num))))
def get_transactions(self):
return [transactions.Transaction.create(tx) for
tx, s, g in self._list_transactions()]
def get_nonce(self, address):
return self._get_acct_item(address, 'nonce')
def set_nonce(self, address, value):
return self._set_acct_item(address, 'nonce', value)
def increment_nonce(self, address):
return self._delta_item(address, 'nonce', 1)
def decrement_nonce(self, address):
return self._delta_item(address, 'nonce', -1)
def get_balance(self, address):
return self._get_acct_item(address, 'balance')
def set_balance(self, address, value):
self._set_acct_item(address, 'balance', value)
def delta_balance(self, address, value):
return self._delta_item(address, 'balance', value)
def transfer_value(self, from_addr, to_addr, value):
assert value >= 0
if self.delta_balance(from_addr, -value):
return self.delta_balance(to_addr, value)
return False
def get_code(self, address):
return self._get_acct_item(address, 'code')
def set_code(self, address, value):
self._set_acct_item(address, 'code', value)
def get_storage(self, address):
storage_root = self._get_acct_item(address, 'storage')
return trie.Trie(utils.get_db_path(), storage_root)
def get_storage_data(self, address, index):
if 'storage:'+address in self.caches:
if index in self.caches['storage:'+address]:
return self.caches['storage:'+address][index]
t = self.get_storage(address)
t.proof_mode = self.proof_mode
t.proof_nodes = self.proof_nodes
key = utils.zpad(utils.coerce_to_bytes(index), 32)
val = rlp.decode(t.get(key))
if self.proof_mode == RECORDING:
self.proof_nodes.extend(t.proof_nodes)
return utils.big_endian_to_int(val) if val else 0
def set_storage_data(self, address, index, val):
if 'storage:'+address not in self.caches:
self.caches['storage:'+address] = {}
self.set_and_journal('all', address, True)
self.set_and_journal('storage:'+address, index, val)
def commit_state(self):
changes = []
if not len(self.journal):
processblock.pblogger.log('delta', changes=[])
return
for address in self.caches['all']:
acct = rlp.decode(self.state.get(address.decode('hex'))) \
or self.mk_blank_acct()
for i, (key, typ, default) in enumerate(acct_structure):
if key == 'storage':
t = trie.Trie(utils.get_db_path(), acct[i])
t.proof_mode = self.proof_mode
t.proof_nodes = self.proof_nodes
for k, v in self.caches.get('storage:'+address, {}).iteritems():
enckey = utils.zpad(utils.coerce_to_bytes(k), 32)
val = rlp.encode(utils.int_to_big_endian(v))
changes.append(['storage', address, k, v])
if v:
t.update(enckey, val)
else:
t.delete(enckey)
acct[i] = t.root_hash
if self.proof_mode == RECORDING:
self.proof_nodes.extend(t.proof_nodes)
else:
if address in self.caches[key]:
v = self.caches[key].get(address, default)
changes.append([key, address, v])
acct[i] = utils.encoders[acct_structure[i][1]](v)
self.state.update(address.decode('hex'), rlp.encode(acct))
if self.proof_mode == RECORDING:
self.proof_nodes.extend(self.state.proof_nodes)
self.state.proof_nodes = []
if processblock.pblogger.log_state_delta:
processblock.pblogger.log('delta', changes=changes)
self.reset_cache()
def del_account(self, address):
self.commit_state()
if len(address) == 40:
address = address.decode('hex')
self.state.delete(address)
def account_to_dict(self, address, with_storage_root=False,
with_storage=True, for_vmtest=False):
if with_storage_root:
assert len(self.journal) == 0
med_dict = {}
for i, val in enumerate(self.get_acct(address)):
name, typ, default = acct_structure[i]
key = acct_structure[i][0]
if name == 'storage':
strie = trie.Trie(utils.get_db_path(), val)
if with_storage_root:
med_dict['storage_root'] = strie.get_root_hash().encode('hex')
else:
med_dict[key] = self.caches[key].get(address, utils.printers[typ](val))
if with_storage:
med_dict['storage'] = {}
d = strie.to_dict()
subcache = self.caches.get('storage:'+address, {})
subkeys = [utils.zpad(utils.coerce_to_bytes(kk), 32) for kk in subcache.keys()]
for k in d.keys() + subkeys:
v = d.get(k, None)
v2 = subcache.get(utils.big_endian_to_int(k), None)
hexkey = '0x'+utils.zunpad(k).encode('hex')
if v2 is not None:
if v2 != 0:
med_dict['storage'][hexkey] = \
'0x'+utils.int_to_big_endian(v2).encode('hex')
elif v is not None:
med_dict['storage'][hexkey] = '0x'+rlp.decode(v).encode('hex')
return med_dict
def reset_cache(self):
self.caches = {
'all': {},
'balance': {},
'nonce': {},
'code': {},
}
self.journal = []
# Revert computation
def snapshot(self):
return {
'state': self.state.root_hash,
'gas': self.gas_used,
'txs': self.transactions,
'txcount': self.transaction_count,
'postqueue': copy.copy(self.postqueue),
'suicides': self.suicides,
'suicides_size': len(self.suicides),
'journal': self.journal, # pointer to reference, so is not static
'journal_size': len(self.journal)
}
def revert(self, mysnapshot):
self.journal = mysnapshot['journal']
logger.debug('reverting')
while len(self.journal) > mysnapshot['journal_size']:
cache, index, prev, post = self.journal.pop()
logger.debug('%r %r %r %r', cache, index, prev, post)
if prev is not None:
self.caches[cache][index] = prev
else:
del self.caches[cache][index]
self.suicides = mysnapshot['suicides']
while len(self.suicides) > mysnapshot['suicides_size']:
self.suicides.pop()
self.state.root_hash = mysnapshot['state']
self.gas_used = mysnapshot['gas']
self.transactions = mysnapshot['txs']
self.transaction_count = mysnapshot['txcount']
self.postqueue = mysnapshot['postqueue']
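    # Illustrative usage (editor's addition, hypothetical helper names): snapshot() and
    # revert() implement transactional state changes. A caller typically wraps a risky
    # mutation like this:
    #
    #     snap = block.snapshot()
    #     try:
    #         apply_some_state_changes(block)   # hypothetical
    #     except Exception:
    #         block.revert(snap)                # undo caches, suicides and state root
    #
    # Only journal entries recorded after the snapshot are rolled back, so earlier
    # snapshots remain usable.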
def finalize(self):
"""
Apply rewards
We raise the block's coinbase account by Rb, the block reward,
        and the coinbase of each uncle by 15/16 of that (UNCLE_REWARD).
Rb = 1500 finney
"""
self.delta_balance(self.coinbase,
BLOCK_REWARD + NEPHEW_REWARD * len(self.uncles))
for uncle_rlp in self.uncles:
uncle_data = Block.deserialize_header(uncle_rlp)
self.delta_balance(uncle_data['coinbase'], UNCLE_REWARD)
self.commit_state()
def serialize_header_without_nonce(self):
return rlp.encode(self.list_header(exclude=['nonce']))
def get_state_root(self):
self.commit_state()
return self.state.root_hash
def set_state_root(self, state_root_hash):
self.state = trie.Trie(utils.get_db_path(), state_root_hash)
self.reset_cache()
state_root = property(get_state_root, set_state_root)
def get_tx_list_root(self):
return self.transactions.root_hash
tx_list_root = property(get_tx_list_root)
def list_header(self, exclude=[]):
header = []
for name, typ, default in block_structure:
# print name, typ, default , getattr(self, name)
if name not in exclude:
header.append(utils.encoders[typ](getattr(self, name)))
return header
def serialize(self):
# Serialization method; should act as perfect inverse function of the
# constructor assuming no verification failures
return rlp.encode([self.list_header(),
self._list_transactions(),
self.uncles])
def hex_serialize(self):
return self.serialize().encode('hex')
def serialize_header(self):
return rlp.encode(self.list_header())
def hex_serialize_header(self):
return rlp.encode(self.list_header()).encode('hex')
def to_dict(self, with_state=False, full_transactions=False,
with_storage_roots=False, with_uncles=False):
"""
serializes the block
with_state: include state for all accounts
full_transactions: include serialized tx (hashes otherwise)
with_uncles: include uncle hashes
"""
b = {}
for name, typ, default in block_structure:
b[name] = utils.printers[typ](getattr(self, name))
txlist = []
for i in range(self.transaction_count):
tx_rlp = self.transactions.get(rlp.encode(utils.encode_int(i)))
tx, msr, gas = rlp.decode(tx_rlp)
if full_transactions:
txjson = transactions.Transaction.create(tx).to_dict()
else:
txjson = utils.sha3(rlp.descend(tx_rlp, 0)).encode('hex') # tx hash
txlist.append({
"tx": txjson,
"medstate": msr.encode('hex'),
"gas": str(utils.decode_int(gas))
})
b["transactions"] = txlist
if with_state:
state_dump = {}
for address, v in self.state.to_dict().iteritems():
state_dump[address.encode('hex')] = \
self.account_to_dict(address, with_storage_roots)
b['state'] = state_dump
if with_uncles:
b['uncles'] = [utils.sha3(rlp.encode(u)).encode('hex') for u in self.uncles]
return b
def _hash(self):
return utils.sha3(self.serialize_header())
@property
def hash(self):
return self._hash()
def hex_hash(self):
return self.hash.encode('hex')
def get_parent(self):
if self.number == 0:
raise UnknownParentException('Genesis block has no parent')
try:
parent = get_block(self.prevhash)
except KeyError:
raise UnknownParentException(self.prevhash.encode('hex'))
#assert parent.state.db.db == self.state.db.db
return parent
def has_parent(self):
try:
self.get_parent()
return True
except UnknownParentException:
return False
def chain_difficulty(self):
# calculate the summarized_difficulty
if self.is_genesis():
return self.difficulty
elif 'difficulty:'+self.hex_hash() in self.state.db:
return utils.decode_int(
self.state.db.get('difficulty:'+self.hex_hash()))
else:
_idx, _typ, _ = block_structure_rev['difficulty']
o = self.difficulty + self.get_parent().chain_difficulty()
o += sum([utils.decoders[_typ](u[_idx]) for u in self.uncles])
self.state.db.put('difficulty:'+self.hex_hash(), utils.encode_int(o))
return o
def __eq__(self, other):
return isinstance(other, (Block, CachedBlock)) and self.hash == other.hash
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
return self.number > other.number
def __lt__(self, other):
return self.number < other.number
def __repr__(self):
return '<Block(#%d %s %s)>' % (self.number,
self.hex_hash()[:4],
self.prevhash.encode('hex')[:4])
@classmethod
def init_from_parent(cls, parent, coinbase, extra_data='',
timestamp=int(time.time()), uncles=[]):
return Block(
prevhash=parent.hash,
uncles_hash=utils.sha3(rlp.encode(uncles)),
coinbase=coinbase,
state_root=parent.state.root_hash,
tx_list_root=trie.BLANK_ROOT,
difficulty=calc_difficulty(parent, timestamp),
number=parent.number + 1,
min_gas_price=0,
gas_limit=calc_gaslimit(parent),
gas_used=0,
timestamp=timestamp,
extra_data=extra_data,
nonce='',
transaction_list=[],
uncles=uncles)
def set_proof_mode(self, pm, pmnodes=None):
self.proof_mode = pm
self.state.proof_mode = pm
self.proof_nodes = pmnodes or []
self.state.proof_nodes = pmnodes or []
class CachedBlock(Block):
# note: immutable refers to: do not manipulate!
_hash_cached = None
def _set_acct_item(self): raise NotImplementedError
def _add_transaction_to_list(self): raise NotImplementedError
def set_state_root(self): raise NotImplementedError
def revert(self): raise NotImplementedError
def commit_state(self): pass
def _hash(self):
if not self._hash_cached:
self._hash_cached = Block._hash(self)
return self._hash_cached
@classmethod
def create_cached(cls, blk):
blk.__class__ = CachedBlock
return blk
@lru_cache(500)
def get_block(blockhash):
"""
    Assumption: blocks loaded from the db are not manipulated
-> can be cached including hash
"""
return CachedBlock.create_cached(Block.deserialize(db.DB(utils.get_db_path()).get(blockhash)))
def has_block(blockhash):
return blockhash in db.DB(utils.get_db_path())
def genesis(start_alloc=GENESIS_INITIAL_ALLOC, difficulty=INITIAL_DIFFICULTY):
# https://ethereum.etherpad.mozilla.org/11
block = Block(prevhash=GENESIS_PREVHASH, coinbase=GENESIS_COINBASE,
tx_list_root=trie.BLANK_ROOT,
difficulty=difficulty, nonce=GENESIS_NONCE,
gas_limit=GENESIS_GAS_LIMIT)
for addr, balance in start_alloc.iteritems():
block.set_balance(addr, balance)
block.state.db.commit()
return block
def dump_genesis_block_tests_data():
import json
g = genesis()
data = dict(
genesis_state_root=g.state_root.encode('hex'),
genesis_hash=g.hex_hash(),
genesis_rlp_hex=g.serialize().encode('hex'),
initial_alloc=dict()
)
for addr, balance in GENESIS_INITIAL_ALLOC.iteritems():
data['initial_alloc'][addr] = str(balance)
print json.dumps(data, indent=1)
| mit | 5,795,150,003,162,164,000 | 36.892435 | 142 | 0.574102 | false |
cnobile2012/dcolumn | example_site/settings/base.py | 1 | 7181 | # -*- coding: utf-8 -*-
"""
Django settings for example_site project.
For more information on this file, see
https://docs.djangoproject.com/en/<version>/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/<version>/ref/settings/
"""
import os
from dcolumn.dcolumns.manager import dcolumn_manager
# Where is the 'website' directory with settings dir, apps, urls.py, etc. are.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Set the type of auto PK that is generated.
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
SITE_ID = 1
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/<version>/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gsx-ua^+oo7aqw=jn2ln2jiy3w4sl+5q$lxb2k-5tqasw+sxl*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TRAVIS = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admindocs',
#'django.contrib.sites',
'dcolumn.dcolumns',
'example_site.books',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
#'django.template.loaders.eggs.Loader',
],
},
},
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'example_site.urls'
WSGI_APPLICATION = 'example_site.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/<version>/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/<version>/howto/static-files/
# Where is the root of the site? This can be a root-relative URL.
SITE_URL = '/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.abspath(os.path.join(BASE_DIR, 'static/'))
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = SITE_URL + 'static/'
# Additional locations of static files
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
STATICFILES_DIRS = (
os.path.abspath(os.path.join(BASE_DIR, 'dev')),
)
# DCOLUMN config
dcolumn_manager.register_css_containers(
(('author_top', 'author-top'),
('author_center', 'author-center'),
('author_bottom', 'author-botton'),
('book_top', 'book-top'),
('book_center', 'book-center'),
('book_bottom', 'book-bottom'),
('promotion_top', 'promotion-top'),
('promotion_center', 'promotion-center'),
('promotion_bottom', 'promotion-bottom'),
('publisher_top', 'publisher-top'),
('publisher_center', 'publisher-center'),
('publisher_bottom', 'publisher-bottom'),
))
DYNAMIC_COLUMNS = {
# To allow anybody to access the API set to True.
'INACTIVATE_API_AUTH': False,
}
# Change the URL below to your login path.
LOGIN_URL = '/admin/login/'
# A sample logging configuration. The only tangible logging performed by this
# configuration is to send an email to the site admins on every HTTP 500 error
# when DEBUG=False. See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOG_DIR = os.path.abspath(os.path.join(BASE_DIR, '..', 'logs'))
not os.path.isdir(LOG_DIR) and os.mkdir(LOG_DIR, 0o0775)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': ("%(asctime)s %(levelname)s %(name)s %(funcName)s "
"[line:%(lineno)d] %(message)s")
},
'simple': {
'format': '%(asctime)s %(levelname)s %(name)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler',
'include_html': 'True',
},
'console': {
'level':'DEBUG',
'class':'logging.StreamHandler',
'formatter': 'simple'
},
'examples_file': {
'class': ('example_site.common.loghandlers'
'.DeferredRotatingFileHandler'),
'level': 'DEBUG',
'formatter': 'verbose',
'filename': '/dev/null',
'maxBytes': 50000000, # 50 Meg bytes
'backupCount': 5,
},
'dcolumns_file': {
'class': ('example_site.common.loghandlers'
'.DeferredRotatingFileHandler'),
'level': 'DEBUG',
'formatter': 'verbose',
'filename': '/dev/null',
'maxBytes': 50000000, # 50 Meg bytes
'backupCount': 5,
},
},
'loggers': {
'django.request': {
'handlers': ['console', 'mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'examples': {
'handlers': ('examples_file', 'mail_admins',),
'level': 'ERROR',
'propagate': True,
},
'dcolumns': {
'handlers': ('dcolumns_file', 'mail_admins',),
'level': 'ERROR',
'propagate': True,
},
'tests': {
'handlers': ('dcolumns_file',),
'level': 'DEBUG',
'propagate': True,
},
},
}
| mit | 5,756,740,906,928,569,000 | 30.915556 | 79 | 0.599499 | false |
mganeva/mantid | Framework/PythonInterface/plugins/algorithms/USANSSimulation.py | 1 | 6752 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=no-init,invalid-name
from __future__ import (absolute_import, division, print_function)
from mantid.simpleapi import *
from mantid.api import *
from mantid.kernel import *
import math
import numpy
class USANSSimulation(PythonAlgorithm):
def category(self):
return "SANS"
def seeAlso(self):
return [ "USANSReduction" ]
def name(self):
return "USANSSimulation"
def summary(self):
return "Simulate a USANS workspace"
def PyInit(self):
self.declareProperty("TwoTheta", 0.01, "Scattering angle in degrees")
self.declareProperty(FloatArrayProperty("WavelengthPeaks", values=[0.72, 0.9, 1.2, 1.8, 3.6],
direction=Direction.Input), "Wavelength peaks out of the monochromator")
self.declareProperty("CountTime", 1000.0, "Fake count time")
# Model parameters
self.declareProperty("EmptyRun", False, "If True, the run is considered an empty run")
self.declareProperty("SphereRadius", 60.0, "Radius for the sphere model (Angstrom)")
self.declareProperty("Background", 0.0, "Background")
self.declareProperty("SigmaPeak", 0.01, "Width of the wavelength peaks")
self.declareProperty(MatrixWorkspaceProperty("OutputWorkspace", "", Direction.Output), "Output workspace")
self.declareProperty(MatrixWorkspaceProperty("MonitorWorkspace", "", Direction.Output), "Output monitor workspace")
#pylint: disable=too-many-locals
def PyExec(self):
workspace = self.getPropertyValue("OutputWorkspace")
out_ws = CreateSimulationWorkspace(Instrument="USANS",
BinParams="0,50,32000",
UnitX="TOF",
OutputWorkspace=workspace)
out_ws.setYUnitLabel("1/cm")
data_x = out_ws.dataX(0)
mon_ws_name = self.getPropertyValue("MonitorWorkspace")
mon_ws = CreateWorkspace(dataX=data_x, dataY=numpy.zeros(len(data_x)-1),
UnitX="TOF", OutputWorkspace=mon_ws_name)
mon_y = mon_ws.dataY(0)
mon_e = mon_ws.dataE(0)
# Number of pixels for the main detector
n_pixels = int(out_ws.getNumberHistograms()/2)
# Clean up the workspace
for j in range(n_pixels):
data_y = out_ws.dataY(j)
for i in range(len(data_y)):
data_y[i] = 0.0
# Fill monitor workspace with fake beam profile
count_time = self.getProperty("CountTime").value
for i in range(len(data_x)-1):
wl_i = 0.0039560/30.0*(data_x[i]+data_x[i+1])/2.0
mon_y[i] = count_time*math.exp(-wl_i)
mon_e[i] = math.sqrt(mon_y[i])
# Add analyzer theta value and monochromator angle theta_b in logs
two_theta = self.getProperty("TwoTheta").value
is_empty_run = self.getProperty("EmptyRun").value
if is_empty_run:
two_theta = 0.0
theta_b = 70.0
theta = theta_b + two_theta
out_ws.getRun().addProperty("AnalyzerTheta", theta, 'degree', True)
out_ws.getRun().addProperty("two_theta", two_theta, 'degree', True)
out_ws.getRun().addProperty("MonochromatorTheta", theta_b, 'degree', True)
out_ws.getRun().addProperty("run_title", "Simulated USANS", True)
out_ws.getRun().addProperty("run_number", "1234", True)
# List of wavelength peaks, and width of the peaks
wl_peaks = self.getProperty("WavelengthPeaks").value
sigma = self.getProperty("SigmaPeak").value
for wl in wl_peaks:
q = 6.28*math.sin(two_theta)/wl
Logger("USANS").notice( "wl = %g; Q = %g" % (wl, q))
for i in range(len(data_x)-1):
wl_i = 0.0039560/30.0*(data_x[i]+data_x[i+1])/2.0
# Scale the I(q) by a Gaussian to simulate the wavelength peaks selected by the monochromator
flux = 1.0e6/(sigma*math.sqrt(2.0*math.pi))*math.exp(-(wl_i-wl)*(wl_i-wl)/(2.0*sigma*sigma))
# Multiply by beam profile
flux *= mon_y[i]
# Account for transmission
if not is_empty_run:
flux *= math.exp(-wl_i/2.0)
# Transmission detector
for j in range(n_pixels, 2*n_pixels):
det_pos = out_ws.getInstrument().getDetector(j).getPos()
r = math.sqrt(det_pos.Y()*det_pos.Y()+det_pos.X()*det_pos.X())
sigma = 0.01
scale = math.exp(-r*r/(2.0*sigma*sigma))
data_y = out_ws.dataY(j)
data_y[i] += int(scale*flux)
data_e = out_ws.dataE(j)
data_e[i] = math.sqrt(data_e[i]*data_e[i]+scale*scale*flux*flux)
# If we have an empty run, there's no need to fill the main detector
if is_empty_run:
continue
# Compute I(q) and store the results
q_i = q*wl/wl_i
i_q = self._sphere_model(q_i, scale=flux)
for j in range(n_pixels):
det_pos = out_ws.getInstrument().getDetector(j).getPos()
r = math.sqrt(det_pos.Y()*det_pos.Y()+det_pos.X()*det_pos.X())
sigma = 0.01
scale = math.exp(-r*r/(2.0*sigma*sigma))
data_y = out_ws.dataY(j)
data_y[i] += int(i_q*scale)
data_e = out_ws.dataE(j)
data_e[i] = math.sqrt(data_e[i]*data_e[i]+i_q*i_q*scale*scale)
self.setProperty("OutputWorkspace", out_ws)
self.setProperty("MonitorWorkspace", mon_ws)
def _sphere_model(self, q, scale):
"""
Return I(q) for a sphere model
@param q: q-value
@param scale: normalization factor to give I(q)
"""
radius = self.getProperty("SphereRadius").value
bck = self.getProperty("Background").value
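        # (Comment added for clarity) This implements the monodisperse sphere form
        # factor I(q) = scale * V * [3(sin(qr) - qr*cos(qr)) / (qr)^3]^2 * 1.0e-6 + bck,
        # with V = 4*pi*r^3/3; the qr == 0 special case below avoids the 0/0 limit (-> 1).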
qr = q*radius
bes = 3.0*(math.sin(qr)-qr*math.cos(qr))/(qr*qr*qr) if not qr == 0.0 else 1.0
vol = 4.0*math.pi/3.0*radius*radius*radius
f2 = vol*bes*bes*1.0e-6
return scale*f2+bck
#############################################################################################
AlgorithmFactory.subscribe(USANSSimulation())
| gpl-3.0 | 395,813,981,326,832,300 | 40.937888 | 123 | 0.556132 | false |
ywl19891989/PlistParseUtils | src/module/ImageUtils.py | 1 | 6417 | # coding=gbk
'''
Created on 2014-3-27
@author: Hali
'''
import sys
import os
import Image
from PlistParser import Frame
def printUsage():
print "Usage: ImageUtils.py [-s input=srcImgPath outSize=[(width,heigh)|(x,y,width,heigt)] outPath=outPath]"
print " [-c input=srcImgPath srcRect=(x,y,w,h) outPath=outPath]"
print " [-cs input=srcImgPath srcRect=(x,y,w,h) outPath=outPath outSize=(w,h)]"
print "Options:"
print " -s scale the image to input size"
print " input: srcImgPath the source image to scale"
print " outSize: size of image to scale [no space]"
print " outPath: path of Image to save"
print ""
print " -c crop the rect of image and save to outPath"
print " input: srcImgPath the source image to crop"
print " srcRect: rect of image to be crop [no space]"
print " outPath: path of croped Image to save"
print ""
print " -cs crop the rect of image and save to outPath"
print " input: srcImgPath the source image to crop"
print " srcRect: rect of image to be crop [no space]"
print " outPath: path of croped Image to save"
print " outSize: size of image crop to sace [no space]"
print ""
print "Scale Sample: ./ImageUtils.py -s input=./test.png outSize={20,20} outPath=./test-scale.png"
print "Crop Sample: ./ImageUtils.py -c input=./test.png srcRect={0,0,20,20} outPath=./test-crop.png"
print "Crop&Scale Sample: ./ImageUtils.py -cs input=./test.png outSize={10,10,20,20} outPath=./test-scale.png outSize=(100,100)"
print ""
def scaleImg(img, box):
if len(box) != 4:
print "box arg len is Not enough!"
sys.exit();
if (box[2] == 0 or box[3] == 0):
print "Error! outImg size(%d, %d) invalid!" % (box[2], box[3])
sys.exit()
img = img.resize((box[2], box[3]))
newImg = Image.new("RGB", (box[2], box[3]), (255, 255, 255))
newImg.putalpha(0)
newImg.paste(img)
return newImg
def cropImg(img, frame):
x, y = int(frame.x), int(frame.y)
w, h = int(frame.w), int(frame.h)
ox, oy = int(frame.ox), int(frame.oy)
ow, oh = int(frame.ow), int(frame.oh)
px = int((ow - w)/2 + ox)
py = int((oh - h)/2 - oy)
rotation = 0
if frame.rotated == True:
w, h = h, w
rotation = 90
box = (x, y, x + w, y + h)
if frame.ow == 0:
frame.ow = 1
if frame.oh == 0:
frame.oh = 1
newImg = img.resize((frame.ow, frame.oh))
newImg.putalpha(255)
if w > 0 and h > 0:
cropImg = img.crop(box)
cropImg = cropImg.rotate(rotation)
for i in range(cropImg.size[0]):
for j in range(cropImg.size[1]):
newImg.putpixel((i + px, j + py), cropImg.getpixel((i, j)))
return newImg
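# Illustrative sketch (not part of the original module): cropping a 20x20 region
# and restoring it onto its untrimmed canvas, mirroring what checkArgs() does for
# the -c option.  The input path is hypothetical; Frame comes from PlistParser.
#
#   img = Image.open("./test.png")
#   frame = Frame()
#   frame.init(0, 0, 20, 20, 0, 0, 20, 20)   # x, y, w, h, ox, oy, ow, oh
#   cropImg(img, frame).save("./test-crop.png")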
def checkArgs(args):
if (len(args) != 4 and len(args) != 5):
printUsage()
sys.exit()
argMode = args[0]
if argMode == "-s":
inputPath, outSize, outPath = args[1:]
inputPath = inputPath.split("=")[1]
outPath = outPath.split("=")[1]
outSize = outSize.split("=")[1]
outSize = outSize.replace("(", "")
outSize = outSize.replace(")", "")
sizeArg = outSize.split(",")
if len(sizeArg) == 2:
outSize = (0, 0, int(sizeArg[0]), int(sizeArg[1]))
elif len(sizeArg) == 4:
outSize = (int(sizeArg[0]), int(sizeArg[1]), int(sizeArg[2]), int(sizeArg[3]))
if not os.path.exists(inputPath):
print "input filePath(%s) not exist!" % inputPath
sys.exit()
inputImg = Image.open(inputPath)
newImg = scaleImg(inputImg, outSize)
dirName = os.path.dirname(outPath)
if not os.path.exists(dirName):
os.makedirs(dirName)
newImg.save(outPath)
elif argMode == "-c":
inputPath, srcRect, outPath = args[1:]
inputPath = inputPath.split("=")[1]
outPath = outPath.split("=")[1]
srcRect = srcRect.split("=")[1]
srcRect = srcRect.replace("(", "")
srcRect = srcRect.replace(")", "")
rectArg = srcRect.split(",")
if not len(rectArg) == 4:
print "in crop mode, src rect arg(%s) invalid!" % (args[2].split("=")[1])
sys.exit()
srcRect = (int(rectArg[0]), int(rectArg[1]), int(rectArg[2]), int(rectArg[3]))
if not os.path.exists(inputPath):
print "input filePath(%s) not exist!" % inputPath
sys.exit()
inputImg = Image.open(inputPath)
frame = Frame()
x, y, w, h = srcRect
frame.init( x, y, w, h, 0, 0, w, h)
newImg = cropImg(inputImg, frame)
newImg.save(outPath)
elif argMode == "-cs":
inputPath, srcRect, outPath, outSize = args[1:]
inputPath = inputPath.split("=")[1]
outPath = outPath.split("=")[1]
srcRect = srcRect.split("=")[1]
srcRect = srcRect.replace("(", "")
srcRect = srcRect.replace(")", "")
rectArg = srcRect.split(",")
if not len(rectArg) == 4:
print "in crop mode, src rect arg(%s) invalid!" % (args[2].split("=")[1])
sys.exit()
srcRect = (int(rectArg[0]), int(rectArg[1]), int(rectArg[2]), int(rectArg[3]))
outSize = outSize.split("=")[1]
outSize = outSize.replace("(", "")
outSize = outSize.replace(")", "")
sizeArg = outSize.split(",")
if not len(sizeArg) == 2:
print "in crop mode, out size arg(%s) invalid!" % (args[2].split("=")[1])
sys.exit()
outSize = (int(sizeArg[0]), int(sizeArg[1]))
if not os.path.exists(inputPath):
print "input filePath(%s) not exist!" % inputPath
sys.exit()
inputImg = Image.open(inputPath)
frame = Frame()
x, y, w, h = srcRect
ow, oh = outSize
frame.init( x, y, w, h, 0, 0, w, h)
newImg = cropImg(inputImg, frame)
newImg = scaleImg(newImg, (0, 0, ow, oh))
newImg.save(outPath)
if __name__ == '__main__':
curDir = os.getcwd()
checkArgs(sys.argv[1:]) | mit | 6,670,034,634,960,313,000 | 32.602094 | 132 | 0.527505 | false |
bytedance/fedlearner | example/tree_model/make_data.py | 1 | 5382 | # pylint: disable=unsubscriptable-object
import os
import argparse
import numpy as np
import tensorflow as tf
from sklearn.datasets import load_iris
def quantize_data(header, dtypes, X):
for i, (h, dtype) in enumerate(zip(header, dtypes)):
if h[0] != 'f' or dtype != np.int32:
continue
x = X[:, i].copy()
nan_mask = np.isnan(x)
bins = np.quantile(x[~nan_mask], np.arange(33)/32)
bins = np.unique(bins)
X[:, i][~nan_mask] = np.digitize(
x[~nan_mask], bins, right=True)
X[:, i][nan_mask] = 33
return np.asarray([tuple(i) for i in X], dtype=list(zip(header, dtypes)))
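# Note on quantize_data (comment added for clarity): feature columns declared as
# int32 in `dtypes` (names starting with 'f') are bucketed into at most 32
# quantile-based bins via np.digitize, with NaN entries mapped to the sentinel
# bin 33; float feature columns and the 'label'/'example_id' columns pass
# through unchanged, and the rows are returned as a structured numpy array.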
def write_tfrecord_data(filename, data, header, dtypes):
fout = tf.io.TFRecordWriter(filename)
for i in range(data.shape[0]):
example = tf.train.Example()
for h, d, x in zip(header, dtypes, data[i]):
if d == np.int32:
example.features.feature[h].int64_list.value.append(x)
else:
example.features.feature[h].float_list.value.append(x)
        fout.write(example.SerializeToString())
    # Close the writer explicitly so all records are flushed to disk.
    fout.close()
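# Illustrative sketch (not part of the original script): reading back one of the
# written files.  The feature names/dtypes must mirror the header/dtypes used
# above; 'f00000' (a float feature) and 'label' are just examples.
#
#   feature_spec = {
#       'f00000': tf.io.FixedLenFeature([], tf.float32),
#       'label': tf.io.FixedLenFeature([], tf.int64),
#   }
#   dataset = tf.data.TFRecordDataset('data/leader_train.tfrecord')
#   parsed = dataset.map(lambda rec: tf.io.parse_single_example(rec, feature_spec))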
def write_data(output_type, filename, X, y, role, verify_example_ids):
if role == 'leader':
data = np.concatenate((X[:, :X.shape[1]//2], y), axis=1)
N = data.shape[1]-1
header = ['f%05d'%i for i in range(N)] + ['label']
dtypes = [np.float]*(N//2) + [np.int32]*(N - N//2) + [np.int32]
elif role == 'follower':
data = X[:, X.shape[1]//2:]
N = data.shape[1]
header = ['f%05d'%i for i in range(N)]
dtypes = [np.float]*(N//2) + [np.int32]*(N - N//2)
else:
data = np.concatenate((X, y), axis=1)
N = data.shape[1]-1
header = ['f%05d'%i for i in range(N)] + ['label']
dtypes = [np.float]*(N//2) + [np.int32]*(N - N//2) + [np.int32]
if verify_example_ids:
data = np.concatenate(
[[[i] for i in range(data.shape[0])], data], axis=1)
header = ['example_id'] + header
dtypes = [np.int32] + dtypes
data = quantize_data(header, dtypes, data)
if output_type == 'tfrecord':
write_tfrecord_data(filename, data, header, dtypes)
else:
np.savetxt(
filename,
data,
delimiter=',',
header=','.join(header),
fmt=['%d' if i == np.int32 else '%f' for i in dtypes],
comments='')
def process_mnist(X, y):
X = X.reshape(X.shape[0], -1)
X = np.asarray([X[i] for i, yi in enumerate(y) if yi in (2, 3)])
y = np.asarray([[y[i] == 3] for i, yi in enumerate(y) if yi in (2, 3)],
dtype=np.int32)
return X, y
def make_data(args):
if args.dataset == 'mnist':
(x_train, y_train), (x_test, y_test) = \
tf.keras.datasets.mnist.load_data()
x_train, y_train = process_mnist(x_train, y_train)
x_test, y_test = process_mnist(x_test, y_test)
else:
data = load_iris()
x_train = x_test = data.data
y_train = y_test = np.minimum(data.target, 1).reshape(-1, 1)
if not os.path.exists('data'):
os.makedirs('data')
os.makedirs('data/leader_test')
os.makedirs('data/follower_test')
os.makedirs('data/local_test')
write_data(
args.output_type,
'data/leader_train.%s'%args.output_type, x_train, y_train,
'leader', args.verify_example_ids)
write_data(
args.output_type,
'data/follower_train.%s'%args.output_type, x_train, y_train,
'follower', args.verify_example_ids)
write_data(
args.output_type,
'data/local_train.%s'%args.output_type, x_train, y_train,
'local', False)
write_data(
args.output_type,
'data/leader_test/part-0001.%s'%args.output_type, x_test, y_test,
'leader', args.verify_example_ids)
write_data(
args.output_type,
'data/follower_test/part-0001.%s'%args.output_type, x_test, y_test,
'follower', args.verify_example_ids)
write_data(
args.output_type,
'data/local_test/part-0001.%s'%args.output_type, x_test, y_test,
'local', False)
write_data(
args.output_type,
'data/leader_test/part-0002.%s'%args.output_type, x_test, y_test,
'leader', args.verify_example_ids)
write_data(
args.output_type,
'data/follower_test/part-0002.%s'%args.output_type, x_test, y_test,
'follower', args.verify_example_ids)
write_data(
args.output_type,
'data/local_test/part-0002.%s'%args.output_type, x_test, y_test,
'local', False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='FedLearner Tree Model Trainer.')
parser.add_argument('--verify-example-ids',
type=bool,
default=False,
help='If set to true, the first column of the '
'data will be treated as example ids that '
'must match between leader and follower')
parser.add_argument('--dataset', type=str, default='mnist',
help='whether to use mnist or iris dataset')
parser.add_argument('--output-type', type=str, default='csv',
help='Output csv or tfrecord')
make_data(parser.parse_args())
| apache-2.0 | 6,132,635,942,032,100,000 | 35.612245 | 77 | 0.554255 | false |
JakubPetriska/poker-cfr | verification/dbr_correctness_test.py | 1 | 6481 | import os
import unittest
from unittest import TestSuite
import random
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
import acpc_python_client as acpc
from response.data_biased_response import DataBiasedResponse
from evaluation.exploitability import Exploitability
from tools.game_tree.builder import GameTreeBuilder
from tools.sampling import SamplesTreeNodeProvider
from tools.game_tree.node_provider import StrategyTreeNodeProvider
from tools.game_tree.nodes import ActionNode
from tools.walk_trees import walk_trees
from tools.game_utils import is_correct_strategy
from tools.io_util import write_strategy_to_file
FIGURES_FOLDER = 'verification/dbr_correctness'
P_MAX_VALUES = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 1.0]
class DbrCorrectnessTests(unittest.TestCase):
def test_kuhn_dbr_correctness(self):
kuhn_test_spec = {
'title': 'Kuhn Poker DBR strategy performance',
'game_file_path': 'games/kuhn.limit.2p.game',
'test_counts': 1,
'training_iterations': 100,
}
self.train_and_show_results(kuhn_test_spec)
# def test_kuhn_bigdeck_cfr_correctness(self):
# kuhn_bigdeck_test_spec = {
# 'title': 'Kuhn Bigdeck Poker CFR trained strategy exploitability',
# 'game_file_path': 'games/kuhn.bigdeck.limit.2p.game',
# 'test_counts': 1,
# 'training_iterations': 1000,
# 'checkpoint_iterations': 10
# }
# self.train_and_show_results(kuhn_bigdeck_test_spec)
# def test_kuhn_bigdeck_2round_cfr_correctness(self):
# kuhn_bigdeck_2round_test_spec = {
# 'title': 'Kuhn Bigdeck 2round Poker CFR trained strategy exploitability',
# 'game_file_path': 'games/kuhn.bigdeck.2round.limit.2p.game',
# 'test_counts': 1,
# 'training_iterations': 1000,
# 'checkpoint_iterations': 10
# }
# self.train_and_show_results(kuhn_bigdeck_2round_test_spec)
# def test_leduc_cfr_correctness(self):
# leduc_test_spec = {
# 'title': 'Leduc Hold\'em Poker CFR trained strategy exploitability',
# 'game_file_path': 'games/leduc.limit.2p.game',
# 'test_counts': 1,
# 'training_iterations': 1000,
# 'checkpoint_iterations': 10
# }
# self.train_and_show_results(leduc_test_spec)
def train_and_show_results(self, test_spec):
game = acpc.read_game_file(test_spec['game_file_path'])
weak_opponent_samples_tree = GameTreeBuilder(game, SamplesTreeNodeProvider()).build_tree()
weak_opponent_strategy_tree = GameTreeBuilder(game, StrategyTreeNodeProvider()).build_tree()
def on_node(samples_node, strategy_node):
if isinstance(samples_node, ActionNode):
child_count = len(samples_node.children)
samples_count = random.randrange(15)
for i, a in enumerate(samples_node.children):
if i < (child_count - 1) and samples_count > 0:
action_samples_count = random.randrange(samples_count + 1)
samples_count -= action_samples_count
samples_node.action_decision_counts[a] = action_samples_count
else:
samples_node.action_decision_counts[a] = samples_count
samples_sum = np.sum(samples_node.action_decision_counts)
if samples_sum > 0:
strategy_node.strategy = samples_node.action_decision_counts / samples_sum
else:
for a in strategy_node.children:
strategy_node.strategy[a] = 1 / len(strategy_node.children)
walk_trees(on_node, weak_opponent_samples_tree, weak_opponent_strategy_tree)
self.assertTrue(is_correct_strategy(weak_opponent_strategy_tree))
exploitability = Exploitability(game)
num_test_counts = test_spec['test_counts']
data = np.zeros([num_test_counts, 2, len(P_MAX_VALUES)])
for i in range(num_test_counts):
print('%s/%s' % (i + 1, num_test_counts))
for j, p_max in enumerate(P_MAX_VALUES):
print('Pmax: %s - %s/%s' % (p_max, j + 1, len(P_MAX_VALUES)))
dbr = DataBiasedResponse(game, weak_opponent_samples_tree, p_max=p_max)
dbr.train(test_spec['training_iterations'])
data[i, 0, j] = exploitability.evaluate(dbr.game_tree)
data[i, 1, j] = exploitability.evaluate(weak_opponent_strategy_tree, dbr.game_tree)
plt.figure(dpi=160)
for k in range(i + 1):
run_index = math.floor(k / 2)
xdata = data[k, 0, :] if k < i or j == (len(P_MAX_VALUES) - 1) else data[k, 0, 0:j+1]
ydata = data[k, 1, :] if k < i or j == (len(P_MAX_VALUES) - 1) else data[k, 1, 0:j+1]
plt.plot(
xdata,
ydata,
label='Run %s' % (run_index + 1),
marker='o',
linewidth=0.8)
if 'title' in test_spec:
plt.title(test_spec['title'])
plt.xlabel('DBR trained strategy exploitability [mbb/g]')
plt.ylabel('Random opponent exploitation by DBR strategy [mbb/g]')
plt.grid()
if num_test_counts > 1:
plt.legend()
game_name = test_spec['game_file_path'].split('/')[1][:-5]
figure_output_path = '%s/%s(it:%s).png' % (FIGURES_FOLDER, game_name, test_spec['training_iterations'])
figures_directory = os.path.dirname(figure_output_path)
if not os.path.exists(figures_directory):
os.makedirs(figures_directory)
plt.savefig(figure_output_path)
print('\033[91mThis test needs your assistance! ' +
'Check the generated graph %s!\033[0m' % figure_output_path)
test_classes = [
DbrCorrectnessTests
]
def load_tests(loader, tests, pattern):
suite = TestSuite()
for test_class in test_classes:
tests = loader.loadTestsFromTestCase(test_class)
suite.addTests(tests)
return suite
if __name__ == "__main__":
unittest.main(verbosity=2)
| mit | -8,551,517,427,911,411,000 | 40.544872 | 119 | 0.584015 | false |
callowayproject/Transmogrify | doc_src/conf.py | 1 | 6438 | # -*- coding: utf-8 -*-
#
# Transmogrify documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 21 13:18:22 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('..'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'example.settings'
import transmogrify
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Transmogrify'
copyright = u'2016, The Calloway Project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = transmogrify.get_version()
# The full version, including alpha/beta/rc tags.
release = transmogrify.get_version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'transmogrifydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'transmogrify.tex', u'Transmogrify Documentation',
   u'The Calloway Project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| apache-2.0 | 904,517,533,806,405,200 | 31.846939 | 80 | 0.715595 | false |
istvanzk/RasPiConnectServer | ExecuteFiles/ExecuteFileDirectory.py | 1 | 3226 | #!/usr/local/bin/python3
# Filename: ExecuteFileDirectory.py
# Version 1.0 04/09/13 JS MiloCreek
# Version 3.0 04.04.2016 IzK (Python3.4+)
import Config
import glob
import os
import xml.etree.ElementTree as ET
import BuildResponse
import Validate  # used by the error paths below (buildValidateResponse)
import time
def Execute_File_Directory(root):
# find the interface object type
objectServerID = root.find("./OBJECTSERVERID").text
objectFlags = root.find("./OBJECTFLAGS").text
objectName = root.find("./OBJECTNAME").text
outgoingXMLData = BuildResponse.buildHeader(root)
if (Config.debug()):
print(("objectServerID = %s" % objectServerID))
# we have the objectServerID so now we can choose the correct
# program
if (objectServerID == "FDC-1"):
print(glob.glob("ClientXMLConfigFiles/*.xml"))
file_list = glob.glob("ClientXMLConfigFiles/*.xml")
responseData = ""
for pathname in file_list:
responseData += "<FILENAME>"
responseData += os.path.basename(pathname)
responseData += "</FILENAME>"
outgoingXMLData += BuildResponse.buildResponse(responseData)
else:
# invalid RaspiConnect Code
outgoingXMLData += Validate.buildValidateResponse("NO")
outgoingXMLData += BuildResponse.buildFooter()
if (Config.debug()):
print(outgoingXMLData)
return outgoingXMLData
def Execute_File_Read(root):
# find the interface object type
objectServerID = root.find("./OBJECTSERVERID").text
objectFlags = root.find("./OBJECTFLAGS").text
objectName = root.find("./OBJECTNAME").text
outgoingXMLData = BuildResponse.buildHeader(root)
if (Config.debug()):
print("objectServerID = %s" % objectServerID)
# we have the objectServerID so now we can choose the correct
# program
if (objectServerID == "FRC-1"):
responseData = ""
print(os.getcwd())
with open ("./ClientXMLConfigFiles/"+objectName, "r") as myfile:
responseData=myfile.read().replace('\n', '')
outgoingXMLData += BuildResponse.buildResponse(responseData)
else:
# invalid RaspiConnect Code
outgoingXMLData += Validate.buildValidateResponse("NO")
outgoingXMLData += BuildResponse.buildFooter()
if (Config.debug()):
print(outgoingXMLData)
return outgoingXMLData
def Execute_File_Write(root):
# find the interface object type
objectServerID = root.find("./OBJECTSERVERID").text
objectFlags = root.find("./OBJECTFLAGS").text
objectName = root.find("./OBJECTNAME").text
objectResponseBody = root.find("./OBJECTRESPONSEBODY").text
outgoingXMLData = BuildResponse.buildHeader(root)
if (Config.debug()):
print(("objectServerID = %s" % objectServerID))
if (Config.debug()):
print(("objectResponseBody = %s" % objectResponseBody))
# we have the objectServerID so now we can choose the correct
# program
if (objectServerID == "FWC-1"):
myfile = open("./ClientXMLConfigFiles/"+objectName, "w")
myfile.write(objectResponseBody)
        myfile.close()
responseData = "OK"
outgoingXMLData += BuildResponse.buildResponse(responseData)
else:
# invalid RaspiConnect Code
outgoingXMLData += Validate.buildValidateResponse("NO")
outgoingXMLData += BuildResponse.buildFooter()
if (Config.debug()):
print(outgoingXMLData)
return outgoingXMLData
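# Illustrative sketch (not part of the original file): the minimal element layout
# these handlers read from an incoming request.  The root tag name and values are
# assumptions; BuildResponse.buildHeader() may require further fields in practice.
#
#   request = ET.fromstring(
#       "<XMLOBJECT>"
#       "<OBJECTSERVERID>FRC-1</OBJECTSERVERID>"
#       "<OBJECTFLAGS>0</OBJECTFLAGS>"
#       "<OBJECTNAME>example.xml</OBJECTNAME>"
#       "<OBJECTRESPONSEBODY></OBJECTRESPONSEBODY>"
#       "</XMLOBJECT>")
#   response_xml = Execute_File_Read(request)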
# End of ExecuteFiles.py
| gpl-3.0 | -8,829,375,677,220,969,000 | 18.202381 | 66 | 0.717297 | false |
google/objax | tests/jit.py | 1 | 3861 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittests for ObJAX JIT."""
import unittest
import jax.numpy as jn
from jax.core import ConcretizationTypeError
import objax
from objax.typing import JaxArray
class LinearArgs(objax.nn.Linear):
def __call__(self, x: JaxArray, some_args: float) -> JaxArray:
"""Returns the results of applying the linear transformation to input x."""
y = jn.dot(x, self.w.value) * some_args
if self.b is not None:
y += self.b.value
return y
class LinearTrain(objax.nn.Linear):
def __call__(self, x: JaxArray, training: bool) -> JaxArray:
"""Returns the results of applying the linear transformation to input x."""
y = jn.dot(x, self.w.value)
if training:
y = -y
if self.b is not None:
y += self.b.value
return y
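# Note (added for clarity): LinearTrain branches on the Python-level `training`
# flag inside __call__.  Because objax.Jit traces its call arguments, that flag
# arrives as a traced value rather than a concrete bool, which is why
# test_jit_kwargs below expects ConcretizationTypeError when passing
# training=True through the jitted module.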
class TestJit(unittest.TestCase):
def test_on_linear(self):
k = objax.nn.Linear(3, 3)
kj = objax.Jit(k)
x = objax.random.normal((64, 3))
y1 = kj(x)
k.w.assign(k.w.value + 1)
y2 = kj(x)
k.w.assign(k.w.value - 1)
y3 = kj(x)
self.assertAlmostEqual(((y1 - y3) ** 2).sum(), 0)
self.assertNotEqual(((y1 - y2) ** 2).sum(), 0)
def test_double_jit(self):
k = objax.nn.Linear(3, 3)
kj = objax.Jit(objax.Jit(k))
x = objax.random.normal((64, 3))
y1 = kj(x)
k.w.assign(k.w.value + 1)
y2 = kj(x)
k.w.assign(k.w.value - 1)
y3 = kj(x)
self.assertAlmostEqual(((y1 - y3) ** 2).sum(), 0)
self.assertNotEqual(((y1 - y2) ** 2).sum(), 0)
def test_jit_kwargs(self):
x = objax.random.normal((64, 3))
kj = objax.Jit(LinearArgs(3, 3))
y1 = kj(x, 1)
y2 = kj(x, some_args=1)
y3 = kj(x, some_args=2)
self.assertEqual(y1.tolist(), y2.tolist())
self.assertNotEqual(y1.tolist(), y3.tolist())
kj = objax.Jit(LinearTrain(3, 3))
with self.assertRaises(ConcretizationTypeError):
kj(x, training=True)
def test_trainvar_assign(self):
m = objax.ModuleList([objax.TrainVar(jn.zeros(2))])
def increase():
m[0].assign(m[0].value + 1)
return m[0].value
jit_increase = objax.Jit(increase, m.vars())
jit_increase()
self.assertEqual(m[0].value.tolist(), [1., 1.])
def test_trainvar_and_ref_assign(self):
m = objax.ModuleList([objax.TrainVar(jn.zeros(2))])
m.append(objax.TrainRef(m[0]))
def increase():
m[0].assign(m[0].value + 1)
m[1].assign(m[1].value + 1)
return m[0].value
jit_increase = objax.Jit(increase, m.vars())
v = jit_increase()
self.assertEqual(v.tolist(), [2., 2.])
self.assertEqual(m[0].value.tolist(), [2., 2.])
def test_constant_optimization(self):
m = objax.nn.Linear(3, 4)
jit_constant = objax.Jit(m, objax.VarCollection())
x = objax.random.normal((10, 3))
self.assertEqual(((m(x) - jit_constant(x)) ** 2).sum(), 0)
# Modify m (which was supposed to be constant!)
m.b.assign(m.b.value + 1)
self.assertEqual(((m(x) - jit_constant(x)) ** 2).sum(), 40)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -312,967,119,624,460,350 | 30.909091 | 83 | 0.579125 | false |
cryptapus/electrum-myr | gui/qt/installwizard.py | 1 | 17005 | import sys
import os
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import electrum
from electrum.wallet import Wallet
from electrum.util import UserCancelled
from electrum.base_wizard import BaseWizard
from electrum.i18n import _
from seed_dialog import SeedLayout, KeysLayout
from network_dialog import NetworkChoiceLayout
from util import *
from password_dialog import PasswordLayout, PW_NEW
class GoBack(Exception):
pass
MSG_GENERATING_WAIT = _("Electrum is generating your addresses, please wait...")
MSG_ENTER_ANYTHING = _("Please enter a seed phrase, a master key, a list of "
"Bitcoin addresses, or a list of private keys")
MSG_ENTER_SEED_OR_MPK = _("Please enter a seed phrase or a master key (xpub or xprv):")
MSG_COSIGNER = _("Please enter the master public key of cosigner #%d:")
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_RESTORE_PASSPHRASE = \
_("Please enter your seed derivation passphrase. "
"Note: this is NOT your encryption password. "
"Leave this field empty if you did not use one or are unsure.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
import math
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, QtCore.Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
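# (Comment added for clarity) CosignWidget renders the m-of-n cosigner diagram:
# the circle is divided into n equal pie slices and the first m are drawn green,
# driven by the two sliders wired up in multisig_dialog below.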
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
apply(run_next, out)
return func_wrapper
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
def __init__(self, config, app, plugins, storage):
BaseWizard.__init__(self, config, storage)
QDialog.__init__(self, None)
self.setWindowTitle('Electrum - ' + _('Install Wizard'))
self.app = app
self.config = config
# Set for base base class
self.plugins = plugins
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.connect(self, QtCore.SIGNAL('accept'), self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
        inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addLayout(inner_vbox)
hbox.setStretchFactor(inner_vbox, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/electrum.png')
self.show()
self.raise_()
self.refresh_gui() # Need for QT on MacOSX. Lame.
def run_and_get_wallet(self):
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '%s' contains multiple accounts, which are no longer supported in Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?"%path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
if self.storage.requires_upgrade():
self.hide()
msg = _("The format of your wallet '%s' must be upgraded for Electrum. This change will not be backward compatible"%path)
if not self.question(msg):
return
self.storage.upgrade()
self.show_warning(_('Your wallet was upgraded successfully'))
self.wallet = Wallet(self.storage)
self.terminate()
return self.wallet
action = self.storage.get_action()
if action and action != 'new':
self.hide()
msg = _("The file '%s' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?") % path
if not self.question(msg):
if self.question(_("Do you want to delete '%s'?") % path):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(filename).scaledToWidth(60))
return prior_filename
def set_main_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid):
slayout = KeysLayout(parent=self, title=message, is_valid=is_valid)
self.set_main_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.set_main_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next):
return self.text_input(title, message, is_valid)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.set_main_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind):
playout = PasswordLayout(None, msg, kind, self.next_button)
self.set_main_layout(playout.layout())
return playout.new_password()
@wizard_dialog
def request_password(self, run_next):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW)
def show_restore(self, wallet, network):
# FIXME: these messages are shown after the install wizard is
# finished and the window closed. On MacOSX they appear parented
# with a re-appeared ghost install wizard window...
if network:
def task():
wallet.wait_until_synchronized()
if wallet.is_found():
msg = _("Recovery successful")
else:
msg = _("No transactions found for this seed")
self.emit(QtCore.SIGNAL('synchronized'), msg)
self.connect(self, QtCore.SIGNAL('synchronized'), self.show_message)
t = threading.Thread(target = task)
t.daemon = True
t.start()
else:
msg = _("This wallet was restored offline. It may "
"contain more addresses than displayed.")
self.show_message(msg)
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
self.set_main_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.emit(QtCore.SIGNAL('accept'))
def waiting_dialog(self, task, msg):
self.please_wait.setText(MSG_GENERATING_WAIT)
self.refresh_gui()
t = threading.Thread(target = task)
t.start()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = map(lambda x: x[0], choices)
c_titles = map(lambda x: x[1], choices)
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.set_main_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.set_main_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning=''):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
self.set_main_layout(vbox, title, next_enabled=test(default))
return ' '.join(unicode(line.text()).split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False)
vbox.addLayout(layout.layout())
self.set_main_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfil the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.set_main_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 0:
auto_connect = True
elif r == 1:
auto_connect = True
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.set_main_layout(nlayout.layout()):
auto_connect = False
else:
auto_connect = True
network.auto_connect = auto_connect
self.config.set_key('auto_connect', auto_connect, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require %d signatures')%m)
cw.set_m(m)
def on_n(n):
n_label.setText(_('From %d cosigners')%n)
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.set_main_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
| mit | -8,881,902,131,464,503,000 | 36.373626 | 140 | 0.594119 | false |
futureneer/simple_ur | simple_ur_driver/src/simple_ur_driver/ur_simulation.py | 1 | 8862 | #!/usr/bin/env python
# ROS IMPORTS
import roslib; roslib.load_manifest('simple_ur_driver')
import rospy
import tf; import tf_conversions as tf_c
import PyKDL
# URDF
from urdf_parser_py.urdf import URDF
from pykdl_utils.kdl_kinematics import KDLKinematics
# MSGS and SERVICES
from simple_ur_msgs.srv import *
from sensor_msgs.msg import JointState
from geometry_msgs.msg import PoseStamped
from predicator_msgs.msg import *
from std_msgs.msg import *
import time
import threading
import socket
# URX Universal Robot Driver
import urx
# OTHER
import logging
import numpy as np
from pid import PID
class URDriver():
MAX_ACC = .5
MAX_VEL = 1.8
JOINT_NAMES = ['shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint',
'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint']
def __init__(self):
rospy.init_node('ur_simulation',anonymous=True)
rospy.logwarn('SIMPLE_UR SIMULATION DRIVER LOADING')
# TF
self.broadcaster_ = tf.TransformBroadcaster()
self.listener_ = tf.TransformListener()
# SERVICES
self.servo_to_pose_service = rospy.Service('simple_ur_msgs/ServoToPose', ServoToPose, self.servo_to_pose_call)
self.set_stop_service = rospy.Service('simple_ur_msgs/SetStop', SetStop, self.set_stop_call)
self.set_teach_mode_service = rospy.Service('simple_ur_msgs/SetTeachMode', SetTeachMode, self.set_teach_mode_call)
self.set_servo_mode_service = rospy.Service('simple_ur_msgs/SetServoMode', SetServoMode, self.set_servo_mode_call)
# PUBLISHERS AND SUBSCRIBERS
self.driver_status_publisher = rospy.Publisher('/ur_robot/driver_status',String)
self.robot_state_publisher = rospy.Publisher('/ur_robot/robot_state',String)
self.joint_state_publisher = rospy.Publisher('joint_states',JointState)
# self.follow_pose_subscriber = rospy.Subscriber('/ur_robot/follow_goal',PoseStamped,self.follow_goal_cb)
# Predicator
self.pub_list = rospy.Publisher('/predicator/input', PredicateList)
self.pub_valid = rospy.Publisher('/predicator/valid_input', ValidPredicates)
rospy.sleep(1)
pval = ValidPredicates()
pval.pheader.source = rospy.get_name()
pval.predicates = ['moving', 'stopped', 'running']
pval.assignments = ['robot']
self.pub_valid.publish(pval)
# Rate
self.run_rate = rospy.Rate(100)
self.run_rate.sleep()
### Set Up Simulated Robot ###
self.driver_status = 'IDLE'
self.robot_state = 'POWER OFF'
robot = URDF.from_parameter_server()
self.kdl_kin = KDLKinematics(robot, 'base_link', 'ee_link')
# self.q = self.kdl_kin.random_joint_angles()
self.q = [-1.5707,-.785,-3.1415+.785,-1.5707-.785,-1.5707,0] # Start Pose?
self.start_pose = self.kdl_kin.forward(self.q)
self.F_start = tf_c.fromMatrix(self.start_pose)
# rospy.logwarn(self.start_pose)
# rospy.logwarn(type(self.start_pose))
# pose = self.kdl_kin.forward(q)
# joint_positions = self.kdl_kin.inverse(pose, q+0.3) # inverse kinematics
# if joint_positions is not None:
# pose_sol = self.kdl_kin.forward(joint_positions) # should equal pose
# J = self.kdl_kin.jacobian(q)
# rospy.logwarn('q:'+str(q))
# rospy.logwarn('joint_positions:'+str(joint_positions))
# rospy.logwarn('pose:'+str(pose))
# if joint_positions is not None:
# rospy.logwarn('pose_sol:'+str(pose_sol))
# rospy.logwarn('J:'+str(J))
### START LOOP ###
while not rospy.is_shutdown():
if self.driver_status == 'TEACH':
self.update_from_marker()
# if self.driver_status == 'SERVO':
# self.update_follow()
# Publish and Sleep
self.publish_status()
self.send_command()
self.run_rate.sleep()
# Finish
rospy.logwarn('SIMPLE UR - Simulation Finished')
def update_from_marker(self):
try:
F_target_world = tf_c.fromTf(self.listener_.lookupTransform('/world','/endpoint_interact',rospy.Time(0)))
F_target_base = tf_c.fromTf(self.listener_.lookupTransform('/base_link','/endpoint_interact',rospy.Time(0)))
F_base_world = tf_c.fromTf(self.listener_.lookupTransform('/world','/base_link',rospy.Time(0)))
self.F_command = F_base_world.Inverse()*F_target_world
M_command = tf_c.toMatrix(self.F_command)
joint_positions = self.kdl_kin.inverse(M_command, self.q) # inverse kinematics
if joint_positions is not None:
pose_sol = self.kdl_kin.forward(joint_positions) # should equal pose
self.q = joint_positions
else:
rospy.logwarn('no solution found')
# self.send_command()
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:
rospy.logwarn(str(e))
def send_command(self):
self.current_joint_positions = self.q
msg = JointState()
msg.header.stamp = rospy.get_rostime()
msg.header.frame_id = "simuated_data"
msg.name = self.JOINT_NAMES
msg.position = self.current_joint_positions
msg.velocity = [0]*6
msg.effort = [0]*6
self.joint_state_publisher.publish(msg)
pose = self.kdl_kin.forward(self.q)
F = tf_c.fromMatrix(pose)
# F = self.F_command
self.current_tcp_pose = tf_c.toMsg(F)
self.current_tcp_frame = F
self.broadcaster_.sendTransform(tuple(F.p),tuple(F.M.GetQuaternion()),rospy.Time.now(), '/endpoint','/base_link')
def set_teach_mode_call(self,req):
if req.enable == True:
# self.rob.set_freedrive(True)
self.driver_status = 'TEACH'
return 'SUCCESS - teach mode enabled'
else:
# self.rob.set_freedrive(False)
self.driver_status = 'IDLE'
return 'SUCCESS - teach mode disabled'
def set_servo_mode_call(self,req):
if req.mode == 'SERVO':
self.driver_status = 'SERVO'
return 'SUCCESS - servo mode enabled'
elif req.mode == 'DISABLE':
self.driver_status = 'IDLE'
return 'SUCCESS - servo mode disabled'
def set_stop_call(self,req):
rospy.logwarn('SIMPLE UR - STOPPING ROBOT')
        # This simulation driver never creates a urx connection (self.rob does not
        # exist), so there is no hardware to stop; just acknowledge the request.
return 'SUCCESS - stopped robot'
def servo_to_pose_call(self,req):
if self.driver_status == 'SERVO':
rospy.logwarn(req)
self.F_command = tf_c.fromMsg(req.target)
M_command = tf_c.toMatrix(self.F_command)
# Calculate IK
joint_positions = self.kdl_kin.inverse(M_command, self.q) # inverse kinematics
if joint_positions is not None:
pose_sol = self.kdl_kin.forward(joint_positions) # should equal pose
self.q = joint_positions
else:
rospy.logwarn('no solution found')
# self.send_command(F_command)
return 'SUCCESS - moved to pose'
else:
rospy.logerr('SIMPLE UR -- Not in servo mode')
return 'FAILED - not in servo mode'
def publish_status(self):
self.driver_status_publisher.publish(String(self.driver_status))
self.robot_state_publisher.publish(String(self.robot_state))
ps = PredicateList()
ps.pheader.source = rospy.get_name()
ps.statements = []
statement = PredicateStatement( predicate='moving',
confidence=1,
value=PredicateStatement.FALSE,
num_params=1,
params=['robot', '', ''])
ps.statements += [statement]
statement = PredicateStatement( predicate='stopped',
confidence=1,
value=PredicateStatement.TRUE,
num_params=1,
params=['robot', '', ''])
ps.statements += [statement]
statement = PredicateStatement( predicate='running',
confidence=1,
value=PredicateStatement.TRUE,
num_params=1,
params=['robot', '', ''])
ps.statements += [statement]
self.pub_list.publish(ps)
def check_robot_state(self):
self.robot_state == 'RUNNING SIMULATION'
self.driver_status = 'IDLE - SIMULATION'
if __name__ == "__main__":
robot_driver = URDriver() | bsd-2-clause | 5,631,890,719,074,194,000 | 41.004739 | 122 | 0.583164 | false |
NathanMH/Positivity-Check | positivity_check/test/test_tweet_check.py | 1 | 1558 | import os
import unittest
from tweet_check import Tweeter
class TestTweetCheck(unittest.TestCase):
""" Test TweetCheck """
def test_init_without_username(self):
""" Test TweetCheck with an empty username string. """
username = ""
twat = Tweeter(username)
self.assertEqual(twat.tweets_filename, username + ".txt")
def test_get_user_tweets_int(self):
""" Test get_user_tweets with number that won't exist. """
username = "TestTwat"
twat = Tweeter(username)
twat.get_user_tweets(1000000)
def test_get_user_tweets_str(self):
""" Test get_user_tweets with a string? """
twat = Tweeter("TestTwat")
twat.get_user_tweets("five")
self.assertGreater(len(twat.tweets), 0)
def test_get_user_tweets_float(self):
""" Test get_user_tweets with a string? """
twat = Tweeter("TestTwat")
twat.get_user_tweets(0.381)
self.assertGreater(len(twat.tweets), 0)
def test_nonexistant_twat(self):
""" Test when username provided doesn't exit. """
twat = Tweeter("TestTwat291sr03")
twat.get_user_tweets(1)
def test_tweet_saving(self):
""" Test saving the tweets to a file TODO Move to json """
twat = Tweeter("TestTwat")
twat.get_user_tweets(1)
twat.store_tweets(os.getcwd() + "\\positivity_check\\results\\")
self.assertEqual(os.path.isfile(os.getcwd() + "\\positivity_check\\results\\" + twat.tweets_filename), True)
if __name__ == '__main__':
pass
| mit | -3,095,819,941,544,872,000 | 32.148936 | 116 | 0.615533 | false |
madscatt/sasmol | src/python/test_sasmol/test_sasop/test_intg_sasop_Move_rotate.py | 1 | 4901 | '''
SASMOL: Copyright (C) 2011 Joseph E. Curtis, Ph.D.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from sasmol.test_sasmol.util import env,util
from unittest import main, skipIf
from mocker import Mocker, MockerTestCase, ANY, ARGS
import sasmol.sasmol as sasmol
import sasmol.sasop as sasop
import sasmol.sascalc as sascalc
import numpy
import warnings; warnings.filterwarnings('ignore')
import os
floattype=os.environ['SASSIE_FLOATTYPE']
DataPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),'..','data','pdb_common')+os.path.sep
class Test_intg_sasop_Move_rotate(MockerTestCase):
def setUp(self):
self.o=sasmol.SasMol(0)
def assert_list_almost_equal(self,a,b,places=5):
if (len(a)!=len(b)):
raise TypeError
else:
for i in range(len(a)):
if isinstance(a[i],(int,float,numpy.generic)):
if (numpy.isnan(a[i]) and numpy.isnan(b[i])): continue
self.assertAlmostEqual(a[i],b[i],places)
else:
self.assert_list_almost_equal(a[i],b[i],places)
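    # (Comment added for clarity) Recursively compares nested sequences element by
    # element with assertAlmostEqual, treating a pair of NaNs as equal.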
def test_one_atom_pdb(self):
self.o.read_pdb(DataPath+'1ATM.pdb')
axis = 'x'
frame = 0
theta=numpy.pi/2.0
#
self.o.rotate(frame,axis,theta)
result_coor = self.o.coor()
result_com = self.o.calccom(0)
print '\nresult_coor:\n'; util.printfl([result_coor]); print '\nresult_com:\n',util.printfl([result_com])
#
expected_coor = numpy.array([[[73.944, -41.652, 41.799]]], floattype)
expected_com = numpy.array([73.944, -41.652, 41.799], floattype)
self.assert_list_almost_equal(expected_coor, result_coor,3)
self.assert_list_almost_equal(expected_com, result_com,3)
def test_two_aa_pdb(self):
self.o.read_pdb(DataPath+'2AAD.pdb')
axis = 'y'
frame = 0
theta=numpy.pi/2.0
#
self.o.rotate(frame,axis,theta)
result_coor = self.o.coor()
result_com = self.o.calccom(0)
print '\nresult_coor:\n'; util.printfl([result_coor]); print '\nresult_com:\n',util.printfl([result_com])
#
expected_coor = numpy.array([[[41.652, 41.799, -73.944], [40.456, 42.563, -74.229], [40.463, 43.093, -75.667], [39.401, 43.279, -76.264], [40.336, 43.734, -73.210], [39.926, 43.168, -71.856], [39.354, 44.782, -73.67], [39.946, 44.177, -70.721], [41.647, 43.330, -76.231], [41.730, 43.852, -77.592], [42.184, 42.820, -78.617], [42.656, 43.169, -79.712], [42.648, 45.097, -77.671], [43.910, 44.816, -77.054], [42.000, 46.273, -76.970]]], floattype)
expected_com = numpy.array([41.276, 43.708, -75.680], floattype)
self.assert_list_almost_equal(expected_coor, result_coor,1)
self.assert_list_almost_equal(expected_com, result_com,2)
def test_rna_pdb(self):
self.o.read_pdb(DataPath+'rna.pdb')
axis = 'z'
frame = 0
theta=numpy.pi/2.0
#
self.o.rotate(frame,axis,theta)
result_com = self.o.calccom(0)
print '\nresult_com:\n',util.printfl([result_com])
#
expected_com = numpy.array([-4.352, -8.033, 9.231], floattype)
self.assert_list_almost_equal(expected_com, result_com,2)
def test_1CRN_pdb(self):
self.o.read_pdb(DataPath+'1CRN.pdb')
axis = 'z'
frame = 0
theta=numpy.pi/2.0
#
self.o.rotate(frame,axis,theta)
result_com = self.o.calccom(0)
print '\nresult_com:\n',util.printfl([result_com])
#
expected_com = numpy.array([-9.775, 9.300, 6.978], floattype)
self.assert_list_almost_equal(expected_com, result_com,2)
@skipIf(os.environ['SASSIE_LARGETEST']=='n',"I am not testing large files")
def test_1KP8_pdb(self):
self.o.read_pdb(DataPath+'1KP8.pdb')
axis = 'x'
frame = 0
theta=12.0
#
self.o.rotate(frame,axis,theta)
result_com = self.o.calccom(0)
print '\nresult_com:\n',util.printfl([result_com])
#
expected_com = numpy.array([83.286, 14.288, 22.003], floattype)
self.assert_list_almost_equal(expected_com, result_com,2)
def tearDown(self):
pass
if __name__ == '__main__':
main()
| gpl-3.0 | -109,566,236,787,960,100 | 35.849624 | 454 | 0.606815 | false |
michaelneuder/image_quality_analysis | bin/data_analysis/prediction_target_plotter.py | 1 | 1940 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (13,10)
plt.rcParams['font.size'] = 18
import csv
import scipy.stats as stats
def main():
prediction_plot = []
target_plot = []
with open('../nets/wip/post_training.csv', mode='r') as csv_file:
reader = csv.reader(csv_file)
line_count = 0
for line in reader:
if line_count > 0:
prediction_plot.append(float(line[1]))
target_plot.append(float(line[0]))
line_count+=1
csv_file.close()
prediction_plot = np.asarray(prediction_plot)
target_plot = np.asarray(target_plot)
slope, intercept, r_value, p_value_, std_err = stats.linregress(prediction_plot, target_plot)
plt.plot(prediction_plot, target_plot, 'r.', alpha=.25)
plt.title('prediction vs target')
plt.xlabel('prediction')
plt.ylabel('target')
plt.text(0.12, .9, 'r-squared = {0:.5f}'.format(r_value**2), style='italic',
bbox={'facecolor':'red', 'alpha':0.5, 'pad':10})
plt.show()
mean = target_plot.mean()
differences_squared = []
differences = []
for i in range(len(target_plot)):
difference = target_plot[i] - mean
differences.append(abs(difference))
differences_squared.append(difference**2)
differences = np.asarray(differences)
differences_squared = np.asarray(differences_squared)
x_plot = np.arange(len(target_plot))
plt.plot(x_plot, prediction_plot, 'r+')
plt.xlabel('pixel')
plt.ylabel('prediction')
plt.show()
n, bins, patches = plt.hist(prediction_plot, 50, normed=1, facecolor='blue', alpha=0.5, label='prediction')
n, bins, patches = plt.hist(target_plot, 50, normed=1, facecolor='red', alpha=0.5, label='target')
plt.xlabel('difference')
plt.ylabel('quantity')
plt.legend()
plt.show()
if __name__ == '__main__':
main()
| mit | 8,428,636,198,298,739,000 | 32.448276 | 111 | 0.623196 | false |
niubileme/shadowsocks-manyuser | shadowsocks/servers.py | 1 | 1940 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 mengskysama
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import os
import logging
import thread
import config
import signal
import time
if config.LOG_ENABLE:
logging.basicConfig(format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',datefmt='%Y, %b %d %a %H:%M:%S',filename=config.LOG_FILE,level=config.LOG_LEVEL)
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from shadowsocks import shell, daemon, eventloop, tcprelay, udprelay, \
asyncdns, manager
import manager
import config
from dbtransfer import DbTransfer
def handler_SIGQUIT():
return
def main():
configer = {
'server': '%s' % config.SS_BIND_IP,
'local_port': 1081,
'port_password': {
},
'method': '%s' % config.SS_METHOD,
'manager_address': '%s:%s' % (config.MANAGE_BIND_IP, config.MANAGE_PORT),
'timeout': 185, # some protocol keepalive packet 3 min Eg bt
'fast_open': False,
'verbose': 1
}
t = thread.start_new_thread(manager.run, (configer,))
time.sleep(1)
t = thread.start_new_thread(DbTransfer.thread_db, ())
time.sleep(1)
t = thread.start_new_thread(DbTransfer.thread_push, ())
time.sleep(1)
t = thread.start_new_thread(DbTransfer.thread_reset, ())
while True:
time.sleep(100)
if __name__ == '__main__':
main()
| apache-2.0 | 3,208,279,885,306,194,400 | 29.3125 | 181 | 0.668557 | false |
will4906/PatentCrawler | crawler/settings.py | 1 | 3450 | # -*- coding: utf-8 -*-
# Scrapy settings for crawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
from config import base_settings as bs
BOT_NAME = 'crawler'
SPIDER_MODULES = ['crawler.spiders']
NEWSPIDER_MODULE = 'crawler.spiders'
RETRY_ENABLED = True
RETRY_TIMES = 3
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'crawler (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = bs.DOWNLOAD_DELAY
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
COOKIES_ENABLED = True
COOKIES_DEBUG = True
DUPEFILTER_DEBUG = True
REDIRECT_ENABLED = True
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'crawler.middlewares.CrawlerSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
# 'crawler.middlewares.MyCustomDownloaderMiddleware': 543,
'crawler.middlewares.PatentMiddleware': 544,
# 'crawler.middlewares.RandomUserAgentMiddleware': 542
}
DOWNLOAD_TIMEOUT = bs.TIMEOUT
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'crawler.pipelines.CrawlerPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| apache-2.0 | 5,084,145,785,278,684,000 | 34.204082 | 109 | 0.764348 | false |
darrencheng0817/AlgorithmLearning | Python/leetcode/CountNumbersWithUniqueDigits.py | 1 | 1060 | '''
Created on 1.12.2016
@author: Darren
''''''
Given a non-negative integer n, count all numbers with unique digits, x, where 0 ≤ x < 10n.
Example:
Given n = 2, return 91. (The answer should be the total numbers in the range of 0 ≤ x < 100, excluding [11,22,33,44,55,66,77,88,99])
A direct way is to use the backtracking approach.
Backtracking should contains three states which are (the current number, number of steps to get that number and a bitmask which represent which number is marked as visited so far in the current number). Start with state (0,0,0) and count all valid number till we reach number of steps equals to 10n.
This problem can also be solved using a dynamic programming approach and some knowledge of combinatorics.
Let f(k) = count of numbers with unique digits with length equals k.
f(1) = 10, ..., f(k) = 9 * 9 * 8 * ... (9 - k + 2) [The first factor is 9 because a number cannot start with 0].
Credits:Special thanks to @memoryless for adding this problem and creating all test cases."
'''
| mit | -5,553,119,902,178,163,000 | 46.5 | 301 | 0.708491 | false |
andymeneely/attack-surface-metrics | attacksurfacemeter/loaders/cflow_line_parser.py | 1 | 1154 | __author__ = 'kevin'
import re
from attacksurfacemeter.loaders.base_line_parser import BaseLineParser
class CflowLineParser(BaseLineParser):
""""""
_instance = None
@staticmethod
def get_instance(cflow_line=None):
if CflowLineParser._instance is None:
CflowLineParser._instance = CflowLineParser()
CflowLineParser._instance.load(cflow_line)
return CflowLineParser._instance
indent = " "
def __init__(self):
super(CflowLineParser, self).__init__()
self._level = 0
def load(self, cflow_line):
self.__init__()
split_line = cflow_line.split(CflowLineParser.indent)
function_info = split_line[-1].strip()
self._level = len(split_line) - 1
function_name = re.search(r"(\w+\(\))", function_info).group(0)
self._function_name = function_name[:function_name.index('(')]
match = re.search(r"(?:at\s)(\..*)(?::\d+>)", function_info)
if match:
self._function_signature = match.group(1)
def get_level(self, cflow_line=None):
self._load_if_new(cflow_line)
return self._level
| mit | 7,283,357,729,071,608,000 | 25.837209 | 71 | 0.60312 | false |
debrouwere/facebook-insights | facebookinsights/utils/api.py | 1 | 2320 | # encoding: utf-8
import copy
import facepy
from . import url
def getdata(obj, key, default=None):
if key in obj:
return obj[key]['data']
else:
return default
class GraphAPI(facepy.GraphAPI):
def __init__(self, *vargs, **kwargs):
self.base = []
super(GraphAPI, self).__init__(*vargs, **kwargs)
def _segmentize_endpoint(self, endpoint):
if not isinstance(endpoint, list):
endpoint = [endpoint]
return endpoint
def _resolve_endpoint(self, endpoint, options={}):
endpoint = self._segmentize_endpoint(endpoint)
resolved_url = "/".join(self.base + endpoint)
# remove facepy options, retain everything
# that needs to end up in the querystring
blacklist = ['path', 'page', 'retry', 'data', 'method', 'relative_url']
if options:
qs = url.encode({key: value for key, value in options.items() if key not in blacklist})
return resolved_url + '?' + qs
else:
return resolved_url
def partial(self, base):
client = GraphAPI(self.oauth_token)
client.base = client.base + self._segmentize_endpoint(base)
return client
def all(self, endpoint, paramsets, method='GET', body=False, **options):
""" A nicer interface for batch requests to the
same endpoint but with different parameters, e.g.
different date ranges. """
requests = []
for params in paramsets:
params = copy.copy(params)
params.update(options)
segments = self._segmentize_endpoint(endpoint)
relative_url = params.get('relative_url')
resolved_url = self._resolve_endpoint(segments + [relative_url], params)
request = {
'method': method,
'relative_url': resolved_url,
}
if body:
request['body'] = body
requests.append(request)
return self.batch(requests)
def get(self, relative_endpoint=[], *vargs, **kwargs):
""" An endpoint can be specified as a string
or as a list of path segments. """
endpoint = self._resolve_endpoint(relative_endpoint)
return super(GraphAPI, self).get(endpoint, *vargs, **kwargs)
| isc | -8,699,935,582,617,644,000 | 31.676056 | 99 | 0.58319 | false |
ryanleland/Genetix.py | genetix/chromosome.py | 1 | 1632 | # -*- coding: utf-8 -*-
import random
from genetix.gene import Gene
class Chromosome(object):
"""The base Chromosome class, which is a container for genes, and handles
mutation via the offspring method.
"""
def __init__(self, names=[], genes=[]):
self.names = names
self.genes = genes
@classmethod
def construct(cls, blueprint):
names = []
genes = []
for name, values in blueprint.items():
names.append(name)
genes.append(Gene(values))
return cls(names, genes)
@classmethod
def offspring(cls, x, y, crossover_rate, mutation_rate):
assert len(x) == len(y)
genes = []
gene_count = len(x)
# Calculate mutation and crossover
mutation = int(10000 * mutation_rate)
crossover = random.randrange(0, int(gene_count * crossover_rate))
# Populate with X
for i in range(0, crossover):
genes.append(x.genes[i])
# Populate with Y
for i in range(crossover, gene_count):
genes.append(y.genes[i])
for gene in genes:
if mutation > random.randrange(0, 10000):
gene.mutate()
return cls(x.names, genes)
def get_gene(self, index):
return self.genes[index]
def set_gene(self, index, gene):
self.genes[index] = gene
def mutate_gene(self, index):
self.genes[index].mutate()
def __len__(self):
return len(self.genes)
def __repr__(self):
return "\t".join(["%s:%s" % (name, gene) for name, gene in zip(self.names, self.genes)])
| mit | 8,750,449,154,566,175,000 | 23.727273 | 96 | 0.568015 | false |
mtbc/openmicroscopy | components/tools/OmeroWeb/omeroweb/webclient/webclient_gateway.py | 1 | 87374 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# webclient_gateway
#
# Copyright (c) 2008-2014 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
# Carlos Neves <carlos(at)glencoesoftware(dot)com>, 2008
#
# Version: 1.0
#
import cStringIO
import traceback
import logging
logger = logging.getLogger(__name__)
try:
from PIL import Image # see ticket:2597
except ImportError:
try:
import Image # see ticket:2597
except:
logger.error("You need to install the Python Imaging Library. Get it at http://www.pythonware.com/products/pil/")
logger.error(traceback.format_exc())
from StringIO import StringIO
import time
from datetime import datetime
import Ice
import omero.gateway
import omero.scripts
from omero.rtypes import rint, rstring, rlong, rlist, rtime
from omero.model import \
ExperimenterI, ExperimenterGroupI
from omero.gateway import TagAnnotationWrapper, \
AnnotationWrapper, \
OmeroGatewaySafeCallWrapper, CommentAnnotationWrapper
from omero.gateway import KNOWN_WRAPPERS
from django.utils.encoding import smart_str
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
try:
import hashlib
hash_sha1 = hashlib.sha1
except:
import sha
hash_sha1 = sha.new
def defaultThumbnail(size=(120,120)):
if isinstance(size, int):
size = (size,size)
if len(size) == 1:
size = (size[0],size[0])
img = Image.open(settings.DEFAULT_IMG)
img.thumbnail(size, Image.ANTIALIAS)
f = cStringIO.StringIO()
img.save(f, "PNG")
f.seek(0)
return f.read()
class OmeroWebGateway (omero.gateway.BlitzGateway):
def __init__ (self, *args, **kwargs):
"""
Create the connection wrapper. Does not attempt to connect at this stage
Initialises the omero.client
@param username: User name. If not specified, use 'omero.gateway.anon_user'
@type username: String
@param passwd: Password.
@type passwd: String
@param client_obj: omero.client
@param group: name of group to try to connect to
@type group: String
@param clone: If True, overwrite anonymous with False
@type clone: Boolean
@param try_super: Try to log on as super user ('system' group)
@type try_super: Boolean
@param host: Omero server host.
@type host: String
@param port: Omero server port.
@type port: Integer
@param extra_config: Dictionary of extra configuration
@type extra_config: Dict
@param secure: Initial underlying omero.client connection type (True=SSL/False=insecure)
@type secure: Boolean
@param anonymous:
@type anonymous: Boolean
@param useragent: Log which python clients use this connection. E.g. 'OMERO.webadmin'
@type useragent: String
@param _shareId: Active share ID
@type _shareId: Long
"""
super(OmeroWebGateway, self).__init__(*args, **kwargs)
self._shareId = None
def getShareId(self):
"""
        Returns the active share id.
@return: Share ID
@rtype: Long
"""
if self.getEventContext().shareId is not None:
if self.getEventContext().shareId != self._shareId and self._shareId > 0:
self._shareId = self.getEventContext().shareId
return self._shareId
##############################################
# Session methods #
def changeActiveGroup(self, gid): # TODO: should be moved to ISession
"""
        Every time a session is created, the default group becomes the active group
        and is loaded with the security context for the current user and thread.
        Public data has to be created in the context of the group that the user
        who wants to look at the data is a member of.
        Public data is only visible to members and owners of that group.
@param gid: New active group ID
@type gid: Long
@return: Boolean
"""
try:
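            # close any open stateful service proxies tied to the previous security context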
for k in self._proxies.keys():
self._proxies[k].close()
self.c.sf.setSecurityContext(ExperimenterGroupI(gid, False))
self.getAdminService().setDefaultGroup(self.getUser()._obj, ExperimenterGroupI(gid, False))
self._ctx = self.getAdminService().getEventContext()
return True
except omero.SecurityViolation:
logger.error(traceback.format_exc())
return False
except:
logger.error(traceback.format_exc())
return False
##############################################
## Forgotten password ##
def isForgottenPasswordSet(self):
"""
        Retrieves the configuration value "omero.resetpassword.config" for the
        Forgotten password form from the backend store.
@return: Boolean
"""
conf = self.getConfigService()
try:
return bool(conf.getConfigValue("omero.resetpassword.config").title())
except:
logger.error(traceback.format_exc())
return False
def reportForgottenPassword(self, username, email):
"""
Allows to reset the password (temporary password is sent). The
given email must match the email for the user listed under the name
argument.
@param username: omename
@type username: String
@param email: email address
@type email: String
"""
admin_serv = self.getAdminService()
admin_serv.reportForgottenPassword(username, email)
##############################################
## IAdmin ##
def isAnythingCreated(self):
"""
        Checks if any experimenters were created before
@return: Boolean
"""
q = self.getQueryService()
p = omero.sys.Parameters()
p.map = {}
p.map["default_names"] = rlist([rstring("user"), rstring("system"), rstring("guest")])
f = omero.sys.Filter()
f.limit = rint(1)
p.theFilter = f
sql = "select g from ExperimenterGroup as g where g.name not in (:default_names)"
if len(q.findAllByQuery(sql, p, self.SERVICE_OPTS)) > 0:
return False
return True
def listLdapAuthExperimenters(self):
"""
Lists all IDs of experimenters who are authenticated by LDAP
        (i.e. have their DN set in the password table).
        @return: List of experimenter IDs
@rtype: L{Dict of String: Long}
"""
admin_serv = self.getAdminService()
return admin_serv.lookupLdapAuthExperimenters()
def getLdapAuthExperimenter(self, eid):
"""
        Return the DN of the given experimenter if they use LDAP authentication
        (i.e. have a DN set in the password table), or None otherwise.
@param eid: experimenter ID
@type eid: L{Long}
@return: Distinguished Name
@rtype: String
"""
admin_serv = self.getAdminService()
return admin_serv.lookupLdapAuthExperimenter(long(eid))
def getExperimenters(self):
"""
        Return all experimenters apart from the current user.
        @return: Generator yielding experimenters
@rtype: L{ExperimenterWrapper} generator
"""
q = self.getQueryService()
p = omero.sys.Parameters()
p.map = {}
p.map["id"] = rlong(self.getEventContext().userId)
sql = "select e from Experimenter as e where e.id != :id "
for e in q.findAllByQuery(sql, p, self.SERVICE_OPTS):
yield ExperimenterWrapper(self, e)
#def getCurrentSupervisor(self):
# """
# Gets the owner of a group for current user.
#
# @return: ExperimenterWrapper
# """
#
# p = omero.sys.ParametersI()
# p.map = {}
# p.map["id"] = rlong(self.getEventContext().groupId)
# # TODO: there can now be multiple supervisors
# p.page(0,1)
# supervisor = self.getQueryService().findByQuery(\
# """select e from ExperimenterGroup as g
# join g.groupExperimenterMap as m join m.child as e
# where m.owner = true and g.id = :id""", p)
# return ExperimenterWrapper(self, supervisor)
#def getScriptwithDetails(self, sid):
# script_serv = self.getScriptService()
# return script_serv.getScriptWithDetails(long(sid))
#def lookupScripts(self):
# script_serv = self.getScriptService()
# return script_serv.getScripts()
def getServerVersion(self):
"""
Retrieves a configuration value "omero.version" from the backend store.
@return: String
"""
conf = self.getConfigService()
return conf.getConfigValue("omero.version")
#########################################################
## From Bram b(dot)gerritsen(at)nki(dot)nl ##
def findWellInPlate (self, plate_name, row, column):
q = self.getQueryService()
p = omero.sys.Parameters()
p.map = {}
p.map['pname'] = rstring(str(plate_name))
p.map['row'] = rint(int(row))
p.map['column'] = rint(int(column))
sql = """select well from Well as well
left outer join fetch well.plate as pt
left outer join fetch well.wellSamples as ws
inner join fetch ws.image as img
where well.plate.name = :pname and well.row = :row
and well.column = :column"""
well = q.findByQuery(sql, p, self.SERVICE_OPTS)
if well is None:
return None
else:
return WellWrapper(self, well, None)
####################################################################################
## Container Queries ###
####################################################################################
def listTags(self, eid=None):
params = omero.sys.ParametersI()
params.orphan()
params.map = {}
params.map['ns'] = rstring(omero.constants.metadata.NSINSIGHTTAGSET)
sql = "select tg from TagAnnotation tg where ((ns=:ns) or (not exists ( select aal from AnnotationAnnotationLink as aal where aal.child=tg.id))) "
if eid is not None:
params.map["eid"] = rlong(long(eid))
sql+=" and tg.details.owner.id = :eid"
q = self.getQueryService()
for ann in q.findAllByQuery(sql, params, self.SERVICE_OPTS):
yield TagAnnotationWrapper(self, ann)
def countOrphans (self, obj_type, eid=None):
links = {'Dataset':('ProjectDatasetLink', DatasetWrapper),
'Image':('DatasetImageLink', ImageWrapper),
'Plate':('ScreenPlateLink', PlateWrapper)}
if obj_type not in links:
            raise TypeError("'%s' is not a valid object type. Must use one of %s" % (obj_type, links.keys()))
q = self.getQueryService()
p = omero.sys.Parameters()
p.map = {}
if eid is not None:
p.map["eid"] = rlong(long(eid))
eidFilter = "obj.details.owner.id=:eid and "
eidWsFilter = " and ws.details.owner.id=:eid"
else:
eidFilter = ""
eidWsFilter = ""
sql = "select count(obj.id) from %s as obj " \
"join obj.details.creationEvent "\
"join obj.details.owner join obj.details.group " \
"where %s" \
"not exists (select obl from %s as obl where " \
"obl.child=obj.id)" % (obj_type, eidFilter, links[obj_type][0])
if obj_type == 'Image':
sql += "and not exists ( "\
"select ws from WellSample as ws "\
"where ws.image=obj.id %s)" % eidWsFilter
rslt = q.projection(sql, p, self.SERVICE_OPTS)
if len(rslt) > 0:
if len(rslt[0]) > 0:
return rslt[0][0].val
return 0
def listImagesInDataset (self, oid, eid=None, page=None, load_pixels=False):
"""
List Images in the given Dataset.
        Optionally filter by experimenter 'eid'.
@param eid: experimenter id
@type eid: Long
@param page: page number
@type page: Long
@return: Generator yielding Images
@rtype: L{ImageWrapper} generator
"""
q = self.getQueryService()
p = omero.sys.ParametersI()
p.map["oid"] = rlong(long(oid))
if page is not None:
p.page(((int(page)-1)*settings.PAGE), settings.PAGE)
if load_pixels:
pixels = "join fetch im.pixels "
else:
pixels = ""
sql = "select im from Image im "\
"join fetch im.details.creationEvent "\
"join fetch im.details.owner join fetch im.details.group " \
"left outer join fetch im.datasetLinks dil "\
"left outer join fetch dil.parent d %s" \
"where d.id = :oid" % pixels
if eid is not None:
p.map["eid"] = rlong(long(eid))
sql += " and im.details.owner.id=:eid"
sql+=" order by im.name ASC"
for e in q.findAllByQuery(sql, p, self.SERVICE_OPTS):
kwargs = {'link': omero.gateway.BlitzObjectWrapper(self, e.copyDatasetLinks()[0])}
yield ImageWrapper(self, e, None, **kwargs)
# DATA RETRIVAL BY TAGs
def findTag (self, name, desc=None):
"""
        Retrieves a Tag by the given name and description.
@param name name of tag
@type name String
@param desc description of tag
@type desc String
@return: TagAnnotation
@rtype: AnnotationWrapper
"""
"""TODO: #1015
It does not support SPW"""
query_serv = self.getQueryService()
res = list()
p = omero.sys.Parameters()
p.map = {}
p.map["text"] = rstring(str(name))
if desc is not None:
p.map["desc"] = rstring(str(desc))
#p.map["eid"] = rlong(self.getEventContext().userId)
f = omero.sys.Filter()
f.limit = rint(1)
p.theFilter = f
sql = "select tg from TagAnnotation tg " \
"where tg.textValue=:text"
if desc is not None:
sql+= " and tg.description=:desc"
sql+=" and tg.ns is null order by tg.textValue"
res = query_serv.findAllByQuery(sql, p, self.SERVICE_OPTS)
if len(res) > 0:
return TagAnnotationWrapper(self, res[0])
return None
# AVATAR #
def uploadMyUserPhoto(self, filename, format, data):
"""
Uploads a photo for the user which will be displayed on his/her profile.
This photo will be saved as an OriginalFile object
with the given format, and attached to the user's Experimenter
        object via a File Annotation with
the namespace: "openmicroscopy.org/omero/experimenter/photo" (NSEXPERIMENTERPHOTO).
If such an OriginalFile instance already exists,
it will be overwritten. If more than one photo is present, the oldest
version will be modified (i.e. the highest updateEvent id).
Note: as outlined in ticket:1794, this photo will be placed in the "user"
group and therefore will be visible to everyone on the system.
@param filename name which will be used.
@type filename String
@param format Format.value string. 'image/jpeg' and 'image/png' are common values.
@type format String
@param data Data from the image. This will be written to disk.
@type data String
@return ID of the overwritten or newly created user photo OriginalFile object.
@rtype Long
"""
admin_serv = self.getAdminService()
pid = admin_serv.uploadMyUserPhoto(filename, format, data)
if pid is not None:
return pid
def hasExperimenterPhoto(self, oid=None):
"""
Check if File annotation with the namespace:
"openmicroscopy.org/omero/experimenter/photo" (NSEXPERIMENTERPHOTO) is linked
to the given user ID. If user id not set, owned by the current user.
@param oid experimenter ID
@type oid Long
@return True or False
@rtype Boolean
"""
meta = self.getMetadataService()
try:
if oid is None:
ann = meta.loadAnnotations("Experimenter", [self.getEventContext().userId], None, None, None).get(self.getEventContext().userId, [])
else:
ann = meta.loadAnnotations("Experimenter", [long(oid)], None, None, None).get(long(oid), [])
if len(ann) > 0:
return True
else:
return False
except:
logger.error(traceback.format_exc())
return False
def getExperimenterPhoto(self, oid=None):
"""
Get File annotation with the namespace:
"openmicroscopy.org/omero/experimenter/photo" (NSEXPERIMENTERPHOTO) linked
to the given user ID. If user id not set, owned by the current user.
@param oid experimenter ID
@type oid Long
@return Data from the image.
@rtype String
"""
photo = None
meta = self.getMetadataService()
try:
if oid is None:
ann = meta.loadAnnotations("Experimenter", [self.getEventContext().userId], None, None, None).get(self.getEventContext().userId, [])
else:
ann = meta.loadAnnotations("Experimenter", [long(oid)], None, None, None).get(long(oid), [])
if len(ann) > 0:
ann = ann[0]
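                # read the photo bytes from the OriginalFile via a raw file store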
store = self.createRawFileStore()
try:
store.setFileId(ann.file.id.val)
photo = store.read(0, long(ann.file.size.val))
finally:
store.close()
else:
photo = self.getExperimenterDefaultPhoto()
except:
logger.error(traceback.format_exc())
photo = self.getExperimenterDefaultPhoto()
        if photo is None:
photo = self.getExperimenterDefaultPhoto()
return photo
def getExperimenterPhotoSize(self, oid=None):
"""
Get size of File annotation with the namespace:
"openmicroscopy.org/omero/experimenter/photo" (NSEXPERIMENTERPHOTO) linked
to the given user ID. If user id not set, owned by the current user.
@param oid experimenter ID
@type oid Long
        @return         Tuple including dimensions and size of the file
@rtype Tuple
"""
photo = None
meta = self.getMetadataService()
try:
if oid is None:
ann = meta.loadAnnotations("Experimenter", [self.getEventContext().userId], None, None, None).get(self.getEventContext().userId, [])[0]
else:
ann = meta.loadAnnotations("Experimenter", [long(oid)], None, None, None).get(long(oid), [])[0]
store = self.createRawFileStore()
try:
store.setFileId(ann.file.id.val)
photo = store.read(0, long(ann.file.size.val))
finally:
store.close()
try:
im = Image.open(StringIO(photo))
except:
logger.error(traceback.format_exc())
return None
else:
return (im.size, ann.file.size.val)
except:
return None
def deleteExperimenterPhoto(self, oid=None):
ann = None
meta = self.getMetadataService()
try:
if oid is None:
ann = meta.loadAnnotations("Experimenter", [self.getEventContext().userId], None, None, None).get(self.getEventContext().userId, [])[0]
else:
ann = meta.loadAnnotations("Experimenter", [long(oid)], None, None, None).get(long(oid), [])[0]
except:
logger.error(traceback.format_exc())
raise IOError("Photo does not exist.")
else:
exp = self.getUser()
links = exp._getAnnotationLinks()
# there should be only one ExperimenterAnnotationLink
# but if there is more then one all of them should be deleted.
for l in links:
self.deleteObjectDirect(l)
self.deleteObjects("/Annotation", [ann.id.val]) # No error handling?
def cropExperimenterPhoto(self, box, oid=None):
"""
Crop File annotation with the namespace:
"openmicroscopy.org/omero/experimenter/photo" (NSEXPERIMENTERPHOTO) linked
to the given user ID. If user id not set, owned by the current user.
        New dimensions are defined by square positions box = (x1,y1,x2,y2).
@param box tuple of new square positions
@type box Tuple
@param oid experimenter ID
@type oid Long
"""
# TODO: crop method could be moved to the server side
photo = None
meta = self.getMetadataService()
ann = None
try:
if oid is None:
ann = meta.loadAnnotations("Experimenter", [self.getEventContext().userId], None, None, None).get(self.getEventContext().userId, [])[0]
else:
ann = meta.loadAnnotations("Experimenter", [long(oid)], None, None, None).get(long(oid), [])[0]
store = self.createRawFileStore()
try:
store.setFileId(ann.file.id.val)
photo = store.read(0, long(ann.file.size.val))
finally:
store.close()
except:
logger.error(traceback.format_exc())
raise IOError("Photo does not exist.")
else:
region = None
try:
im = Image.open(StringIO(photo))
region = im.crop(box)
except IOError:
logger.error(traceback.format_exc())
raise IOError("Cannot open that photo.")
else:
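                # save the cropped region in its original format and re-upload it,
                # overwriting the existing photo annotation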
imdata=StringIO()
region.save(imdata, format=im.format)
self.uploadMyUserPhoto(ann.file.name.val, ann.file.mimetype.val, imdata.getvalue())
def getExperimenterDefaultPhoto(self):
"""
If file annotation with the namespace:
"openmicroscopy.org/omero/experimenter/photo" (NSEXPERIMENTERPHOTO)
        is not linked to the experimenter, this method generates a default picture of the person.
@return Data from the image.
@rtype String
"""
img = Image.open(settings.DEFAULT_USER)
img.thumbnail((150,150), Image.ANTIALIAS)
f = cStringIO.StringIO()
img.save(f, "PNG")
f.seek(0)
return f.read()
def getFileFormat(self, format):
"""
Get file annotation format for the given value.
@return Omero File format
@rtype String
"""
query_serv = self.getQueryService()
        return query_serv.findByString("Format", "value", format).getValue().val
################################################
## Counters
def getCollectionCount(self, parent, child, ids):
"""
Counts the number of members in a collection for a given object.
@param parent The fully-qualified classname of the object to be tested
@type parent String
@param child Name of the property on that class, omitting getters and setters.
@type child String
@param ids Set of Longs, the ids of the objects to test
@type ids L{Long}
@return A map from id integer to count integer
@rtype L{(Long, Long)}
"""
container = self.getContainerService()
return container.getCollectionCount(parent, child, ids, None, self.SERVICE_OPTS)
################################################
## Validators
def checkOmeName(self, ome_name, old_omeName=None):
if ome_name == old_omeName:
return False
query_serv = self.getQueryService()
p = omero.sys.Parameters()
p.map = {}
p.map["omeName"] = rstring(smart_str(ome_name))
sql = "select e from Experimenter as e where e.omeName = (:omeName)"
exps = query_serv.findAllByQuery(sql, p, self.SERVICE_OPTS)
if len(exps) > 0:
return True
else:
return False
def checkGroupName(self, name, old_name=None):
if name == old_name:
return False
query_serv = self.getQueryService()
p = omero.sys.Parameters()
p.map = {}
p.map["name"] = rstring(smart_str(name))
sql = "select g from ExperimenterGroup as g where g.name = (:name)"
grs = query_serv.findAllByQuery(sql, p, self.SERVICE_OPTS)
if len(grs) > 0:
return True
else:
return False
def checkEmail(self, email, old_email=None):
if email == "":
return False
if email == old_email:
return False
query_serv = self.getQueryService()
p = omero.sys.Parameters()
p.map = {}
p.map["email"] = rstring(smart_str(email))
sql = "select e from Experimenter as e where e.email = (:email)"
exps = query_serv.findAllByQuery(sql, p, self.SERVICE_OPTS)
if len(exps) > 0:
return True
else:
return False
##############################################
## Sets methods ##
def changeUserPassword(self, omeName, password, my_password):
"""
        Change the password for a given user.
        @param omeName      Experimenter omename
@type omeName String
@param password Must pass validation in the security sub-system.
@type password String
@param my_password Must pass validation in the security sub-system.
@type my_password String
"""
admin_serv = self.getAdminService()
self.c.sf.setSecurityPassword(my_password)
admin_serv.changeUserPassword(omeName, rstring(str(password)))
def changeMyPassword(self, password, old_password):
"""
Change the password for the current user by passing the old password.
@param password Must pass validation in the security sub-system.
@type password String
@param old_password Old password
@type old_password String
@return None or error message if password could not be changed
@rtype String
"""
admin_serv = self.getAdminService()
admin_serv.changePasswordWithOldPassword(rstring(str(old_password)), rstring(str(password)))
def createExperimenter(self, omeName, firstName, lastName, email, isAdmin, isActive, defaultGroup, otherGroups, password, middleName=None, institution=None):
"""
Create and return a new user in the given groups with password.
@param omeName A new username.
@type omeName String
@param firstName A new first name.
@type firstName String
@param lastName A new last name.
@type lastName String
@param email A new email.
@type email String
@param isAdmin An Admin permission.
@type isAdmin Boolean
@param isActive Active user (user can log in).
@type isActive Boolean
@param defaultGroup Instance of ExperimenterGroup selected as a first active group.
@type defaultGroup ExperimenterGroupI
@param otherGroups List of ExperimenterGroup instances. Can be empty.
@type otherGroups L{ExperimenterGroupI}
@param password Must pass validation in the security sub-system.
@type password String
@param middleName A middle name.
@type middleName String
@param institution An institution.
@type institution String
@return ID of the newly created Experimenter Not null.
@rtype Long
"""
experimenter = ExperimenterI()
experimenter.omeName = rstring(str(omeName))
experimenter.firstName = rstring(str(firstName))
experimenter.middleName = middleName is not None and rstring(str(middleName)) or None
experimenter.lastName = rstring(str(lastName))
experimenter.email = rstring(str(email))
experimenter.institution = (institution!="" and institution is not None) and rstring(str(institution)) or None
listOfGroups = list()
# system group
if isAdmin:
g = self.getObject("ExperimenterGroup", attributes={'name':'system'})
listOfGroups.append(g._obj)
# user group
if isActive:
g = self.getObject("ExperimenterGroup", attributes={'name':'user'})
listOfGroups.append(g._obj)
for g in otherGroups:
listOfGroups.append(g._obj)
admin_serv = self.getAdminService()
return admin_serv.createExperimenterWithPassword(experimenter, rstring(str(password)), defaultGroup._obj, listOfGroups)
def updateExperimenter(self, experimenter, omeName, firstName, lastName, email, isAdmin, isActive, defaultGroup, otherGroups, middleName=None, institution=None):
"""
Update an existing user including groups user is a member of.
Password cannot be changed by calling that method.
@param experimenter An existing Experimenter instance.
@type experimenter ExperimenterWrapper
@param omeName A new username.
@type omeName String
@param firstName A new first name.
@type firstName String
@param lastName A new last name.
@type lastName String
@param email A new email.
@type email String
@param isAdmin An Admin permission.
@type isAdmin Boolean
@param isActive Active user (user can log in).
@type isActive Boolean
@param defaultGroup Instance of ExperimenterGroup selected as a first active group.
@type defaultGroup ExperimenterGroupI
@param otherGroups List of ExperimenterGroup instances. Can be empty.
@type otherGroups L{ExperimenterGroupI}
@param middleName A middle name.
@type middleName String
@param institution An institution.
@type institution String
"""
up_exp = experimenter._obj
up_exp.omeName = rstring(str(omeName))
up_exp.firstName = rstring(str(firstName))
up_exp.middleName = middleName is not None and rstring(str(middleName)) or None
up_exp.lastName = rstring(str(lastName))
up_exp.email = rstring(str(email))
up_exp.institution = (institution!="" and institution is not None) and rstring(str(institution)) or None
# old list of groups
old_groups = list()
for ogr in up_exp.copyGroupExperimenterMap():
if ogr is None:
continue
old_groups.append(ogr.parent)
# create list of new groups
new_groups = list()
# default group
new_groups.append(defaultGroup._obj)
# system group
if isAdmin:
g = self.getObject("ExperimenterGroup", attributes={'name':'system'})
if defaultGroup.id != g.id:
new_groups.append(g._obj)
# user group
if isActive:
g = self.getObject("ExperimenterGroup", attributes={'name':'user'})
new_groups.append(g._obj)
# rest of groups
for g in otherGroups:
new_groups.append(g._obj)
addGroups = list()
rmGroups = list()
# remove
for ogr in old_groups:
flag = False
for ngr in new_groups:
if ngr.id.val == ogr.id.val:
flag = True
if not flag:
rmGroups.append(ogr)
# add
for ngr in new_groups:
flag = False
for ogr in old_groups:
if ogr.id.val == ngr.id.val:
flag = True
if not flag:
addGroups.append(ngr)
admin_serv = self.getAdminService()
admin_serv.updateExperimenter(up_exp)
if len(addGroups) > 0:
admin_serv.addGroups(up_exp, addGroups)
admin_serv.setDefaultGroup(up_exp, defaultGroup._obj)
if len(rmGroups) > 0:
admin_serv.removeGroups(up_exp, rmGroups)
def setMembersOfGroup(self, group, new_members):
"""
Change members of the group. Returns a list of existing group members
that could not be removed from the group because it is their only group.
@param group An existing ExperimenterGroup instance.
@type group ExperimenterGroupI
        @param new_members  List of new Experimenter IDs.
@type new_members L{Long}
@return List of Experimenters not removed from group
@rtype List of L{ExperimenterWrapper}
"""
experimenters = list(self.getObjects("Experimenter"))
new_membersIds = [nm.id for nm in new_members]
old_members = group.getMembers()
old_membersIds = [om.id for om in old_members]
old_available = list()
for e in experimenters:
if e.id not in old_membersIds:
old_available.append(e)
old_availableIds = [oa.id for oa in old_available]
new_available = list()
for e in experimenters:
if e.id not in new_membersIds:
new_available.append(e)
new_availableIds = [na.id for na in new_available]
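        # members to remove are old members missing from the new list;
        # members to add are newly selected experimenters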
rm_exps = list(set(old_membersIds) - set(new_membersIds))
add_exps = list(set(old_availableIds) - set(new_availableIds))
to_remove = list()
to_add = list()
for e in experimenters:
if e.id in rm_exps:
# removing user from their default group #9193
# if e.getDefaultGroup().id != group.id:
to_remove.append(e._obj)
if e.id in add_exps:
to_add.append(e._obj)
admin_serv = self.getAdminService()
userGid = admin_serv.getSecurityRoles().userGroupId
failures = []
for e in to_add:
admin_serv.addGroups(e, [group._obj])
for e in to_remove:
# Experimenter needs to stay in at least 1 non-user group
gs = [l.parent.id.val for l in e.copyGroupExperimenterMap() if l.parent.id.val != userGid]
if len(gs) == 1:
failures.append(ExperimenterWrapper(self, e))
continue
admin_serv.removeGroups(e, [group._obj])
return failures
def setOwnersOfGroup(self, group, new_owners):
"""
        Change owners of the group.
@param group An existing ExperimenterGroup instance.
@type group ExperimenterGroupI
        @param new_owners   List of new Experimenter IDs.
@type new_members L{Long}
"""
experimenters = list(self.getObjects("Experimenter"))
new_ownersIds = [no.id for no in new_owners]
old_owners = group.getOwners()
old_ownersIds = [oo.id for oo in old_owners]
old_available = list()
for e in experimenters:
if e.id not in old_ownersIds:
old_available.append(e)
old_availableIds = [oa.id for oa in old_available]
new_available = list()
for e in experimenters:
if e.id not in new_ownersIds:
new_available.append(e)
new_availableIds = [na.id for na in new_available]
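        # owners to remove are previous owners missing from the new list;
        # owners to add are newly selected experimenters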
rm_exps = list(set(old_ownersIds) - set(new_ownersIds))
add_exps = list(set(old_availableIds) - set(new_availableIds))
to_remove = list()
to_add = list()
for e in experimenters:
if e.id in rm_exps:
# removing user from their default group #9193
# if e.getDefaultGroup().id != group.id:
to_remove.append(e._obj)
if e.id in add_exps:
to_add.append(e._obj)
admin_serv = self.getAdminService()
admin_serv.addGroupOwners(group._obj, to_add)
admin_serv.removeGroupOwners(group._obj, to_remove)
#def deleteExperimenter(self, experimenter):
# """
# Removes a user by removing the password information for that user as well
# as all GroupExperimenterMap instances.
#
# @param user Experimenter to be deleted. Not null.
# @type user ExperimenterI
# """
# admin_serv = self.getAdminService()
# admin_serv.deleteExperimenter(experimenter)
def createGroup(self, name, permissions, owners=list(), description=None):
"""
Create and return a new group with the given owners.
@param group A new ExperimenterGroup instance.
@type group ExperimenterGroupI
@param owners List of Experimenter instances. Can be empty.
@type owners L{ExperimenterI}
@param permissions Permissions instances.
@type permissions L{PermissionsI}
@return ID of the newly created ExperimenterGroup Not null.
@rtype Long
"""
new_gr = ExperimenterGroupI()
new_gr.name = rstring(str(name))
new_gr.description = (description!="" and description is not None) and rstring(str(description)) or None
new_gr.details.permissions = permissions
admin_serv = self.getAdminService()
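        # create the group first, then reload it and attach the selected owners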
gr_id = admin_serv.createGroup(new_gr)
group = admin_serv.getGroup(gr_id)
listOfOwners = list()
for exp in owners:
listOfOwners.append(exp._obj)
admin_serv.addGroupOwners(group, listOfOwners)
return gr_id
def updateGroup(self, group, name, permissions, owners=list(), description=None):
"""
Update an existing user including groups user is a member of.
Password cannot be changed by calling that method.
@param group A new ExperimenterGroup instance.
@type group ExperimenterGroupI
@param name A new group name.
@type name String
@param permissions Permissions instances.
@type permissions L{PermissionsI}
@param owners List of Experimenter instances. Can be empty.
@type owners L{ExperimenterI}
@param description A description.
@type description String
"""
up_gr = group._obj
up_gr.name = rstring(str(name))
up_gr.description = (description!="" and description is not None) and rstring(str(description)) or None
# old list of owners
old_owners = list()
for oex in up_gr.copyGroupExperimenterMap():
if oex is None:
continue
if oex.owner.val:
old_owners.append(oex.child)
add_exps = list()
rm_exps = list()
# remove
for oex in old_owners:
flag = False
for nex in owners:
if nex._obj.id.val == oex.id.val:
flag = True
if not flag:
rm_exps.append(oex)
# add
for nex in owners:
flag = False
for oex in old_owners:
if oex.id.val == nex._obj.id.val:
flag = True
if not flag:
add_exps.append(nex._obj)
admin_serv = self.getAdminService()
# Should we update updateGroup so this would be atomic?
admin_serv.updateGroup(up_gr)
if permissions is not None:
logger.warning("WARNING: changePermissions was called!!!")
admin_serv.changePermissions(up_gr, permissions)
admin_serv.addGroupOwners(up_gr, add_exps)
admin_serv.removeGroupOwners(up_gr, rm_exps)
def updateMyAccount(self, experimenter, firstName, lastName, email, defaultGroupId, middleName=None, institution=None):
"""
Allows a user to update his/her own information and set the default group for a given user.
@param experimenter A data transfer object. Only the fields: firstName, middleName,
lastName, email, and institution are checked. Not null.
@type experimenter ExperimenterWrapper
@param firstName A new first name.
@type firstName String
@param lastName A new last name.
@type lastName String
@param email A new email.
@type email String
@param defaultGroup Instance of ExperimenterGroup selected as a first active group.
@type defaultGroup ExperimenterGroupI
@param middleName A middle name.
@type middleName String
@param institution An institution.
@type institution String
"""
up_exp = experimenter._obj
up_exp.firstName = rstring(str(firstName))
up_exp.middleName = middleName is not None and rstring(str(middleName)) or None
up_exp.lastName = rstring(str(lastName))
up_exp.email = rstring(str(email))
up_exp.institution = (institution!="" and institution is not None) and rstring(str(institution)) or None
admin_serv = self.getAdminService()
admin_serv.updateSelf(up_exp)
        defaultGroup = self.getObject("ExperimenterGroup", long(defaultGroupId))._obj
        admin_serv.setDefaultGroup(up_exp, defaultGroup)
        self.changeActiveGroup(defaultGroup.id)
def setDefaultGroup(self, group_id, exp_id=None):
"""
Sets the default group for the specified experimenter, or current user if not specified.
"""
group_id = long(group_id)
exp_id = exp_id is not None and long(exp_id) or self.getEventContext().userId
admin_serv = self.getAdminService()
admin_serv.setDefaultGroup(ExperimenterI(exp_id, False), ExperimenterGroupI(group_id, False))
def updatePermissions(self, obj, permissions):
"""
Allow to change the permission on the object.
@param obj A wrapped entity or an unloaded reference to an entity. Not null.
@type obj BlitzObjectWrapper
@param perm The permissions value for this entity. Not null.
@type perm PermissionsI
"""
admin_serv = self.getAdminService()
if permissions is not None:
logger.warning("WARNING: changePermissions was called!!!")
admin_serv.changePermissions(obj._obj, permissions)
def saveObject (self, obj):
"""
Provide method for directly updating object graphs. Act recursively on
the entire object graph, replacing placeholders and details where necessary,
and then "merging" the final graph. This means that the objects that are
passed into methods are copied over to new instances which are then returned.
The original objects should be discarded.
@param obj An entity or an unloaded reference to an entity. Not null.
@type obj ObjectI
"""
u = self.getUpdateService()
u.saveObject(obj, self.SERVICE_OPTS)
def saveArray (self, objs):
"""
Provide method for directly updating list of object graphs. Act recursively on
the entire object graph, replacing placeholders and details where necessary,
and then "merging" the final graph. This means that the objects that are
passed into methods are copied over to new instances which are then returned.
The original objects should be discarded.
@param obj List of entities or an unloaded references to an entity. Not null.
@type obj L{ObjectI}
"""
u = self.getUpdateService()
u.saveArray(objs, self.SERVICE_OPTS)
def saveAndReturnObject (self, obj):
"""
Provide method for directly updating object graphs and return it. Act recursively on
the entire object graph, replacing placeholders and details where necessary,
and then "merging" the final graph. This means that the objects that are
passed into methods are copied over to new instances which are then returned.
The original objects should be discarded.
@param obj An entity or an unloaded reference to an entity. Not null.
@type obj ObjectI
@return Saved object
@rtype ObjectI
"""
u = self.getUpdateService()
res = u.saveAndReturnObject(obj, self.SERVICE_OPTS)
res.unload()
obj = omero.gateway.BlitzObjectWrapper(self, res)
return obj
def saveAndReturnId (self, obj):
"""
Provide method for directly updating object graphs and return ID. Act recursively on
the entire object graph, replacing placeholders and details where necessary,
and then "merging" the final graph. This means that the objects that are
passed into methods are copied over to new instances which are then returned.
The original objects should be discarded.
@param obj An entity or an unloaded reference to an entity. Not null.
@type obj ObjectI
@return ID of saved object
@rtype Long
"""
u = self.getUpdateService()
res = u.saveAndReturnObject(obj, self.SERVICE_OPTS)
res.unload()
return res.id.val
def saveAndReturnFile(self, binary, oFile_id):
"""
Provide method for directly updating a file object and return binary.
Assumes that the checksum algorithm used for file integrity verification is SHA-1.
@param binary Binary. Not null.
@type binary String
@param oFile_id File Id in order to manage the state of the service. Not null.
@type oFile_id Long
@return Shallow copy of file.
"""
store = self.createRawFileStore()
        store.setFileId(oFile_id, self.SERVICE_OPTS)
pos = 0
rlen = 0
hash = hash_sha1()
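        # stream the upload in chunks, computing a client-side SHA-1 as we go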
for chunk in binary.chunks():
rlen = len(chunk)
store.write(chunk, pos, rlen)
hash.update(chunk)
pos = pos + rlen
ofile = store.save(self.SERVICE_OPTS)
store.close()
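        # compare the server-side checksum with the locally computed SHA-1 to verify upload integrity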
serverhash = ofile.hash.val
clienthash = hash.hexdigest()
if serverhash != clienthash:
msg = "SHA-1 checksums do not match in file upload: client has %s but server has %s" % (clienthash, serverhash)
logger.error(msg)
raise Exception(msg)
return ofile
##############################################
## IShare
def getShare (self, oid):
"""
Gets share for the given share id.
@param oid: Share ID.
@type oid: Long
@return: ShareWrapper or None
@rtype: L{ShareWrapper}
"""
sh_serv = self.getShareService()
sh = sh_serv.getShare(long(oid))
if sh is not None:
return ShareWrapper(self, sh)
else:
return None
def getOwnShares(self):
"""
Gets all owned shares for the current user.
@return: Shares that user owns
@rtype: L{ShareWrapper} generator
"""
sh = self.getShareService()
for e in sh.getOwnShares(False):
yield ShareWrapper(self, e)
def getMemberShares(self):
"""
Gets all shares where current user is a member.
@return: Shares that user is a member of
@rtype: L{ShareWrapper} generator
"""
sh = self.getShareService()
for e in sh.getMemberShares(False):
yield ShareWrapper(self, e)
def getMemberCount(self, share_ids):
"""
Returns a map from share id to the count of total members (including the
owner). This is represented by ome.model.meta.ShareMember links.
@param share_ids: List of IDs
@type share_ids: List of Longs
@return: Dict of shareId: member-count
@rtype: Dict of long: long
"""
sh = self.getShareService()
return sh.getMemberCount(share_ids)
def getCommentCount(self, share_ids):
"""
Returns a map from share id to comment count.
@param share_ids: List of IDs
@type share_ids: List of Longs
@return: Dict of shareId: comment-count
@rtype: Dict of long: long
"""
sh = self.getShareService()
return sh.getCommentCount(share_ids)
def getContents(self, share_id):
"""
Looks up all items belonging to the share, wrapped in object wrapper
@param share_id: share ID
@type share_id: Long
@return: Share contents
@rtype: L{omero.gateway.BlitzObjectWrapper} generator
"""
sh = self.getShareService()
for e in sh.getContents(long(share_id)):
try:
obj = omero.gateway.BlitzObjectWrapper(self, e)
except:
obj = omero.gateway.BlitzObjectWrapper(self,None)
obj._obj = e
yield obj
def getComments(self, share_id):
"""
Looks up all comments which belong to the share, wrapped in object wrapper
@param share_id: share ID
@type share_id: Long
@return: Share comments
@rtype: L{AnnotationWrapper} generator
"""
sh = self.getShareService()
for e in sh.getComments(long(share_id)):
yield AnnotationWrapper(self, e)
def getAllMembers(self, share_id):
"""
        Get all Experimenter users who are members of the share.
@param share_id: share ID
@type share_id: Long
@return: Members of share
@rtype: L{ExperimenterWrapper} generator
"""
sh = self.getShareService()
for e in sh.getAllMembers(long(share_id)):
yield ExperimenterWrapper(self, e)
def getAllGuests(self, share_id):
"""
Get the email addresses for all share guests.
@param share_id: share ID
@type share_id: Long
@return: List of e-mail addresses
@rtype: List of Strings
"""
sh = self.getShareService()
return sh.getAllGuests(long(share_id))
def getAllUsers(self, share_id):
"""
Get a single set containing the login names of the users as well email addresses for guests.
@param share_id: share ID
@type share_id: Long
@return: List of usernames and e-mail addresses
@rtype: List of Strings
"""
sh = self.getShareService()
return sh.getAllUsers(long(share_id))
def prepareRecipients(self, recipients):
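        # collect e-mail addresses of the given recipients, skipping users without one;
        # raises if the resulting list is empty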
recps = list()
for m in recipients:
try:
if m.email is not None and m.email!="":
recps.append(m.email)
except:
logger.error(traceback.format_exc())
logger.info(recps)
if len(recps) == 0:
raise AttributeError("Recipients list is empty")
return recps
def addComment(self, host, blitz_id, share_id, comment):
sh = self.getShareService()
new_cm = sh.addComment(long(share_id), str(comment))
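        # when the share is active, e-mail every member (and the owner) except the comment author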
members = list(self.getAllMembers(long(share_id)))
sh = self.getShare(long(share_id))
if self.getEventContext().userId != sh.owner.id.val:
members.append(sh.getOwner())
if sh.active:
try:
for m in members:
try:
if m.id == self.getEventContext().userId:
members.remove(m)
except:
logger.error(traceback.format_exc())
recipients = self.prepareRecipients(members)
except Exception:
logger.error(traceback.format_exc())
else:
t = settings.EMAIL_TEMPLATES["add_comment_to_share"]
message = t['text_content'] % (host, blitz_id)
message_html = t['html_content'] % (host, blitz_id, host, blitz_id)
try:
title = 'OMERO.web - new comment for share %i' % share_id
text_content = message
html_content = message_html
msg = EmailMultiAlternatives(title, text_content, settings.SERVER_EMAIL, recipients)
msg.attach_alternative(html_content, "text/html")
msg.send()
logger.error("Email was sent")
except:
logger.error(traceback.format_exc())
return CommentAnnotationWrapper(self, new_cm)
def removeImage(self, share_id, image_id):
sh = self.getShareService()
img = self.getObject("Image", image_id)
sh.removeObject(long(share_id), img._obj)
def createShare(self, host, blitz_id, image, message, members, enable, expiration=None):
sh = self.getShareService()
q = self.getQueryService()
items = list()
ms = list()
p = omero.sys.Parameters()
p.map = {}
#images
if len(image) > 0:
p.map["ids"] = rlist([rlong(long(a)) for a in image])
sql = "select im from Image im join fetch im.details.owner join fetch im.details.group where im.id in (:ids) order by im.name"
items.extend(q.findAllByQuery(sql, p, self.SERVICE_OPTS))
#members
if members is not None:
p.map["ids"] = rlist([rlong(long(a)) for a in members])
sql = "select e from Experimenter e " \
"where e.id in (:ids) order by e.omeName"
ms = q.findAllByQuery(sql, p, self.SERVICE_OPTS)
sid = sh.createShare(message, rtime(expiration), items, ms, [], enable)
# send email if active
if enable:
try:
recipients = self.prepareRecipients(ms)
except Exception:
logger.error(traceback.format_exc())
else:
t = settings.EMAIL_TEMPLATES["create_share"]
message = t['text_content'] % (host, blitz_id, self.getUser().getFullName())
message_html = t['html_content'] % (host, blitz_id, host, blitz_id, self.getUser().getFullName())
try:
title = 'OMERO.web - new share %i' % sid
text_content = message
html_content = message_html
msg = EmailMultiAlternatives(title, text_content, settings.SERVER_EMAIL, recipients)
msg.attach_alternative(html_content, "text/html")
msg.send()
logger.error("Email was sent")
except:
logger.error(traceback.format_exc())
def updateShareOrDiscussion (self, host, blitz_id, share_id, message, add_members, rm_members, enable, expiration=None):
sh = self.getShareService()
sh.setDescription(long(share_id), message)
sh.setExpiration(long(share_id), rtime(expiration))
sh.setActive(long(share_id), enable)
if len(add_members) > 0:
sh.addUsers(long(share_id), add_members)
if len(rm_members) > 0:
sh.removeUsers(long(share_id), rm_members)
# send email if active
if len(add_members) > 0:
try:
recipients = self.prepareRecipients(add_members)
except Exception:
logger.error(traceback.format_exc())
else:
t = settings.EMAIL_TEMPLATES["add_member_to_share"]
message = t['text_content'] % (host, blitz_id, self.getUser().getFullName())
message_html = t['html_content'] % (host, blitz_id, host, blitz_id, self.getUser().getFullName())
try:
title = 'OMERO.web - update share %i' % share_id
text_content = message
html_content = message_html
msg = EmailMultiAlternatives(title, text_content, settings.SERVER_EMAIL, recipients)
msg.attach_alternative(html_content, "text/html")
msg.send()
logger.error("Email was sent")
except:
logger.error(traceback.format_exc())
if len(rm_members) > 0:
try:
recipients = self.prepareRecipients(rm_members)
except Exception:
logger.error(traceback.format_exc())
else:
t = settings.EMAIL_TEMPLATES["remove_member_from_share"]
message = t['text_content'] % (host, blitz_id)
message_html = t['html_content'] % (host, blitz_id, host, blitz_id)
try:
title = 'OMERO.web - update share %i' % share_id
text_content = message
html_content = message_html
msg = EmailMultiAlternatives(title, text_content, settings.SERVER_EMAIL, recipients)
msg.attach_alternative(html_content, "text/html")
msg.send()
logger.error("Email was sent")
except:
logger.error(traceback.format_exc())
##############################################
## History methods ##
#def getLastAcquiredImages (self):
# tm = self.getTimelineService()
# p = omero.sys.Parameters()
# p.map = {}
# f = omero.sys.Filter()
# f.ownerId = rlong(self.getEventContext().userId)
# f.groupId = rlong(self.getEventContext().groupId)
# f.limit = rint(6)
# p.theFilter = f
# for e in tm.getMostRecentObjects(['Image'], p, False)["Image"]:
# yield ImageWrapper(self, e)
def listLastImportedImages (self):
"""
Retrieve most recent imported images
controlled by the security system.
@return: Generator yielding Images
@rtype: L{ImageWrapper} generator
"""
tm = self.getTimelineService()
p = omero.sys.Parameters()
p.map = {}
f = omero.sys.Filter()
f.ownerId = rlong(self.getEventContext().userId)
f.groupId = rlong(self.getEventContext().groupId)
f.limit = rint(10)
p.theFilter = f
for e in tm.getMostRecentObjects(['Image'], p, False, self.SERVICE_OPTS)["Image"]:
yield ImageWrapper(self, e)
def listMostRecentShares (self):
"""
Retrieve most recent shares
controlled by the security system.
@return: Generator yielding SessionAnnotationLink
@rtype: L{ShareWrapper} generator
"""
tm = self.getTimelineService()
p = omero.sys.Parameters()
p.map = {}
f = omero.sys.Filter()
f.ownerId = rlong(self.getEventContext().userId)
f.limit = rint(10)
p.theFilter = f
for e in tm.getMostRecentShareCommentLinks(p, self.SERVICE_OPTS):
yield ShareWrapper(self, e.parent)
def listMostRecentShareComments (self):
"""
Retrieve most recent share comments
controlled by the security system.
@return: Generator yielding SessionAnnotationLink
@rtype: L{SessionCommentWrapper} generator
"""
tm = self.getTimelineService()
p = omero.sys.Parameters()
p.map = {}
f = omero.sys.Filter()
f.ownerId = rlong(self.getEventContext().userId)
f.limit = rint(10)
p.theFilter = f
for e in tm.getMostRecentShareCommentLinks(p, self.SERVICE_OPTS):
yield AnnotationWrapper(self, e.child, link=ShareWrapper(self, e.parent))
def listMostRecentComments (self):
"""
Retrieve most recent comment annotations
controlled by the security system.
@return: Generator yielding BlitzObjectWrapper
@rtype: L{BlitzObjectWrapper} generator
"""
tm = self.getTimelineService()
p = omero.sys.Parameters()
p.map = {}
f = omero.sys.Filter()
f.ownerId = rlong(self.getEventContext().userId)
f.groupId = rlong(self.getEventContext().groupId)
f.limit = rint(10)
p.theFilter = f
for e in tm.getMostRecentAnnotationLinks(None, ['CommentAnnotation'], None, p, self.SERVICE_OPTS):
yield omero.gateway.BlitzObjectWrapper(self, e)
def listMostRecentTags (self):
"""
Retrieve most recent tag annotations
controlled by the security system.
@return: Generator yielding BlitzObjectWrapper
@rtype: L{BlitzObjectWrapper} generator
"""
tm = self.getTimelineService()
p = omero.sys.Parameters()
p.map = {}
f = omero.sys.Filter()
#f.ownerId = rlong(self.getEventContext().userId)
f.groupId = rlong(self.getEventContext().groupId)
f.limit = rint(200)
p.theFilter = f
for e in tm.getMostRecentAnnotationLinks(None, ['TagAnnotation'], None, p, self.SERVICE_OPTS):
yield omero.gateway.BlitzObjectWrapper(self, e.child)
def getDataByPeriod (self, start, end, eid, otype=None, page=None):
"""
Retrieve given data objects by the given period of time
controlled by the security system.
@param start Starting date
@type start Long
@param end Finishing date
@type end Long
@param otype Data type: Project, Dataset, Image
@type otype String
@return: Map of project, dataset and image lists
@rtype: Map
"""
tm = self.getTimelineService()
p = omero.sys.ParametersI()
p.exp(eid)
if page is not None:
p.page(((int(page)-1)*settings.PAGE), settings.PAGE)
else:
p.page(None, 100)
im_list = list()
ds_list = list()
pr_list = list()
if otype is not None and otype in ("Image", "Dataset", "Project"):
otype = otype.title()
for e in tm.getByPeriod([otype], rtime(long(start)), rtime(long(end)), p, True, self.SERVICE_OPTS)[otype]:
wrapper = KNOWN_WRAPPERS.get(otype.title(), None)
im_list.append(wrapper(self, e))
else:
res = tm.getByPeriod(['Image', 'Dataset', 'Project'], rtime(long(start)), rtime(long(end)), p, True, self.SERVICE_OPTS)
try:
for e in res['Image']:
im_list.append(ImageWrapper(self, e))
except:
pass
try:
for e in res['Dataset']:
ds_list.append(DatasetWrapper(self, e))
except:
pass
try:
for e in res['Project']:
pr_list.append(ProjectWrapper(self, e))
except:
pass
return {'project': pr_list, 'dataset':ds_list, 'image':im_list}
def countDataByPeriod (self, start, end, eid, otype=None):
"""
Counts given data objects by the given period of time
controlled by the security system.
@param start Starting date
@type start Long
@param end Finishing date
@type end Long
@param otype Data type: Project, Dataset, Image
@type otype String
@return: Counter
@rtype: Long
"""
tm = self.getTimelineService()
p = omero.sys.Parameters()
p.map = {}
f = omero.sys.Filter()
f.ownerId = rlong(eid)
#f.groupId = rlong(self.getEventContext().groupId)
p.theFilter = f
if otype == 'image':
return tm.countByPeriod(['Image'], rtime(long(start)), rtime(long(end)), p, self.SERVICE_OPTS)['Image']
elif otype == 'dataset':
return tm.countByPeriod(['Dataset'], rtime(long(start)), rtime(long(end)), p, self.SERVICE_OPTS)['Dataset']
elif otype == 'project':
return tm.countByPeriod(['Project'], rtime(long(start)), rtime(long(end)), p, self.SERVICE_OPTS)['Project']
else:
c = tm.countByPeriod(['Image', 'Dataset', 'Project'], rtime(long(start)), rtime(long(end)), p, self.SERVICE_OPTS)
return c['Image']+c['Dataset']+c['Project']
def getEventsByPeriod (self, start, end, eid):
"""
Retrieve event log objects by the given period of time
controlled by the security system.
@param start Starting date
@type start Long
@param end Finishing date
@type end Long
@return: List of event logs
@rtype: List
"""
tm = self.getTimelineService()
p = omero.sys.Parameters()
p.map = {}
f = omero.sys.Filter()
f.limit = rint(100000)
try:
f.groupId = rlong(self.SERVICE_OPTS.getOmeroGroup())
except:
f.groupId = rlong(self.getEventContext().groupId)
f.ownerId = rlong(eid or self.getEventContext().userId)
p.theFilter = f
service_opts = self.createServiceOptsDict()
service_opts.setOmeroGroup(str(f.groupId.val))
return tm.getEventLogsByPeriod(rtime(start), rtime(end), p, service_opts)
#yield EventLogWrapper(self, e)
def regroupFilesets (self, dsIds, fsIds):
"""
For each Fileset, make sure all the Images are in the same Dataset
as each other.
We choose a 'target' Dataset that already has one of the Fileset Images in it,
and is in the dsIds list.
This method is used when preparing to chgrp the specified datasets
"""
for fileset in self.getObjects("Fileset", fsIds):
gid = fileset.getDetails().group.id.val
self.SERVICE_OPTS.setOmeroGroup(gid)
fsImgs = fileset.copyImages() # all these need to be in the same Dataset
# find one of the Datasets (obj_ids) that contains one of these images...
target_ds = None
for i in fsImgs:
for d in i.listParents():
if d.id in dsIds:
target_ds = d.id
break
if target_ds is not None:
break
# move ALL fs images into that Dataset
for i in fsImgs:
correct_dataset = False
for plink in i.getParentLinks():
if plink.parent.id != target_ds:
self.deleteObjectDirect(plink._obj)
else:
correct_dataset = True
if not correct_dataset:
link = omero.model.DatasetImageLinkI()
link.setChild(omero.model.ImageI(i.getId(), False))
link.setParent(omero.model.DatasetI(target_ds, False))
self.saveObject(link)
#conn.chgrpObjects(dtype, obj_ids, group_id, container_id)
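# Illustrative sketch (assumed ids, not part of the original module): before
# moving datasets to another group one would typically regroup their filesets,
# e.g.
#
#   conn.regroupFilesets(dsIds=[101, 102], fsIds=[55])
#
# so that every image of fileset 55 ends up in one of the listed datasets.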
omero.gateway.BlitzGateway = OmeroWebGateway
class OmeroWebSafeCallWrapper(OmeroGatewaySafeCallWrapper): #pragma: no cover
"""
Function or method wrapper that handles L{Ice.ObjectNotExistException}
by re-creating the server side proxy.
"""
def handle_exception(self, e, *args, **kwargs):
if e.__class__ is Ice.ObjectNotExistException:
# Restored proxy object re-creation logic from the pre-#5835
# version of _safeCallWrap() from omero.gateway. (See #6365)
logger.warn('Attempting to re-create proxy and re-call method.')
try:
self.proxyObjectWrapper._obj = \
self.proxyObjectWrapper._create_func()
func = getattr(self.proxyObjectWrapper._obj, self.attr)
return func(*args, **kwargs)
except Exception, e:
self.debug(e.__class__.__name__, args, kwargs)
raise
else:
super(OmeroWebSafeCallWrapper, self).handle_exception(
e, *args, **kwargs)
omero.gateway.SafeCallWrapper = OmeroWebSafeCallWrapper
class OmeroWebObjectWrapper (object):
annotation_counter = None
def countParents (self):
l = self.listParents()
if l is not None:
return len(l)
def countAnnotations (self):
"""
Count annotations linked to the object and set the value
on the custom field 'annotation_counter'.
@return Counter
"""
if self.annotation_counter is not None:
return self.annotation_counter
else:
container = self._conn.getContainerService()
m = container.getCollectionCount(self._obj.__class__.__name__, type(self._obj).ANNOTATIONLINKS, [self._oid], None)
if m[self._oid] > 0:
self.annotation_counter = m[self._oid]
return self.annotation_counter
else:
return None
def getPermissions(self):
p = None
if self.details.getPermissions() is None:
return 'unknown'
else:
p = self.details.getPermissions()
if p.isGroupWrite():
flag = 'Read-Write'
elif p.isGroupAnnotate():
flag = 'Read-Annotate'
elif p.isGroupRead():
flag = 'Read-Only'
elif p.isUserRead():
flag = 'Private'
else:
flag = p
return flag
def warpName(self):
"""
Wrap the name of the object if the name is longer than 30 characters.
@return Wrapped string.
"""
try:
l = len(self.name)
if l < 30:
return self.name
elif l >= 30:
splited = []
for v in range(0,len(self.name),30):
splited.append(self.name[v:v+30]+"\n")
return "".join(splited)
except:
logger.info(traceback.format_exc())
return self.name
def getPermsCss(self):
"""
Returns a string that can be used as classes on an html element to
indicate the permissions flags of the object. E.g. "canEdit canLink"
Flags/classes are canEdit, canAnnotate, canLink, canDelete
"""
flags = []
if self.canEdit(): flags.append("canEdit")
if self.canAnnotate(): flags.append("canAnnotate")
if self.canLink(): flags.append("canLink")
if self.canDelete(): flags.append("canDelete")
if self.canChgrp(): flags.append("canChgrp")
return " ".join(flags)
class ExperimenterWrapper (OmeroWebObjectWrapper, omero.gateway.ExperimenterWrapper):
"""
omero_model_ExperimenterI class wrapper that overwrites omero.gateway.ExperimenterWrapper
and extends OmeroWebObjectWrapper.
"""
ldapUser = None
def __prepare__ (self, **kwargs):
super(ExperimenterWrapper, self).__prepare__(**kwargs)
if kwargs.has_key('ldapUser'):
self.ldapUser = kwargs['ldapUser']
def isEditable(self):
return self.omeName.lower() not in ('guest')
def isLdapUser(self):
"""
Return the DN of this experimenter if LDAP authentication is used
(i.e. a dn is set in the password table), otherwise None.
@return: Distinguished Name
@rtype: String
"""
if self.ldapUser == None:
admin_serv = self._conn.getAdminService()
self.ldapUser = admin_serv.lookupLdapAuthExperimenter(self.id)
return self.ldapUser
def getDefaultGroup(self):
geMap = self.copyGroupExperimenterMap()
if self.sizeOfGroupExperimenterMap() > 0 and geMap[0] is not None:
return ExperimenterGroupWrapper(self._conn, geMap[0].parent)
return None
def getOtherGroups(self, excluded_names=("user","guest"), excluded_ids=list()):
for gem in self.copyGroupExperimenterMap():
if gem is None:
continue
flag = False
if gem.parent.name.val in excluded_names:
flag = True
if gem.parent.id.val in excluded_ids:
flag = True
if not flag:
yield ExperimenterGroupWrapper(self._conn, gem.parent)
omero.gateway.ExperimenterWrapper = ExperimenterWrapper
class ExperimenterGroupWrapper (OmeroWebObjectWrapper, omero.gateway.ExperimenterGroupWrapper):
"""
omero_model_ExperimenterGroupI class wrapper that overwrites omero.gateway.ExperimenterGroupWrapper
and extends OmeroWebObjectWrapper.
"""
def isEditable(self):
return self.name.lower() not in ('guest', 'user')
def groupSummary(self):
"""
Loads the 'leaders' and 'colleagues' of this group (from conn.groupSummary)
onto the wrapper as self.leaders and self.colleagues, according to the
UI_MENU_DROPDOWN settings. Nothing is returned; the attributes are set in place.
"""
summary = self._conn.groupSummary(self.getId())
if settings.UI_MENU_DROPDOWN.get("LEADERS", None):
self.leaders = summary["leaders"]
self.leaders.sort(key=lambda x: x.getLastName().lower())
if settings.UI_MENU_DROPDOWN.get("COLLEAGUES", None):
self.colleagues = summary["colleagues"]
self.colleagues.sort(key=lambda x: x.getLastName().lower())
# Only show 'All Members' option if configured, and we're not in a private group
if settings.UI_MENU_DROPDOWN.get("ALL", None):
if self.details.permissions.isGroupRead() or self._conn.isAdmin() or self.isOwner():
self.all = True
def getOwners(self):
for gem in self.copyGroupExperimenterMap():
if gem is None:
continue
if gem.owner.val:
yield ExperimenterWrapper(self._conn, gem.child)
def getOwnersNames(self):
owners = list()
for e in self.getOwners():
owners.append(e.getFullName())
return ", ".join(owners)
def getMembers(self, excluded_omename=list(), excluded_ids=list()):
for gem in self.copyGroupExperimenterMap():
if gem is None:
continue
flag = False
if gem.child.omeName.val in excluded_omename:
flag = True
if gem.parent.id.val in excluded_ids:
flag = True
if not flag:
yield ExperimenterWrapper(self._conn, gem.child)
def isOwner(self):
""" Returns True if current user is Owner of this group """
return self.getId() in self._conn.getEventContext().leaderOfGroups
def isLocked(self):
if self.name == "user":
return True
elif self.name == "system":
return True
elif self.name == "guest":
return True
else:
return False
omero.gateway.ExperimenterGroupWrapper = ExperimenterGroupWrapper
class ProjectWrapper (OmeroWebObjectWrapper, omero.gateway.ProjectWrapper):
"""
omero_model_ProjectI class wrapper that overwrites omero.gateway.ProjectWrapper
and extends OmeroWebObjectWrapper.
"""
annotation_counter = None
def __prepare__ (self, **kwargs):
super(ProjectWrapper, self).__prepare__(**kwargs)
if kwargs.has_key('annotation_counter'):
self.annotation_counter = kwargs['annotation_counter']
omero.gateway.ProjectWrapper = ProjectWrapper
class DatasetWrapper (OmeroWebObjectWrapper, omero.gateway.DatasetWrapper):
"""
omero_model_DatasetI class wrapper that overwrites omero.gateway.DatasetWrapper
and extends OmeroWebObjectWrapper.
"""
annotation_counter = None
def __prepare__ (self, **kwargs):
super(DatasetWrapper, self).__prepare__(**kwargs)
if kwargs.has_key('annotation_counter'):
self.annotation_counter = kwargs['annotation_counter']
if kwargs.has_key('link'):
self.link = kwargs.has_key('link') and kwargs['link'] or None
omero.gateway.DatasetWrapper = DatasetWrapper
class ImageWrapper (OmeroWebObjectWrapper, omero.gateway.ImageWrapper):
"""
omero_model_ImageI class wrapper that overwrites omero.gateway.ImageWrapper
and extends OmeroWebObjectWrapper.
"""
annotation_counter = None
def __prepare__ (self, **kwargs):
super(ImageWrapper, self).__prepare__(**kwargs)
if kwargs.has_key('annotation_counter'):
self.annotation_counter = kwargs['annotation_counter']
if kwargs.has_key('link'):
self.link = kwargs.has_key('link') and kwargs['link'] or None
"""
This overrides the standard omero.gateway.ImageWrapper.getChannels
and catches exceptions.
"""
def getChannels (self):
try:
return super(ImageWrapper, self).getChannels()
except Exception:
logger.error('Failed to load channels:', exc_info=True)
return None
def showOriginalFilePaths (self):
"""
This determines whether we want to show the paths of
Original Imported Files.
"""
return super(ImageWrapper, self).countFilesetFiles() > 0
def getFilesetImages(self):
"""
Returns a list of the 'sibling' images (including this one) that belong
to this image's Fileset. Sorted by name.
If Image has no Fileset, return an empty list.
@rtype: List of {ImageWrapper}
"""
fileset = self.getFileset()
if fileset is not None:
fsImgs = fileset.copyImages()
fsImgs.sort(key=lambda x: x.getName().lower())
return fsImgs
return []
omero.gateway.ImageWrapper = ImageWrapper
class PlateWrapper (OmeroWebObjectWrapper, omero.gateway.PlateWrapper):
"""
omero_model_PlateI class wrapper that overwrites omero.gateway.PlateWrapper
and extends OmeroWebObjectWrapper.
"""
annotation_counter = None
def __prepare__ (self, **kwargs):
super(PlateWrapper, self).__prepare__(**kwargs)
if kwargs.has_key('annotation_counter'):
self.annotation_counter = kwargs['annotation_counter']
if kwargs.has_key('link'):
self.link = kwargs.has_key('link') and kwargs['link'] or None
omero.gateway.PlateWrapper = PlateWrapper
class WellWrapper (OmeroWebObjectWrapper, omero.gateway.WellWrapper):
"""
omero_model_WellI class wrapper that overwrites omero.gateway.WellWrapper
and extends OmeroWebObjectWrapper.
"""
annotation_counter = None
def __prepare__ (self, **kwargs):
super(WellWrapper, self).__prepare__(**kwargs)
if kwargs.has_key('annotation_counter'):
self.annotation_counter = kwargs['annotation_counter']
if kwargs.has_key('link'):
self.link = kwargs.has_key('link') and kwargs['link'] or None
omero.gateway.WellWrapper = WellWrapper
class PlateAcquisitionWrapper (OmeroWebObjectWrapper, omero.gateway.PlateAcquisitionWrapper):
"""
omero_model_PlateAcquisitionI class wrapper that overwrites omero.gateway.PlateAcquisitionWrapper
and extends OmeroWebObjectWrapper.
"""
annotation_counter = None
def __prepare__ (self, **kwargs):
super(PlateAcquisitionWrapper, self).__prepare__(**kwargs)
if kwargs.has_key('annotation_counter'):
self.annotation_counter = kwargs['annotation_counter']
omero.gateway.PlateAcquisitionWrapper = PlateAcquisitionWrapper
class ScreenWrapper (OmeroWebObjectWrapper, omero.gateway.ScreenWrapper):
"""
omero_model_ScreenI class wrapper that overwrites omero.gateway.ScreenWrapper
and extends OmeroWebObjectWrapper.
"""
annotation_counter = None
def __prepare__ (self, **kwargs):
super(ScreenWrapper, self).__prepare__(**kwargs)
if kwargs.has_key('annotation_counter'):
self.annotation_counter = kwargs['annotation_counter']
omero.gateway.ScreenWrapper = ScreenWrapper
class EventLogWrapper (omero.gateway.BlitzObjectWrapper):
"""
omero_model_EventLogI class wrapper extends omero.gateway.BlitzObjectWrapper.
"""
LINK_CLASS = "EventLog"
class ShareWrapper (omero.gateway.BlitzObjectWrapper):
"""
omero_model_ShareI class wrapper extends BlitzObjectWrapper.
"""
def getShareType(self):
if self.itemCount == 0:
return "Discussion"
else:
return "Share"
def isEmpty(self):
if self.itemCount == 0:
return True
return False
def getExpireDate(self):
"""
Gets the end date for the share
@return: End Date-time
@rtype: datetime object
"""
#workaround for problem of year 2038
try:
d = self.started+self.timeToLive
if d > 2051222400000:
return datetime(2035, 1, 1, 0, 0, 0)
return datetime.fromtimestamp(d / 1000)
except:
logger.info(traceback.format_exc())
return None
def getStartDate(self):
"""
Gets the start date of the share
@return: Start Date-time
@rtype: datetime object
"""
return datetime.fromtimestamp(self.getStarted()/1000)
def isExpired(self):
"""
Returns True if we are past the end date of the share
@return: True if share expired
@rtype: Boolean
"""
#workaround for problem of year 2038
now = time.time()
try:
d = long(self.started+self.timeToLive)
if (d / 1000) > now:
return False
return True
except:
logger.info(traceback.format_exc())
return None
def isOwned(self):
"""
Returns True if share is owned by the current user
@return: True if owned
@rtype: Boolean
"""
try:
if self.owner.id.val == self._conn.getEventContext().userId:
return True
except:
logger.error(traceback.format_exc())
return False
def getOwner(self):
"""
The owner of this share
@return: Owner
@rtype: L{ExperimenterWrapper}
"""
return omero.gateway.ExperimenterWrapper(self._conn, self.owner)
omero.gateway.refreshWrappers()
| gpl-2.0 | -5,932,833,563,807,024,000 | 36.435304 | 165 | 0.563394 | false |
MichaelClerx/libcellml | tests/resources/generator/algebraic_eqn_computed_var_on_rhs/model.py | 1 | 1041 | # The content of this file was generated using the Python profile of libCellML 0.2.0.
from enum import Enum
from math import *
__version__ = "0.1.0"
LIBCELLML_VERSION = "0.2.0"
STATE_COUNT = 0
VARIABLE_COUNT = 2
class VariableType(Enum):
CONSTANT = 1
COMPUTED_CONSTANT = 2
ALGEBRAIC = 3
VOI_INFO = {"name": "", "units": "", "component": ""}
STATE_INFO = [
]
VARIABLE_INFO = [
{"name": "a", "units": "dimensionless", "component": "my_algebraic_eqn", "type": VariableType.COMPUTED_CONSTANT},
{"name": "x", "units": "dimensionless", "component": "my_algebraic_eqn", "type": VariableType.COMPUTED_CONSTANT}
]
def create_states_array():
return [nan]*STATE_COUNT
def create_variables_array():
return [nan]*VARIABLE_COUNT
def initialize_states_and_constants(states, variables):
variables[0] = 1.0
def compute_computed_constants(variables):
variables[1] = variables[0]
def compute_rates(voi, states, rates, variables):
pass
def compute_variables(voi, states, rates, variables):
pass
| apache-2.0 | -6,894,229,068,872,666,000 | 19.019231 | 117 | 0.670509 | false |
jamiebull1/transport-carbon | transport_carbon/distance.py | 1 | 3345 | '''
Created on 9 Jan 2014
@author: Jamie
'''
import math
import requests
import re
from pygeocoder import Geocoder
import stations
from g_directions import GoogleDirections
''' Distance conversion constants '''
CHAINS_PER_MILE = 0.0125
KM_PER_MILE = 1.6093
EARTH_RADIUS = 6378137 # earth radius in meters
''' GCD_UPLIFT (Great Circle Distance) is an uplift to account for
non-optimal routing and stacking (now included in GHG factors
so set to zero) '''
GCD_UPLIFT = 0.00 # This may be different in years before 2013
def air_distance(origin, destination, units='km'):
''' Uses great circle distance and an uplift factor of 9% following
Defra's guidance '''
latlng_a = Geocoder.geocode(origin).coordinates
latlng_b = Geocoder.geocode(destination).coordinates
dist = great_circle_distance(latlng_a, latlng_b)
if units == 'km':
dist = dist / 1000.0
elif units == 'miles':
dist = dist / 1000.0 / KM_PER_MILE
else:
raise Exception('%s is not a valid unit system. Use "km" or "miles"' % units)
return dist * (1 + GCD_UPLIFT)
def road_distance(origin, destination, mode='driving', units='km'):
''' Uses the Google Directions API '''
gd = GoogleDirections()
options = {'mode': mode}
dist = gd.query(origin, destination, options).distance
if units == 'km':
dist = dist / 1000.0
elif units == 'miles':
dist = dist / 1000.0 / KM_PER_MILE
else:
raise Exception('%s is not a valid unit system. Use "km" or "miles"' % units)
return dist
def rail_distance(origin, destination, units='km'):
''' Uses the site railmiles.org as an unofficial API. It is very shaky, so a
better source would be preferable '''
origin = stations.closest(origin)
destination = stations.closest(destination)
query = {'origin': origin,
'destination': destination,
'type': 'text',
'shortestroutes': 'on'}
page = requests.post('http://mileage.railmiles.org/rmv2a.php/', data=query)
if "Error: " in page.text:
raise Exception('railmiles.org returned an error')
miles_pattern = re.compile('>Distance: (.*)mi \d')
miles = int(re.findall(miles_pattern, page.text)[0])
chains_pattern = re.compile('>Distance: \S* (.*)ch -')
chains = int(re.findall(chains_pattern, page.text)[0])
dist = miles + chains * CHAINS_PER_MILE
if units == 'km':
dist = dist * KM_PER_MILE
elif units == 'miles':
pass
else:
raise Exception('%s is not a valid unit system. Use "km" or "miles"' % units)
return dist
def sea_distance(origin, destination, units='km'):
raise NotImplementedError("sea_distance is not yet implemented")
def great_circle_distance(latlng_a, latlng_b):
''' From Gist https://gist.github.com/gabesmed/1826175 '''
lat1, lon1 = latlng_a
lat2, lon2 = latlng_b
dLat = math.radians(lat2 - lat1)
dLon = math.radians(lon2 - lon1)
a = (math.sin(dLat / 2) * math.sin(dLat / 2) +
math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *
math.sin(dLon / 2) * math.sin(dLon / 2))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = EARTH_RADIUS * c
return d
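# Illustrative usage (assumed place names; these helpers call external
# services - Google geocoding/directions, railmiles.org - so they need
# network access and results may vary):
#
#   print air_distance('London', 'Paris', units='km')
#   print road_distance('London', 'Manchester', mode='driving', units='miles')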
def main():
pass
if __name__ == "__main__":
main() | mit | -1,925,966,121,862,075,100 | 29.697248 | 85 | 0.625411 | false |
SavinaRoja/challenges | Project_Euler/pemaths/py_maths.py | 1 | 2656 | #A library of utility methods that can be used in multiple problems presented
#by Project Euler.
#By Paul Barton
import math
import time
def Eratosthenes(n):
"""
A Sieve of Eratosthenes method for the rapid computation of primes less
than or equal to the provided integer n. Coerces to integer. Returns a list
comprised of primes.
"""
n = int(n)
candidates = range(n+1)
fin = int(n**0.5)
#Loop over the candidates, marking out each multiple.
#If the current candidate is already checked off then
#continue to the next iteration.
for i in xrange(2, fin+1):
if not candidates[i]:
continue
candidates[2*i::i] = [None] * (n/i - 1) # extended-slice assignment that
#marks every multiple of i (from 2*i up to n) as None in one step
#Filter out non-primes and return the list.
return [i for i in candidates[2:] if i]
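# Example: Eratosthenes(30) returns [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]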
def primeFactorize(n, powers=True, primes=False):
"""
A function that computes the prime factors of a given integer n. The
optional "powers" argument provides the ability to return a 2D list pairing
primes with their respective powers (True), or a 1D list representing only
the primes (False). The function will accept a pre-generated list of primes
as the optional "primes" argument (use-case: repetitive calls), otherwise it
will call Eratosthenes itself to generate them.
"""
n = int(n)
if not primes:
primes = Eratosthenes(n)
pfacts = []
if n in [0,1]:
print('0 and 1 have no prime factors')
return []
else:
while n != 1:
for p in primes:
d, r = n / p, n % p
c = 0
while not r:
c += 1
n = d
d, r = n / p, n % p
if c:
pfacts.append([p, c])
if powers:
return pfacts
else:
newlist = []
for i in pfacts:
newlist.append(i[0])
return newlist
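# Example: primeFactorize(12) returns [[2, 2], [3, 1]] (i.e. 2^2 * 3^1),
# or [2, 3] when called with powers=False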
def factorial(n):
"""
This function computes the value of n factorial, provided an integer n.
This works well for n <= 997 on my machine due to recursion limits. If more
is needed, math.factorial may be used (for repeated operations, it *should*
be used as it is twice as fast). This function will attempt to coerce its
input to an integer.
"""
n = int(n)
return _factorial(n)
def _factorial(n):
"""The recursive core of the factorial function"""
if n in [0,1]:
return 1
else:
c = _factorial(n - 1)
return (c * n)
| unlicense | 1,077,721,230,768,148,900 | 32.2 | 80 | 0.591491 | false |
deepmind/dm_control | dm_control/mjcf/skin_test.py | 1 | 1410 | # Copyright 2020 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dm_control.mjcf.skin."""
import os
from absl.testing import absltest
from dm_control.mjcf import skin
from dm_control.utils import io as resources
ASSETS_DIR = os.path.join(os.path.dirname(__file__), 'test_assets')
SKIN_FILE_PATH = os.path.join(ASSETS_DIR, 'skins/test_skin.skn')
class FakeMJCFBody:
def __init__(self, full_identifier):
self.full_identifier = full_identifier
class SkinTest(absltest.TestCase):
def test_can_parse_and_write_back(self):
contents = resources.GetResource(SKIN_FILE_PATH, mode='rb')
parsed = skin.parse(contents, body_getter=FakeMJCFBody)
reconstructed = skin.serialize(parsed)
self.assertEqual(reconstructed, contents)
if __name__ == '__main__':
absltest.main()
| apache-2.0 | 1,139,408,963,325,310,500 | 30.333333 | 78 | 0.696454 | false |
googleads/google-ads-python | google/ads/googleads/v6/errors/types/enum_error.py | 1 | 1100 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v6.errors",
marshal="google.ads.googleads.v6",
manifest={"EnumErrorEnum",},
)
class EnumErrorEnum(proto.Message):
r"""Container for enum describing possible enum errors."""
class EnumError(proto.Enum):
r"""Enum describing possible enum errors."""
UNSPECIFIED = 0
UNKNOWN = 1
ENUM_VALUE_NOT_PERMITTED = 3
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -4,778,896,397,649,906,000 | 27.947368 | 74 | 0.701818 | false |
iankirkman/Stat-558 | compare_to_scikit.py | 1 | 1093 | '''
Compare results to scikit-learn's ElasticNetCV function.
Note that the lambda/alpha parameters are different in sklearn's objective function.
Scikit alpha_ = lambda * (2-alpha) / 2
Scikit l1_ratio_ = alpha / (2-alpha)
'''
import numpy as np
import matplotlib.pyplot as plt
from elastic_net import elastic_net  # assumed: elastic_net.py exposes an elastic_net() function
from sklearn.linear_model import ElasticNetCV
# Create standardized, simulated data
X = np.diag(np.random.uniform(5,10,100))
X = (X-np.mean(X))/np.std(X)
Y = np.random.normal(0,1,100)
# Fit the scikit-learn model
sk = ElasticNetCV()
sk.fit(X,Y)
# Print the coefficients
print(sk.coef_)
# Use scikit's grid search results to set our parameters
print(elastic_net(X,Y,lam=sk.alpha_*(1+sk.l1_ratio_),a=2*sk.l1_ratio_/(1+sk.l1_ratio_))[1])
# We see that the results are similar, but not perfectly matched
# Now let's run our grid search on lambda to see if we find a similar optimal parameter
print('Sklearn optimal param: %f'%(sk.alpha_*(1+sk.l1_ratio_)))
opt_lam = elastic_net(X,Y,lam=[.01*i for i in range(1,50)],a=2*sk.l1_ratio_/(1+sk.l1_ratio_))[0]
print('Our optimal param: %f'%opt_lam)
| gpl-3.0 | 1,068,368,223,651,237,500 | 29.361111 | 96 | 0.717292 | false |
DaniLabs/rexploit | rexploit/modules/routers/pirelli/drga225_mac2wpa.py | 1 | 1155 | # coding=utf-8
from rexploit.interfaces.iexploit import IExploit
class Exploit(IExploit):
def __init__(self):
super(Exploit, self).__init__(
name="DRG A225 WiFi router default WPA",
category="generator",
authors={
"Vulnerability discovery": "Muris Kurgas",
"Rexploit module": "Daniel Diez"
},
date="17/02/2014",
cwe="310",
targets=[
"DRG A225"
],
references={
"http://www.remote-exploit.org/content/Pirelli_Discus_DRG_A225_WiFi_router.pdf": "Remote-IExploit"
},
description="Generates WPA key for Pirelli Discus DRG A225 (used e.g. by Croatian T-com)"
)
# This code is work of Ján Trenčanský
def run(self, mac):
mac = mac.upper()
mac = mac.replace("-", "")
mac = mac.replace(":", "")
const = int('D0EC31', 16)
inp = int(mac[6:], 16)
result = (inp - const) // 4
ssid = "Discus--" + mac[6:]
key = "YW0" + str(result)
return "{0} - {1}".format(key, ssid)
| gpl-3.0 | -8,720,566,525,411,298,000 | 31 | 114 | 0.500868 | false |
mpetyx/pyrif | 3rdPartyLibraries/FuXi-master/test/test_superproperty_entailment.py | 1 | 2642 | #!/usr/bin/env python
# encoding: utf-8
import unittest
from pprint import pprint
from cStringIO import StringIO
from rdflib import Graph, Namespace
from FuXi.Rete.RuleStore import SetupRuleStore
from FuXi.Rete.Util import generateTokenSet
from FuXi.DLP.DLNormalization import NormalFormReduction
EX = Namespace('http://example.org/')
EX_TERMS = Namespace('http://example.org/terms/')
expected_triples = [
(EX.john, EX_TERMS.has_sibling, EX.jack),
(EX.john, EX_TERMS.brother, EX.jack),
(EX.jack, EX_TERMS.has_brother, EX.john),
]
ABOX = \
"""
@prefix exterms: <http://example.org/terms/> .
@prefix : <http://example.org/> .
:john exterms:has_brother :jack .
:jack exterms:brother :john .
"""
TBOX = \
"""
@prefix exterms: <http://example.org/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix owl: <http://www.w3.org/2002/07/owl#>.
exterms:Agent
a rdfs:Class .
exterms:Person
a rdfs:Class ;
rdfs:subClassOf exterms:Agent .
exterms:has_sibling
a rdf:Property .
exterms:has_brother
a rdf:Property ;
rdfs:subPropertyOf exterms:has_sibling ;
rdfs:domain exterms:Person ;
rdfs:range exterms:Person .
exterms:brother
a rdf:Property ;
owl:equivalentProperty exterms:has_brother ;
rdfs:domain exterms:Person ;
rdfs:range exterms:Person .
"""
class test_superproperty_entailment(unittest.TestCase):
def setUp(self):
self.rule_store, self.rule_graph, self.network = SetupRuleStore(
makeNetwork=True)
self.tBoxGraph = Graph().parse(StringIO(TBOX), format='n3')
self.aBoxGraph = Graph().parse(StringIO(ABOX), format='n3')
NormalFormReduction(self.tBoxGraph)
def testReasoning(self):
print('setting up DLP...')
self.network.setupDescriptionLogicProgramming(self.tBoxGraph)
pprint(list(self.network.rules))
print(self.network)
print('feeding TBox... ')
self.network.feedFactsToAdd(generateTokenSet(self.tBoxGraph))
print('feeding ABox...')
self.network.feedFactsToAdd(generateTokenSet(self.aBoxGraph))
self.network.inferredFacts.bind('ex', EX)
self.network.inferredFacts.bind('exterms', EX_TERMS)
print(self.network.inferredFacts.serialize(format='n3'))
print('Checking...')
for triple in expected_triples:
self.failUnless(
triple in self.network.inferredFacts, "Missing %s" % (
repr(triple)))
if __name__ == '__main__':
unittest.main()
| mit | 970,624,890,381,832,100 | 27.717391 | 72 | 0.657078 | false |
matthazinski/youtube2internetarchive | youtube2internetarchive.py | 1 | 8885 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (C) 2012 emijrp
# Copyright (C) 2015 Matt Hazinski <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Instructions:
1) Create a subdirectory "download" and add a videostodo.txt file with YouTube links.
2) In the current directory, create a keys.txt file with your IA S3 keys: access key and secret key on two separate lines.
3) Install youtube-dl
4) Modify preferences if desired (see below).
5) Run this script: python youtube2internetarchive.py [english|spanish] [cc|all] [collectionname]
(where param 1 is language for the video dates,
param 2 is a filter to upload only Creative Commons or all
param 3 is the collection name in Internet Archive)
"""
# Keys: http://archive.org/account/s3.php
# Documentation: http://archive.org/help/abouts3.txt
# https://wiki.archive.org/twiki/bin/view/Main/IAS3BulkUploader
import json
import os
import glob
import re
import subprocess
import sys
import time
import unicodedata
import urllib
import internetarchive
num2month = {
'spanish': {'01':'enero', '02': 'febrero', '03':'marzo', '04':'abril', '05':'mayo', '06':'junio', '07':'julio', '08':'agosto','09':'septiembre','10':'octubre', '11':'noviembre', '12':'diciembre'},
'english': {'01':'january', '02': 'february', '03':'march', '04':'april', '05':'may', '06':'june', '07':'july', '08':'august','09':'september','10':'october', '11':'november', '12':'december'},
}
# Start preferences
sizelimit = 0 # file size, if you want to skip those videos greater than this size, 10000*1024*1024 for 10GB. Set to 0 to never skip.
if len(sys.argv) < 4:
print 'python youtube2internetarchive.py [english|spanish] [cc|all] [collectionname]'
sys.exit()
language = sys.argv[1]
if language not in num2month.keys():
print 'Bad language parameter'
sys.exit()
cc = sys.argv[2].lower()
if cc == 'cc':
cc = True
else:
cc = False
collection = sys.argv[3]
encoder = 'ffmpeg' # valid options are: 'ffmpeg', 'avconv', 'none'
# youtube-dl muxes bestvideo+bestaudio with
# ffmpeg/avconv, else just does 'best' quality.
subject_contains_collection = False
id_contains_collection = False
# End preferences
accesskey = open('keys.txt', 'r').readlines()[0].strip()
secretkey = open('keys.txt', 'r').readlines()[1].strip()
videotodourls = [l.strip() for l in open('download/videostodo.txt', 'r').readlines()]
def quote(t):
return re.sub(ur"'", ur"\'", t)
def removeoddchars(s):
#http://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string
s = ''.join((c for c in unicodedata.normalize('NFD', u'%s' % s) if unicodedata.category(c) != 'Mn'))
s = re.sub(ur"(?im)[^a-z0-9_\.-]", ur"", s) # greek chars and others cause errors in item name if not removed
return s
def updatetodo(l):
f = open('videostodo.txt', 'w')
f.write('\n'.join(l))
f.close()
while len(videotodourls) > 0:
os.chdir('download')
videotodourl = videotodourls[0]
videohtml = unicode(urllib.urlopen(videotodourl).read(), 'utf-8')
videoid = videotodourl.split('watch?v=')[1]
#check if it is on IA
searchurl = 'http://archive.org/search.php?query=%s' % (re.sub(ur"(?im)^-+", ur"", videoid))
rawsearch = unicode(urllib.urlopen(searchurl).read(), 'utf-8', errors='replace')
print searchurl
while not re.search(ur"\d+ through \d+", rawsearch): #error in IA search engine? retry....
print 'Error while searching in IA... waiting some seconds and retrying'
time.sleep(15)
rawsearch = unicode(urllib.urlopen(searchurl).read(), 'utf-8')
if not re.search(ur"1 through 0 of <b>0</b>", rawsearch):
print "It is on Internet Archive http://archive.org/search.php?query=%s" % videoid
videotodourls.remove(videotodourl)
updatetodo(videotodourls)
os.chdir('..')
continue
#verify license in youtube
if cc and not re.search(ur"(?i)/t/creative_commons", videohtml):
print "It is not Creative Commons", videotodourl
videotodourls.remove(videotodourl)
updatetodo(videotodourls)
os.chdir('..')
continue
#get tags
tags = re.findall(ur"search=tag\">([^<]+)</a>", videohtml)
tags = [quote(tag) for tag in tags]
if encoder == 'avconv':
os.system('youtube-dl --title --continue --retries 4 --write-info-json --write-description --write-thumbnail --write-annotations --all-subs --ignore-errors --format bestvideo+bestaudio/best %s' % (videotodourl))
elif encoder == 'ffmpeg':
os.system('youtube-dl --title --continue --retries 4 --write-info-json --write-description --write-thumbnail --write-annotations --all-subs --ignore-errors --prefer-ffmpeg --format bestvideo+bestaudio/best %s' % (videotodourl))
else:
os.system('youtube-dl --title --continue --retries 4 --write-info-json --write-description --write-thumbnail --write-annotations --all-subs --ignore-errors --format best %s' % (videotodourl)) #mp4 (18)
videofilename = ''
jsonfilename = ''
for dirname, dirnames, filenames in os.walk('.'):
if dirname == '.':
for f in filenames:
if f.endswith('%s.mp4' % videoid):
videofilename = unicode(f, 'utf-8')
break #stop searching, do not explore subdirectories
if videofilename:
videobasename = os.path.splitext(videofilename)[0]
jsonfilename = '%s.info.json' % (videobasename)
if sizelimit > 0:
if os.path.getsize(videofilename) > sizelimit:
print 'Video is greater than', sizelimit, 'bytes'
print 'Skipping...'
videotodourls.remove(videotodourl)
updatetodo(videotodourls)
os.chdir('..')
continue
else:
print 'No video downloaded, an error occurred'
videotodourls.remove(videotodourl)
updatetodo(videotodourls)
os.chdir('..')
continue
json_ = json.loads(unicode(open(jsonfilename, 'r').read(), 'utf-8'))
upload_date = json_['upload_date'][:4] + '-' + json_['upload_date'][4:6] + '-' + json_['upload_date'][6:8]
upload_year = json_['upload_date'][:4]
upload_month = num2month[language][json_['upload_date'][4:6]]
description = json_['description']
uploader = json_['uploader']
title = re.sub(u"%", u"/", json_['title']) # 6%7
if id_contains_collection:
itemname = removeoddchars('%s-%s' % (collection, videofilename.split(videoid)[0][:-1])) # [:-1] to remove the -
else:
itemname = removeoddchars(videofilename.split(videoid)[0][:-1]) # [:-1] to remove the -
itemname = itemname[:88] + '-' + videoid
videofilename_ = removeoddchars(videofilename)
if not re.search(ur"Item cannot be found", unicode(urllib.urlopen('http://archive.org/details/%s' % (itemname)).read(), 'utf-8')):
print 'That item exists at Internet Archive', 'http://archive.org/details/%s' % (itemname)
videotodourls.remove(videotodourl)
updatetodo(videotodourls)
os.chdir('..')
continue
if subject_contains_collection:
subject = (u'; '.join([collection, upload_month, upload_year] + tags))
else:
subject = (u'; '.join([upload_month, upload_year] + tags))
item = internetarchive.get_item(itemname)
md = dict(mediatype='movies', creator=uploader, language=language, collection=collection, title=title, description=u'{0} <br/><br/>Source: <a href="{1}">{2}</a><br/>Uploader: <a href="http://www.youtube.com/user/{3}">{4}</a><br/>Upload date: {5}'.format(description, videotodourl, videotodourl, uploader, uploader, upload_date), date=upload_date, year=upload_year, subject=subject, originalurl=videotodourl, licenseurl=(cc and 'http://creativecommons.org/licenses/by/3.0/' or ''))
item.upload(glob.glob(videobasename + '*'), metadata=md, access_key=accesskey, secret_key=secretkey)
print 'You can browse it in http://archive.org/details/%s' % (itemname)
videotodourls.remove(videotodourl)
updatetodo(videotodourls)
os.remove(videofilename)
os.remove(jsonfilename)
os.chdir('..')
| gpl-3.0 | 4,083,559,178,138,422,300 | 43.873737 | 484 | 0.650647 | false |
ptphp/PyLib | src/dev/spider/config.py | 1 | 7777 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import sys,hashlib,os
#add
isDEV=0
def getDefaultVal(flg):
dict={}
if str(flg)=="1":
dict['house_flag']=1
dict['borough_name']=""
dict['house_addr']=""
dict['house_title']=""
dict['house_city']=""
dict['house_region']=""
dict['house_section']=""
dict['house_type']=0
dict['house_price']=0
dict['house_area']=0
dict['house_room']=0
dict['house_hall']=0
dict['house_toilet']=0
dict['house_veranda']=0
dict['house_topfloor']=0
dict['house_floor']=0
dict['house_age']=0
dict['house_toward']=0
dict['house_fitment']=0
dict['house_feature']=""
dict['house_belong']=0
dict['house_desc']=""
dict['owner_name']=""
dict['owner_phone']=""
dict['owner_phone_pic']=""
dict['house_posttime']=""
elif str(flg)=="2":
dict['house_flag']=2
dict['borough_name']=""
dict['house_addr']=""
dict['house_title']=""
dict['house_city']=""
dict['house_region']=""
dict['house_section']=""
dict['house_type']=0
dict['house_price']=0
dict['house_area']=0
dict['house_deposit']=0
dict['house_room']=0
dict['house_hall']=0
dict['house_toilet']=0
dict['house_veranda']=0
dict['house_topfloor']=0
dict['house_floor']=0
dict['house_age']=0
dict['house_toward']=0
dict['house_fitment']=0
dict['house_feature']=""
dict['house_desc']=""
dict['owner_name']=""
dict['owner_phone']=""
dict['owner_phone_pic']=""
dict['house_posttime']=""
elif str(flg)=="3":
dict['house_flag']=3
dict['borough_name']=""
dict['house_addr']=""
dict['house_title']=""
dict['house_city']=""
dict['house_region']=""
dict['house_section']=""
dict['house_type']=0
dict['house_price']=0
dict['house_price_max']=0
dict['house_area']=0
dict['house_area_max']=0
dict['house_room']=0
dict['house_hall']=0
dict['house_toilet']=0
dict['house_veranda']=0
dict['house_topfloor']=0
dict['house_floor']=0
dict['house_age']=0
dict['house_toward']=0
dict['house_fitment']=0
dict['house_feature']=""
dict['house_belong']=0
dict['house_desc']=""
dict['owner_name']=""
dict['owner_phone']=""
dict['owner_phone_pic']=""
dict['house_posttime']=""
else:
dict['house_flag']=4
dict['borough_name']=""
dict['house_addr']=""
dict['house_title']=""
dict['house_city']=""
dict['house_region']=""
dict['house_section']=""
dict['house_type'] =""
dict['house_price']=0
dict['house_price_max']=0
dict['house_area']=0
dict['house_area_max']=0
dict['house_deposit']=""
dict['house_room']=""
dict['house_hall']=""
dict['house_toilet']=""
dict['house_veranda']=""
dict['house_topfloor']=0
dict['house_floor']=0
dict['house_age']=0
dict['house_toward']=0
dict['house_fitment']=0
dict['house_feature'] =""
dict['house_desc'] =""
dict['owner_name']=""
dict['owner_phone']=""
dict['owner_phone_pic']=""
dict['house_posttime']=""
return dict
#add end
#citylist_58=["su","cz","sh","wx","nb","nj","hz","zz"]
citylist_58=["su"]
#citylist_gj=["su","changzhou","sh","wx","nb","nj","hz","zz"]
citylist_gj=["su"]
#citylist_sf=["suzhou","ks","cz","sh","wuxi","nb","nanjing","hz","zz"]
citylist_sf=["ks"]
citynameDict_sf11 ={
#add
'su':u'苏州',
'suzhou':u'苏州',
'ks':u'昆山',
'cz':u'常州',
'sh':u'上海',
'wuxi':u'无锡',
'nb':u'宁波',
'nj':u'南京',
'hz':u'杭州',
'zz':u'郑州',
'nanjing':u'南京',
}
citynameDict_sf ={
'ks':u'昆山',
}
reload(sys)
sys.setdefaultencoding('utf-8') #@UndefinedVariable
def checkPath(f1,f2,var):
hash = hashlib.md5(var).hexdigest().upper() #@UndefinedVariable
h1 = str(hash[0:2])+"\\"
h2 = str(hash[2:4])+"\\"
h3 = str(hash[4:6])+"\\"
h4 = str(hash[6:])+"\\"
path = f1+f2+h1+h2+h3+h4
if os.path.isdir(path):
return True
else:
return False
def makePath(f1,f2,var):
hash = hashlib.md5(var).hexdigest().upper() #@UndefinedVariable
h1 = str(hash[0:2])+"\\"
h2 = str(hash[2:4])+"\\"
h3 = str(hash[4:6])+"\\"
h4 = str(hash[6:])+"\\"
path = f1+f2+h1+h2+h3+h4
# print path
if not os.path.isdir(path):
os.makedirs(path)
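# Illustrative note (assumed arguments): checkPath/makePath shard files by the
# MD5 of `var`, e.g. makePath('D:\\cache\\', 'img\\', '12345') creates a
# directory tree like D:\cache\img\<2 hex>\<2 hex>\<2 hex>\<26 hex>\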
def toward(str):
if not str:
return 6
dict = {
5 : '东西',
6 : '南北',
7 : '东南',
8 : '西南',
9 : '东北',
10 : '西北',
1 :'东',
2 : '南',
3 : '西',
4 : '北',
}
res = []
for v in dict:
if str.find(dict[v])!=-1:
res.append(v)
if res:
if len(res)==1:
return res[0]
else:
return res[len(res)-1]
def housetype_s(str):
if not str:
return 3
dict ={
2 : '平房',
3 : '普通住宅',
7 : '商住两用',
4 : '公寓',
5 : '别墅',
6 : '其他',
}
res =''
for v in dict:
if str.find(dict[v])!=-1:
res+='%d,' % v
return res
def house_room_s(str):
if not str:
return 2
dict ={
1 : '一居',
2 : '二居',
3 : '三居',
4 : '四居',
}
res =''
for v in dict:
if str.find(dict[v])!=-1:
res+='%d,' % v
return res
def house_room_s1(str):
if str=='1室':
return 1
if str=='2室':
return 2
if str=='3室':
return 3
if str=='4室':
return 4
return 5
def housetype(str):
if not str:
return 6
dict ={
2 : '平房',
3 : '普通住宅',
7 : '商住两用',
4 : '公寓',
5 : '别墅',
6 : '其他',
}
for v in dict:
if str.find(dict[v])!=-1:
return v
else:
return 6
def payType(str):
if str=='季':
return 3
if str=='半年':
return 6
if str=='年':
return 12
def fitment(str):
if not str:
return 2
dict ={
1 : '毛坯',
2 : '中等装修',
3 : '精装修',
4 : '豪华装修',
}
for v in dict:
if str.find(dict[v])!=-1:
return v
else:
return 2
def fitment_s(str):
if not str:
return 2
dict ={
1 : '毛坯',
2 : '中等装修',
3 : '精装修',
4 : '豪华装修',
}
res =''
for v in dict:
if str.find(dict[v])!=-1:
res+='%d,' % v
return res
def belong(str):
if not str:
return 0
dict ={
1 : '商品房',
2 : '经济适用房',
3 : '公房',
4 : '使用权',
}
for v in dict:
if str.find(dict[v])!=-1:
return v
else:
return 0
def install(str):
if not str:
return 0
dict ={
6 : '床',
8 : '热水器',
9 : ' 洗衣机',
10 : ' 空调',
11 : ' 冰箱',
12 : ' 电视机',
13 : '宽带',
}
res =''
for v in dict:
if str.find(dict[v])!=-1:
res+='%d,' % v
return res
def deposit(str):
if not str:
return 0
dict ={
2 : '面议',
1 : '押一付三',
3 : '押一付一',
6 : '半年付',
7 : '年付',
}
for v in dict:
if str.find(dict[v])!=-1:
return v
else:
return 2
| apache-2.0 | 692,675,300,925,219,100 | 20.752187 | 70 | 0.456373 | false |
Groestlcoin/electrumx-grs | electrumx_rpc.py | 1 | 2898 | #!/usr/bin/env python3
#
# Copyright (c) 2016, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Script to send RPC commands to a running ElectrumX server.'''
import argparse
import asyncio
import json
from functools import partial
from os import environ
from lib.jsonrpc import JSONSession, JSONRPCv2
from server.controller import Controller
class RPCClient(JSONSession):
def __init__(self):
super().__init__(version=JSONRPCv2)
self.max_send = 0
self.max_buffer_size = 5*10**6
async def wait_for_response(self):
await self.items_event.wait()
await self.process_pending_items()
def send_rpc_request(self, method, params):
handler = partial(self.handle_response, method)
self.send_request(handler, method, params)
def handle_response(self, method, result, error):
if method in ('groups', 'peers', 'sessions') and not error:
lines_func = getattr(Controller, '{}_text_lines'.format(method))
for line in lines_func(result):
print(line)
elif error:
print('error: {} (code {:d})'
.format(error['message'], error['code']))
else:
print(json.dumps(result, indent=4, sort_keys=True))
def rpc_send_and_wait(port, method, params, timeout=15):
loop = asyncio.get_event_loop()
coro = loop.create_connection(RPCClient, 'localhost', port)
try:
transport, rpc_client = loop.run_until_complete(coro)
rpc_client.send_rpc_request(method, params)
try:
coro = rpc_client.wait_for_response()
loop.run_until_complete(asyncio.wait_for(coro, timeout))
except asyncio.TimeoutError:
print('request timed out after {}s'.format(timeout))
except OSError:
print('cannot connect - is ElectrumX-GRS catching up, not running, or '
'is {:d} the wrong RPC port?'.format(port))
finally:
loop.close()
def main():
'''Send the RPC command to the server and print the result.'''
parser = argparse.ArgumentParser('Send electrumx-GRS an RPC command')
parser.add_argument('-p', '--port', metavar='port_num', type=int,
help='RPC port number')
parser.add_argument('command', nargs=1, default=[],
help='command to send')
parser.add_argument('param', nargs='*', default=[],
help='params to send')
args = parser.parse_args()
port = args.port
if port is None:
port = int(environ.get('RPC_PORT', 8000))
# Get the RPC request.
method = args.command[0]
params = args.param
if method in ('log', 'disconnect'):
params = [params]
rpc_send_and_wait(port, method, params)
if __name__ == '__main__':
main()
| mit | -8,563,575,540,143,616,000 | 30.16129 | 79 | 0.615252 | false |
Endeios/myPyQtRef | AnApp/main.py | 1 | 1964 | '''
Created on 09/ott/2013
@author: bveronesi
'''
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import imp
import sys
import functools
import os
class AnApp(QWidget):
mySignal = pyqtSignal()
def __init__(self,mainapp,*args):
self.loaded_module = None
self.app = mainapp
self.app.ende_map = dict()
QWidget.__init__(self,*args)
self.a_label = QLabel("Testing Signals")
self.a_button = QPushButton("TEST !")
self.load_button = QPushButton("Load Module")
self.test_emission_button = QPushButton("EMIT")
load_ext_module = functools.partial(self.load_module,"SelfRegistering")
emission = functools.partial(self.mySignal.emit)
self.load_button.clicked.connect(load_ext_module)
self.a_button.clicked.connect(self.test_func)
self.test_emission_button.clicked.connect(emission)
layout = QVBoxLayout()
layout.addWidget(self.a_label)
layout.addWidget(self.a_button)
layout.addWidget(self.load_button)
layout.addWidget(self.test_emission_button)
self.setLayout(layout)
self.app.ende_map["test"] = self.mySignal
def load_module(self,module_name):
path = list()
cwd = os.getcwd()
modulePath = cwd+os.sep+"plugins"
path.append(modulePath)
foundModule= imp.find_module(module_name, path)
self.loaded_module = imp.load_module("test",*foundModule)
self.loaded_module.register(self.app.ende_map)
def test_func(self):
if self.loaded_module is not None:
self.loaded_module.test("ciao",10)
self.mySignal.emit()
else:
print("No loaded module: "+str(self.loaded_module))
print(self.app)
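# Note (inferred from the calls above; the plugin itself is not shown here):
# plugins/SelfRegistering.py is expected to provide register(signal_map) and
# test(text, number) functions for load_module()/test_func() to work.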
if __name__ == '__main__':
app = QApplication(sys.argv)
w = AnApp(app)
w.show()
sys.exit(app.exec_()) | gpl-2.0 | 8,724,718,509,633,793,000 | 28.772727 | 79 | 0.602342 | false |
rporter/verilog_integration | test/optimize.py | 1 | 3768 | # Copyright (c) 2012, 2013 Rich Porter - see LICENSE for further details
import re
import coverage
import database
import mdb
import message
################################################################################
# ids can be given in the form range-or-id,+
# where range-or-id is [0-9]+(..[0-9]+)
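# e.g. "-r 3 -r 10..12" (or "-r 3,10..12") selects regression ids 3, 10, 11 and 12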
parser = message.reportOptionParser()
parser.add_option('', '--order', help='order sequence '+str(database.optimize.options.keys()), default=[], action='append', choices=database.optimize.options.keys())
parser.add_option('-r', '--regression', default=[], help='Regression root id', action='append')
parser.add_option('', '--robust', default=False, help='Attempt to make test set robust', action='store_true')
parser.add_option('-t', '--test', default=[], help='Test id', action='append')
parser.add_option('', '--threshold', default=0, help='Coverage threshold for "incr" order')
parser.add_option('-x', '--xml', help='xml out', default='optimize_%d.xml')
options, values = parser.parse_args()
################################################################################
mdb.db.connection.set_default_db(db='../db/mdb.db')
mdb_conn=mdb.mdb('optimize', activity='optimizing')
################################################################################
# generate lists
def to_list(args, values=[]) :
def ignoring(arg) :
message.warning('Ignoring %(arg)s', arg=arg)
def cast(x) :
try :
return int(x)
except :
ignoring(x)
return None
if isinstance(args, list) :
return to_list(args[1:], to_list(args[0], values)) if args else values
_args = args.split(',')
if len(_args) > 1 :
return to_list(_args, values)
_match = re.match('(?P<from>\d+)\.{2,3}(?P<to>\d+)', args)
if _match :
_to, _from = cast(_match.group('to')), cast(_match.group('from'))
if _from > _to : _to, _from = _from, _to
if _to is not None and _from is not None :
return range(_from, _to + 1) + values
ignoring(args)
return values
if cast(args) :
return [cast(args), ] + values
return values
################################################################################
if not options.order :
options.order = ['cvg', ]
if options.regression is None :
# presume leftover args are ids
options.regression = values
regressions = to_list(options.regression)
tests = to_list(options.test)
if not regressions and not tests :
message.fatal('No invocations provided')
message.information('optimizing begins')
################################################################################
coverage.messages.hush_creation()
optimize_opts = {'threshold' : options.threshold, 'robust' : options.robust}
def iteration(ordering, iter_cnt=1, xml=None) :
# use current optimization group if this is not first iteration
order = ordering[0]
message.note('Iteration %(iter_cnt)d uses "%(order)s"', **locals())
if xml :
opt = database.optimize.options[order](xml=xml, **optimize_opts)
else :
opt = database.optimize.options[order](regressions, tests, **optimize_opts)
run = opt.run()
optimize_opts['previous'] = opt
if len(ordering) > 1 :
return iteration(ordering[1:], iter_cnt+1, run)
# always return last optimization run
return opt, run
opt, xml = iteration(options.order)
# annotate optimized coverage result to this invocation
opt.insert(mdb_conn.log_id)
################################################################################
if options.xml :
try :
outfile = options.xml % (regressions[0] if regressions else tests[0])
except :
outfile = options.xml
message.information('dumping optimize to ' + outfile)
with open(outfile, 'w') as desc :
xml.write(desc)
message.success('optimizing ends')
mdb.finalize_all()
| mit | -1,550,146,424,974,501,000 | 32.945946 | 167 | 0.584926 | false |
botswana-harvard/ambition-subject | ambition_subject/constants.py | 1 | 1107 | from ambition_prn.constants import AZT_3TC_with_ATZ_r_or_Lopinavir_r
from ambition_prn.constants import AZT_3TC_with_EFV_NVP_or_DTG
from ambition_prn.constants import TDF_3TC_FTC_with_ATZ_r_or_Lopinavir_r
from ambition_prn.constants import TDF_3TC_FTC_with_EFV_or_NVP
from ambition_validators import HEADACHE, VISUAL_LOSS
from ambition_visit_schedule import DAY1, DAY3, DAY5, DAY7, DAY14, DAY12, DAY10
from ambition_visit_schedule import WEEK16, WEEK10, WEEK8, WEEK6, WEEK4
ALREADY_REPORTED = 'Already reported'
AMS_A4 = 'AMS_A'
AMS_N3 = 'AMS_N'
AWAITING_RESULTS = 'awaiting_results'
CEREBRAL_OEDEMA = 'cerebral_oedema'
CXR_DESCRIPTION = 'Some CXR description'
DEAD = 'dead'
DIFFUSE = 'diffuse'
ECOLI = 'e_coli'
FOCAL_NEUROLOGIC_DEFICIT = 'focal_neurologic_deficit'
HISTOPATHOLOGY_REPORT = 'Some long report'
INFARCTS = 'infarcts'
INFILTRATE_LOCATION = 'infiltrate_location'
NEW_NEUROLOGY = 'new_neurology'
NVP = 'NVP'
POSITIVE = 'positive'
RESULTS_UNKNOWN = 'results_unknown'
ROUTINE_APPT = 'routine'
THERAPEUTIC_PL = 'therapeutic_lp'
THREE_DOSES = 'three_doses'
TWO_DOSES = 'two_doses'
VIBRIO = 'vibrio'
| gpl-3.0 | -505,848,286,590,011,140 | 35.9 | 79 | 0.766938 | false |
nukru/Swarm-Surveys | app/surveys/forms.py | 1 | 12147 | from flask.ext.wtf import Form
from flask.ext.babel import gettext
from wtforms import TextField, BooleanField, RadioField
from wtforms.validators import Required, Regexp, Optional
from wtforms import IntegerField, HiddenField
from wtforms import ValidationError
from wtforms.validators import StopValidation
from wtforms import SelectField
from app.models import Question, QuestionChoice, QuestionYN, QuestionText,Answer, QuestionLikertScale
from flask import g, flash
from app import db
from utiles import generate_answer
class LikertField(RadioField):
'''my implement of likert field'''
def __init__(self, label='', validators=None, labelMin="", labelMax="", **kwargs):
self.labelMin=labelMin
self.labelMax=labelMax
super(LikertField, self).__init__(label, validators, **kwargs)
def __call__(self, **kwargs):
'''render likert as table
'''
from wtforms.widgets.core import html_params, HTMLString
kwargs.setdefault('id', self.id)
kwargs.setdefault('class_', " table table-condensed likert")
html = ['<%s %s>' % ("table", html_params(**kwargs))]
html.append('<tr>')
html.append('<td></td>')
for subfield in self:
html.append('<td>%s</td>' % (subfield.label))
html.append('</tr>')
html.append('<tr>')
html.append('<td class="type-info">%s</td>' % (self.labelMin))
for subfield in self:
html.append('<td>%s</td>' % (subfield()))
html.append('<td class="type-info">%s</td>' % (self.labelMax))
html.append('</tr>')
html.append('</%s>' % "table")
return HTMLString(''.join(html))
        # return super(RadioField, self).__call__(**kwargs)
def __call1__(self, **kwargs):
'''render likert as list
'''
from wtforms.widgets.core import html_params, HTMLString
kwargs.setdefault('id', self.id)
kwargs.setdefault('class_', "likert")
html = ['<%s %s>' % (self.widget.html_tag, html_params(**kwargs))]
html.append('<li>%s</li>' % (self.labelMin))
for subfield in self:
if self.widget.prefix_label:
html.append('<li>%s %s</li>' % (subfield.label, subfield()))
else:
html.append('<li>%s %s</li>' % (subfield(), subfield.label))
html.append('<li>%s</li>' % (self.labelMax))
html.append('</%s>' % self.widget.html_tag)
return HTMLString(''.join(html))
# return super(RadioField, self).__call__(**kwargs)
class MyRadioField(RadioField):
def __init__(self, label='', validators=None, horizontal=False,**kwargs):
self.horizontal=horizontal
# kwargs.setdefault('coerce', "int")
super(MyRadioField, self).__init__(label, validators, **kwargs)
def __call__(self, **kwargs):
if self.horizontal:
kwargs.setdefault('class_', "radioField_horizontal")
self.widget.prefix_label=True
else:
kwargs.setdefault('class_', "radio")
self.widget.prefix_label=False
return super(MyRadioField, self).__call__(**kwargs)
class CheckAnswerExpected(object):
    '''check if the answer is the expected one
'''
def __init__(self, message=None):
if not message:
self.message = gettext("wrong answer")
else: # pragma: no cover
self.message = message
self.message_continue = gettext("wrong answer, you can continue")
def __call__(self, form, field):
question = Question.query.get(field.name[1:])
answer = generate_answer(question, form, g.user)
db.session.add(answer)
db.session.commit()
if not answer.answerAttempt():
if answer.isMoreAttempt():
raise ValidationError(self.message)
else:
flash(self.message_continue)
class CheckSubquestion(object):
'''check whether to answer the question or not
'''
def __call__(self, form, field):
question = Question.query.get(field.name[1:])
data = form["c"+str(question.parent.id)].data
if isinstance (question.parent,QuestionYN):
if data.lower()==question.condition.value.lower():
pass
# raise ValidationError('This field is required.')
else:
# nothing to check
field.errors[:] = []
raise StopValidation()
if isinstance (question.parent,QuestionText) or \
isinstance(question.parent,QuestionChoice):
if question.condition.operation=="<":
if data<question.condition.value:
pass
else:
# nothing to check
field.errors[:] = []
raise StopValidation()
if question.condition.operation=="==":
if data==question.condition.value:
pass
else:
# nothing to check
field.errors[:] = []
raise StopValidation()
if question.condition.operation==">":
if int(data)>int(question.condition.value):
pass
else:
# nothing to check
field.errors[:] = []
raise StopValidation()
class RequiredSelectField(object):
    ''' check if there is an answer
'''
def __init__(self, message=None):
if not message:
self.message = gettext("Option not valid")
else: # pragma: no cover
self.message = message
def __call__(self, form, field):
if field.data=="":
raise ValidationError(gettext("Option not valid"))
def generate_form(questions):
'''dynamically generates the forms for surveys
'''
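    # The form is built dynamically: one field per question is attached to
    # AnswerForm as a class attribute named "c<question.id>", and WTForms
    # turns those attributes into bound fields when the form is instantiated.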
def frange(x, y, jump):
        '''implementation of range for floats:
'''
while x < y:
yield '{0:g}'.format(float(x))
x += jump
class AnswerForm(Form):
time = HiddenField('time',default=0)
for question in questions:
setattr(AnswerForm,"globalTimec"+str(question.id),HiddenField('globalTimec'+str(question.id),default=0))
setattr(AnswerForm,"differentialTimec"+str(question.id),HiddenField('differentialTimec'+str(question.id),default=0))
        # keys are prefixed with "c" so that they are valid identifiers
        # First element of each choice must be a string, otherwise choice validation fails
if isinstance (question,QuestionYN):
choices = [('Yes',gettext('Yes')),('No',gettext('No'))]
if question.isSubquestion:
setattr(AnswerForm,"c"+str(question.id),MyRadioField('Answer',
choices = choices,validators = [CheckSubquestion()]))
else:
if question.isExpectedAnswer():
setattr(AnswerForm,"c"+str(question.id),MyRadioField('Answer',
choices = choices, validators = [Required(),CheckAnswerExpected()]))
elif question.required:
setattr(AnswerForm,"c"+str(question.id),MyRadioField('Answer',
choices = choices,validators = [Required()]))
else:
setattr(AnswerForm,"c"+str(question.id),MyRadioField('Answer',
choices = choices,validators = [Optional()]))
if isinstance (question,QuestionText):
if question.isSubquestion:
setattr(AnswerForm,"c"+str(question.id),IntegerField('Answer',
validators = [CheckSubquestion()]))
else:
if question.required:
if question.regularExpression !="":
if question.isExpectedAnswer():
setattr(AnswerForm,"c"+str(question.id),TextField('Answer',
validators=[Required(), Regexp(question.regularExpression,0,question.errorMessage),
CheckAnswerExpected()]))
else:
setattr(AnswerForm,"c"+str(question.id),TextField('Answer',
validators=[Required(), Regexp(question.regularExpression,0,question.errorMessage)]))
elif question.isNumber:
if question.isExpectedAnswer():
setattr(AnswerForm,"c"+str(question.id),IntegerField('Answer',validators = [Required(),
CheckAnswerExpected()]))
else:
setattr(AnswerForm,"c"+str(question.id),IntegerField('Answer'))
else:
setattr(AnswerForm,"c"+str(question.id),TextField('Answer',validators = [Required()]))
else:
if question.regularExpression !="":
setattr(AnswerForm,"c"+str(question.id),TextField('Answer',
validators=[Optional(), Regexp(question.regularExpression,0,question.errorMessage)]))
elif question.isNumber:
setattr(AnswerForm,"c"+str(question.id),IntegerField('Answer',validators = [Optional()]))
else:
setattr(AnswerForm,"c"+str(question.id),TextField('Answer',validators = [Optional()]))
if isinstance (question,QuestionChoice):
if question.is_range:
list = [(str(index),choice) for index,choice in enumerate(
frange(question.range_min,
question.range_max+question.range_step,
question.range_step))]
else:
list = [(str(index),choice) for index, choice in enumerate(question.choices)]
if question.render == "select":
list.insert(0,("",""))
if question.isSubquestion:
if question.render=="select":
setattr(AnswerForm,"c"+str(question.id),SelectField('Answer',
choices = list,validators = [RequiredSelectField(),CheckSubquestion()]))
else:
setattr(AnswerForm,"c"+str(question.id),MyRadioField('Answer',
horizontal=question.render=="horizontal",
choices = list,validators = [CheckSubquestion()]))
else:
if question.required:
if question.render =="select":
setattr(AnswerForm,"c"+str(question.id),SelectField('Answer',
choices = list,validators = [RequiredSelectField()]))
else:
setattr(AnswerForm,"c"+str(question.id),MyRadioField('Answer',
horizontal=question.render=="horizontal",
choices = list,validators = [Required()]))
else:
if question.render =="select":
setattr(AnswerForm,"c"+str(question.id),SelectField('Answer',
choices = list,validators = [RequiredSelectField()]))
else:
setattr(AnswerForm,"c"+str(question.id),MyRadioField('Answer',
horizontal=question.render=="horizontal",
choices = list,validators = [Optional()]))
if isinstance (question, QuestionLikertScale):
list = [(str(index),choice) for index,choice in enumerate(range(question.minLikert,question.maxLikert+1))]
if question.required:
setattr(AnswerForm,"c"+str(question.id),LikertField('Answer',
choices = list,
labelMin= question.labelMin,
labelMax=question.labelMax,
validators = [Required()]))
else:
setattr(AnswerForm,"c"+str(question.id),RadioField('Answer',
choices = list,validators = [Optional()]))
form = AnswerForm()
return form | apache-2.0 | 3,480,353,636,565,798,000 | 44.669173 | 124 | 0.54639 | false |
pavanky/arrayfire-python | arrayfire/tests/simple/algorithm.py | 1 | 2636 | #!/usr/bin/python
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
import arrayfire as af
from . import _util
def simple_algorithm(verbose = False):
display_func = _util.display_func(verbose)
print_func = _util.print_func(verbose)
a = af.randu(3, 3)
k = af.constant(1, 3, 3, dtype=af.Dtype.u32)
af.eval(k)
print_func(af.sum(a), af.product(a), af.min(a), af.max(a),
af.count(a), af.any_true(a), af.all_true(a))
display_func(af.sum(a, 0))
display_func(af.sum(a, 1))
display_func(af.product(a, 0))
display_func(af.product(a, 1))
display_func(af.min(a, 0))
display_func(af.min(a, 1))
display_func(af.max(a, 0))
display_func(af.max(a, 1))
display_func(af.count(a, 0))
display_func(af.count(a, 1))
display_func(af.any_true(a, 0))
display_func(af.any_true(a, 1))
display_func(af.all_true(a, 0))
display_func(af.all_true(a, 1))
display_func(af.accum(a, 0))
display_func(af.accum(a, 1))
display_func(af.scan(a, 0, af.BINARYOP.ADD))
display_func(af.scan(a, 1, af.BINARYOP.MAX))
display_func(af.scan_by_key(k, a, 0, af.BINARYOP.ADD))
display_func(af.scan_by_key(k, a, 1, af.BINARYOP.MAX))
display_func(af.sort(a, is_ascending=True))
display_func(af.sort(a, is_ascending=False))
b = (a > 0.1) * a
c = (a > 0.4) * a
d = b / c
print_func(af.sum(d));
print_func(af.sum(d, nan_val=0.0));
display_func(af.sum(d, dim=0, nan_val=0.0));
val,idx = af.sort_index(a, is_ascending=True)
display_func(val)
display_func(idx)
val,idx = af.sort_index(a, is_ascending=False)
display_func(val)
display_func(idx)
b = af.randu(3,3)
keys,vals = af.sort_by_key(a, b, is_ascending=True)
display_func(keys)
display_func(vals)
keys,vals = af.sort_by_key(a, b, is_ascending=False)
display_func(keys)
display_func(vals)
c = af.randu(5,1)
d = af.randu(5,1)
cc = af.set_unique(c, is_sorted=False)
dd = af.set_unique(af.sort(d), is_sorted=True)
display_func(cc)
display_func(dd)
display_func(af.set_union(cc, dd, is_unique=True))
display_func(af.set_union(cc, dd, is_unique=False))
display_func(af.set_intersect(cc, cc, is_unique=True))
display_func(af.set_intersect(cc, cc, is_unique=False))
_util.tests['algorithm'] = simple_algorithm
| bsd-3-clause | 4,533,998,014,543,162,400 | 27.344086 | 62 | 0.597876 | false |
savant-nz/carbon | Scripts/SCons/iOS.sconscript.py | 1 | 3869 | #
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not
# distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
import glob
import os
import sys
Import('*')
vars = Variables()
vars.AddVariables(
('architecture', 'Sets the target build architecture, must be ARM64 or x64. This controls whether the '
'build targets iOS devices (ARM64) or the iOS simulator (x64).', 'ARM64')
)
Help(vars.GenerateHelpText(Environment()))
# Get target architecture
architecture = ARGUMENTS.get('architecture', 'ARM64')
if architecture not in ['ARM64', 'x64']:
print('Error: invalid build architecture')
Exit(1)
xcodePath = '/Applications/Xcode.app/Contents/Developer'
# Get path to the SDK and associated flags
if architecture == 'ARM64':
sdkPath = xcodePath + '/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk'
versionMinFlag = '-mios-version-min='
else:
sdkPath = xcodePath + '/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator.sdk'
versionMinFlag = '-mios-simulator-version-min='
if not os.path.exists(sdkPath):
    print('Error: could not find the iOS SDK, check that Xcode is installed and up to date')
Exit(1)
# Require iOS 12.0 as the minimum version
versionMinFlag += '12.0'
# Create build environment
env = SConscript('Compilers/Clang.sconscript.py')
sharedFlags = ['-arch', {'x64': 'x86_64', 'ARM64': 'arm64'}[architecture],
'-isysroot', sdkPath, versionMinFlag]
env['ASFLAGS'] = sharedFlags
env['CCFLAGS'] += sharedFlags + ['-fobjc-arc', '-fobjc-legacy-dispatch']
env['LINKFLAGS'] += sharedFlags
env['AR'] = 'xcrun libtool'
env['ARCOM'] = '$AR $ARFLAGS $_LIBDIRFLAGS $_LIBFLAGS -o $TARGET $SOURCES > /dev/null 2>&1'
env['ARFLAGS'] = ['-static']
# Flags for the iOS simulator
if not architecture.startswith('ARM'):
env['CCFLAGS'] += ['-fobjc-abi-version=2']
env['LINKFLAGS'] += ['-Xlinker', '-objc_abi_version', '-Xlinker', '2']
# Bitcode support (added in iOS 9)
if architecture.startswith('ARM'):
env['ASFLAGS'] += ['-fembed-bitcode']
env['CCFLAGS'] += ['-fembed-bitcode']
env['LINKFLAGS'] += ['-fembed-bitcode']
# This method sets up the environment for linking Carbon as a static library into a final application
def SetupForLinkingCarbon(self, **keywords):
defaultDependencies = ['AngelScript', 'Bullet', 'FreeImage', 'OpenAssetImport', 'Vorbis', 'ZLib']
dependencies = keywords.get('dependencies', defaultDependencies)
self['LIBPATH'] += GetDependencyLIBPATH(*dependencies)
self['LIBS'] += dependencies
self['FRAMEWORKS'] += ['CoreGraphics', 'Foundation', 'GameKit', 'OpenAL', 'OpenGLES', 'QuartzCore', 'StoreKit',
'UIKit']
env.AddMethod(SetupForLinkingCarbon)
# Add a method for setting up an environment ready for building against the installed SDK
def Carbonize(self, **keywords):
if 'carbonroot' in ARGUMENTS:
self['CPPPATH'] += [os.path.join(ARGUMENTS['carbonroot'], 'Source')]
self['LIBPATH'] += [os.path.join(ARGUMENTS['carbonroot'], 'Build/iOS', architecture, 'Clang', buildType)]
self['LIBS'] += ['CarbonEngine' + {True: 'Debug', False: ''}[isDebugBuild]]
self.SetupForLinkingCarbon()
else:
self['CPPPATH'] += ['/Applications/Carbon SDK/Include']
self['LIBPATH'] += ['/Applications/Carbon SDK/Library']
self['LIBS'] += ['CarbonEngineiOS' + {True: 'Debug', False: ''}[isDebugBuild]]
self.SetupForLinkingCarbon(dependencies=[])
self.Append(**keywords)
self.SetupPrecompiledHeader(keywords)
env.AddMethod(Carbonize)
# Return all the build setup details for this platform
details = {'platform': 'iOS', 'architecture': architecture, 'compiler': 'Clang', 'env': env,
'isCarbonEngineStatic': True}
Return('details')
| mpl-2.0 | -4,635,211,874,022,274,000 | 37.306931 | 115 | 0.679504 | false |
get9/monkeyshines | robotsparser.py | 1 | 1038 | import requests
import logging
from fetcher import fetch
from os.path import join
from urlobj import URLObj
from urllib.parse import urljoin, urlsplit, urlunsplit
class RobotsParser:
def __init__(self, domain):
self.domain = domain
# Check if the file even exists first.
def exists(self):
resp = fetch(URLObj(join(self.domain, 'robots.txt')))
return (resp is not None) and (resp.status_code == requests.codes.ok)
# Actually parse the file.
def parse(self):
logging.info("Parsing robots.txt")
blackpaths = []
resp = fetch(URLObj(join(self.domain, 'robots.txt')))
for line in resp.text.split('\n'):
line = line.strip()
if line.startswith('#'):
continue
            elif not line:
                continue
elif line.startswith('Disallow'):
badpath = line.split(':')[1].strip().strip('/')
blackpaths.append(badpath)
return [join(self.domain, b) for b in blackpaths]
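# Minimal usage sketch (the domain below is illustrative only):
# parser = RobotsParser('http://example.com')
# if parser.exists():
#     blacklist = parser.parse()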
| apache-2.0 | 617,103,501,883,788,000 | 31.4375 | 77 | 0.597303 | false |
LouisePaulDelvaux/Til-Liam | src_liam/process.py | 1 | 10436 | from __future__ import division, print_function
import collections
import numpy as np
import config
from diff_h5 import diff_array
from data import append_carray_to_table, ColumnArray
from expr import Expr, type_to_idx, idx_to_type, expr_eval, Variable
from context import EntityContext
import utils
import importlib
from links import Many2One
class BreakpointException(Exception):
pass
class Process(object):
def __init__(self):
self.name = None
self.entity = None
def attach(self, name, entity):
self.name = name
self.entity = entity
def run_guarded(self, simulation, const_dict):
try:
context = EntityContext(self.entity, const_dict.copy())
self.run(context)
except BreakpointException:
simulation.stepbystep = True
def run(self, context):
raise NotImplementedError()
def expressions(self):
raise NotImplementedError()
def __str__(self):
return "<process '%s'>" % self.name
class Compute(Process):
'''these processes only compute an expression and do not store their
result (but they usually have side-effects). No class inherits from
this but we use it when a user does not store anywhere the result of
an expression (with a side effect) which *does* return a value.
new() is a good example for this'''
def __init__(self, expr):
super(Compute, self).__init__()
self.expr = expr
def run(self, context):
expr_eval(self.expr, context)
def expressions(self):
if isinstance(self.expr, Expr):
yield self.expr
class ExtProcess(Process):
'''these processes are not real Liam2 processes
The file containing the function should be in the path and
the function itself must be named "main".
'''
def __init__(self, name, arg):
super(ExtProcess, self).__init__()
self.name = name
self.args = arg
def run_guarded(self, simulation, const_dict):
context = EntityContext(self.entity, const_dict.copy())
self.run(simulation, context['period'])
def run(self, simulation, period):
module = importlib.import_module(self.name)
if self.args is not None:
arg_bis = list(self.args)
for index, arg in enumerate(self.args):
if arg == 'period':
arg_bis[index] = int(period/100)
elif arg == 'simulation':
arg_bis[index] = simulation
else:
arg_bis[index]= arg
arg_bis = tuple(arg_bis)
module.main(*arg_bis)
else:
module.main()
def expressions(self):
if isinstance(self.expr, Expr):
yield self.expr
class Assignment(Process):
def __init__(self, expr):
super(Assignment, self).__init__()
self.predictor = None
self.kind = None # period_individual, period, individual, global
self.expr = expr
def attach(self, name, entity, kind=None):
super(Assignment, self).attach(name, entity)
if self.predictor is None:
self.predictor = name
self.kind = kind
def run(self, context):
value = expr_eval(self.expr, context)
self.store_result(value, context) # add context to enable link
def store_result(self, result, context):
if result is None:
return
if self.name is None:
raise Exception('trying to store None key')
if isinstance(result, np.ndarray):
res_type = result.dtype.type
else:
res_type = type(result)
if self.kind == 'period_individual':
# we cannot store/cache self.entity.array[self.name] because the
# array object can change (eg when enlarging it due to births)
target = self.entity.array
else:
target = self.entity.temp_variables
if '.' not in self.predictor:
#TODO: assert type for temporary variables too
if self.kind is not None:
target_type_idx = type_to_idx[target[self.predictor].dtype.type]
res_type_idx = type_to_idx[res_type]
if res_type_idx > target_type_idx:
raise Exception(
"trying to store %s value into '%s' field which is of "
"type %s" % (idx_to_type[res_type_idx].__name__,
self.predictor,
idx_to_type[target_type_idx].__name__))
# the whole column is updated
target[self.predictor] = result
elif '.' in self.predictor:
predictor_split = self.predictor.split('.')
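            # e.g. a predictor such as "household.region" follows the Many2One
            # link for every component except the last, then assigns into that
            # target entity's column (names here are illustrative; actual links
            # come from the simulation model)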
#initialisation
target_entity = self.entity
source_context = context
#add index
for link_name in predictor_split[:-1]:
link = target_entity.links[link_name]
if isinstance(link,Many2One):
target_context = link._target_context(source_context)
ids = expr_eval( Variable(link._link_field) , source_context)
target_ids = target_context.id_to_rownum
target_ids = target_ids[ids]
source_context = target_context
target_entity = link._target_entity()
else:
raise Exception("Only Many2One link "
" can be used. '%s' is %s" % (target_entity, type(target_entity)))
target_array = target_entity.array
            # we should not have a temp_variable here, although we could
try:
target_array[predictor_split[-1]][target_ids] = result
except:
import pdb
print(predictor_split)
pdb.set_trace()
def expressions(self):
if isinstance(self.expr, Expr):
yield self.expr
max_vars = 0
class ProcessGroup(Process):
def __init__(self, name, subprocesses):
super(ProcessGroup, self).__init__()
self.name = name
self.subprocesses = subprocesses
self.calls = collections.Counter()
@property
def _modified_fields(self):
fnames = [v.predictor for _, v in self.subprocesses
if isinstance(v, Assignment)]
if not fnames:
return []
fnames.insert(0, 'id')
temp = self.entity.temp_variables
array = self.entity.array
alen = len(array)
fields = [(k, temp[k] if k in temp else array[k])
for k in utils.unique(fnames)]
return [(k, v) for k, v in fields
if isinstance(v, np.ndarray) and v.shape == (alen,)]
def _tablename(self, period):
self.calls[(period, self.name)] += 1
num_calls = self.calls[(period, self.name)]
if num_calls > 1:
return '{}_{}'.format(self.name, num_calls)
else:
return self.name
def _autodump(self, period):
fields = self._modified_fields
if not fields:
return
fname, numrows = config.autodump
h5file = config.autodump_file
name = self._tablename(period)
dtype = np.dtype([(k, v.dtype) for k, v in fields])
table = h5file.createTable('/{}'.format(period), name, dtype,
createparents=True)
fnames = [k for k, _ in fields]
print("writing {} to {}/{}/{} ...".format(', '.join(fnames),
fname, period, name))
context = EntityContext(self.entity, {'period': period})
append_carray_to_table(context, table, numrows)
print("done.")
def _autodiff(self, period, numdiff=10, raiseondiff=False):
fields = self._modified_fields
if not fields:
return
fname, numrows = config.autodiff
h5file = config.autodump_file
tablepath = '/{}/{}'.format(period, self._tablename(period))
print("comparing with {}{} ...".format(fname, tablepath))
if tablepath in h5file:
table = h5file.getNode(tablepath)
disk_array = ColumnArray.from_table(table, stop=numrows)
diff_array(disk_array, ColumnArray(fields), numdiff, raiseondiff)
else:
print(" SKIPPED (could not find table)")
def run_guarded(self, simulation, const_dict):
global max_vars
periods = const_dict['periods']
idx = const_dict['period_idx']
period = periods[idx]
print()
for k, v in self.subprocesses:
# print(" *", end=' ')
if k is not None:
print(k, end=' ')
utils.timed(v.run_guarded, simulation, const_dict)
# print "done."
simulation.start_console(v.entity, period,
const_dict['__globals__'])
if config.autodump is not None:
self._autodump(period)
if config.autodiff is not None:
self._autodiff(period)
# purge all local variables
temp_vars = self.entity.temp_variables
all_vars = self.entity.variables
local_var_names = set(temp_vars.keys()) - set(all_vars.keys())
num_locals = len(local_var_names)
if config.debug and num_locals:
local_vars = [v for k, v in temp_vars.iteritems()
if k in local_var_names and
isinstance(v, np.ndarray)]
max_vars = max(max_vars, num_locals)
temp_mem = sum(v.nbytes for v in local_vars)
avgsize = sum(v.dtype.itemsize for v in local_vars) / num_locals
print(("purging {} variables (max {}), will free {} of memory "
"(avg field size: {} b)".format(num_locals, max_vars,
utils.size2str(temp_mem),
avgsize)))
for var in local_var_names:
del temp_vars[var]
def expressions(self):
for _, p in self.subprocesses:
for e in p.expressions():
yield e
| gpl-3.0 | 8,371,587,177,291,615,000 | 33.442244 | 102 | 0.545803 | false |
srdp11/kd-tree | discrete_slider.py | 1 | 1244 | from matplotlib.widgets import Slider
class DiscreteSlider(Slider):
"""A matplotlib slider widget with discrete steps."""
def __init__(self, *args, **kwargs):
"""Identical to Slider.__init__, except for the "increment" kwarg.
"increment" specifies the step size that the slider will be discritized
to."""
self.inc = kwargs.pop('increment', 1.0)
Slider.__init__(self, *args, **kwargs)
def set_val(self, val):
discrete_val = int(val / self.inc) * self.inc
# We can't just call Slider.set_val(self, discrete_val), because this
# will prevent the slider from updating properly (it will get stuck at
# the first step and not "slide"). Instead, we'll keep track of the
# the continuous value as self.val and pass in the discrete value to
# everything else.
xy = self.poly.xy
xy[2] = discrete_val, 1
xy[3] = discrete_val, 0
self.poly.xy = xy
self.valtext.set_text(self.valfmt % discrete_val)
if self.drawon:
self.ax.figure.canvas.draw()
self.val = val
if not self.eventson:
return
for cid, func in self.observers.items():
func(discrete_val)
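# Hypothetical usage sketch (axes geometry and value range are illustrative):
# ax = plt.axes([0.2, 0.02, 0.6, 0.03])
# slider = DiscreteSlider(ax, 'Frame', 0, 100, increment=5, valinit=0)
# slider.on_changed(lambda val: print(val))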
| mit | -839,302,486,298,906,000 | 40.466667 | 79 | 0.606109 | false |
wdv4758h/ZipPy | edu.uci.python.benchmark/src/benchmarks/whoosh/tests/bench.py | 1 | 5642 | __author__ = 'zwei'
from whoosh import matching, scoring
def test_filter():
lm = lambda: matching.ListMatcher(list(range(2, 10)))
fm = matching.FilterMatcher(lm(), frozenset([3, 9]))
assert list(fm.all_ids()) == [3, 9]
fm = matching.FilterMatcher(lm(), frozenset([1, 5, 9, 13]))
assert list(fm.all_ids()) == [5, 9]
def test_exclude():
em = matching.FilterMatcher(matching.ListMatcher([1, 2, 5, 9, 10]),
frozenset([2, 9]), exclude=True)
assert list(em.all_ids()) == [1, 5, 10]
em = matching.FilterMatcher(matching.ListMatcher([1, 2, 5, 9, 10]),
frozenset([2, 9]), exclude=True)
assert list(em.all_ids()) == [1, 5, 10]
em = matching.FilterMatcher(matching.ListMatcher([1, 2, 5, 9, 10]),
frozenset([2, 9]), exclude=True)
em.next()
em.next()
em = em.copy()
ls = []
while em.is_active():
ls.append(em.id())
em.next()
assert ls == [10]
test_exclude()
def test_simple_union():
lm1 = matching.ListMatcher([1, 4, 10, 20, 90])
lm2 = matching.ListMatcher([0, 4, 20])
um = matching.UnionMatcher(lm1, lm2)
ls = []
while um.is_active():
ls.append((um.id(), um.score()))
um.next()
assert ls == [(0, 1.0), (1, 1.0), (4, 2.0), (10, 1.0), (20, 2.0), (90, 1.0)]
lm1 = matching.ListMatcher([1, 4, 10, 20, 90])
lm2 = matching.ListMatcher([0, 4, 20])
um = matching.UnionMatcher(lm1, lm2)
assert list(um.all_ids()) == [0, 1, 4, 10, 20, 90]
lm1 = matching.ListMatcher([1, 4, 10, 20, 90])
lm2 = matching.ListMatcher([0, 4, 20])
um = matching.UnionMatcher(lm1, lm2)
um.next()
um.next()
um = um.copy()
ls = []
while um.is_active():
ls.append(um.id())
um.next()
assert ls == [4, 10, 20, 90]
def test_inverse():
s = matching.ListMatcher([1, 5, 10, 11, 13])
inv = matching.InverseMatcher(s, 15)
ids = []
while inv.is_active():
ids.append(inv.id())
inv.next()
assert ids == [0, 2, 3, 4, 6, 7, 8, 9, 12, 14]
def test_simple_intersection():
lm1 = matching.ListMatcher([1, 4, 10, 20, 90])
lm2 = matching.ListMatcher([0, 4, 20])
im = matching.IntersectionMatcher(lm1, lm2)
ls = []
while im.is_active():
ls.append((im.id(), im.score()))
im.next()
assert ls == [(4, 2.0), (20, 2.0)]
lm1 = matching.ListMatcher([1, 4, 10, 20, 90])
lm2 = matching.ListMatcher([0, 4, 20])
im = matching.IntersectionMatcher(lm1, lm2)
assert list(im.all_ids()) == [4, 20]
lm1 = matching.ListMatcher([1, 4, 10, 20, 90])
lm2 = matching.ListMatcher([0, 4, 20])
im = matching.IntersectionMatcher(lm1, lm2)
im.next()
im.next()
im = im.copy()
ls = []
while im.is_active():
ls.append(im.id())
im.next()
assert not ls
def test_replacements():
sc = scoring.WeightScorer(0.25)
a = matching.ListMatcher([1, 2, 3], [0.25, 0.25, 0.25], scorer=sc)
b = matching.ListMatcher([1, 2, 3], [0.25, 0.25, 0.25], scorer=sc)
um = matching.UnionMatcher(a, b)
a2 = a.replace(0.5)
assert a2.__class__ == matching.NullMatcherClass
um2 = um.replace(0.5)
assert um2.__class__ == matching.IntersectionMatcher
um2 = um.replace(0.6)
assert um2.__class__ == matching.NullMatcherClass
wm = matching.WrappingMatcher(um, boost=2.0)
wm = wm.replace(0.5)
assert wm.__class__ == matching.WrappingMatcher
assert wm.boost == 2.0
assert wm.child.__class__ == matching.IntersectionMatcher
ls1 = matching.ListMatcher([1, 2, 3], [0.1, 0.1, 0.1],
scorer=scoring.WeightScorer(0.1))
ls2 = matching.ListMatcher([1, 2, 3], [0.2, 0.2, 0.2],
scorer=scoring.WeightScorer(0.2))
ls3 = matching.ListMatcher([1, 2, 3], [0.3, 0.3, 0.3],
scorer=scoring.WeightScorer(0.3))
mm = matching.MultiMatcher([ls1, ls2, ls3], [0, 4, 8])
mm = mm.replace(0.25)
assert mm.current == 2
dm = matching.DisjunctionMaxMatcher(ls1, ls2)
dm = dm.replace(0.15)
assert dm is ls2
test_replacements()
def create_matchers():
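    # builds three unions over six list matchers, inverts the third union,
    # and combines everything in a MultiMatcher with doc offsets 0, 9 and 18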
id1 = [i for i in range(1000)]
id2 = [i + 1 for i in range(1000)]
id3 = [i * 2 + i % 5 for i in range(1000)]
id4 = [i * i for i in range(1000)]
id5 = [1001 - i for i in range(1000)]
id6 = [i * 3 // 2 for i in range(1000)]
vl1 = [0.1 for i in range(1000)]
vl2 = [0.2 for i in range(1000)]
vl3 = [0.3 for i in range(1000)]
vl4 = [0.4 for i in range(1000)]
vl5 = [0.5 for i in range(1000)]
vl6 = [0.6 for i in range(1000)]
sc1 = scoring.WeightScorer(0.15)
sc2 = scoring.WeightScorer(0.25)
sc3 = scoring.WeightScorer(0.35)
sc4 = scoring.WeightScorer(0.45)
sc5 = scoring.WeightScorer(0.55)
sc6 = scoring.WeightScorer(0.65)
ls1 = matching.ListMatcher(id1, vl1, sc1)
ls2 = matching.ListMatcher(id2, vl2, sc2)
ls3 = matching.ListMatcher(id3, vl3, sc3)
ls4 = matching.ListMatcher(id4, vl4, sc4)
ls5 = matching.ListMatcher(id5, vl5, sc5)
ls6 = matching.ListMatcher(id6, vl6, sc6)
um1 = matching.UnionMatcher(ls1, ls2)
um2 = matching.UnionMatcher(ls3, ls4)
um3 = matching.UnionMatcher(ls5, ls6)
inv = matching.InverseMatcher(um3, 15)
mm = matching.MultiMatcher([um1, um2, inv], [0, 9, 18])
return mm
def domatch(matcher):
return [_id for _id in matcher.all_ids()]
def main(n):
for i in range(n):
matchers = create_matchers()
ret = domatch(matchers)
return ret
print(main(1000)) | bsd-3-clause | 660,784,214,368,457,500 | 30.35 | 80 | 0.569302 | false |
shantnu/ntlk_intro | Nltk_Intro_Part2.py | 1 | 1403 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import nltk.classify.util
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import movie_reviews
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.corpus import wordnet
# In[2]:
stopwords.words('english')[:16]
# In[3]:
# https://en.wikipedia.org/wiki/Cadet_Nurse_Corps
para = "The program was open to all women between the ages of 17 and 35, in good health, who had graduated from an accredited high school. Successful applicants were eligible for a government subsidy, paying for tuition, books, uniforms, and a stipend. In exchange, they were required to pledge to actively serve in essential civilian or federal government services for the duration of World War II. All state nursing schools were eligible to participate in the program. However, they needed to be accredited by the accrediting agency in their state, and connected with a hospital that had been approved by the American College of Surgeons."
words = word_tokenize(para)
print(words)
useful_words = [word for word in words if word not in stopwords.words('english')]
print(useful_words)
# In[4]:
movie_reviews.words()
# In[5]:
movie_reviews.categories()
# In[6]:
movie_reviews.fileids()[:4]
# In[7]:
all_words = movie_reviews.words()
freq_dist = nltk.FreqDist(all_words)
freq_dist.most_common(20)
# In[ ]:
| agpl-3.0 | -2,384,037,332,292,930,000 | 20.921875 | 641 | 0.751247 | false |
RPGOne/Skynet | pytorch-master/torch/legacy/nn/LookupTable.py | 1 | 5017 | import torch
from .Module import Module
from .utils import clear
class LookupTable(Module):
def __init__(self, nIndex, nOutput, paddingValue=-1, maxNorm=None, normType=None):
super(LookupTable, self).__init__()
self.weight = torch.Tensor(nIndex, nOutput)
self.gradWeight = torch.Tensor(nIndex, nOutput).zero_()
self.paddingValue = paddingValue
self.maxNorm = maxNorm
self.normType = normType
self.shouldScaleGradByFreq = False
self._gradOutput = None
self._sorted = None
self._indices = None
self._count = torch.IntTensor()
self._input = torch.LongTensor()
self.reset()
def accUpdateOnly(self):
self.gradWeight = None
return self
def setPadding(self, paddingValue):
self.paddingValue = paddingValue
return self
def setMaxNorm(self, maxNorm):
self.maxNorm = maxNorm
return self
def setNormType(self, normType):
self.normType = normType
return self
def scaleGradByFreq(self):
self.shouldScaleGradByFreq = True
return self
def reset(self, stdv=1):
self.weight.normal_(0, stdv)
def _makeInputContiguous(self, input):
# make sure input is a contiguous torch.LongTensor
if not input.is_contiguous() or not type(input) is type(self._input):
self.copiedInput = True
self._input.resize_(input.size()).copy_(input)
return self._input
else:
self.copiedInput = False
return input
def updateOutput(self, input):
self.renorm(input)
input = self._makeInputContiguous(input)
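        # each input index selects the corresponding row of self.weight; for
        # 2D input the output keeps that shape plus a trailing embedding dim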
if input.dim() == 1:
torch.index_select(self.weight, 0, input, out=self.output)
elif input.dim() == 2:
torch.index_select(self.weight, 0, input.view(-1), out=self.output)
self.output = self.output.view(input.size(0), input.size(1), self.weight.size(1))
else:
raise RuntimeError("input must be a vector or matrix")
return self.output
def updateGradInput(self, input, gradOutput):
# the input can be of any type (as in the forward it's
# converted anyway to LongTensor) thus, need to allocate
# new memory each time the user changes the input type
if type(self.gradInput) != type(input):
self.gradInput = input.new()
if not self.gradInput.is_same_size(input):
self.gradInput.resize_as_(input).zero_()
return self.gradInput
def accGradParameters(self, input, gradOutput, scale=1):
input = self._input if self.copiedInput else input
if input.dim() == 2:
input = input.view(-1)
elif input.dim() != 1:
raise RuntimeError("input must be a vector or matrix")
if not gradOutput.is_contiguous():
if self._gradOutput is None:
self._gradOutput = gradOutput.new()
self._gradOutput.resize_as_(gradOutput).copy_(gradOutput)
gradOutput = self._gradOutput
self._backend.LookupTable_accGradParameters(
self._backend.library_state,
input,
gradOutput,
self.gradWeight,
self._count,
self._sorted,
self._indices,
self.shouldScaleGradByFreq,
self.paddingValue or 0,
scale
)
def renorm(self, input):
if self.maxNorm is None:
return
        # copy input into _input, so _input is contiguous.
# The copied _input will be modified in the C code.
self._input.resize_(input.size()).copy_(input)
row_idx = self._input
if row_idx.dim() == 2:
row_idx = row_idx.view(-1)
elif row_idx.dim() != 1:
raise RuntimeError("input must be a vector or matrix")
# "row_idx" and "weight" will be modified in the C code
self._backend.LookupTable_renorm(
self._backend.library_state,
row_idx,
self.weight,
self.maxNorm,
self.normType or 2
)
def type(self, type=None, tensorCache=None):
if type is None:
return self._type
super(LookupTable, self).type(type, tensorCache)
if type == 'torch.cuda.FloatTensor':
# CUDA uses _sorted and _indices temporary tensors
self._sorted = torch.cuda.LongTensor()
self._indices = torch.cuda.LongTensor()
self._count = torch.cuda.LongTensor()
self._input = torch.cuda.LongTensor()
else:
# self._count and self._input should only be converted if using Cuda
self._count = torch.IntTensor()
self._input = torch.LongTensor()
return self
def clearState(self):
clear(self, '_count', '_input', '_sorted', '_indices', '_gradOutput')
return super(LookupTable, self).clearState()
| bsd-3-clause | -1,934,741,924,229,302,500 | 32.006579 | 93 | 0.586805 | false |
cloudbase/heat2arm | heat2arm/parser/common/functions/ref_function.py | 1 | 4088 | # Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Contains the definition of the base class for referencing functions.
"""
import logging
from heat2arm.parser.common.exceptions import (FunctionApplicationException,
FunctionArgumentException)
from heat2arm.parser.common.function import Function
LOG = logging.getLogger("__heat2arm__.RefFunction")
class RefFunction(Function):
""" RefFunction is the base class for all functions which implement the
simple referencing mechanic.
It takes the form:
" 'RefFunction': 'RefName' "
It simply returns the default value of the specified parameter or the
referenced resource.
"""
# for the parameter accessing functions, define the default
# field's name of the parameter's definition.
_param_default_field_name = ""
# _exceptions is a dict of attributes whose getting is not necessary and
# the associated default value to be used.
# ex: CFN's GetAtt for an AvailabilityZone.
_exceptions = {}
def _check_args(self, args):
""" _check_args validates the provided set of arguments. """
if not isinstance(args, str):
raise FunctionArgumentException(
"Referencing function '%s' expects a "
"single string parameter, but got: '%s'" % (
self.name,
args
)
)
def apply(self, args):
""" apply applies the function to the given set of data and returns
the result.
"""
self._check_args(args)
# check if reference is for a parameter:
if args in self._template.parameters:
# if so, check if it has a default field:
if (self._param_default_field_name in
self._template.parameters[args]):
# return the default value.
return self._template.parameters[args][
self._param_default_field_name
]
else:
# the value might not be necessary, so we just log a warning
# and return the provided args:
LOG.warning(
"Default field for parameter '%s' was not provided; "
"ignoring in case it wasn't needed and simply returning "
"the name of the parameter.",
args
)
return args
# else, it must be a reference to another resource, in which case
# we trivially return the resource's name if present:
if args in self._template.resources:
return args
# finally; make sure that it is not an exceptional case:
if args in self._exceptions:
# if so; log a warning and proceed to use the exception's default:
LOG.warning(
"'%s': referencing exception applied for '%s'. "
"Defaulting to '%s'.", self.name, args,
self._exceptions[args]
)
return self._exceptions[args]
# else, rock bottom:
raise FunctionApplicationException(
"Invalid referencing with '%s'. Reference to '%s' "
"could not be resolved. If it is a value of significance, "
"please add it to the '[cfn|hot]_ref_exceptions' "
"configuration option." % (
self.name,
args
)
)
| apache-2.0 | 5,772,038,701,485,492,000 | 35.5 | 78 | 0.591487 | false |
Peter92/MouseTrack | mousetracks/image/scipy/gaussian.py | 1 | 8012 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, absolute_import
import numpy
import math
from . import _ni_support
try:
from . import _nd_image
except ImportError:
from scipy.ndimage import _nd_image
def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional correlation along the given axis.
The lines of the array along the given axis are correlated with the
given weights.
Parameters
----------
%(input)s
weights : array
One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
weights = numpy.asarray(weights, dtype=numpy.float64)
if weights.ndim != 1 or weights.shape[0] < 1:
raise RuntimeError('no filter weights given')
if not weights.flags.contiguous:
weights = weights.copy()
axis = _ni_support._check_axis(axis, input.ndim)
if (len(weights) // 2 + origin < 0) or (len(weights) // 2 +
origin > len(weights)):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate1d(input, weights, axis, output, mode, cval,
origin)
return return_value
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""One-dimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar
standard deviation for Gaussian kernel
%(axis)s
order : {0, 1, 2, 3}, optional
An order of 0 corresponds to convolution with a Gaussian
kernel. An order of 1, 2, or 3 corresponds to convolution with
the first, second or third derivatives of a Gaussian. Higher
order derivatives are not implemented
%(output)s
%(mode)s
%(cval)s
truncate : float, optional
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter1d : ndarray
"""
if order not in range(4):
raise ValueError('Order outside 0..3 not implemented')
sd = float(sigma)
# make the radius of the filter equal to truncate standard deviations
lw = int(truncate * sd + 0.5)
weights = [0.0] * (2 * lw + 1)
weights[lw] = 1.0
sum = 1.0
sd = sd * sd
# calculate the kernel:
for ii in range(1, lw + 1):
tmp = math.exp(-0.5 * float(ii * ii) / sd)
weights[lw + ii] = tmp
weights[lw - ii] = tmp
sum += 2.0 * tmp
for ii in range(2 * lw + 1):
weights[ii] /= sum
# implement first, second and third order derivatives:
if order == 1: # first derivative
weights[lw] = 0.0
for ii in range(1, lw + 1):
x = float(ii)
tmp = -x / sd * weights[lw + ii]
weights[lw + ii] = -tmp
weights[lw - ii] = tmp
elif order == 2: # second derivative
weights[lw] *= -1.0 / sd
for ii in range(1, lw + 1):
x = float(ii)
tmp = (x * x / sd - 1.0) * weights[lw + ii] / sd
weights[lw + ii] = tmp
weights[lw - ii] = tmp
elif order == 3: # third derivative
weights[lw] = 0.0
sd2 = sd * sd
for ii in range(1, lw + 1):
x = float(ii)
tmp = (3.0 - x * x / sd) * x * weights[lw + ii] / sd2
weights[lw + ii] = -tmp
weights[lw - ii] = tmp
return correlate1d(input, weights, axis, output, mode, cval, 0)
def gaussian_filter(input, sigma, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""Multidimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
Standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
order : {0, 1, 2, 3} or sequence from same set, optional
The order of the filter along each axis is given as a sequence
of integers, or as a single number. An order of 0 corresponds
to convolution with a Gaussian kernel. An order of 1, 2, or 3
corresponds to convolution with the first, second or third
derivatives of a Gaussian. Higher order derivatives are not
implemented
%(output)s
%(mode)s
%(cval)s
truncate : float
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
The multidimensional filter is implemented as a sequence of
one-dimensional convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
types with a limited precision, the results may be imprecise
because intermediate results may be stored with insufficient
precision.
Examples
--------
>>> from scipy.ndimage import gaussian_filter
>>> a = np.arange(50, step=2).reshape((5,5))
>>> a
array([[ 0, 2, 4, 6, 8],
[10, 12, 14, 16, 18],
[20, 22, 24, 26, 28],
[30, 32, 34, 36, 38],
[40, 42, 44, 46, 48]])
>>> gaussian_filter(a, sigma=1)
array([[ 4, 6, 8, 9, 11],
[10, 12, 14, 15, 17],
[20, 22, 24, 25, 27],
[29, 31, 33, 34, 36],
[35, 37, 39, 40, 42]])
"""
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
orders = _ni_support._normalize_sequence(order, input.ndim)
if not set(orders).issubset(set(range(4))):
raise ValueError('Order outside 0..4 not implemented')
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sigmas[ii], orders[ii])
for ii in range(len(axes)) if sigmas[ii] > 1e-15]
if len(axes) > 0:
for axis, sigma, order in axes:
gaussian_filter1d(input, sigma, axis, order, output,
mode, cval, truncate)
input = output
else:
output[...] = input[...]
return return_value
| gpl-3.0 | 3,616,977,460,178,571,000 | 34.928251 | 73 | 0.611208 | false |
hehongliang/tensorflow | tensorflow/python/keras/layers/normalization.py | 1 | 30004 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Normalization layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import distribution_strategy_context
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.layers.BatchNormalization')
class BatchNormalization(Layer):
"""Batch normalization layer (Ioffe and Szegedy, 2014).
Normalize the activations of the previous layer at each batch,
i.e. applies a transformation that maintains the mean activation
close to 0 and the activation standard deviation close to 1.
Arguments:
axis: Integer, the axis that should be normalized
(typically the features axis).
For instance, after a `Conv2D` layer with
`data_format="channels_first"`,
set `axis=1` in `BatchNormalization`.
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor.
If False, `beta` is ignored.
scale: If True, multiply by `gamma`.
If False, `gamma` is not used.
When the next layer is linear (also e.g. `nn.relu`),
this can be disabled since the scaling
will be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: Optional constraint for the beta weight.
gamma_constraint: Optional constraint for the gamma weight.
renorm: Whether to use Batch Renormalization
(https://arxiv.org/abs/1702.03275). This adds extra variables during
training. The inference is the same for either value of this parameter.
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction
`(r, d)` is used as `corrected_value = normalized_value * r + d`, with
`r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
renorm_momentum: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training
and should be neither too small (which would add noise) nor too large
(which would give stale estimates). Note that `momentum` is still applied
to get the means and variances for inference.
fused: if `None` or `True`, use a faster, fused implementation if possible.
If `False`, use the system recommended implementation.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`,
which means batch normalization is performed across the whole batch. When
`virtual_batch_size` is not `None`, instead perform "Ghost Batch
Normalization", which creates virtual sub-batches which are each
normalized separately (with shared gamma, beta, and moving statistics).
Must divide the actual batch size during execution.
adjustment: A function taking the `Tensor` containing the (dynamic) shape of
the input tensor and returning a pair (scale, bias) to apply to the
normalized values (before gamma and beta), only during training. For
example, if axis==-1,
`adjustment = lambda shape: (
tf.random_uniform(shape[-1:], 0.93, 1.07),
tf.random_uniform(shape[-1:], -0.1, 0.1))`
will scale the normalized value by up to 7% up or down, then shift the
result by up to 0.1 (with independent scaling and bias for each feature
but shared across all examples), and finally apply gamma and/or beta. If
`None`, no adjustment is applied. Cannot be specified if
virtual_batch_size is specified.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
References:
- [Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
"""
def __init__(self,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer='zeros',
gamma_initializer='ones',
moving_mean_initializer='zeros',
moving_variance_initializer='ones',
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
renorm=False,
renorm_clipping=None,
renorm_momentum=0.99,
fused=None,
trainable=True,
virtual_batch_size=None,
adjustment=None,
name=None,
**kwargs):
super(BatchNormalization, self).__init__(
name=name, trainable=trainable, **kwargs)
if isinstance(axis, list):
self.axis = axis[:]
else:
self.axis = axis
self.momentum = momentum
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = initializers.get(beta_initializer)
self.gamma_initializer = initializers.get(gamma_initializer)
self.moving_mean_initializer = initializers.get(moving_mean_initializer)
self.moving_variance_initializer = initializers.get(
moving_variance_initializer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_constraint = constraints.get(beta_constraint)
self.gamma_constraint = constraints.get(gamma_constraint)
self.renorm = renorm
self.virtual_batch_size = virtual_batch_size
self.adjustment = adjustment
if fused is None:
fused = True
self.supports_masking = True
self.fused = fused
self._bessels_correction_test_only = True
if renorm:
renorm_clipping = renorm_clipping or {}
keys = ['rmax', 'rmin', 'dmax']
if set(renorm_clipping) - set(keys):
raise ValueError('renorm_clipping %s contains keys not in %s' %
(renorm_clipping, keys))
self.renorm_clipping = renorm_clipping
self.renorm_momentum = renorm_momentum
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if not input_shape.ndims:
raise ValueError('Input has undefined rank:', input_shape)
ndims = len(input_shape)
# Convert axis to list and resolve negatives
if isinstance(self.axis, int):
self.axis = [self.axis]
if not isinstance(self.axis, list):
raise TypeError('axis must be int or list, type given: %s'
% type(self.axis))
for idx, x in enumerate(self.axis):
if x < 0:
self.axis[idx] = ndims + x
# Validate axes
for x in self.axis:
if x < 0 or x >= ndims:
raise ValueError('Invalid axis: %d' % x)
if len(self.axis) != len(set(self.axis)):
raise ValueError('Duplicate axis: %s' % self.axis)
if self.virtual_batch_size is not None:
if self.virtual_batch_size <= 0:
raise ValueError('virtual_batch_size must be a positive integer that '
'divides the true batch size of the input Tensor')
# If using virtual batches, the first dimension must be the batch
# dimension and cannot be the batch norm axis
if 0 in self.axis:
raise ValueError('When using virtual_batch_size, the batch dimension '
'must be 0 and thus axis cannot include 0')
if self.adjustment is not None:
raise ValueError('When using virtual_batch_size, adjustment cannot '
'be specified')
if self.fused:
# Currently fused batch norm doesn't support renorm. It also only supports
# an input tensor of rank 4 and a channel dimension on axis 1 or 3.
# TODO(yaozhang): if input is not 4D, reshape it to 4D and reshape the
# output back to its original shape accordingly.
self.fused = (not self.renorm and
ndims == 4 and
self.axis in [[1], [3]] and
self.virtual_batch_size is None and
self.adjustment is None)
# TODO(chrisying): fused batch norm is currently not supported for
# multi-axis batch norm and by extension virtual batches. In some cases,
# it might be possible to use fused batch norm but would require reshaping
# the Tensor to 4D with the axis in 1 or 3 (preferred 1) which is
# particularly tricky. A compromise might be to just support the most
# common use case (turning 5D w/ virtual batch to NCHW)
if self.fused:
if self.axis == [1]:
self._data_format = 'NCHW'
elif self.axis == [3]:
self._data_format = 'NHWC'
else:
raise ValueError('Unsupported axis, fused batch norm only supports '
'axis == [1] or axis == [3]')
# Raise parameters of fp16 batch norm to fp32
if self.dtype == dtypes.float16 or self.dtype == dtypes.bfloat16:
param_dtype = dtypes.float32
else:
param_dtype = self.dtype or dtypes.float32
axis_to_dim = {x: input_shape.dims[x].value for x in self.axis}
for x in axis_to_dim:
if axis_to_dim[x] is None:
raise ValueError('Input has undefined `axis` dimension. Input shape: ',
input_shape)
self.input_spec = InputSpec(ndim=ndims, axes=axis_to_dim)
if len(axis_to_dim) == 1 and self.virtual_batch_size is None:
# Single axis batch norm (most common/default use-case)
param_shape = (list(axis_to_dim.values())[0],)
else:
# Parameter shape is the original shape but with 1 in all non-axis dims
param_shape = [axis_to_dim[i] if i in axis_to_dim
else 1 for i in range(ndims)]
if self.virtual_batch_size is not None:
# When using virtual batches, add an extra dim at index 1
param_shape.insert(1, 1)
for idx, x in enumerate(self.axis):
self.axis[idx] = x + 1 # Account for added dimension
if self.scale:
self.gamma = self.add_weight(
name='gamma',
shape=param_shape,
dtype=param_dtype,
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
trainable=True)
else:
self.gamma = None
if self.fused:
self._gamma_const = array_ops.constant(
1.0, dtype=param_dtype, shape=param_shape)
if self.center:
self.beta = self.add_weight(
name='beta',
shape=param_shape,
dtype=param_dtype,
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
trainable=True)
else:
self.beta = None
if self.fused:
self._beta_const = array_ops.constant(
0.0, dtype=param_dtype, shape=param_shape)
try:
# Disable variable partitioning when creating the moving mean and variance
if hasattr(self, '_scope') and self._scope:
partitioner = self._scope.partitioner
self._scope.set_partitioner(None)
else:
partitioner = None
self.moving_mean = self.add_weight(
name='moving_mean',
shape=param_shape,
dtype=param_dtype,
initializer=self.moving_mean_initializer,
synchronization=tf_variables.VariableSynchronization.ON_READ,
trainable=False,
aggregation=tf_variables.VariableAggregation.MEAN)
self.moving_variance = self.add_weight(
name='moving_variance',
shape=param_shape,
dtype=param_dtype,
initializer=self.moving_variance_initializer,
synchronization=tf_variables.VariableSynchronization.ON_READ,
trainable=False,
aggregation=tf_variables.VariableAggregation.MEAN)
if self.renorm:
# Create variables to maintain the moving mean and standard deviation.
# These are used in training and thus are different from the moving
# averages above. The renorm variables are colocated with moving_mean
# and moving_variance.
# NOTE: below, the outer `with device` block causes the current device
# stack to be cleared. The nested ones use a `lambda` to set the desired
# device and ignore any devices that may be set by the custom getter.
def _renorm_variable(name, shape):
var = self.add_weight(
name=name,
shape=shape,
dtype=param_dtype,
initializer=init_ops.zeros_initializer(),
synchronization=tf_variables.VariableSynchronization.ON_READ,
trainable=False,
aggregation=tf_variables.VariableAggregation.MEAN)
return var
with distribution_strategy_context.get_distribution_strategy(
).colocate_vars_with(self.moving_mean):
self.renorm_mean = _renorm_variable('renorm_mean', param_shape)
self.renorm_mean_weight = _renorm_variable('renorm_mean_weight', ())
# We initialize renorm_stddev to 0, and maintain the (0-initialized)
# renorm_stddev_weight. This allows us to (1) mix the average
# stddev with the minibatch stddev early in training, and (2) compute
# the unbiased average stddev by dividing renorm_stddev by the weight.
with distribution_strategy_context.get_distribution_strategy(
).colocate_vars_with(self.moving_variance):
self.renorm_stddev = _renorm_variable('renorm_stddev', param_shape)
self.renorm_stddev_weight = _renorm_variable('renorm_stddev_weight',
())
finally:
if partitioner:
self._scope.set_partitioner(partitioner)
self.built = True
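  # Worked example of the parameter-shape logic above (hypothetical shapes):
  # for a 4D NHWC input of shape (32, 28, 28, 64) with axis=[3],
  # axis_to_dim is {3: 64} and param_shape is (64,); for multi-axis
  # normalization with axis=[1, 3] on the same input, param_shape
  # becomes [1, 28, 1, 64].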
def _assign_moving_average(self, variable, value, momentum):
with ops.name_scope(None, 'AssignMovingAvg',
[variable, value, momentum]) as scope:
with ops.colocate_with(variable):
decay = ops.convert_to_tensor(1.0 - momentum, name='decay')
if decay.dtype != variable.dtype.base_dtype:
decay = math_ops.cast(decay, variable.dtype.base_dtype)
update_delta = (variable - math_ops.cast(value, variable.dtype)) * decay
return state_ops.assign_sub(variable, update_delta, name=scope)
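  # Numeric sketch of the moving-average update above (hypothetical values):
  # with momentum=0.99, variable=1.0 and value=0.0, decay is 0.01, so
  # update_delta = (1.0 - 0.0) * 0.01 = 0.01 and the variable becomes 0.99.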
def _fused_batch_norm(self, inputs, training):
"""Returns the output of fused batch norm."""
beta = self.beta if self.center else self._beta_const
gamma = self.gamma if self.scale else self._gamma_const
def _fused_batch_norm_training():
return nn.fused_batch_norm(
inputs,
gamma,
beta,
epsilon=self.epsilon,
data_format=self._data_format)
def _fused_batch_norm_inference():
return nn.fused_batch_norm(
inputs,
gamma,
beta,
mean=self.moving_mean,
variance=self.moving_variance,
epsilon=self.epsilon,
is_training=False,
data_format=self._data_format)
output, mean, variance = tf_utils.smart_cond(
training, _fused_batch_norm_training, _fused_batch_norm_inference)
if not self._bessels_correction_test_only:
# Remove Bessel's correction to be consistent with non-fused batch norm.
# Note that the variance computed by fused batch norm is
# with Bessel's correction.
sample_size = math_ops.cast(
array_ops.size(inputs) / array_ops.size(variance), variance.dtype)
factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size
variance *= factor
training_value = tf_utils.constant_value(training)
if training_value is None:
momentum = tf_utils.smart_cond(training,
lambda: self.momentum,
lambda: 1.0)
else:
momentum = ops.convert_to_tensor(self.momentum)
if training_value or training_value is None:
mean_update = self._assign_moving_average(self.moving_mean, mean,
momentum)
variance_update = self._assign_moving_average(self.moving_variance,
variance, momentum)
self.add_update(mean_update, inputs=True)
self.add_update(variance_update, inputs=True)
return output
def _renorm_correction_and_moments(self, mean, variance, training):
"""Returns the correction and update values for renorm."""
stddev = math_ops.sqrt(variance + self.epsilon)
# Compute the average mean and standard deviation, as if they were
# initialized with this batch's moments.
mixed_renorm_mean = (self.renorm_mean +
(1. - self.renorm_mean_weight) * mean)
mixed_renorm_stddev = (self.renorm_stddev +
(1. - self.renorm_stddev_weight) * stddev)
# Compute the corrections for batch renorm.
r = stddev / mixed_renorm_stddev
d = (mean - mixed_renorm_mean) / mixed_renorm_stddev
# Ensure the corrections use pre-update moving averages.
with ops.control_dependencies([r, d]):
mean = array_ops.identity(mean)
stddev = array_ops.identity(stddev)
rmin, rmax, dmax = [self.renorm_clipping.get(key)
for key in ['rmin', 'rmax', 'dmax']]
if rmin is not None:
r = math_ops.maximum(r, rmin)
if rmax is not None:
r = math_ops.minimum(r, rmax)
if dmax is not None:
d = math_ops.maximum(d, -dmax)
d = math_ops.minimum(d, dmax)
# When not training, use r=1, d=0.
r = tf_utils.smart_cond(training, lambda: r, lambda: array_ops.ones_like(r))
d = tf_utils.smart_cond(training,
lambda: d,
lambda: array_ops.zeros_like(d))
def _update_renorm_variable(var, weight, value):
"""Updates a moving average and weight, returns the unbiased value."""
value = array_ops.identity(value)
def _do_update():
"""Updates the var and weight, returns their updated ratio."""
# Update the variables without zero debiasing. The debiasing will be
# accomplished by dividing the exponential moving average by the weight.
# For example, after a single update, the moving average would be
# (1-decay) * value. and the weight will be 1-decay, with their ratio
# giving the value.
# Make sure the weight is not updated until before r and d computation.
with ops.control_dependencies([value]):
weight_value = array_ops.constant(1., dtype=weight.dtype)
new_var = self._assign_moving_average(var, value, self.renorm_momentum)
new_weight = self._assign_moving_average(weight, weight_value,
self.renorm_momentum)
# TODO(yuefengz): the updates to var and weighted can not be batched
# together if we fetch their updated values here. Consider calculating
# new values and delaying the updates.
return new_var / new_weight
def _fake_update():
return array_ops.identity(var)
return tf_utils.smart_cond(training, _do_update, _fake_update)
# TODO(yuefengz): colocate the operations
new_mean = _update_renorm_variable(self.renorm_mean,
self.renorm_mean_weight, mean)
new_stddev = _update_renorm_variable(self.renorm_stddev,
self.renorm_stddev_weight, stddev)
# Make sqrt(moving_variance + epsilon) = new_stddev.
new_variance = math_ops.square(new_stddev) - self.epsilon
return (r, d, new_mean, new_variance)
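  # Sketch of the correction above (hypothetical numbers): if the batch
  # stddev is 2.0 and the mixed moving stddev is 1.0, then r = 2.0; with
  # renorm_clipping={'rmax': 1.5} it is clipped to 1.5, and outside of
  # training r and d fall back to 1 and 0 respectively.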
def call(self, inputs, training=None):
if training is None:
training = K.learning_phase()
in_eager_mode = context.executing_eagerly()
if self.virtual_batch_size is not None:
# Virtual batches (aka ghost batches) can be simulated by reshaping the
# Tensor and reusing the existing batch norm implementation
original_shape = [-1] + inputs.shape.as_list()[1:]
expanded_shape = [self.virtual_batch_size, -1] + original_shape[1:]
# Will cause errors if virtual_batch_size does not divide the batch size
inputs = array_ops.reshape(inputs, expanded_shape)
def undo_virtual_batching(outputs):
outputs = array_ops.reshape(outputs, original_shape)
return outputs
if self.fused:
outputs = self._fused_batch_norm(inputs, training=training)
if self.virtual_batch_size is not None:
# Currently never reaches here since fused_batch_norm does not support
# virtual batching
outputs = undo_virtual_batching(outputs)
return outputs
# Compute the axes along which to reduce the mean / variance
input_shape = inputs.get_shape()
ndims = len(input_shape)
reduction_axes = [i for i in range(ndims) if i not in self.axis]
if self.virtual_batch_size is not None:
del reduction_axes[1] # Do not reduce along virtual batch dim
# Broadcasting only necessary for single-axis batch norm where the axis is
# not the last dimension
broadcast_shape = [1] * ndims
broadcast_shape[self.axis[0]] = input_shape.dims[self.axis[0]].value
def _broadcast(v):
if (v is not None and
len(v.get_shape()) != ndims and
reduction_axes != list(range(ndims - 1))):
return array_ops.reshape(v, broadcast_shape)
return v
scale, offset = _broadcast(self.gamma), _broadcast(self.beta)
def _compose_transforms(scale, offset, then_scale, then_offset):
if then_scale is not None:
scale *= then_scale
offset *= then_scale
if then_offset is not None:
offset += then_offset
return (scale, offset)
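    # Worked example of the composition above (illustrative only): composing
    # a renorm correction (r, d) with (gamma, beta) yields
    # (r * gamma, d * gamma + beta), matching the renorm comment further down.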
# Determine a boolean value for `training`: could be True, False, or None.
training_value = tf_utils.constant_value(training)
if training_value is not False:
if self.adjustment:
adj_scale, adj_bias = self.adjustment(array_ops.shape(inputs))
# Adjust only during training.
adj_scale = tf_utils.smart_cond(training,
lambda: adj_scale,
lambda: array_ops.ones_like(adj_scale))
adj_bias = tf_utils.smart_cond(training,
lambda: adj_bias,
lambda: array_ops.zeros_like(adj_bias))
scale, offset = _compose_transforms(adj_scale, adj_bias, scale, offset)
# Some of the computations here are not necessary when training==False
# but not a constant. However, this makes the code simpler.
keep_dims = self.virtual_batch_size is not None or len(self.axis) > 1
mean, variance = nn.moments(inputs, reduction_axes, keep_dims=keep_dims)
moving_mean = self.moving_mean
moving_variance = self.moving_variance
mean = tf_utils.smart_cond(training,
lambda: mean,
lambda: moving_mean)
variance = tf_utils.smart_cond(training,
lambda: variance,
lambda: moving_variance)
if self.virtual_batch_size is not None:
# This isn't strictly correct since in ghost batch norm, you are
# supposed to sequentially update the moving_mean and moving_variance
# with each sub-batch. However, since the moving statistics are only
# used during evaluation, it is more efficient to just update in one
# step and should not make a significant difference in the result.
new_mean = math_ops.reduce_mean(mean, axis=1, keepdims=True)
new_variance = math_ops.reduce_mean(variance, axis=1, keepdims=True)
else:
new_mean, new_variance = mean, variance
if self.renorm:
r, d, new_mean, new_variance = self._renorm_correction_and_moments(
new_mean, new_variance, training)
# When training, the normalized values (say, x) will be transformed as
# x * gamma + beta without renorm, and (x * r + d) * gamma + beta
# = x * (r * gamma) + (d * gamma + beta) with renorm.
r = _broadcast(array_ops.stop_gradient(r, name='renorm_r'))
d = _broadcast(array_ops.stop_gradient(d, name='renorm_d'))
scale, offset = _compose_transforms(r, d, scale, offset)
def _do_update(var, value):
if in_eager_mode and not self.trainable:
return
return self._assign_moving_average(var, value, self.momentum)
mean_update = tf_utils.smart_cond(
training,
lambda: _do_update(self.moving_mean, new_mean),
lambda: self.moving_mean)
variance_update = tf_utils.smart_cond(
training,
lambda: _do_update(self.moving_variance, new_variance),
lambda: self.moving_variance)
if not context.executing_eagerly():
self.add_update(mean_update, inputs=True)
self.add_update(variance_update, inputs=True)
else:
mean, variance = self.moving_mean, self.moving_variance
mean = math_ops.cast(mean, inputs.dtype)
variance = math_ops.cast(variance, inputs.dtype)
if offset is not None:
offset = math_ops.cast(offset, inputs.dtype)
outputs = nn.batch_normalization(inputs,
_broadcast(mean),
_broadcast(variance),
offset,
scale,
self.epsilon)
# If some components of the shape got lost due to adjustments, fix that.
outputs.set_shape(input_shape)
if self.virtual_batch_size is not None:
outputs = undo_virtual_batching(outputs)
return outputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'axis': self.axis,
'momentum': self.momentum,
'epsilon': self.epsilon,
'center': self.center,
'scale': self.scale,
'beta_initializer': initializers.serialize(self.beta_initializer),
'gamma_initializer': initializers.serialize(self.gamma_initializer),
'moving_mean_initializer':
initializers.serialize(self.moving_mean_initializer),
'moving_variance_initializer':
initializers.serialize(self.moving_variance_initializer),
'beta_regularizer': regularizers.serialize(self.beta_regularizer),
'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
'beta_constraint': constraints.serialize(self.beta_constraint),
'gamma_constraint': constraints.serialize(self.gamma_constraint)
}
# Only add TensorFlow-specific parameters if they are set, so as to preserve
# model compatibility with external Keras.
if self.renorm:
config['renorm'] = True
config['renorm_clipping'] = self.renorm_clipping
config['renorm_momentum'] = self.renorm_momentum
if self.virtual_batch_size is not None:
config['virtual_batch_size'] = self.virtual_batch_size
# Note: adjustment is not serializable.
if self.adjustment is not None:
logging.warning('The `adjustment` function of this `BatchNormalization` '
'layer cannot be serialized and has been omitted from '
'the layer config. It will not be included when '
're-creating the layer from the saved config.')
base_config = super(BatchNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| apache-2.0 | -8,833,905,877,213,097,000 | 43.64881 | 80 | 0.639748 | false |
MacHu-GWU/uszipcode-project | uszipcode/search.py | 1 | 31542 | # -*- coding: utf-8 -*-
"""
This module allows developers to query zipcodes with a super clean API.
"""
import heapq
import math
import sys
from collections import OrderedDict
import sqlalchemy as sa
from pathlib_mate import Path
from six import integer_types, string_types
from sqlalchemy.orm import sessionmaker
from .db import (
is_simple_db_file_exists, is_db_file_exists,
connect_to_simple_zipcode_db, connect_to_zipcode_db,
download_simple_db_file, download_db_file
)
from .model import SimpleZipcode, Zipcode, ZipcodeType
from .pkg.fuzzywuzzy.process import extract, extractOne
from .state_abbr import STATE_ABBR_SHORT_TO_LONG, STATE_ABBR_LONG_TO_SHORT
SORT_BY_DIST = "dist"
"""
A string for the ``sort_by`` argument. Orders the result by distance from a given coordinate.
"""
DEFAULT_LIMIT = 5
"""
default number of results to return.
"""
HOME = Path.home().abspath
HOME_USZIPCODE = Path(HOME, ".uszipcode").abspath
class SearchEngine(object):
"""
Zipcode Search Engine.
:type simple_zipcode: bool
:param simple_zipcode: default True, if True, use the simple zipcode
db. Rich Demographics, Real Estate, Employment, Education info is not
available. If False, use the rich info database.
:type db_file_dir: str
:param db_file_dir: where you want to download the sqlite database to. This
property allows you to customize where you want to store the data file
locally. by default it is ${HOME}/.uszipcode
:type download_url: str
:param download_url: where you want to download the sqlite database file from.
This property allows you to upload the .sqlite file to your private file
        host and download from it, in case the default download url fails.
    :param engine: a sqlalchemy engine object. It allows you to use any
backend database.
Usage::
>>> search = SearchEngine()
>>> zipcode = search.by_zipcode("10001")
Context Manager::
>>> with SearchEngine() as search:
... for zipcode in search.by_coordinates(lat, lng, radius):
... # do what every you want
:meth:`SearchEngine.query` provides mass options to customize your query.
:attr:`SearchEngine.ses` is a ``sqlalchemy.orm.Session`` object, you can
use it for query. For example::
>>> from uszipcode import SearchEngine, SimpleZipcode
>>> search = SearchEngine()
>>> search.ses.query(SimpleZipcode).filter(SimpleZipcode.zipcode=="10001")
.. note::
        :class:`SearchEngine` is not thread-safe. You should create a separate
        instance for each thread.
"""
_city_list = None
_state_list = None
"""
all available state list, in long format
"""
_state_to_city_mapper = None
_city_to_state_mapper = None
def __init__(self,
simple_zipcode=True,
db_file_dir=HOME_USZIPCODE,
download_url=None,
engine=None):
Path(db_file_dir).mkdir(exist_ok=True)
if engine is None:
if simple_zipcode:
if not is_simple_db_file_exists(db_file_dir):
download_simple_db_file(db_file_dir, download_url=download_url)
engine = connect_to_simple_zipcode_db(db_file_dir)
self.zip_klass = SimpleZipcode
else: # pragma: no cover
if not is_db_file_exists(db_file_dir):
download_db_file(db_file_dir, download_url=download_url)
engine = connect_to_zipcode_db(db_file_dir)
self.zip_klass = Zipcode
self.engine = engine
self.ses = sessionmaker(bind=engine)()
def __enter__(self): # pragma: no cover
return self
def __exit__(self, *exc_info): # pragma: no cover
self.close()
def __del__(self): # pragma: no cover
# Cleanup connection if still open
if self.ses:
self.close()
def close(self):
"""
close database connection.
"""
self.ses.close()
def _get_cache_data(self):
self._city_list = set()
self._state_list = list()
self._state_to_city_mapper = dict()
self._city_to_state_mapper = dict()
for major_city, state in self.ses.query(self.zip_klass.major_city, self.zip_klass.state):
if major_city is not None:
self._city_list.add(major_city)
if state is not None:
state = state.upper()
try:
self._state_to_city_mapper[state].append(major_city)
except:
self._state_to_city_mapper[state] = [major_city, ]
try:
self._city_to_state_mapper[major_city].append(state)
except:
self._city_to_state_mapper[major_city] = [state, ]
self._city_list = list(self._city_list)
self._city_list.sort()
self._state_list = list(STATE_ABBR_LONG_TO_SHORT)
self._state_list.sort()
self._state_to_city_mapper = OrderedDict(
sorted(
self._state_to_city_mapper.items(),
key=lambda x: x[0]
)
)
for v in self._state_to_city_mapper.values():
v.sort()
self._city_to_state_mapper = OrderedDict(
sorted(
self._city_to_state_mapper.items(),
key=lambda x: x[0]
)
)
for v in self._city_to_state_mapper.values():
v.sort()
@property
def city_list(self): # pragma: no cover
"""
Return all available city name.
"""
if self._city_list is None:
self._get_cache_data()
return self._city_list
@property
def state_list(self): # pragma: no cover
"""
Return all available state name.
"""
if self._state_list is None:
self._get_cache_data()
return self._state_list
@property
def state_to_city_mapper(self): # pragma: no cover
if self._state_to_city_mapper is None:
self._get_cache_data()
return self._state_to_city_mapper
@property
def city_to_state_mapper(self): # pragma: no cover
if self._city_to_state_mapper is None:
self._get_cache_data()
return self._city_to_state_mapper
def find_state(self, state, best_match=True, min_similarity=70):
"""
        Fuzzy search for the correct state.
:param best_match: bool, when True, only the best matched state
            will be returned; otherwise, all matching states are returned.
"""
result_state_short_list = list()
# check if it is a abbreviate name
if state.upper() in STATE_ABBR_SHORT_TO_LONG:
result_state_short_list.append(state.upper())
# if not, find out what is the state that user looking for
else:
if best_match:
state_long, confidence = extractOne(state, self.state_list)
if confidence >= min_similarity:
result_state_short_list.append(
STATE_ABBR_LONG_TO_SHORT[state_long])
else:
for state_long, confidence in extract(state, self.state_list):
if confidence >= min_similarity:
result_state_short_list.append(
STATE_ABBR_LONG_TO_SHORT[state_long])
if len(result_state_short_list) == 0:
message = ("'%s' is not a valid state name, use 2 letter "
"short name or correct full name please.")
raise ValueError(message % state)
return result_state_short_list
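    # Usage sketch (illustrative; assumes a SearchEngine instance ``search``):
    #   >>> search.find_state("CA")          # -> ["CA"], abbreviations pass through
    #   >>> search.find_state("california")  # -> ["CA"], typically resolved by fuzzy match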
def find_city(self, city, state=None, best_match=True, min_similarity=70):
"""
        Fuzzy search for the correct city.
:param city: city name.
:param state: search city in specified state.
:param best_match: bool, when True, only the best matched city
            will be returned; otherwise, all matching cities are returned.
        If ``state`` is given, only cities in that state are searched;
        otherwise, all cities nationwide are searched.
"""
# find out what is the city that user looking for
if state:
state_sort = self.find_state(state, best_match=True)[0]
city_pool = self.state_to_city_mapper[state_sort.upper()]
else:
city_pool = self.city_list
result_city_list = list()
if best_match:
city, confidence = extractOne(city, city_pool)
if confidence >= min_similarity:
result_city_list.append(city)
else:
for city, confidence in extract(city, city_pool):
if confidence >= min_similarity:
result_city_list.append(city)
if len(result_city_list) == 0:
raise ValueError("'%s' is not a valid city name" % city)
return result_city_list
@staticmethod
def _resolve_sort_by(sort_by, flag_radius_query):
"""
Result ``sort_by`` argument.
:param sort_by: str, or sqlalchemy ORM attribute.
:param flag_radius_query:
:return:
"""
if sort_by is None:
if flag_radius_query:
sort_by = SORT_BY_DIST
elif isinstance(sort_by, string_types):
if sort_by.lower() == SORT_BY_DIST:
if flag_radius_query is False:
msg = "`sort_by` arg can be 'dist' only under distance based query!"
raise ValueError(msg)
sort_by = SORT_BY_DIST
elif sort_by not in SimpleZipcode.__table__.columns:
msg = "`sort_by` arg has to be one of the Zipcode attribute or 'dist'!"
raise ValueError(msg)
else:
sort_by = sort_by.name
return sort_by
def query(self,
zipcode=None,
prefix=None,
pattern=None,
city=None,
state=None,
lat=None,
lng=None,
radius=None,
population_lower=None,
population_upper=None,
population_density_lower=None,
population_density_upper=None,
land_area_in_sqmi_lower=None,
land_area_in_sqmi_upper=None,
water_area_in_sqmi_lower=None,
water_area_in_sqmi_upper=None,
housing_units_lower=None,
housing_units_upper=None,
occupied_housing_units_lower=None,
occupied_housing_units_upper=None,
median_home_value_lower=None,
median_home_value_upper=None,
median_household_income_lower=None,
median_household_income_upper=None,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.zipcode.name,
ascending=True,
returns=DEFAULT_LIMIT):
"""
Query zipcode the simple way.
        :param zipcode: int or str, find the exact matching zipcode. Will be
            automatically zero-padded to 5 digits
:param prefix: str, zipcode prefix.
:param pattern: str, zipcode wildcard.
:param city: str, city name.
:param state: str, state name, two letter abbr or state full name.
:param lat: latitude.
:param lng: longitude.
:param radius: number, only returns zipcodes within a specific circle.
:param population_lower:
:param population_upper:
:param population_density_lower:
:param population_density_upper:
:param land_area_in_sqmi_lower:
:param land_area_in_sqmi_upper:
:param water_area_in_sqmi_lower:
:param water_area_in_sqmi_upper:
:param housing_units_lower:
:param housing_units_upper:
:param occupied_housing_units_lower:
:param occupied_housing_units_upper:
:param median_home_value_lower:
:param median_home_value_upper:
:param median_household_income_lower:
:param median_household_income_upper:
:param zipcode_type: str or :class`~uszipcode.model.ZipcodeType` attribute.
if None, allows to return any type of zipcode.
if specified, only return specified zipcode type.
:param sort_by: str or :class:`~uszipcode.model.Zipcode` attribute,
specified which field is used for sorting.
:param ascending: bool, True means ascending, False means descending.
:param returns: int or None, limit the number of result to returns.
:return: list of :class:`~uszipcode.model.SimpleZipcode` or
:class:`~uszipcode.model.Zipcode`.
"""
filters = list()
# by coordinates
_n_radius_param_not_null = sum([
isinstance(lat, (integer_types, float)),
isinstance(lng, (integer_types, float)),
isinstance(radius, (integer_types, float)),
])
if _n_radius_param_not_null == 3:
flag_radius_query = True
if radius <= 0: # pragma: no cover
                raise ValueError("`radius` parameter can't be less than 0!")
elif radius <= 50: # pragma: no cover
radius_coef = 1.05
elif radius <= 100: # pragma: no cover
radius_coef = 1.10
elif radius <= 250: # pragma: no cover
radius_coef = 1.25
elif radius <= 500: # pragma: no cover
radius_coef = 1.5
else: # pragma: no cover
radius_coef = 2.0
if radius >= 250: # pragma: no cover
msg = ("\nwarning! search within radius >= 250 miles "
"may greatly slow down the query!")
sys.stdout.write(msg)
# define lat lng boundary
dist_btwn_lat_deg = 69.172
            # latitude must be converted to radians before taking the cosine
            dist_btwn_lon_deg = math.cos(math.radians(lat)) * 69.172
lat_degr_rad = abs(radius * radius_coef / dist_btwn_lat_deg)
lon_degr_rad = abs(radius * radius_coef / dist_btwn_lon_deg)
lat_lower = lat - lat_degr_rad
lat_upper = lat + lat_degr_rad
lng_lower = lng - lon_degr_rad
lng_upper = lng + lon_degr_rad
filters.append(self.zip_klass.lat >= lat_lower)
filters.append(self.zip_klass.lat <= lat_upper)
filters.append(self.zip_klass.lng >= lng_lower)
filters.append(self.zip_klass.lng <= lng_upper)
elif _n_radius_param_not_null == 0:
flag_radius_query = False
else:
msg = "You can either specify all of `lat`, `lng`, `radius` or none of them"
raise ValueError(msg)
# by city or state
if (state is not None) and (city is not None):
try:
state = self.find_state(state, best_match=True)[0]
city = self.find_city(city, state, best_match=True)[0]
filters.append(self.zip_klass.state == state)
filters.append(self.zip_klass.major_city == city)
except ValueError: # pragma: no cover
return []
elif (state is not None):
try:
state = self.find_state(state, best_match=True)[0]
filters.append(self.zip_klass.state == state)
except ValueError: # pragma: no cover
return []
elif (city is not None):
try:
city = self.find_city(city, None, best_match=True)[0]
filters.append(self.zip_klass.major_city == city)
except ValueError: # pragma: no cover
return []
else:
pass
# by common filter
if sum([zipcode is None, prefix is None, pattern is None]) <= 1:
msg = "You can only specify one of the `zipcode`, `prefix` and `pattern`!"
raise ValueError(msg)
if zipcode_type is not None:
filters.append(self.zip_klass.zipcode_type == zipcode_type)
if zipcode is not None:
filters.append(self.zip_klass.zipcode == str(zipcode))
if prefix is not None:
filters.append(self.zip_klass.zipcode.startswith(str(prefix)))
if pattern is not None:
filters.append(self.zip_klass.zipcode.like(
"%%%s%%" % str(pattern)))
if population_lower is not None:
filters.append(self.zip_klass.population >= population_lower)
if population_upper is not None:
filters.append(self.zip_klass.population <= population_upper)
if population_density_lower is not None:
filters.append(self.zip_klass.population_density >=
population_density_lower)
if population_density_upper is not None:
filters.append(self.zip_klass.population_density <=
population_density_upper)
if land_area_in_sqmi_lower is not None:
filters.append(self.zip_klass.land_area_in_sqmi >=
land_area_in_sqmi_lower)
if land_area_in_sqmi_upper is not None:
filters.append(self.zip_klass.land_area_in_sqmi <=
land_area_in_sqmi_upper)
if water_area_in_sqmi_lower is not None:
filters.append(self.zip_klass.water_area_in_sqmi >=
water_area_in_sqmi_lower)
if water_area_in_sqmi_upper is not None:
filters.append(self.zip_klass.water_area_in_sqmi <=
water_area_in_sqmi_upper)
if housing_units_lower is not None:
filters.append(self.zip_klass.housing_units >= housing_units_lower)
if housing_units_upper is not None:
filters.append(self.zip_klass.housing_units <= housing_units_upper)
if occupied_housing_units_lower is not None:
filters.append(self.zip_klass.occupied_housing_units >=
occupied_housing_units_lower)
if occupied_housing_units_upper is not None:
filters.append(self.zip_klass.occupied_housing_units <=
occupied_housing_units_upper)
if median_home_value_lower is not None:
filters.append(self.zip_klass.median_home_value >=
median_home_value_lower)
if median_home_value_upper is not None:
filters.append(self.zip_klass.median_home_value <=
median_home_value_upper)
if median_household_income_lower is not None:
filters.append(self.zip_klass.median_household_income >=
median_household_income_lower)
if median_household_income_upper is not None:
filters.append(self.zip_klass.median_household_income <=
median_household_income_upper)
# --- solve coordinates and other search sort_by conflict ---
sort_by = self._resolve_sort_by(sort_by, flag_radius_query)
q = self.ses.query(self.zip_klass).filter(*filters)
if sort_by is None:
pass
elif sort_by == SORT_BY_DIST:
pass
else:
field = getattr(self.zip_klass, sort_by)
if ascending:
by = field.asc()
else:
by = field.desc()
q = q.order_by(by)
if flag_radius_query:
# if we query by radius, then ignore returns limit before the
# distance calculation, and then manually limit the returns
pairs = list()
for z in q:
dist = z.dist_from(lat, lng)
if dist <= radius:
pairs.append((dist, z))
if sort_by == SORT_BY_DIST:
if ascending:
if returns:
pairs_new = heapq.nsmallest(
returns, pairs, key=lambda x: x[0])
else:
pairs_new = list(sorted(pairs, key=lambda x: x[0]))
else:
if returns:
pairs_new = heapq.nlargest(
returns, pairs, key=lambda x: x[0])
else:
pairs_new = list(
sorted(pairs, key=lambda x: x[0], reverse=True))
return [z for _, z in pairs_new]
else:
return [z for _, z in pairs[:returns]]
else:
if returns:
return q.limit(returns).all()
else:
return q.all()
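    # Usage sketch for ``query`` (illustrative; assumes a SearchEngine
    # instance ``search`` and hypothetical filter values):
    #   >>> search.query(state="CA", population_lower=10000,
    #   ...              sort_by="population", ascending=False, returns=5)
    # returns at most five California zipcodes with population >= 10000,
    # ordered by population in descending order.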
def by_zipcode(self,
zipcode,
zipcode_type=None,
zero_padding=True):
"""
        Search zipcode by an exact 5-digit zipcode. No zero padding is needed.
        :param zipcode: int or str, the zipcode will be automatically
            zero-padded to 5 digits.
:param zipcode_type: str or :class`~uszipcode.model.ZipcodeType` attribute.
by default, it returns any zipcode type.
:param zero_padding: bool, toggle on and off automatic zero padding.
"""
if zero_padding:
zipcode = str(zipcode).zfill(5)
else: # pragma: no cover
zipcode = str(zipcode)
res = self.query(
zipcode=zipcode,
sort_by=None,
returns=1,
zipcode_type=zipcode_type,
)
if len(res):
return res[0]
else:
return self.zip_klass()
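    # Example (illustrative): ``search.by_zipcode(501)`` zero-pads the input
    # and looks up "00501"; when nothing matches, an empty zipcode object is
    # returned rather than None.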
def by_prefix(self,
prefix,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.zipcode.name,
ascending=True,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by first N digits.
Returns multiple results.
"""
return self.query(
prefix=prefix,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_pattern(self,
pattern,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.zipcode.name,
ascending=True,
returns=DEFAULT_LIMIT):
"""
Search zipcode by wildcard.
Returns multiple results.
"""
return self.query(
pattern=pattern,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_city(self,
city,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.zipcode.name,
ascending=True,
returns=DEFAULT_LIMIT):
"""
        Search zipcode information by fuzzy city name.
        The engine uses fuzzy matching to guess which city you want.
"""
return self.query(
city=city,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_state(self,
state,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.zipcode.name,
ascending=True,
returns=DEFAULT_LIMIT):
"""
        Search zipcode information by fuzzy state name.
        The engine uses fuzzy matching to guess which state you want.
"""
return self.query(
state=state,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_city_and_state(self,
city,
state,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.zipcode.name,
ascending=True,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by fuzzy city and state name.
        The engine uses fuzzy matching to guess which city and state you want.
"""
return self.query(
city=city,
state=state,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_coordinates(self,
lat,
lng,
radius=25.0,
zipcode_type=ZipcodeType.Standard,
sort_by=SORT_BY_DIST,
ascending=True,
returns=DEFAULT_LIMIT):
"""
        Search zipcode information near a coordinate on a map.
Returns multiple results.
:param lat: center latitude.
:param lng: center longitude.
:param radius: only returns zipcode within X miles from ``lat``, ``lng``.
        The implementation works as follows:
        1. Work out how many miles one degree of longitude and one degree of
           latitude represent at the center coordinate.
        2. Draw a rectangle centered on the given coordinate whose width and
           height are a bit more than twice the radius, and collect all
           zipcodes inside that rectangle.
        3. Compute the distance to each of those zipcodes and sort them by
           distance; any zipcode farther away than the given radius is
           discarded.
"""
return self.query(
lat=lat, lng=lng, radius=radius,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
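    # Example (illustrative coordinates): zipcodes within roughly 10 miles of
    # downtown Chicago, nearest first:
    #   >>> search.by_coordinates(41.88, -87.63, radius=10, returns=5)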
def by_population(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.population.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by population range.
"""
return self.query(
population_lower=lower,
population_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_population_density(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.population_density.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by population density range.
        ``population density`` is population per square mile of land.
"""
return self.query(
population_density_lower=lower,
population_density_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_land_area_in_sqmi(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.land_area_in_sqmi.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by land area / sq miles range.
"""
return self.query(
land_area_in_sqmi_lower=lower,
land_area_in_sqmi_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_water_area_in_sqmi(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.water_area_in_sqmi.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by water area / sq miles range.
"""
return self.query(
water_area_in_sqmi_lower=lower,
water_area_in_sqmi_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_housing_units(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.housing_units.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
        Search zipcode information by number of housing units.
"""
return self.query(
housing_units_lower=lower,
housing_units_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_occupied_housing_units(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.occupied_housing_units.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
        Search zipcode information by number of occupied housing units.
"""
return self.query(
occupied_housing_units_lower=lower,
occupied_housing_units_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_median_home_value(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.median_home_value.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by median home value.
"""
return self.query(
median_home_value_lower=lower,
median_home_value_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def by_median_household_income(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.median_household_income.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by median household income.
"""
return self.query(
median_household_income_lower=lower,
median_household_income_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
)
def inspect_raw_data(self, zipcode):
sql = "SELECT * FROM {} WHERE zipcode = '{}'".format(
self.zip_klass.__tablename__,
zipcode,
)
stmt = sa.text(sql)
return dict(self.engine.execute(stmt).fetchone())
| mit | 1,295,276,514,535,288,300 | 35.447552 | 97 | 0.535271 | false |
brunobord/meuhdb | meuhdb/tests/__init__.py | 1 | 1083 | from unittest import TestCase
from os import unlink
from tempfile import mkstemp
from meuhdb.core import MeuhDb
class InMemoryDatabase(TestCase):
def setUp(self):
self.db = MeuhDb() # in-memory DB
class InMemoryDatabaseData(InMemoryDatabase):
def setUp(self):
super(InMemoryDatabaseData, self).setUp()
self.db.set('one', {'name': 'Alice', 'good': True, 'chief': True})
self.db.set('two', {'name': 'Bob', 'good': True})
self.db.set('three', {'name': 'Carl', 'good': False})
class TempStorageDatabase(TestCase):
options = {}
def setUp(self):
self.fd, self.filename = mkstemp()
self.db = MeuhDb(self.filename, **self.options)
def tearDown(self):
unlink(self.filename)
class TempStorageDatabaseData(TempStorageDatabase):
def setUp(self):
super(TempStorageDatabaseData, self).setUp()
self.db.set('one', {'name': 'Alice', 'good': True, 'chief': True})
self.db.set('two', {'name': 'Bob', 'good': True})
self.db.set('three', {'name': 'Carl', 'good': False})
| mit | 3,061,921,066,519,527,000 | 27.5 | 74 | 0.625115 | false |
Genomon-Project/paplot | scripts/paplot/pmsignature.py | 1 | 8967 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 16 15:40:29 2016
@author: okada
$Id: pmsignature.py 205 2017-08-08 06:25:59Z aokada $
"""
########### js template
js_header = """(function() {
msig_data = {};
"""
js_footer = """
})();
Object.freeze(msig_data);
"""
js_dataset = """
msig_data.tooltip_format = {{
{tooltip_ref}
alt:{tooltip_alt},
strand:{tooltip_strand},
mutation_title:{mutation_title},
mutation_partial:{mutation_partial},
}};
msig_data.ref_reduce_rate = [1,1,1,1,1];
msig_data.label_colors = {{'A': '{color_A}', 'C': '{color_C}', 'G': '{color_G}', 'T': '{color_T}', 'plus': '{color_plus}', 'minus': '{color_minus}'}};
msig_data.signatures = [{signatures}];
msig_data.sig_colors = [{colors}];
msig_data.dataset_ref = [{dataset_ref}];
msig_data.dataset_alt = [{dataset_alt}];
msig_data.dataset_strand = [{dataset_strand}];
// [ID, signature, value]
msig_data.mutations = [{mutations}];
msig_data.mutation_count = [{mutation_count}];
msig_data.Ids = [{Ids}];
"""
js_tooltip_ref_template = "ref{index}:{tooltip_format},"
########### HTML template
html_integral_template = """<table>
<tr>
<td style="vertical-align: top;" ><div style="float: left;" id="div_rate"></div></td>
<td style="vertical-align: top;"><!-- legend --> <div style="float: left;" id='div_rate_legend_html'></div><div style="float: left;" id='div_rate_legend_svg'></div></td>
</tr>
<tr>
<td style="vertical-align: top;"><div style="float: left;" id="div_integral"></div></td>
<td style="vertical-align: top;"><!-- legend --> <div style="float: left;" id='div_integral_legend_html'></div><div style="float: left;" id='div_integral_legend_svg'></div></td>
</tr>
<tr>
<td colspan=2 style="padding-top: 20px;">
<p>View mode: <select id="chart_mode"></select></p>
<p>Sort by: <select id="chart_sort"></select></p>
</td>
</tr>
</table>
"""
########### functions
def output_html(params, config):
dataset = convert_tojs(params, config)
if dataset != None and dataset != {}:
create_html(dataset, params, config)
return dataset
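# Usage sketch (hypothetical paths/values; ``config`` is the parsed paplot
# configuration object and the keys mirror those read below):
#   params = {"data": "pmsignature.json", "dir": "out", "ellipsis": "pmsig",
#             "project": "demo", "title": "pmsignature"}
#   dataset = output_html(params, config)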
def convert_tojs(params, config):
import os
import json
import paplot.subcode.tools as tools
import paplot.convert as convert
import paplot.color as color
# data read
try:
json_data = json.load(open(params["data"]))
except Exception as e:
        print("failed to open data %s, %s" % (params["data"], e))
return None
key_id_list = tools.config_getstr(config, "result_format_pmsignature", "key_id")
key_ref = tools.config_getstr(config, "result_format_pmsignature", "key_ref")
key_alt = tools.config_getstr(config, "result_format_pmsignature", "key_alt")
key_strand = tools.config_getstr(config, "result_format_pmsignature", "key_strand")
key_mutations = tools.config_getstr(config, "result_format_pmsignature", "key_mutation")
key_mutation_count = tools.config_getstr(config, "result_format_pmsignature", "key_mutation_count")
sig_num = len(json_data[key_ref])
if sig_num == 0:
print ("no data %s" % params["data"])
return {}
# signature names
signature_list = []
for s in range(sig_num):
signature_list.append("Signature %d" % (s+1))
# each signature colors
sig_color_list = color.create_color_array(sig_num, color.r_set2)
# use background?
if tools.config_getboolean(config, "result_format_pmsignature", "background"):
signature_list.append("Background ")
sig_color_list.append(color.r_set2_gray)
# Id list
id_txt = ""
if key_id_list in json_data:
id_txt = convert.list_to_text(json_data[key_id_list])
# mutations
mutations_txt = ""
if key_mutations in json_data:
for m in json_data[key_mutations]:
mutations_txt += "[%d,%d,%f]," % (m[0],m[1],m[2])
# signature
dataset_ref = ""
for sig in json_data[key_ref]:
tmp = ""
for sub in sig:
tmp += "[" + ",".join(map(str, sub)) + "],"
dataset_ref += ("[" + tmp + "],")
dataset_alt = ""
for sig in json_data[key_alt]:
tmp = ""
for sub in sig:
tmp += "[" + ",".join(map(str, sub)) + "],"
dataset_alt += ("[" + tmp + "],")
dataset_strand = ""
for sig in json_data[key_strand]:
dataset_strand += "[" + ",".join(map(str, sig)) + "],"
# tooltips
# for ref
keys_di = {"a": "", "c": "", "g": "", "t": "", "ca": "", "cg": "", "ct": "", "ta": "", "tc": "", "tg": "", "plus": "", "minus": "", "id": "", "sig":""}
tooltip_refs_txt = ""
for r in range(len(json_data[key_ref][0])):
tooltip_refs_txt += js_tooltip_ref_template.format(
index = r, tooltip_format = convert.pyformat_to_jstooltip_text(keys_di, config, "pmsignature", "", "tooltip_format_ref")
)
mutation_count_txt = ""
if (key_mutation_count != "") and (key_mutation_count in json_data.keys()):
for v in json_data[key_mutation_count]:
mutation_count_txt += "%d," % v
# output
sig_num_sift = 0
if tools.config_getboolean(config, "result_format_pmsignature", "background"):
sig_num_sift = 1
ellipsis = "%s%d" % (params["ellipsis"], (sig_num + sig_num_sift))
js_file = "data_%s.js" % ellipsis
html_file = "graph_%s.html" % ellipsis
f = open(params["dir"] + "/" + js_file, "w")
f.write(js_header \
+ js_dataset.format(Ids = id_txt, \
color_A = tools.config_getstr(config, "pmsignature", "color_A", "#06B838"), \
color_C = tools.config_getstr(config, "pmsignature", "color_C", "#609CFF"), \
color_G = tools.config_getstr(config, "pmsignature", "color_G", "#B69D02"), \
color_T = tools.config_getstr(config, "pmsignature", "color_T", "#F6766D"), \
color_plus = tools.config_getstr(config, "pmsignature", "color_plus", "#00BEC3"), \
color_minus = tools.config_getstr(config, "pmsignature", "color_minus", "#F263E2"), \
signatures = convert.list_to_text(signature_list), \
colors = convert.list_to_text(sig_color_list), \
mutations = mutations_txt, \
dataset_ref = dataset_ref, \
dataset_alt = dataset_alt, \
dataset_strand = dataset_strand, \
tooltip_ref = tooltip_refs_txt, \
tooltip_alt = convert.pyformat_to_jstooltip_text(keys_di, config, "pmsignature", "", "tooltip_format_alt"), \
tooltip_strand = convert.pyformat_to_jstooltip_text(keys_di, config, "pmsignature", "", "tooltip_format_strand"), \
mutation_title = convert.pyformat_to_jstooltip_text(keys_di, config, "pmsignature", "", "tooltip_format_mutation_title"), \
mutation_partial = convert.pyformat_to_jstooltip_text(keys_di, config, "pmsignature", "", "tooltip_format_mutation_partial"), \
mutation_count = mutation_count_txt, \
)
)
f_template = open(os.path.dirname(os.path.abspath(__file__)) + "/templates/data_pmsignature.js")
js_function = f_template.read()
f_template.close()
f.write(js_function)
f.write(js_footer)
f.close()
integral = True
if key_id_list == "" or key_mutations == "" or key_mutation_count == "":
integral = False
return {"sig_num": sig_num,
"js": js_file,
"html": html_file,
"intergral": integral,
}
def create_html(dataset, params, config):
import os
import paplot.subcode.tools as tools
import paplot.prep as prep
html_div_template = "<div style='float: left;' id='div_pm{id}'></div>\n"
html_add_template = "msig_draw.add_div('div_pm{id}');\n"
div_text = ""
add_text = ""
for i in range(dataset["sig_num"]):
div_text += html_div_template.format(id = i)
add_text += html_add_template.format(id = i)
integral_text = ""
if dataset["intergral"] == True:
integral_text = html_integral_template
f_template = open(os.path.dirname(os.path.abspath(__file__)) + "/templates/graph_pmsignature.html")
html_template = f_template.read()
f_template.close()
sig_num_sift = 0
if tools.config_getboolean(config, "result_format_pmsignature", "background"):
sig_num_sift = 1
f_html = open(params["dir"] + "/" + dataset["html"], "w")
f_html.write(
html_template.format(project = params["project"],
title = "%s(#sig %d)" % (params["title"], dataset["sig_num"] + sig_num_sift),
data_js = dataset["js"],
version = prep.version_text(),
date = tools.now_string(),
divs = div_text,
add_divs = add_text,
integral = integral_text,
style = "../style/%s" % os.path.basename(tools.config_getpath(config, "style", "path", "default.js")),
))
f_html.close()
| mit | -2,607,200,996,559,732,700 | 35.45122 | 176 | 0.577228 | false |
Jajcus/pyxmpp2 | pyxmpp2/test/xmppserializer.py | 1 | 2903 | #!/usr/bin/python -u
# -*- coding: UTF-8 -*-
# pylint: disable=C0111
import unittest
from xml.etree import ElementTree
from pyxmpp2.xmppserializer import XMPPSerializer
from pyxmpp2.utils import xml_elements_equal
class TestXMPPSerializer(unittest.TestCase):
def test_emit_head(self):
serializer = XMPPSerializer("jabber:client")
output = serializer.emit_head("fromX", "toY")
self.assertTrue(output.startswith("<stream:stream "))
self.assertTrue("xmlns='jabber:client'" in output
or 'xmlns="jabber:client"' in output)
self.assertFalse("xmlns:xml" in output)
xml = ElementTree.XML(output + "</stream:stream>")
self.assertEqual(xml.tag,
"{http://etherx.jabber.org/streams}stream")
self.assertEqual(xml.get('from'), 'fromX')
self.assertEqual(xml.get('to'), 'toY')
self.assertEqual(xml.get('version'), '1.0')
self.assertEqual(len(xml), 0)
def test_emit_head_no_from_to(self):
serializer = XMPPSerializer("jabber:client")
output = serializer.emit_head(None, None)
xml = ElementTree.XML(output + "</stream:stream>")
self.assertEqual(xml.get('from'), None)
self.assertEqual(xml.get('to'), None)
def test_emit_tail(self):
serializer = XMPPSerializer("jabber:client")
output = serializer.emit_head("fromX", "toY")
output += serializer.emit_tail()
xml = ElementTree.XML(output)
self.assertEqual(len(xml), 0)
def test_emit_stanza(self):
serializer = XMPPSerializer("jabber:client")
output = serializer.emit_head("from", "to")
stanza = ElementTree.XML("<message xmlns='jabber:client'>"
"<body>Body</body>"
"<sub xmlns='http://example.org/ns'>"
"<sub1 />"
"<sub2 xmlns='http://example.org/ns2' />"
"</sub>"
"</message>")
output += serializer.emit_stanza(stanza)
output += serializer.emit_tail()
xml = ElementTree.XML(output)
self.assertEqual(len(xml), 1)
self.assertEqual(len(xml[0]), 2)
self.assertTrue(xml_elements_equal(xml[0], stanza))
# no prefix for stanza elements
self.assertTrue("<message><body>" in output)
# no prefix for stanza child
self.assertTrue("<sub " in output)
# ...and its same-namespace child
self.assertTrue("<sub1/" in output or "<sub1 " in output)
# prefix for other namespace child
self.assertTrue("<sub2" in output)
# pylint: disable=W0611
from pyxmpp2.test._support import load_tests, setup_logging
def setUpModule():
setup_logging()
if __name__ == "__main__":
unittest.main()
| lgpl-2.1 | -8,768,695,044,483,091,000 | 35.746835 | 77 | 0.578367 | false |
beni55/sentry | setup.py | 1 | 4038 | #!/usr/bin/env python
"""
Sentry
======
Sentry is a realtime event logging and aggregation platform. It specializes
in monitoring errors and extracting all the information needed to do a proper
post-mortem without any of the hassle of the standard user feedback loop.
Sentry is a Server
------------------
The Sentry package, at its core, is just a simple server and web UI. It will
handle authentication clients (such as `Raven <https://github.com/getsentry/raven-python>`_)
and all of the logic behind storage and aggregation.
That said, Sentry is not limited to Python. The primary implementation is in
Python, but it contains a full API for sending events from any language, in
any application.
:copyright: (c) 2011-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
# Hack to prevent stupid "TypeError: 'NoneType' object is not callable" error
# in multiprocessing/util.py _exit_function when running `python
# setup.py test` (see
# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
for m in ('multiprocessing', 'billiard'):
try:
__import__(m)
except ImportError:
pass
setup_requires = []
if 'test' in sys.argv:
setup_requires.append('pytest')
dev_requires = [
'flake8>=2.0,<2.1',
]
tests_require = [
'exam>=0.5.1',
'eventlet',
'pytest',
'pytest-cov>=1.4',
'pytest-django',
'pytest-timeout',
'python-coveralls',
'nydus',
'mock>=0.8.0',
'redis',
'unittest2',
]
install_requires = [
'cssutils>=0.9.9,<0.9.10',
'BeautifulSoup>=3.2.1,<3.3.0',
'django-celery>=3.0.11,<3.1.0',
'celery>=3.0.15,<3.1.0',
'django-crispy-forms>=1.2.3,<1.3.0',
'Django>=1.5.1,<1.6',
'django-paging>=0.2.5,<0.3.0',
'django-picklefield>=0.3.0,<0.4.0',
'django-static-compiler>=0.3.0,<0.4.0',
'django-templatetag-sugar>=0.1.0,<0.2.0',
'gunicorn>=0.17.2,<0.18.0',
'logan>=0.5.7,<0.6.0',
'nydus>=0.10.0,<0.11.0',
'Pygments>=1.6.0,<1.7.0',
'pynliner>=0.4.0,<0.5.0',
'python-dateutil>=1.5.0,<2.0.0',
'raven>=3.3.8',
'redis>2.7.0,<2.8.0',
'simplejson>=3.1.0,<3.2.0',
'South>=0.7.6,<0.8.0',
'httpagentparser>=1.2.1,<1.3.0',
'django-social-auth>=0.7.24,<0.8.0',
'setproctitle>=1.1.7,<1.2.0',
]
postgres_requires = [
'psycopg2>=2.4.0,<2.5.0',
]
postgres_pypy_requires = [
'psycopg2cffi',
]
mysql_requires = [
'MySQL-python>=1.2.0,<1.3.0',
]
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
        # import here, because outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name='sentry',
version='6.0.2',
author='David Cramer',
author_email='[email protected]',
url='http://www.getsentry.com',
description='A realtime logging and aggregation server.',
long_description=open('README.rst').read(),
package_dir={'': 'src'},
packages=find_packages('src'),
zip_safe=False,
install_requires=install_requires,
extras_require={
'tests': tests_require,
'dev': dev_requires,
'postgres': install_requires + postgres_requires,
'postgres_pypy': install_requires + postgres_pypy_requires,
'mysql': install_requires + mysql_requires,
},
tests_require=tests_require,
cmdclass={'test': PyTest},
license='BSD',
include_package_data=True,
entry_points={
'console_scripts': [
'sentry = sentry.utils.runner:main',
],
},
classifiers=[
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
| bsd-3-clause | 506,437,212,185,443,800 | 25.741722 | 92 | 0.624071 | false |
evidation-health/bokeh | bokeh/crossfilter/models.py | 1 | 30616 | from __future__ import absolute_import
import logging
import six
import pandas as pd
import numpy as np
from ..plotting import curdoc
from ..models import ColumnDataSource, GridPlot, Panel, Tabs, Range
from ..models.widgets import Select, MultiSelect, InputWidget
# crossfilter plotting utilities
from .plotting import make_histogram_source, make_histogram, cross, hide_axes
from .plugins import CrossScatterPlugin, CrossBarPlugin, CrossLinePlugin
# bokeh plotting functions
from ..plot_object import PlotObject
from ..properties import Dict, Enum, Instance, List, String, Any, Int
logger = logging.getLogger(__name__)
class DiscreteFacet(object):
"""Pairing of a field and a unique value, representing a subset of the
total data."""
def __init__(self, field, value, label=None):
"""Sets object properties and creates label if not provided.
Args:
field (str): name of the column
value: unique value defined for the column
label (str, optional): string representation of the value
"""
if label is None:
label = str(value)
self.field = field
self.label = label
self._value = value
def __repr__(self):
        return "%s:%s" % (self.field, self.label)
def filter(self, df):
"""Filters the provided DataFrame to the subset corresponding to value.
Args:
df (DataFrame): contains a column of ``field``
Returns:
DataFrame: filtered to rows, where column ``field`` has values
equal to ``_value``.
"""
return df[df[self.field] == self._value]
class ContinuousFacet(DiscreteFacet):
"""Represents a range of values for a field in a DataFrame."""
def __init__(self, field, value, bins, label=None):
"""Calls parent ``DiscreteFacet`` and stores bins for later filtering.
Args:
field (str): name of the column
value (str): center of range of values in the column
bins (list[float]): start and inclusive stop value for the bin
label (str, optional): string representation
"""
super(ContinuousFacet, self).__init__(field, value, label=label)
self.bins = bins
def filter(self, df):
"""Filters the provided DataFrame to the subset corresponding to bins.
Args:
df (DataFrame): contains a column of ``field``
Returns:
DataFrame: filtered to rows, where column ``field`` has values
within the bounds of ``bins``.
"""
if self.bins[0] is not None:
df = df[df[self.field] > self.bins[0]]
if self.bins[1] is not None:
df = df[df[self.field] <= self.bins[1]]
return df
class CrossFilter(PlotObject):
"""Interactive filtering and faceting application with multiple plot types"""
# identify properties for the data
columns = List(Dict(String, Any))
data = Instance(ColumnDataSource)
filtered_data = Instance(ColumnDataSource)
# list of datasources to use for filtering widgets
filter_sources = Dict(String, Instance(ColumnDataSource))
# list of columns we are filtering
filtering_columns = List(String)
# dict of column name to filtering widgets
filter_widgets = Dict(String, Instance(PlotObject))
# dict which aggregates all the selections from the different filtering
# widgets
filtered_selections = Dict(String, Dict(String, Any))
# list of facet vars
facet_x = List(String, default=[])
facet_y = List(String, default=[])
facet_tab = List(String, default=[])
# the displayed plot object
plot = Instance(PlotObject)
x_range = Instance(Range)
y_range = Instance(Range)
# configuration properties for the plot
plot_type = Enum("line", "scatter", "bar")
plot_map = {'line': CrossLinePlugin,
'scatter': CrossScatterPlugin,
'bar': CrossBarPlugin}
x = String
y = String
agg = String
color = String
title = String
height = Int()
width = Int()
# identify the selector/drop-down properties
plot_selector = Instance(Select)
x_selector = Instance(Select)
y_selector = Instance(Select)
agg_selector = Instance(Select)
def __init__(self, *args, **kwargs):
"""Creates original and filtered ColumnDataSource and handles defaults.
The df and starting configuration are only provided the first time
init is called, within the create method.
Kwargs:
df (DataFrame): the data to use in the crossfilter app
plot_type (str, optional): starting plot type
agg (str, optional): starting aggregation type
"""
if 'df' in kwargs:
self._df = kwargs.pop('df')
# initialize a "pure" and filtered data source based on df
kwargs['data'] = ColumnDataSource(data=self.df)
kwargs['filtered_data'] = ColumnDataSource(data=self.df)
# default plot type
if 'plot_type' not in kwargs:
kwargs['plot_type'] = "scatter"
# default aggregation type
if 'agg' not in kwargs:
kwargs['agg'] = 'sum'
if 'plot_map' in kwargs:
self.plot_map = kwargs.pop('plot_map')
super(CrossFilter, self).__init__(**kwargs)
@classmethod
def create(cls, **kwargs):
"""Performs all one-time construction of bokeh objects.
This classmethod is required due to the way that bokeh handles the
python and javascript components. The initialize method will be
called each additional time the app is updated (including once in
the create method), but the PlotObject infrastructure will find that
the object already exists in any future calls, and will not create a
new object.
Kwargs:
df (DataFrame): the data to use in the crossfilter app
plot_type (str, optional): starting plot type
agg (str, optional): starting aggregation type
"""
obj = cls(**kwargs)
obj.set_metadata()
choices = obj.make_plot_choices()
obj.update_plot_choices(choices)
obj.set_plot()
obj.set_input_selector()
return obj
def set_input_selector(self):
"""Creates and configures each selector (drop-down menu)."""
col_names = [x['name'] for x in self.columns]
col_names.append('None')
self.plot_selector = Select.create(
title="PlotType",
name="plot_type",
value=self.plot_type,
options=["line", "scatter", "bar"],
)
self.x_selector = Select.create(
name="x",
value=self.x,
options=col_names,
)
self.y_selector = Select.create(
name="y",
value=self.y,
options=col_names,
)
self.agg_selector = Select.create(
name='agg',
value=self.agg,
options=['sum', 'mean', 'last', 'count', 'percent'],
)
def update_plot_choices(self, input_dict):
"""Sets object attributes corresponding to input_dict's values.
Args:
input_dict (dict): dict with x, y, and plot_type keys
"""
for k, v in input_dict.items():
if getattr(self, k) is None:
setattr(self, k, v)
def get_plot_class(self):
"""Return the class for the current plot selection."""
return self.plot_map[self.plot_type]
def column_descriptor_dict(self):
"""Creates column stats dict with keys of column names.
Returns:
dict: dict with key per column in data, where values are column stats
"""
column_descriptors = {}
for x in self.columns:
column_descriptors[x['name']] = x
return column_descriptors
@property
def continuous_columns(self):
"""Returns list of column descriptors for the non-Discrete columns.
Returns:
list(dict): list of dicts, containing metadata about columns
"""
return [x for x in self.columns if x['type'] != 'DiscreteColumn']
@property
def discrete_columns(self):
"""Returns list of column descriptors for the Discrete columns.
Returns:
list(dict): list of dicts, containing metadata about columns
"""
return [x for x in self.columns if x['type'] == 'DiscreteColumn']
def make_plot_choices(self):
"""Selects first two continuous columns for x,y during initial setup
Returns:
dict: x, y, and plot_type keys and values for initial setup
"""
# prefer continuous columns to initialize with, otherwise use what we have
if len(self.continuous_columns) > 1:
x, y = [x['name'] for x in self.continuous_columns[:2]]
else:
x, y = [x['name'] for x in self.columns[:2]]
return {'x': x, 'y': y, 'plot_type': 'scatter'}
def set_plot(self):
"""Makes and sets the plot based on the current configuration of app."""
self.update_xy_ranges(source=self.df)
plot = self.make_plot()
self.plot = plot
curdoc()._add_all()
def make_plot(self):
"""Makes the correct plot layout type, based on app's current config.
Returns:
PlotObject: one plot, grid of plots, or tabs of plots/grids of plots
"""
if self.facet_tab:
facets = self.make_facets(dimension='tab')
# generate a list of panels, containing plot/plots for each facet
tabs = [self.make_tab(content=self.create_plot_page(
tab_facet=facet), tab_label=self.facet_title(facet)) for facet
in facets]
return Tabs(tabs=tabs)
else:
return self.create_plot_page()
def create_plot_page(self, tab_facet=None):
"""Generates a single visible page of a plot or plots.
Args:
tab_facet (DiscreteFacet or ContinuousFacet): a facet to filter on
Returns:
PlotObject: a single or grid of plots
"""
# no faceting
if all([len(self.facet_x) == 0,
len(self.facet_y) == 0]):
plot_page = self.make_single_plot(facet=tab_facet)
# x xor y faceting
if all([(len(self.facet_x) != 0) ^ (len(self.facet_y) != 0)]):
plot_page = self.make_1d_facet_plot(facet=tab_facet)
# x and y faceting
if all([len(self.facet_x) != 0,
len(self.facet_y) != 0]):
plot_page = self.make_2d_facet_plot(facet=tab_facet)
if isinstance(plot_page, GridPlot):
self.apply_grid_style(plot_page)
return plot_page
@staticmethod
def make_tab(content, tab_label):
"""Creates a container for the contents of a tab.
Args:
content (PlotObject): the primary content of the tab
tab_label (str): the text to place in the tab
Returns:
Panel: represents a single tab in a group of tabs
"""
return Panel(child=content, title=tab_label)
def make_facets(self, dimension):
"""Creates combination of all facets for the provided dimension
Args:
dimension (str): name of the dimension to create facets for
Returns:
list(list(DiscreteFacet or ContinuousFacet)): list of list of
unique facet combinations
"""
if dimension == 'x':
facets = self.facet_x
elif dimension == 'y':
facets = self.facet_y
else:
facets = self.facet_tab
# create facets for each column
column_descriptor_dict = self.column_descriptor_dict()
all_facets = [[]]
for field in facets:
# create facets from discrete columns
if column_descriptor_dict[field]['type'] == 'DiscreteColumn':
field_facets = [DiscreteFacet(field, val) for val in
np.unique(self.df[field].values)]
# combine any facets as required
all_facets = cross(all_facets, field_facets)
else:
# create quantile based discrete data and pairs of bins
categorical, bins = pd.qcut(self.df[field], 4, retbins=True)
cats = categorical.cat.categories
bins = [[bins[idx], bins[idx + 1]] for idx in
range(len(bins) - 1)]
bins[0][0] = None
# create list of facets
field_facets = [ContinuousFacet(field, value, bin) for
bin, value in zip(bins, cats)]
# combine any facets as required
all_facets = cross(all_facets, field_facets)
return all_facets
@staticmethod
def facet_title(facets):
"""Joins list of facets by commas.
Args:
facets (list(DiscreteFacet or ContinuousFacet)): list of facets,
which are a combination of column and unique value within it
Returns:
str: string representation of the combination of facets
"""
title = ",".join([str(x) for x in facets])
return title
def facet_data(self, facets, df=None):
"""Filters data to the rows associated with the given facet.
Args:
facets (list(DiscreteFacet or ContinuousFacet)): list of facets,
which are a combination of column and unique value within it
df (DataFrame, optional): data to be filtered on
Returns:
DataFrame: filtered DataFrame based on provided facets
"""
if df is None:
df = self.filtered_df
for f in facets:
df = f.filter(df)
return df
def make_1d_facet_plot(self, facet=None):
"""Creates the faceted plots when a facet is added to the x axis.
Returns:
GridPlot: a grid of plots, where each plot has subset of data
"""
if self.facet_x:
all_facets = self.make_facets('x')
else:
all_facets = self.make_facets('y')
plots = []
# loop over facets and create single plots for data subset
for facets in all_facets:
title = self.facet_title(facets)
if facet:
facets += facet
df = self.facet_data(facets, self.filtered_df)
plot = self.make_single_plot(
df=df, title=title, plot_height=200, plot_width=200,
tools="pan,wheel_zoom,reset", facet=facets
)
# append single plot to list of plots
plots.append(plot)
# create squarish grid based on number of plots
chunk_size = int(np.ceil(np.sqrt(len(plots))))
# create list of lists of plots, where each list of plots is a row
grid_plots = []
for i in range(0, len(plots), chunk_size):
chunk = plots[i:i + chunk_size]
grid_plots.append(chunk)
self.hide_internal_axes(grid_plots)
# return the grid as the plot
return GridPlot(children=grid_plots, plot_width=200*chunk_size)
def make_2d_facet_plot(self, facet=None):
"""Creates the grid of plots when there are both x and y facets.
Returns:
GridPlot: grid of x and y facet combinations
"""
# ToDo: gracefully handle large combinations of facets
all_facets_x = self.make_facets('x')
all_facets_y = self.make_facets('y')
grid_plots = []
# y faceting down column
for facets_y in all_facets_y:
# x faceting across row
row = []
for facets_x in all_facets_x:
# build the facets and title
facets = facets_x + facets_y
title = self.facet_title(facets)
# must filter by any extra facets provided for facet tab
if facet:
filter_facets = facets + facet
else:
filter_facets = facets
df = self.facet_data(filter_facets, self.filtered_df)
plot = self.make_single_plot(
df=df, title=title, plot_height=200, plot_width=200,
tools="pan,wheel_zoom,reset", facet=facets
)
row.append(plot)
# append the row to the list of rows
grid_plots.append(row)
self.hide_internal_axes(grid_plots)
# return the grid of plots as the plot
return GridPlot(children=grid_plots, plot_width=200*len(all_facets_x))
@staticmethod
def apply_facet_style(plot):
"""Applies facet-specific style for a given plot.
Override this method to modify the look of a customized CrossFilter
for all plugins. Or, apply custom styles in the plugin, since the
plugin will be told if it is currently being faceted.
"""
plot.title_text_font_size = "9pt"
plot.min_border = 0
def apply_single_plot_style(self, plot):
"""Applies styles when we have only one plot.
Override this method to modify the look of a customized CrossFilter
for all plugins.
"""
plot.min_border_left = 60
def apply_grid_style(self, grid_plot):
"""Applies facet-specific style for the grid of faceted plots.
Override this method to modify the look of a customized CrossFilter
for all plugins. Or, apply custom styles in the plugin, since the
plugin will be told if it is currently being faceted.
"""
grid_plot.title_text_font_size = "12pt"
grid_plot.title_text_font_style = "bold"
grid_plot.title = self.title
@staticmethod
def hide_internal_axes(grid_plots):
"""Hides the internal axes for a grid of plots.
Args:
grid_plots (list(list(Figure))): list of rows (list), containing plots
"""
for i, row in enumerate(grid_plots):
is_bottom = i + 1 == len(grid_plots)
for j, plot in enumerate(row):
if j != 0:
if is_bottom:
hide_axes(plot, axes='y')
else:
hide_axes(plot)
elif j == 0 and not is_bottom:
hide_axes(plot, axes='x')
def make_single_plot(self, df=None, title=None,
plot_width=700,
plot_height=680,
tools="pan,wheel_zoom,box_zoom,save,resize,"
"box_select,reset",
facet=None):
"""Creates a plot based on the current app configuration.
Args:
df (DataFrame, optional): data to use for the plot
title (str, optional): plot title
plot_width (float, optional): width of plot in pixels
plot_height (float, optional): height of plot in pixels
tools (str, optional): comma separated string of tool names
Returns:
PlotObject: the generated plot
"""
faceting = False
# df is not provided when we are not faceting
if df is None:
source = self.filtered_data
else:
df = self.facet_data(facets=facet, df=df)
# create column data source with filtered df
source = ColumnDataSource(data=df)
faceting = True
# check for tab faceting and filter if provided
if facet:
df = self.facet_data(facets=facet, df=df)
source = ColumnDataSource(data=df)
# get the helper class for the plot type selected
plot_class = self.get_plot_class()
# initialize the plugin class
plugin = plot_class(source=source,
title_text_font_size="12pt",
title_text_font_style = "bold",
plot_height=plot_height,
plot_width=plot_width,
tools=tools,
title=title,
x_range=self.x_range,
y_range=self.y_range,
facet=faceting,
crossfilter=self)
# generate plot
plot = plugin.get_plot()
# apply faceting-specific styling if required
if facet:
self.apply_facet_style(plot)
self.title = plugin.title
else:
self.apply_single_plot_style(plot)
self.title = plot.title
return plot
def update_xy_ranges(self, source):
"""Updates common x_range, y_range to use for creating figures.
Args:
source (ColumnDataSource): the source to return correct range for
"""
plt_cls = self.get_plot_class()
x_range, y_range = plt_cls.make_xy_ranges(cf=self)
# store x and y range from the plot class
self.x_range = x_range
self.y_range = y_range
def plot_attribute_change(self, obj, attrname, old, new):
"""Updates app's attribute and plot when view configuration changes.
Args:
obj (Widget): the object that has an attribute change
attrname (str): name of the attribute
old (type): the previous value of unknown type
new (type): the new value of unknown type
"""
setattr(self, obj.name, new)
self.set_plot()
def facet_change(self, obj, attrname, old, new):
"""Updates plot when any facet configuration changes.
Args:
obj (Widget): the object that has an attribute change
attrname (str): name of the attribute
old (type): the previous value of unknown type
new (type): the new value of unknown type
"""
self.set_plot()
@property
def df(self):
"""The core data that is used by the app for plotting.
Returns:
DataFrame: the original data structure
"""
if hasattr(self, '_df'):
return self._df
else:
if self.data:
return self.data.to_df()
@property
def filtered_df(self):
"""The subset of the data to use for plotting.
Returns:
DataFrame: the original data structure
"""
if hasattr(self, '_df'):
return self._df
else:
if self.filtered_data:
return self.filtered_data.to_df()
def update(self, **kwargs):
"""Updates CrossFilter attributes each time the model changes.
The events are setup each time so that we can add event handlers to
the selection/filtering widgets as they are added.
"""
super(CrossFilter, self).update(**kwargs)
self.setup_events()
def setup_events(self):
"""Registers events each time the app changes state."""
# watch the app's filtering_columns attribute to setup filters
self.on_change('filtering_columns', self, 'setup_filter_widgets')
# register any available filter widget
for obj in self.filter_widgets.values():
if isinstance(obj, InputWidget):
obj.on_change('value', self, 'handle_filter_selection')
# watch app column data source attribute for changes
for obj in self.filter_sources.values():
obj.on_change('selected', self, 'handle_filter_selection')
# selector event registration
if self.plot_selector:
self.plot_selector.on_change('value', self, 'plot_attribute_change')
if self.x_selector:
self.x_selector.on_change('value', self, 'plot_attribute_change')
if self.y_selector:
self.y_selector.on_change('value', self, 'plot_attribute_change')
if self.agg_selector:
self.agg_selector.on_change('value', self, 'plot_attribute_change')
# register to watch the app's facet attributes
self.on_change('facet_x', self, 'facet_change')
self.on_change('facet_y', self, 'facet_change')
self.on_change('facet_tab', self, 'facet_change')
def handle_filter_selection(self, obj, attrname, old, new):
"""Filters the data source whenever a filter widget changes.
Args:
obj (Widget): the object that has an attribute change
attrname (str): name of the attribute
old (type): the previous value of unknown type
new (type): the new value of unknown type
"""
df = self.df
# loop over the column metadata
for descriptor in self.columns:
colname = descriptor['name']
# handle discrete selections
if descriptor['type'] == 'DiscreteColumn' and \
colname in self.filter_widgets:
selected = self.filter_widgets[colname].value
if not selected:
continue
if isinstance(selected, six.string_types):
                    df = df[df[colname] == selected]
else:
df = df[np.in1d(df[colname], selected)]
# handle time or continuous selections
elif descriptor['type'] in ('TimeColumn', 'ContinuousColumn') and \
colname in self.filter_widgets:
obj = self.filter_sources[colname]
# hack because we don't have true range selection
if not obj.selected:
continue
# TODO: (bev) This works until CF selections are not made on
# [multi]lines and [multi]patches
min_idx = np.min(obj.selected['1d']['indices'])
max_idx = np.max(obj.selected['1d']['indices'])
min_val = obj.data['centers'][min_idx]
max_val = obj.data['centers'][max_idx]
df = df[(df[colname] >= min_val) & (df[colname] <= max_val)]
# update filtered data and force plot update
for colname in self.data.column_names:
self.filtered_data.data[colname] = df[colname]
self.filtered_data._dirty = True
self.set_plot()
def clear_selections(self, obj, attrname, old, new):
"""Updates filter widgets and sources as they are removed.
Args:
obj (Widget): the object that has an attribute change
attrname (str): name of the attribute
old (type): the previous value of unknown type
new (type): the new value of unknown type
"""
diff = set(old) - set(new)
column_descriptor_dict = self.column_descriptor_dict()
# delete any removed filter widgets
if len(diff) > 0:
for col in diff:
metadata = column_descriptor_dict[col]
if metadata['type'] != 'DiscreteColumn':
del self.filter_sources[col]
del self.filter_widgets[col]
# update the data based on latest changes
if diff:
self.handle_filter_selection(obj, attrname, old, new)
def setup_filter_widgets(self, obj, attrname, old, new):
"""Creates new filter widget each time a new column is added to filters.
Args:
obj (Widget): the object that has an attribute change
attrname (str): name of the attribute
old (type): the previous value of unknown type
new (type): the new value of unknown type
"""
self.clear_selections(obj, attrname, old, new)
# add new widget as required for each column set to filter on
column_descriptor_dict = self.column_descriptor_dict()
for col in self.filtering_columns:
metadata = column_descriptor_dict[col]
            if col not in self.filter_widgets:
# discrete
if metadata['type'] == 'DiscreteColumn':
select = MultiSelect.create(
name=col,
options=self.df[col].unique().tolist())
self.filter_widgets[col] = select
# continuous
else:
source = make_histogram_source(self.df[col])
self.filter_sources[col] = source
hist_plot = make_histogram(self.filter_sources[col],
plot_width=200, plot_height=100,
title_text_font_size='8pt',
tools='box_select'
)
hist_plot.title = col
self.filter_widgets[col] = hist_plot
curdoc()._add_all()
def set_metadata(self):
"""Creates a list of dicts, containing summary info for each column.
The descriptions are stored in the ``columns`` property.
"""
descriptors = []
columns = self.df.columns
for c in columns:
# get description for column from pandas DataFrame
desc = self.df[c].describe()
# DiscreteColumn
if 'top' in desc.index:
descriptors.append({
'type': "DiscreteColumn",
'name': c,
'count': desc['count'],
'unique': desc['unique'],
'top': desc['top'],
'freq': desc['freq'],
})
# TimeColumn
elif 'first' in desc.index:
descriptors.append({
'type': "TimeColumn",
'name': c,
'count': desc['count'],
'unique': desc['unique'],
'first': desc['first'],
'last': desc['last'],
})
# ContinuousColumn
else:
descriptors.append({
'type': "ContinuousColumn",
'name': c,
'count': desc['count'],
'mean': "%.2f"%desc['mean'],
'std': "%.2f"%desc['std'],
'min': "%.2f"%desc['min'],
'max': "%.2f"%desc['max'],
})
self.columns = descriptors
| bsd-3-clause | 4,780,065,910,283,773,000 | 32.278261 | 82 | 0.559805 | false |
fcalo/crawler_comics | crawler/updater.py | 1 | 7386 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, urllib2, urllib, cookielib
import time, logging, logging.handlers
from datetime import datetime
from pprint import pprint
import csv, shutil, re
from ftplib import FTP_TLS, error_perm
from db import DB
from utils import *
from urllib import quote
import traceback
#hack
import ssl, socket
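# FTP_TLS subclass for implicit FTPS: connect() wraps the control socket in TLS
# right after the TCP connection is made (the caller below uses port 990) instead
# of negotiating AUTH TLS the way ftplib's FTP_TLS normally does, and storbinary()
# is overridden so the TLS data channel is closed without calling unwrap().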
class mFTP_TLS(FTP_TLS):
def __init__(self, host='', user='', passwd='', acct='', keyfile=None, certfile=None, timeout=60):
FTP_TLS.__init__(self, host, user, passwd, acct, keyfile, certfile, timeout)
def connect(self, host='', port=0, timeout=-999):
if host != '':
self.host = host
if port > 0:
self.port = port
if timeout != -999:
self.timeout = timeout
try:
self.sock = socket.create_connection((self.host, self.port), self.timeout)
self.af = self.sock.family
self.sock = ssl.wrap_socket(self.sock, self.keyfile, self.certfile, ssl_version=ssl.PROTOCOL_TLSv1)
self.file = self.sock.makefile('rb')
self.welcome = self.getresp()
except Exception as e:
print e
return self.welcome
def storbinary(self, cmd, fp, blocksize=8192, callback=None, rest=None):
self.voidcmd('TYPE I')
conn = self.transfercmd(cmd, rest)
try:
while 1:
buf = fp.read(blocksize)
if not buf: break
conn.sendall(buf)
if callback: callback(buf)
# shutdown ssl layer
if isinstance(conn, ssl.SSLSocket):
#conn.unwrap()
                conn.close()
finally:
conn.close()
return self.voidresp()
class Updater(object):
def __init__(self, verbose = False, id_task = None, supplier = None):
self.verbose = verbose
self.supplier = supplier
self.config = {}
config_file = os.path.join(os.path.dirname(__file__), "updater.conf")
execfile(config_file, self.config)
#logger
self.logger = logging.getLogger('UPDATER')
hdlr = logging.handlers.TimedRotatingFileHandler(os.path.join(os.path.dirname(__file__), \
self.config['log_file'].replace(".log", "%s.log" % id_task)),"d",2)
hdlr.suffix = "-%s" % id_task if id_task else "%Y-%m-%d-%H-%M"
formatter = logging.Formatter('%(asctime)s - %(levelname)s: %(message)s')
hdlr.setFormatter(formatter)
self.logger.addHandler(hdlr)
self.logger.setLevel(logging.INFO)
self.logger.info("[__init__]")
#initialite DB
self.db = DB(self.logger, config_file)
if not id_task:
self.id_task = self.db.start_new_task()
else:
self.id_task = int(id_task)
self.name_supplier = self.db.get_name_supplier(self.supplier)
#initialite csv
self.filename_csv = os.path.join(os.path.dirname(__file__), "csv/%s" % self.config['csv_filename'] % (self.supplier, self.id_task))
self.filename_stock_master = os.path.join(os.path.dirname(__file__), "csv/%s" % "STOCK_MASTER_%d.csv" % self.id_task)
self.print_line(self.config["csv_header"], True)
def get_metas_orderer(self, data):
"""select metas required"""
return [data[meta] for meta in self.config['csv_header'] if meta in data and data[meta]]
def print_line(self, line, header = False):
"""print line in csv"""
#~ pprint([str(i).replace(",", ".") if is_number(i) else i for i in line])
#~ pprint([is_number(i) for i in line])
with open(self.filename_csv, 'wb' if header else 'ab') as csvfile:
csvwriter = csv.writer(csvfile, delimiter='\t',quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
csvwriter.writerow(line)
def download_stock_master(self):
""" download csv to compare stock """
connected = False
tries = 0
self.logger.info("[download_stock_master] Descargando...")
while not connected:
try:
ftps = mFTP_TLS()
ftps.connect(self.config['ftp_host'], port=990, timeout = 60)
ftps.login(self.config['ftp_user'], self.config['ftp_pass'])
ftps.prot_p()
connected = True
except:
tries +=1
if tries > 5:
raise
time.sleep(tries)
ftps.retrbinary("RETR " + self.config['ftp_filename'] ,open(self.filename_stock_master, 'wb').write)
ftps.quit()
def load_data_stock(self):
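        # Parse the downloaded master stock CSV into self.data_stock: one dict per
        # row (decoded from latin-1 to utf-8), keyed by the product 'id' column.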
self.logger.info("[load_data_stock] leyendo...")
self.data_stock = {}
with open(self.filename_stock_master, 'rb') as f:
reader = csv.reader(f)
header = True
for row in reader:
if not header:
data_line = dict(zip(self.config["csv_header"], [r.decode('iso-8859-1').encode('utf8') for r in row]))
self.data_stock[data_line['id']] = data_line
header = False
def run(self):
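        # Main flow: download and load the master stock file, then walk this
        # supplier's rows from the DB; rows also present in the master file get
        # their stock/availability fields reconciled and written to the output CSV,
        # and master rows for this supplier that vanished from the feed are written
        # out afterwards.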
try:
self.db.init_task(self.id_task)
self.download_stock_master()
self.load_data_stock()
last_task = self.db.get_last_task_supplier(self.supplier)
self.logger.info("[run] generando %s" % self.supplier)
ids = []
for data in self.db.get_data_supplier(self.supplier):
if data['id'] in self.data_stock:
data_master_stock = self.data_stock[data['id']]
if data['id'] in ids:
#url change
continue
ids.append(data['id'])
# stock checks
print data['id'], last_task, data['last_seen_task']
if last_task > data['last_seen_task'] and int(data_master_stock['stock']) > 9:
data_master_stock['catalogid'] = "-%s" % data_master_stock['catalogid']
if data_master_stock['stock'] in ['0', '10', '40']:
if data['stock'] != 40:
data_master_stock['stock'] = data['stock']
data_master_stock['instock_message'] = "Pre-Reserva" if data_master_stock['stock'] == "40" \
else "Añadir a Lista de Espera" if data_master_stock['stock'] == "0" \
else "Envío 5 a 7 Días" if data_master_stock['stock'] == "10" \
else "En Stock - 48 Horas"
if not 'categories' in data_master_stock:
data_master_stock['categories'] = data['categories']
data['control'] = ""
else:
data['control'] = "" if data_master_stock['categories'] == data['categories'] else "novedad"
data_master_stock['distributor'] = self.name_supplier
self.print_line(self.get_metas_orderer(data_master_stock))
else:
#~ self.print_line(self.get_metas_orderer(data))
pass
#from master
self.logger.info("[run] buscando desaparecidos en origen %s" % self.supplier)
for data in self.data_stock.values():
if 'distributor' in data and data['distributor'] == self.name_supplier and not data['id'] in ids:
if data['stock'] == "0":
data['catalogid'] = "-%s" % data['catalogid']
data['instock_message'] = "Pre-Reserva" if data['stock'] == "40" \
else "Añadir a Lista de Espera" if data['stock'] == "0" \
else "Envío 5 a 7 Días" if data['stock'] == "10" \
else "En Stock - 48 Horas"
if not 'categories' in data:
data['categories'] = ""
self.print_line(self.get_metas_orderer(data))
self.logger.info("[run] %s generado" % self.supplier)
self.db.finish_task(self.id_task)
except Exception as e:
self.db.finish_task(self.id_task, True)
exc_type, exc_obj, exc_tb = sys.exc_info()
#~ fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
#~ print(exc_type, fname, exc_tb.tb_lineno)
#~
self.logger.error("%s\n %d: %s" %(traceback.format_exc(), exc_tb.tb_lineno, str(e)))
raise
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print "indica Proveedor: updater.py [supplier] [id_task]"
        sys.exit(1)
    id_task = sys.argv[2] if len(sys.argv) > 2 else None
    updater = Updater(supplier=sys.argv[1], id_task=id_task)
updater.run()
| mit | 3,110,870,707,839,183,000 | 30.404255 | 133 | 0.63252 | false |
relic7/prodimages | python/magickLoad_PrdCmpFile3.py | 1 | 14644 | #!/usr/bin/env python
## Walk Root Directory and Return List or all Files in all Subdirs too
def recursive_dirlist(rootdir):
import os
regex_CR2 = re.compile(r'.+?\.[CR2cr2]{3}')
regex_jpg = re.compile(r'.+?\.[JPGjpg]{3}')
regex_png = re.compile(r'.+?\.[pngPNG]{3}')
walkedlist = []
for dirname, subdirnames, filenames in os.walk(rootdir):
# append path of all filenames to walkedlist
for filename in filenames:
file_path = os.path.abspath(os.path.join(dirname, filename))
if os.path.isfile(file_path):
walkedlist.append(file_path)
# Advanced usage:
# editing the 'dirnames' list will stop os.walk() from recursing into there.
#if '.git' in dirnames:
# don't go into any .git directories.
# dirnames.remove('.git')
    walkedset = sorted(set(walkedlist))
return walkedset
def rename_retouched_file(src_imgfilepath):
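    # Maps retouched files named <9-digit style>_<1-6>.jpg onto the site naming
    # convention: suffix _1 becomes the primary image (<style>.jpg) and _2.._6
    # become _alt01.._alt05; anything else is left alone.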
import os,re
regex_coded = re.compile(r'.+?/[1-9][0-9]{8}_[1-6]\.jpg')
imgfilepath = src_imgfilepath
if re.findall(regex_coded,imgfilepath):
filedir = imgfilepath.split('/')[:-1]
filedir = '/'.join(filedir)
print filedir
filename = imgfilepath.split('/')[-1]
colorstyle = str(filename[:9])
testimg = filename.split('_')[-1]
alttest = testimg.split('.')[0]
ext = filename.split('.')[-1]
ext = ".{}".format(ext.lower())
# if its 1
        if alttest.isdigit() and len(alttest) == 1:
if alttest == '1':
src_img_primary = src_imgfilepath.replace('_1.','.')
os.rename(src_imgfilepath, src_img_primary)
return src_img_primary
else:
alttest = int(alttest)
print alttest
alttest = alttest - 1
alt = '_alt0{}'.format(str(alttest))
print alt
if alt:
#print type(filedir), type(colorstyle), type(alt), type(ext)
#print filedir, colorstyle, alt, ext
filename = "{}{}{}".format(colorstyle,alt,ext)
renamed = os.path.join(filedir, filename)
print renamed
## except UnboundLocalError:
## print "UnboundLocalError{}".format(imgfilepath)
if renamed:
os.rename(src_imgfilepath, renamed)
if os.path.isfile(renamed):
return renamed
else:
return src_imgfilepath
def get_exif_metadata_value(image_filepath, exiftag=None):
from PIL import Image
import pyexiv2
# Read EXIF data to initialize
image_metadata = pyexiv2.ImageMetadata(image_filepath)
metadata = image_metadata.read()
# Add and Write new Tag to File
if exiftag:
exifvalue = metadata[exiftag]
return (exiftag, exifvalue)
# image_metadata[exiftag] = exifvalue
# image_metadata.write()
else:
metadict = {}
for mtag, mvalue in metadata.iteritems():
metadict[mtag] = mvalue
return metadict
######## Make Images For Upload to Website ##########
### Large Jpeg Mogrfy Dir with _l jpgs
def subproc_magick_large_jpg(imgdir):
import subprocess,os,re
### Change to Large jpg dir to Mogrify using Glob
os.chdir(imgdir)
subprocess.call([
"mogrify",
'*.jpg[400x480]',
"-filter",
"Mitchell",
"-compress",
"none",
"-format",
"jpeg",
"-adaptive-sharpen",
"100",
"-unsharp",
"50",
"-quality",
"100",
])
### Medium Jpeg Mogrfy Dir with _m jpgs
def subproc_magick_medium_jpg(imgdir):
import subprocess,os,re
### Change to Medium jpg dir to Mogrify using Glob
os.chdir(imgdir)
subprocess.call([
"mogrify",
'*.jpg[200x240]',
"-filter",
"Mitchell",
"-compress",
"none",
"-format",
"jpeg",
"-adaptive-sharpen",
"100",
"-unsharp",
"50",
"-quality",
"100",
])
### Png Create with Mogrify globbing png directories
def subproc_magick_png(imgdir):
import subprocess,re,os
#imgdestpng_out = os.path.join(tmp_processing, os.path.basename(imgsrc_jpg))
os.chdir(imgdir)
subprocess.call([
"mogrify",
"-format",
"png",
'*.jpg',
"-define",
"png:preserve-colormap",
"-define",
"png:format=png24",
"-define",
"png:compression-level=N",
"-define",
"png:compression-strategy=N",
"-define",
"png:compression-filter=N",
"-format",
"png",
## new
"-colorspace",
"RGB",
"-filter",
"Spline",
"-define",
"filter:blur=0.88549061701764",
'-unsharp',
'2x2.4+0.5+0',
"-colorspace",
"sRGB",
'-quality',
'100',
## new
])
print "Done {}".format(imgdir)
return
##### Upload tmp_loading dir to imagedrop via FTP using Pycurl #####
def pycurl_upload_imagedrop(localFilePath):
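    # Uploads one local file to the imagedrop FTP share with pycurl. Returns None
    # on success, or the curl errno on failure so the caller can retry the same
    # file with plain ftplib (upload_to_imagedrop below).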
import pycurl, os
#import FileReader
localFileName = localFilePath.split('/')[-1]
mediaType = "8"
ftpURL = "ftp://file3.bluefly.corp/ImageDrop/"
ftpFilePath = os.path.join(ftpURL, localFileName)
ftpUSERPWD = "imagedrop:imagedrop0"
if localFilePath != "" and ftpFilePath != "":
## Create send data
### Send the request to Edgecast
c = pycurl.Curl()
c.setopt(pycurl.URL, ftpFilePath)
# c.setopt(pycurl.PORT , 21)
c.setopt(pycurl.USERPWD, ftpUSERPWD)
#c.setopt(pycurl.VERBOSE, 1)
c.setopt(c.CONNECTTIMEOUT, 5)
c.setopt(c.TIMEOUT, 8)
c.setopt(c.FAILONERROR, True)
# c.setopt(pycurl.FORBID_REUSE, 1)
# c.setopt(pycurl.FRESH_CONNECT, 1)
f = open(localFilePath, 'rb')
c.setopt(pycurl.INFILE, f)
c.setopt(pycurl.INFILESIZE, os.path.getsize(localFilePath))
c.setopt(pycurl.INFILESIZE_LARGE, os.path.getsize(localFilePath))
# c.setopt(pycurl.READFUNCTION, f.read());
# c.setopt(pycurl.READDATA, f.read());
c.setopt(pycurl.UPLOAD, 1L)
try:
c.perform()
c.close()
print "Successfully Uploaded --> {0}".format(localFileName)
## return 200
except pycurl.error, error:
errno, errstr = error
print 'An error occurred: ', errstr
try:
c.close()
except:
print "Couldnt Close Cnx"
pass
return errno
#####
###
## backup for 56 then 7 curl err
def upload_to_imagedrop(file):
import ftplib
session = ftplib.FTP('file3.bluefly.corp', 'imagedrop', 'imagedrop0')
fileread = open(file, 'rb')
filename = str(file.split('/')[-1])
session.cwd("ImageDrop/")
session.storbinary('STOR ' + filename, fileread, 8*1024)
fileread.close()
session.quit()
########### RUN #################
# def convert_jpg_png(imgsrc_jpg,imgdest_png):
import os, sys, re, shutil, datetime, glob
### Can pass as sys.argv a direcectory with nested directories containing jpgs. Must have nested dirs
try:
testdir = sys.argv[1]
if os.path.isdir(testdir):
rootdir = testdir
else:
rootdir = '/mnt/Post_Complete/Complete_to_Load/Drop_FinalFilesOnly'
except IndexError:
rootdir = '/mnt/Post_Complete/Complete_to_Load/Drop_FinalFilesOnly'
### Regex Pattern Defs
regex_CR2 = re.compile(r'.+?\.[CR2cr2]{3}')
regex_jpg = re.compile(r'.+?\.[JPGjpg]{3}')
regex_png = re.compile(r'.+?\.[pngPNG]{3}')
regex_coded = re.compile(r'.+?/[1-9][0-9]{8}_[1-6]\.jpg')
regex_primary_jpg = re.compile(r'.+?/[1-9][0-9]{8}\.jpg')
regex_alt_jpg = re.compile(r'.+?/[1-9][0-9]{8}_alt0[1-6]\.jpg')
### Date Defs
todaysdate = '{:%Y,%m,%d}'.format(datetime.datetime.now())
todaysdatefull = '{:%Y,%m,%d,%H,%M}'.format(datetime.datetime.now())
todaysdatearch = '{:%Y,%m,%d,%H,%M}'.format(datetime.datetime.now())
### Define tmp and archive paths prior to Creating
tmp_processing = os.path.join("/mnt/Post_Complete/Complete_to_Load/.tmp_processing" , "tmp_" + str(todaysdatefull).replace(",", ""))
tmp_processing_l = os.path.join(tmp_processing, "largejpg")
tmp_processing_m = os.path.join(tmp_processing, "mediumjpg")
tmp_loading = os.path.join("/mnt/Post_Complete/Complete_Archive/.tmp_loading" , "tmp_" + str(todaysdatefull).replace(",", ""))
## Define for Creating Archive dirs
archive = '/mnt/Post_Complete/Complete_Archive/Uploaded'
archive_uploaded = os.path.join(archive, "dateloaded_" + str(todaysdate).replace(",", ""), "uploaded_" + str(todaysdatearch).replace(",", ""))
imgdest_jpg_final = os.path.join(archive_uploaded, 'JPG_RETOUCHED_ORIG')
imgdest_png_final = os.path.join(archive_uploaded, 'PNG')
###################
## Create Lock File
###################
#locker = os.path.join(rootdir, 'LOCKED.lock')
#if os.path.isfile(locker):
# break
#else:
# with open(locker, 'wb') as f:
# f.write(todaysdatefull)
# f.close()
###########
## Test for ex
walkedout_tmp = glob.glob(os.path.join(rootdir, '*/*.*g'))
if len(walkedout_tmp) == 0:
print "Nothing to Process"
else:
### Make Tmp Folders for Processing And Uploading -- tmp_dirs are dated with time(hr:min)to prevent collisions
try:
os.makedirs(archive_uploaded, 16877)
except:
pass
try:
os.makedirs(tmp_processing, 16877)
except:
pass
try:
os.makedirs(tmp_processing_l, 16877)
except:
pass
try:
os.makedirs(tmp_processing_m, 16877)
except:
pass
try:
os.makedirs(tmp_loading, 16877)
except:
pass
try:
os.makedirs(imgdest_png_final, 16877)
except:
pass
try:
os.makedirs(imgdest_jpg_final, 16877)
except:
pass
####################################################
## Begin Processing and compiling images for Loading
####################################################
## Move All DropFinal Files from Retouchers dirs to tmp_processing from drop folders Then Mogrify to create pngs copy to load and arch dirs
walkedout_tmp = glob.glob(os.path.join(rootdir, '*/*.*g'))
[ shutil.move(file, os.path.join(tmp_processing, os.path.basename(file))) for file in walkedout_tmp ]
### Rename Files moved into Temp Processing Floder
walkedout_tmp = glob.glob(os.path.join(tmp_processing, '*.jpg'))
[ rename_retouched_file(file) for file in walkedout_tmp ]
## Copy Full Size Retouched Jpg to tmp Large and Med jpg folders for Glob Mogrify AND to Final Archive JPG_RETOUCHED_ORIG
walkedout_renamed = glob.glob(os.path.join(tmp_processing, '*.jpg'))
## Large
[ shutil.copy2(file, os.path.join(tmp_processing_l, os.path.basename(file))) for file in walkedout_renamed ]
walkedout_large = glob.glob(os.path.join(tmp_processing_l, '*.jpg'))
### Remove alt images and rename as _l
for f in walkedout_large:
if re.findall(regex_alt_jpg, f):
os.remove(f)
elif re.findall(regex_primary_jpg, f):
f_large = f.replace('.jpg', '_l.jpg')
os.rename(f, f_large)
## Mofrify directory of only primary renamed _l Files to 400x480
subproc_magick_large_jpg(tmp_processing_l)
## Medium
[ shutil.copy2(file, os.path.join(tmp_processing_m, os.path.basename(file))) for file in walkedout_renamed ]
walkedout_medium = glob.glob(os.path.join(tmp_processing_m, '*.jpg'))
### Bypass rename alt images and rename only primary jpgs as _m
for f in walkedout_medium:
if re.findall(regex_primary_jpg, f):
f_medium = f.replace('.jpg', '_m.jpg')
os.rename(f, f_medium)
## Mofrify directory of renamed _m Files and unrenamed alts to 200x240
subproc_magick_medium_jpg(tmp_processing_m)
####
#### JPEGS Have Been CREATED in Each of the tmp_processing folders named _l + _m
####
## PNG
##### PNG CREATE FROM RETOUCHED JPGS ## All files in Root of tmp_processing will be mogrified to PNGs leaving JPG to Arch
## make png frpm hirez jpg then move copy to losding and orig to archive
subproc_magick_png(tmp_processing)
### Glob created PNGs and copy to Load Dir then Store in Arch dir
tmp_png = glob.glob(os.path.join(tmp_processing, '*.png'))
[ shutil.copy2(file, os.path.join(tmp_loading, os.path.basename(file))) for file in tmp_png ]
[ shutil.move(file, os.path.join(imgdest_png_final, os.path.basename(file))) for file in tmp_png ]
## ARCHIVED Backup
## All JPGs in Root dir Only of tmp_processing will be now Archived as all Conversions are completed
jpgs_to_archive = glob.glob(os.path.join(tmp_processing, '*.jpg'))
[ shutil.move(file, os.path.join(imgdest_jpg_final, os.path.basename(file))) for file in jpgs_to_archive ]
###### All PNGs Created and moved to Archive plus Copy sent to Load Directory
###
######
#### All Files Converted for Upload, Now glob search and move large and medium named jpgs to tmp loading
###
load_jpgs = glob.glob(os.path.join(tmp_processing, '*/*.jpg'))
[ shutil.move(file, os.path.join(tmp_loading, os.path.basename(file))) for file in load_jpgs ]
## UPLOAD FTP with PyCurl everything in tmp_loading
###
import time
upload_tmp_loading = glob.glob(os.path.join(tmp_loading, '*.*g'))
for upload_file in upload_tmp_loading:
#### UPLOAD upload_file via ftp to imagedrop using Pycurl
## Then rm loading tmp dir
try:
code = pycurl_upload_imagedrop(upload_file)
if code:
print code, upload_file
time.sleep(float(3))
try:
                    upload_to_imagedrop(upload_file)
print "Uploaded {}".format(upload_file)
time.sleep(float(.3))
shutil.move(upload_file, archive_uploaded)
except:
pass
else:
print "Uploaded {}".format(upload_file)
time.sleep(float(.3))
shutil.move(upload_file, archive_uploaded)
except:
print "Error moving Finals to Arch {}".format(file)
## After completed Process and Load to imagedrop
### Finally Remove the 2 tmp folder trees for process and load if Empty
upload_tmp_loading_remainder = glob.glob(os.path.join(tmp_loading, '*.*g'))
if len(upload_tmp_loading_remainder) == 0:
shutil.rmtree(tmp_loading)
upload_tmp_processing_png_remainder = glob.glob(os.path.join(tmp_processing, '*.*g'))
upload_tmp_processing_jpg_remainder = glob.glob(os.path.join(tmp_processing, '*/*.*g'))
if len(upload_tmp_processing_png_remainder) == 0 and len(upload_tmp_processing_jpg_remainder) == 0:
shutil.rmtree(tmp_processing)
###################
## Remove Lock file
###################
#if os.path.isfile(locker):
# os.remove(locker)
| mit | -1,768,923,151,258,156,500 | 31.470067 | 142 | 0.60926 | false |
schristakidis/p2ner | p2ner/core/statsDecorators.py | 1 | 4884 | # -*- coding: utf-8 -*-
# Copyright 2012 Loris Corazza, Sakis Christakidis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from p2ner.core.core import P2NER
from p2ner.util.logger import LOG as log
import time
statspluginclassname = "stats"
def counter(_name):
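    # Decorator: every call of the wrapped function bumps the named key in the
    # stats plugin, creating the key on first use, e.g. @counter("myCounter").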
def wrap(f):
def wrapped_f(*args, **kwargs):
if P2NER.plugins.hasPlugin(statspluginclassname):
stats = P2NER.plugins.getPlugin(statspluginclassname)
try:
stats.incrementKey(_name)
except:
stats.addKey(_name, 1)
return f(*args, **kwargs)
return wrapped_f
return wrap
def setValue(_name):
def wrap(f):
def wrapped_f(*args, **kwargs):
ret = f(*args, **kwargs)
if P2NER.plugins.hasPlugin(statspluginclassname):
stats = P2NER.plugins.getPlugin(statspluginclassname)
try:
stats.setKey(_name, ret)
except:
stats.addKey(_name, ret)
return ret
return wrapped_f
return wrap
def valuecounter(_name, _value):
def wrap(f):
def wrapped_f(*args, **kwargs):
ret = f(*args, **kwargs)
if P2NER.plugins.hasPlugin(statspluginclassname):
stats = P2NER.plugins.getPlugin(statspluginclassname)
if ret==_value:
try:
stats.incrementKey(_name)
except:
stats.addKey(_name, 1)
return ret
return wrapped_f
return wrap
def neqvaluecounter(_name, _value):
def wrap(f):
def wrapped_f(*args, **kwargs):
ret = f(*args, **kwargs)
if P2NER.plugins.hasPlugin(statspluginclassname):
stats = P2NER.plugins.getPlugin(statspluginclassname)
if ret != _value:
try:
stats.incrementKey(_name)
except:
stats.addKey(_name, 1)
return ret
return wrapped_f
return wrap
def incrementValuecounter(_name):
def wrap(f):
def wrapped_f(*args, **kwargs):
ret = f(*args, **kwargs)
if P2NER.plugins.hasPlugin(statspluginclassname):
stats = P2NER.plugins.getPlugin(statspluginclassname)
incr = ret
try:
stats.incrementKey(_name, incr)
except:
stats.addKey(_name, incr)
#log.debug("%s: %d" % (_name, stats.getKey(_name)))
return ret
return wrapped_f
return wrap
def ratio(_name, _up, _down):
def wrap(f):
def wrapped_f(*args, **kwargs):
ret = f(*args, **kwargs)
if P2NER.plugins.hasPlugin(statspluginclassname):
stats = P2NER.plugins.getPlugin(statspluginclassname)
if stats.hasKey(_down):
d = stats.getKey(_down)
n = 0
if stats.hasKey(_up):
n = stats.getKey(_up)
r = float(n)/d
try:
stats.setKey(_name, r)
except:
stats.addKey(_name, r)
#log.debug("%s: %d" % (_name, stats.getKey(_name)))
return ret
return wrapped_f
return wrap
def timeratio(_name, _up):
def wrap(f):
def wrapped_f(*args, **kwargs):
ret = f(*args, **kwargs)
if P2NER.plugins.hasPlugin(statspluginclassname):
stats = P2NER.plugins.getPlugin(statspluginclassname)
if hasattr(stats, 't0'):
n = 0
d = time.time() - stats.t0
if stats.hasKey(_up):
n = stats.getKey(_up)
r = float(n)/d
try:
stats.setKey(_name, r)
except:
stats.addKey(_name, r)
return ret
return wrapped_f
return wrap
def dumpStats():
ret = {}
if P2NER.plugins.hasPlugin(statspluginclassname):
stats = P2NER.plugins.getPlugin(statspluginclassname)
ret = stats.dumpKeys()
return ret
| apache-2.0 | -5,578,088,276,203,119,000 | 33.394366 | 76 | 0.518428 | false |
gregleno/romi | rominet/robot_wii_controler.py | 1 | 3816 | #!/usr/bin/env python
import logging
import math
from wiiremote import WiiRemote
from robot import Robot
try:
import cwiid
except ImportError:
cwiid = None
class RobotWiiController(object):
def __init__(self, robot):
self.log = logging.getLogger('romi')
self.robot = robot
self.robot.play_welcome_message()
self.wiimote = WiiRemote.connect()
self.nun_btn_z = False
self.control_robot_with_buttons = False
if self.wiimote is not None:
self.log.info("Connected to wiimote")
self.wiimote.set_callbacks(self.buttons_cb, self.nun_buttons_cb, self.nun_stick_cb,
self.nun_stick_disconnected_cb)
self.wiimote.monitor(100)
self.log.info("Started")
else:
self.log.error("Could not connect to wiimote")
def release(self):
if self.wiimote is not None:
self.wiimote.remove_callbacks()
self.wiimote.release()
self.wiimote = None
self.robot.set_speed_target(0, 0)
def buttons_cb(self, buttons):
if buttons & cwiid.BTN_1 and buttons & cwiid.BTN_B:
self.release()
else:
self._move_robot_with_buttons(buttons)
def nun_buttons_cb(self, buttons):
self.nun_btn_z = buttons & cwiid.NUNCHUK_BTN_Z
self._move_robot_with_stick(self.wiimote.get_nun_stick())
def nun_stick_cb(self, stick):
self._move_robot_with_stick(stick)
def nun_stick_disconnected_cb(self):
self.robot.set_speed_target(0, 0)
def _move_robot_with_stick(self, stick):
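        # Map the nunchuk stick vector onto differential-drive speeds: a mostly
        # horizontal deflection spins the robot in place, otherwise y drives
        # forward/back while x steers; speed is scaled to 40% unless the Z button
        # is held.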
x = stick[0]
y = stick[1]
speed = math.sqrt(y * y + x * x)
left = speed
right = speed
if speed < 0.001:
left = right = 0
elif abs(y) < abs(x) / 2:
if x > 0:
right = -speed
else:
left = -speed
else:
if y > 0:
left = speed - max(0, -x)
right = speed - max(0, x)
else:
left = -speed + max(0, -x)
right = -speed + max(0, x)
if not self.nun_btn_z:
left *= 0.4
right *= 0.4
self.robot.set_speed_target(left, right)
def _move_robot_with_buttons(self, buttons):
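        # D-pad fallback control: left/right drive straight, up/down spin in place,
        # diagonal combinations pivot on a single wheel, and releasing every arrow
        # stops the robot (only when the buttons were the active control).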
speed = 0.3
if buttons & (cwiid.BTN_RIGHT | cwiid.BTN_DOWN | cwiid.BTN_UP | cwiid.BTN_LEFT):
self.control_robot_with_buttons = True
if (buttons & cwiid.BTN_RIGHT) and (buttons & cwiid.BTN_DOWN):
print "{}".format(cwiid.BTN_RIGHT | cwiid.BTN_DOWN)
self.robot.set_speed_target(speed, 0)
elif (buttons & cwiid.BTN_RIGHT) and (buttons & cwiid.BTN_UP):
self.robot.set_speed_target(0, speed)
elif (buttons & cwiid.BTN_LEFT) and (buttons & cwiid.BTN_UP):
self.robot.set_speed_target(0, -speed)
elif (buttons & cwiid.BTN_LEFT) and (buttons & cwiid.BTN_DOWN):
self.robot.set_speed_target(-speed, 0)
elif buttons & cwiid.BTN_RIGHT:
self.robot.set_speed_target(speed, speed)
elif buttons & cwiid.BTN_LEFT:
self.robot.set_speed_target(-speed, -speed)
elif buttons & cwiid.BTN_UP:
self.robot.set_speed_target(-speed, speed)
elif buttons & cwiid.BTN_DOWN:
self.robot.set_speed_target(speed, -speed)
else:
if self.control_robot_with_buttons:
self.control_robot_with_buttons = False
self.robot.set_speed_target(0, 0)
def main():
log = logging.getLogger('romi')
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler())
RobotWiiController(Robot())
if __name__ == "__main__":
main()
| gpl-3.0 | 3,825,865,961,917,650,000 | 31.338983 | 95 | 0.5587 | false |
RedHenLab/Audio | CNN/speaker/recognition.py | 1 | 3110 | import numpy as np
import cPickle as pickle
from AudioPipe.features import mfcc
from silence import remove_silence
import scipy.io.wavfile as wav
import librosa
from skgmm import GMMSet, GMM
class GMMRec(object):
def __init__(self):
self.features = []
self.gmmset = GMMSet()
self.classes = []
self.models = []
def delete_speaker(self, name):
if name in self.classes:
ind = self.classes.index(name)
del self.classes[ind]
del self.models[ind]
self.classes.remove(name)
ind = self.gmmset.y.index(name)
del self.gmmset.gmms[ind]
self.gmmset.y.remove(name)
else:
print name, "not in the list!"
def enroll_model(self, name, model):
if name not in self.classes:
self.classes.append(name)
self.models.append(model)
self.features.append(None)
gmm = self.load(model)
self.gmmset.add_new(gmm, name)
def enroll(self, name, mfcc_vecs, model=None):
if name not in self.classes:
feature = mfcc_vecs.astype(np.float32)
self.features.append(feature)
self.classes.append(name)
self.models.append(model)
else:
print name+" already enrolled, please delete the old one first!"
def get_mfcc(self, audio_path):
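        # Read the wav file, keep a single channel, strip silent frames, then stack
        # 19 MFCCs with their delta and delta-delta coefficients into one
        # 57-dimensional feature vector per frame.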
(sr, sig) = wav.read(audio_path)
if len(sig.shape) > 1:
sig = sig[:, 0]
cleansig = remove_silence(sr, sig)
mfcc_vecs = mfcc(cleansig, sr, numcep = 19)
mfcc_delta = librosa.feature.delta(mfcc_vecs.T)
mfcc_delta2 = librosa.feature.delta(mfcc_vecs.T, order=2)
feats=np.vstack([mfcc_vecs.T, mfcc_delta, mfcc_delta2])
return feats.T
def enroll_file(self, name, fn, model=None):
if name not in self.classes:
fn_mfcc = np.array(self.get_mfcc(fn))
self.enroll(name, fn_mfcc, model=model)
else:
print name+" already enrolled, please delete the old one first!"
def _get_gmm_set(self):
return GMMSet()
def train(self, gmm_order=None):
for name, feats, model in zip(self.classes, self.features, self.models):
if (name not in self.gmmset.y) and (name is not None) :
gmm = self.gmmset.fit_new(feats, name, gmm_order)
if model is not None:
self.dump(model, part=gmm)
else:
print name+" already trained, skip!"
def predict(self, mfcc_vecs):
feature = mfcc_vecs.astype(np.float32)
return self.gmmset.predict_one(feature)
def dump(self, fname, part = None):
with open(fname, 'w') as f:
if part is None:
pickle.dump(self, f, -1)
else:
pickle.dump(part, f, -1)
@staticmethod
def load(fname):
with open(fname, 'r') as f:
R = pickle.load(f)
return R
| gpl-2.0 | -5,686,258,323,994,095,000 | 31.395833 | 80 | 0.548232 | false |
calancha/DIRAC | Core/Utilities/SiteCEMapping.py | 1 | 6279 | ########################################################################
# $HeadURL$
# File : SiteCEMapping.py
########################################################################
""" The SiteCEMapping module performs the necessary CS gymnastics to
resolve site and CE combinations. These manipulations are necessary
in several components.
Assumes CS structure of: /Resources/Sites/<GRIDNAME>/<SITENAME>
"""
__RCSID__ = "$Id$"
import re
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
#############################################################################
def getSiteCEMapping( gridName = '' ):
""" Returns a dictionary of all sites and their CEs as a list, e.g.
{'LCG.CERN.ch':['ce101.cern.ch',...]}
If gridName is specified, result is restricted to that Grid type.
"""
siteCEMapping = {}
gridTypes = gConfig.getSections( 'Resources/Sites/', [] )
if not gridTypes['OK']:
gLogger.warn( 'Problem retrieving sections in /Resources/Sites' )
return gridTypes
gridTypes = gridTypes['Value']
if gridName:
if not gridName in gridTypes:
return S_ERROR( 'Could not get sections for /Resources/Sites/%s' % gridName )
gridTypes = [gridName]
gLogger.debug( 'Grid Types are: %s' % ( ', '.join( gridTypes ) ) )
for grid in gridTypes:
sites = gConfig.getSections( '/Resources/Sites/%s' % grid, [] )
if not sites['OK']:
gLogger.warn( 'Problem retrieving /Resources/Sites/%s section' % grid )
return sites
for candidate in sites['Value']:
candidateCEs = gConfig.getValue( '/Resources/Sites/%s/%s/CE' % ( grid, candidate ), [] )
if candidateCEs:
siteCEMapping[candidate] = candidateCEs
else:
gLogger.debug( 'No CEs defined for site %s' % candidate )
return S_OK( siteCEMapping )
#############################################################################
def getCESiteMapping( gridName = '' ):
""" Returns a dictionary of all CEs and their associated site, e.g.
{'ce101.cern.ch':'LCG.CERN.ch', ...]}
Assumes CS structure of: /Resources/Sites/<GRIDNAME>/<SITENAME>
"""
ceSiteMapping = {}
gridTypes = gConfig.getSections( '/Resources/Sites/', [] )
if not gridTypes['OK']:
gLogger.warn( 'Problem retrieving sections in /Resources/Sites' )
return gridTypes
gridTypes = gridTypes['Value']
if gridName:
if not gridName in gridTypes:
return S_ERROR( 'Could not get sections for /Resources/Sites/%s' % gridName )
gridTypes = [gridName]
gLogger.debug( 'Grid Types are: %s' % ( ', '.join( gridTypes ) ) )
for grid in gridTypes:
sites = gConfig.getSections( '/Resources/Sites/%s' % grid, [] )
if not sites['OK']: #gConfig returns S_ERROR for empty sections until version
gLogger.warn( 'Problem retrieving /Resources/Sites/%s section' % grid )
return sites
if sites:
for candidate in sites['Value']:
siteCEs = gConfig.getValue( '/Resources/Sites/%s/%s/CE' % ( grid, candidate ), [] )
for ce in siteCEs:
if ceSiteMapping.has_key( ce ):
current = ceSiteMapping[ce]
gLogger.warn( 'CE %s already has a defined site %s but it is also defined for %s' % ( ce, current, candidate ) )
else:
ceSiteMapping[ce] = candidate
return S_OK( ceSiteMapping )
#############################################################################
def getSiteForCE( computingElement ):
""" Given a Grid CE name this method returns the DIRAC site name.
WARNING: if two or more sites happen to have the same ceName/queueName, then only the first found is returned
"""
finalSite = ''
gridTypes = gConfig.getSections( '/Resources/Sites/', [] )
if not gridTypes['OK']:
gLogger.warn( 'Problem retrieving sections in /Resources/Sites' )
return gridTypes
gridTypes = gridTypes['Value']
for grid in gridTypes:
sites = gConfig.getSections( '/Resources/Sites/%s' % grid, [] )
if not sites['OK']:
gLogger.warn( 'Problem retrieving /Resources/Sites/%s section' % grid )
return sites
if sites:
siteList = sites['Value']
for candidate in siteList:
siteCEs = gConfig.getValue( '/Resources/Sites/%s/%s/CE' % ( grid, candidate ), [] )
if computingElement in siteCEs:
finalSite = candidate
break
return S_OK( finalSite )
#############################################################################
def getCEsForSite( siteName ):
""" Given a DIRAC site name this method returns a list of corresponding CEs.
"""
  if not re.search( r'\.', siteName ):
return S_ERROR( '%s is not a valid site name' % siteName )
gridName = siteName.split( '.' )[0]
siteSection = '/Resources/Sites/%s/%s/CE' % ( gridName, siteName )
ces = gConfig.getValue( siteSection, [] )
return S_OK( ces )
#############################################################################
def getQueueInfo( ceUniqueID, diracSiteName = '' ):
"""
Extract information from full CE Name including associate DIRAC Site
"""
try:
subClusterUniqueID = ceUniqueID.split( '/' )[0].split( ':' )[0]
queueID = ceUniqueID.split( '/' )[1]
except:
return S_ERROR( 'Wrong full queue Name' )
if not diracSiteName:
gLogger.debug( "SiteName not given, looking in /LocaSite/Site" )
diracSiteName = gConfig.getValue( '/LocalSite/Site', '' )
if not diracSiteName:
gLogger.debug( "Can't find LocalSite name, looking in CS" )
result = getSiteForCE( subClusterUniqueID )
if not result['OK']:
return result
diracSiteName = result['Value']
if not diracSiteName:
gLogger.error( 'Can not find corresponding Site in CS' )
return S_ERROR( 'Can not find corresponding Site in CS' )
gridType = diracSiteName.split( '.' )[0]
siteCSSEction = '/Resources/Sites/%s/%s/CEs/%s' % ( gridType, diracSiteName, subClusterUniqueID )
queueCSSection = '%s/Queues/%s' % ( siteCSSEction, queueID )
resultDict = { 'SubClusterUniqueID': subClusterUniqueID,
'QueueID': queueID,
'SiteName': diracSiteName,
'Grid': gridType,
'SiteCSSEction': siteCSSEction,
'QueueCSSection': queueCSSection }
return S_OK( resultDict )
| gpl-3.0 | 8,439,699,818,259,009,000 | 37.054545 | 124 | 0.589903 | false |
scampion/pimpy | pimpy/image/features/bindct.py | 1 | 2404 | #-*- coding:utf-8 -*-"""
u"""
pimpy.image.features.bindct enable to compute a binary dct
.. module:: bindct
:synopsis: Tools for video
:platform: Unix, Mac, Windows
.. moduleauthor:: Sebastien Campion <[email protected]>
"""
# pimpy
# Copyright (C) 2010 Sebastien Campion <[email protected]>
#
# pimpy is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# pimpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pimpy; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from feature import Feature
from math import sqrt
import numpy
import numpy.fft as fft
import logging
class BinDCT(Feature):
u"""
A binary DCT image descriptor
:param name: sigsize input signature size default 64
:type name: int
"""
name = "bindct"
description = __doc__
sigsize=64
def __init__(self,**kwargs):
Feature.__init__(self,**kwargs)
self.log = logging.getLogger('pimpy.image.features.bindct')
def get(self,image):
"""
return dct descriptor
:rtype: numpy.array
"""
if image.format != "x-raw-gray" :
image.convert2gray()
self.log.warning("Gray conversion is not optimized use native decoder, original format %s" % image.format)
#self.log.debug("Image lenght is %i" % len(image.data))
#self.log.debug("Image size is %ix%i " % (image.height,image.width))
im = numpy.fromstring(image.data,dtype=numpy.uint8)
im = im.reshape((image.height,image.width))
        hs = ws = int(sqrt(self.sigsize))  # integer block size for the slicing/indexing below
dct = fft.rfft2(im)
subdct = dct[0:ws,0:hs]
subdct[0][0] = dct[ws,hs]
subdct = subdct.reshape(self.sigsize,)
median = numpy.median(subdct)
self.sig = numpy.zeros(self.sigsize,dtype='bool')
self.sig[numpy.where( subdct > median )] = True
return self.sig
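# Minimal usage sketch (not part of the original module; assumes a pimpy
# Image instance whose gray raw data is available):
#
#   descriptor = BinDCT()
#   signature = descriptor.get(image)   # boolean numpy array of length 64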
| agpl-3.0 | 6,329,867,416,227,239,000 | 31.486486 | 118 | 0.654326 | false |
psss/python-nitrate | source/immutable.py | 1 | 50597 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Python API for the Nitrate test case management system.
# Copyright (c) 2012 Red Hat, Inc. All rights reserved.
# Author: Petr Splichal <[email protected]>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
Immutable Nitrate objects
"""
import re
from six.moves import xmlrpc_client as xmlrpclib
import nitrate.config as config
from nitrate.config import log
from nitrate.base import Nitrate, NitrateNone, _getter, _idify
from nitrate.utils import pretty, color
from nitrate.xmlrpc_driver import NitrateError
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Build Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Build(Nitrate):
""" Product build """
# Local cache of Build
_cache = {}
# List of all object attributes (used for init & expiration)
_attributes = ["name", "product"]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Build Properties
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Read-only properties
id = property(_getter("id"), doc="Build id.")
name = property(_getter("name"), doc="Build name.")
product = property(_getter("product"), doc="Relevant product.")
@classmethod
def _cache_lookup(cls, id, **kwargs):
""" Look up cached objects, return found instance and search key """
# Name and product check
if "product" in kwargs and ("name" in kwargs or "build" in kwargs):
product = kwargs.get("product")
if isinstance(product, Product):
product = product.name
name = kwargs.get("name", kwargs.get("build"))
return cls._cache["{0}---in---{1}".format(name, product)], name
return super(Build, cls)._cache_lookup(id, **kwargs)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Build Special
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, id=None, name=None, product=None, **kwargs):
""" Initialize by build id or product and build name """
# Backward compatibility for 'build' argument (now called 'name')
name = name if name is not None else kwargs.get("build")
# Initialize (unless already done)
id, ignore, inject, initialized = self._is_initialized(id or name)
if initialized: return
Nitrate.__init__(self, id)
# If inject given, fetch build data from it
if inject:
self._fetch(inject)
# Initialized by build name and product
elif name is not None and product is not None:
self._name = name
# Detect product format
if isinstance(product, Product):
self._product = product
else:
self._product = Product(product)
# Index by name-product (only when the product name is known)
if self.product._name is not NitrateNone:
self._index("{0}---in---{1}".format(
self.name, self.product.name))
# Otherwise just check that the id was provided
elif not id:
raise NitrateError("Need either build id or both build name "
"and product to initialize the Build object.")
def __unicode__(self):
""" Build name for printing """
return self.name
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Build Methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _fetch(self, inject=None):
""" Get the missing build data """
Nitrate._fetch(self, inject)
# Directly fetch from the initial object dict
if inject is not None:
log.info("Processing build ID#{0} inject".format(
inject["build_id"]))
# Search by build id
elif self._id is not NitrateNone:
try:
log.info("Fetching build " + self.identifier)
inject = self._server.Build.get(self.id)
except xmlrpclib.Fault as error:
log.debug(error)
raise NitrateError(
"Cannot find build for " + self.identifier)
# Search by build name and product
else:
try:
log.info(u"Fetching build '{0}' of '{1}'".format(
self.name, self.product.name))
inject = self._server.Build.check_build(
self.name, self.product.id)
self._id = inject["build_id"]
except xmlrpclib.Fault as error:
log.debug(error)
raise NitrateError("Build '{0}' not found in '{1}'".format(
self.name, self.product.name))
except KeyError:
if "args" in inject:
log.debug(inject["args"])
raise NitrateError("Build '{0}' not found in '{1}'".format(
self.name, self.product.name))
# Initialize data from the inject and index into cache
log.debug("Initializing Build ID#{0}".format(inject["build_id"]))
log.data(pretty(inject))
self._inject = inject
self._id = inject["build_id"]
self._name = inject["name"]
self._product = Product(
{"id": inject["product_id"], "name": inject["product"]})
self._index("{0}---in---{1}".format(self.name, self.product.name))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Category Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Category(Nitrate):
""" Test case category """
# Local cache of Category objects indexed by category id
_cache = {}
# List of all object attributes (used for init & expiration)
_attributes = ["name", "product", "description"]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Category Properties
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Read-only properties
id = property(_getter("id"), doc="Category id.")
name = property(_getter("name"), doc="Category name.")
product = property(_getter("product"), doc="Relevant product.")
description = property(_getter("description"), doc="Category description.")
@property
def synopsis(self):
""" Short category summary (including product info) """
return "{0}, {1}".format(self.name, self.product)
@classmethod
def _cache_lookup(cls, id, **kwargs):
""" Look up cached objects, return found instance and search key """
# Name and product check
if "product" in kwargs and ("name" in kwargs or "category" in kwargs):
product = kwargs.get("product")
if isinstance(product, Product):
product = product.name
name = kwargs.get("name", kwargs.get("category"))
return cls._cache["{0}---in---{1}".format(name, product)], name
return super(Category, cls)._cache_lookup(id, **kwargs)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Category Special
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, id=None, name=None, product=None, **kwargs):
""" Initialize by category id or category name and product """
# Backward compatibility for 'category' argument (now called 'name')
name = name if name is not None else kwargs.get("category")
# Initialize (unless already done)
id, ignore, inject, initialized = self._is_initialized(id or name)
if initialized: return
Nitrate.__init__(self, id)
# If inject given, fetch tag data from it
if inject:
self._fetch(inject)
# Initialized by category name and product
elif name is not None and product is not None:
self._name = name
# Detect product format
if isinstance(product, Product):
self._product = product
else:
self._product = Product(product)
# Index by name-product (only when the product name is known)
if self.product._name is not NitrateNone:
self._index("{0}---in---{1}".format(
self.name, self.product.name))
# Otherwise just check that the id was provided
elif not id:
raise NitrateError("Need either category id or both category "
"name and product to initialize the Category object.")
def __unicode__(self):
""" Category name for printing """
return self.name
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Category Methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _fetch(self, inject=None):
""" Get the missing category data """
Nitrate._fetch(self, inject)
# Directly fetch from the initial object dict
if inject is not None:
log.info("Processing category ID#{0} inject".format(inject["id"]))
# Search by category id
elif self._id is not NitrateNone:
try:
log.info("Fetching category {0}".format(self.identifier))
inject = self._server.Product.get_category(self.id)
except xmlrpclib.Fault as error:
log.debug(error)
raise NitrateError(
"Cannot find category for " + self.identifier)
# Search by category name and product
else:
try:
log.info(u"Fetching category '{0}' of '{1}'".format(
self.name, self.product.name))
inject = self._server.Product.check_category(
self.name, self.product.id)
except xmlrpclib.Fault as error:
log.debug(error)
raise NitrateError("Category '{0}' not found in"
" '{1}'".format(self.name, self.product.name))
# Initialize data from the inject and index into cache
log.debug("Initializing category ID#{0}".format(inject["id"]))
log.data(pretty(inject))
self._inject = inject
self._id = inject["id"]
self._name = inject["name"]
self._product = Product(
{"id": inject["product_id"], "name": inject["product"]})
self._index("{0}---in---{1}".format(self.name, self.product.name))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PlanType Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class PlanType(Nitrate):
""" Plan type """
# Local cache of PlanType objects indexed by plan type id
_cache = {}
# By default we cache PlanType objects for ever
_expiration = config.NEVER_EXPIRE
# List of all object attributes (used for init & expiration)
_attributes = ["name"]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PlanType Properties
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Read-only properties
id = property(_getter("id"), doc="Test plan type id")
name = property(_getter("name"), doc="Test plan type name")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PlanType Decorated
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@classmethod
def _cache_lookup(cls, id, **kwargs):
""" Look up cached objects, return found instance and search key """
# Search cache by plan type name
if "name" in kwargs:
return cls._cache[kwargs["name"]], kwargs["name"]
        # Otherwise perform default search by id
return super(PlanType, cls)._cache_lookup(id, **kwargs)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PlanType Special
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, id=None, name=None):
""" Initialize by test plan type id or name """
# Initialize (unless already done)
id, name, inject, initialized = self._is_initialized(id or name)
if initialized: return
Nitrate.__init__(self, id)
# If inject given, fetch data from it
if inject:
self._fetch(inject)
# Initialize by name
elif name is not None:
self._name = name
self._index(name)
# Otherwise just check that the test plan type id was provided
elif not id:
raise NitrateError(
"Need either id or name to initialize the PlanType object")
def __unicode__(self):
""" PlanType name for printing """
return self.name
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PlanType Methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _fetch(self, inject=None):
""" Get the missing test plan type data """
Nitrate._fetch(self, inject)
# Directly fetch from the initial object dict
if inject is not None:
log.info("Processing PlanType ID#{0} inject".format(inject["id"]))
# Search by test plan type id
elif self._id is not NitrateNone:
try:
log.info("Fetching test plan type " + self.identifier)
inject = self._server.TestPlan.get_plan_type(self.id)
except xmlrpclib.Fault as error:
log.debug(error)
raise NitrateError(
"Cannot find test plan type for " + self.identifier)
# Search by test plan type name
else:
try:
log.info(u"Fetching test plan type '{0}'".format(self.name))
inject = self._server.TestPlan.check_plan_type(self.name)
except xmlrpclib.Fault as error:
log.debug(error)
raise NitrateError("PlanType '{0}' not found".format(
self.name))
# Initialize data from the inject and index into cache
log.debug("Initializing PlanType ID#{0}".format(inject["id"]))
log.data(pretty(inject))
self._inject = inject
self._id = inject["id"]
self._name = inject["name"]
self._index(self.name)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Priority Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Priority(Nitrate):
""" Test case priority """
_priorities = ['P0', 'P1', 'P2', 'P3', 'P4', 'P5']
def __init__(self, priority):
"""
Takes numeric priority id (1-5) or priority name which is one of:
P1, P2, P3, P4, P5
"""
if isinstance(priority, int):
if priority < 1 or priority > 5:
raise NitrateError(
"Not a valid Priority id: '{0}'".format(priority))
self._id = priority
else:
try:
self._id = self._priorities.index(priority)
except ValueError:
raise NitrateError("Invalid priority '{0}'".format(priority))
def __unicode__(self):
""" Return priority name for printing """
return self.name
@property
def id(self):
""" Numeric priority id """
return self._id
@property
def name(self):
""" Human readable priority name """
return self._priorities[self._id]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Product Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Product(Nitrate):
""" Product """
# Local cache of Product
_cache = {}
# List of all object attributes (used for init & expiration)
_attributes = ["name"]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Product Properties
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Read-only properties
id = property(_getter("id"), doc="Product id")
name = property(_getter("name"), doc="Product name")
@classmethod
def _cache_lookup(cls, id, **kwargs):
""" Look up cached objects, return found instance and search key """
# Search the cache by product name
if "name" in kwargs:
name = kwargs.get("name")
return cls._cache[name], name
return super(Product, cls)._cache_lookup(id, **kwargs)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Product Special
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, id=None, name=None):
"""
Initialize the Product by id or name
Examples:
Product(60)
Product(id=60)
Product("Red Hat Enterprise Linux 6")
Product(name="Red Hat Enterprise Linux 6")
"""
# Initialize (unless already done)
id, name, inject, initialized = self._is_initialized(id or name)
if initialized: return
Nitrate.__init__(self, id)
# If inject given, fetch test case data from it
if inject:
self._fetch(inject)
# Initialize by name
elif name is not None:
self._name = name
self._index(name)
# Otherwise just check that the product id was provided
elif not id:
raise NitrateError("Need id or name to initialize Product")
def __unicode__(self):
""" Product name for printing """
return self.name
@staticmethod
def search(**query):
""" Search for products """
return [Product(hash["id"])
for hash in Nitrate()._server.Product.filter(dict(query))]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Product Methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _fetch(self, inject=None):
""" Fetch product data from the server """
Nitrate._fetch(self, inject)
# Directly fetch from the initial object dict
if inject is not None:
log.debug("Initializing Product ID#{0}".format(inject["id"]))
log.data(pretty(inject))
self._id = inject["id"]
self._name = inject["name"]
# Search by product id
elif self._id is not NitrateNone:
try:
log.info("Fetching product " + self.identifier)
inject = self._server.Product.filter({'id': self.id})[0]
log.debug("Initializing product " + self.identifier)
log.data(pretty(inject))
self._inject = inject
self._name = inject["name"]
except IndexError:
raise NitrateError(
"Cannot find product for " + self.identifier)
# Search by product name
else:
try:
log.info(u"Fetching product '{0}'".format(self.name))
inject = self._server.Product.filter({'name': self.name})[0]
log.debug(u"Initializing product '{0}'".format(self.name))
log.data(pretty(inject))
self._inject = inject
self._id = inject["id"]
except IndexError:
raise NitrateError(
"Cannot find product for '{0}'".format(self.name))
# Index the fetched object into cache
self._index(self.name)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PlanStatus Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class PlanStatus(Nitrate):
""" Test plan status (is_active field) """
_statuses = ["DISABLED", "ENABLED"]
_colors = ["red", "green"]
def __init__(self, status):
"""
Takes bool, numeric status id or status name.
0 ... False ... DISABLED
1 ... True .... ENABLED
"""
if isinstance(status, int):
            if status not in [0, 1]:
raise NitrateError(
"Not a valid plan status id: '{0}'".format(status))
# Save id (and convert possible bool to int)
self._id = int(status)
else:
try:
self._id = self._statuses.index(status)
except ValueError:
raise NitrateError("Invalid plan status '{0}'".format(status))
def __unicode__(self):
""" Return plan status name for printing """
return self.name
def __nonzero__(self):
""" Boolean status representation """
return self._id != 0
@property
def id(self):
""" Numeric plan status id """
return self._id
@property
def name(self):
""" Human readable plan status name """
return color(self._statuses[self.id], color=self._colors[self.id],
enabled=config.Coloring().enabled())
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# RunStatus Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class RunStatus(Nitrate):
""" Test run status """
_statuses = ['RUNNING', 'FINISHED']
def __init__(self, status):
"""
Takes numeric status id, status name or stop date.
        A 'None' value is treated as 'no stop date', i.e. still running:
0 ... RUNNING ... 'None'
1 ... FINISHED ... '2011-07-27 15:14'
"""
if isinstance(status, int):
if status not in [0, 1]:
raise NitrateError(
"Not a valid run status id: '{0}'".format(status))
self._id = status
else:
# Running or no stop date
if status == "RUNNING" or status == "None" or status is None:
self._id = 0
# Finished or some stop date
elif status == "FINISHED" or re.match("^[-0-9: ]+$", status):
self._id = 1
else:
raise NitrateError("Invalid run status '{0}'".format(status))
def __unicode__(self):
""" Return run status name for printing """
return self.name
@property
def id(self):
""" Numeric runstatus id """
return self._id
@property
def name(self):
""" Human readable runstatus name """
return self._statuses[self._id]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# CaseStatus Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CaseStatus(Nitrate):
""" Test case status """
_casestatuses = ['PAD', 'PROPOSED', 'CONFIRMED', 'DISABLED', 'NEED_UPDATE']
def __init__(self, casestatus):
"""
Takes numeric status id (1-4) or status name which is one of:
PROPOSED, CONFIRMED, DISABLED, NEED_UPDATE
"""
if isinstance(casestatus, int):
if casestatus < 1 or casestatus > 4:
raise NitrateError(
"Not a valid casestatus id: '{0}'".format(casestatus))
self._id = casestatus
else:
try:
self._id = self._casestatuses.index(casestatus)
except ValueError:
raise NitrateError(
"Invalid casestatus '{0}'".format(casestatus))
def __unicode__(self):
""" Return casestatus name for printing """
return self.name
@property
def id(self):
""" Numeric casestatus id """
return self._id
@property
def name(self):
""" Human readable casestatus name """
return self._casestatuses[self._id]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Status Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Status(Nitrate):
"""
Test case run status.
    Used for easy conversion between id and name.
"""
_statuses = ['PAD', 'IDLE', 'PASSED', 'FAILED', 'RUNNING', 'PAUSED',
'BLOCKED', 'ERROR', 'WAIVED']
_colors = [None, "blue", "lightgreen", "lightred", "green", "yellow",
"red", "magenta", "lightcyan"]
def __init__(self, status):
"""
Takes numeric status id (1-8) or status name which is one of:
IDLE, PASSED, FAILED, RUNNING, PAUSED, BLOCKED, ERROR, WAIVED
"""
if isinstance(status, int):
if status < 1 or status > 8:
raise NitrateError(
"Not a valid Status id: '{0}'".format(status))
self._id = status
else:
try:
self._id = self._statuses.index(status)
except ValueError:
raise NitrateError("Invalid status '{0}'".format(status))
def __unicode__(self):
""" Return status name for printing """
return self.name
@property
def id(self):
""" Numeric status id """
return self._id
@property
def _name(self):
""" Status name, plain without coloring """
return self._statuses[self.id]
@property
def name(self):
""" Human readable status name """
return color(self._name, color=self._colors[self.id],
enabled=config.Coloring().enabled())
@property
def shortname(self):
""" Short same-width status string (4 chars) """
return color(self._name[0:4], color=self._colors[self.id],
enabled=config.Coloring().enabled())
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# User Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class User(Nitrate):
""" User """
# Local cache of User objects indexed by user id
_cache = {}
# List of all object attributes (used for init & expiration)
_attributes = ["name", "login", "email"]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# User Properties
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Read-only properties
id = property(_getter("id"), doc="User id.")
login = property(_getter("login"), doc="Login username.")
email = property(_getter("email"), doc="User email address.")
name = property(_getter("name"), doc="User first name and last name.")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# User Decorated
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@classmethod
def _cache_lookup(cls, id, **kwargs):
""" Look up cached objects, return found instance and search key """
# Return current user
if id is None and 'login' not in kwargs and 'email' not in kwargs:
return cls._cache["i-am-current-user"], "current user"
# Search by login & email
if "login" in kwargs:
return cls._cache[kwargs["login"]], kwargs["login"]
if "email" in kwargs:
return cls._cache[kwargs["email"]], kwargs["email"]
# Default search by id
return super(User, cls)._cache_lookup(id, **kwargs)
@staticmethod
def search(**query):
""" Search for users """
return [User(hash)
for hash in Nitrate()._server.User.filter(dict(query))]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# User Special
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __new__(cls, id=None, *args, **kwargs):
""" Create a new object, handle caching if enabled """
# Convert login or email into name for better logging
if "login" in kwargs or "email" in kwargs:
name = kwargs.get("login", kwargs.get("email"))
return Nitrate.__new__(cls, id=id, name=name, *args, **kwargs)
else:
return Nitrate.__new__(cls, id=id, *args, **kwargs)
def __init__(self, id=None, login=None, email=None):
"""
Initialize by user id, login or email
Defaults to the current user if no id, login or email provided.
If xmlrpc initial object dict provided as the first argument,
data are initialized directly from it.
"""
# Initialize (unless already done)
id, name, inject, initialized = self._is_initialized(
id or login or email)
if initialized: return
Nitrate.__init__(self, id, prefix="UID")
# If inject given, fetch data from it
if inject:
self._fetch(inject)
# Otherwise initialize by login or email
elif name is not None:
if "@" in name:
self._email = name
else:
self._login = name
self._index(name)
def __unicode__(self):
""" User login for printing """
return self.name if self.name is not None else u"No Name"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# User Methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _fetch(self, inject=None):
""" Fetch user data from the server """
Nitrate._fetch(self, inject)
if inject is None:
# Search by id
if self._id is not NitrateNone:
try:
log.info("Fetching user " + self.identifier)
inject = self._server.User.filter({"id": self.id})[0]
except IndexError:
raise NitrateError(
"Cannot find user for " + self.identifier)
# Search by login
elif self._login is not NitrateNone:
try:
log.info(
"Fetching user for login '{0}'".format(self.login))
inject = self._server.User.filter(
{"username": self.login})[0]
except IndexError:
raise NitrateError("No user found for login '{0}'".format(
self.login))
# Search by email
elif self._email is not NitrateNone:
try:
log.info("Fetching user for email '{0}'".format(
self.email))
inject = self._server.User.filter({"email": self.email})[0]
except IndexError:
raise NitrateError("No user found for email '{0}'".format(
self.email))
# Otherwise initialize to the current user
else:
log.info("Fetching the current user")
inject = self._server.User.get_me()
self._index("i-am-current-user")
# Initialize data from the inject and index into cache
log.debug("Initializing user UID#{0}".format(inject["id"]))
log.data(pretty(inject))
self._inject = inject
self._id = inject["id"]
self._login = inject["username"]
self._email = inject["email"]
if inject["first_name"] and inject["last_name"]:
self._name = inject["first_name"] + " " + inject["last_name"]
else:
self._name = None
self._index(self.login, self.email)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Version Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Version(Nitrate):
""" Product version """
# Local cache of Version
_cache = {}
# List of all object attributes (used for init & expiration)
_attributes = ["name", "product"]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Version Properties
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Read-only properties
id = property(_getter("id"), doc="Version id")
name = property(_getter("name"), doc="Version name")
product = property(_getter("product"), doc="Version product")
@classmethod
def _cache_lookup(cls, id, **kwargs):
""" Look up cached objects, return found instance and search key """
# Search cache by the version name and product
if "product" in kwargs and ("version" in kwargs or "name" in kwargs):
product = kwargs.get("product")
if isinstance(product, Product):
product = product.name
name = kwargs.get("name", kwargs.get("version"))
return cls._cache["{0}---in---{1}".format(name, product)], name
# Default search by id otherwise
return super(Version, cls)._cache_lookup(id, **kwargs)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Version Special
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, id=None, name=None, product=None, **kwargs):
""" Initialize by version id or product and version """
# Backward compatibility for 'version' argument (now called 'name')
name = name if name is not None else kwargs.get("version")
# Initialize (unless already done)
id, ignore, inject, initialized = self._is_initialized(id)
if initialized: return
Nitrate.__init__(self, id)
# If inject given, fetch tag data from it
if inject:
self._fetch(inject)
# Initialize by version name and product
elif name is not None and product is not None:
self._name = name
# Convert product into object if necessary
if isinstance(product, Product):
self._product = product
else:
self._product = Product(product)
# Index by name/product (but only when the product name is known)
if self.product._name is not NitrateNone:
self._index("{0}---in---{1}".format(
self.name, self.product.name))
# Otherwise just make sure the version id was provided
elif not id:
raise NitrateError("Need either version id or both product "
"and version name to initialize the Version object.")
def __unicode__(self):
""" Version name for printing """
return self.name
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Version Methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _fetch(self, inject=None):
""" Fetch version data from the server """
Nitrate._fetch(self, inject)
# Directly fetch from the initial object dict
if inject is not None:
log.debug("Processing Version ID#{0} inject".format(inject["id"]))
# Search by version id
elif self._id is not NitrateNone:
try:
log.info("Fetching version {0}".format(self.identifier))
inject = self._server.Product.filter_versions(
{'id': self.id})[0]
except IndexError:
raise NitrateError(
"Cannot find version for {0}".format(self.identifier))
# Search by product and name
else:
try:
log.info(u"Fetching version '{0}' of '{1}'".format(
self.name, self.product.name))
inject = self._server.Product.filter_versions(
{'product': self.product.id, 'value': self.name})[0]
except IndexError:
raise NitrateError(
"Cannot find version for '{0}'".format(self.name))
# Initialize data from the inject and index into cache
log.debug("Initializing Version ID#{0}".format(inject["id"]))
log.data(pretty(inject))
self._inject = inject
self._id = inject["id"]
self._name = inject["value"]
self._product = Product(inject["product_id"])
# Index by product name & version name (if product is cached)
if self.product._name is not NitrateNone:
self._index("{0}---in---{1}".format(self.name, self.product.name))
# Otherwise index by id only
else:
self._index()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Component Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Component(Nitrate):
""" Test case component """
# Local cache of Component objects indexed by component id plus
    # additionally by name-in-product pairs
_cache = {}
# List of all object attributes (used for init & expiration)
_attributes = ["name", "product"]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Component Properties
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Read-only properties
id = property(_getter("id"), doc="Component id.")
name = property(_getter("name"), doc="Component name.")
product = property(_getter("product"), doc="Relevant product.")
@property
def synopsis(self):
""" Short component summary (including product info) """
return "{0}, {1}".format(self.name, self.product)
@classmethod
def _cache_lookup(cls, id, **kwargs):
""" Look up cached objects, return found instance and search key """
# Name and product check
if 'product' in kwargs and 'name' in kwargs:
product = kwargs.get("product")
if isinstance(product, Product):
product = product.name
name = kwargs.get("name")
return cls._cache["{0}---in---{1}".format(name, product)], name
return super(Component, cls)._cache_lookup(id, **kwargs)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Component Special
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, id=None, name=None, product=None, **kwargs):
""" Initialize by component id or component name and product """
# Initialize (unless already done)
id, ignore, inject, initialized = self._is_initialized(id)
if initialized: return
Nitrate.__init__(self, id)
# If inject given, fetch component data from it
if inject:
self._fetch(inject)
# Initialized by product and component name
elif name is not None and product is not None:
# Detect product format
if isinstance(product, Product):
self._product = product
else:
self._product = Product(product)
self._name = name
# Index by name-product (only when the product name is known)
if self.product._name is not NitrateNone:
self._index("{0}---in---{1}".format(
self.name, self.product.name))
# Otherwise just check that the id was provided
elif id is None:
raise NitrateError("Need either component id or both product "
"and component name to initialize the Component object.")
def __unicode__(self):
""" Component name for printing """
return self.name
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Component Methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _fetch(self, inject=None):
""" Get the missing component data """
Nitrate._fetch(self, inject)
# Directly fetch from the initial object dict
if inject is not None:
log.info("Processing component ID#{0} inject".format(inject["id"]))
# Search by component id
elif self._id is not NitrateNone:
try:
log.info("Fetching component " + self.identifier)
inject = self._server.Product.get_component(self.id)
except xmlrpclib.Fault as error:
log.debug(error)
raise NitrateError(
"Cannot find component for " + self.identifier)
# Search by component name and product
else:
try:
log.info(u"Fetching component '{0}' of '{1}'".format(
self.name, self.product.name))
inject = self._server.Product.check_component(
self.name, self.product.id)
except xmlrpclib.Fault as error:
log.debug(error)
raise NitrateError("Component '{0}' not found in"
" '{1}'".format(self.name, self.product.name))
# Initialize data from the inject and index into cache
log.debug("Initializing component ID#{0}".format(inject["id"]))
log.data(pretty(inject))
self._inject = inject
self._id = inject["id"]
self._name = inject["name"]
self._product = Product(
{"id": inject["product_id"], "name": inject["product"]})
self._index("{0}---in---{1}".format(self.name, self.product.name))
@staticmethod
def search(**query):
""" Search for components """
return [Component(hash) for hash in
Nitrate()._server.Product.filter_components(dict(query))]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Bug Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Bug(Nitrate):
""" Bug related to a test case or a case run """
# Local cache of Bug objects indexed by internal bug id
_cache = {}
# List of all object attributes (used for init & expiration)
_attributes = ["bug", "system", "testcase", "caserun"]
# Prefixes for bug systems, identifier width
_prefixes = {1: "BZ"}
_identifier_width = 7
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Bug Properties
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Read-only properties
id = property(_getter("id"), doc="Bug id (internal).")
bug = property(_getter("bug"), doc="Bug (external id).")
system = property(_getter("system"), doc="Bug system.")
testcase = property(_getter("testcase"), doc="Test case.")
caserun = property(_getter("caserun"), doc="Case run.")
@property
def synopsis(self):
""" Short summary about the bug """
# Summary in the form: BUG#123456 (BZ#123, TC#456, CR#789)
return "{0} ({1})".format(self.identifier, ", ".join([str(self)] +
[obj.identifier for obj in (self.testcase, self.caserun)
if obj is not None]))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Bug Special
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, id=None, bug=None, system=1, **kwargs):
"""
Initialize the bug
Provide external bug id, optionally bug system (Bugzilla by default).
"""
# Initialize (unless already done)
id, ignore, inject, initialized = self._is_initialized(id)
if initialized: return
Nitrate.__init__(self, id, prefix="BUG")
# If inject given, fetch bug data from it
if inject:
self._fetch(inject)
# Initialized by bug id and system id
elif bug is not None and system is not None:
self._bug = bug
self._system = system
# Otherwise just check that the id was provided
elif id is None:
raise NitrateError("Need bug id to initialize the Bug object.")
def __eq__(self, other):
"""
Custom bug comparison
Primarily decided by id. If unknown, compares by bug id & bug system.
"""
# Decide by internal id
if self._id is not NitrateNone and other._id is not NitrateNone:
return self.id == other.id
# Compare external id and bug system id
return self.bug == other.bug and self.system == other.system
def __unicode__(self):
""" Bug name for printing """
try:
prefix = self._prefixes[self.system]
except KeyError:
prefix = "BZ"
return u"{0}#{1}".format(prefix, str(self.bug).rjust(
self._identifier_width, "0"))
def __hash__(self):
""" Construct the uniqe hash from bug id and bug system id """
return _idify([self.system, self.bug])
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Bug Methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _fetch(self, inject=None):
""" Fetch bug info from the server """
Nitrate._fetch(self, inject)
# No direct xmlrpc function for fetching so far
if inject is None:
raise NotImplementedError("Direct bug fetching not implemented")
# Process provided inject
self._id = int(inject["id"])
self._bug = int(inject["bug_id"])
self._system = int(inject["bug_system_id"])
self._testcase = TestCase(int(inject["case_id"]))
if inject["case_run_id"] is not None:
self._caserun = CaseRun(int(inject["case_run_id"]))
# Index the fetched object into cache
self._index()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Tag Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Tag(Nitrate):
""" Tag Class """
# List of all object attributes (used for init & expiration)
_attributes = ["name"]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Tag Properties
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Read-only properties
id = property(_getter("id"), doc="Tag id")
name = property(_getter("name"), doc="Tag name")
# Local cache for Tag
_cache = {}
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Tag Special
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, id=None, name=None):
""" Initialize by tag id or tag name """
# Initialize (unless already done)
id, name, inject, initialized = self._is_initialized(id or name)
if initialized: return
Nitrate.__init__(self, id)
# If inject given, fetch tag data from it
if inject:
self._fetch(inject)
# Initialize by name
elif name is not None:
self._name = name
self._index(name)
# Otherwise just check that the tag name or id was provided
elif not id:
raise NitrateError("Need either tag id or tag name "
"to initialize the Tag object.")
def __unicode__(self):
""" Tag name for printing """
return self.name
def __hash__(self):
""" Use tag name for hashing """
# This is necessary until BZ#1084301 is fixed
return hash(self.name)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Tag Methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _fetch(self, inject=None):
""" Fetch tag data from the server """
Nitrate._fetch(self, inject)
# Directly fetch from the initial object dict
if inject is not None:
log.debug("Initializing Tag ID#{0}".format(inject["id"]))
log.data(pretty(inject))
self._id = inject["id"]
self._name = inject["name"]
# Search by tag id
elif self._id is not NitrateNone:
try:
log.info("Fetching tag " + self.identifier)
inject = self._server.Tag.get_tags({'ids': [self.id]})
log.debug("Initializing tag " + self.identifier)
log.data(pretty(inject))
self._inject = inject
self._name = inject[0]["name"]
except IndexError:
raise NitrateError(
"Cannot find tag for {0}".format(self.identifier))
# Search by tag name
else:
try:
log.info(u"Fetching tag '{0}'".format(self.name))
inject = self._server.Tag.get_tags({'names': [self.name]})
log.debug(u"Initializing tag '{0}'".format(self.name))
log.data(pretty(inject))
self._inject = inject
self._id = inject[0]["id"]
except IndexError:
raise NitrateError(
"Cannot find tag '{0}'".format(self.name))
# Index the fetched object into cache
self._index(self.name)
# We need to import mutable here because of cyclic import
from nitrate.mutable import TestCase, CaseRun
| lgpl-2.1 | 6,383,373,787,954,833,000 | 36.730798 | 79 | 0.480661 | false |
Scarygami/gae-gcs-push2deploy-secrets | lib/simplekv/memory/redisstore.py | 1 | 1046 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from io import BytesIO
from .. import KeyValueStore
class RedisStore(KeyValueStore):
"""Uses a redis-database as the backend.
:param redis: An instance of :py:class:`redis.StrictRedis`.
"""
def __init__(self, redis):
self.redis = redis
def _delete(self, key):
return self.redis.delete(key)
def keys(self):
return list(map(lambda b: b.decode(), self.redis.keys()))
def iter_keys(self):
return iter(self.keys())
def _has_key(self, key):
return self.redis.exists(key)
def _get(self, key):
val = self.redis.get(key)
if val is None:
raise KeyError(key)
return val
def _get_file(self, key, file):
file.write(self._get(key))
def _open(self, key):
return BytesIO(self._get(key))
def _put(self, key, value):
self.redis.set(key, value)
return key
def _put_file(self, key, file):
self._put(key, file.read())
return key
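# Minimal usage sketch (assumes a reachable local redis server; the generic
# put()/get() helpers come from simplekv's KeyValueStore base class):
#
#   import redis
#   store = RedisStore(redis.StrictRedis())
#   store.put(u'answer', b'42')
#   store.get(u'answer')   # -> b'42'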
| apache-2.0 | -1,914,007,446,103,929,000 | 20.346939 | 65 | 0.576482 | false |