# === mitsuhiko/django :: tests/regressiontests/syndication/tests.py (license: bsd-3-clause) ===
import datetime
import warnings
from xml.dom import minidom
from django.contrib.syndication import views
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils import tzinfo
from django.utils.feedgenerator import rfc2822_date, rfc3339_date
from models import Entry
class FeedTestCase(TestCase):
fixtures = ['feeddata.json']
def assertChildNodes(self, elem, expected):
actual = set([n.nodeName for n in elem.childNodes])
expected = set(expected)
self.assertEqual(actual, expected)
def assertChildNodeContent(self, elem, expected):
for k, v in expected.items():
self.assertEqual(
elem.getElementsByTagName(k)[0].firstChild.wholeText, v)
def assertCategories(self, elem, expected):
self.assertEqual(set(i.firstChild.wholeText for i in elem.childNodes if i.nodeName == 'category'), set(expected))
######################################
# Feed view
######################################
class SyndicationFeedTest(FeedTestCase):
"""
Tests for the high-level syndication feed framework.
"""
urls = 'regressiontests.syndication.urls'
def test_rss2_feed(self):
"""
Test the structure and content of feeds generated by Rss201rev2Feed.
"""
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '2.0')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
# Find the last build date
d = Entry.objects.latest('date').date
ltz = tzinfo.LocalTimezone(d)
last_build_date = rfc2822_date(d.replace(tzinfo=ltz))
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
self.assertChildNodeContent(chan, {
'title': 'My blog',
'description': 'A more thorough description of my blog.',
'link': 'http://example.com/blog/',
'language': 'en',
'lastBuildDate': last_build_date,
#'atom:link': '',
'ttl': '600',
'copyright': 'Copyright (c) 2007, Sally Smith',
})
self.assertCategories(chan, ['python', 'django'])
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss2/'
)
# Find the pubdate of the first feed item
d = Entry.objects.get(pk=1).date
ltz = tzinfo.LocalTimezone(d)
pub_date = rfc2822_date(d.replace(tzinfo=ltz))
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
'guid': 'http://example.com/blog/1/',
'pubDate': pub_date,
'author': '[email protected] (Sally Smith)',
})
self.assertCategories(items[0], ['python', 'testing'])
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'category', 'pubDate', 'author'])
def test_rss091_feed(self):
"""
Test the structure and content of feeds generated by RssUserland091Feed.
"""
response = self.client.get('/syndication/rss091/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '0.91')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
self.assertCategories(chan, ['python', 'django'])
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss091/'
)
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
})
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description'])
self.assertCategories(item, [])
def test_atom_feed(self):
"""
Test the structure and content of feeds generated by Atom1Feed.
"""
response = self.client.get('/syndication/atom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('xmlns'), 'http://www.w3.org/2005/Atom')
self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'rights', 'category', 'author'])
for link in feed.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/syndication/atom/')
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'category', 'updated', 'rights', 'author'])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_custom_feed_generator(self):
response = self.client.get('/syndication/custom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('django'), 'rocks')
self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'spam', 'rights', 'category', 'author'])
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertEqual(entry.getAttribute('bacon'), 'yum')
self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'ministry', 'rights', 'author', 'updated', 'category'])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_title_escaping(self):
"""
Tests that titles are escaped correctly in RSS feeds.
"""
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
for item in doc.getElementsByTagName('item'):
link = item.getElementsByTagName('link')[0]
if link.firstChild.wholeText == 'http://example.com/blog/4/':
title = item.getElementsByTagName('title')[0]
self.assertEqual(title.firstChild.wholeText, u'A & B < C > D')
def test_naive_datetime_conversion(self):
"""
Test that datetimes are correctly converted to the local time zone.
"""
        # Naive datetimes passed in get converted to the local time zone, so
        # check the received zone offset against the local offset.
response = self.client.get('/syndication/naive-dates/')
doc = minidom.parseString(response.content)
updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
d = Entry.objects.latest('date').date
ltz = tzinfo.LocalTimezone(d)
latest = rfc3339_date(d.replace(tzinfo=ltz))
self.assertEqual(updated, latest)
def test_aware_datetime_conversion(self):
"""
Test that datetimes with timezones don't get trodden on.
"""
response = self.client.get('/syndication/aware-dates/')
doc = minidom.parseString(response.content)
updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
self.assertEqual(updated[-6:], '+00:42')
def test_feed_url(self):
"""
Test that the feed_url can be overridden.
"""
response = self.client.get('/syndication/feedurl/')
doc = minidom.parseString(response.content)
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/customfeedurl/')
def test_secure_urls(self):
"""
Test URLs are prefixed with https:// when feed is requested over HTTPS.
"""
response = self.client.get('/syndication/rss2/', **{
'wsgi.url_scheme': 'https',
})
doc = minidom.parseString(response.content)
chan = doc.getElementsByTagName('channel')[0]
self.assertEqual(
chan.getElementsByTagName('link')[0].firstChild.wholeText[0:5],
'https'
)
atom_link = chan.getElementsByTagName('atom:link')[0]
self.assertEqual(atom_link.getAttribute('href')[0:5], 'https')
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href')[0:5], 'https')
def test_item_link_error(self):
"""
        Test that an ImproperlyConfigured error is raised if no link could be
        found for the item(s).
"""
self.assertRaises(ImproperlyConfigured,
self.client.get,
'/syndication/articles/')
def test_template_feed(self):
"""
Test that the item title and description can be overridden with
templates.
"""
response = self.client.get('/syndication/template/')
doc = minidom.parseString(response.content)
feed = doc.getElementsByTagName('rss')[0]
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertChildNodeContent(items[0], {
'title': 'Title in your templates: My first entry',
'description': 'Description in your templates: My first entry',
'link': 'http://example.com/blog/1/',
})
def test_add_domain(self):
"""
Test add_domain() prefixes domains onto the correct URLs.
"""
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value'),
'http://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value', True),
'https://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', 'http://djangoproject.com/doc/'),
'http://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'https://djangoproject.com/doc/'),
'https://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'mailto:[email protected]'),
'mailto:[email protected]'
)


# === grakiss888/testapi :: opnfv_testapi/resources/test_models.py (license: apache-2.0) ===
##############################################################################
# Copyright (c) 2015 Orange
# [email protected] / [email protected]
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from opnfv_testapi.resources import models
from opnfv_testapi.tornado_swagger import swagger
from datetime import datetime
@swagger.model()
class TestCreateRequest(models.ModelBase):
"""
@property trust_indicator:
@ptype trust_indicator: L{TI}
"""
def __init__(self,
_id=None,
owner=None,
results=[],
public="false",
review="false",
status="private",
shared=[]):
self._id = _id
self.owner = owner
self.results = results.copy()
self.public = public
self.review = review
self.upload_date = datetime.now()
self.status = status
self.shared = shared
class ResultUpdateRequest(models.ModelBase):
"""
@property trust_indicator:
@ptype trust_indicator: L{TI}
"""
def __init__(self, trust_indicator=None):
self.trust_indicator = trust_indicator
@swagger.model()
class Test(models.ModelBase):
"""
@property trust_indicator: used for long duration test case
@ptype trust_indicator: L{TI}
"""
def __init__(self, _id=None, owner=None, results=[],
public="false", review="false", status="private",
shared=[], trust_indicator=None):
self._id = _id
self.owner = owner
self.results = results
self.public = public
self.review = review
self.upload_date = datetime.now()
self.status = status
        self.shared = shared
        self.trust_indicator = trust_indicator
@swagger.model()
class Tests(models.ModelBase):
"""
@property tests:
@ptype tests: C{list} of L{Test}
"""
def __init__(self):
self.tests = list()
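    # attr_parser presumably tells the ModelBase deserializer how to rebuild
    # list members: each element of 'tests' is turned into a Test instance.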
@staticmethod
def attr_parser():
return {'tests': Test}


# === zqfan/leetcode :: algorithms/python/find_minimum_in_rotated_sorted_array_ii.py (license: gpl-3.0) ===
#! /usr/bin/env python
# Copyright (C) 2014 ZhiQiang Fan <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# https://oj.leetcode.com/problems/find-minimum-in-rotated-sorted-array-ii/
class Solution:
# @param num, a list of integer
# @return an integer
def findMin(self, num):
if not num: return
left, mid, right = 0, 0, len(num) - 1
while left < right:
            # this segment is already sorted, i.e. not rotated, so the
            # minimum is num[left]
if num[left] < num[right]:
return num[left]
mid = (left + right + 1) / 2
            # the left half is ordered, so the rotated part is on the right.
if num[mid] > num[left]:
left = mid
elif num[mid] < num[left]:
right = mid
                # mid can be equal to right, so we need to increase left to
                # avoid an infinite loop; since num[left] >= num[right], it
                # is safe to do so.
left += 1
else:
                # num[mid] == num[left]: we cannot tell which side holds the
                # minimum, so just increase left; in the worst case this
                # degrades to linear time
left += 1
return num[mid]
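

# A minimal sanity check for the solution above (hypothetical inputs, not
# part of the original submission):
if __name__ == '__main__':
    s = Solution()
    assert s.findMin([4, 5, 6, 7, 0, 1, 4]) == 0
    assert s.findMin([2, 2, 2, 0, 1]) == 0
    assert s.findMin([3, 1, 3]) == 1
    assert s.findMin([1, 1, 1, 1]) == 1
    print('all checks passed')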


# === escher9/vdebug-python :: plugin/python/vdebug/ui/vimui.py (license: mit) ===
# coding=utf-8
import vdebug.ui.interface
import vdebug.util
import vim
import vdebug.log
import vdebug.opts
class Ui(vdebug.ui.interface.Ui):
"""Ui layer which manages the Vim windows.
"""
def __init__(self,breakpoints):
vdebug.ui.interface.Ui.__init__(self)
self.is_open = False
self.breakpoint_store = breakpoints
self.emptybuffer = None
self.breakpointwin = BreakpointWindow(self,'rightbelow 7new')
self.current_tab = "1"
self.tabnr = None
def is_modified(self):
modified = int(vim.eval('&mod'))
if modified:
return True
else:
return False
def open(self):
if self.is_open:
return
self.is_open = True
try:
existing_buffer = True
cur_buf_name = vim.eval("bufname('%')")
if cur_buf_name is None:
existing_buffer = False
cur_buf_name = ''
self.current_tab = vim.eval("tabpagenr()")
vim.command('silent tabnew')
self.empty_buf_num = vim.eval('bufnr("%")')
if existing_buffer:
vim.command('call Vdebug_edit("%s")' % cur_buf_name)
self.tabnr = vim.eval("tabpagenr()")
srcwin_name = self.__get_srcwin_name()
self.tracewin = TraceWindow(self,'vertical belowright new')
self.tracewin.create()
self.watchwin = WatchWindow(self,'belowright new')
self.watchwin.create()
self.stackwin = StackWindow(self,'belowright new')
self.stackwin.create()
self.statuswin = StatusWindow(self,'belowright new')
self.statuswin.create()
self.statuswin.set_status("loading")
self.watchwin.set_height(20)
self.statuswin.set_height(5)
self.tracewin.set_height(5)
logwin = LogWindow(self,'rightbelow 6new')
vdebug.log.Log.set_logger(\
vdebug.log.WindowLogger(\
vdebug.opts.Options.get('debug_window_level'),\
logwin))
winnr = self.__get_srcwinno_by_name(srcwin_name)
self.sourcewin = SourceWindow(self,winnr)
self.sourcewin.focus()
except Exception as e:
self.is_open = False
raise e
def set_source_position(self,file,lineno):
self.sourcewin.set_file(file)
self.sourcewin.set_line(lineno)
self.sourcewin.place_pointer(lineno)
def mark_as_stopped(self):
if self.is_open:
if self.sourcewin:
self.sourcewin.remove_pointer()
if self.statuswin:
self.statuswin.set_status("stopped")
self.remove_conn_details()
def set_conn_details(self,addr,port):
self.statuswin.insert("Connected to %s:%s" %(addr,port),2,True)
def remove_conn_details(self):
self.statuswin.insert("Not connected",2,True)
def set_listener_details(self,addr,port,idekey):
details = "Listening on %s:%s" %(addr,port)
if len(idekey):
details += " (IDE key: %s)" % idekey
self.statuswin.insert(details,1,True)
def get_current_file(self):
return vdebug.util.LocalFilePath(vim.current.buffer.name)
def get_current_row(self):
return vim.current.window.cursor[0]
def get_current_line(self):
return self.get_line(self.get_current_row())
def get_line(self,row):
return vim.eval("getline(" + str(row) + ")")
def register_breakpoint(self,breakpoint):
if breakpoint.type == 'line':
self.place_breakpoint(breakpoint.id,\
breakpoint.file,breakpoint.line)
if self.breakpointwin.is_open:
self.breakpointwin.add_breakpoint(breakpoint)
def place_breakpoint(self,sign_id,file,line):
vim.command('sign place '+str(sign_id)+\
' name=breakpt line='+str(line)+\
' file='+file.as_local())
def remove_breakpoint(self,breakpoint):
id = breakpoint.id
vim.command('sign unplace %i' % id)
if self.breakpointwin.is_open:
self.breakpointwin.remove_breakpoint(id)
def get_breakpoint_sign_positions(self):
sign_lines = self.command('sign place').split("\n")
positions = {}
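        # Each matching line of :sign place output looks like
        # "line=<lnum>  id=<sign id>  name=breakpt"; build a map of
        # sign id -> line number from it.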
for line in sign_lines:
if "name=breakpt" in line:
attributes = line.strip().split()
lineinfo = attributes[0].split('=')
idinfo = attributes[1].split('=')
positions[idinfo[1]] = lineinfo[1]
return positions
# Execute a vim command and return the output.
def command(self,cmd):
vim.command('redir => _tmp')
vim.command('silent %s' % cmd)
vim.command('redir END')
return vim.eval('_tmp')
def say(self,string):
""" Vim picks up Python prints, so just print """
print str(string)
vdebug.log.Log(string,vdebug.log.Logger.INFO)
def error(self,string):
vim.command('echohl Error | echo "'+\
str(string).replace('"','\\"')+\
'" | echohl None')
vdebug.log.Log(string,vdebug.log.Logger.ERROR)
def close(self):
if not self.is_open:
return
self.is_open = False
vdebug.log.Log.remove_logger('WindowLogger')
if self.tabnr:
vim.command('silent! '+self.tabnr+'tabc!')
if self.current_tab:
vim.command('tabn '+self.current_tab)
if self.empty_buf_num:
vim.command('bw' + self.empty_buf_num)
if self.tracewin:
self.tracewin.destroy()
if self.watchwin:
self.watchwin.destroy()
if self.stackwin:
self.stackwin.destroy()
if self.statuswin:
self.statuswin.destroy()
self.tracewin = None
self.watchwin = None
self.stackwin = None
self.statuswin = None
vim.command('set bexpr=IPythonBalloonExpr()')
def __get_srcwin_name(self):
return vim.current.buffer.name
def __get_srcwinno_by_name(self,name):
i = 1
vdebug.log.Log("Searching for win by name %s" % name,\
vdebug.log.Logger.INFO)
for w in vim.windows:
vdebug.log.Log("Win %d, name %s" %(i,w.buffer.name),\
vdebug.log.Logger.INFO)
if w.buffer.name == name:
break
else:
i += 1
vdebug.log.Log("Returning window number %d" % i,\
vdebug.log.Logger.INFO)
return i
def __get_buf_list(self):
return vim.eval("range(1, bufnr('$'))")
class SourceWindow(vdebug.ui.interface.Window):
file = None
pointer_sign_id = '6145'
breakpoint_sign_id = '6146'
prevline = 0
def __init__(self,ui,winno):
self.winno = str(winno)
def focus(self):
vim.command(self.winno+"wincmd w")
def command(self,cmd,silent = True):
self.focus()
prepend = "silent " if silent else ""
command_str = prepend + self.winno + "wincmd " + cmd
vim.command(command_str)
def set_file(self,file):
if file == self.file:
return
self.file = file
vdebug.log.Log("Setting source file: "+file,vdebug.log.Logger.INFO)
self.focus()
vim.command('call Vdebug_edit("%s")' % str(file).replace("\\", "\\\\"))
def set_line(self,lineno):
self.focus()
vim.command("normal %sgg" % str(lineno))
def get_file(self):
self.focus()
self.file = vdebug.util.LocalFilePath(vim.eval("expand('%:p')"))
return self.file
def clear_signs(self):
vim.command('sign unplace *')
def place_pointer(self,line):
vdebug.log.Log("Placing pointer sign on line "+str(line),\
vdebug.log.Logger.INFO)
vim.command('sign undefine current')
vim.command('sign define current text=-> texthl=DbgCurrentSign linehl=DbgCurrentLine')
vim.command('sign place '+self.pointer_sign_id+\
' name=current line='+str(line)+\
' file='+self.file)
        overmargin = abs(self.prevline - int(vim.eval("line('.')"))) > 20
if overmargin:
vim.command('exe "normal zz"')
self.prevline = int(vim.eval("line('.')"))
if int(vim.eval("line('.')")) > 15:
vim.command('exe "normal 10\<C-e>"')
vim.command('redraw')
def remove_pointer(self):
vim.command('sign unplace %s' % self.pointer_sign_id)
class Window(vdebug.ui.interface.Window):
name = "WINDOW"
open_cmd = "new"
creation_count = 0
context_sav = None
def __init__(self,ui,open_cmd):
self.buffer = None
self.ui = ui
self.open_cmd = open_cmd
self.is_open = False
def getwinnr(self):
return int(vim.eval("bufwinnr('"+self.name+"')"))
def set_height(self,height):
height = int(height)
minheight = int(vim.eval("&winminheight"))
if height < minheight:
height = minheight
if height <= 0:
height = 1
self.command('set winheight=%i' % height)
def write(self, msg, return_focus = True, after = "normal G"):
if not self.is_open:
self.create()
if return_focus:
prev_win = vim.eval('winnr()')
if self.buffer_empty():
self.buffer[:] = str(msg).split('\n')
else:
self.buffer.append(str(msg).split('\n'))
self.command(after)
if return_focus:
vim.command('%swincmd W' % prev_win)
    def insert(self, msg, lineno = None, overwrite = False, allowEmpty = False):
        """ Insert at the current position in the buffer. """
        if not self.is_open:
            self.create()
if len(msg) == 0 and allowEmpty == False:
return
if self.buffer_empty():
self.buffer[:] = str(msg).split('\n')
else:
if lineno == None:
(lineno, rol) = vim.current.window.cursor
remaining_buffer = str(msg).split('\n')
if overwrite:
lfrom = lineno + 1
else:
lfrom = lineno
remaining_buffer.extend(self.buffer[lfrom:])
del self.buffer[lineno:]
if self.buffer_empty():
self.buffer[:] = remaining_buffer
else:
for line in remaining_buffer:
self.buffer.append(line)
self.command(str(lfrom))
def delete(self,start_line,end_line):
try:
self.buffer[end_line]
remaining_buffer = self.buffer[end_line:]
del self.buffer[start_line:]
self.buffer.append(remaining_buffer)
except IndexError:
del self.buffer[start_line:]
def buffer_empty(self):
if len(self.buffer) == 1 \
and len(self.buffer[0]) == 0:
return True
else:
return False
def create(self):
""" create window """
vim.command('silent ' + self.open_cmd + ' ' + self.name)
vim.command("setlocal buftype=nofile modifiable "+ \
"winfixheight winfixwidth")
self.buffer = vim.current.buffer
self.is_open = True
self.creation_count += 1
self.on_create()
def destroy(self):
""" destroy window """
if self.buffer == None or len(dir(self.buffer)) == 0:
return
self.is_open = False
if int(vim.eval('buffer_exists("'+self.name+'")')) == 1:
vim.command('bwipeout ' + self.name)
def clean(self):
""" clean all datas in buffer """
self.buffer[:] = []
def command(self, cmd):
""" go to my window & execute command """
winnr = self.getwinnr()
if winnr != int(vim.eval("winnr()")):
vim.command(str(winnr) + 'wincmd w')
vim.command(cmd)
def accept_value(self,value):
self.write(value)
def accept_renderer(self,renderer):
self.write(renderer.render())
def redisplay(self):
if self.context_sav:
self.write(self.context_sav.render())
else:
self.write('')
class BreakpointWindow(Window):
name = "DebuggerBreakpoints"
is_visible = False
header = """===========================================================
 ID      | TYPE        | DATA
==========================================================="""
def on_create(self):
self.clean()
self.write(self.header)
self.command('setlocal syntax=debugger_breakpoint')
for bp in self.ui.breakpoint_store.get_sorted_list():
self.add_breakpoint(bp)
if self.creation_count == 1:
cmd = 'silent! au BufWinLeave %s :silent! bdelete %s' %(self.name,self.name)
vim.command('%s | python debugger.runner.ui.breakpointwin.is_open = False' % cmd)
def add_breakpoint(self,breakpoint):
bp_str = " %-7i | %-11s | " %(breakpoint.id,breakpoint.type)
if breakpoint.type == 'line':
bp_str += "%s:%s" %(breakpoint.file,str(breakpoint.line))
elif breakpoint.type == 'conditional':
bp_str += "%s:%s when (%s)" \
%(breakpoint.file,str(breakpoint.line),breakpoint.condition)
elif breakpoint.type == 'exception':
bp_str += "Exception: %s" % breakpoint.exception
elif breakpoint.type == 'call' or \
breakpoint.type == 'return':
bp_str += "Function: %s" % breakpoint.function
self.write(bp_str)
def remove_breakpoint(self,breakpoint_id):
i = 0
for l in self.buffer:
bp_str = " %i " % breakpoint_id
bp_id_len = len(bp_str)
if l[:bp_id_len] == bp_str:
del self.buffer[i]
i += 1
class LogWindow(Window):
name = "DebuggerLog"
def on_create(self):
self.command('setlocal syntax=debugger_log')
if self.creation_count == 1:
vim.command('silent! au BufWinLeave %s :silent! bdelete %s' %(self.name,self.name))
def write(self, msg, return_focus = True):
Window.write(self, msg,return_focus=True)
class StackWindow(Window):
name = "DebuggerStack"
def on_create(self):
self.command('inoremap <buffer> <cr> <esc>'+\
':python debugger.handle_return_keypress()<cr>')
self.command('nnoremap <buffer> <cr> '+\
':python debugger.handle_return_keypress()<cr>')
self.command('nnoremap <buffer> <2-LeftMouse> '+\
':python debugger.handle_double_click()<cr>')
self.command('setlocal syntax=debugger_stack')
if self.creation_count == 1:
cmd = 'silent! au BufWinLeave %s :silent! bdelete %s' %(self.name,self.name)
vim.command('%s | python debugger.runner.ui.stackwin.is_open = False' % cmd)
def write(self, msg, return_focus = True):
Window.write(self, msg, after="normal gg")
class WatchWindow(Window):
name = "DebuggerWatch"
def on_create(self):
self.command('inoremap <buffer> <cr> <esc>'+\
':python debugger.handle_return_keypress()<cr>')
self.command('nnoremap <buffer> <cr> '+\
':python debugger.handle_return_keypress()<cr>')
self.command('nnoremap <buffer> <2-LeftMouse> '+\
':python debugger.handle_double_click()<cr>')
self.command('setlocal syntax=debugger_watch')
if self.creation_count == 1:
cmd = 'silent! au BufWinLeave %s :silent! bdelete %s' %(self.name,self.name)
vim.command('%s | python debugger.runner.ui.watchwin.is_open = False' % cmd)
def write(self, msg, return_focus = True):
Window.write(self, msg, after="normal gg")
class TraceWindow(WatchWindow):
name = "TraceWindow"
reserve_trace_code = None
last_context_rendered = None
class StatusWindow(Window):
name = "DebuggerStatus"
def on_create(self):
keys = vdebug.util.Keymapper()
output = "Status: starting\nListening on port\nNot connected\n\n"
output += "Press %s to start debugging, " %(keys.run_key())
output += "%s to stop/close. " %(keys.close_key())
output += "Type :help Vdebug for more information."
self.write(output)
self.command('setlocal syntax=debugger_status')
if self.creation_count == 1:
cmd = 'au BufWinLeave %s :silent! bdelete %s' %(self.name,self.name)
vim.command('%s | python debugger.runner.ui.statuswin.is_open = False' % cmd)
def set_status(self,status):
self.insert("Status: "+str(status),0,True)
class ResponseRenderer:
def __init__(self,response):
self.response = response
def render(self):
pass
class StackGetResponseRenderer(ResponseRenderer):
def render(self):
stack = self.response.get_stack()
string = ""
for s in stack:
where = s.get('where') if s.get('where') else 'main'
file = vdebug.util.FilePath(s.get('filename'))
line = "[%(num)s] %(where)s @ %(file)s:%(line)s" \
%{'num':s.get('level'),'where':where,\
'file':str(file.as_local()),'line':s.get('lineno')}
string += line + "\n"
return string
class ContextGetResponseRenderer(ResponseRenderer):
def __init__(self,response,title = None,contexts = {},current_context = 0):
ResponseRenderer.__init__(self,response)
self.title = title
self.contexts = contexts
self.current_context = current_context
def render(self,indent = 0):
res = self.__create_tabs()
if self.title:
res += "- %s\n\n" % self.title
properties = self.response.get_context()
num_props = len(properties)
vdebug.log.Log("Writing %i properties to the context window" % num_props,\
vdebug.log.Logger.INFO )
for idx, prop in enumerate(properties):
final = False
try:
next_prop = properties[idx+1]
except IndexError:
final = True
next_prop = None
res += self.__render_property(prop,next_prop,final,indent)
vdebug.log.Log("Writing to context window:\n"+res,vdebug.log.Logger.DEBUG)
return res
def __create_tabs(self):
res = []
if self.contexts:
for id,name in self.contexts.iteritems():
if self.current_context == id:
name = "*"+name
res.append("[ %s ]" % name)
if res:
return " ".join(res) + "\n\n"
else:
return ""
def __render_property(self,p,next_p,last = False,indent = 0):
line = "%(indent)s %(marker)s %(name)s = (%(type)s)%(value)s" \
%{'indent':"".rjust((p.depth * 2)+indent),\
'marker':self.__get_marker(p),'name':p.display_name.encode('latin1'),\
'type':p.type_and_size(),'value': " " + p.value}
line = line.rstrip() + "\n"
if vdebug.opts.Options.get('watch_window_style') == 'expanded':
depth = p.depth
if next_p and not last:
next_depth = next_p.depth
if depth == next_depth:
next_sep = "|"
num_spaces = depth * 2
elif depth > next_depth:
next_sep = "/"
num_spaces = (depth * 2) - 1
else:
next_sep = "\\"
num_spaces = (depth * 2) + 1
line += "".rjust(num_spaces+indent) + " " + next_sep + "\n"
elif depth > 0:
line += "".rjust((depth * 2) - 1 + indent) + " /" + "\n"
return line
def __get_marker(self,property):
char = vdebug.opts.Options.get('marker_default')
if property.has_children:
if property.child_count() == 0:
char = vdebug.opts.Options.get('marker_closed_tree')
else:
char = vdebug.opts.Options.get('marker_open_tree')
return char


# === openaid-IATI/OIPA :: OIPA/iati/PostmanJsonImport/importPostmanJson.py (license: agpl-3.0) ===
import json
import os
import urllib
from datetime import datetime
from OIPA import settings
class PostmanAPIImport(object):
fields_to_remove = ["event", "response"]
file_path = os.environ.get(
'OIPA_STATIC_ROOT',
os.path.join(
os.path.dirname(settings.BASE_DIR),
'public/static'))
def get_json(self):
request = urllib.request.Request(
"https://api.getpostman.com/collections/7423966-c07eebd3-61b2-47b4-9bfd-1bac7ec96c9f", # NOQA: E501
headers={"x-Api-Key": "675aba3b5dec4d39a1abf193d4386c7b"})
response = urllib.request.urlopen(request)
json_string = response.read()
result_for_test_datastore_iatistandard_org = json.loads(json_string.decode('utf-8-sig')) # NOQA: E501
result_for_iati_cloud = json.loads(json_string.decode('utf-8-sig'))
self.simplify(result_for_iati_cloud, 'iatidatastore.iatistandard.org')
self.simplify(result_for_test_datastore_iatistandard_org, 'test-datastore.iatistandard.org') # NOQA: E501
try:
with open(self.file_path + '/postman/postman_json_iati_cloud.json', 'w') as outfile: # NOQA: E501
json.dump(result_for_iati_cloud, outfile)
print("Postman json file for iati.cloud was created on: ", datetime.now()) # NOQA: E501
with open(self.file_path + '/postman/postman_json_test_datastore_iatistandard_org.json', # NOQA: E501
'w') as outfile: # NOQA: E501
json.dump(result_for_test_datastore_iatistandard_org, outfile)
print("Postman json file for test-datastore.iatistandard.org was created on: ", datetime.now()) # NOQA:E501
except IOError:
pass
def simplify(self, full_json, url_string_to_replace_with):
self.remove_fields(full_json['collection'], url_string_to_replace_with)
self.recursive_clean(full_json['collection']['item'], url_string_to_replace_with) # NOQA: E501
def remove_fields(self, before_remove_event, url_string_to_replace_with):
for fields in self.fields_to_remove:
if fields in before_remove_event:
del before_remove_event[fields]
if 'request' in before_remove_event:
self.url_replacing(before_remove_event, url_string_to_replace_with) # NOQA: E501
def recursive_clean(self, before_remove_response, url_string_to_replace_with): # NOQA: E501
for item in before_remove_response:
self.remove_fields(item, url_string_to_replace_with)
if 'item' in item:
self.recursive_clean(item['item'], url_string_to_replace_with)
@staticmethod
def url_replacing(element_string_to_be_replaced, url_string_to_replace_with): # NOQA: E501
new_url = element_string_to_be_replaced['request']['url']['raw'].replace('iati.cloud', url_string_to_replace_with) # NOQA: E501
element_string_to_be_replaced['request']['url']['raw'] = new_url
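

# Hypothetical usage, e.g. from a cron job or Django management command (not
# part of the original module):
#
#     importer = PostmanAPIImport()
#     importer.get_json()  # fetches the collection and writes both JSON files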


# === praekelt/go-api-toolkit :: go_api/tests/test_utils.py (license: bsd-3-clause) ===
"""
Tests for go_api utility functions.
"""
from twisted.internet.defer import Deferred, inlineCallbacks, succeed
from twisted.internet.task import Clock
from twisted.trial.unittest import TestCase
from go_api.utils import defer_async, simulate_async
class DummyError(Exception):
"""
Exception for use in tests.
"""
class TestDeferAsync(TestCase):
def test_returns_deferred(self):
d = defer_async('foo', reactor=Clock())
self.assertTrue(isinstance(d, Deferred))
def test_fires_only_after_reactor_runs(self):
clock = Clock()
d = defer_async('foo', reactor=clock)
self.assertEqual(d.called, False)
clock.advance(0)
self.assertEqual(d.called, True)
self.assertEqual(d.result, 'foo')
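
# twisted.internet.task.Clock is a deterministic fake reactor; advancing it
# manually (clock.advance(0)) proves the deferreds fire asynchronously rather
# than inline.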
class TestSimulateAsync(TestCase):
def test_wraps(self):
def simple():
"""doc"""
f = simulate_async(simple)
self.assertEqual(f.__name__, "simple")
self.assertEqual(f.__doc__, "doc")
self.assertEqual(f.__module__, __name__)
@inlineCallbacks
def test_handles_successful_return(self):
def simple():
return 'foo'
f = simulate_async(simple)
d = f()
self.assertTrue(isinstance(d, Deferred))
v = yield d
self.assertEqual(v, 'foo')
@inlineCallbacks
def test_handler_deferred_return(self):
def simple_deferred():
return succeed('foo')
f = simulate_async(simple_deferred)
d = f()
self.assertTrue(isinstance(d, Deferred))
v = yield d
self.assertEqual(v, 'foo')
@inlineCallbacks
def test_handles_exceptions(self):
def error():
raise DummyError()
f = simulate_async(error)
d = f()
self.assertTrue(isinstance(d, Deferred))
yield self.failUnlessFailure(d, DummyError)
def test_fires_only_after_reactor_runs(self):
def simple():
return 'foo'
clock = Clock()
f = simulate_async(simple, reactor=clock)
d = f()
self.assertEqual(d.called, False)
clock.advance(0)
self.assertEqual(d.called, True)
self.assertEqual(d.result, 'foo')
@inlineCallbacks
def test_complex_arguments(self):
def simple(*args, **kw):
return (args, kw)
f = simulate_async(simple)
result = yield f("foo", "bar", baz=3, boop="barp")
self.assertEqual(result, (
("foo", "bar"),
{"baz": 3, "boop": "barp"},
))


# === mreineck/healpy :: healpy/projector.py (license: GPL v2+, per the file header) ===
#
# This file is part of Healpy.
#
# Healpy is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Healpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Healpy; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# For more information about Healpy, see http://code.google.com/p/healpy
#
"""This module provides classes for some spherical projection.
To be used when calling SphereProjAxes class.
SphericalProj : a virtual class (does nothing); just a template for derived
                (useful) classes
GnomonicProj : Gnomonic projection
AzimuthalProj : Azimuthal equidistant or Lambert azimuthal equal-area projection
"""
from . import rotator as R
import numpy as np
from . import pixelfunc
from .pixelfunc import UNSEEN
pi = np.pi
dtor = np.pi / 180.
class SphericalProj(object):
"""
This class defines functions for spherical projection.
This class contains class method for spherical projection computation. It
should not be instantiated. It should be inherited from and methods should
be overloaded for desired projection.
"""
name = "None"
def __init__(self, rot=None, coord=None, flipconv=None, **kwds):
self.rotator = R.Rotator(rot=rot, coord=None, eulertype="ZYX")
self.coordsys = R.Rotator(coord=coord).coordout
self.coordsysstr = R.Rotator(coord=coord).coordoutstr
self.set_flip(flipconv)
self.set_proj_plane_info(**kwds)
def set_proj_plane_info(self, **kwds):
allNone = True
for v in kwds.values():
if v is not None:
allNone = False
if not allNone:
self._arrayinfo = dict(kwds)
else:
self._arrayinfo = None
def get_proj_plane_info(self):
return self._arrayinfo
arrayinfo = property(
get_proj_plane_info, doc="Dictionary with information on the projection array"
)
def __eq__(self, a):
if type(a) is not type(self):
return False
return (self.rotator == a.rotator) and (self.coordsys == a.coordsys)
def ang2xy(self, theta, phi=None, lonlat=False, direct=False):
"""From angular direction to position in the projection plane (%s).
Input:
- theta: if phi is None, theta[0] contains theta, theta[1] contains phi
- phi : if phi is not None, theta,phi are direction
- lonlat: if True, angle are assumed in degree, and longitude, latitude
- flipconv is either 'astro' or 'geo'. None will be default.
Return:
- x, y: position in %s plane.
"""
pass
def vec2xy(self, vx, vy=None, vz=None, direct=False):
"""From unit vector direction to position in the projection plane (%s).
Input:
- vx: if vy and vz are None, vx[0],vx[1],vx[2] defines the unit vector.
- vy,vz: if defined, vx,vy,vz define the unit vector
- lonlat: if True, angle are assumed in degree, and longitude, latitude
- flipconv is either 'astro' or 'geo'. None will be default.
Return:
- x, y: position in %s plane.
"""
pass
def xy2ang(self, x, y=None, lonlat=False, direct=False):
"""From position in the projection plane to angular direction (%s).
Input:
- x : if y is None, x[0], x[1] define the position in %s plane.
- y : if defined, x,y define the position in projection plane.
- lonlat: if True, angle are assumed in degree, and longitude, latitude
- flipconv is either 'astro' or 'geo'. None will be default.
Return:
- theta, phi : angular direction.
"""
pass
def xy2vec(self, x, y=None, direct=False):
"""From position in the projection plane to unit vector direction (%s).
Input:
- x : if y is None, x[0], x[1] define the position in %s plane.
- y : if defined, x,y define the position in projection plane.
- lonlat: if True, angle are assumed in degree, and longitude, latitude
- flipconv is either 'astro' or 'geo'. None will be default.
Return:
- theta, phi : angular direction.
"""
pass
def xy2ij(self, x, y=None):
"""From position in the projection plane to image array index (%s).
Input:
- x : if y is None, x[0], x[1] define the position in %s plane.
- y : if defined, x,y define the position in projection plane.
- projinfo : additional projection information.
Return:
- i,j : image array indices.
"""
pass
def ij2xy(self, i=None, j=None):
"""From image array indices to position in projection plane (%s).
Input:
- if i and j are None, generate arrays of i and j as input
- i : if j is None, i[0], j[1] define array indices in %s image.
- j : if defined, i,j define array indices in image.
- projinfo : additional projection information.
Return:
- x,y : position in projection plane.
"""
pass
def projmap(self, map, vec2pix_func, rot=None, coord=None):
"""Create an array containing the projection of the map.
Input:
- vec2pix_func: a function taking theta,phi and returning pixel number
- map: an array containing the spherical map to project,
the pixelisation is described by vec2pix_func
Return:
- a 2D array with the projection of the map.
Note: the Projector must contain information on the array.
"""
x, y = self.ij2xy()
if np.__version__ >= "1.1":
matype = np.ma.core.MaskedArray
else:
matype = np.ma.array
if type(x) is matype and x.mask is not np.ma.nomask:
w = x.mask == False
else:
w = slice(None)
img = np.zeros(x.shape, np.float64) - np.inf
vec = self.xy2vec(np.asarray(x[w]), np.asarray(y[w]))
vec = (R.Rotator(rot=rot, coord=self.mkcoord(coord))).I(vec)
pix = vec2pix_func(vec[0], vec[1], vec[2])
# support masked array for map, or a dictionnary (for explicit pixelisation)
if isinstance(map, matype) and map.mask is not np.ma.nomask:
mpix = map[pix]
mpix[map.mask[pix]] = UNSEEN
elif isinstance(map, dict):
is_pix_seen = np.in1d(pix, map.keys()).reshape(pix.shape)
is_pix_unseen = ~is_pix_seen
mpix = np.zeros_like(img[w])
mpix[is_pix_unseen] = UNSEEN
pix_seen = pix[is_pix_seen]
iterable = (map[p] for p in pix_seen)
mpix[is_pix_seen] = np.fromiter(iterable, mpix.dtype, count=pix_seen.size)
else:
mpix = map[pix]
img[w] = mpix
return img
def set_flip(self, flipconv):
"""flipconv is either 'astro' or 'geo'. None will be default.
With 'astro', east is toward left and west toward right.
It is the opposite for 'geo'
"""
if flipconv is None:
flipconv = "astro" # default
if flipconv == "astro":
self._flip = -1
elif flipconv == "geo":
self._flip = 1
else:
raise ValueError("flipconv must be 'astro', 'geo' or None for default.")
def get_extent(self):
"""Get the extension of the projection plane.
Return:
extent = (left,right,bottom,top)
"""
pass
def get_fov(self):
"""Get the field of view in degree of the plane of projection
Return:
fov: the diameter in radian of the field of view
"""
return 2. * pi
def get_center(self, lonlat=False):
"""Get the center of the projection.
Input:
- lonlat : if True, will return longitude and latitude in degree,
otherwise, theta and phi in radian
Return:
- theta,phi or lonlat depending on lonlat keyword
"""
lon, lat = np.asarray(self.rotator.rots[0][0:2]) * 180 / pi
if lonlat:
return lon, lat
else:
return pi / 2. - lat * dtor, lon * dtor
def mkcoord(self, coord):
if self.coordsys is None:
return (coord, coord)
elif coord is None:
return (self.coordsys, self.coordsys)
elif type(coord) is str:
return (coord, self.coordsys)
else:
return (tuple(coord)[0], self.coordsys)
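
# The concrete projections below follow the pattern sketched in SphericalProj:
# each subclass overrides the forward/inverse maps (vec2xy/xy2vec and the
# ang2xy/xy2ang wrappers) plus the pixel-index helpers (xy2ij/ij2xy), and
# inherits the generic machinery such as projmap and the rotator handling.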
class GnomonicProj(SphericalProj):
"""This class provides class methods for Gnomonic projection.
"""
name = "Gnomonic"
def __init__(self, rot=None, coord=None, xsize=None, ysize=None, reso=None, **kwds):
super(GnomonicProj, self).__init__(
rot=rot, coord=coord, xsize=xsize, ysize=ysize, reso=reso, **kwds
)
def set_proj_plane_info(self, xsize=200, ysize=None, reso=1.5):
if xsize is None:
xsize = 200
if ysize is None:
ysize = xsize
if reso is None:
reso = 1.5
super(GnomonicProj, self).set_proj_plane_info(
xsize=xsize, ysize=ysize, reso=reso
)
def vec2xy(self, vx, vy=None, vz=None, direct=False):
if not direct:
vec = self.rotator(vx, vy, vz)
elif vy is None and vz is None:
vec = vx
elif vy is not None and vz is not None:
vec = vx, vy, vz
else:
raise ValueError("vy and vz must be both defined or both not defined")
flip = self._flip
mask = np.asarray(vec[0]) <= 0.
w = np.where(mask == False)
if not mask.any():
mask = np.ma.nomask
if not hasattr(vec[0], "__len__"):
if mask is not np.ma.nomask:
x = np.nan
y = np.nan
else:
x = flip * vec[1] / vec[0]
y = vec[2] / vec[0]
else:
x = np.zeros(vec[0].shape) + np.nan
y = np.zeros(vec[0].shape) + np.nan
x[w] = flip * vec[1][w] / vec[0][w]
y[w] = vec[2][w] / vec[0][w]
return x, y
vec2xy.__doc__ = SphericalProj.ang2xy.__doc__ % (name, name)
def xy2vec(self, x, y=None, direct=False):
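        # Inverse gnomonic map: the projection plane is tangent to the sphere
        # along the x-axis, so (x, y) corresponds to the unit vector
        # (1, +/-x, y) / sqrt(1 + x^2 + y^2).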
flip = self._flip
if y is None:
x, y = x
x, y = np.asarray(x), np.asarray(y)
rm1 = 1. / np.sqrt(1. + x ** 2 + y ** 2)
vec = (rm1, flip * rm1 * x, rm1 * y)
if not direct:
return self.rotator.I(vec)
else:
return vec
xy2vec.__doc__ = SphericalProj.xy2vec.__doc__ % (name, name)
def ang2xy(self, theta, phi=None, lonlat=False, direct=False):
vec = R.dir2vec(theta, phi, lonlat=lonlat)
return self.vec2xy(vec, direct=direct)
ang2xy.__doc__ = SphericalProj.ang2xy.__doc__ % (name, name)
def xy2ang(self, x, y=None, lonlat=False, direct=False):
return R.vec2dir(self.xy2vec(x, y, direct=direct), lonlat=lonlat)
xy2ang.__doc__ = SphericalProj.xy2ang.__doc__ % (name, name)
def xy2ij(self, x, y=None):
if self.arrayinfo is None:
raise TypeError(
"No projection plane array information defined for " "this projector"
)
xsize = int(self.arrayinfo["xsize"])
ysize = int(self.arrayinfo["ysize"])
reso = self.arrayinfo["reso"]
if y is None:
x, y = x
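        # reso is the pixel resolution in arcminutes; dx is radians per pixel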
dx = reso / 60. * dtor
xc, yc = 0.5 * (xsize - 1), 0.5 * (ysize - 1)
j = np.around(xc + x / dx).astype(np.long)
i = np.around(yc + y / dx).astype(np.long)
return i, j
xy2ij.__doc__ = SphericalProj.xy2ij.__doc__ % (name, name)
def ij2xy(self, i=None, j=None):
if self.arrayinfo is None:
raise TypeError(
"No projection plane array information defined for " "this projector"
)
xsize = int(self.arrayinfo["xsize"])
ysize = int(self.arrayinfo["ysize"])
reso = self.arrayinfo["reso"]
dx = reso / 60. * dtor
xc, yc = 0.5 * (xsize - 1), 0.5 * (ysize - 1)
if i is None and j is None:
idx = np.outer(np.ones(ysize), np.arange(xsize))
x = (idx - xc) * dx # astro= '-' sign, geo '+' sign
idx = np.outer(np.arange(ysize), np.ones(xsize))
y = (idx - yc) * dx # (idx-yc) * dx
elif i is not None and j is not None:
x = (np.asarray(j) - xc) * dx
y = (np.asarray(i) - yc) * dx # (asarray(i)-yc) * dx
elif i is not None and j is None:
i, j = i
x = (np.asarray(j) - xc) * dx
y = (np.asarray(i) - yc) * dx # (i-yc) * dx
else:
raise TypeError("Wrong parameters")
return x, y
ij2xy.__doc__ = SphericalProj.ij2xy.__doc__ % (name, name)
def get_extent(self):
xsize, ysize = self.arrayinfo["xsize"], self.arrayinfo["ysize"]
left, bottom = self.ij2xy(0, 0)
right, top = self.ij2xy(ysize - 1, xsize - 1)
return (left, right, bottom, top)
def get_fov(self):
vx, vy, vz = self.xy2vec(self.ij2xy(0, 0), direct=True)
a = np.arccos(vx)
return 2. * a
class MollweideProj(SphericalProj):
"""This class provides class methods for Mollweide projection.
"""
name = "Mollweide"
__molldata = []
def __init__(self, rot=None, coord=None, xsize=800, **kwds):
self.__initialise_data()
super(MollweideProj, self).__init__(rot=rot, coord=coord, xsize=xsize, **kwds)
def set_proj_plane_info(self, xsize):
super(MollweideProj, self).set_proj_plane_info(xsize=xsize)
def vec2xy(self, vx, vy=None, vz=None, direct=False):
if not direct:
theta, phi = R.vec2dir(self.rotator(vx, vy, vz))
else:
theta, phi = R.vec2dir(vx, vy, vz)
flip = self._flip
X, Y = MollweideProj.__molldata
# set phi in [-pi,pi]
phi = (phi + pi) % (2 * pi) - pi
lat = pi / 2. - theta
A = MollweideProj.__lininterp(X, Y, lat)
x = flip * 2. / pi * phi * np.cos(A)
y = np.sin(A)
return x, y
vec2xy.__doc__ = SphericalProj.vec2xy.__doc__ % (name, name)
def xy2vec(self, x, y=None, direct=False):
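        # Inverse Mollweide map: with the auxiliary angle A = arcsin(y),
        # sin(lat) = (2/pi) * (A + y*sqrt(1 - y^2)) and
        # lon = (pi/2) * x / sqrt(1 - y^2).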
flip = self._flip
if y is None:
x, y = x
mask = np.asarray(x) ** 2 / 4. + np.asarray(y) ** 2 > 1.
w = np.where(mask == False)
if not mask.any():
mask = np.ma.nomask
if not hasattr(x, "__len__"):
if mask is not np.ma.nomask:
return np.nan, np.nan, np.nan
else:
s = np.sqrt((1 - y) * (1 + y))
a = np.arcsin(y)
z = 2. / pi * (a + y * s)
phi = flip * pi / 2. * x / np.maximum(s, 1.e-6)
sz = np.sqrt((1 - z) * (1 + z))
vec = sz * np.cos(phi), sz * np.sin(phi), z
if not direct:
return self.rotator.I(vec)
else:
return vec
else:
vec = (
np.zeros(x.shape) + np.nan,
np.zeros(x.shape) + np.nan,
np.zeros(x.shape) + np.nan,
)
s = np.sqrt((1 - y[w]) * (1 + y[w]))
a = np.arcsin(y[w])
vec[2][w] = 2. / pi * (a + y[w] * s)
phi = flip * pi / 2. * x[w] / np.maximum(s, 1.e-6)
sz = np.sqrt((1 - vec[2][w]) * (1 + vec[2][w]))
vec[0][w] = sz * np.cos(phi)
vec[1][w] = sz * np.sin(phi)
if not direct:
return self.rotator.I(vec)
else:
return vec
xy2vec.__doc__ = SphericalProj.xy2vec.__doc__ % (name, name)
def ang2xy(self, theta, phi=None, lonlat=False, direct=False):
return self.vec2xy(R.dir2vec(theta, phi, lonlat=lonlat), direct=direct)
ang2xy.__doc__ = SphericalProj.ang2xy.__doc__ % (name, name)
def xy2ang(self, x, y=None, lonlat=False, direct=False):
vec = self.xy2vec(x, y, direct=direct)
return R.vec2dir(vec, lonlat=lonlat)
xy2ang.__doc__ = SphericalProj.xy2ang.__doc__ % (name, name)
def xy2ij(self, x, y=None):
if self.arrayinfo is None:
raise TypeError(
"No projection plane array information defined for " "this projector"
)
xsize = int(self.arrayinfo["xsize"])
ysize = xsize // 2
if y is None:
x, y = x
xc, yc = (xsize - 1.) / 2., (ysize - 1.) / 2.
if hasattr(x, "__len__"):
j = np.around(x * xc / 2. + xc).astype(np.long)
i = np.around(yc + y * yc).astype(np.long)
mask = x ** 2 / 4. + y ** 2 > 1.
if not mask.any():
mask = np.ma.nomask
j = np.ma.array(j, mask=mask)
i = np.ma.array(i, mask=mask)
else:
if x ** 2 / 4. + y ** 2 > 1.:
i, j = np.nan, np.nan
else:
j = np.around(x * xc / 2. + xc).astype(np.long)
i = np.around(yc + y * yc).astype(np.long)
return i, j
xy2ij.__doc__ = SphericalProj.xy2ij.__doc__ % (name, name)
def ij2xy(self, i=None, j=None):
if self.arrayinfo is None:
raise TypeError(
"No projection plane array information defined for " "this projector"
)
xsize = int(self.arrayinfo["xsize"])
ysize = xsize // 2
xc, yc = (xsize - 1.) / 2., (ysize - 1.) / 2.
if i is None and j is None:
idx = np.outer(np.arange(ysize), np.ones(xsize))
y = (idx - yc) / yc
idx = np.outer(np.ones(ysize), np.arange(xsize))
x = 2. * (idx - xc) / xc
mask = x ** 2 / 4. + y ** 2 > 1.
if not mask.any():
mask = np.ma.nomask
x = np.ma.array(x, mask=mask)
y = np.ma.array(y, mask=mask)
elif i is not None and j is not None:
y = (np.asarray(i) - yc) / yc
x = 2. * (np.asarray(j) - xc) / xc
if x ** 2 / 4. + y ** 2 > 1.:
x, y = np.nan, np.nan
elif i is not None and j is None:
i, j = i
y = (np.asarray(i) - yc) / yc
x = 2. * (np.asarray(j) - xc) / xc
if x ** 2 / 4. + y ** 2 > 1.:
x, y = np.nan, np.nan
else:
raise TypeError("i and j must be both given or both not given")
return x, y
ij2xy.__doc__ = SphericalProj.ij2xy.__doc__ % (name, name)
def get_extent(self):
return (-2.0, 2.0, -1.0, 1.0)
@staticmethod
def __initialise_data():
if len(MollweideProj.__molldata) == 0:
X = (np.arange(1., 180., 1.) - 90.) * dtor
Y = MollweideProj.__findRoot(
MollweideProj.__fmoll, MollweideProj.__dfmoll, X.copy(), X, niter=10
)
X = np.concatenate([[-pi / 2], X, [pi / 2]])
Y = np.concatenate([[-pi / 2], Y, [pi / 2]])
MollweideProj.__molldata.append(X)
MollweideProj.__molldata.append(Y)
return
@staticmethod
def __findRoot(f, df, x0, argsf=None, argsdf=None, niter=100):
x = x0
niter = min(abs(niter), 1000)
i = 0
while i < niter:
dx = -f(x, argsf) / df(x, argsdf)
x += dx
i += 1
return x
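
    # __fmoll/__dfmoll encode the Mollweide auxiliary-angle equation
    # 2*A + sin(2*A) = pi*sin(lat) and its derivative; __findRoot solves it
    # for A on a grid of latitudes with Newton's method,
    # x_{n+1} = x_n - f(x_n)/f'(x_n).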
@staticmethod
def __fmoll(x, args):
return 2. * x + np.sin(2. * x) - pi * np.sin(args)
@staticmethod
def __dfmoll(x, args):
return 2. * (1. + np.cos(2. * x))
@staticmethod
def __lininterp(X, Y, x):
idx = X.searchsorted(x)
y = Y[idx - 1] + (Y[idx] - Y[idx - 1]) / (X[idx] - X[idx - 1]) * (
x - X[idx - 1]
)
return y
class CartesianProj(SphericalProj):
"""This class provides class methods for Cartesian projection.
"""
name = "Cartesian"
def __init__(
self,
rot=None,
coord=None,
xsize=800,
ysize=None,
lonra=None,
latra=None,
**kwds
):
super(CartesianProj, self).__init__(
rot=rot,
coord=coord,
xsize=xsize,
ysize=ysize,
lonra=lonra,
latra=latra,
**kwds
)
def set_proj_plane_info(self, xsize, ysize, lonra, latra):
if lonra is None:
lonra = [-180., 180.]
else:
# shift lonra[1] into the range [lonra[0], lonra[0]+360]
lonra_span = np.mod(lonra[1] - lonra[0], 360)
if lonra_span == 0:
lonra_span = 360
lonra[1] = lonra[0] + lonra_span
if latra is None:
latra = [-90., 90.]
if (
len(lonra) != 2
or len(latra) != 2
or latra[0] < -90
or latra[1] > 90
or latra[0] >= latra[1]
):
raise TypeError(
"Wrong argument lonra or latra. Must be lonra=[a,b],latra=[c,d] "
"c<d, c>=-90, d<=+90"
)
lonra = self._flip * np.float64(lonra)[:: self._flip]
latra = np.float64(latra)
xsize = np.long(xsize)
if ysize is None:
ratio = (latra[1] - latra[0]) / (lonra[1] - lonra[0])
ysize = np.long(round(ratio * xsize))
else:
ysize = np.long(ysize)
ratio = float(ysize) / float(xsize)
super(CartesianProj, self).set_proj_plane_info(
xsize=xsize, lonra=lonra, latra=latra, ysize=ysize, ratio=ratio
)
def vec2xy(self, vx, vy=None, vz=None, direct=False):
if not direct:
theta, phi = R.vec2dir(self.rotator(vx, vy, vz))
else:
theta, phi = R.vec2dir(vx, vy, vz)
flip = self._flip
# set phi in [-pi,pi]
x = flip * ((phi + pi) % (2 * pi) - pi)
x /= dtor # convert in degree
y = pi / 2. - theta
y /= dtor # convert in degree
return x, y
vec2xy.__doc__ = SphericalProj.vec2xy.__doc__ % (name, name)
def xy2vec(self, x, y=None, direct=False):
if y is None:
x, y = np.asarray(x)
else:
x, y = np.asarray(x), np.asarray(y)
flip = self._flip
theta = pi / 2. - y * dtor # convert in radian
phi = flip * x * dtor # convert in radian
# dir2vec does not support 2d arrays, so first use flatten and then
# reshape back to previous shape
if not direct:
vec = self.rotator.I(R.dir2vec(theta.flatten(), phi.flatten()))
else:
vec = R.dir2vec(theta.flatten(), phi.flatten())
vec = [v.reshape(theta.shape) for v in vec]
return vec
xy2vec.__doc__ = SphericalProj.xy2vec.__doc__ % (name, name)
def ang2xy(self, theta, phi=None, lonlat=False, direct=False):
return self.vec2xy(R.dir2vec(theta, phi, lonlat=lonlat), direct=direct)
ang2xy.__doc__ = SphericalProj.ang2xy.__doc__ % (name, name)
def xy2ang(self, x, y=None, lonlat=False, direct=False):
vec = self.xy2vec(x, y, direct=direct)
return R.vec2dir(vec, lonlat=lonlat)
xy2ang.__doc__ = SphericalProj.xy2ang.__doc__ % (name, name)
def xy2ij(self, x, y=None):
if self.arrayinfo is None:
raise TypeError(
"No projection plane array information defined for " "this projector"
)
xsize = int(self.arrayinfo["xsize"])
ysize = int(self.arrayinfo["ysize"])
lonra = self.arrayinfo["lonra"]
latra = self.arrayinfo["latra"]
if y is None:
x, y = np.asarray(x)
else:
x, y = np.asarray(x), np.asarray(y)
j = np.around((x - lonra[0]) / (lonra[1] - lonra[0]) * (xsize - 1)).astype(
np.int64
)
i = np.around((y - latra[0]) / (latra[1] - latra[0]) * (ysize - 1)).astype(
np.int64
)
if len(x.shape) > 0:
mask = (i < 0) | (i >= ysize) | (j < 0) | (j >= xsize)
if not mask.any():
mask = np.ma.nomask
j = np.ma.array(j, mask=mask)
i = np.ma.array(i, mask=mask)
else:
if j < 0 or j >= xsize or i < 0 or i >= ysize:
i = j = None
return i, j
xy2ij.__doc__ = SphericalProj.xy2ij.__doc__ % (name, name)
def ij2xy(self, i=None, j=None):
if self.arrayinfo is None:
raise TypeError(
"No projection plane array information defined for " "this projector"
)
xsize = int(self.arrayinfo["xsize"])
ysize = int(self.arrayinfo["ysize"])
lonra = self.arrayinfo["lonra"]
latra = self.arrayinfo["latra"]
if i is not None and j is None:
i, j = np.asarray(i)
elif i is not None and j is not None:
i, j = np.asarray(i), np.asarray(j)
if i is None and j is None:
idx = np.outer(np.arange(ysize), np.ones(xsize))
y = (float(latra[1] - latra[0]) / (ysize - 1.)) * idx
y += latra[0]
idx = np.outer(np.ones(ysize), np.arange(xsize))
x = float(lonra[1] - lonra[0]) / (xsize - 1.) * idx
x += lonra[0]
x = np.ma.array(x)
y = np.ma.array(y)
elif i is not None and j is not None:
y = (float(latra[1] - latra[0]) / (ysize - 1)) * i
y += latra[0]
x = (float(lonra[1] - lonra[0]) / (xsize - 1)) * j
x += lonra[0]
if len(i.shape) > 0:
mask = (x < -180) | (x > 180) | (y < -90) | (y > 90)
if not mask.any():
mask = np.ma.nomask
x = np.ma.array(x, mask=mask)
y = np.ma.array(y, mask=mask)
else:
if x < -180 or x > 180 or y < -90 or y > 90:
x = y = np.nan
else:
raise TypeError("i and j must be both given or both not given")
return x, y
ij2xy.__doc__ = SphericalProj.ij2xy.__doc__ % (name, name)
def get_extent(self):
lonra = self.arrayinfo["lonra"]
latra = self.arrayinfo["latra"]
return (lonra[0], lonra[1], latra[0], latra[1])
get_extent.__doc__ = SphericalProj.get_extent.__doc__
def get_fov(self):
xsize = int(self.arrayinfo["xsize"])
ysize = int(self.arrayinfo["ysize"])
v1 = np.asarray(self.xy2vec(self.ij2xy(0, 0), direct=True))
v2 = np.asarray(self.xy2vec(self.ij2xy(ysize - 1, xsize - 1), direct=True))
a = np.arccos((v1 * v2).sum())
return 2 * a
# def get_fov(self):
# lonra = self.arrayinfo['lonra']
# latra = self.arrayinfo['latra']
# return np.sqrt((lonra[1]-lonra[0])**2+(latra[1]-latra[0])**2)
def get_center(self, lonlat=False):
lonra = self.arrayinfo["lonra"]
latra = self.arrayinfo["latra"]
xc = 0.5 * (lonra[1] + lonra[0])
yc = 0.5 * (latra[1] + latra[0])
return self.xy2ang(xc, yc, lonlat=lonlat)
get_center.__doc__ = SphericalProj.get_center.__doc__
class OrthographicProj(SphericalProj):
"""This class provides methods for orthographic projection
"""
name = "Orthographic"
def __init__(self, rot=None, coord=None, xsize=800, half_sky=False, **kwds):
super(OrthographicProj, self).__init__(
rot=rot, coord=coord, xsize=xsize, half_sky=half_sky, **kwds
)
def set_proj_plane_info(self, xsize, half_sky):
super(OrthographicProj, self).set_proj_plane_info(
xsize=xsize, half_sky=half_sky
)
def vec2xy(self, vx, vy=None, vz=None, direct=False):
if not direct:
theta, phi = R.vec2dir(self.rotator(vx, vy, vz))
else:
theta, phi = R.vec2dir(vx, vy, vz)
if self.arrayinfo is None:
raise TypeError(
"No projection plane array information defined for" " this projector"
)
half_sky = self.arrayinfo["half_sky"]
flip = self._flip
# set phi in [-pi,pi]
phi = flip * (phi + pi) % (2 * pi) - pi
lat = pi / 2. - theta
x = np.cos(lat) * np.sin(phi)
if not half_sky:
x -= 1.0
y = np.sin(lat)
# unfold back of sphere
cosc = np.cos(lat) * np.cos(phi)
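        # cosc < 0 marks points on the far hemisphere: dropped in half-sky
        # mode, mirrored into the second disc otherwise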
if np.any(cosc < 0):
hmask = cosc < 0
if hasattr(x, "__len__"):
if half_sky:
x[hmask] = np.nan
else:
x[hmask] *= -1
elif hmask:
if half_sky:
x = np.nan
else:
x *= -1
if half_sky:
mask = np.asarray(x) ** 2 + np.asarray(y) ** 2 > 1.0
else:
mask = (np.mod(np.asarray(x) + 2.0, 2.0) - 1.0) ** 2 + np.asarray(
y
) ** 2 > 1.0
if mask.any():
if not hasattr(x, "__len__"):
x = np.nan
y = np.nan
else:
x[mask] = np.nan
y[mask] = np.nan
return x, y
vec2xy.__doc__ = SphericalProj.vec2xy.__doc__ % (name, name)
def xy2vec(self, x, y=None, direct=False):
if y is None:
x, y = x
if hasattr(x, "__len__"):
x, y = np.asarray(x), np.asarray(y)
if self.arrayinfo is None:
raise TypeError(
"No projection plane array information defined for" " this projector"
)
half_sky = self.arrayinfo["half_sky"]
flip = self._flip
# re-fold back of sphere
mask = None
if not half_sky:
if hasattr(x, "__len__"):
if np.any(x > 0.0):
mask = x > 0.0
x[mask] *= -1
elif x > 0:
mask = 0
x = -x
x += 1.0
r = np.sqrt(x ** 2 + y ** 2)
if hasattr(r, "__len__"):
r[r > 1] = np.nan
elif r > 1:
r = np.nan
c = np.arcsin(r)
if hasattr(y, "__len__"):
y[np.abs(y) > 1] = np.nan
elif np.abs(y) > 1:
y = np.nan
lat = np.arcsin(y)
phi = np.arctan2(x, np.cos(c))
phi *= flip
        if mask is not None:
if hasattr(phi, "__len__"):
phi[mask] = pi - phi[mask]
else:
phi = pi - phi
theta = pi / 2. - lat
vec = R.dir2vec(theta, phi)
if not direct:
return self.rotator.I(vec)
else:
return vec
xy2vec.__doc__ = SphericalProj.xy2vec.__doc__ % (name, name)
def ang2xy(self, theta, phi=None, lonlat=False, direct=False):
return self.vec2xy(R.dir2vec(theta, phi, lonlat=lonlat), direct=direct)
ang2xy.__doc__ = SphericalProj.ang2xy.__doc__ % (name, name)
def xy2ang(self, x, y=None, lonlat=False, direct=False):
return R.vec2dir(self.xy2vec(x, y, direct=direct), lonlat=lonlat)
xy2ang.__doc__ = SphericalProj.xy2ang.__doc__ % (name, name)
def xy2ij(self, x, y=None):
if self.arrayinfo is None:
raise TypeError(
"No projection plane array information defined for" " this projector"
)
xsize = int(self.arrayinfo["xsize"])
half_sky = self.arrayinfo["half_sky"]
if half_sky:
ratio = 1
else:
ratio = 2
ysize = xsize // ratio
if y is None:
x, y = np.asarray(x)
else:
x, y = np.asarray(x), np.asarray(y)
xc, yc = (xsize - 1.) / 2., (ysize - 1.) / 2.
if hasattr(x, "__len__"):
if half_sky:
mask = x ** 2 + y ** 2 > 1.0
else:
mask = (np.mod(x + 2.0, 2.0) - 1.0) ** 2 + y ** 2 > 1.0
if not mask.any():
mask = np.ma.nomask
            j = np.ma.array(np.around(x * xc / ratio + xc).astype(np.int64), mask=mask)
            i = np.ma.array(np.around(yc + y * yc).astype(np.int64), mask=mask)
else:
if (half_sky and x ** 2 + y ** 2 > 1.0) or (
not half_sky and (np.mod(x + 2.0, 2.0) - 1.0) ** 2 + y ** 2 > 1.0
):
i, j, = np.nan, np.nan
else:
                j = np.around(x * xc / ratio + xc).astype(np.int64)
                i = np.around(yc + y * yc).astype(np.int64)
return i, j
xy2ij.__doc__ = SphericalProj.xy2ij.__doc__ % (name, name)
def ij2xy(self, i=None, j=None):
if self.arrayinfo is None:
raise TypeError(
"No projection plane array information defined for" " this projector"
)
xsize = int(self.arrayinfo["xsize"])
half_sky = self.arrayinfo["half_sky"]
if half_sky:
ratio = 1
else:
ratio = 2
ysize = xsize // ratio
xc, yc = (xsize - 1.) / 2., (ysize - 1.) / 2.
if i is None and j is None:
idx = np.outer(np.arange(ysize), np.ones(xsize))
y = (idx - yc) / yc
idx = np.outer(np.ones(ysize), np.arange(xsize))
x = ratio * (idx - xc) / xc
elif i is not None and j is not None:
y = (np.asarray(i) - yc) / yc
x = ratio * (np.asarray(j) - xc) / xc
# if np.mod(x,1.0)**2+y**2 > 1.0: x,y=np.nan,np.nan
elif i is not None and j is None:
i, j = i
y = (np.asarray(i) - yc) / yc
x = ratio * (np.asarray(j) - xc) / xc
# if np.mod(x,1.0)**2.+y**2 > 1.: x,y=np.nan,np.nan
else:
raise TypeError("i and j must be both given or both not given")
if half_sky:
mask = x ** 2 + y ** 2 > 1.
else:
mask = (np.mod(x + 2.0, 2.0) - 1.0) ** 2 + y ** 2 > 1.
if not mask.any():
mask = np.ma.nomask
x = np.ma.array(x, mask=mask)
y = np.ma.array(y, mask=mask)
        if len(x) == 1:
            # unwrap single-element arrays to scalars
            x = x[0]
        if len(y) == 1:
            y = y[0]
return x, y
ij2xy.__doc__ = SphericalProj.ij2xy.__doc__ % (name, name)
def get_extent(self):
if self.arrayinfo is None:
raise TypeError(
"No projection plane array information defined for" " this projector"
)
half_sky = self.arrayinfo["half_sky"]
if half_sky:
ratio = 1.0
else:
ratio = 2.0
return (-ratio, ratio, -1.0, 1.0)
get_extent.__doc__ = SphericalProj.get_extent.__doc__
class AzimuthalProj(SphericalProj):
"""This class provides methods for Lambert azimuthal equal-area projection and
azimuthal equidistant projection
"""
name = "Azimuthal"
def __init__(
self,
rot=None,
coord=None,
xsize=None,
ysize=None,
reso=None,
lamb=None,
half_sky=None,
**kwds
):
super(AzimuthalProj, self).__init__(
rot=rot,
coord=coord,
xsize=xsize,
ysize=ysize,
reso=reso,
lamb=lamb,
half_sky=half_sky,
**kwds
)
def set_proj_plane_info(
self, xsize=800, ysize=None, reso=1.5, lamb=True, half_sky=False
):
if xsize is None:
xsize = 800
if ysize is None:
ysize = xsize
if reso is None:
reso = 1.5
if lamb is None:
lamb = True
if half_sky is None:
half_sky = False
super(AzimuthalProj, self).set_proj_plane_info(
xsize=xsize, ysize=ysize, reso=reso, lamb=lamb, half_sky=half_sky
)
def vec2xy(self, vx, vy=None, vz=None, direct=False):
if not direct:
theta, phi = R.vec2dir(self.rotator(vx, vy, vz))
else:
theta, phi = R.vec2dir(vx, vy, vz)
if self.arrayinfo is None:
raise TypeError(
"No projection plane array information defined for" " this projector"
)
flip = self._flip
lamb = self.arrayinfo["lamb"]
half_sky = self.arrayinfo["half_sky"]
# set phi in [-pi,pi]
phi = flip * ((phi + pi) % (2 * pi) - pi)
lat = pi / 2. - theta
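        # scale factor k': sqrt(2/(1+cos c)) for Lambert equal-area,
        # c/sin(c) for equidistant, with cos c = cos(lat)*cos(phi)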
if lamb:
kprime = np.sqrt(2. / (1. + np.cos(lat) * np.cos(phi)))
else:
c = np.arccos(np.cos(lat) * np.cos(phi))
kprime = c / np.sin(c)
x = kprime * np.cos(lat) * np.sin(phi)
y = kprime * np.sin(lat)
if lamb:
r2max = 4.
else:
r2max = pi ** 2
if half_sky:
if lamb:
r2max /= 2.
else:
r2max /= 4.
mask = np.asarray(x) ** 2 + np.asarray(y) ** 2 > r2max
if not hasattr(x, "__len__"):
if mask is not np.ma.nomask:
return np.nan, np.nan
else:
w = np.where(mask)
x[w] = np.nan
y[w] = np.nan
return x, y
vec2xy.__doc__ = SphericalProj.vec2xy.__doc__ % (name, name)
def xy2vec(self, x, y=None, direct=False):
if y is None:
x, y = x
if hasattr(x, "__len__"):
x, y = np.asarray(x), np.asarray(y)
if self.arrayinfo is None:
raise TypeError(
"No projection plane array information defined for" " this projector"
)
flip = self._flip
lamb = self.arrayinfo["lamb"]
half_sky = self.arrayinfo["half_sky"]
if lamb:
r2max = 4.
else:
r2max = pi ** 2
if half_sky:
if lamb:
r2max /= 2.
else:
r2max /= 4.
mask = np.asarray(x) ** 2 + np.asarray(y) ** 2 > r2max
        w = np.where(~mask)
if not mask.any():
mask = np.ma.nomask
if not hasattr(x, "__len__"):
if mask is not np.ma.nomask:
return np.nan, np.nan, np.nan
else:
rho = np.sqrt(x ** 2 + y ** 2)
if lamb:
c = 2. * np.arcsin(rho / 2.)
else:
c = rho
lat = np.arcsin(y * np.sin(c) / rho)
phi = np.arctan2(x * np.sin(c), (rho * np.cos(c)))
phi *= flip
vec = R.dir2vec(pi / 2. - lat, phi)
if not direct:
return self.rotator.I(vec)
else:
return vec
else:
vec = (
np.zeros(x.shape) + np.nan,
np.zeros(x.shape) + np.nan,
np.zeros(x.shape) + np.nan,
)
rho = np.sqrt(x[w] ** 2 + y[w] ** 2)
if lamb:
c = 2. * np.arcsin(rho / 2.)
else:
c = rho
lat = np.arcsin(y[w] * np.sin(c) / rho)
phi = np.arctan2(x[w] * np.sin(c), (rho * np.cos(c)))
phi *= flip
vec[0][w] = np.cos(phi) * np.cos(lat)
vec[1][w] = np.sin(phi) * np.cos(lat)
vec[2][w] = np.sin(lat)
if not direct:
return self.rotator.I(vec)
else:
return vec
xy2vec.__doc__ = SphericalProj.xy2vec.__doc__ % (name, name)
def ang2xy(self, theta, phi=None, lonlat=False, direct=False):
return self.vec2xy(R.dir2vec(theta, phi, lonlat=lonlat), direct=direct)
ang2xy.__doc__ = SphericalProj.ang2xy.__doc__ % (name, name)
def xy2ang(self, x, y=None, lonlat=False, direct=False):
return R.vec2dir(self.xy2vec(x, y, direct=direct), lonlat=lonlat)
xy2ang.__doc__ = SphericalProj.xy2ang.__doc__ % (name, name)
def xy2ij(self, x, y=None):
if self.arrayinfo is None:
raise TypeError(
"No projection plane array information defined for " "this projector"
)
xsize = int(self.arrayinfo["xsize"])
ysize = int(self.arrayinfo["ysize"])
reso = self.arrayinfo["reso"]
lamb = self.arrayinfo["lamb"]
half_sky = self.arrayinfo["half_sky"]
if lamb:
r2max = 4.
else:
r2max = pi ** 2
if half_sky:
if lamb:
r2max /= 2.
else:
r2max /= 4.
if y is None:
x, y = x
dx = reso / 60. * dtor
xc, yc = 0.5 * (xsize - 1), 0.5 * (ysize - 1)
if hasattr(x, "__len__"):
mask = x ** 2 + y ** 2 > r2max
if not mask.any():
mask = np.ma.nomask
            j = np.ma.array(np.around(xc + x / dx).astype(np.int64), mask=mask)
            i = np.ma.array(np.around(yc + y / dx).astype(np.int64), mask=mask)
else:
if x ** 2 + y ** 2 > r2max:
i, j, = np.nan, np.nan
else:
                j = np.around(xc + x / dx).astype(np.int64)
                i = np.around(yc + y / dx).astype(np.int64)
return i, j
xy2ij.__doc__ = SphericalProj.xy2ij.__doc__ % (name, name)
def ij2xy(self, i=None, j=None):
if self.arrayinfo is None:
raise TypeError(
"No projection plane array information defined for " "this projector"
)
xsize = int(self.arrayinfo["xsize"])
ysize = int(self.arrayinfo["ysize"])
reso = self.arrayinfo["reso"]
lamb = self.arrayinfo["lamb"]
half_sky = self.arrayinfo["half_sky"]
dx = reso / 60. * dtor
xc, yc = 0.5 * (xsize - 1), 0.5 * (ysize - 1)
if lamb:
r2max = 4.
else:
r2max = pi ** 2
if half_sky:
if lamb:
r2max /= 2.
else:
r2max /= 4.
if i is None and j is None:
idx = np.outer(np.arange(ysize), np.ones(xsize))
y = (idx - yc) * dx
idx = np.outer(np.ones(ysize), np.arange(xsize))
x = (idx - xc) * dx
elif i is not None and j is not None:
y = (np.asarray(i) - yc) * dx
x = (np.asarray(j) - xc) * dx
elif i is not None and j is None:
i, j = i
y = (np.asarray(i) - yc) * dx
x = (np.asarray(j) - xc) * dx
else:
raise TypeError("i and j must be both given or both not given")
if hasattr(x, "__len__"):
mask = x ** 2 + y ** 2 > r2max
if not mask.any():
mask = np.ma.nomask
x = np.ma.array(x, mask=mask)
y = np.ma.array(y, mask=mask)
else:
if x ** 2 + y ** 2 > r2max:
x, y = np.nan, np.nan
return x, y
ij2xy.__doc__ = SphericalProj.ij2xy.__doc__ % (name, name)
def get_extent(self):
xsize = int(self.arrayinfo["xsize"])
ysize = int(self.arrayinfo["ysize"])
reso = self.arrayinfo["reso"]
dx = reso / 60.0 * dtor
xc, yc = 0.5 * (xsize - 1), 0.5 * (ysize - 1)
left = -xc * dx
bottom = -yc * dx
right = (xsize - 1 - xc) * dx
top = (ysize - 1 - yc) * dx
return (left, right, bottom, top)
get_extent.__doc__ = SphericalProj.get_extent.__doc__
def get_fov(self):
half_sky = self.arrayinfo["half_sky"]
vx, vy, vz = self.xy2vec(self.ij2xy(0, 0), direct=True)
a = np.arccos(vx)
if np.isfinite(a):
return 2. * a
else:
if half_sky:
return pi
else:
return 2. * pi
| gpl-2.0 | 7,005,361,420,000,626,000 | 33.313514 | 88 | 0.494396 | false |
thomaslundgaard/pimp | src/flickcharm.py | 1 | 8208 | # -*- coding: utf-8 -*-
# Pimp - A mpd-frontend to be used as a jukebox at parties.
# Copyright (C) 2010 Peter Bjørn
# Copyright (C) 2010 Thomas Lundgaard
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import copy
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
import sys
class FlickData:
Steady = 0
Pressed = 1
ManualScroll = 2
AutoScroll = 3
Stop = 4
def __init__(self):
self.state = FlickData.Steady
self.widget = None
self.pressPos = QPoint(0, 0)
self.offset = QPoint(0, 0)
self.dragPos = QPoint(0, 0)
self.speed = QPoint(0, 0)
self.ignored = []
class FlickCharmPrivate:
def __init__(self):
self.flickData = {}
self.ticker = QBasicTimer()
class FlickCharm(QObject):
def __init__(self, parent = None):
QObject.__init__(self, parent)
self.d = FlickCharmPrivate()
def activateOn(self, widget):
if isinstance(widget, QWebView):
frame = widget.page().mainFrame()
#frame.setScrollBarPolicy(Qt.Vertical, Qt.ScrollBarAlwaysOff)
frame.setScrollBarPolicy(Qt.Horizontal, Qt.ScrollBarAlwaysOff)
widget.installEventFilter(self)
self.d.flickData[widget] = FlickData()
self.d.flickData[widget].widget = widget
self.d.flickData[widget].state = FlickData.Steady
else:
widget.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
#widget.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
viewport = widget.viewport()
viewport.installEventFilter(self)
widget.installEventFilter(self)
self.d.flickData[viewport] = FlickData()
self.d.flickData[viewport].widget = widget
self.d.flickData[viewport].state = FlickData.Steady
def deactivateFrom(self, widget):
if isinstance(widget, QWebView):
widget.removeEventFilter(self)
del(self.d.flickData[widget])
else:
viewport = widget.viewport()
viewport.removeEventFilter(self)
widget.removeEventFilter(self)
del(self.d.flickData[viewport])
def eventFilter(self, object, event):
if not object.isWidgetType():
            return False
eventType = event.type()
if eventType != QEvent.MouseButtonPress and \
eventType != QEvent.MouseButtonRelease and \
eventType != QEvent.MouseMove:
return False
if event.modifiers() != Qt.NoModifier:
return False
        if object not in self.d.flickData:
return False
data = self.d.flickData[object]
found, newIgnored = removeAll(data.ignored, event)
if found:
data.ignored = newIgnored
return False
consumed = False
if data.state == FlickData.Steady:
if eventType == QEvent.MouseButtonPress:
if event.buttons() == Qt.LeftButton:
consumed = True
data.state = FlickData.Pressed
data.pressPos = copy.copy(event.pos())
data.offset = scrollOffset(data.widget)
elif data.state == FlickData.Pressed:
if eventType == QEvent.MouseButtonRelease:
consumed = True
data.state = FlickData.Steady
event1 = QMouseEvent(QEvent.MouseButtonPress,
data.pressPos, Qt.LeftButton,
Qt.LeftButton, Qt.NoModifier)
event2 = QMouseEvent(event)
data.ignored.append(event1)
data.ignored.append(event2)
QApplication.postEvent(object, event1)
QApplication.postEvent(object, event2)
elif eventType == QEvent.MouseMove:
consumed = True
data.state = FlickData.ManualScroll
data.dragPos = QCursor.pos()
if not self.d.ticker.isActive():
self.d.ticker.start(20, self)
elif data.state == FlickData.ManualScroll:
if eventType == QEvent.MouseMove:
consumed = True
pos = event.pos()
delta = pos - data.pressPos
setScrollOffset(data.widget, data.offset - delta)
elif eventType == QEvent.MouseButtonRelease:
consumed = True
data.state = FlickData.AutoScroll
elif data.state == FlickData.AutoScroll:
if eventType == QEvent.MouseButtonPress:
consumed = True
data.state = FlickData.Stop
data.speed = QPoint(0, 0)
data.offset = scrollOffset(data.widget)
elif eventType == QEvent.MouseButtonRelease:
consumed = True
data.state = FlickData.Steady
data.speed = QPoint(0, 0)
elif data.state == FlickData.Stop:
if eventType == QEvent.MouseButtonRelease:
consumed = True
data.state = FlickData.Steady
elif eventType == QEvent.MouseMove:
consumed = True
data.state = FlickData.ManualScroll
data.dragPos = QCursor.pos()
if not self.d.ticker.isActive():
self.d.ticker.start(20, self)
return consumed
def timerEvent(self, event):
count = 0
for data in self.d.flickData.values():
if data.state == FlickData.ManualScroll:
count += 1
cursorPos = QCursor.pos()
data.speed = cursorPos - data.dragPos
data.dragPos = cursorPos
elif data.state == FlickData.AutoScroll:
count += 1
data.speed = deaccelerate(data.speed)
p = scrollOffset(data.widget)
setScrollOffset(data.widget, p - data.speed)
if data.speed == QPoint(0, 0):
data.state = FlickData.Steady
if count == 0:
self.d.ticker.stop()
        QObject.timerEvent(self, event)
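# Usage sketch (illustrative, not part of the original module): enable kinetic
# "flick" scrolling on any scroll-area-like widget or QWebView.
#   charm = FlickCharm()
#   charm.activateOn(scroll_area)
#   ...
#   charm.deactivateFrom(scroll_area)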
def scrollOffset(widget):
if isinstance(widget, QWebView):
frame = widget.page().mainFrame()
x = frame.evaluateJavaScript("window.scrollX").toInt()[0]
y = frame.evaluateJavaScript("window.scrollY").toInt()[0]
else:
x = widget.horizontalScrollBar().value()
y = widget.verticalScrollBar().value()
return QPoint(x, y)
def setScrollOffset(widget, p):
if isinstance(widget, QWebView):
frame = widget.page().mainFrame()
frame.evaluateJavaScript("window.scrollTo(%d,%d);" % (p.x(), p.y()))
else:
widget.horizontalScrollBar().setValue(p.x())
widget.verticalScrollBar().setValue(p.y())
def deaccelerate(speed, a=1, maxVal=64):
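    # move each speed component toward zero by `a` per tick, after clamping
    # it to the [-maxVal, maxVal] range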
x = qBound(-maxVal, speed.x(), maxVal)
y = qBound(-maxVal, speed.y(), maxVal)
if x > 0:
x = max(0, x - a)
elif x < 0:
x = min(0, x + a)
if y > 0:
y = max(0, y - a)
elif y < 0:
y = min(0, y + a)
return QPoint(x, y)
def qBound(minVal, current, maxVal):
return max(min(current, maxVal), minVal)
def removeAll(list, val):
found = False
ret = []
for element in list:
if element == val:
found = True
else:
ret.append(element)
return (found, ret)
| gpl-3.0 | 722,736,524,023,204,100 | 34.682609 | 76 | 0.570001 | false |
iamgreaser/pycubed | feature_server/scripts/avx.py | 1 | 10479 | # avx.py
import array
import collections
import operator
import math
import io
import gzip
import os
from itertools import izip, imap, chain, ifilter, product, repeat
from struct import pack, unpack, calcsize
# this module is a self-contained pure python implementation of a generic AVX loader/saver
# AVX Header: magic "AVX" followed by a version byte. Then a version-specific header.
# Depending on the version and header, it will load fixed or variable sized voxel geometry
# and optionally color data for surface voxels.
# A voxel is surface IFF:
# it is solid AND (one of its neighbors is not solid OR it is on the edge)
# Note: This is probably a better implementation of bitarrays: http://pypi.python.org/pypi/bitarray#downloads
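# Usage sketch (illustrative only; the file name is made up):
#   vox = AVX.fromsparselist([(x, y, z) for x in (0, 1)
#                             for y in (0, 1) for z in (0, 1)])
#   vox.save('cube.avx')
#   same = AVX.fromfile('cube.avx')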
DEFAULT_COLOR = (103, 64, 40)
class BitArray(object):
_bits = 8
_maxbit = _bits - 1
_max = 2 ** _bits - 1
_log = int(round(math.log(_bits, 2)))
def __init__(self, bits, fill = 0):
self.bits = int(bits)
self.bit_array = array.array('B')
if fill == 1:
fill = self._max # all bits set
else:
fill = 0 # all bits cleared
self.bit_array.extend((fill,) * self._array_size(self.bits))
@classmethod
def fromstring(cls, str, bits = -1):
ret = cls(0)
ret.loadstring(str, bits)
return ret
def loadstring(self, str, bits = -1):
max_bits = len(str) * 8
if bits > max_bits:
raise ValueError()
if bits < max_bits:
str = str[:int(math.ceil(bits/8.0))]
self.bit_array.fromstring(str)
        # bits < 0 means "use every bit in the string"
        self.bits = bits if bits >= 0 else max_bits
@staticmethod
def _array_size(bits):
i = bits >> BitArray._log
if (bits & BitArray._maxbit):
i += 1 # a record for stragglers
return i
def get(self, bit_num):
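        # bit_num = record * 8 + offset, e.g. bit 11 -> record 1, offset 3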
record = bit_num >> self._log
offset = bit_num & self._maxbit
mask = 1 << offset
return (self.bit_array[record] & mask) >> offset
def set(self, bit_num):
record = bit_num >> self._log
offset = bit_num & self._maxbit
mask = 1 << offset
self.bit_array[record] |= mask
def clear(self, bit_num):
record = bit_num >> self._log
offset = bit_num & self._maxbit
mask = ~(1 << offset)
self.bit_array[record] &= mask
def toggle(self, bit_num):
record = bit_num >> self._log
offset = bit_num & self._maxbit
mask = 1 << offset
self.bit_array[record] ^= mask
def tostring(self, padbytes = 1):
# minimum padbytes == 1
str = self.bit_array.tostring()
str = str[:int(math.ceil(self.bits / 8.0))]
str += '\x00' * (-len(str) % padbytes)
return str
class BitArrayND(BitArray):
def __init__(self, shape, fill=0):
self.shape = shape
BitArray.__init__(self, self.bits, fill)
bits = property(lambda self: reduce(operator.mul, self.shape), lambda self, value: None)
@classmethod
def fromsparselist(cls, list):
ret = cls((0,) * len(list[0]))
ret.shape = [n+1 for n in map(max, izip(*list))]
ret.bit_array.extend((0,) * ret._array_size(ret.bits))
for coords in list:
ret.set(coords)
return ret
@classmethod
def fromstring(cls, shape, str):
ret = cls((0,) * len(shape))
ret.shape = shape
BitArray.loadstring(ret, str, ret.bits)
return ret
def _ravel(self, coords):
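        # row-major flattening, e.g. shape (4, 3) and coords (2, 1) -> 2*3 + 1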
i = 0
for dim, j in zip(self.shape, coords):
i = i * dim + j
return i
def get(self, coords):
return BitArray.get(self, self._ravel(coords))
def set(self, coords):
return BitArray.set(self, self._ravel(coords))
def clear(self, coords):
return BitArray.clear(self, self._ravel(coords))
def toggle(self, coords):
return BitArray.toggle(self, self._ravel(coords))
def tosparselist(self):
ret = []
for coords in product(*map(xrange, self.shape)):
if self.get(coords):
ret.append(coords)
return ret
def isvalidcoords(self, coords):
return all((n >= 0 and n < d for n, d in izip(coords, self.shape)))
def neighbors(self, coords):
'returns the coordinates of all the valid elements whose coordinates differ from `coords` by +-1 in any one dimension'
if not self.isvalidcoords(coords):
return
i = 0
for changed in map(sum,product(coords, (1, -1))):
n = coords[:i//2] + (changed,) + coords[i//2+1:]
if self.isvalidcoords(n):
yield n
i += 1
def open_gzip(file = None, fileobj = None):
if fileobj is None:
if not os.path.isfile(file) and os.path.isfile(file + '.gz'):
file += '.gz'
return open_gzip(fileobj = open(file, 'rb'))
p = fileobj.tell()
    # gzip streams start with the big-endian magic bytes 0x1F 0x8B
    magic, = unpack('>H', fileobj.read(2))
    fileobj.seek(p, 0)
    if magic == 0x1F8B:  # .gz magic
fileobj = gzip.GzipFile(fileobj = fileobj)
return fileobj
class AVX(BitArrayND):
# headers [(attribute_name, struct.fmt)]
avx_magic = [('magic', '3s'), ('ver', 'B')]
avx_headers_ver = [
[('size_x', 'H'), ('size_y', 'H'), ('size_z', 'H'), ('has_colors', '?'), ('pad_bytes', 'B')]
]
magic = 'AVX'
ver = 0
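    # on-disk layout (ver 0): "AVX" <ver:B> <size_x:H> <size_y:H> <size_z:H>
    # <has_colors:?> <pad_bytes:B> <bit array> [<r,g,b per surface voxel>]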
def __init__(self, x, y, z, colored = True, default_color = DEFAULT_COLOR):
BitArrayND.__init__(self, [x, y, z])
self.has_colors = bool(colored)
self.colors = dict()
self.default_color = tuple(default_color)
self.pad_bytes = 1
@classmethod
def fromsparselist(cls, list, colored = False, default_color = DEFAULT_COLOR):
# a list of 1 bits coords in the form of [(x1,y1,z1), (x2,y2,z2)]
parent = BitArrayND.fromsparselist(list)
ret = cls(0, 0, 0, colored = colored, default_color = default_color)
ret.shape = parent.shape
ret.bit_array = parent.bit_array
if ret.has_colors:
ret.colors = dict((xyz, ret.default_color) for xyz in product(*map(xrange, ret.shape)) if ret.issurface(xyz))
return ret
@classmethod
def fromsparsedict(cls, dict, colored = True, default_color = DEFAULT_COLOR):
# {(x1,y1,z1): color, (x2,y2,z2): None, ...}
ret = cls.fromsparselist(dict.keys(), colored = colored, default_color = default_color)
if ret.has_colors:
for coords, color in dict.iteritems():
ret.setcolor(coords, color)
return ret
@classmethod
def fromfile(cls, file = None, fileobj = None):
fileobj = open_gzip(file, fileobj)
# new instance, load magic attributes
ret = cls(0, 0, 0)
ret._load_attributes(fileobj, cls.avx_magic)
if ret.magic != cls.magic or ret.ver > cls.ver:
raise IOError("Not an AVX file")
ret._load_attributes(fileobj, ret.avx_headers_ver[ret.ver])
bytes = int(math.ceil(ret.bits/8.0))
bytes += -bytes % ret.pad_bytes
ret.loadstring(fileobj.read(bytes), ret.bits)
if ret.has_colors:
#read at most x*y*z color tuples
str = fileobj.read(3*reduce(operator.mul, ret.shape))
i = 0
for xyz in product(*map(xrange, ret.shape)):
if ret.issurface(xyz):
ret.colors[xyz] = unpack('BBB', str[i:i+3])
i += 3
return ret
def _load_attributes(self, fileobj, attributes):
# save the current position, seek to the end to get remaining size, seek back
pos = fileobj.tell()
fileobj.seek(0, 2)
size = fileobj.tell()
fileobj.seek(pos, 0)
if size - pos < calcsize(''.join(zip(*attributes)[1])):
raise EOFError("Incomplete AVX file.")
for attr, fmt in attributes:
setattr(self, attr, unpack(fmt, fileobj.read(calcsize(fmt)))[0])
def save(self, file = None, fileobj = None, compresslevel = None):
if fileobj is None:
return self.save(fileobj = open(file, 'wb'))
if compresslevel:
            return self.save(fileobj = gzip.GzipFile(fileobj = fileobj, compresslevel = compresslevel))
for attr, fmt in chain(self.avx_magic, self.avx_headers_ver[self.ver]):
fileobj.write(pack(fmt, getattr(self, attr)))
fileobj.write(self.tostring(self.pad_bytes))
if self.has_colors:
for xyz in sorted(self.colors):
fileobj.write(pack('BBB', *self.colors[xyz]))
def props(n):
def get(self): return self.shape[n]
def set(self, value): self.shape[n] = value
return get, set
size_x, size_y, size_z = [property(*props(n)) for n in xrange(3)]
del props
def tosparsedict(self):
return dict((coords, self.colors.get(coords, None)) for coords in self.tosparselist())
def setcolor(self, coords, color):
if self.has_colors and self.issurface(coords):
self.colors[coords] = color
def getcolor(self, coords):
if self.has_colors and self.issurface(coords):
return self.colors(coords)
def fixcolors(fn):
def wrapper(self, coords):
fn(self, coords)
for coord in list(self.neighbors(coords)) + [coords]:
                c = coord in self.colors
s = self.issurface(coord)
if c != s:
if c:
                        del self.colors[coord]
else:
self.colors[coord] = self.default_color
return wrapper
@fixcolors
def set(self, coords):
BitArrayND.set(self, coords)
@fixcolors
def clear(self, coords):
BitArrayND.clear(self, coords)
@fixcolors
def toggle(self, coords):
BitArrayND.toggle(self, coords)
del fixcolors
def issurface(self, coords):
return self.get(coords) and (
any(a == 0 or a == n-1 for a,n in izip(coords, self.shape)) # on the edge of the map
or not all(imap(self.get, self.neighbors(coords)))) # one of it neighbors is missing
# | gpl-3.0 | 52,658,477,657,679,064 | 32.589744 | 126 | 0.559977 | false |
joke2k/django-options | setup.py | 1 | 1264 | from setuptools import setup, find_packages
VERSION = (1, 0, 0)
# Dynamically calculate the version based on VERSION tuple
if len(VERSION) > 2 and VERSION[2] is not None:
    str_version = "%d.%d_%s" % VERSION[:3]
else:
    str_version = "%d.%d" % VERSION[:2]
version = str_version
setup(
name='django-options',
    version=version,
author='joke2k',
author_email='[email protected]',
packages=find_packages(),
include_package_data=True,
url='http://github.com/joke2k/django-options',
license='MIT',
description='A easy way to manage Site options in your django applications.',
classifiers=[
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
'Framework :: Django'
],
keywords='faker fixtures data test django',
install_requires=['django',],
tests_require=['django','fake-factory>=0.2'],
test_suite="runtests.runtests",
zip_safe=False,
) | bsd-3-clause | 5,084,994,503,800,844,000 | 30.625 | 81 | 0.640032 | false |
AABoyles/Tabular.ui | Scripts/dumpStatesToDB.py | 1 | 1190 | #!/usr/bin/python
import time, sqlite3, sys, urllib, csv
begin = time.time()
url = "http://www.correlatesofwar.org/COW2%20Data/SystemMembership/2011/states2011.csv"
print "Downloading from", url
response = urllib.urlretrieve(url, '../Data/states2011.csv')
print "Opening Database"
con = sqlite3.connect('../Data/PyRBD.db')
cur = con.cursor()
rows = 0
with open(response[0], 'Ur') as csvFile:
    reader = csv.reader(csvFile)
    for row in reader:
        if rows == 0:
            # the first CSV row holds the column names; build the table from it
            headers = ",".join(row)
            cur.execute("create table if not exists stateMembership(" + headers + ");")
        else:
            cur.execute("INSERT INTO stateMembership(" + headers + ") VALUES (\"" + "\",\"".join(row) + "\");")
        rows += 1
        if rows % 10000 == 0:
            print rows, "rows processed."
            con.commit()
con.commit()
con.close()
end = time.time()
print rows, "rows processed in", end - begin, "seconds"
sys.exit()
| gpl-2.0 | 1,801,252,314,439,419,100 | 29.512821 | 111 | 0.570588 | false |
GoogleCloudPlatform/cloud-opensource-python | compatibility_lib/compatibility_lib/test_compatibility_store.py | 1 | 22515 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for compatibility_store.CompatibilityStore."""
import datetime
import unittest
import mock
from compatibility_lib import compatibility_store
from compatibility_lib import package
PACKAGE_1 = package.Package("package1")
PACKAGE_2 = package.Package("package2")
PACKAGE_3 = package.Package("package3")
PACKAGE_4 = package.Package("package4[gcp]")
class TestCompatibilityResult(unittest.TestCase):
def test_constructor_default(self):
packages = [PACKAGE_1, PACKAGE_2, PACKAGE_3, PACKAGE_4]
python_major_version = 3
status = compatibility_store.Status.SUCCESS
compat_result = compatibility_store.CompatibilityResult(
packages=packages,
python_major_version=python_major_version,
status=status)
self.assertEqual(compat_result.packages, packages)
self.assertEqual(
compat_result.python_major_version, python_major_version)
self.assertEqual(compat_result.status, status)
self.assertIsNone(compat_result.details)
self.assertIsNone(compat_result.dependency_info)
def test_constructor_explicit(self):
packages = [PACKAGE_1, PACKAGE_2, PACKAGE_3, PACKAGE_4]
python_major_version = 3
status = compatibility_store.Status.SUCCESS
details = 'Could not find a version that satisfies the ' \
'requirement apache-beam[gcp]==2.4.0'
dependency_info = {
"cachetools": {
"installed_version": "2.1.0",
"installed_version_time": "2018-05-12T16:26:31",
"latest_version": "2.1.0",
"current_time": "2018-07-13T17:11:29.140608",
"latest_version_time": "2018-05-12T16:26:31",
"is_latest": True}}
timestamp = datetime.datetime.utcnow()
compat_result = compatibility_store.CompatibilityResult(
packages=packages,
python_major_version=python_major_version,
status=status,
details=details,
dependency_info=dependency_info,
timestamp=timestamp)
self.assertEqual(compat_result.packages, packages)
self.assertEqual(
compat_result.python_major_version, python_major_version)
self.assertEqual(compat_result.status, status)
self.assertEqual(compat_result.details, details)
self.assertEqual(compat_result.dependency_info, dependency_info)
self.assertEqual(compat_result.timestamp, timestamp)
def test_with_updated_dependency_info_new_dependencies(self):
original_result = compatibility_store.CompatibilityResult(
packages=[PACKAGE_1],
python_major_version=3,
status=compatibility_store.Status.SUCCESS,
details="No details",
dependency_info={'package1': {'installed_version': '1.2.3'}})
updated_result = original_result.with_updated_dependency_info(
{'package2': {'installed_version': '4.5.6'}})
self.assertEqual(updated_result.dependency_info,
{
'package1': {'installed_version': '1.2.3'},
'package2': {'installed_version': '4.5.6'},
})
# Test that non-dependency properties are unchanged.
self.assertEqual(original_result.packages, updated_result.packages)
self.assertEqual(original_result.python_major_version,
updated_result.python_major_version)
self.assertEqual(original_result.status, updated_result.status)
self.assertEqual(original_result.details, updated_result.details)
self.assertEqual(original_result.timestamp, updated_result.timestamp)
def test_with_updated_dependency_info_changed_dependencies(self):
original_result = compatibility_store.CompatibilityResult(
packages=[PACKAGE_1],
python_major_version=3,
status=compatibility_store.Status.SUCCESS,
details="No details",
dependency_info={'package1': {'installed_version': '1.2.3'}})
updated_result = original_result.with_updated_dependency_info(
{'package1': {'installed_version': '2.3.4'},
'package2': {'installed_version': '4.5.6'}})
self.assertEqual(updated_result.dependency_info,
{
'package1': {'installed_version': '2.3.4'},
'package2': {'installed_version': '4.5.6'},
})
# Test that non-dependency properties are unchanged.
self.assertEqual(original_result.packages, updated_result.packages)
self.assertEqual(original_result.python_major_version,
updated_result.python_major_version)
self.assertEqual(original_result.status, updated_result.status)
self.assertEqual(original_result.details, updated_result.details)
self.assertEqual(original_result.timestamp, updated_result.timestamp)
class TestCompatibilityStore(unittest.TestCase):
def test_get_packages(self):
pkgs = ['google-api-core', 'apache-beam']
mock_pymysql = mock.Mock()
mock_conn = mock.Mock()
mock_cursor = mock.Mock()
mock_pymysql.connect.return_value = mock_conn
mock_conn.cursor.return_value = mock_cursor
mock_cursor.fetchall.return_value = [
[pkgs[0], 'SUCCESS'],
[pkgs[1], 'CHECK WARNING']]
patch_pymysql = mock.patch(
'compatibility_lib.compatibility_store.pymysql', mock_pymysql)
with patch_pymysql:
store = compatibility_store.CompatibilityStore()
packages = list(store.get_packages())
self.assertEqual(len(packages), 2)
for i, pkg in enumerate(packages):
self.assertTrue(
isinstance(pkg, compatibility_store.package.Package))
self.assertEqual(pkg.install_name, pkgs[i])
def test_get_self_compatibility(self):
row = (PACKAGE_1.install_name, 'SUCCESS', '3',
'2018-07-17 01:07:08.936648 UTC', None)
mock_pymysql = mock.Mock()
mock_conn = mock.Mock()
mock_cursor = mock.Mock()
mock_pymysql.connect.return_value = mock_conn
mock_conn.cursor.return_value = mock_cursor
mock_cursor.fetchall.return_value = [row]
patch_pymysql = mock.patch(
'compatibility_lib.compatibility_store.pymysql',
mock_pymysql)
store = compatibility_store.CompatibilityStore()
with patch_pymysql:
res = store.get_self_compatibility(PACKAGE_1)
self.assertEqual(len(res), 1)
self.assertTrue(
isinstance(res[0], compatibility_store.CompatibilityResult))
def test_get_self_compatibilities(self):
packages = [PACKAGE_1, PACKAGE_2, PACKAGE_3, PACKAGE_4]
rows = []
for pkg in packages:
row = (pkg.install_name, 'SUCCESS', '3',
'2018-07-17 01:07:08.936648 UTC', None)
rows.append(row)
mock_pymysql = mock.Mock()
mock_conn = mock.Mock()
mock_cursor = mock.Mock()
mock_pymysql.connect.return_value = mock_conn
mock_conn.cursor.return_value = mock_cursor
mock_cursor.fetchall.return_value = rows
patch_pymysql = mock.patch(
'compatibility_lib.compatibility_store.pymysql',
mock_pymysql)
store = compatibility_store.CompatibilityStore()
with patch_pymysql:
res = store.get_self_compatibilities(packages)
self.assertEqual(len(res), 4)
self.assertEqual(frozenset(res.keys()), frozenset(packages))
def test_get_pair_compatibility_value_error(self):
# get_pair_compatibility needs 2 packages to run the check, or it will
# raise ValueError.
mock_pymysql = mock.Mock()
mock_conn = mock.Mock()
mock_cursor = mock.Mock()
mock_pymysql.connect.return_value = mock_conn
mock_conn.cursor.return_value = mock_cursor
store = compatibility_store.CompatibilityStore()
patch_pymysql = mock.patch(
'compatibility_lib.compatibility_store.pymysql',
mock_pymysql)
packages = [PACKAGE_1]
with patch_pymysql, self.assertRaises(ValueError):
store.get_pair_compatibility(packages)
def test_get_pair_compatibility(self):
mock_pymysql = mock.Mock()
mock_conn = mock.Mock()
mock_cursor = mock.Mock()
mock_pymysql.connect.return_value = mock_conn
mock_conn.cursor.return_value = mock_cursor
row = ('pkg1', 'pkg2', 'SUCCESS', '3',
'2018-07-17 02:14:27.15768 UTC', None)
mock_cursor.fetchall.return_value = [row]
patch_pymysql = mock.patch(
'compatibility_lib.compatibility_store.pymysql',
mock_pymysql)
store = compatibility_store.CompatibilityStore()
packages = [PACKAGE_1, PACKAGE_2]
with patch_pymysql:
res = store.get_pair_compatibility(packages)
self.assertEqual(len(res), 1)
self.assertEqual(len(res[0].packages), 2)
self.assertTrue(
isinstance(res[0], compatibility_store.CompatibilityResult))
def test_compatibility_combinations(self):
row1 = ('package1', 'package2', 'SUCCESS',
'3', '2018-07-17 02:14:27.15768 UTC', None)
row2 = ('package1', 'package3', 'SUCCESS',
'3', '2018-07-17 02:14:27.15768 UTC', None)
row3 = ('package2', 'package3', 'SUCCESS',
'3', '2018-07-17 02:14:27.15768 UTC', None)
store = compatibility_store.CompatibilityStore()
mock_pymysql = mock.Mock()
mock_conn = mock.Mock()
mock_cursor = mock.Mock()
mock_pymysql.connect.return_value = mock_conn
mock_conn.cursor.return_value = mock_cursor
mock_cursor.fetchall.return_value = [row1, row2, row3]
patch_pymysql = mock.patch(
'compatibility_lib.compatibility_store.pymysql', mock_pymysql)
packages = [PACKAGE_1, PACKAGE_2, PACKAGE_3]
with patch_pymysql:
res = store.get_compatibility_combinations(packages)
expected_pair_1 = frozenset({PACKAGE_1, PACKAGE_2})
expected_pair_2 = frozenset({PACKAGE_1, PACKAGE_3})
expected_pair_3 = frozenset({PACKAGE_2, PACKAGE_3})
self.assertEqual(len(res.keys()), 3)
self.assertEqual(
frozenset(res.keys()),
frozenset({expected_pair_1, expected_pair_2, expected_pair_3}))
def test_save_compatibility_statuses_pair(self):
packages = [PACKAGE_1, PACKAGE_2]
status = compatibility_store.Status.SUCCESS
comp_status = compatibility_store.CompatibilityResult(
packages=packages,
python_major_version='3',
status=status,
details=None,
dependency_info=None,
timestamp=None)
row_pairwise = ('package1', 'package2', 'SUCCESS',
'3', None, None)
mock_pymysql = mock.Mock()
mock_conn = mock.Mock()
mock_cursor = mock.Mock()
mock_pymysql.connect.return_value = mock_conn
mock_conn.cursor.return_value = mock_cursor
patch_pymysql = mock.patch(
'compatibility_lib.compatibility_store.pymysql', mock_pymysql)
pair_sql = 'REPLACE INTO pairwise_compatibility_status values ' \
'(%s, %s, %s, %s, %s, %s)'
with patch_pymysql:
store = compatibility_store.CompatibilityStore()
store.save_compatibility_statuses([comp_status])
mock_cursor.executemany.assert_called_with(
pair_sql, [row_pairwise])
def test_save_compatibility_statuses_self(self):
packages = [PACKAGE_1]
status = compatibility_store.Status.SUCCESS
comp_status = compatibility_store.CompatibilityResult(
packages=packages,
python_major_version='3',
status=status,
details=None,
dependency_info=None,
timestamp=None)
row_self = ('package1', 'SUCCESS', '3', None, None)
mock_pymysql = mock.Mock()
mock_conn = mock.Mock()
mock_cursor = mock.Mock()
mock_pymysql.connect.return_value = mock_conn
mock_conn.cursor.return_value = mock_cursor
patch_pymysql = mock.patch(
'compatibility_lib.compatibility_store.pymysql', mock_pymysql)
self_sql = 'REPLACE INTO self_compatibility_status values ' \
'(%s, %s, %s, %s, %s)'
with patch_pymysql:
store = compatibility_store.CompatibilityStore()
store.save_compatibility_statuses([comp_status])
mock_cursor.executemany.assert_called_with(
self_sql, [row_self])
def test_save_compatibility_statuses_release_time(self):
packages = [PACKAGE_1]
status = compatibility_store.Status.SUCCESS
comp_status = compatibility_store.CompatibilityResult(
packages=packages,
python_major_version='3',
status=status,
details=None,
dependency_info={'dep1': {
'installed_version': '2.1.0',
'installed_version_time': '2018-05-12T16:26:31',
'latest_version': '2.1.0',
'current_time': '2018-07-13T17:11:29.140608',
'latest_version_time': '2018-05-12T16:26:31',
'is_latest': True,
}},
timestamp=None)
row_release_time = ('package1', 'dep1', '2.1.0', '2018-05-12T16:26:31',
'2.1.0', '2018-05-12T16:26:31', True,
'2018-07-13T17:11:29.140608')
mock_pymysql = mock.Mock()
mock_conn = mock.Mock()
mock_cursor = mock.Mock()
mock_pymysql.connect.return_value = mock_conn
mock_conn.cursor.return_value = mock_cursor
patch_pymysql = mock.patch(
'compatibility_lib.compatibility_store.pymysql', mock_pymysql)
sql = 'REPLACE INTO release_time_for_dependencies values ' \
'(%s, %s, %s, %s, %s, %s, %s, %s)'
with patch_pymysql:
store = compatibility_store.CompatibilityStore()
store.save_compatibility_statuses([comp_status])
mock_cursor.executemany.assert_called_with(
sql, [row_release_time])
def test_save_compatibility_statuses_release_time_for_latest(self):
packages = [PACKAGE_4]
timestamp = '2018-07-17 03:01:06.11693 UTC'
status = compatibility_store.Status.SUCCESS
comp_status_py2 = compatibility_store.CompatibilityResult(
packages=packages,
python_major_version='2',
status=status,
details=None,
dependency_info={'package4': {
'installed_version': '12.7.0',
'installed_version_time': '2018-05-12T16:26:31',
'latest_version': '12.7.0',
'current_time': '2018-07-13T17:11:29.140608',
'latest_version_time': '2018-05-12T16:26:31',
'is_latest': True,
}},
timestamp=timestamp)
comp_status_py3 = compatibility_store.CompatibilityResult(
packages=packages,
python_major_version='3',
status=status,
details=None,
dependency_info={'package4': {
'installed_version': '2.2.0',
'installed_version_time': '2018-05-12T16:26:31',
'latest_version': '2.7.0',
'current_time': '2018-07-13T17:11:29.140608',
'latest_version_time': '2018-05-12T16:26:31',
'is_latest': False,
}},
timestamp=timestamp)
row_release_time = ('package4[gcp]', 'package4', '12.7.0',
'2018-05-12T16:26:31', '12.7.0',
'2018-05-12T16:26:31', True,
'2018-07-13T17:11:29.140608')
mock_pymysql = mock.Mock()
mock_conn = mock.Mock()
mock_cursor = mock.Mock()
mock_pymysql.connect.return_value = mock_conn
mock_conn.cursor.return_value = mock_cursor
patch_pymysql = mock.patch(
'compatibility_lib.compatibility_store.pymysql', mock_pymysql)
sql = 'REPLACE INTO release_time_for_dependencies values ' \
'(%s, %s, %s, %s, %s, %s, %s, %s)'
with patch_pymysql:
store = compatibility_store.CompatibilityStore()
store.save_compatibility_statuses(
[comp_status_py2, comp_status_py3])
mock_cursor.executemany.assert_called_with(
sql, [row_release_time])
def test_save_compatibility_statuses_release_time_for_latest_many_packages(
self):
status = compatibility_store.Status.SUCCESS
apache_beam_py2 = compatibility_store.CompatibilityResult(
packages=[package.Package('apache-beam[gcp]')],
python_major_version='2',
status=status,
details=None,
dependency_info={
'six': {
'installed_version': '9.9.9',
'installed_version_time': '2018-05-12T16:26:31',
'latest_version': '2.7.0',
'current_time': '2018-07-13T17:11:29.140608',
'latest_version_time': '2018-05-12T16:26:31',
'is_latest': False,
                },
'apache-beam': {
'installed_version': '2.7.0',
'installed_version_time': '2018-05-12T16:26:31',
'latest_version': '2.7.0',
'current_time': '2018-07-13T17:11:29.140608',
'latest_version_time': '2018-05-12T16:26:31',
'is_latest': True,
}},
timestamp=None)
apache_beam_py3 = compatibility_store.CompatibilityResult(
packages=[package.Package('apache-beam[gcp]')],
python_major_version='3',
status=status,
details=None,
dependency_info={'apache-beam': {
'installed_version': '2.2.0',
'installed_version_time': '2018-05-12T16:26:31',
'latest_version': '2.7.0',
'current_time': '2018-07-13T17:11:29.140608',
'latest_version_time': '2018-05-12T16:26:31',
'is_latest': False,
}},
timestamp=None)
google_api_core_py2 = compatibility_store.CompatibilityResult(
packages=[package.Package('google-api-core')],
python_major_version='2',
status=status,
details=None,
dependency_info={
'google-api-core': {
'installed_version': '3.7.0',
'installed_version_time': '2018-05-12T16:26:31',
'latest_version': '2.7.0',
'current_time': '2018-07-13T17:11:29.140608',
'latest_version_time': '2018-05-12T16:26:31',
'is_latest': True,
}},
timestamp=None)
google_api_core_py3 = compatibility_store.CompatibilityResult(
packages=[package.Package('google-api-core')],
python_major_version='3',
status=status,
details=None,
dependency_info={'google-api-core': {
'installed_version': '3.7.1',
'installed_version_time': '2018-05-12T16:26:31',
'latest_version': '2.7.0',
'current_time': '2018-07-13T17:11:29.140608',
'latest_version_time': '2018-05-12T16:26:31',
'is_latest': False,
}},
timestamp=None)
apache_beam_row = ('apache-beam[gcp]', 'apache-beam', '2.7.0',
'2018-05-12T16:26:31', '2.7.0',
'2018-05-12T16:26:31', True,
'2018-07-13T17:11:29.140608')
six_row = ('apache-beam[gcp]', 'six', '9.9.9', '2018-05-12T16:26:31',
'2.7.0', '2018-05-12T16:26:31', False,
'2018-07-13T17:11:29.140608')
google_api_core_row = ('google-api-core', 'google-api-core', '3.7.1',
'2018-05-12T16:26:31', '2.7.0',
'2018-05-12T16:26:31', False,
'2018-07-13T17:11:29.140608')
mock_pymysql = mock.Mock()
mock_conn = mock.Mock()
mock_cursor = mock.Mock()
mock_pymysql.connect.return_value = mock_conn
mock_conn.cursor.return_value = mock_cursor
patch_pymysql = mock.patch(
'compatibility_lib.compatibility_store.pymysql', mock_pymysql)
sql = 'REPLACE INTO release_time_for_dependencies values ' \
'(%s, %s, %s, %s, %s, %s, %s, %s)'
with patch_pymysql:
store = compatibility_store.CompatibilityStore()
store.save_compatibility_statuses(
[apache_beam_py2,
apache_beam_py3,
google_api_core_py2,
google_api_core_py3])
mock_cursor.executemany.assert_called_with(
sql, [apache_beam_row, six_row, google_api_core_row])
class MockClient(object):
def __init__(self, project=None):
self.project = project
def dataset(self, dataset_name):
dataset_ref = mock.Mock()
def table(table_name):
return table_name
dataset_ref.table = table
return dataset_ref
def get_table(self, table_name):
return table_name
| apache-2.0 | 6,168,003,501,492,434,000 | 39.494604 | 79 | 0.57575 | false |
QualiSystems/Azure-Shell | package/cloudshell/cp/azure/common/singletons.py | 1 | 2092 | import threading
class AbstractComparableInstance(object):
"""Abstract class that must be used together with SingletonByArgsMeta class"""
def check_params_equality(self, *args, **kwargs):
"""Check if instance have the same attributes as provided in args and kwarg. Method must accept the same
attributes as a __init__ one
:param args: same args as for __init__ method
:param kwargs: same kwargs as for __init__ method
:return: (bool) True or False
"""
raise NotImplementedError("Class {} must implement method 'check_params_equality'".format(type(self)))
class SingletonByArgsMeta(type):
"""Metaclass that allows to create single instances per same arguments
Class that uses this metaclass must be a subclass of AbstractComparableInstance class and implement
"check_params_equality" method
Example usage:
>>> class Test(AbstractComparableInstance):
>>> __metaclass__ = SingletonByArgsMeta
>>>
>>> def __init__(self, a, b):
>>> self.a = a
>>> self.b = b
>>>
>>> def check_params_equality(self, a, b):
>>> return self.a == a and self.b == b
>>>
>>> Test("a1" , "b1") is Test("a1" , "b1")
>>> True
>>>
>>> Test("a1" , "b1") is Test("a2" , "b2")
>>> False
"""
__instances_by_cls = {}
lock = threading.Lock()
def __call__(cls, *args, **kwargs):
if not issubclass(cls, AbstractComparableInstance):
raise NotImplementedError("Class {} must inherit 'AbstractComparableInstance' "
"if used with SingletonByArgsMeta metaclass".format(cls))
with SingletonByArgsMeta.lock:
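            # reuse the cached instance only if it was constructed with
            # equal arguments; otherwise build and cache a fresh one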
instance = cls.__instances_by_cls.get(cls)
if not (instance and instance.check_params_equality(*args, **kwargs)):
instance = super(SingletonByArgsMeta, cls).__call__(*args, **kwargs)
cls.__instances_by_cls[cls] = instance
return instance
| apache-2.0 | 1,728,342,937,448,489,700 | 37.036364 | 112 | 0.585564 | false |
sunfishcode/cretonne | lib/cretonne/meta/cdsl/isa.py | 1 | 18670 | """Defining instruction set architectures."""
from __future__ import absolute_import
from collections import OrderedDict
from .predicates import And, TypePredicate
from .registers import RegClass, Register, Stack
from .ast import Apply
from .types import ValueType
from .instructions import InstructionGroup
# The typing module is only required by mypy, and we don't use these imports
# outside type comments.
try:
from typing import Tuple, Union, Any, Iterable, Sequence, List, Set, Dict, TYPE_CHECKING # noqa
if TYPE_CHECKING:
from .instructions import MaybeBoundInst, InstructionFormat # noqa
from .predicates import PredNode, PredKey # noqa
from .settings import SettingGroup # noqa
from .registers import RegBank # noqa
from .xform import XFormGroup # noqa
OperandConstraint = Union[RegClass, Register, int, Stack]
ConstraintSeq = Union[OperandConstraint, Tuple[OperandConstraint, ...]]
# Instruction specification for encodings. Allows for predicated
# instructions.
InstSpec = Union[MaybeBoundInst, Apply]
BranchRange = Sequence[int]
# A recipe predicate consisting of an ISA predicate and an instruction
# predicate.
RecipePred = Tuple[PredNode, PredNode]
except ImportError:
pass
class TargetISA(object):
"""
A target instruction set architecture.
The `TargetISA` class collects everything known about a target ISA.
:param name: Short mnemonic name for the ISA.
:param instruction_groups: List of `InstructionGroup` instances that are
relevant for this ISA.
"""
def __init__(self, name, instruction_groups):
# type: (str, Sequence[InstructionGroup]) -> None
self.name = name
self.settings = None # type: SettingGroup
self.instruction_groups = instruction_groups
self.cpumodes = list() # type: List[CPUMode]
self.regbanks = list() # type: List[RegBank]
self.regclasses = list() # type: List[RegClass]
self.legalize_codes = OrderedDict() # type: OrderedDict[XFormGroup, int] # noqa
# Unique copies of all predicates.
self._predicates = dict() # type: Dict[PredKey, PredNode]
assert InstructionGroup._current is None,\
"InstructionGroup {} is still open!"\
.format(InstructionGroup._current.name)
def __str__(self):
# type: () -> str
return self.name
def finish(self):
# type: () -> TargetISA
"""
Finish the definition of a target ISA after adding all CPU modes and
settings.
This computes some derived properties that are used in multiple
places.
        :returns: self
"""
self._collect_encoding_recipes()
self._collect_predicates()
self._collect_regclasses()
self._collect_legalize_codes()
return self
def _collect_encoding_recipes(self):
# type: () -> None
"""
Collect and number all encoding recipes in use.
"""
self.all_recipes = list() # type: List[EncRecipe]
rcps = set() # type: Set[EncRecipe]
for cpumode in self.cpumodes:
for enc in cpumode.encodings:
recipe = enc.recipe
if recipe not in rcps:
assert recipe.number is None
recipe.number = len(rcps)
rcps.add(recipe)
self.all_recipes.append(recipe)
# Make sure ISA predicates are registered.
if recipe.isap:
recipe.isap = self.unique_pred(recipe.isap)
self.settings.number_predicate(recipe.isap)
recipe.instp = self.unique_pred(recipe.instp)
def _collect_predicates(self):
# type: () -> None
"""
Collect and number all predicates in use.
Ensures that all ISA predicates have an assigned bit number in
`self.settings`.
"""
self.instp_number = OrderedDict() # type: OrderedDict[PredNode, int]
for cpumode in self.cpumodes:
for enc in cpumode.encodings:
instp = enc.instp
if instp and instp not in self.instp_number:
# assign predicate number starting from 0.
n = len(self.instp_number)
self.instp_number[instp] = n
# All referenced ISA predicates must have a number in
# `self.settings`. This may cause some parent predicates to be
# replicated here, which is OK.
if enc.isap:
self.settings.number_predicate(enc.isap)
def _collect_regclasses(self):
# type: () -> None
"""
Collect and number register classes.
Every register class needs a unique index, and the classes need to be
topologically ordered.
We also want all the top-level register classes to be first.
"""
# Compute subclasses and top-level classes in each bank.
# Collect the top-level classes so they get numbered consecutively.
for bank in self.regbanks:
bank.finish_regclasses()
# Always get the pressure tracking classes in first.
if bank.pressure_tracking:
self.regclasses.extend(bank.toprcs)
# The limit on the number of top-level register classes can be raised.
# This should be coordinated with the `MAX_TRACKED_TOPRCS` constant in
# `isa/registers.rs`.
assert len(self.regclasses) <= 4, "Too many top-level register classes"
# Get the remaining top-level register classes which may exceed
# `MAX_TRACKED_TOPRCS`.
for bank in self.regbanks:
if not bank.pressure_tracking:
self.regclasses.extend(bank.toprcs)
# Collect all of the non-top-level register classes.
# They are numbered strictly after the top-level classes.
for bank in self.regbanks:
self.regclasses.extend(
rc for rc in bank.classes if not rc.is_toprc())
for idx, rc in enumerate(self.regclasses):
rc.index = idx
# The limit on the number of register classes can be changed. It should
# be coordinated with the `RegClassMask` and `RegClassIndex` types in
# `isa/registers.rs`.
assert len(self.regclasses) <= 32, "Too many register classes"
def _collect_legalize_codes(self):
# type: () -> None
"""
Make sure all legalization transforms have been assigned a code.
"""
for cpumode in self.cpumodes:
self.legalize_code(cpumode.default_legalize)
for x in cpumode.type_legalize.values():
self.legalize_code(x)
def legalize_code(self, xgrp):
# type: (XFormGroup) -> int
"""
Get the legalization code for the transform group `xgrp`. Assign one if
necessary.
Each target ISA has its own list of legalization actions with
associated legalize codes that appear in the encoding tables.
This method is used to maintain the registry of legalization actions
and their table codes.
"""
if xgrp in self.legalize_codes:
code = self.legalize_codes[xgrp]
else:
code = len(self.legalize_codes)
self.legalize_codes[xgrp] = code
return code
def unique_pred(self, pred):
# type: (PredNode) -> PredNode
"""
Get a unique predicate that is equivalent to `pred`.
"""
if pred is None:
return pred
# TODO: We could actually perform some algebraic simplifications. It's
# not clear if it is worthwhile.
k = pred.predicate_key()
if k in self._predicates:
return self._predicates[k]
self._predicates[k] = pred
return pred
class CPUMode(object):
"""
A CPU mode determines which instruction encodings are active.
All instruction encodings are associated with exactly one `CPUMode`, and
all CPU modes are associated with exactly one `TargetISA`.
:param name: Short mnemonic name for the CPU mode.
:param target: Associated `TargetISA`.
"""
def __init__(self, name, isa):
# type: (str, TargetISA) -> None
self.name = name
self.isa = isa
self.encodings = [] # type: List[Encoding]
isa.cpumodes.append(self)
# Tables for configuring legalization actions when no valid encoding
# exists for an instruction.
self.default_legalize = None # type: XFormGroup
self.type_legalize = OrderedDict() # type: OrderedDict[ValueType, XFormGroup] # noqa
def __str__(self):
# type: () -> str
return self.name
def enc(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""
Add a new encoding to this CPU mode.
        Arguments are the `Encoding` constructor arguments, except for the
        first `CPUMode` argument, which is implied.
"""
self.encodings.append(Encoding(self, *args, **kwargs))
def legalize_type(self, default=None, **kwargs):
# type: (XFormGroup, **XFormGroup) -> None
"""
Configure the legalization action per controlling type variable.
Instructions that have a controlling type variable mentioned in one of
the arguments will be legalized according to the action specified here
instead of using the `legalize_default` action.
The keyword arguments are value type names:
mode.legalize_type(i8=widen, i16=widen, i32=expand)
The `default` argument specifies the action to take for controlling
type variables that don't have an explicitly configured action.
"""
if default is not None:
self.default_legalize = default
for name, xgrp in kwargs.items():
ty = ValueType.by_name(name)
self.type_legalize[ty] = xgrp
def legalize_monomorphic(self, xgrp):
# type: (XFormGroup) -> None
"""
Configure the legalization action to take for monomorphic instructions
which don't have a controlling type variable.
See also `legalize_type()` for polymorphic instructions.
"""
self.type_legalize[None] = xgrp
def get_legalize_action(self, ty):
# type: (ValueType) -> XFormGroup
"""
Get the legalization action to use for `ty`.
"""
return self.type_legalize.get(ty, self.default_legalize)
class EncRecipe(object):
"""
A recipe for encoding instructions with a given format.
Many different instructions can be encoded by the same recipe, but they
must all have the same instruction format.
The `ins` and `outs` arguments are tuples specifying the register
allocation constraints for the value operands and results respectively. The
possible constraints for an operand are:
- A `RegClass` specifying the set of allowed registers.
- A `Register` specifying a fixed-register operand.
- An integer indicating that this result is tied to a value operand, so
they must use the same register.
- A `Stack` specifying a value in a stack slot.
The `branch_range` argument must be provided for recipes that can encode
branch instructions. It is an `(origin, bits)` tuple describing the exact
range that can be encoded in a branch instruction.
    For ISAs that use CPU flags in the `iflags` and `fflags` value types,
    `clobbers_flags` indicates instruction encodings that clobber the CPU
    flags, so they can't be used where a flag value is live.
:param name: Short mnemonic name for this recipe.
:param format: All encoded instructions must have this
:py:class:`InstructionFormat`.
:param size: Number of bytes in the binary encoded instruction.
:param ins: Tuple of register constraints for value operands.
:param outs: Tuple of register constraints for results.
:param branch_range: `(origin, bits)` range for branches.
:param clobbers_flags: This instruction clobbers `iflags` and `fflags`.
:param instp: Instruction predicate.
:param isap: ISA predicate.
:param emit: Rust code for binary emission.
"""
def __init__(
self,
name, # type: str
format, # type: InstructionFormat
size, # type: int
ins, # type: ConstraintSeq
outs, # type: ConstraintSeq
branch_range=None, # type: BranchRange
clobbers_flags=True, # type: bool
instp=None, # type: PredNode
isap=None, # type: PredNode
emit=None # type: str
):
# type: (...) -> None
self.name = name
self.format = format
assert size >= 0
self.size = size
self.branch_range = branch_range
self.clobbers_flags = clobbers_flags
self.instp = instp
self.isap = isap
self.emit = emit
if instp:
assert instp.predicate_context() == format
self.number = None # type: int
self.ins = self._verify_constraints(ins)
if not format.has_value_list:
assert len(self.ins) == format.num_value_operands
self.outs = self._verify_constraints(outs)
def __str__(self):
# type: () -> str
return self.name
def _verify_constraints(self, seq):
# type: (ConstraintSeq) -> Sequence[OperandConstraint]
if not isinstance(seq, tuple):
seq = (seq,)
for c in seq:
if isinstance(c, int):
# An integer constraint is bound to a value operand.
# Check that it is in range.
assert c >= 0 and c < len(self.ins)
else:
assert (isinstance(c, RegClass)
or isinstance(c, Register)
or isinstance(c, Stack))
return seq
def ties(self):
# type: () -> Tuple[Dict[int, int], Dict[int, int]]
"""
Return two dictionaries representing the tied operands.
The first maps input number to tied output number, the second maps
output number to tied input number.
"""
i2o = dict() # type: Dict[int, int]
o2i = dict() # type: Dict[int, int]
for o, i in enumerate(self.outs):
if isinstance(i, int):
i2o[i] = o
o2i[o] = i
return (i2o, o2i)
def fixed_ops(self):
# type: () -> Tuple[Set[Register], Set[Register]]
"""
Return two sets of registers representing the fixed input and output
operands.
"""
i = set(r for r in self.ins if isinstance(r, Register))
o = set(r for r in self.outs if isinstance(r, Register))
return (i, o)
def recipe_pred(self):
# type: () -> RecipePred
"""
Get the combined recipe predicate which includes both the ISA predicate
and the instruction predicate.
Return `None` if this recipe has neither predicate.
"""
if self.isap is None and self.instp is None:
return None
else:
return (self.isap, self.instp)
class Encoding(object):
"""
Encoding for a concrete instruction.
    An `Encoding` object ties an instruction opcode and concrete type
    variables together with an encoding recipe and encoding bits.
The concrete instruction can be in three different forms:
1. A naked opcode: `trap` for non-polymorphic instructions.
2. With bound type variables: `iadd.i32` for polymorphic instructions.
3. With operands providing constraints: `icmp.i32(intcc.eq, x, y)`.
If the instruction is polymorphic, all type variables must be provided.
:param cpumode: The CPU mode where the encoding is active.
:param inst: The :py:class:`Instruction` or :py:class:`BoundInstruction`
being encoded.
:param recipe: The :py:class:`EncRecipe` to use.
:param encbits: Additional encoding bits to be interpreted by `recipe`.
:param instp: Instruction predicate, or `None`.
:param isap: ISA predicate, or `None`.
"""
def __init__(self, cpumode, inst, recipe, encbits, instp=None, isap=None):
# type: (CPUMode, InstSpec, EncRecipe, int, PredNode, PredNode) -> None # noqa
assert isinstance(cpumode, CPUMode)
assert isinstance(recipe, EncRecipe)
# Check for possible instruction predicates in `inst`.
if isinstance(inst, Apply):
instp = And.combine(instp, inst.inst_predicate())
self.inst = inst.inst
self.typevars = inst.typevars
else:
self.inst, self.typevars = inst.fully_bound()
# Add secondary type variables to the instruction predicate.
# This is already included by Apply.inst_predicate() above.
if len(self.typevars) > 1:
for tv, vt in zip(self.inst.other_typevars, self.typevars[1:]):
# A None tv is an 'any' wild card: `ishl.i32.any`.
if vt is None:
continue
typred = TypePredicate.typevar_check(self.inst, tv, vt)
instp = And.combine(instp, typred)
self.cpumode = cpumode
assert self.inst.format == recipe.format, (
"Format {} must match recipe: {}".format(
self.inst.format, recipe.format))
if self.inst.is_branch:
assert recipe.branch_range, (
'Recipe {} for {} must have a branch_range'
.format(recipe, self.inst.name))
self.recipe = recipe
self.encbits = encbits
# Record specific predicates. Note that the recipe also has predicates.
self.instp = self.cpumode.isa.unique_pred(instp)
self.isap = self.cpumode.isa.unique_pred(isap)
def __str__(self):
# type: () -> str
return '[{}#{:02x}]'.format(self.recipe, self.encbits)
def ctrl_typevar(self):
# type: () -> ValueType
"""
Get the controlling type variable for this encoding or `None`.
"""
if self.typevars:
return self.typevars[0]
else:
return None
| apache-2.0 | 5,686,922,585,626,008,000 | 36.641129 | 100 | 0.602625 | false |
DemSquirrel/SquirrelOS | kernel/kernel.py | 1 | 1396 | import sys, os, time
commandList = ["help", "shutdown", "ls", "dir", "mkdir", "cd", "time"]
def bootLoader():
print("Welcome to SquirrelOS!")
print("Made By Dem_Squirrel and FreakzDK \n")
print("Type help to see the commands")
commandLine()
def commandLine():
    cmd = input("> ")
    if cmd == commandList[0]:
        helpdist()
    if cmd == commandList[1]:
        shutdown()
    if cmd == commandList[2] or cmd == commandList[3]:
        showdir()
    if cmd == commandList[6]:
        showTime()
    if cmd.startswith("mkdir"):
        dirName = cmd[5:].strip()
        makedir(dirName)
    elif cmd.startswith("cd"):
        dirPath = cmd[2:].strip()
        changeDir(dirPath)
    elif cmd not in commandList:
        print("This command wasn't found")
        commandLine()
def helpdist():
for cmd in commandList:
print(cmd)
commandLine()
def shutdown():
sys.exit(0)
def showdir():
getDir = os.getcwd()
print(getDir)
print(os.listdir(getDir))
commandLine()
def makedir(name):
os.makedirs("data/"+name)
print("Made A dir"+name)
commandLine()
def changeDir(path):
    # os.path.join picks the right separator on every platform
    os.chdir(os.path.join(os.getcwd(), path))
    commandLine()
def showTime():
    print(time.ctime())
    commandLine()
bootLoader() | mit | -2,599,846,609,285,886,000 | 21.532258 | 70 | 0.574499 | false |
tiancj/emesene | emesene/e3/xmpp/SleekXMPP/setup.py | 1 | 5033 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Nathanael C. Fritz
# All Rights Reserved
#
# This software is licensed as described in the README.rst and LICENSE
# file, which you should have received as part of this distribution.
import sys
import codecs
try:
from setuptools import setup, Command
except ImportError:
from distutils.core import setup, Command
# from ez_setup import use_setuptools
from testall import TestCommand
from sleekxmpp.version import __version__
# if 'cygwin' in sys.platform.lower():
# min_version = '0.6c6'
# else:
# min_version = '0.6a9'
#
# try:
# use_setuptools(min_version=min_version)
# except TypeError:
# # locally installed ez_setup won't have min_version
# use_setuptools()
#
# from setuptools import setup, find_packages, Extension, Feature
VERSION = __version__
DESCRIPTION = 'SleekXMPP is an elegant Python library for XMPP (aka Jabber, Google Talk, etc).'
with codecs.open('README.rst', 'r', encoding='UTF-8') as readme:
LONG_DESCRIPTION = ''.join(readme)
CLASSIFIERS = [ 'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Topic :: Software Development :: Libraries :: Python Modules',
]
packages = [ 'sleekxmpp',
'sleekxmpp/stanza',
'sleekxmpp/test',
'sleekxmpp/roster',
'sleekxmpp/xmlstream',
'sleekxmpp/xmlstream/matcher',
'sleekxmpp/xmlstream/handler',
'sleekxmpp/plugins',
'sleekxmpp/plugins/xep_0004',
'sleekxmpp/plugins/xep_0004/stanza',
'sleekxmpp/plugins/xep_0009',
'sleekxmpp/plugins/xep_0009/stanza',
'sleekxmpp/plugins/xep_0012',
'sleekxmpp/plugins/xep_0027',
'sleekxmpp/plugins/xep_0030',
'sleekxmpp/plugins/xep_0030/stanza',
'sleekxmpp/plugins/xep_0033',
'sleekxmpp/plugins/xep_0047',
'sleekxmpp/plugins/xep_0050',
'sleekxmpp/plugins/xep_0054',
'sleekxmpp/plugins/xep_0059',
'sleekxmpp/plugins/xep_0060',
'sleekxmpp/plugins/xep_0060/stanza',
'sleekxmpp/plugins/xep_0065',
'sleekxmpp/plugins/xep_0066',
'sleekxmpp/plugins/xep_0077',
'sleekxmpp/plugins/xep_0078',
'sleekxmpp/plugins/xep_0080',
'sleekxmpp/plugins/xep_0084',
'sleekxmpp/plugins/xep_0085',
'sleekxmpp/plugins/xep_0086',
'sleekxmpp/plugins/xep_0092',
'sleekxmpp/plugins/xep_0107',
'sleekxmpp/plugins/xep_0108',
'sleekxmpp/plugins/xep_0115',
'sleekxmpp/plugins/xep_0118',
'sleekxmpp/plugins/xep_0128',
'sleekxmpp/plugins/xep_0153',
'sleekxmpp/plugins/xep_0172',
'sleekxmpp/plugins/xep_0184',
'sleekxmpp/plugins/xep_0186',
'sleekxmpp/plugins/xep_0191',
'sleekxmpp/plugins/xep_0198',
'sleekxmpp/plugins/xep_0199',
'sleekxmpp/plugins/xep_0202',
'sleekxmpp/plugins/xep_0203',
'sleekxmpp/plugins/xep_0221',
'sleekxmpp/plugins/xep_0224',
'sleekxmpp/plugins/xep_0231',
'sleekxmpp/plugins/xep_0249',
'sleekxmpp/plugins/xep_0258',
'sleekxmpp/features',
'sleekxmpp/features/feature_mechanisms',
'sleekxmpp/features/feature_mechanisms/stanza',
'sleekxmpp/features/feature_starttls',
'sleekxmpp/features/feature_bind',
'sleekxmpp/features/feature_session',
'sleekxmpp/features/feature_rosterver',
'sleekxmpp/thirdparty',
'sleekxmpp/thirdparty/suelta',
'sleekxmpp/thirdparty/suelta/mechanisms',
]
setup(
name = "sleekxmpp",
version = VERSION,
description = DESCRIPTION,
long_description = LONG_DESCRIPTION,
author = 'Nathanael Fritz',
author_email = 'fritzy [at] netflint.net',
url = 'http://github.com/fritzy/SleekXMPP',
license = 'MIT',
platforms = [ 'any' ],
packages = packages,
requires = [ 'dnspython', 'pyasn1', 'pyasn1_modules' ],
classifiers = CLASSIFIERS,
cmdclass = {'test': TestCommand}
)
| gpl-3.0 | 1,389,728,075,824,915,200 | 39.264 | 100 | 0.543612 | false |
OpenTransitTools/data | ott/data/content/fares.py | 1 | 1644 | from datetime import datetime
from datetime import timedelta
from ott.utils import json_utils
import logging
log = logging.getLogger(__file__)
class Fares(object):
def __init__(self, fare_url, fare_timeout_mins=1440):
"""
"""
log.info("create an instance of {0}".format(self.__class__.__name__))
self.fare_url = fare_url
if fare_timeout_mins:
self.fare_timeout = fare_timeout_mins
else:
self.fare_timeout = 1440
self.last_update = datetime.now() - timedelta(minutes=(self.fare_timeout+10))
self.content = []
self.update()
def update(self):
try:
if self.content is None \
or len(self.content) < 1 \
or datetime.now() - self.last_update > timedelta(minutes = self.fare_timeout):
log.debug("updating the fare content")
self.last_update = datetime.now()
c = json_utils.stream_json(self.fare_url)
if c:
self.content = c
except Exception as e:
log.warn("couldn't update the fare content: {}".format(e))
def query(self, fare_type="adult", def_val=None):
"""
"""
#import pdb; pdb.set_trace()
ret_val = def_val
try:
self.update()
for c in self.content:
if fare_type in c:
ret_val = c[fare_type]
break
except Exception as e:
log.warn("no fare content for fare_type={0}, using default fare of {1}".format(fare_type, def_val))
return ret_val
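# A minimal usage sketch (the URL and fare type are illustrative; the
# service is expected to return a JSON list of {fare_type: value} objects):
#
#   fares = Fares('http://example.com/fares.json', fare_timeout_mins=60)
#   adult_fare = fares.query('adult', def_val='2.50 USD')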
| mpl-2.0 | -1,689,696,673,993,191,000 | 30.615385 | 111 | 0.536496 | false |
Redball45/Redball-Cogs | mineserver/async_mcrcon.py | 1 | 1899 | import asyncio
import struct
class ClientError(Exception):
pass
class InvalidPassword(Exception):
pass
class MinecraftClient:
def __init__(self, host, port, password):
self.host = host
self.port = port
self.password = password
self._auth = None
self._reader = None
self._writer = None
async def __aenter__(self):
if not self._writer:
self._reader, self._writer = await asyncio.open_connection(self.host, self.port)
await self._authenticate()
return self
async def __aexit__(self, exc_type, exc, tb):
if self._writer:
self._writer.close()
async def _authenticate(self):
if not self._auth:
await self._send(3, self.password)
self._auth = True
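        # Packet type 3 is SERVERDATA_AUTH in the Source RCON protocol; a
        # rejected password comes back with request id -1, which _send
        # turns into InvalidPassword.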
async def _read_data(self, leng):
data = b''
while len(data) < leng:
data += await self._reader.read(leng - len(data))
return data
async def _send(self, typen, message):
if not self._writer:
raise ClientError('Not connected.')
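        # RCON framing: a little-endian int32 length prefix, then an int32
        # request id (always 0 here), an int32 packet type, the UTF-8 body,
        # and two terminating null bytes.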
out = struct.pack('<li', 0, typen) + message.encode('utf8') + b'\x00\x00'
out_len = struct.pack('<i', len(out))
self._writer.write(out_len + out)
in_len = struct.unpack('<i', await self._read_data(4))
in_payload = await self._read_data(in_len[0])
in_id, in_type = struct.unpack('<ii', in_payload[:8])
in_data, in_padd = in_payload[8:-2], in_payload[-2:]
if in_padd != b'\x00\x00':
raise ClientError('Incorrect padding.')
if in_id == -1:
raise InvalidPassword('Incorrect password.')
data = in_data.decode('utf8')
return data
async def send(self, cmd):
result = await self._send(2, cmd)
await asyncio.sleep(0.003) #unsure about this
return result
| mit | 3,130,483,215,566,566,400 | 26.521739 | 92 | 0.565034 | false |
ponty/pyunpack | doc/generate-doc.py | 1 | 1300 | import glob
import logging
import os
from easyprocess import EasyProcess
from entrypoint2 import entrypoint
commands = """
python3 -m pyunpack.cli --help
"""
commands = commands.strip().splitlines()
def empty_dir(dir):
files = glob.glob(os.path.join(dir, "*"))
for f in files:
os.remove(f)
@entrypoint
def main():
gendir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "gen")
logging.info("gendir: %s", gendir)
os.makedirs(gendir, exist_ok=True)
empty_dir(gendir)
try:
os.chdir("gen")
for cmd in commands:
logging.info("cmd: %s", cmd)
fname_base = cmd.replace(" ", "_")
fname = fname_base + ".txt"
logging.info("cmd: %s", cmd)
print("file name: %s" % fname)
with open(fname, "w") as f:
f.write("$ " + cmd + "\n")
p = EasyProcess(cmd).call()
f.write(p.stdout)
if p.stderr and p.stdout:
f.write("\n")
f.write(p.stderr)
finally:
os.chdir("..")
embedme = EasyProcess(["npx", "embedme", "../README.md"])
embedme.call()
print(embedme.stdout)
assert embedme.return_code == 0
assert not "but file does not exist" in embedme.stdout
| bsd-2-clause | 4,043,248,013,462,465,500 | 24.490196 | 76 | 0.546923 | false |
miing/mci_migo | identityprovider/tests/sso_server/test_rate_limit.py | 1 | 1221 | from django.conf import settings
from identityprovider.tests.helpers import FunctionalTestCase
class RateLimitTestCase(FunctionalTestCase):
def test(self):
# = Rate limits =
# Rate limiting is enforced only on POST requests to the login screen,
# on a per-username, per-IP basis.
# There are two settings that control the rate at which you can submit
# requests: no more than LOGIN_LIMIT_REQUESTS requests can be submitted
# every LOGIN_LIMIT_MINUTES minutes.
#
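        # In the project settings that would look something like this
        # (values are illustrative, not taken from this project's config):
        #
        #   LOGIN_LIMIT_REQUESTS = 20
        #   LOGIN_LIMIT_MINUTES = 2
        #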
        # So, first, let's find out what our request limit is
limit = getattr(settings, 'LOGIN_LIMIT_REQUESTS', 20)
# Now, we should be able to submit the login form so many times without
# trouble:
email = '[email protected]'
for i in range(limit):
self.login(email=email, password='wrong')
# But on the next request, it should fail:
response = self.login(email=email, password='wrong')
self.assertContains(response, 'Rate limit exceeded', status_code=403)
# This shouldn't prevent us from logging in as a different user:
response = self.login()
self.assert_home_page(response)
| agpl-3.0 | -6,374,083,568,554,359,000 | 33.885714 | 79 | 0.659296 | false |
lcoandrade/DsgTools | gui/ProductionTools/Toolbars/InspectFeatures/inspectFeatures.py | 1 | 16372 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
InspectFeatures
A QGIS plugin
Builds a temp rubberband with a given size and shape.
-------------------
begin : 2016-08-02
git sha : $Format:%H$
copyright : (C) 2016 by Jossan Costa - Surveying Technician @ Brazilian Army
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from qgis.PyQt.QtWidgets import QMessageBox, QSpinBox, QAction, QWidget
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtCore import QSettings, pyqtSignal, pyqtSlot, QObject, Qt
from qgis.PyQt import QtGui, uic, QtCore
from qgis.PyQt.Qt import QObject
from qgis.core import QgsMapLayer, Qgis, QgsVectorLayer, QgsCoordinateReferenceSystem, QgsCoordinateTransform, QgsFeatureRequest, QgsWkbTypes, QgsProject
from qgis.gui import QgsMessageBar
from .inspectFeatures_ui import Ui_Form
# FORM_CLASS, _ = uic.loadUiType(os.path.join(
# os.path.dirname(__file__), 'inspectFeatures.ui'))
class InspectFeatures(QWidget,Ui_Form):
idxChanged = pyqtSignal(int)
def __init__(self, iface, parent = None):
"""
Constructor
"""
super(InspectFeatures, self).__init__(parent)
self.setupUi(self)
self.parent = parent
self.splitter.hide()
self.iface = iface
# self.iface.currentLayerChanged.connect(self.enableScale)
self.mMapLayerComboBox.layerChanged.connect(self.enableScale)
self.mMapLayerComboBox.layerChanged.connect(self.mFieldExpressionWidget.setLayer)
if not self.iface.activeLayer():
self.enableTool(False)
# self.iface.currentLayerChanged.connect(self.enableTool)
self.mMapLayerComboBox.layerChanged.connect(self.enableTool)
self.zoomPercentageSpinBox.setMinimum(0)
self.zoomPercentageSpinBox.setMaximum(100)
self.zoomPercentageSpinBox.setDecimals(3)
self.zoomPercentageSpinBox.setSingleStep(1)
self.zoomPercentageSpinBox.setSuffix('%')
self.zoomPercentageSpinBox.setValue(100)
self.zoomPercentageSpinBox.setEnabled(False)
self.zoomPercentageSpinBox.hide()
self.mScaleWidget.setScaleString('1:40000')
self.mScaleWidget.setEnabled(False)
self.mScaleWidget.hide()
self.enableScale()
self.canvas = self.iface.mapCanvas()
self.allLayers={}
self.idxChanged.connect(self.setNewId)
self.setToolTip('')
icon_path = ':/plugins/DsgTools/icons/inspectFeatures.png'
text = self.tr('DSGTools: Inspect Features')
self.activateToolAction = self.add_action(icon_path, text, self.inspectPushButton.toggle, parent = self.parent)
self.iface.registerMainWindowAction(self.activateToolAction, '')
icon_path = ':/plugins/DsgTools/icons/backInspect.png'
text = self.tr('DSGTools: Back Inspect')
self.backButtonAction = self.add_action(icon_path, text, self.backInspectButton.click, parent = self.parent)
self.iface.registerMainWindowAction(self.backButtonAction, '')
icon_path = ':/plugins/DsgTools/icons/nextInspect.png'
text = self.tr('DSGTools: Next Inspect')
self.nextButtonAction = self.add_action(icon_path, text, self.nextInspectButton.click, parent = self.parent)
self.iface.registerMainWindowAction(self.nextButtonAction, '')
icon_path = ':/plugins/DsgTools/icons/reload.png'
text = self.tr('DSGTools: Set Active Layer on Feature Inspector')
self.refreshPushButtonAction = self.add_action(icon_path, text, self.refreshPushButton.click, parent = self.parent)
self.iface.registerMainWindowAction(self.refreshPushButtonAction, '')
self.refreshPushButton.setToolTip(self.tr('Set current layer as selected layer on inspect tool'))
def add_action(self, icon_path, text, callback, parent=None):
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
if parent:
parent.addAction(action)
return action
def getIterateLayer(self):
return self.mMapLayerComboBox.currentLayer()
def enableTool(self, enabled = True):
        if enabled is None or not isinstance(enabled, QgsVectorLayer):
allowed = False
else:
allowed = True
toggled = self.inspectPushButton.isChecked()
enabled = allowed and toggled
self.backInspectButton.setEnabled(enabled)
self.nextInspectButton.setEnabled(enabled)
self.idSpinBox.setEnabled(enabled)
def enableScale(self):
"""
The scale combo should only be enabled for point layers
"""
currentLayer = self.getIterateLayer()
        if currentLayer is not None:
if currentLayer.type() == QgsMapLayer.VectorLayer:
if currentLayer.geometryType() == QgsWkbTypes.PointGeometry:
self.mScaleWidget.setEnabled(True)
self.mScaleWidget.show()
self.zoomPercentageSpinBox.setEnabled(False)
self.zoomPercentageSpinBox.hide()
else:
self.mScaleWidget.setEnabled(False)
self.mScaleWidget.hide()
self.zoomPercentageSpinBox.setEnabled(True)
self.zoomPercentageSpinBox.show()
@pyqtSlot(bool)
def on_nextInspectButton_clicked(self):
"""
Inspects the next feature
"""
if self.nextInspectButton.isEnabled():
method = getattr(self, 'testIndexFoward')
self.iterateFeature(method)
def testIndexFoward(self, index, maxIndex, minIndex):
"""
Gets the next index
"""
index += 1
if index > maxIndex:
index = minIndex
return index
def testIndexBackwards(self, index, maxIndex, minIndex):
"""
gets the previous index
"""
index -= 1
if index < minIndex:
index = maxIndex
return index
@pyqtSlot(bool)
def on_backInspectButton_clicked(self):
"""
Inspects the previous feature
"""
if self.backInspectButton.isEnabled():
method = getattr(self, 'testIndexBackwards')
self.iterateFeature(method)
@pyqtSlot(int, name = 'on_idSpinBox_valueChanged')
def setNewId(self, newId):
if not isinstance(self.sender(), QSpinBox):
self.idSpinBox.setValue(newId)
else:
currentLayer = self.getIterateLayer()
lyrName = currentLayer.name()
if lyrName not in list(self.allLayers.keys()):
self.allLayers[lyrName] = 0
return
oldIndex = self.allLayers[lyrName]
if oldIndex == 0:
return
featIdList = self.getFeatIdList(currentLayer)
if oldIndex not in featIdList:
oldIndex = 0
zoom = self.mScaleWidget.scale() if currentLayer.geometryType() == QgsWkbTypes.PointGeometry else self.zoomPercentageSpinBox.value()
if oldIndex == newId:
# self.iface.messageBar().pushMessage(self.tr('Warning!'), self.tr('Selected id does not exist in layer {0}. Returned to previous id.').format(lyrName), level=Qgis.Warning, duration=2)
return
try:
index = featIdList.index(newId)
self.allLayers[lyrName] = index
self.makeZoom(zoom, currentLayer, newId)
self.idSpinBox.setSuffix(' ({0}/{1})'.format(index+1,len(featIdList)))
        except ValueError:
# self.iface.messageBar().pushMessage(self.tr('Warning!'), self.tr('Selected id does not exist in layer {0}. Returned to previous id.').format(lyrName), level=Qgis.Warning, duration=2)
self.idSpinBox.setValue(oldIndex)
self.makeZoom(zoom, currentLayer, oldIndex)
def getFeatIdList(self, currentLayer):
#getting all features ids
if self.mFieldExpressionWidget.currentText() == '':
featIdList = currentLayer.allFeatureIds()
elif not self.mFieldExpressionWidget.isValidExpression():
self.iface.messageBar().pushMessage(self.tr('Warning!'), self.tr('Invalid attribute filter!'), level=Qgis.Warning, duration=2)
return []
else:
request = QgsFeatureRequest().setFilterExpression(self.mFieldExpressionWidget.asExpression())
request.setFlags(QgsFeatureRequest.NoGeometry)
featIdList = [i.id() for i in currentLayer.getFeatures(request)]
#sort is faster than sorted (but sort is just available for lists)
featIdList.sort()
return featIdList
def iterateFeature(self, method):
"""
Iterates over the features selecting and zooming to the desired one
method: method used to determine the desired feature index
"""
currentLayer = self.getIterateLayer()
lyrName = currentLayer.name()
zoom = self.mScaleWidget.scale() if currentLayer.geometryType() == QgsWkbTypes.PointGeometry else self.zoomPercentageSpinBox.value()
featIdList = self.getFeatIdList(currentLayer)
if currentLayer and len(featIdList) > 0:
#checking if this is the first time for this layer (currentLayer)
first = False
if lyrName not in list(self.allLayers.keys()):
self.allLayers[lyrName] = 0
first = True
#getting the current index
index = self.allLayers[lyrName]
#getting max and min ids
#this was made because the list is already sorted, there's no need to calculate max and min
maxIndex = len(featIdList) - 1
minIndex = 0
self.idSpinBox.setMaximum(featIdList[maxIndex])
self.idSpinBox.setMinimum(featIdList[minIndex])
#getting the new index
if not first:
index = method(index, maxIndex, minIndex)
self.idSpinBox.setSuffix(' ({0}/{1})'.format(index+1,len(featIdList)))
self.allLayers[lyrName] = index
#getting the new feature id
id = featIdList[index]
            # adjusting the spin box value
self.idxChanged.emit(id)
self.makeZoom(zoom, currentLayer, id)
self.selectLayer(id, currentLayer)
else:
self.errorMessage()
def errorMessage(self):
"""
Shows am error message
"""
QMessageBox.warning(self.iface.mainWindow(), self.tr(u"ERROR:"), self.tr(u"<font color=red>There are no features in the current layer:<br></font><font color=blue>Add features and try again!</font>"), QMessageBox.Close)
def selectLayer(self, index, currentLayer):
"""
Remove current layer feature selection
currentLayer: layer that will have the feature selection removed
"""
if currentLayer:
currentLayer.removeSelection()
currentLayer.select(index)
def zoomToLayer(self, layer, zoom = None):
box = layer.boundingBoxOfSelected()
if zoom is not None:
box.grow(100-zoom)
# Defining the crs from src and destiny
epsg = self.iface.mapCanvas().mapSettings().destinationCrs().authid()
crsDest = QgsCoordinateReferenceSystem(epsg)
#getting srid from something like 'EPSG:31983'
if not layer:
layer = self.iface.mapCanvas().currentLayer()
srid = layer.crs().authid()
crsSrc = QgsCoordinateReferenceSystem(srid) #here we have to put authid, not srid
# Creating a transformer
coordinateTransformer = QgsCoordinateTransform(crsSrc, crsDest, QgsProject.instance())
newBox = coordinateTransformer.transform(box)
self.iface.mapCanvas().setExtent(newBox)
self.iface.mapCanvas().refresh()
def zoomFeature(self, zoom, idDict = None):
"""
Zooms to current layer selected features according to a specific zoom
zoom: zoom to be applied
"""
idDict = dict() if idDict is None else idDict
currentLayer = self.getIterateLayer()
if idDict == {}:
self.zoomToLayer(currentLayer, zoom=float(zoom))
else:
id = idDict['id']
lyr = idDict['lyr']
selectIdList = lyr.selectedFeatureIds()
lyr.removeSelection()
lyr.selectByIds([id])
self.zoomToLayer(layer = lyr, zoom=float(zoom))
lyr.selectByIds(selectIdList)
if self.getIterateLayer().geometryType() == QgsWkbTypes.PointGeometry:
self.iface.mapCanvas().zoomScale(float(zoom))
@pyqtSlot(bool, name = 'on_inspectPushButton_toggled')
def toggleBar(self, toggled=None):
"""
Shows/Hides the tool bar
"""
if toggled is None:
toggled = self.inspectPushButton.isChecked()
if toggled:
self.splitter.show()
self.enableTool(self.mMapLayerComboBox.currentLayer())
self.setToolTip(self.tr('Select a vector layer to enable tool'))
else:
self.splitter.hide()
self.enableTool(False)
self.setToolTip('')
def setValues(self, featIdList, currentLayer):
lyrName = currentLayer.name()
featIdList.sort()
self.allLayers[lyrName] = 0
maxIndex = len(featIdList) - 1
minIndex = 0
self.idSpinBox.setMaximum(featIdList[maxIndex])
self.idSpinBox.setMinimum(featIdList[minIndex])
#getting the new feature id
id = featIdList[0]
        # adjusting the spin box value
self.idxChanged.emit(id)
#self.idSpinBox.setValue(id)
zoom = self.mScaleWidget.scale()
self.makeZoom(zoom, currentLayer, id)
def makeZoom(self, zoom, currentLayer, id):
#selecting and zooming to the feature
# if not self.onlySelectedRadioButton.isChecked():
# self.selectLayer(id, currentLayer)
# self.zoomFeature(zoom)
# else:
self.zoomFeature(zoom, idDict = {'id':id, 'lyr':currentLayer})
@pyqtSlot(bool)
def on_onlySelectedRadioButton_toggled(self, toggled):
currentLayer = self.getIterateLayer()
if toggled:
featIdList = currentLayer.selectedFeatureIds()
self.setValues(featIdList, currentLayer)
self.idSpinBox.setEnabled(False)
else:
featIdList = currentLayer.allFeatureIds()
self.setValues(featIdList, currentLayer)
self.idSpinBox.setEnabled(True)
@pyqtSlot(bool)
def on_refreshPushButton_clicked(self):
activeLayer = self.iface.activeLayer()
if isinstance(activeLayer, QgsVectorLayer):
self.mMapLayerComboBox.setLayer(activeLayer)
else:
self.iface.messageBar().pushMessage(self.tr('Warning!'), self.tr('Active layer is not valid to be used in this tool.'), level=Qgis.Warning, duration=2)
def unload(self):
self.iface.unregisterMainWindowAction(self.activateToolAction)
self.iface.unregisterMainWindowAction(self.backButtonAction)
self.iface.unregisterMainWindowAction(self.nextButtonAction)
| gpl-2.0 | -7,068,429,154,627,036,000 | 41.971129 | 226 | 0.605424 | false |
william-richard/moto | moto/managedblockchain/responses.py | 1 | 17730 | from __future__ import unicode_literals
import json
from six.moves.urllib.parse import urlparse, parse_qs
from moto.core.responses import BaseResponse
from .models import managedblockchain_backends
from .utils import (
region_from_managedblckchain_url,
networkid_from_managedblockchain_url,
proposalid_from_managedblockchain_url,
invitationid_from_managedblockchain_url,
memberid_from_managedblockchain_request,
nodeid_from_managedblockchain_url,
)
class ManagedBlockchainResponse(BaseResponse):
def __init__(self, backend):
super(ManagedBlockchainResponse, self).__init__()
self.backend = backend
@classmethod
def network_response(clazz, request, full_url, headers):
region_name = region_from_managedblckchain_url(full_url)
response_instance = ManagedBlockchainResponse(
managedblockchain_backends[region_name]
)
return response_instance._network_response(request, full_url, headers)
def _network_response(self, request, full_url, headers):
method = request.method
if hasattr(request, "body"):
body = request.body
else:
body = request.data
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
if method == "GET":
return self._all_networks_response(request, full_url, headers)
elif method == "POST":
json_body = json.loads(body.decode("utf-8"))
return self._network_response_post(json_body, querystring, headers)
def _all_networks_response(self, request, full_url, headers):
mbcnetworks = self.backend.list_networks()
response = json.dumps(
{"Networks": [mbcnetwork.to_dict() for mbcnetwork in mbcnetworks]}
)
headers["content-type"] = "application/json"
return 200, headers, response
def _network_response_post(self, json_body, querystring, headers):
name = json_body["Name"]
framework = json_body["Framework"]
frameworkversion = json_body["FrameworkVersion"]
frameworkconfiguration = json_body["FrameworkConfiguration"]
voting_policy = json_body["VotingPolicy"]
member_configuration = json_body["MemberConfiguration"]
# Optional
description = json_body.get("Description", None)
response = self.backend.create_network(
name,
framework,
frameworkversion,
frameworkconfiguration,
voting_policy,
member_configuration,
description,
)
return 200, headers, json.dumps(response)
@classmethod
def networkid_response(clazz, request, full_url, headers):
region_name = region_from_managedblckchain_url(full_url)
response_instance = ManagedBlockchainResponse(
managedblockchain_backends[region_name]
)
return response_instance._networkid_response(request, full_url, headers)
def _networkid_response(self, request, full_url, headers):
method = request.method
if method == "GET":
network_id = networkid_from_managedblockchain_url(full_url)
return self._networkid_response_get(network_id, headers)
def _networkid_response_get(self, network_id, headers):
mbcnetwork = self.backend.get_network(network_id)
response = json.dumps({"Network": mbcnetwork.get_format()})
headers["content-type"] = "application/json"
return 200, headers, response
@classmethod
def proposal_response(clazz, request, full_url, headers):
region_name = region_from_managedblckchain_url(full_url)
response_instance = ManagedBlockchainResponse(
managedblockchain_backends[region_name]
)
return response_instance._proposal_response(request, full_url, headers)
def _proposal_response(self, request, full_url, headers):
method = request.method
if hasattr(request, "body"):
body = request.body
else:
body = request.data
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
network_id = networkid_from_managedblockchain_url(full_url)
if method == "GET":
return self._all_proposals_response(network_id, headers)
elif method == "POST":
json_body = json.loads(body.decode("utf-8"))
return self._proposal_response_post(
network_id, json_body, querystring, headers
)
def _all_proposals_response(self, network_id, headers):
proposals = self.backend.list_proposals(network_id)
response = json.dumps(
{"Proposals": [proposal.to_dict() for proposal in proposals]}
)
headers["content-type"] = "application/json"
return 200, headers, response
def _proposal_response_post(self, network_id, json_body, querystring, headers):
memberid = json_body["MemberId"]
actions = json_body["Actions"]
# Optional
description = json_body.get("Description", None)
response = self.backend.create_proposal(
network_id, memberid, actions, description,
)
return 200, headers, json.dumps(response)
@classmethod
def proposalid_response(clazz, request, full_url, headers):
region_name = region_from_managedblckchain_url(full_url)
response_instance = ManagedBlockchainResponse(
managedblockchain_backends[region_name]
)
return response_instance._proposalid_response(request, full_url, headers)
def _proposalid_response(self, request, full_url, headers):
method = request.method
network_id = networkid_from_managedblockchain_url(full_url)
if method == "GET":
proposal_id = proposalid_from_managedblockchain_url(full_url)
return self._proposalid_response_get(network_id, proposal_id, headers)
def _proposalid_response_get(self, network_id, proposal_id, headers):
proposal = self.backend.get_proposal(network_id, proposal_id)
response = json.dumps({"Proposal": proposal.get_format()})
headers["content-type"] = "application/json"
return 200, headers, response
@classmethod
def proposal_votes_response(clazz, request, full_url, headers):
region_name = region_from_managedblckchain_url(full_url)
response_instance = ManagedBlockchainResponse(
managedblockchain_backends[region_name]
)
return response_instance._proposal_votes_response(request, full_url, headers)
def _proposal_votes_response(self, request, full_url, headers):
method = request.method
if hasattr(request, "body"):
body = request.body
else:
body = request.data
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
network_id = networkid_from_managedblockchain_url(full_url)
proposal_id = proposalid_from_managedblockchain_url(full_url)
if method == "GET":
return self._all_proposal_votes_response(network_id, proposal_id, headers)
elif method == "POST":
json_body = json.loads(body.decode("utf-8"))
return self._proposal_votes_response_post(
network_id, proposal_id, json_body, querystring, headers
)
def _all_proposal_votes_response(self, network_id, proposal_id, headers):
proposalvotes = self.backend.list_proposal_votes(network_id, proposal_id)
response = json.dumps({"ProposalVotes": proposalvotes})
headers["content-type"] = "application/json"
return 200, headers, response
def _proposal_votes_response_post(
self, network_id, proposal_id, json_body, querystring, headers
):
votermemberid = json_body["VoterMemberId"]
vote = json_body["Vote"]
self.backend.vote_on_proposal(
network_id, proposal_id, votermemberid, vote,
)
return 200, headers, ""
@classmethod
def invitation_response(clazz, request, full_url, headers):
region_name = region_from_managedblckchain_url(full_url)
response_instance = ManagedBlockchainResponse(
managedblockchain_backends[region_name]
)
return response_instance._invitation_response(request, full_url, headers)
def _invitation_response(self, request, full_url, headers):
method = request.method
if method == "GET":
return self._all_invitation_response(request, full_url, headers)
def _all_invitation_response(self, request, full_url, headers):
invitations = self.backend.list_invitations()
response = json.dumps(
{"Invitations": [invitation.to_dict() for invitation in invitations]}
)
headers["content-type"] = "application/json"
return 200, headers, response
@classmethod
def invitationid_response(clazz, request, full_url, headers):
region_name = region_from_managedblckchain_url(full_url)
response_instance = ManagedBlockchainResponse(
managedblockchain_backends[region_name]
)
return response_instance._invitationid_response(request, full_url, headers)
def _invitationid_response(self, request, full_url, headers):
method = request.method
if method == "DELETE":
invitation_id = invitationid_from_managedblockchain_url(full_url)
return self._invitationid_response_delete(invitation_id, headers)
def _invitationid_response_delete(self, invitation_id, headers):
self.backend.reject_invitation(invitation_id)
headers["content-type"] = "application/json"
return 200, headers, ""
@classmethod
def member_response(clazz, request, full_url, headers):
region_name = region_from_managedblckchain_url(full_url)
response_instance = ManagedBlockchainResponse(
managedblockchain_backends[region_name]
)
return response_instance._member_response(request, full_url, headers)
def _member_response(self, request, full_url, headers):
method = request.method
if hasattr(request, "body"):
body = request.body
else:
body = request.data
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
network_id = networkid_from_managedblockchain_url(full_url)
if method == "GET":
return self._all_members_response(network_id, headers)
elif method == "POST":
json_body = json.loads(body.decode("utf-8"))
return self._member_response_post(
network_id, json_body, querystring, headers
)
def _all_members_response(self, network_id, headers):
members = self.backend.list_members(network_id)
response = json.dumps({"Members": [member.to_dict() for member in members]})
headers["content-type"] = "application/json"
return 200, headers, response
def _member_response_post(self, network_id, json_body, querystring, headers):
invitationid = json_body["InvitationId"]
member_configuration = json_body["MemberConfiguration"]
response = self.backend.create_member(
invitationid, network_id, member_configuration,
)
return 200, headers, json.dumps(response)
@classmethod
def memberid_response(clazz, request, full_url, headers):
region_name = region_from_managedblckchain_url(full_url)
response_instance = ManagedBlockchainResponse(
managedblockchain_backends[region_name]
)
return response_instance._memberid_response(request, full_url, headers)
def _memberid_response(self, request, full_url, headers):
method = request.method
if hasattr(request, "body"):
body = request.body
else:
body = request.data
network_id = networkid_from_managedblockchain_url(full_url)
member_id = memberid_from_managedblockchain_request(full_url, body)
if method == "GET":
return self._memberid_response_get(network_id, member_id, headers)
elif method == "PATCH":
json_body = json.loads(body.decode("utf-8"))
return self._memberid_response_patch(
network_id, member_id, json_body, headers
)
elif method == "DELETE":
return self._memberid_response_delete(network_id, member_id, headers)
def _memberid_response_get(self, network_id, member_id, headers):
member = self.backend.get_member(network_id, member_id)
response = json.dumps({"Member": member.get_format()})
headers["content-type"] = "application/json"
return 200, headers, response
def _memberid_response_patch(self, network_id, member_id, json_body, headers):
logpublishingconfiguration = json_body["LogPublishingConfiguration"]
self.backend.update_member(
network_id, member_id, logpublishingconfiguration,
)
return 200, headers, ""
def _memberid_response_delete(self, network_id, member_id, headers):
self.backend.delete_member(network_id, member_id)
headers["content-type"] = "application/json"
return 200, headers, ""
@classmethod
def node_response(clazz, request, full_url, headers):
region_name = region_from_managedblckchain_url(full_url)
response_instance = ManagedBlockchainResponse(
managedblockchain_backends[region_name]
)
return response_instance._node_response(request, full_url, headers)
def _node_response(self, request, full_url, headers):
method = request.method
if hasattr(request, "body"):
body = request.body
else:
body = request.data
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
network_id = networkid_from_managedblockchain_url(full_url)
member_id = memberid_from_managedblockchain_request(full_url, body)
if method == "GET":
status = None
if "status" in querystring:
status = querystring["status"][0]
return self._all_nodes_response(network_id, member_id, status, headers)
elif method == "POST":
json_body = json.loads(body.decode("utf-8"))
return self._node_response_post(
network_id, member_id, json_body, querystring, headers
)
def _all_nodes_response(self, network_id, member_id, status, headers):
nodes = self.backend.list_nodes(network_id, member_id, status)
response = json.dumps({"Nodes": [node.to_dict() for node in nodes]})
headers["content-type"] = "application/json"
return 200, headers, response
def _node_response_post(
self, network_id, member_id, json_body, querystring, headers
):
instancetype = json_body["NodeConfiguration"]["InstanceType"]
availabilityzone = json_body["NodeConfiguration"]["AvailabilityZone"]
logpublishingconfiguration = json_body["NodeConfiguration"][
"LogPublishingConfiguration"
]
response = self.backend.create_node(
network_id,
member_id,
availabilityzone,
instancetype,
logpublishingconfiguration,
)
return 200, headers, json.dumps(response)
@classmethod
def nodeid_response(clazz, request, full_url, headers):
region_name = region_from_managedblckchain_url(full_url)
response_instance = ManagedBlockchainResponse(
managedblockchain_backends[region_name]
)
return response_instance._nodeid_response(request, full_url, headers)
def _nodeid_response(self, request, full_url, headers):
method = request.method
if hasattr(request, "body"):
body = request.body
else:
body = request.data
network_id = networkid_from_managedblockchain_url(full_url)
member_id = memberid_from_managedblockchain_request(full_url, body)
node_id = nodeid_from_managedblockchain_url(full_url)
if method == "GET":
return self._nodeid_response_get(network_id, member_id, node_id, headers)
elif method == "PATCH":
json_body = json.loads(body.decode("utf-8"))
return self._nodeid_response_patch(
network_id, member_id, node_id, json_body, headers
)
elif method == "DELETE":
return self._nodeid_response_delete(network_id, member_id, node_id, headers)
def _nodeid_response_get(self, network_id, member_id, node_id, headers):
node = self.backend.get_node(network_id, member_id, node_id)
response = json.dumps({"Node": node.get_format()})
headers["content-type"] = "application/json"
return 200, headers, response
def _nodeid_response_patch(
self, network_id, member_id, node_id, json_body, headers
):
logpublishingconfiguration = json_body
self.backend.update_node(
network_id, member_id, node_id, logpublishingconfiguration,
)
return 200, headers, ""
def _nodeid_response_delete(self, network_id, member_id, node_id, headers):
self.backend.delete_node(network_id, member_id, node_id)
headers["content-type"] = "application/json"
return 200, headers, ""
| apache-2.0 | -3,417,318,426,714,210,000 | 40.522248 | 88 | 0.640948 | false |
hanteng/pyCountrySize | pyCountrySize/demograph_IPop_PPPGDP.py | 1 | 1593 | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import pyCountrySize
#print "pyCountrySize.sizec.mean():\n",pyCountrySize.sizec.mean()
#print "pyCountrySize.LP.mean():\n",pyCountrySize.LP.mean()
# Customizing figures
import matplotlib.pyplot as plt
import matplotlib as mpl
from ggplot import ggplot, ggsave, aes, geom_point, geom_text, geom_smooth, labs, theme_matplotlib, theme, element_text
mpl.rcParams["axes.labelsize"] = 18.0
mpl.rcParams["axes.grid"] = True
mpl.rcParams["font.size"] = 12.0#
mpl.rcParams["axes.edgecolor"] = "black"
mpl.rcParams["axes.labelcolor"] = "black"
mpl.rcParams["grid.color"] = "grey" # or whatever you want
mpl.rcParams["figure.subplot.wspace"] = 0.05
mpl.rcParams["figure.figsize"] = [8*2, 6]
mpl.rcParams["figure.subplot.left"] = 0.15
mpl.rcParams["figure.subplot.right"] = 0.97
mpl.rcParams["figure.subplot.bottom"] = 0.20
mpl.rcParams["figure.figsize"] = [8, 6]
(x_picked, y_picked)=("PPPGDP", "IPop")
dslice=pyCountrySize.sizec.loc[['IPop','PPPGDP'],:,2013].dropna()
p_d = ggplot(aes(x=x_picked, y=y_picked, label=dslice.index.values), data=dslice)
p=p_d+geom_point()+\
geom_text(aes(hjust = 0, vjust = 0, size=10, color='darkblue'))+\
geom_smooth(aes(x=x_picked, y=y_picked), method='lm', se=False, color='grey')+\
labs(x = ":\n".join([x_picked, pyCountrySize.meta[x_picked]]), y = ":\n".join([y_picked, pyCountrySize.meta[y_picked]])) +\
theme_matplotlib()+ theme(axis_text_x = element_text(angle = 40, hjust = 1))
#print p
ggsave(p, "output_%s_%s.png" % (y_picked, x_picked) )
| gpl-3.0 | -1,971,714,729,285,226,500 | 35.204545 | 134 | 0.673572 | false |
JonnyPugh/WordpostBot | Project/post_statistics.py | 1 | 2409 | #!/usr/bin/env python
from extensions import *
from config import page_info
def main():
# Form list of IDs of all posts made to the page
post_ids = [post["id"] for post in execute_query("select id from Posts")]
# Form reaction info for each post made to the page
users = {}
user_names = {}
for post_id in post_ids:
json = get_request_json("https://graph.facebook.com/v2.8/"+post_id+"/reactions", {"access_token": page_info["access_token"]})
while json["data"]:
for reaction in json["data"]:
user_id = reaction["id"]
user_names[user_id] = reaction["name"]
if user_id not in users:
users[user_id] = {}
reaction_type = reaction["type"]
if reaction_type not in users[user_id]:
users[user_id][reaction_type] = 0
users[user_id][reaction_type] += 1
if "next" not in json["paging"]:
break
json = get_request_json(json["paging"]["next"])
# Form the reaction info strings for all users
emoticons = {
"LIKE": "\xF0\x9F\x91\x8D",
"LOVE": "\xF0\x9F\x92\x9F",
"HAHA": "\xF0\x9F\x98\x86",
"WOW": "\xF0\x9F\x98\xAE",
"SAD": "\xF0\x9F\x98\xA2",
"ANGRY": "\xF0\x9F\x98\xA1",
"THANKFUL": "\xF0\x9F\x8C\xB8",
"PRIDE": "\xF0\x9F\x8C\x88"
}
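    # These values are UTF-8 byte sequences (this module is Python 2 code);
    # they are decoded back to unicode when the breakdown string is built.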
overall_total_reactions = 0
users_info = []
for user_id, user_info in users.items():
reactions_breakdown = " ".join([" ".join([emoticons[reaction_type], str(num)]) for (reaction_type, num) in sorted(user_info.items(), key=lambda x: x[1], reverse=True)])
total_reactions = sum(user_info.values())
users_info.append((total_reactions, user_names[user_id]+" - "+str(total_reactions)+": "+reactions_breakdown.decode("utf-8")))
overall_total_reactions += total_reactions
# Form the message to post to the page
number_of_reactors = 10
message = "***Top "+str(number_of_reactors)+" Reactors***\n"
ranking = 1
for reactions_info in sorted(users_info, key=lambda x: x[0], reverse=True)[:number_of_reactors]:
message += str(ranking)+". "+reactions_info[1]+"\n"
ranking += 1
message += "Average reactions per post: "+str(float(overall_total_reactions) / len(post_ids))
# Post the message to the page and log it
post_to_page(page_info["page_id"]+"/feed", message)
write_to_log(posts_log, "Finished posting statistics")
if __name__ == "__main__":
try:
main()
except Exception as e:
write_to_log(error_log, "Unexpected error caught while posting statistics: "+str(e))
| mit | 7,440,625,606,803,652,000 | 36.061538 | 170 | 0.651308 | false |
wmvanvliet/mne-python | tutorials/evoked/plot_20_visualize_evoked.py | 1 | 12628 | """
.. _tut-visualize-evoked:
Visualizing Evoked data
=======================
This tutorial shows the different visualization methods for
`~mne.Evoked` objects.
As usual we'll start by importing the modules we need:
"""
import os
import numpy as np
import mne
###############################################################################
# Instead of creating the `~mne.Evoked` object from an `~mne.Epochs` object,
# we'll load an existing `~mne.Evoked` object from disk. Remember, the
# :file:`.fif` format can store multiple `~mne.Evoked` objects, so we'll end up
# with a `list` of `~mne.Evoked` objects after loading. Recall also from the
# :ref:`tut-section-load-evk` section of :ref:`the introductory Evoked tutorial
# <tut-evoked-class>` that the sample `~mne.Evoked` objects have not been
# baseline-corrected and have unapplied projectors, so we'll take care of that
# when loading:
sample_data_folder = mne.datasets.sample.data_path()
sample_data_evk_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis-ave.fif')
evokeds_list = mne.read_evokeds(sample_data_evk_file, baseline=(None, 0),
proj=True, verbose=False)
# show the condition names
for e in evokeds_list:
print(e.comment)
###############################################################################
# To make our life easier, let's convert that list of `~mne.Evoked`
# objects into a :class:`dictionary <dict>`. We'll use ``/``-separated
# dictionary keys to encode the conditions (like is often done when epoching)
# because some of the plotting methods can take advantage of that style of
# coding.
conds = ('aud/left', 'aud/right', 'vis/left', 'vis/right')
evks = dict(zip(conds, evokeds_list))
# ‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾ this is equivalent to:
# {'aud/left': evokeds_list[0], 'aud/right': evokeds_list[1],
# 'vis/left': evokeds_list[2], 'vis/right': evokeds_list[3]}
###############################################################################
# Plotting signal traces
# ^^^^^^^^^^^^^^^^^^^^^^
#
# .. sidebar:: Butterfly plots
#
# Plots of superimposed sensor timeseries are called "butterfly plots"
# because the positive- and negative-going traces can resemble butterfly
# wings.
#
# The most basic plot of `~mne.Evoked` objects is a butterfly plot of
# each channel type, generated by the `evoked.plot() <mne.Evoked.plot>`
# method. By default, channels marked as "bad" are suppressed, but you can
# control this by passing an empty :class:`list` to the ``exclude`` parameter
# (default is ``exclude='bads'``):
evks['aud/left'].plot(exclude=[])
###############################################################################
# Notice the completely flat EEG channel and the noisy gradiometer channel
# plotted in red color. Like many MNE-Python plotting functions,
# `evoked.plot() <mne.Evoked.plot>` has a ``picks`` parameter that can
# select channels to plot by name, index, or type. In the next plot we'll show
# only magnetometer channels, and also color-code the channel traces by their
# location by passing ``spatial_colors=True``. Finally, we'll superimpose a
# trace of the root mean square (RMS) of the signal across channels by
# passing ``gfp=True``. This parameter is called ``gfp`` for historical
# reasons and behaves correctly for all supported channel types: for MEG data,
# it will plot the RMS; while for EEG, it would plot the
# :term:`global field power <GFP>` (an average-referenced RMS), hence its
# name:
evks['aud/left'].plot(picks='mag', spatial_colors=True, gfp=True)
###############################################################################
# Plotting scalp topographies
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# In an interactive session, the butterfly plots seen above can be
# click-dragged to select a time region, which will pop up a map of the average
# field distribution over the scalp for the selected time span. You can also
# generate scalp topographies at specific times or time spans using the
# `~mne.Evoked.plot_topomap` method:
times = np.linspace(0.05, 0.13, 5)
evks['aud/left'].plot_topomap(ch_type='mag', times=times, colorbar=True)
###############################################################################
fig = evks['aud/left'].plot_topomap(ch_type='mag', times=0.09, average=0.1)
fig.text(0.5, 0.05, 'average from 40-140 ms', ha='center')
###############################################################################
# Additional examples of plotting scalp topographies can be found in
# :ref:`ex-evoked-topomap`.
#
#
# Arrow maps
# ^^^^^^^^^^
#
# Scalp topographies at a given time point can be augmented with arrows to show
# the estimated magnitude and direction of the magnetic field, using the
# function `mne.viz.plot_arrowmap`:
mags = evks['aud/left'].copy().pick_types(meg='mag')
mne.viz.plot_arrowmap(mags.data[:, 175], mags.info, extrapolate='local')
###############################################################################
# Joint plots
# ^^^^^^^^^^^
#
# Joint plots combine butterfly plots with scalp topographies, and provide an
# excellent first-look at evoked data; by default, topographies will be
# automatically placed based on peak finding. Here we plot the
# right-visual-field condition; if no ``picks`` are specified we get a separate
# figure for each channel type:
# sphinx_gallery_thumbnail_number = 7
evks['vis/right'].plot_joint()
###############################################################################
# Like `~mne.Evoked.plot_topomap` you can specify the ``times`` at which
# you want the scalp topographies calculated, and you can customize the plot in
# various other ways as well. See `mne.Evoked.plot_joint` for details.
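# For example, the topography times can be pinned and the channel type
# restricted (the latencies here are arbitrary points within the epoch)::
#
#     evks['vis/right'].plot_joint(times=[0.09, 0.18], picks='mag')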
#
#
# Comparing ``Evoked`` objects
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# To compare `~mne.Evoked` objects from different experimental
# conditions, the function `mne.viz.plot_compare_evokeds` can take a
# :class:`list` or :class:`dict` of `~mne.Evoked` objects and plot them
# all on the same axes. Like most MNE-Python visualization functions, it has a
# ``picks`` parameter for selecting channels, but by default will generate one
# figure for each channel type, and combine information across channels of the
# same type by calculating the :term:`global field power`. Information
# may be combined across channels in other ways too; support for combining via
# mean, median, or standard deviation are built-in, and custom callable
# functions may also be used, as shown here:
def custom_func(x):
return x.max(axis=1)
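# ``plot_compare_evokeds`` hands the callable an array of shape
# (n_evokeds, n_channels, n_times), so taking the max over ``axis=1``
# collapses the channel dimension and leaves one peak trace per condition.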
for combine in ('mean', 'median', 'gfp', custom_func):
mne.viz.plot_compare_evokeds(evks, picks='eeg', combine=combine)
###############################################################################
# One nice feature of `~mne.viz.plot_compare_evokeds` is that when
# passing evokeds in a dictionary, it allows specifying plot styles based on
# ``/``-separated substrings of the dictionary keys (similar to epoch
# selection; see :ref:`tut-section-subselect-epochs`). Here, we specify colors
# for "aud" and "vis" conditions, and linestyles for "left" and "right"
# conditions, and the traces and legend are styled accordingly.
mne.viz.plot_compare_evokeds(evks, picks='MEG 1811', colors=dict(aud=0, vis=1),
linestyles=dict(left='solid', right='dashed'))
###############################################################################
# The legends generated by `~mne.viz.plot_compare_evokeds` above used the
# dictionary keys provided by the ``evks`` variable. If instead you pass a
# :class:`list` or :class:`tuple` of `~mne.Evoked` objects, the legend keys
# will be generated automatically from the ``comment`` attribute of the
# `~mne.Evoked` objects (or, as sequential integers if the comment attribute is
# empty or ambiguous). To illustrate this, we'll make a list of 5 `~mne.Evoked`
# objects: 2 with identical comments, 2 with empty comments (either an empty
# string or ``None``), and 1 with a unique non-empty comment:
temp_list = list()
for idx, _comment in enumerate(('foo', 'foo', '', None, 'bar'), start=1):
_evk = evokeds_list[0].copy()
_evk.comment = _comment
_evk.data *= idx # so we can tell the traces apart
temp_list.append(_evk)
mne.viz.plot_compare_evokeds(temp_list, picks='mag')
###############################################################################
# Image plots
# ^^^^^^^^^^^
#
# Like `~mne.Epochs`, `~mne.Evoked` objects also have a
# `~mne.Evoked.plot_image` method, but unlike `epochs.plot_image()
# <mne.Epochs.plot_image>`, `evoked.plot_image() <mne.Evoked.plot_image>`
# shows one *channel* per row instead of one *epoch* per row. Again, a
# ``picks`` parameter is available, as well as several other customization
# options; see `~mne.Evoked.plot_image` for details.
evks['vis/right'].plot_image(picks='meg')
###############################################################################
# Topographical subplots
# ^^^^^^^^^^^^^^^^^^^^^^
#
# For sensor-level analyses it can be useful to plot the response at each
# sensor in a topographical layout. The `~mne.viz.plot_compare_evokeds`
# function can do this if you pass ``axes='topo'``, but it can be quite slow
# if there are many sensors, so here we'll plot only the EEG
# channels:
mne.viz.plot_compare_evokeds(evks, picks='eeg', colors=dict(aud=0, vis=1),
linestyles=dict(left='solid', right='dashed'),
axes='topo', styles=dict(aud=dict(linewidth=1),
vis=dict(linewidth=1)))
###############################################################################
# For larger numbers of sensors, the method `evoked.plot_topo()
# <mne.Evoked.plot_topo>` and the function `mne.viz.plot_evoked_topo`
# can both be used. The `~mne.Evoked.plot_topo` method will plot only a
# single condition, while the `~mne.viz.plot_evoked_topo` function can
# plot one or more conditions on the same axes, if passed a list of
# `~mne.Evoked` objects. The legend entries will be automatically drawn
# from the `~mne.Evoked` objects' ``comment`` attribute:
mne.viz.plot_evoked_topo(evokeds_list)
###############################################################################
# By default, `~mne.viz.plot_evoked_topo` will plot all MEG sensors (if
# present), so to get EEG sensors you would need to modify the evoked objects
# first (e.g., using the `~mne.Evoked.pick_types` method). For example, here is
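# a minimal sketch reusing the ``evokeds_list`` created earlier:
eeg_evokeds = [evk.copy().pick_types(meg=False, eeg=True)
               for evk in evokeds_list]
mne.viz.plot_evoked_topo(eeg_evokeds)
###############################################################################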
#
# .. note::
#
# In interactive sessions, both approaches to topographical plotting allow
# you to click one of the sensor subplots to pop open a larger version of
# the evoked plot at that sensor.
#
#
# 3D field maps
# ^^^^^^^^^^^^^
#
# The scalp topographies above were all projected into 2-dimensional overhead
# views of the field, but it is also possible to plot field maps in 3D. Doing
# this requires a :term:`trans` file to transform locations between the
# coordinate systems of the MEG device and the head surface (based on the MRI).
# You *can* compute 3D field maps without a ``trans`` file, but it will only
# work for calculating the field *on the MEG helmet from the MEG sensors*.
subjects_dir = os.path.join(sample_data_folder, 'subjects')
sample_data_trans_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw-trans.fif')
###############################################################################
# By default, MEG sensors will be used to estimate the field on the helmet
# surface, while EEG sensors will be used to estimate the field on the scalp.
# Once the maps are computed, you can plot them with `evoked.plot_field()
# <mne.Evoked.plot_field>`:
maps = mne.make_field_map(evks['aud/left'], trans=sample_data_trans_file,
subject='sample', subjects_dir=subjects_dir)
evks['aud/left'].plot_field(maps, time=0.1)
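# As a sketch of the no-``trans`` case mentioned above, the helmet-surface
# map can be computed from the MEG sensors alone (the ``ch_type`` and
# ``meg_surf`` arguments here are our assumption of the relevant options):
helmet_map = mne.make_field_map(evks['aud/left'], trans=None, ch_type='meg',
                                meg_surf='helmet')
evks['aud/left'].plot_field(helmet_map, time=0.1)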
###############################################################################
# You can also use MEG sensors to estimate the *scalp* field by passing
# ``meg_surf='head'``. By selecting each sensor type in turn, you can compare
# the scalp field estimates from each.
for ch_type in ('mag', 'grad', 'eeg'):
evk = evks['aud/right'].copy().pick(ch_type)
_map = mne.make_field_map(evk, trans=sample_data_trans_file,
subject='sample', subjects_dir=subjects_dir,
meg_surf='head')
fig = evk.plot_field(_map, time=0.1)
mne.viz.set_3d_title(fig, ch_type, size=20)
| bsd-3-clause | -7,932,918,158,828,842,000 | 45.03663 | 79 | 0.617919 | false |
stoq/kiwi | kiwi/ui/widgets/textview.py | 1 | 2984 | #
# Kiwi: a Framework and Enhanced Widgets for Python
#
# Copyright (C) 2003-2005 Async Open Source
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# Author(s): Christian Reis <[email protected]>
# Gustavo Rahal <[email protected]>
# Evandro Vale Miquelito <[email protected]>
# Johan Dahlin <[email protected]>
"""GtkTextView support for the Kiwi Framework"""
import datetime
from gi.repository import Gtk, GObject
from kiwi import ValueUnset
from kiwi.datatypes import number
from kiwi.ui.proxywidget import ValidatableProxyWidgetMixin
from kiwi.utils import gsignal
class ProxyTextView(Gtk.TextView, ValidatableProxyWidgetMixin):
__gtype_name__ = 'ProxyTextView'
data_value = GObject.Property(type=str, nick='Data Value')
data_type = GObject.Property(
getter=ValidatableProxyWidgetMixin.get_data_type,
setter=ValidatableProxyWidgetMixin.set_data_type,
type=str, blurb='Data Type')
mandatory = GObject.Property(type=bool, default=False)
model_attribute = GObject.Property(type=str, blurb='Model attribute')
gsignal('content-changed')
gsignal('validation-changed', bool)
gsignal('validate', object, retval=object)
allowed_data_types = (str, datetime.date) + number
def __init__(self):
Gtk.TextView.__init__(self)
ValidatableProxyWidgetMixin.__init__(self)
self.data_type = str
self._textbuffer = Gtk.TextBuffer()
self._textbuffer.connect('changed',
self._on_textbuffer__changed)
self.set_buffer(self._textbuffer)
def _on_textbuffer__changed(self, textbuffer):
self.emit('content-changed')
self.read()
def read(self):
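        """Return the buffer contents converted to this widget's data type."""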
textbuffer = self._textbuffer
data = textbuffer.get_text(textbuffer.get_start_iter(),
textbuffer.get_end_iter(),
True)
return self._from_string(data)
def update(self, data):
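        """Write ``data`` into the text buffer, converting it to a string first."""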
if data is ValueUnset or data is None:
text = ""
else:
text = self._as_string(data)
if self.props.mandatory:
self.emit('validation-changed', bool(text))
self._textbuffer.set_text(text)
GObject.type_register(ProxyTextView)
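if __name__ == '__main__':
    # Minimal usage sketch (an illustration only; it assumes a working GTK
    # environment and relies on the mixin's string-conversion helpers):
    view = ProxyTextView()
    view.data_type = int
    view.update(42)      # rendered into the buffer as "42"
    print(view.read())   # -> 42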
| lgpl-2.1 | 3,164,537,967,199,751,700 | 34.951807 | 73 | 0.672922 | false |
PythonCharmers/orange3 | Orange/widgets/classify/owmajority.py | 1 | 2418 | from Orange.data import Table
from Orange.classification.majority import MajorityLearner, ConstantModel
from Orange.preprocess.preprocess import Preprocess
from Orange.widgets import widget, gui
from Orange.widgets.settings import Setting
class OWMajority(widget.OWWidget):
name = "Majority"
description = "Classification to the most frequent class " \
"from the training set."
priority = 20
icon = "icons/Majority.svg"
inputs = [("Data", Table, "set_data"),
("Preprocessor", Preprocess, "set_preprocessor")]
outputs = [("Learner", MajorityLearner),
("Classifier", ConstantModel)]
learner_name = Setting("Majority")
want_main_area = False
resizing_enabled = False
def __init__(self):
super().__init__()
self.data = None
self.preprocessors = None
gui.lineEdit(
gui.widgetBox(self.controlArea, "Learner/Classifier Name"),
self, "learner_name"
)
gui.button(self.controlArea, self, "Apply", callback=self.apply,
default=True)
self.apply()
def set_data(self, data):
self.error(0)
if data is not None and not data.domain.has_discrete_class:
self.error(0, "Data does not have a discrete target variable")
data = None
self.data = data
if data is not None:
self.apply()
def set_preprocessor(self, preproc):
if preproc is None:
self.preprocessors = None
else:
self.preprocessors = (preproc,)
self.apply()
def apply(self):
learner = MajorityLearner(
preprocessors=self.preprocessors
)
learner.name = self.learner_name
classifier = None
if self.data is not None:
self.error(0)
if not learner.check_learner_adequacy(self.data.domain):
self.error(0, learner.learner_adequacy_err_msg)
else:
classifier = learner(self.data)
classifier.name = self.learner_name
self.send("Learner", learner)
self.send("Classifier", classifier)
if __name__ == "__main__":
import sys
from PyQt4.QtGui import QApplication
a = QApplication(sys.argv)
ow = OWMajority()
d = Table('iris')
ow.set_data(d)
ow.show()
a.exec_()
ow.saveSettings()
| gpl-3.0 | 4,447,121,934,365,058,000 | 28.13253 | 74 | 0.593879 | false |
matthew-brett/draft-statsmodels | scikits/statsmodels/examples/example_glsar.py | 1 | 4334 | '''
Example: scikits.statsmodels.GLSAR
6 examples for GLSAR with artificial data
Notes
------
These examples were written mostly to cross-check results. This file is still
being written, and GLSAR is still being worked on.
'''
import numpy as np
import numpy.testing as npt
from scipy import signal
import scikits.statsmodels as sm
from scikits.statsmodels.regression import GLSAR, yule_walker
examples_all = range(10) + ['test_copy']
examples = examples_all #[5]
if 0 in examples:
print '\n Example 0'
X = np.arange(1,8)
X = sm.add_constant(X)
Y = np.array((1, 3, 4, 5, 8, 10, 9))
rho = 2
model = GLSAR(Y, X, 2)
for i in range(6):
results = model.fit()
print "AR coefficients:", model.rho
rho, sigma = yule_walker(results.resid, order = model.order)
model = GLSAR(Y, X, rho)
par0 = results.params
print par0
model0if = GLSAR(Y, X, 2)
res = model0if.iterative_fit(6)
print 'iterativefit beta', res.params
results.t() # is this correct? it does equal params/bse
# but isn't the same as the AR example (which was wrong in the first place..)
print results.t_test([0,1]) # are sd and t correct? vs
print results.f_test(np.eye(2))
rhotrue = [0.5, 0.2]
rhotrue = np.asarray(rhotrue)
nlags = np.size(rhotrue)
beta = np.array([0.1, 2])
noiseratio = 0.5
nsample = 2000
x = np.arange(nsample)
X1 = sm.add_constant(x)
wnoise = noiseratio * np.random.randn(nsample+nlags)
#noise = noise[1:] + rhotrue*noise[:-1] # wrong this is not AR
#find my drafts for univariate ARMA functions
# generate AR(p)
if np.size(rhotrue) == 1:
# replace with scipy.signal.lfilter, keep for testing
arnoise = np.zeros(nsample+1)
for i in range(1,nsample+1):
arnoise[i] = rhotrue*arnoise[i-1] + wnoise[i]
noise = arnoise[1:]
an = signal.lfilter([1], np.hstack((1,-rhotrue)), wnoise[1:])
print 'simulate AR(1) difference', np.max(np.abs(noise-an))
else:
noise = signal.lfilter([1], np.hstack((1,-rhotrue)), wnoise)[nlags:]
# generate GLS model with AR noise
y1 = np.dot(X1,beta) + noise
if 1 in examples:
print '\nExample 1: iterative_fit and repeated calls'
mod1 = GLSAR(y1, X1, 1)
print mod1.results.params
print mod1.rho
for i in range(5):
mod1.iterative_fit(1)
print mod1.rho
print mod1.results.params
if 2 in examples:
print '\nExample 2: iterative fitting of first model'
print 'with AR(0)', par0
parold = par0
mod0 = GLSAR(Y, X, 1)
for i in range(5):
#print mod0.wexog.sum()
#print mod0.pinv_wexog.sum()
mod0.iterative_fit(1)
print 'rho', mod0.rho
parnew = mod0.results.params
print 'params', parnew
print 'params change in iteration', parnew - parold
parold = parnew
# generate pure AR(p) process
Y = noise
#example with no regressor,
#results now have same estimated rho as yule-walker directly
if 3 in examples:
print '\nExample 3: pure AR(2), GLSAR versus Yule_Walker'
model3 = GLSAR(Y, rho=2)
for i in range(5):
results = model3.fit()
print "AR coefficients:", model3.rho, results.params
rho, sigma = yule_walker(results.resid, order = model3.order)
model3 = GLSAR(Y, rho=rho)
if 'test_copy' in examples:
xx = X.copy()
rhoyw, sigmayw = yule_walker(xx[:,0], order = 2)
print rhoyw, sigmayw
print (xx == X).all() # test for unchanged array (fixed)
yy = Y.copy()
rhoyw, sigmayw = yule_walker(yy, order = 2)
print rhoyw, sigmayw
print (yy == Y).all() # test for unchanged array (fixed)
if 4 in examples:
print '\nExample 4: demeaned pure AR(2), GLSAR versus Yule_Walker'
Ydemeaned = Y - Y.mean()
model4 = GLSAR(Ydemeaned, rho=2)
for i in range(5):
results = model4.fit()
print "AR coefficients:", model3.rho, results.params
rho, sigma = yule_walker(results.resid, order = model4.order)
model4 = GLSAR(Ydemeaned, rho=rho)
if 5 in examples:
print '\nExample 5: pure AR(2), GLSAR iterative_fit versus Yule_Walker'
model3a = GLSAR(Y, rho=1)
res3a = model3a.iterative_fit(5)
print res3a.params
print model3a.rho
rhoyw, sigmayw = yule_walker(Y, order = 1)
print rhoyw, sigmayw
npt.assert_array_almost_equal(model3a.rho, rhoyw, 15)
| bsd-3-clause | 4,514,719,065,378,399,000 | 28.889655 | 81 | 0.643978 | false |
mancoast/CPythonPyc_test | fail/311_test_re.py | 1 | 38574 | from test.support import verbose, run_unittest
import re
from re import Scanner
import sys, os, traceback
from weakref import proxy
# Misc tests from Tim Peters' re.doc
# WARNING: Don't change details in these tests if you don't know
# what you're doing. Some of these tests were carefully modeled to
# cover most of the code.
import unittest
class ReTests(unittest.TestCase):
def test_weakref(self):
s = 'QabbbcR'
x = re.compile('ab+c')
y = proxy(x)
self.assertEqual(x.findall('QabbbcR'), y.findall('QabbbcR'))
def test_search_star_plus(self):
self.assertEqual(re.search('x*', 'axx').span(0), (0, 0))
self.assertEqual(re.search('x*', 'axx').span(), (0, 0))
self.assertEqual(re.search('x+', 'axx').span(0), (1, 3))
self.assertEqual(re.search('x+', 'axx').span(), (1, 3))
self.assertEqual(re.search('x', 'aaa'), None)
self.assertEqual(re.match('a*', 'xxx').span(0), (0, 0))
self.assertEqual(re.match('a*', 'xxx').span(), (0, 0))
self.assertEqual(re.match('x*', 'xxxa').span(0), (0, 3))
self.assertEqual(re.match('x*', 'xxxa').span(), (0, 3))
self.assertEqual(re.match('a+', 'xxx'), None)
def bump_num(self, matchobj):
int_value = int(matchobj.group(0))
return str(int_value + 1)
def test_basic_re_sub(self):
self.assertEqual(re.sub("(?i)b+", "x", "bbbb BBBB"), 'x x')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y'),
'9.3 -3 24x100y')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y', 3),
'9.3 -3 23x99y')
self.assertEqual(re.sub('.', lambda m: r"\n", 'x'), '\\n')
self.assertEqual(re.sub('.', r"\n", 'x'), '\n')
s = r"\1\1"
self.assertEqual(re.sub('(.)', s, 'x'), 'xx')
self.assertEqual(re.sub('(.)', re.escape(s), 'x'), s)
self.assertEqual(re.sub('(.)', lambda m: s, 'x'), s)
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<a>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('a',r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D','a'),
'\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'), '\t\n\v\r\f\a')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'),
(chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)))
self.assertEqual(re.sub('^\s*', 'X', 'test'), 'Xtest')
def test_bug_449964(self):
# fails for group followed by other escape
self.assertEqual(re.sub(r'(?P<unk>x)', '\g<1>\g<1>\\b', 'xx'),
'xx\bxx\b')
def test_bug_449000(self):
# Test for sub() on escaped characters
self.assertEqual(re.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub(r'\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
def test_bug_1661(self):
# Verify that flags do not get silently ignored with compiled patterns
pattern = re.compile('.')
self.assertRaises(ValueError, re.match, pattern, 'A', re.I)
self.assertRaises(ValueError, re.search, pattern, 'A', re.I)
self.assertRaises(ValueError, re.findall, pattern, 'A', re.I)
self.assertRaises(ValueError, re.compile, pattern, re.I)
def test_bug_3629(self):
# A regex that triggered a bug in the sre-code validator
re.compile("(?P<quote>)(?(quote))")
def test_sub_template_numeric_escape(self):
# bug 776311 and friends
self.assertEqual(re.sub('x', r'\0', 'x'), '\0')
self.assertEqual(re.sub('x', r'\000', 'x'), '\000')
self.assertEqual(re.sub('x', r'\001', 'x'), '\001')
self.assertEqual(re.sub('x', r'\008', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\009', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\111', 'x'), '\111')
self.assertEqual(re.sub('x', r'\117', 'x'), '\117')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\1111')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\111' + '1')
self.assertEqual(re.sub('x', r'\00', 'x'), '\x00')
self.assertEqual(re.sub('x', r'\07', 'x'), '\x07')
self.assertEqual(re.sub('x', r'\08', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\09', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\0a', 'x'), '\0' + 'a')
self.assertEqual(re.sub('x', r'\400', 'x'), '\0')
self.assertEqual(re.sub('x', r'\777', 'x'), '\377')
self.assertRaises(re.error, re.sub, 'x', r'\1', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\8', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\9', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\11', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\18', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\1a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\90', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\99', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\118', 'x') # r'\11' + '8'
self.assertRaises(re.error, re.sub, 'x', r'\11a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\181', 'x') # r'\18' + '1'
self.assertRaises(re.error, re.sub, 'x', r'\800', 'x') # r'\80' + '0'
# in python2.3 (etc), these loop endlessly in sre_parser.py
self.assertEqual(re.sub('(((((((((((x)))))))))))', r'\11', 'x'), 'x')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\118', 'xyz'),
'xz8')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\11a', 'xyz'),
'xza')
def test_qualified_re_sub(self):
self.assertEqual(re.sub('a', 'b', 'aaaaa'), 'bbbbb')
self.assertEqual(re.sub('a', 'b', 'aaaaa', 1), 'baaaa')
def test_bug_114660(self):
self.assertEqual(re.sub(r'(\S)\s+(\S)', r'\1 \2', 'hello there'),
'hello there')
def test_bug_462270(self):
# Test for empty sub() behaviour, see SF bug #462270
self.assertEqual(re.sub('x*', '-', 'abxd'), '-a-b-d-')
self.assertEqual(re.sub('x+', '-', 'abxd'), 'ab-d')
def test_symbolic_refs(self):
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a a>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<1a1>', 'xx')
self.assertRaises(IndexError, re.sub, '(?P<a>x)', '\g<ab>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\g<b>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\\2', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<-1>', 'xx')
def test_re_subn(self):
self.assertEqual(re.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2))
self.assertEqual(re.subn("b+", "x", "bbbb BBBB"), ('x BBBB', 1))
self.assertEqual(re.subn("b+", "x", "xyz"), ('xyz', 0))
self.assertEqual(re.subn("b*", "x", "xyz"), ('xxxyxzx', 4))
self.assertEqual(re.subn("b*", "x", "xyz", 2), ('xxxyz', 2))
def test_re_split(self):
self.assertEqual(re.split(":", ":a:b::c"), ['', 'a', 'b', '', 'c'])
self.assertEqual(re.split(":*", ":a:b::c"), ['', 'a', 'b', 'c'])
self.assertEqual(re.split("(:*)", ":a:b::c"),
['', ':', 'a', ':', 'b', '::', 'c'])
self.assertEqual(re.split("(?::*)", ":a:b::c"), ['', 'a', 'b', 'c'])
self.assertEqual(re.split("(:)*", ":a:b::c"),
['', ':', 'a', ':', 'b', ':', 'c'])
self.assertEqual(re.split("([b:]+)", ":a:b::c"),
['', ':', 'a', ':b::', 'c'])
self.assertEqual(re.split("(b)|(:+)", ":a:b::c"),
['', None, ':', 'a', None, ':', '', 'b', None, '',
None, '::', 'c'])
self.assertEqual(re.split("(?:b)|(?::+)", ":a:b::c"),
['', 'a', '', '', 'c'])
def test_qualified_re_split(self):
self.assertEqual(re.split(":", ":a:b::c", 2), ['', 'a', 'b::c'])
self.assertEqual(re.split(':', 'a:b:c:d', 2), ['a', 'b', 'c:d'])
self.assertEqual(re.split("(:)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
self.assertEqual(re.split("(:*)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
def test_re_findall(self):
self.assertEqual(re.findall(":+", "abc"), [])
self.assertEqual(re.findall(":+", "a:b::c:::d"), [":", "::", ":::"])
self.assertEqual(re.findall("(:+)", "a:b::c:::d"), [":", "::", ":::"])
self.assertEqual(re.findall("(:)(:*)", "a:b::c:::d"), [(":", ""),
(":", ":"),
(":", "::")])
def test_bug_117612(self):
self.assertEqual(re.findall(r"(a|(b))", "aba"),
[("a", ""),("b", "b"),("a", "")])
def test_re_match(self):
self.assertEqual(re.match('a', 'a').groups(), ())
self.assertEqual(re.match('(a)', 'a').groups(), ('a',))
self.assertEqual(re.match(r'(a)', 'a').group(0), 'a')
self.assertEqual(re.match(r'(a)', 'a').group(1), 'a')
self.assertEqual(re.match(r'(a)', 'a').group(1, 1), ('a', 'a'))
pat = re.compile('((a)|(b))(c)?')
self.assertEqual(pat.match('a').groups(), ('a', 'a', None, None))
self.assertEqual(pat.match('b').groups(), ('b', None, 'b', None))
self.assertEqual(pat.match('ac').groups(), ('a', 'a', None, 'c'))
self.assertEqual(pat.match('bc').groups(), ('b', None, 'b', 'c'))
self.assertEqual(pat.match('bc').groups(""), ('b', "", 'b', 'c'))
# A single group
m = re.match('(a)', 'a')
self.assertEqual(m.group(0), 'a')
self.assertEqual(m.group(0), 'a')
self.assertEqual(m.group(1), 'a')
self.assertEqual(m.group(1, 1), ('a', 'a'))
pat = re.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?')
self.assertEqual(pat.match('a').group(1, 2, 3), ('a', None, None))
self.assertEqual(pat.match('b').group('a1', 'b2', 'c3'),
(None, 'b', None))
self.assertEqual(pat.match('ac').group(1, 'b2', 3), ('a', None, 'c'))
def test_re_groupref_exists(self):
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a)').groups(),
('(', 'a'))
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a').groups(),
(None, 'a'))
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a)'), None)
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a'), None)
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'ab').groups(),
('a', 'b'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'cd').groups(),
(None, 'd'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'cd').groups(),
(None, 'd'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'a').groups(),
('a', ''))
# Tests for bug #1177831: exercise groups other than the first group
p = re.compile('(?P<g1>a)(?P<g2>b)?((?(g2)c|d))')
self.assertEqual(p.match('abc').groups(),
('a', 'b', 'c'))
self.assertEqual(p.match('ad').groups(),
('a', None, 'd'))
self.assertEqual(p.match('abd'), None)
self.assertEqual(p.match('ac'), None)
def test_re_groupref(self):
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a|').groups(),
('|', 'a'))
self.assertEqual(re.match(r'^(\|)?([^()]+)\1?$', 'a').groups(),
(None, 'a'))
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', 'a|'), None)
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a'), None)
self.assertEqual(re.match(r'^(?:(a)|c)(\1)$', 'aa').groups(),
('a', 'a'))
self.assertEqual(re.match(r'^(?:(a)|c)(\1)?$', 'c').groups(),
(None, None))
def test_groupdict(self):
self.assertEqual(re.match('(?P<first>first) (?P<second>second)',
'first second').groupdict(),
{'first':'first', 'second':'second'})
def test_expand(self):
self.assertEqual(re.match("(?P<first>first) (?P<second>second)",
"first second")
.expand(r"\2 \1 \g<second> \g<first>"),
"second first second first")
def test_repeat_minmax(self):
self.assertEqual(re.match("^(\w){1}$", "abc"), None)
self.assertEqual(re.match("^(\w){1}?$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}?$", "abc"), None)
self.assertEqual(re.match("^(\w){3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^x{1}$", "xxx"), None)
self.assertEqual(re.match("^x{1}?$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertEqual(re.match("^x{}$", "xxx"), None)
self.assertNotEqual(re.match("^x{}$", "x{}"), None)
def test_getattr(self):
self.assertEqual(re.compile("(?i)(a)(b)").pattern, "(?i)(a)(b)")
self.assertEqual(re.compile("(?i)(a)(b)").flags, re.I | re.U)
self.assertEqual(re.compile("(?i)(a)(b)").groups, 2)
self.assertEqual(re.compile("(?i)(a)(b)").groupindex, {})
self.assertEqual(re.compile("(?i)(?P<first>a)(?P<other>b)").groupindex,
{'first': 1, 'other': 2})
self.assertEqual(re.match("(a)", "a").pos, 0)
self.assertEqual(re.match("(a)", "a").endpos, 1)
self.assertEqual(re.match("(a)", "a").string, "a")
self.assertEqual(re.match("(a)", "a").regs, ((0, 1), (0, 1)))
self.assertNotEqual(re.match("(a)", "a").re, None)
def test_special_escapes(self):
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx").group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd").group(1), "bx")
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx", re.LOCALE).group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd", re.LOCALE).group(1), "bx")
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx", re.UNICODE).group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd", re.UNICODE).group(1), "bx")
self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None)
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx").group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd").group(1), "bx")
self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None)
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a").group(0), "1aa! a")
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.LOCALE).group(0), "1aa! a")
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.UNICODE).group(0), "1aa! a")
def test_bigcharset(self):
self.assertEqual(re.match("([\u2222\u2223])",
"\u2222").group(1), "\u2222")
self.assertEqual(re.match("([\u2222\u2223])",
"\u2222", re.UNICODE).group(1), "\u2222")
def test_anyall(self):
self.assertEqual(re.match("a.b", "a\nb", re.DOTALL).group(0),
"a\nb")
self.assertEqual(re.match("a.*b", "a\n\nb", re.DOTALL).group(0),
"a\n\nb")
def test_non_consuming(self):
self.assertEqual(re.match("(a(?=\s[^a]))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[^a]*))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[abc]))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[abc]*))", "a bc").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s\1)", "a a").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s\1*)", "a aa").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s(abc|a))", "a a").group(1), "a")
self.assertEqual(re.match(r"(a(?!\s[^a]))", "a a").group(1), "a")
self.assertEqual(re.match(r"(a(?!\s[abc]))", "a d").group(1), "a")
self.assertEqual(re.match(r"(a)(?!\s\1)", "a b").group(1), "a")
self.assertEqual(re.match(r"(a)(?!\s(abc|a))", "a b").group(1), "a")
def test_ignore_case(self):
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match(r"(a\s[^a])", "a b", re.I).group(1), "a b")
self.assertEqual(re.match(r"(a\s[^a]*)", "a bb", re.I).group(1), "a bb")
self.assertEqual(re.match(r"(a\s[abc])", "a b", re.I).group(1), "a b")
self.assertEqual(re.match(r"(a\s[abc]*)", "a bb", re.I).group(1), "a bb")
self.assertEqual(re.match(r"((a)\s\2)", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s\2*)", "a aa", re.I).group(1), "a aa")
self.assertEqual(re.match(r"((a)\s(abc|a))", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s(abc|a)*)", "a aa", re.I).group(1), "a aa")
def test_category(self):
self.assertEqual(re.match(r"(\s)", " ").group(1), " ")
def test_getlower(self):
import _sre
self.assertEqual(_sre.getlower(ord('A'), 0), ord('a'))
self.assertEqual(_sre.getlower(ord('A'), re.LOCALE), ord('a'))
self.assertEqual(_sre.getlower(ord('A'), re.UNICODE), ord('a'))
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
def test_not_literal(self):
self.assertEqual(re.search("\s([^a])", " b").group(1), "b")
self.assertEqual(re.search("\s([^a]*)", " bb").group(1), "bb")
def test_search_coverage(self):
self.assertEqual(re.search("\s(b)", " b").group(1), "b")
self.assertEqual(re.search("a\s", "a ").group(0), "a ")
def test_re_escape(self):
p=""
self.assertEqual(re.escape(p), p)
for i in range(0, 256):
p = p + chr(i)
self.assertEqual(re.match(re.escape(chr(i)), chr(i)) is not None,
True)
self.assertEqual(re.match(re.escape(chr(i)), chr(i)).span(), (0,1))
pat=re.compile(re.escape(p))
self.assertEqual(pat.match(p) is not None, True)
self.assertEqual(pat.match(p).span(), (0,256))
def test_re_escape_byte(self):
p=b""
self.assertEqual(re.escape(p), p)
for i in range(0, 256):
b = bytes([i])
p += b
self.assertEqual(re.match(re.escape(b), b) is not None, True)
self.assertEqual(re.match(re.escape(b), b).span(), (0,1))
pat=re.compile(re.escape(p))
self.assertEqual(pat.match(p) is not None, True)
self.assertEqual(pat.match(p).span(), (0,256))
def pickle_test(self, pickle):
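        # Round-trip a compiled pattern through the supplied pickle module
        # and check that the unpickled pattern compares equal.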
oldpat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)')
s = pickle.dumps(oldpat)
newpat = pickle.loads(s)
self.assertEqual(oldpat, newpat)
def test_constants(self):
self.assertEqual(re.I, re.IGNORECASE)
self.assertEqual(re.L, re.LOCALE)
self.assertEqual(re.M, re.MULTILINE)
self.assertEqual(re.S, re.DOTALL)
self.assertEqual(re.X, re.VERBOSE)
def test_flags(self):
for flag in [re.I, re.M, re.X, re.S, re.L]:
self.assertNotEqual(re.compile('^pattern$', flag), None)
def test_sre_character_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertNotEqual(re.match(r"\%03o" % i, chr(i)), None)
self.assertNotEqual(re.match(r"\%03o0" % i, chr(i)+"0"), None)
self.assertNotEqual(re.match(r"\%03o8" % i, chr(i)+"8"), None)
self.assertNotEqual(re.match(r"\x%02x" % i, chr(i)), None)
self.assertNotEqual(re.match(r"\x%02x0" % i, chr(i)+"0"), None)
self.assertNotEqual(re.match(r"\x%02xz" % i, chr(i)+"z"), None)
self.assertRaises(re.error, re.match, "\911", "")
def test_sre_character_class_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertNotEqual(re.match(r"[\%03o]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\%03o0]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\%03o8]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\x%02x]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\x%02x0]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\x%02xz]" % i, chr(i)), None)
self.assertRaises(re.error, re.match, "[\911]", "")
def test_bug_113254(self):
self.assertEqual(re.match(r'(a)|(b)', 'b').start(1), -1)
self.assertEqual(re.match(r'(a)|(b)', 'b').end(1), -1)
self.assertEqual(re.match(r'(a)|(b)', 'b').span(1), (-1, -1))
def test_bug_527371(self):
# bug described in patches 527371/672491
self.assertEqual(re.match(r'(a)?a','a').lastindex, None)
self.assertEqual(re.match(r'(a)(b)?b','ab').lastindex, 1)
self.assertEqual(re.match(r'(?P<a>a)(?P<b>b)?b','ab').lastgroup, 'a')
self.assertEqual(re.match("(?P<a>a(b))", "ab").lastgroup, 'a')
self.assertEqual(re.match("((a))", "a").lastindex, 1)
def test_bug_545855(self):
# bug 545855 -- This pattern failed to cause a compile error as it
# should, instead provoking a TypeError.
self.assertRaises(re.error, re.compile, 'foo[a-')
def test_bug_418626(self):
        # bugs 418626 et al. -- Testing Greg Chapman's addition of op code
# SRE_OP_MIN_REPEAT_ONE for eliminating recursion on simple uses of
# pattern '*?' on a long string.
self.assertEqual(re.match('.*?c', 10000*'ab'+'cd').end(0), 20001)
self.assertEqual(re.match('.*?cd', 5000*'ab'+'c'+5000*'ab'+'cde').end(0),
20003)
self.assertEqual(re.match('.*?cd', 20000*'abc'+'de').end(0), 60001)
# non-simple '*?' still used to hit the recursion limit, before the
# non-recursive scheme was implemented.
self.assertEqual(re.search('(a|b)*?c', 10000*'ab'+'cd').end(0), 20001)
def test_bug_612074(self):
pat="["+re.escape("\u2039")+"]"
self.assertEqual(re.compile(pat) and 1, 1)
def test_stack_overflow(self):
# nasty cases that used to overflow the straightforward recursive
# implementation of repeated groups.
self.assertEqual(re.match('(x)*', 50000*'x').group(1), 'x')
self.assertEqual(re.match('(x)*y', 50000*'x'+'y').group(1), 'x')
self.assertEqual(re.match('(x)*?y', 50000*'x'+'y').group(1), 'x')
def test_scanner(self):
def s_ident(scanner, token): return token
def s_operator(scanner, token): return "op%s" % token
def s_float(scanner, token): return float(token)
def s_int(scanner, token): return int(token)
scanner = Scanner([
(r"[a-zA-Z_]\w*", s_ident),
(r"\d+\.\d*", s_float),
(r"\d+", s_int),
(r"=|\+|-|\*|/", s_operator),
(r"\s+", None),
])
self.assertNotEqual(scanner.scanner.scanner("").pattern, None)
self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"),
(['sum', 'op=', 3, 'op*', 'foo', 'op+', 312.5,
'op+', 'bar'], ''))
def test_bug_448951(self):
# bug 448951 (similar to 429357, but with single char match)
# (Also test greedy matches.)
for op in '','?','*':
self.assertEqual(re.match(r'((.%s):)?z'%op, 'z').groups(),
(None, None))
self.assertEqual(re.match(r'((.%s):)?z'%op, 'a:z').groups(),
('a:', 'a'))
def test_bug_725106(self):
# capturing groups in alternatives in repeats
self.assertEqual(re.match('^((a)|b)*', 'abc').groups(),
('b', 'a'))
self.assertEqual(re.match('^(([ab])|c)*', 'abc').groups(),
('c', 'b'))
self.assertEqual(re.match('^((d)|[ab])*', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)c|[ab])*', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)|b)*?c', 'abc').groups(),
('b', 'a'))
self.assertEqual(re.match('^(([ab])|c)*?d', 'abcd').groups(),
('c', 'b'))
self.assertEqual(re.match('^((d)|[ab])*?c', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)c|[ab])*?c', 'abc').groups(),
('b', None))
def test_bug_725149(self):
# mark_stack_base restoring before restoring marks
self.assertEqual(re.match('(a)(?:(?=(b)*)c)*', 'abb').groups(),
('a', None))
self.assertEqual(re.match('(a)((?!(b)*))*', 'abb').groups(),
('a', None, None))
def test_bug_764548(self):
# bug 764548, re.compile() barfs on str/unicode subclasses
class my_unicode(str): pass
pat = re.compile(my_unicode("abc"))
self.assertEqual(pat.match("xyz"), None)
def test_finditer(self):
iter = re.finditer(r":+", "a:b::c:::d")
self.assertEqual([item.group(0) for item in iter],
[":", "::", ":::"])
def test_bug_926075(self):
self.assertTrue(re.compile('bug_926075') is not
re.compile(b'bug_926075'))
def test_bug_931848(self):
pattern = eval('"[\u002E\u3002\uFF0E\uFF61]"')
self.assertEqual(re.compile(pattern).split("a.b.c"),
['a','b','c'])
def test_bug_581080(self):
iter = re.finditer(r"\s", "a b")
self.assertEqual(next(iter).span(), (1,2))
self.assertRaises(StopIteration, next, iter)
scanner = re.compile(r"\s").scanner("a b")
self.assertEqual(scanner.search().span(), (1, 2))
self.assertEqual(scanner.search(), None)
def test_bug_817234(self):
iter = re.finditer(r".*", "asdf")
self.assertEqual(next(iter).span(), (0, 4))
self.assertEqual(next(iter).span(), (4, 4))
self.assertRaises(StopIteration, next, iter)
def test_empty_array(self):
        # SF bug 1647541
import array
for typecode in 'bBuhHiIlLfd':
a = array.array(typecode)
self.assertEqual(re.compile(b"bla").match(a), None)
self.assertEqual(re.compile(b"").match(a).groups(), ())
def test_inline_flags(self):
# Bug #1700
        upper_char = chr(0x1ea0) # Latin Capital Letter A with Dot Below
        lower_char = chr(0x1ea1) # Latin Small Letter A with Dot Below
p = re.compile(upper_char, re.I | re.U)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile(lower_char, re.I | re.U)
q = p.match(upper_char)
self.assertNotEqual(q, None)
p = re.compile('(?i)' + upper_char, re.U)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile('(?i)' + lower_char, re.U)
q = p.match(upper_char)
self.assertNotEqual(q, None)
p = re.compile('(?iu)' + upper_char)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile('(?iu)' + lower_char)
q = p.match(upper_char)
self.assertNotEqual(q, None)
def test_dollar_matches_twice(self):
"$ matches the end of string, and just before the terminating \n"
pattern = re.compile('$')
self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a\nb#\n#')
self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a\nb\nc#')
self.assertEqual(pattern.sub('#', '\n'), '#\n#')
pattern = re.compile('$', re.MULTILINE)
self.assertEqual(pattern.sub('#', 'a\nb\n' ), 'a#\nb#\n#' )
self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a#\nb#\nc#')
self.assertEqual(pattern.sub('#', '\n'), '#\n#')
def test_bytes_str_mixing(self):
# Mixing str and bytes is disallowed
pat = re.compile('.')
bpat = re.compile(b'.')
self.assertRaises(TypeError, pat.match, b'b')
self.assertRaises(TypeError, bpat.match, 'b')
self.assertRaises(TypeError, pat.sub, b'b', 'c')
self.assertRaises(TypeError, pat.sub, 'b', b'c')
self.assertRaises(TypeError, pat.sub, b'b', b'c')
self.assertRaises(TypeError, bpat.sub, b'b', 'c')
self.assertRaises(TypeError, bpat.sub, 'b', b'c')
self.assertRaises(TypeError, bpat.sub, 'b', 'c')
def test_ascii_and_unicode_flag(self):
# String patterns
for flags in (0, re.UNICODE):
pat = re.compile('\xc0', flags | re.IGNORECASE)
self.assertNotEqual(pat.match('\xe0'), None)
pat = re.compile('\w', flags)
self.assertNotEqual(pat.match('\xe0'), None)
pat = re.compile('\xc0', re.ASCII | re.IGNORECASE)
self.assertEqual(pat.match('\xe0'), None)
pat = re.compile('(?a)\xc0', re.IGNORECASE)
self.assertEqual(pat.match('\xe0'), None)
pat = re.compile('\w', re.ASCII)
self.assertEqual(pat.match('\xe0'), None)
pat = re.compile('(?a)\w')
self.assertEqual(pat.match('\xe0'), None)
# Bytes patterns
for flags in (0, re.ASCII):
pat = re.compile(b'\xc0', re.IGNORECASE)
self.assertEqual(pat.match(b'\xe0'), None)
pat = re.compile(b'\w')
self.assertEqual(pat.match(b'\xe0'), None)
# Incompatibilities
self.assertRaises(ValueError, re.compile, b'\w', re.UNICODE)
self.assertRaises(ValueError, re.compile, b'(?u)\w')
self.assertRaises(ValueError, re.compile, '\w', re.UNICODE | re.ASCII)
self.assertRaises(ValueError, re.compile, '(?u)\w', re.ASCII)
self.assertRaises(ValueError, re.compile, '(?a)\w', re.UNICODE)
self.assertRaises(ValueError, re.compile, '(?au)\w')
def run_re_tests():
from test.re_tests import benchmarks, tests, SUCCEED, FAIL, SYNTAX_ERROR
if verbose:
print('Running re_tests test suite')
else:
# To save time, only run the first and last 10 tests
#tests = tests[:10] + tests[-10:]
pass
for t in tests:
sys.stdout.flush()
pattern = s = outcome = repl = expected = None
if len(t) == 5:
pattern, s, outcome, repl, expected = t
elif len(t) == 3:
pattern, s, outcome = t
else:
raise ValueError('Test tuples should have 3 or 5 fields', t)
try:
obj = re.compile(pattern)
except re.error:
if outcome == SYNTAX_ERROR: pass # Expected a syntax error
else:
print('=== Syntax error:', t)
except KeyboardInterrupt: raise KeyboardInterrupt
except:
print('*** Unexpected error ***', t)
if verbose:
traceback.print_exc(file=sys.stdout)
else:
try:
result = obj.search(s)
except re.error as msg:
print('=== Unexpected exception', t, repr(msg))
if outcome == SYNTAX_ERROR:
# This should have been a syntax error; forget it.
pass
elif outcome == FAIL:
if result is None: pass # No match, as expected
else: print('=== Succeeded incorrectly', t)
elif outcome == SUCCEED:
if result is not None:
# Matched, as expected, so now we compute the
# result string and compare it to our expected result.
start, end = result.span(0)
vardict={'found': result.group(0),
'groups': result.group(),
'flags': result.re.flags}
for i in range(1, 100):
try:
gi = result.group(i)
# Special hack because else the string concat fails:
if gi is None:
gi = "None"
except IndexError:
gi = "Error"
vardict['g%d' % i] = gi
for i in result.re.groupindex.keys():
try:
gi = result.group(i)
if gi is None:
gi = "None"
except IndexError:
gi = "Error"
vardict[i] = gi
repl = eval(repl, vardict)
if repl != expected:
print('=== grouping error', t, end=' ')
print(repr(repl) + ' should be ' + repr(expected))
else:
print('=== Failed incorrectly', t)
# Try the match with both pattern and string converted to
# bytes, and check that it still succeeds.
try:
bpat = bytes(pattern, "ascii")
bs = bytes(s, "ascii")
except UnicodeEncodeError:
# skip non-ascii tests
pass
else:
try:
bpat = re.compile(bpat)
except Exception:
print('=== Fails on bytes pattern compile', t)
if verbose:
traceback.print_exc(file=sys.stdout)
else:
bytes_result = bpat.search(bs)
if bytes_result is None:
print('=== Fails on bytes pattern match', t)
# Try the match with the search area limited to the extent
# of the match and see if it still succeeds. \B will
# break (because it won't match at the end or start of a
# string), so we'll ignore patterns that feature it.
if pattern[:2] != '\\B' and pattern[-2:] != '\\B' \
and result is not None:
obj = re.compile(pattern)
result = obj.search(s, result.start(0), result.end(0) + 1)
if result is None:
print('=== Failed on range-limited match', t)
# Try the match with IGNORECASE enabled, and check that it
# still succeeds.
obj = re.compile(pattern, re.IGNORECASE)
result = obj.search(s)
if result is None:
print('=== Fails on case-insensitive match', t)
# Try the match with LOCALE enabled, and check that it
# still succeeds.
if '(?u)' not in pattern:
obj = re.compile(pattern, re.LOCALE)
result = obj.search(s)
if result is None:
print('=== Fails on locale-sensitive match', t)
# Try the match with UNICODE locale enabled, and check
# that it still succeeds.
obj = re.compile(pattern, re.UNICODE)
result = obj.search(s)
if result is None:
print('=== Fails on unicode-sensitive match', t)
def test_main():
run_unittest(ReTests)
run_re_tests()
if __name__ == "__main__":
test_main()
| gpl-3.0 | -4,241,193,093,140,544,500 | 45.362981 | 85 | 0.487349 | false |
rahul003/mxnet | python/mxnet/gluon/nn/conv_layers.py | 1 | 53771 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ, too-many-lines
"""Convolutional neural network layers."""
__all__ = ['Conv1D', 'Conv2D', 'Conv3D',
'Conv1DTranspose', 'Conv2DTranspose', 'Conv3DTranspose',
'MaxPool1D', 'MaxPool2D', 'MaxPool3D',
'AvgPool1D', 'AvgPool2D', 'AvgPool3D',
'GlobalMaxPool1D', 'GlobalMaxPool2D', 'GlobalMaxPool3D',
'GlobalAvgPool1D', 'GlobalAvgPool2D', 'GlobalAvgPool3D',
'ReflectionPad2D']
from ..block import HybridBlock
from ... import symbol
from ...base import numeric_types
from .activations import Activation
def _infer_weight_shape(op_name, data_shape, kwargs):
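    # Build the operator symbolically on a placeholder input of ``data_shape``
    # and run partial shape inference to recover the shapes of its weight
    # (and, if present, bias) parameters.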
op = getattr(symbol, op_name)
sym = op(symbol.var('data', shape=data_shape), **kwargs)
return sym.infer_shape_partial()[0]
class _Conv(HybridBlock):
"""Abstract nD convolution layer (private, used as implementation base).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of outputs.
If `use_bias` is `True`, a bias vector is created and added to the outputs.
Finally, if `activation` is not `None`,
it is applied to the outputs as well.
Parameters
----------
channels : int
The dimensionality of the output space
i.e. the number of output channels in the convolution.
kernel_size : int or tuple/list of n ints
Specifies the dimensions of the convolution window.
    strides : int or tuple/list of n ints,
Specifies the strides of the convolution.
padding : int or tuple/list of n ints,
If padding is non-zero, then the input is implicitly zero-padded
on both sides for padding number of points
    dilation : int or tuple/list of n ints,
Specifies the dilation rate to use for dilated convolution.
groups : int
Controls the connections between inputs and outputs.
At groups=1, all inputs are convolved to all outputs.
At groups=2, the operation becomes equivalent to having two convolution
layers side by side, each seeing half the input channels, and producing
half the output channels, and both subsequently concatenated.
layout : str,
Dimension ordering of data and weight. Can be 'NCW', 'NWC', 'NCHW',
'NHWC', 'NCDHW', 'NDHWC', etc. 'N', 'C', 'H', 'W', 'D' stands for
batch, channel, height, width and depth dimensions respectively.
Convolution is performed over 'D', 'H', and 'W' dimensions.
in_channels : int, default 0
The number of input channels to this layer. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
activation : str
Activation function to use. See :func:`~mxnet.ndarray.Activation`.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: bool
Whether the layer uses a bias vector.
weight_initializer : str or `Initializer`
Initializer for the `weight` weights matrix.
bias_initializer: str or `Initializer`
Initializer for the bias vector.
"""
def __init__(self, channels, kernel_size, strides, padding, dilation,
groups, layout, in_channels=0, activation=None, use_bias=True,
weight_initializer=None, bias_initializer='zeros',
op_name='Convolution', adj=None, prefix=None, params=None):
super(_Conv, self).__init__(prefix=prefix, params=params)
with self.name_scope():
self._channels = channels
self._in_channels = in_channels
if isinstance(strides, numeric_types):
strides = (strides,)*len(kernel_size)
if isinstance(padding, numeric_types):
padding = (padding,)*len(kernel_size)
if isinstance(dilation, numeric_types):
dilation = (dilation,)*len(kernel_size)
self._op_name = op_name
self._kwargs = {
'kernel': kernel_size, 'stride': strides, 'dilate': dilation,
'pad': padding, 'num_filter': channels, 'num_group': groups,
'no_bias': not use_bias, 'layout': layout}
if adj is not None:
self._kwargs['adj'] = adj
dshape = [0]*(len(kernel_size) + 2)
dshape[layout.find('N')] = 1
dshape[layout.find('C')] = in_channels
wshapes = _infer_weight_shape(op_name, dshape, self._kwargs)
self.weight = self.params.get('weight', shape=wshapes[1],
init=weight_initializer,
allow_deferred_init=True)
if use_bias:
self.bias = self.params.get('bias', shape=wshapes[2],
init=bias_initializer,
allow_deferred_init=True)
else:
self.bias = None
if activation is not None:
self.act = Activation(activation, prefix=activation+'_')
else:
self.act = None
def hybrid_forward(self, F, x, weight, bias=None):
if bias is None:
act = getattr(F, self._op_name)(x, weight, name='fwd', **self._kwargs)
else:
act = getattr(F, self._op_name)(x, weight, bias, name='fwd', **self._kwargs)
if self.act is not None:
act = self.act(act)
return act
def _alias(self):
return 'conv'
def __repr__(self):
s = '{name}({mapping}, kernel_size={kernel}, stride={stride}'
len_kernel_size = len(self._kwargs['kernel'])
if self._kwargs['pad'] != (0,) * len_kernel_size:
s += ', padding={pad}'
if self._kwargs['dilate'] != (1,) * len_kernel_size:
s += ', dilation={dilate}'
if hasattr(self, 'out_pad') and self.out_pad != (0,) * len_kernel_size:
s += ', output_padding={out_pad}'.format(out_pad=self.out_pad)
if self._kwargs['num_group'] != 1:
s += ', groups={num_group}'
if self.bias is None:
s += ', bias=False'
s += ')'
shape = self.weight.shape
return s.format(name=self.__class__.__name__,
mapping='{0} -> {1}'.format(shape[1] if shape[1] else None, shape[0]),
**self._kwargs)
class Conv1D(_Conv):
r"""1D convolution layer (e.g. temporal convolution).
This layer creates a convolution kernel that is convolved
with the layer input over a single spatial (or temporal) dimension
to produce a tensor of outputs.
If `use_bias` is True, a bias vector is created and added to the outputs.
Finally, if `activation` is not `None`,
it is applied to the outputs as well.
If `in_channels` is not specified, `Parameter` initialization will be
deferred to the first time `forward` is called and `in_channels` will be
inferred from the shape of input data.
Parameters
----------
channels : int
The dimensionality of the output space, i.e. the number of output
channels (filters) in the convolution.
    kernel_size : int or tuple/list of 1 int
Specifies the dimensions of the convolution window.
strides : int or tuple/list of 1 int,
Specify the strides of the convolution.
padding : int or a tuple/list of 1 int,
If padding is non-zero, then the input is implicitly zero-padded
on both sides for padding number of points
dilation : int or tuple/list of 1 int
Specifies the dilation rate to use for dilated convolution.
groups : int
Controls the connections between inputs and outputs.
At groups=1, all inputs are convolved to all outputs.
At groups=2, the operation becomes equivalent to having two conv
layers side by side, each seeing half the input channels, and producing
half the output channels, and both subsequently concatenated.
layout: str, default 'NCW'
Dimension ordering of data and weight. Only supports 'NCW' layout for now.
'N', 'C', 'W' stands for batch, channel, and width (time) dimensions
respectively. Convolution is applied on the 'W' dimension.
in_channels : int, default 0
The number of input channels to this layer. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
activation : str
Activation function to use. See :func:`~mxnet.ndarray.Activation`.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias : bool
Whether the layer uses a bias vector.
weight_initializer : str or `Initializer`
Initializer for the `weight` weights matrix.
bias_initializer : str or `Initializer`
Initializer for the bias vector.
Inputs:
- **data**: 3D input tensor with shape `(batch_size, in_channels, width)`
when `layout` is `NCW`. For other layouts shape is permuted accordingly.
Outputs:
- **out**: 3D output tensor with shape `(batch_size, channels, out_width)`
when `layout` is `NCW`. out_width is calculated as::
out_width = floor((width+2*padding-dilation*(kernel_size-1)-1)/stride)+1
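
    Examples
    --------
    A minimal sketch of the shape arithmetic above (assuming ``mxnet`` is
    imported as ``mx``; default stride, padding, and dilation):

    >>> conv = mx.gluon.nn.Conv1D(channels=16, kernel_size=3)
    >>> conv.initialize()
    >>> x = mx.nd.random.uniform(shape=(8, 4, 20))
    >>> conv(x).shape   # out_width = floor((20 - 3)/1) + 1 = 18
    (8, 16, 18)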
"""
def __init__(self, channels, kernel_size, strides=1, padding=0, dilation=1,
groups=1, layout='NCW', activation=None, use_bias=True,
weight_initializer=None, bias_initializer='zeros',
in_channels=0, **kwargs):
assert layout == 'NCW', "Only supports 'NCW' layout for now"
if isinstance(kernel_size, numeric_types):
kernel_size = (kernel_size,)
assert len(kernel_size) == 1, "kernel_size must be a number or a list of 1 ints"
super(Conv1D, self).__init__(
channels, kernel_size, strides, padding, dilation, groups, layout,
in_channels, activation, use_bias, weight_initializer, bias_initializer, **kwargs)
class Conv2D(_Conv):
r"""2D convolution layer (e.g. spatial convolution over images).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. If `use_bias` is True,
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
If `in_channels` is not specified, `Parameter` initialization will be
deferred to the first time `forward` is called and `in_channels` will be
inferred from the shape of input data.
Parameters
----------
channels : int
The dimensionality of the output space, i.e. the number of output
channels (filters) in the convolution.
    kernel_size : int or tuple/list of 2 int
Specifies the dimensions of the convolution window.
strides : int or tuple/list of 2 int,
Specify the strides of the convolution.
padding : int or a tuple/list of 2 int,
If padding is non-zero, then the input is implicitly zero-padded
on both sides for padding number of points
dilation : int or tuple/list of 2 int
Specifies the dilation rate to use for dilated convolution.
groups : int
Controls the connections between inputs and outputs.
At groups=1, all inputs are convolved to all outputs.
At groups=2, the operation becomes equivalent to having two conv
layers side by side, each seeing half the input channels, and producing
half the output channels, and both subsequently concatenated.
layout : str, default 'NCHW'
Dimension ordering of data and weight. Only supports 'NCHW' and 'NHWC'
layout for now. 'N', 'C', 'H', 'W' stands for batch, channel, height,
and width dimensions respectively. Convolution is applied on the 'H' and
'W' dimensions.
in_channels : int, default 0
The number of input channels to this layer. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
activation : str
Activation function to use. See :func:`~mxnet.ndarray.Activation`.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias : bool
Whether the layer uses a bias vector.
weight_initializer : str or `Initializer`
Initializer for the `weight` weights matrix.
bias_initializer : str or `Initializer`
Initializer for the bias vector.
Inputs:
- **data**: 4D input tensor with shape
`(batch_size, in_channels, height, width)` when `layout` is `NCHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 4D output tensor with shape
`(batch_size, channels, out_height, out_width)` when `layout` is `NCHW`.
out_height and out_width are calculated as::
out_height = floor((height+2*padding[0]-dilation[0]*(kernel_size[0]-1)-1)/stride[0])+1
out_width = floor((width+2*padding[1]-dilation[1]*(kernel_size[1]-1)-1)/stride[1])+1
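
    Examples
    --------
    A minimal sketch of the shape arithmetic above (assuming ``mxnet`` is
    imported as ``mx``; default stride, padding, and dilation):

    >>> conv = mx.gluon.nn.Conv2D(channels=16, kernel_size=3)
    >>> conv.initialize()
    >>> x = mx.nd.random.uniform(shape=(8, 4, 32, 32))
    >>> conv(x).shape   # each spatial dim: floor((32 - 3)/1) + 1 = 30
    (8, 16, 30, 30)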
"""
def __init__(self, channels, kernel_size, strides=(1, 1), padding=(0, 0),
dilation=(1, 1), groups=1, layout='NCHW',
activation=None, use_bias=True, weight_initializer=None,
bias_initializer='zeros', in_channels=0, **kwargs):
assert layout == 'NCHW' or layout == 'NHWC', \
"Only supports 'NCHW' and 'NHWC' layout for now"
if isinstance(kernel_size, numeric_types):
kernel_size = (kernel_size,)*2
assert len(kernel_size) == 2, "kernel_size must be a number or a list of 2 ints"
super(Conv2D, self).__init__(
channels, kernel_size, strides, padding, dilation, groups, layout,
in_channels, activation, use_bias, weight_initializer, bias_initializer, **kwargs)
class Conv3D(_Conv):
"""3D convolution layer (e.g. spatial convolution over volumes).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. If `use_bias` is `True`,
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
If `in_channels` is not specified, `Parameter` initialization will be
deferred to the first time `forward` is called and `in_channels` will be
inferred from the shape of input data.
Parameters
----------
channels : int
The dimensionality of the output space, i.e. the number of output
channels (filters) in the convolution.
kernel_size : int or tuple/list of 3 int
Specifies the dimensions of the convolution window.
strides : int or tuple/list of 3 int
Specifies the strides of the convolution.
padding : int or a tuple/list of 3 int
If padding is non-zero, then the input is implicitly zero-padded
on both sides for padding number of points.
dilation : int or tuple/list of 3 int
Specifies the dilation rate to use for dilated convolution.
groups : int
Controls the connections between inputs and outputs.
At groups=1, all inputs are convolved to all outputs.
At groups=2, the operation becomes equivalent to having two conv
layers side by side, each seeing half the input channels, and producing
half the output channels, and both subsequently concatenated.
layout : str, default 'NCDHW'
Dimension ordering of data and weight. Only supports 'NCDHW' and 'NDHWC'
layout for now. 'N', 'C', 'H', 'W', 'D' stands for batch, channel, height,
width and depth dimensions respectively. Convolution is applied on the 'D',
'H' and 'W' dimensions.
in_channels : int, default 0
The number of input channels to this layer. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
activation : str
Activation function to use. See :func:`~mxnet.ndarray.Activation`.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias : bool
Whether the layer uses a bias vector.
weight_initializer : str or `Initializer`
Initializer for the `weight` weights matrix.
bias_initializer : str or `Initializer`
Initializer for the bias vector.
Inputs:
- **data**: 5D input tensor with shape
`(batch_size, in_channels, depth, height, width)` when `layout` is `NCDHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 5D output tensor with shape
`(batch_size, channels, out_depth, out_height, out_width)` when `layout` is `NCDHW`.
out_depth, out_height and out_width are calculated as::
out_depth = floor((depth+2*padding[0]-dilation[0]*(kernel_size[0]-1)-1)/strides[0])+1
out_height = floor((height+2*padding[1]-dilation[1]*(kernel_size[1]-1)-1)/strides[1])+1
out_width = floor((width+2*padding[2]-dilation[2]*(kernel_size[2]-1)-1)/strides[2])+1
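Examples
--------
A minimal sketch (assuming ``mxnet`` is importable as ``mx``); with
kernel_size=3 and padding=1 the spatial shape is preserved:
>>> conv = nn.Conv3D(channels=8, kernel_size=3, padding=1)
>>> conv.initialize()
>>> conv(mx.nd.random.uniform(shape=(1, 2, 16, 16, 16))).shape
(1, 8, 16, 16, 16)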
"""
def __init__(self, channels, kernel_size, strides=(1, 1, 1), padding=(0, 0, 0),
dilation=(1, 1, 1), groups=1, layout='NCDHW', activation=None,
use_bias=True, weight_initializer=None, bias_initializer='zeros',
in_channels=0, **kwargs):
assert layout == 'NCDHW' or layout == 'NDHWC', \
"Only supports 'NCDHW' and 'NDHWC' layout for now"
if isinstance(kernel_size, numeric_types):
kernel_size = (kernel_size,)*3
assert len(kernel_size) == 3, "kernel_size must be a number or a list of 3 ints"
super(Conv3D, self).__init__(
channels, kernel_size, strides, padding, dilation, groups, layout,
in_channels, activation, use_bias, weight_initializer, bias_initializer, **kwargs)
class Conv1DTranspose(_Conv):
"""Transposed 1D convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
If `in_channels` is not specified, `Parameter` initialization will be
deferred to the first time `forward` is called and `in_channels` will be
inferred from the shape of input data.
Parameters
----------
channels : int
The dimensionality of the output space, i.e. the number of output
channels (filters) in the convolution.
kernel_size : int or tuple/list of 1 int
Specifies the dimensions of the convolution window.
strides : int or tuple/list of 1 int
Specifies the strides of the convolution.
padding : int or a tuple/list of 1 int
If padding is non-zero, then the input is implicitly zero-padded
on both sides for padding number of points.
output_padding : int or a tuple/list of 1 int
Controls the amount of implicit zero-paddings on both sides of the
output for output_padding number of points for each dimension.
dilation : int or tuple/list of 1 int
Controls the spacing between the kernel points; also known as the
à trous algorithm.
groups : int
Controls the connections between inputs and outputs.
At groups=1, all inputs are convolved to all outputs.
At groups=2, the operation becomes equivalent to having two conv
layers side by side, each seeing half the input channels, and producing
half the output channels, and both subsequently concatenated.
layout : str, default 'NCW'
Dimension ordering of data and weight. Only supports 'NCW' layout for now.
'N', 'C', 'W' stands for batch, channel, and width (time) dimensions
respectively. Convolution is applied on the 'W' dimension.
in_channels : int, default 0
The number of input channels to this layer. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
activation : str
Activation function to use. See :func:`~mxnet.ndarray.Activation`.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias : bool
Whether the layer uses a bias vector.
weight_initializer : str or `Initializer`
Initializer for the `weight` weights matrix.
bias_initializer : str or `Initializer`
Initializer for the bias vector.
Inputs:
- **data**: 3D input tensor with shape `(batch_size, in_channels, width)`
when `layout` is `NCW`. For other layouts shape is permuted accordingly.
Outputs:
- **out**: 3D output tensor with shape `(batch_size, channels, out_width)`
when `layout` is `NCW`. out_width is calculated as::
out_width = (width-1)*strides-2*padding+kernel_size+output_padding
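Examples
--------
A minimal sketch (assuming ``mxnet`` is importable as ``mx``); out_width
follows the formula above, (50-1)*2 - 2*1 + 4 + 0 = 100:
>>> deconv = nn.Conv1DTranspose(channels=4, kernel_size=4, strides=2, padding=1)
>>> deconv.initialize()
>>> deconv(mx.nd.random.uniform(shape=(1, 8, 50))).shape
(1, 4, 100)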
"""
def __init__(self, channels, kernel_size, strides=1, padding=0, output_padding=0,
dilation=1, groups=1, layout='NCW', activation=None, use_bias=True,
weight_initializer=None, bias_initializer='zeros',
in_channels=0, **kwargs):
assert layout == 'NCW', "Only supports 'NCW' layout for now"
if isinstance(kernel_size, numeric_types):
kernel_size = (kernel_size,)
if isinstance(output_padding, numeric_types):
output_padding = (output_padding,)
        assert len(kernel_size) == 1, "kernel_size must be a number or a list of 1 int"
        assert len(output_padding) == 1, "output_padding must be a number or a list of 1 int"
super(Conv1DTranspose, self).__init__(
channels, kernel_size, strides, padding, dilation, groups, layout,
in_channels, activation, use_bias, weight_initializer,
bias_initializer, op_name='Deconvolution', adj=output_padding, **kwargs)
self.outpad = output_padding
class Conv2DTranspose(_Conv):
"""Transposed 2D convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
If `in_channels` is not specified, `Parameter` initialization will be
deferred to the first time `forward` is called and `in_channels` will be
inferred from the shape of input data.
Parameters
----------
channels : int
The dimensionality of the output space, i.e. the number of output
channels (filters) in the convolution.
kernel_size : int or tuple/list of 2 int
Specifies the dimensions of the convolution window.
strides : int or tuple/list of 2 int
Specifies the strides of the convolution.
padding : int or a tuple/list of 2 int
If padding is non-zero, then the input is implicitly zero-padded
on both sides for padding number of points.
output_padding : int or a tuple/list of 2 int
Controls the amount of implicit zero-paddings on both sides of the
output for output_padding number of points for each dimension.
dilation : int or tuple/list of 2 int
Controls the spacing between the kernel points; also known as the
à trous algorithm.
groups : int
Controls the connections between inputs and outputs.
At groups=1, all inputs are convolved to all outputs.
At groups=2, the operation becomes equivalent to having two conv
layers side by side, each seeing half the input channels, and producing
half the output channels, and both subsequently concatenated.
layout : str, default 'NCHW'
Dimension ordering of data and weight. Only supports 'NCHW' and 'NHWC'
layout for now. 'N', 'C', 'H', 'W' stands for batch, channel, height,
and width dimensions respectively. Convolution is applied on the 'H' and
'W' dimensions.
in_channels : int, default 0
The number of input channels to this layer. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
activation : str
Activation function to use. See :func:`~mxnet.ndarray.Activation`.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias : bool
Whether the layer uses a bias vector.
weight_initializer : str or `Initializer`
Initializer for the `weight` weights matrix.
bias_initializer : str or `Initializer`
Initializer for the bias vector.
Inputs:
- **data**: 4D input tensor with shape
`(batch_size, in_channels, height, width)` when `layout` is `NCHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 4D output tensor with shape
`(batch_size, channels, out_height, out_width)` when `layout` is `NCHW`.
out_height and out_width are calculated as::
out_height = (height-1)*strides[0]-2*padding[0]+kernel_size[0]+output_padding[0]
out_width = (width-1)*strides[1]-2*padding[1]+kernel_size[1]+output_padding[1]
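Examples
--------
A common 2x upsampling configuration (a sketch, assuming ``mxnet`` is
importable as ``mx``):
>>> up = nn.Conv2DTranspose(channels=3, kernel_size=4, strides=2, padding=1)
>>> up.initialize()
>>> up(mx.nd.random.uniform(shape=(1, 16, 32, 32))).shape
(1, 3, 64, 64)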
"""
def __init__(self, channels, kernel_size, strides=(1, 1), padding=(0, 0),
output_padding=(0, 0), dilation=(1, 1), groups=1, layout='NCHW',
activation=None, use_bias=True, weight_initializer=None,
bias_initializer='zeros', in_channels=0, **kwargs):
assert layout == 'NCHW' or layout == 'NHWC', \
"Only supports 'NCHW' and 'NHWC' layout for now"
if isinstance(kernel_size, numeric_types):
kernel_size = (kernel_size,)*2
if isinstance(output_padding, numeric_types):
output_padding = (output_padding,)*2
assert len(kernel_size) == 2, "kernel_size must be a number or a list of 2 ints"
assert len(output_padding) == 2, "output_padding must be a number or a list of 2 ints"
super(Conv2DTranspose, self).__init__(
channels, kernel_size, strides, padding, dilation, groups, layout,
in_channels, activation, use_bias, weight_initializer,
bias_initializer, op_name='Deconvolution', adj=output_padding, **kwargs)
self.outpad = output_padding
class Conv3DTranspose(_Conv):
"""Transposed 3D convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
If `in_channels` is not specified, `Parameter` initialization will be
deferred to the first time `forward` is called and `in_channels` will be
inferred from the shape of input data.
Parameters
----------
channels : int
The dimensionality of the output space, i.e. the number of output
channels (filters) in the convolution.
kernel_size : int or tuple/list of 3 int
Specifies the dimensions of the convolution window.
strides : int or tuple/list of 3 int
Specifies the strides of the convolution.
padding : int or a tuple/list of 3 int
If padding is non-zero, then the input is implicitly zero-padded
on both sides for padding number of points.
output_padding : int or a tuple/list of 3 int
Controls the amount of implicit zero-paddings on both sides of the
output for output_padding number of points for each dimension.
dilation : int or tuple/list of 3 int
Controls the spacing between the kernel points; also known as the
à trous algorithm.
groups : int
Controls the connections between inputs and outputs.
At groups=1, all inputs are convolved to all outputs.
At groups=2, the operation becomes equivalent to having two conv
layers side by side, each seeing half the input channels, and producing
half the output channels, and both subsequently concatenated.
layout : str, default 'NCDHW'
Dimension ordering of data and weight. Only supports 'NCDHW' and 'NDHWC'
layout for now. 'N', 'C', 'H', 'W', 'D' stands for batch, channel, height,
width and depth dimensions respectively. Convolution is applied on the 'D',
'H' and 'W' dimensions.
in_channels : int, default 0
The number of input channels to this layer. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
activation : str
Activation function to use. See :func:`~mxnet.ndarray.Activation`.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias : bool
Whether the layer uses a bias vector.
weight_initializer : str or `Initializer`
Initializer for the `weight` weights matrix.
bias_initializer : str or `Initializer`
Initializer for the bias vector.
Inputs:
- **data**: 5D input tensor with shape
`(batch_size, in_channels, depth, height, width)` when `layout` is `NCDHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 5D output tensor with shape
`(batch_size, channels, out_depth, out_height, out_width)` when `layout` is `NCDHW`.
out_depth, out_height and out_width are calculated as::
out_depth = (depth-1)*strides[0]-2*padding[0]+kernel_size[0]+output_padding[0]
out_height = (height-1)*strides[1]-2*padding[1]+kernel_size[1]+output_padding[1]
out_width = (width-1)*strides[2]-2*padding[2]+kernel_size[2]+output_padding[2]
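Examples
--------
A minimal sketch (assuming ``mxnet`` is importable as ``mx``); kernel_size=2
with strides=2 doubles each spatial dimension:
>>> up = nn.Conv3DTranspose(channels=1, kernel_size=2, strides=2)
>>> up.initialize()
>>> up(mx.nd.random.uniform(shape=(1, 4, 8, 8, 8))).shape
(1, 1, 16, 16, 16)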
"""
def __init__(self, channels, kernel_size, strides=(1, 1, 1), padding=(0, 0, 0),
output_padding=(0, 0, 0), dilation=(1, 1, 1), groups=1, layout='NCDHW',
activation=None, use_bias=True, weight_initializer=None,
bias_initializer='zeros', in_channels=0, **kwargs):
assert layout == 'NCDHW' or layout == 'NDHWC', \
"Only supports 'NCDHW' and 'NDHWC' layout for now"
if isinstance(kernel_size, numeric_types):
kernel_size = (kernel_size,)*3
if isinstance(output_padding, numeric_types):
output_padding = (output_padding,)*3
assert len(kernel_size) == 3, "kernel_size must be a number or a list of 3 ints"
assert len(output_padding) == 3, "output_padding must be a number or a list of 3 ints"
super(Conv3DTranspose, self).__init__(
channels, kernel_size, strides, padding, dilation, groups, layout,
in_channels, activation, use_bias, weight_initializer, bias_initializer,
op_name='Deconvolution', adj=output_padding, **kwargs)
self.outpad = output_padding
class _Pooling(HybridBlock):
"""Abstract class for different pooling layers."""
def __init__(self, pool_size, strides, padding, ceil_mode, global_pool,
pool_type, count_include_pad=None, **kwargs):
super(_Pooling, self).__init__(**kwargs)
if strides is None:
strides = pool_size
if isinstance(strides, numeric_types):
strides = (strides,)*len(pool_size)
if isinstance(padding, numeric_types):
padding = (padding,)*len(pool_size)
self._kwargs = {
'kernel': pool_size, 'stride': strides, 'pad': padding,
'global_pool': global_pool, 'pool_type': pool_type,
'pooling_convention': 'full' if ceil_mode else 'valid'}
if count_include_pad is not None:
self._kwargs['count_include_pad'] = count_include_pad
def _alias(self):
return 'pool'
def hybrid_forward(self, F, x):
return F.Pooling(x, name='fwd', **self._kwargs)
def __repr__(self):
s = '{name}(size={kernel}, stride={stride}, padding={pad}, ceil_mode={ceil_mode})'
return s.format(name=self.__class__.__name__,
ceil_mode=self._kwargs['pooling_convention'] == 'full',
**self._kwargs)
class MaxPool1D(_Pooling):
"""Max pooling operation for one dimensional data.
Parameters
----------
pool_size: int
Size of the max pooling windows.
strides: int, or None
Factor by which to downscale. E.g. 2 will halve the input size.
If `None`, it will default to `pool_size`.
padding: int
If padding is non-zero, then the input is implicitly
zero-padded on both sides for padding number of points.
layout : str, default 'NCW'
Dimension ordering of data and weight. Only supports 'NCW' layout for now.
'N', 'C', 'W' stands for batch, channel, and width (time) dimensions
respectively. Pooling is applied on the W dimension.
ceil_mode : bool, default False
When `True`, will use ceil instead of floor to compute the output shape.
Inputs:
- **data**: 3D input tensor with shape `(batch_size, in_channels, width)`
when `layout` is `NCW`. For other layouts shape is permuted accordingly.
Outputs:
- **out**: 3D output tensor with shape `(batch_size, channels, out_width)`
when `layout` is `NCW`. out_width is calculated as::
out_width = floor((width+2*padding-pool_size)/strides)+1
When `ceil_mode` is `True`, ceil will be used instead of floor in this
equation.
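Examples
--------
A minimal sketch (assuming ``mxnet`` is importable as ``mx``); strides
defaults to pool_size, so a width of 10 halves to 5:
>>> pool = nn.MaxPool1D(pool_size=2)
>>> pool(mx.nd.random.uniform(shape=(1, 3, 10))).shape
(1, 3, 5)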
"""
def __init__(self, pool_size=2, strides=None, padding=0, layout='NCW',
ceil_mode=False, **kwargs):
assert layout == 'NCW', "Only supports 'NCW' layout for now"
if isinstance(pool_size, numeric_types):
pool_size = (pool_size,)
        assert len(pool_size) == 1, "pool_size must be a number or a list of 1 int"
super(MaxPool1D, self).__init__(
pool_size, strides, padding, ceil_mode, False, 'max', **kwargs)
class MaxPool2D(_Pooling):
"""Max pooling operation for two dimensional (spatial) data.
Parameters
----------
pool_size: int or list/tuple of 2 ints,
Size of the max pooling windows.
strides: int, list/tuple of 2 ints, or None.
Factor by which to downscale. E.g. 2 will halve the input size.
If `None`, it will default to `pool_size`.
padding: int or list/tuple of 2 ints,
If padding is non-zero, then the input is implicitly
zero-padded on both sides for padding number of points.
layout : str, default 'NCHW'
Dimension ordering of data and weight. Only supports 'NCHW' layout for now.
'N', 'C', 'H', 'W' stands for batch, channel, height, and width
dimensions respectively. padding is applied on 'H' and 'W' dimension.
ceil_mode : bool, default False
When `True`, will use ceil instead of floor to compute the output shape.
Inputs:
- **data**: 4D input tensor with shape
`(batch_size, in_channels, height, width)` when `layout` is `NCHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 4D output tensor with shape
`(batch_size, channels, out_height, out_width)` when `layout` is `NCHW`.
out_height and out_width are calculated as::
out_height = floor((height+2*padding[0]-pool_size[0])/strides[0])+1
out_width = floor((width+2*padding[1]-pool_size[1])/strides[1])+1
When `ceil_mode` is `True`, ceil will be used instead of floor in this
equation.
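Examples
--------
A sketch of the `ceil_mode` effect on a 7x7 input (assuming ``mxnet`` is
importable as ``mx``):
>>> x = mx.nd.random.uniform(shape=(1, 3, 7, 7))
>>> nn.MaxPool2D(pool_size=2)(x).shape
(1, 3, 3, 3)
>>> nn.MaxPool2D(pool_size=2, ceil_mode=True)(x).shape
(1, 3, 4, 4)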
"""
def __init__(self, pool_size=(2, 2), strides=None, padding=0, layout='NCHW',
ceil_mode=False, **kwargs):
assert layout == 'NCHW', "Only supports 'NCHW' layout for now"
if isinstance(pool_size, numeric_types):
pool_size = (pool_size,)*2
assert len(pool_size) == 2, "pool_size must be a number or a list of 2 ints"
super(MaxPool2D, self).__init__(
pool_size, strides, padding, ceil_mode, False, 'max', **kwargs)
class MaxPool3D(_Pooling):
"""Max pooling operation for 3D data (spatial or spatio-temporal).
Parameters
----------
pool_size: int or list/tuple of 3 ints,
Size of the max pooling windows.
strides: int, list/tuple of 3 ints, or None.
Factor by which to downscale. E.g. 2 will halve the input size.
If `None`, it will default to `pool_size`.
padding: int or list/tuple of 3 ints,
If padding is non-zero, then the input is implicitly
zero-padded on both sides for padding number of points.
layout : str, default 'NCDHW'
Dimension ordering of data and weight. Only supports 'NCDHW' layout for now.
'N', 'C', 'H', 'W', 'D' stands for batch, channel, height, width and
depth dimensions respectively. padding is applied on 'D', 'H' and 'W'
dimension.
ceil_mode : bool, default False
When `True`, will use ceil instead of floor to compute the output shape.
Inputs:
- **data**: 5D input tensor with shape
`(batch_size, in_channels, depth, height, width)` when `layout` is `NCDHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 5D output tensor with shape
`(batch_size, channels, out_depth, out_height, out_width)` when `layout` is `NCDHW`.
out_depth, out_height and out_width are calculated as::
out_depth = floor((depth+2*padding[0]-pool_size[0])/strides[0])+1
out_height = floor((height+2*padding[1]-pool_size[1])/strides[1])+1
out_width = floor((width+2*padding[2]-pool_size[2])/strides[2])+1
When `ceil_mode` is `True`, ceil will be used instead of floor in this
equation.
"""
def __init__(self, pool_size=(2, 2, 2), strides=None, padding=0,
ceil_mode=False, layout='NCDHW', **kwargs):
assert layout == 'NCDHW', "Only supports 'NCDHW' layout for now"
if isinstance(pool_size, numeric_types):
pool_size = (pool_size,)*3
assert len(pool_size) == 3, "pool_size must be a number or a list of 3 ints"
super(MaxPool3D, self).__init__(
pool_size, strides, padding, ceil_mode, False, 'max', **kwargs)
class AvgPool1D(_Pooling):
"""Average pooling operation for temporal data.
Parameters
----------
pool_size: int
Size of the average pooling windows.
strides: int, or None
Factor by which to downscale. E.g. 2 will halve the input size.
If `None`, it will default to `pool_size`.
padding: int
If padding is non-zero, then the input is implicitly
zero-padded on both sides for padding number of points.
layout : str, default 'NCW'
Dimension ordering of data and weight. Only supports 'NCW' layout for now.
'N', 'C', 'W' stands for batch, channel, and width (time) dimensions
respectively. padding is applied on 'W' dimension.
ceil_mode : bool, default False
When `True`, will use ceil instead of floor to compute the output shape.
count_include_pad : bool, default True
When 'False', will exclude padding elements when computing the average value.
Inputs:
- **data**: 3D input tensor with shape `(batch_size, in_channels, width)`
when `layout` is `NCW`. For other layouts shape is permuted accordingly.
Outputs:
- **out**: 3D output tensor with shape `(batch_size, channels, out_width)`
when `layout` is `NCW`. out_width is calculated as::
out_width = floor((width+2*padding-pool_size)/strides)+1
When `ceil_mode` is `True`, ceil will be used instead of floor in this
equation.
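Examples
--------
A sketch of `count_include_pad` (assuming ``mxnet`` is importable as ``mx``):
>>> x = mx.nd.ones((1, 1, 3))
>>> inc = nn.AvgPool1D(pool_size=3, padding=1)(x)  # window [0, 1, 1] -> ~0.667
>>> exc = nn.AvgPool1D(pool_size=3, padding=1, count_include_pad=False)(x)  # pads excluded -> 1.0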
"""
def __init__(self, pool_size=2, strides=None, padding=0, layout='NCW',
ceil_mode=False, count_include_pad=True, **kwargs):
assert layout == 'NCW', "Only supports 'NCW' layout for now"
if isinstance(pool_size, numeric_types):
pool_size = (pool_size,)
        assert len(pool_size) == 1, "pool_size must be a number or a list of 1 int"
super(AvgPool1D, self).__init__(
pool_size, strides, padding, ceil_mode, False, 'avg', count_include_pad, **kwargs)
class AvgPool2D(_Pooling):
"""Average pooling operation for spatial data.
Parameters
----------
pool_size: int or list/tuple of 2 ints,
Size of the average pooling windows.
strides: int, list/tuple of 2 ints, or None.
Factor by which to downscale. E.g. 2 will halve the input size.
If `None`, it will default to `pool_size`.
padding: int or list/tuple of 2 ints,
If padding is non-zero, then the input is implicitly
zero-padded on both sides for padding number of points.
layout : str, default 'NCHW'
Dimension ordering of data and weight. Only supports 'NCHW' layout for now.
'N', 'C', 'H', 'W' stands for batch, channel, height, and width
dimensions respectively. padding is applied on 'H' and 'W' dimension.
ceil_mode : bool, default False
When True, will use ceil instead of floor to compute the output shape.
count_include_pad : bool, default True
When 'False', will exclude padding elements when computing the average value.
Inputs:
- **data**: 4D input tensor with shape
`(batch_size, in_channels, height, width)` when `layout` is `NCHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 4D output tensor with shape
`(batch_size, channels, out_height, out_width)` when `layout` is `NCHW`.
out_height and out_width are calculated as::
out_height = floor((height+2*padding[0]-pool_size[0])/strides[0])+1
out_width = floor((width+2*padding[1]-pool_size[1])/strides[1])+1
When `ceil_mode` is `True`, ceil will be used instead of floor in this
equation.
"""
def __init__(self, pool_size=(2, 2), strides=None, padding=0,
ceil_mode=False, layout='NCHW', count_include_pad=True, **kwargs):
assert layout == 'NCHW', "Only supports 'NCHW' layout for now"
if isinstance(pool_size, numeric_types):
pool_size = (pool_size,)*2
assert len(pool_size) == 2, "pool_size must be a number or a list of 2 ints"
super(AvgPool2D, self).__init__(
pool_size, strides, padding, ceil_mode, False, 'avg', count_include_pad, **kwargs)
class AvgPool3D(_Pooling):
"""Average pooling operation for 3D data (spatial or spatio-temporal).
Parameters
----------
pool_size: int or list/tuple of 3 ints,
Size of the average pooling windows.
strides: int, list/tuple of 3 ints, or None.
Factor by which to downscale. E.g. 2 will halve the input size.
If `None`, it will default to `pool_size`.
padding: int or list/tuple of 3 ints,
If padding is non-zero, then the input is implicitly
zero-padded on both sides for padding number of points.
layout : str, default 'NCDHW'
Dimension ordering of data and weight. Only supports 'NCDHW' layout for now.
'N', 'C', 'H', 'W', 'D' stands for batch, channel, height, width and
depth dimensions respectively. padding is applied on 'D', 'H' and 'W'
dimension.
ceil_mode : bool, default False
When True, will use ceil instead of floor to compute the output shape.
count_include_pad : bool, default True
When 'False', will exclude padding elements when computing the average value.
Inputs:
- **data**: 5D input tensor with shape
`(batch_size, in_channels, depth, height, width)` when `layout` is `NCDHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 5D output tensor with shape
`(batch_size, channels, out_depth, out_height, out_width)` when `layout` is `NCDHW`.
out_depth, out_height and out_width are calculated as::
out_depth = floor((depth+2*padding[0]-pool_size[0])/strides[0])+1
out_height = floor((height+2*padding[1]-pool_size[1])/strides[1])+1
out_width = floor((width+2*padding[2]-pool_size[2])/strides[2])+1
When `ceil_mode` is `True`, ceil will be used instead of floor in this
equation.
"""
def __init__(self, pool_size=(2, 2, 2), strides=None, padding=0,
ceil_mode=False, layout='NCDHW', count_include_pad=True, **kwargs):
assert layout == 'NCDHW', "Only supports 'NCDHW' layout for now"
if isinstance(pool_size, numeric_types):
pool_size = (pool_size,)*3
assert len(pool_size) == 3, "pool_size must be a number or a list of 3 ints"
super(AvgPool3D, self).__init__(
pool_size, strides, padding, ceil_mode, False, 'avg', count_include_pad, **kwargs)
class GlobalMaxPool1D(_Pooling):
"""Gloabl max pooling operation for one dimensional (temporal) data.
Parameters
----------
layout : str, default 'NCW'
Dimension ordering of data and weight. Only supports 'NCW' layout for now.
'N', 'C', 'W' stands for batch, channel, and width (time) dimensions
respectively. Pooling is applied on the W dimension.
Inputs:
- **data**: 3D input tensor with shape `(batch_size, in_channels, width)`
when `layout` is `NCW`. For other layouts shape is permuted accordingly.
Outputs:
- **out**: 3D output tensor with shape `(batch_size, channels, 1)`
when `layout` is `NCW`.
"""
def __init__(self, layout='NCW', **kwargs):
assert layout == 'NCW', "Only supports 'NCW' layout for now"
super(GlobalMaxPool1D, self).__init__(
(1,), None, 0, True, True, 'max', **kwargs)
class GlobalMaxPool2D(_Pooling):
"""Global max pooling operation for two dimensional (spatial) data.
Parameters
----------
layout : str, default 'NCHW'
Dimension ordering of data and weight. Only supports 'NCHW' layout for now.
'N', 'C', 'H', 'W' stands for batch, channel, height, and width
dimensions respectively. padding is applied on 'H' and 'W' dimension.
Inputs:
- **data**: 4D input tensor with shape
`(batch_size, in_channels, height, width)` when `layout` is `NCHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 4D output tensor with shape
`(batch_size, channels, 1, 1)` when `layout` is `NCHW`.
"""
def __init__(self, layout='NCHW', **kwargs):
assert layout == 'NCHW', "Only supports 'NCHW' layout for now"
super(GlobalMaxPool2D, self).__init__(
(1, 1), None, 0, True, True, 'max', **kwargs)
class GlobalMaxPool3D(_Pooling):
"""Global max pooling operation for 3D data (spatial or spatio-temporal).
Parameters
----------
layout : str, default 'NCDHW'
Dimension ordering of data and weight. Only supports 'NCDHW' layout for now.
'N', 'C', 'H', 'W', 'D' stands for batch, channel, height, width and
depth dimensions respectively. padding is applied on 'D', 'H' and 'W'
dimension.
Inputs:
- **data**: 5D input tensor with shape
`(batch_size, in_channels, depth, height, width)` when `layout` is `NCDHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 5D output tensor with shape
`(batch_size, channels, 1, 1, 1)` when `layout` is `NCDHW`.
"""
def __init__(self, layout='NCDHW', **kwargs):
assert layout == 'NCDHW', "Only supports 'NCDHW' layout for now"
super(GlobalMaxPool3D, self).__init__(
(1, 1, 1), None, 0, True, True, 'max', **kwargs)
class GlobalAvgPool1D(_Pooling):
"""Global average pooling operation for temporal data.
Parameters
----------
layout : str, default 'NCW'
Dimension ordering of data and weight. Only supports 'NCW' layout for now.
'N', 'C', 'W' stands for batch, channel, and width (time) dimensions
respectively. padding is applied on 'W' dimension.
Inputs:
- **data**: 3D input tensor with shape `(batch_size, in_channels, width)`
when `layout` is `NCW`. For other layouts shape is permuted accordingly.
Outputs:
- **out**: 3D output tensor with shape `(batch_size, channels, 1)`.
"""
def __init__(self, layout='NCW', **kwargs):
assert layout == 'NCW', "Only supports 'NCW' layout for now"
super(GlobalAvgPool1D, self).__init__(
(1,), None, 0, True, True, 'avg', **kwargs)
class GlobalAvgPool2D(_Pooling):
"""Global average pooling operation for spatial data.
Parameters
----------
layout : str, default 'NCHW'
Dimension ordering of data and weight. Only supports 'NCHW' layout for now.
'N', 'C', 'H', 'W' stands for batch, channel, height, and width
dimensions respectively.
Inputs:
- **data**: 4D input tensor with shape
`(batch_size, in_channels, height, width)` when `layout` is `NCHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 4D output tensor with shape
`(batch_size, channels, 1, 1)` when `layout` is `NCHW`.
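Examples
--------
Commonly used to collapse the spatial dimensions before a Dense layer (a
sketch, assuming ``mxnet`` is importable as ``mx``):
>>> gap = nn.GlobalAvgPool2D()
>>> gap(mx.nd.random.uniform(shape=(8, 256, 7, 7))).shape
(8, 256, 1, 1)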
"""
def __init__(self, layout='NCHW', **kwargs):
assert layout == 'NCHW', "Only supports 'NCHW' layout for now"
super(GlobalAvgPool2D, self).__init__(
(1, 1), None, 0, True, True, 'avg', **kwargs)
class GlobalAvgPool3D(_Pooling):
"""Global average pooling operation for 3D data (spatial or spatio-temporal).
Parameters
----------
layout : str, default 'NCDHW'
Dimension ordering of data and weight. Only supports 'NCDHW' layout for now.
'N', 'C', 'H', 'W', 'D' stands for batch, channel, height, width and
depth dimensions respectively. padding is applied on 'D', 'H' and 'W'
dimension.
Inputs:
- **data**: 5D input tensor with shape
`(batch_size, in_channels, depth, height, width)` when `layout` is `NCDHW`.
For other layouts shape is permuted accordingly.
Outputs:
- **out**: 5D output tensor with shape
`(batch_size, channels, 1, 1, 1)` when `layout` is `NCDHW`.
"""
def __init__(self, layout='NCDHW', **kwargs):
assert layout == 'NCDHW', "Only supports 'NCDHW' layout for now"
super(GlobalAvgPool3D, self).__init__(
(1, 1, 1), None, 0, True, True, 'avg', **kwargs)
class ReflectionPad2D(HybridBlock):
r"""Pads the input tensor using the reflection of the input boundary.
Parameters
----------
padding: int
An integer padding size
Inputs:
- **data**: input tensor with the shape :math:`(N, C, H_{in}, W_{in})`.
Outputs:
- **out**: output tensor with the shape :math:`(N, C, H_{out}, W_{out})`, where
.. math::
H_{out} = H_{in} + 2 \cdot padding
W_{out} = W_{in} + 2 \cdot padding
Examples
--------
>>> m = nn.ReflectionPad2D(3)
>>> input = mx.nd.random.normal(shape=(16, 3, 224, 224))
>>> output = m(input)
"""
def __init__(self, padding=0, **kwargs):
super(ReflectionPad2D, self).__init__(**kwargs)
if isinstance(padding, numeric_types):
padding = (0, 0, 0, 0, padding, padding, padding, padding)
        assert len(padding) == 8
self._padding = padding
def hybrid_forward(self, F, x):
return F.pad(x, mode='reflect', pad_width=self._padding)
| apache-2.0 | -4,494,887,173,876,827,600 | 44.223717 | 100 | 0.62954 | false |
openproceedings/openproceedings-buildbot | pelicanconf.py | 1 | 1300 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
import os
AUTHOR = u'Andrea Zonca'
SITENAME = u'OpenProceedings'
SITEURL = ''
TIMEZONE = 'America/Los_Angeles'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
DISPLAY_CATEGORIES_ON_MENU = False
TRANSLATION_FEED_ATOM = None
PDF_PROCESSOR=True
BIBTEX_PROCESSOR=True
SOCIAL = None
# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
THEME = "themes/pelican-bootstrap3"
PLUGIN_PATH = "plugins"
import sys
sys.path.append(".")
import figshare_pdf
PLUGINS = ["pdf", figshare_pdf]
FIGSHARE_CLIENT_KEY = '541lmG0dcTUK0sjCWquPog'
FIGSHARE_CLIENT_SECRET = os.environ["FIGSHARE_CLIENT_SECRET"]
FIGSHARE_TOKEN_KEY = 'v7RkXTnx2bIbhuNR3pvc0wPMDbv7CxteYwMgdbJe05kAv7RkXTnx2bIbhuNR3pvc0w'
FIGSHARE_TOKEN_SECRET = os.environ["FIGSHARE_TOKEN_SECRET"]
FIGSHARE_CATEGORY_ID = 77 #applied computer science
FIGSHARE_BIBTEX_TEMPLATE = """@InProceedings{ %(tag)s-openproc-2013,
author = { %(authors)s },
title = { %(title)s },
booktitle = { Test Proceedings for OpenProceedings },
year = { 2013 },
editor = { Andrea Zonca },
doi = { %(doi)s },
url = { %(url)s }
}
"""
| bsd-3-clause | 4,804,524,927,491,711,000 | 25 | 89 | 0.716923 | false |
ActiveState/code | recipes/Python/577210_Script_partransformar_videos_compatibles/recipe-577210.py | 1 | 3937 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# avi2mp4.py
#
# Copyright 2010 Javier Rovegno Campos <tatadeluxe<at>gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
"""
Script to convert videos to be compatible with the MP4 player:
MP4 2GB FUJITEL 80MP4TV2 MP4-TV (AVIConverter_320X240_20FPS_EN Setup.exe)
Requires:
- ffmpeg - mencoder
Extras:
Splits the video into 10-minute parts
Adds subtitles if the file file_name.srt exists
Usage:
avi2mp4 file_name.avi
"""
import sys
import os
import getopt
import commands
# Variables
mp4dir = '' # Destination directory for converted videos
# e.g. /home/your_user/mp4/
seg_part = 10 * 60 # Part length: 10 min = 600 s
def main():
# parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
except getopt.error, msg:
print msg
print "for help use --help"
sys.exit(2)
# process options
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit(0)
# process arguments
for arg in args:
        process(arg) # process() is defined below
def process(arg):
basename, extension = os.path.splitext(arg)
filename = os.path.basename(basename)
##print basename, extension, filename
##video_info = commands.getoutput('avidemux2_cli --nogui --load "%s" --autoindex --info --quit'%(arg)).split()
    video_info = commands.getoutput('ffmpeg -i "%s"'%(arg)).split() # Quote the path to avoid
    try: # problems with paths containing spaces
        # Compute the video duration; -1 strips the trailing comma
duracion = video_info[video_info.index('Duration:')+1][:-1]
except ValueError:
raise ValueError, "Imposible determinar duración video"
hr, min, seg = duracion.split(':')
##print duracion
seg_total = float(hr)*3600 + float(min)*60 + float(seg)
##print seg_total
    npart = int(seg_total / seg_part) # Number of parts
    seg_part_final = seg_total - seg_part * npart # Final part holds the remainder
##print npart, seg_part_final
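    # Worked example with hypothetical values: duracion = '01:02:30' gives
    # seg_total = 3750.0 s, so npart = int(3750 / 600) = 6 full parts and
    # seg_part_final = 3750 - 600 * 6 = 150 s for the last chunk.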
is_ok = 0
end_seg = str(seg_part)
for i in range(npart+1):
init_seg = str(i * seg_part)
        if is_ok != 0: # If an error occurred while generating a part of the video
break
        elif i == npart: # Final part
end_seg = str(seg_part_final)
##print init_seg, end_seg, i
is_ok = os.system('mencoder "%s" -really-quiet \
-oac copy \
-ovc xvid -xvidencopts bitrate=687 \
-ss %s -endpos %s \
-sub "%s.srt" \
-ofps 20 \
-vf scale=320:240 \
-o "%s%s-0%s.avi"'
%(arg, init_seg, end_seg, basename, mp4dir, filename, i))
    if is_ok == 0:
        print 'Conversion completed successfully!\nCheck the files in %s'%(mp4dir)
    else:
        print 'Conversion failed!\nCheck the files in %s'%(mp4dir)
if __name__ == "__main__":
main()
| mit | -880,062,395,325,107,000 | 36.759615 | 114 | 0.585689 | false |
hoh/Hubbub | hubbub/drugstore/__init__.py | 1 | 1171 | # Copyright (c) 2014 "Hugo Herter"
# [http://hugoherter.com]
#
# This file is part of Hubbub.
#
# Hubbub is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from .models import Message
def is_dummy(message):
return message.startswith('?DUMMY:') \
or message.startswith('<FONT>?DUMMY:')
def store(message, buddy, received=True):
print('store... [{}]'.format(message))
m = Message()
m.length = len(message)
m.date = datetime.now()
m.dummy = is_dummy(message)
m.received = received
m.buddy = buddy
m.save()
| agpl-3.0 | 1,188,090,506,460,646,000 | 29.025641 | 74 | 0.70965 | false |
snazy2000/netbox | netbox/utilities/views.py | 1 | 27989 | from __future__ import unicode_literals
from collections import OrderedDict
from django_tables2 import RequestConfig
from django.conf import settings
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db import transaction, IntegrityError
from django.db.models import ProtectedError
from django.forms import CharField, Form, ModelMultipleChoiceField, MultipleHiddenInput, TypedChoiceField
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.template import TemplateSyntaxError
from django.urls import reverse
from django.utils.html import escape
from django.utils.http import is_safe_url
from django.utils.safestring import mark_safe
from django.views.generic import View
from extras.models import CustomField, CustomFieldValue, ExportTemplate, UserAction
from utilities.forms import BootstrapMixin, CSVDataField
from .error_handlers import handle_protectederror
from .forms import ConfirmationForm
from .paginator import EnhancedPaginator
class CustomFieldQueryset(object):
"""
Annotate custom fields on objects within a QuerySet.
"""
def __init__(self, queryset, custom_fields):
self.queryset = queryset
self.custom_fields = custom_fields
def __iter__(self):
for obj in self.queryset:
values_dict = {cfv.field_id: cfv.value for cfv in obj.custom_field_values.all()}
obj.custom_fields = OrderedDict([(field, values_dict.get(field.pk)) for field in self.custom_fields])
yield obj
class GetReturnURLMixin(object):
"""
Provides logic for determining where a user should be redirected after processing a form.
"""
default_return_url = None
def get_return_url(self, request, obj):
query_param = request.GET.get('return_url')
if query_param and is_safe_url(url=query_param, host=request.get_host()):
return query_param
elif obj.pk and hasattr(obj, 'get_absolute_url'):
return obj.get_absolute_url()
elif self.default_return_url is not None:
return reverse(self.default_return_url)
return reverse('home')
class ObjectListView(View):
"""
List a series of objects.
queryset: The queryset of objects to display
filter: A django-filter FilterSet that is applied to the queryset
filter_form: The form used to render filter options
table: The django-tables2 Table used to render the objects list
template_name: The name of the template
"""
queryset = None
filter = None
filter_form = None
table = None
template_name = None
def get(self, request):
model = self.queryset.model
object_ct = ContentType.objects.get_for_model(model)
if self.filter:
self.queryset = self.filter(request.GET, self.queryset).qs
# If this type of object has one or more custom fields, prefetch any relevant custom field values
custom_fields = CustomField.objects.filter(obj_type=ContentType.objects.get_for_model(model))\
.prefetch_related('choices')
if custom_fields:
self.queryset = self.queryset.prefetch_related('custom_field_values')
# Check for export template rendering
if request.GET.get('export'):
et = get_object_or_404(ExportTemplate, content_type=object_ct, name=request.GET.get('export'))
queryset = CustomFieldQueryset(self.queryset, custom_fields) if custom_fields else self.queryset
try:
response = et.to_response(context_dict={'queryset': queryset},
filename='netbox_{}'.format(model._meta.verbose_name_plural))
return response
except TemplateSyntaxError:
messages.error(request, "There was an error rendering the selected export template ({})."
.format(et.name))
# Fall back to built-in CSV export
elif 'export' in request.GET and hasattr(model, 'to_csv'):
headers = getattr(model, 'csv_headers', None)
output = ','.join(headers) + '\n' if headers else ''
output += '\n'.join([obj.to_csv() for obj in self.queryset])
response = HttpResponse(
output,
content_type='text/csv'
)
response['Content-Disposition'] = 'attachment; filename="netbox_{}.csv"'\
.format(self.queryset.model._meta.verbose_name_plural)
return response
# Provide a hook to tweak the queryset based on the request immediately prior to rendering the object list
self.queryset = self.alter_queryset(request)
# Compile user model permissions for access from within the template
perm_base_name = '{}.{{}}_{}'.format(model._meta.app_label, model._meta.model_name)
permissions = {p: request.user.has_perm(perm_base_name.format(p)) for p in ['add', 'change', 'delete']}
# Construct the table based on the user's permissions
table = self.table(self.queryset)
if 'pk' in table.base_columns and (permissions['change'] or permissions['delete']):
table.columns.show('pk')
# Apply the request context
paginate = {
'klass': EnhancedPaginator,
'per_page': request.GET.get('per_page', settings.PAGINATE_COUNT)
}
RequestConfig(request, paginate).configure(table)
context = {
'table': table,
'permissions': permissions,
'filter_form': self.filter_form(request.GET, label_suffix='') if self.filter_form else None,
'export_templates': ExportTemplate.objects.filter(content_type=object_ct),
}
context.update(self.extra_context())
return render(request, self.template_name, context)
def alter_queryset(self, request):
# .all() is necessary to avoid caching queries
return self.queryset.all()
def extra_context(self):
return {}
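# A minimal subclass sketch (the model, filter, and table names below are
# hypothetical):
#
# class SiteListView(ObjectListView):
#     queryset = Site.objects.select_related('region')
#     filter = SiteFilter
#     filter_form = SiteFilterForm
#     table = SiteTable
#     template_name = 'dcim/site_list.html'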
class ObjectEditView(GetReturnURLMixin, View):
"""
Create or edit a single object.
model: The model of the object being edited
form_class: The form used to create or edit the object
template_name: The name of the template
default_return_url: The name of the URL used to display a list of this object type
"""
model = None
form_class = None
template_name = 'utilities/obj_edit.html'
def get_object(self, kwargs):
# Look up object by slug or PK. Return None if neither was provided.
if 'slug' in kwargs:
return get_object_or_404(self.model, slug=kwargs['slug'])
elif 'pk' in kwargs:
return get_object_or_404(self.model, pk=kwargs['pk'])
return self.model()
def alter_obj(self, obj, request, url_args, url_kwargs):
# Allow views to add extra info to an object before it is processed. For example, a parent object can be defined
# given some parameter from the request URL.
return obj
def get(self, request, *args, **kwargs):
obj = self.get_object(kwargs)
obj = self.alter_obj(obj, request, args, kwargs)
# Parse initial data manually to avoid setting field values as lists
initial_data = {k: request.GET[k] for k in request.GET}
form = self.form_class(instance=obj, initial=initial_data)
return render(request, self.template_name, {
'obj': obj,
'obj_type': self.model._meta.verbose_name,
'form': form,
'return_url': self.get_return_url(request, obj),
})
def post(self, request, *args, **kwargs):
obj = self.get_object(kwargs)
obj = self.alter_obj(obj, request, args, kwargs)
form = self.form_class(request.POST, request.FILES, instance=obj)
if form.is_valid():
obj_created = not form.instance.pk
obj = form.save()
msg = 'Created ' if obj_created else 'Modified '
msg += self.model._meta.verbose_name
if hasattr(obj, 'get_absolute_url'):
msg = '{} <a href="{}">{}</a>'.format(msg, obj.get_absolute_url(), escape(obj))
else:
msg = '{} {}'.format(msg, escape(obj))
messages.success(request, mark_safe(msg))
if obj_created:
UserAction.objects.log_create(request.user, obj, msg)
else:
UserAction.objects.log_edit(request.user, obj, msg)
if '_addanother' in request.POST:
return redirect(request.path)
return_url = form.cleaned_data.get('return_url')
if return_url is not None and is_safe_url(url=return_url, host=request.get_host()):
return redirect(return_url)
else:
return redirect(self.get_return_url(request, obj))
return render(request, self.template_name, {
'obj': obj,
'obj_type': self.model._meta.verbose_name,
'form': form,
'return_url': self.get_return_url(request, obj),
})
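# A minimal subclass sketch (names are hypothetical):
#
# class SiteEditView(ObjectEditView):
#     model = Site
#     form_class = SiteForm
#     template_name = 'dcim/site_edit.html'
#     default_return_url = 'dcim:site_list'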
class ObjectDeleteView(GetReturnURLMixin, View):
"""
Delete a single object.
model: The model of the object being deleted
template_name: The name of the template
default_return_url: Name of the URL to which the user is redirected after deleting the object
"""
model = None
template_name = 'utilities/obj_delete.html'
def get_object(self, kwargs):
# Look up object by slug if one has been provided. Otherwise, use PK.
if 'slug' in kwargs:
return get_object_or_404(self.model, slug=kwargs['slug'])
else:
return get_object_or_404(self.model, pk=kwargs['pk'])
def get(self, request, **kwargs):
obj = self.get_object(kwargs)
form = ConfirmationForm(initial=request.GET)
return render(request, self.template_name, {
'obj': obj,
'form': form,
'obj_type': self.model._meta.verbose_name,
'return_url': self.get_return_url(request, obj),
})
def post(self, request, **kwargs):
obj = self.get_object(kwargs)
form = ConfirmationForm(request.POST)
if form.is_valid():
try:
obj.delete()
except ProtectedError as e:
handle_protectederror(obj, request, e)
return redirect(obj.get_absolute_url())
msg = 'Deleted {} {}'.format(self.model._meta.verbose_name, obj)
messages.success(request, msg)
UserAction.objects.log_delete(request.user, obj, msg)
return_url = form.cleaned_data.get('return_url')
if return_url is not None and is_safe_url(url=return_url, host=request.get_host()):
return redirect(return_url)
else:
return redirect(self.get_return_url(request, obj))
return render(request, self.template_name, {
'obj': obj,
'form': form,
'obj_type': self.model._meta.verbose_name,
'return_url': self.get_return_url(request, obj),
})
class BulkCreateView(View):
"""
Create new objects in bulk.
pattern_form: Form class which provides the `pattern` field
model_form: The ModelForm used to create individual objects
template_name: The name of the template
default_return_url: Name of the URL to which the user is redirected after creating the objects
"""
pattern_form = None
model_form = None
pattern_target = ''
template_name = None
default_return_url = 'home'
def get(self, request):
pattern_form = self.pattern_form()
model_form = self.model_form()
return render(request, self.template_name, {
'obj_type': self.model_form._meta.model._meta.verbose_name,
'pattern_form': pattern_form,
'model_form': model_form,
'return_url': reverse(self.default_return_url),
})
def post(self, request):
model = self.model_form._meta.model
pattern_form = self.pattern_form(request.POST)
model_form = self.model_form(request.POST)
if pattern_form.is_valid():
pattern = pattern_form.cleaned_data['pattern']
new_objs = []
try:
with transaction.atomic():
# Create objects from the expanded. Abort the transaction on the first validation error.
for value in pattern:
# Reinstantiate the model form each time to avoid overwriting the same instance. Use a mutable
# copy of the POST QueryDict so that we can update the target field value.
model_form = self.model_form(request.POST.copy())
model_form.data[self.pattern_target] = value
# Validate each new object independently.
if model_form.is_valid():
obj = model_form.save()
new_objs.append(obj)
else:
# Copy any errors on the pattern target field to the pattern form.
errors = model_form.errors.as_data()
if errors.get(self.pattern_target):
pattern_form.add_error('pattern', errors[self.pattern_target])
# Raise an IntegrityError to break the for loop and abort the transaction.
raise IntegrityError()
# If we make it to this point, validation has succeeded on all new objects.
msg = "Added {} {}".format(len(new_objs), model._meta.verbose_name_plural)
messages.success(request, msg)
UserAction.objects.log_bulk_create(request.user, ContentType.objects.get_for_model(model), msg)
if '_addanother' in request.POST:
return redirect(request.path)
return redirect(self.default_return_url)
except IntegrityError:
pass
return render(request, self.template_name, {
'pattern_form': pattern_form,
'model_form': model_form,
'obj_type': model._meta.verbose_name,
'return_url': reverse(self.default_return_url),
})
class BulkImportView(View):
"""
Import objects in bulk (CSV format).
model_form: The form used to create each imported object
table: The django-tables2 Table used to render the list of imported objects
template_name: The name of the template
default_return_url: The name of the URL to use for the cancel button
"""
model_form = None
table = None
default_return_url = None
template_name = 'utilities/obj_import.html'
def _import_form(self, *args, **kwargs):
fields = self.model_form().fields.keys()
required_fields = [name for name, field in self.model_form().fields.items() if field.required]
class ImportForm(BootstrapMixin, Form):
csv = CSVDataField(fields=fields, required_fields=required_fields)
return ImportForm(*args, **kwargs)
def _save_obj(self, obj_form):
"""
Provide a hook to modify the object immediately before saving it (e.g. to encrypt secret data).
"""
return obj_form.save()
def get(self, request):
return render(request, self.template_name, {
'form': self._import_form(),
'fields': self.model_form().fields,
'obj_type': self.model_form._meta.model._meta.verbose_name,
'return_url': self.default_return_url,
})
def post(self, request):
new_objs = []
form = self._import_form(request.POST)
if form.is_valid():
try:
# Iterate through CSV data and bind each row to a new model form instance.
with transaction.atomic():
for row, data in enumerate(form.cleaned_data['csv'], start=1):
obj_form = self.model_form(data)
if obj_form.is_valid():
obj = self._save_obj(obj_form)
new_objs.append(obj)
else:
for field, err in obj_form.errors.items():
form.add_error('csv', "Row {} {}: {}".format(row, field, err[0]))
raise ValidationError("")
# Compile a table containing the imported objects
obj_table = self.table(new_objs)
if new_objs:
msg = 'Imported {} {}'.format(len(new_objs), new_objs[0]._meta.verbose_name_plural)
messages.success(request, msg)
UserAction.objects.log_import(request.user, ContentType.objects.get_for_model(new_objs[0]), msg)
return render(request, "import_success.html", {
'table': obj_table,
'return_url': self.default_return_url,
})
except ValidationError:
pass
return render(request, self.template_name, {
'form': form,
'fields': self.model_form().fields,
'obj_type': self.model_form._meta.model._meta.verbose_name,
'return_url': self.default_return_url,
})
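# A minimal subclass sketch (names are hypothetical); model_form drives both
# the CSV field layout and per-row validation:
#
# class SiteBulkImportView(BulkImportView):
#     model_form = SiteCSVForm
#     table = SiteTable
#     default_return_url = 'dcim:site_list'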
class BulkEditView(View):
"""
Edit objects in bulk.
cls: The model of the objects being edited
parent_cls: The model of the parent object (if any)
queryset: Custom queryset to use when retrieving objects (e.g. to select related objects)
filter: FilterSet to apply when deleting by QuerySet
table: The table used to display devices being edited
form: The form class used to edit objects in bulk
template_name: The name of the template
default_return_url: Name of the URL to which the user is redirected after editing the objects (can be overridden by
POSTing return_url)
"""
cls = None
parent_cls = None
queryset = None
filter = None
table = None
form = None
template_name = 'utilities/obj_bulk_edit.html'
default_return_url = 'home'
    def get(self, request, **kwargs):
        return redirect(self.default_return_url)
def post(self, request, **kwargs):
# Attempt to derive parent object if a parent class has been given
if self.parent_cls:
parent_obj = get_object_or_404(self.parent_cls, **kwargs)
else:
parent_obj = None
# Determine URL to redirect users upon modification of objects
posted_return_url = request.POST.get('return_url')
if posted_return_url and is_safe_url(url=posted_return_url, host=request.get_host()):
return_url = posted_return_url
elif parent_obj:
return_url = parent_obj.get_absolute_url()
else:
return_url = reverse(self.default_return_url)
# Are we editing *all* objects in the queryset or just a selected subset?
if request.POST.get('_all') and self.filter is not None:
pk_list = [obj.pk for obj in self.filter(request.GET, self.cls.objects.only('pk')).qs]
else:
pk_list = [int(pk) for pk in request.POST.getlist('pk')]
if '_apply' in request.POST:
form = self.form(self.cls, request.POST)
if form.is_valid():
custom_fields = form.custom_fields if hasattr(form, 'custom_fields') else []
standard_fields = [field for field in form.fields if field not in custom_fields and field != 'pk']
# Update standard fields. If a field is listed in _nullify, delete its value.
nullified_fields = request.POST.getlist('_nullify')
fields_to_update = {}
for field in standard_fields:
if field in form.nullable_fields and field in nullified_fields:
if isinstance(form.fields[field], CharField):
fields_to_update[field] = ''
else:
fields_to_update[field] = None
elif form.cleaned_data[field] not in (None, ''):
fields_to_update[field] = form.cleaned_data[field]
updated_count = self.cls.objects.filter(pk__in=pk_list).update(**fields_to_update)
# Update custom fields for objects
if custom_fields:
objs_updated = self.update_custom_fields(pk_list, form, custom_fields, nullified_fields)
if objs_updated and not updated_count:
updated_count = objs_updated
if updated_count:
msg = 'Updated {} {}'.format(updated_count, self.cls._meta.verbose_name_plural)
messages.success(self.request, msg)
UserAction.objects.log_bulk_edit(request.user, ContentType.objects.get_for_model(self.cls), msg)
return redirect(return_url)
else:
initial_data = request.POST.copy()
initial_data['pk'] = pk_list
form = self.form(self.cls, initial=initial_data)
# Retrieve objects being edited
queryset = self.queryset or self.cls.objects.all()
table = self.table(queryset.filter(pk__in=pk_list), orderable=False)
if not table.rows:
messages.warning(request, "No {} were selected.".format(self.cls._meta.verbose_name_plural))
return redirect(return_url)
return render(request, self.template_name, {
'form': form,
'table': table,
'obj_type_plural': self.cls._meta.verbose_name_plural,
'return_url': return_url,
})
def update_custom_fields(self, pk_list, form, fields, nullified_fields):
obj_type = ContentType.objects.get_for_model(self.cls)
objs_updated = False
for name in fields:
field = form.fields[name].model
# Setting the field to null
if name in form.nullable_fields and name in nullified_fields:
# Delete all CustomFieldValues for instances of this field belonging to the selected objects.
CustomFieldValue.objects.filter(field=field, obj_type=obj_type, obj_id__in=pk_list).delete()
objs_updated = True
# Updating the value of the field
elif form.cleaned_data[name] not in [None, '']:
# Check for zero value (bulk editing)
if isinstance(form.fields[name], TypedChoiceField) and form.cleaned_data[name] == 0:
serialized_value = field.serialize_value(None)
else:
serialized_value = field.serialize_value(form.cleaned_data[name])
# Gather any pre-existing CustomFieldValues for the objects being edited.
existing_cfvs = CustomFieldValue.objects.filter(field=field, obj_type=obj_type, obj_id__in=pk_list)
# Determine which objects have an existing CFV to update and which need a new CFV created.
update_list = [cfv['obj_id'] for cfv in existing_cfvs.values()]
create_list = list(set(pk_list) - set(update_list))
# Creating/updating CFVs
if serialized_value:
existing_cfvs.update(serialized_value=serialized_value)
CustomFieldValue.objects.bulk_create([
CustomFieldValue(field=field, obj_type=obj_type, obj_id=pk, serialized_value=serialized_value)
for pk in create_list
])
# Deleting CFVs
else:
existing_cfvs.delete()
objs_updated = True
return len(pk_list) if objs_updated else 0
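# Usage sketch (illustrative only, not part of the original module): a
# hypothetical concrete subclass wiring BulkEditView to a model. Device,
# DeviceFilter, DeviceTable, DeviceBulkEditForm and the URL name
# 'dcim:device_list' are assumed names for illustration.
#
# class DeviceBulkEditView(BulkEditView):
#     cls = Device
#     filter = DeviceFilter
#     table = DeviceTable
#     form = DeviceBulkEditForm
#     default_return_url = 'dcim:device_list'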
class BulkDeleteView(View):
"""
Delete objects in bulk.
cls: The model of the objects being deleted
parent_cls: The model of the parent object (if any)
queryset: Custom queryset to use when retrieving objects (e.g. to select related objects)
filter: FilterSet to apply when deleting by QuerySet
    table: The table used to display objects being deleted
form: The form class used to delete objects in bulk
template_name: The name of the template
    default_return_url: Name of the URL to which the user is redirected after deleting the objects (can be overridden by
POSTing return_url)
"""
cls = None
parent_cls = None
queryset = None
filter = None
table = None
form = None
template_name = 'utilities/obj_bulk_delete.html'
default_return_url = 'home'
def post(self, request, **kwargs):
# Attempt to derive parent object if a parent class has been given
if self.parent_cls:
parent_obj = get_object_or_404(self.parent_cls, **kwargs)
else:
parent_obj = None
# Determine URL to redirect users upon deletion of objects
posted_return_url = request.POST.get('return_url')
if posted_return_url and is_safe_url(url=posted_return_url, host=request.get_host()):
return_url = posted_return_url
elif parent_obj:
return_url = parent_obj.get_absolute_url()
else:
return_url = reverse(self.default_return_url)
# Are we deleting *all* objects in the queryset or just a selected subset?
if request.POST.get('_all') and self.filter is not None:
pk_list = [obj.pk for obj in self.filter(request.GET, self.cls.objects.only('pk')).qs]
else:
pk_list = [int(pk) for pk in request.POST.getlist('pk')]
form_cls = self.get_form()
if '_confirm' in request.POST:
form = form_cls(request.POST)
if form.is_valid():
# Delete objects
queryset = self.cls.objects.filter(pk__in=pk_list)
try:
deleted_count = queryset.delete()[1][self.cls._meta.label]
except ProtectedError as e:
handle_protectederror(list(queryset), request, e)
return redirect(return_url)
msg = 'Deleted {} {}'.format(deleted_count, self.cls._meta.verbose_name_plural)
messages.success(request, msg)
UserAction.objects.log_bulk_delete(request.user, ContentType.objects.get_for_model(self.cls), msg)
return redirect(return_url)
else:
form = form_cls(initial={'pk': pk_list, 'return_url': return_url})
# Retrieve objects being deleted
queryset = self.queryset or self.cls.objects.all()
table = self.table(queryset.filter(pk__in=pk_list), orderable=False)
if not table.rows:
messages.warning(request, "No {} were selected for deletion.".format(self.cls._meta.verbose_name_plural))
return redirect(return_url)
return render(request, self.template_name, {
'form': form,
'parent_obj': parent_obj,
'obj_type_plural': self.cls._meta.verbose_name_plural,
'table': table,
'return_url': return_url,
})
def get_form(self):
"""
Provide a standard bulk delete form if none has been specified for the view
"""
class BulkDeleteForm(ConfirmationForm):
pk = ModelMultipleChoiceField(queryset=self.cls.objects.all(), widget=MultipleHiddenInput)
if self.form:
return self.form
return BulkDeleteForm
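# Usage sketch (illustrative): a minimal hypothetical subclass. With form left
# unset, get_form() above falls back to the standard confirmation form; Device
# and DeviceTable are assumed names.
#
# class DeviceBulkDeleteView(BulkDeleteView):
#     cls = Device
#     table = DeviceTable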
| apache-2.0 | 8,454,032,785,602,730,000 | 39.041488 | 120 | 0.596341 | false |
goiri/hadoopsimulator | simulator.py | 1 | 9394 | #!/usr/bin/env pypy
from commons import isRealistic
import math
import random
if not isRealistic():
random.seed(0)
from operator import attrgetter
from node import Node
from job import Job
from schedulerpolicy import SchedulerPolicy
from history import History
from history import HistoryViewer
import sys
from datetime import datetime
'''
Simulator. Author: Inigo, Cheng
'''
class Simulator(SchedulerPolicy):
def __init__(self, logfile='history.log'):
# Initialize the scheduler
SchedulerPolicy.__init__(self) # super()
self.t = 0
# Nodes
self.nodes = {}
# History
self.logfile = logfile
self.history = History(filename=self.logfile)
# Job submission
self.lastJobId = 1
# Simulation
self.maxTime = None
# Id for jobs
if sys.platform == 'win32':
self.trackerId = datetime.now().strftime('%Y%m%d%H%M')
else:
self.trackerId = datetime.now().strftime('%4Y%2m%2d%2H%2M')
# Specify if the nodes are sent to sleep when there's no load
self.nodeManagement = True
# Outputs
self.energy = None
# Step length
self.STEP = 1
# Submit a job to run
def addJob(self, job):
# Assign automatic job id
if job.jobId == None:
while 'job_%s_%04d' % (self.trackerId, self.lastJobId) in self.jobs:
self.lastJobId += 1
job.jobId = 'job_%s_%04d' % (self.trackerId, self.lastJobId)
# Initialize tasks
job.initTasks()
# Save the information
self.jobs[job.jobId] = job
self.jobsQueue.append(job.jobId)
# Sort the queue according to submission order
self.jobsQueue = sorted(self.jobsQueue, cmp=self.schedulingPolicy)
return job.jobId
# Check if there is any idle node for reduces
def getIdleNodeMap(self):
for nodeId in sorted(self.nodes):
node = self.nodes[nodeId]
if node.status == 'ON' and len(node.maps) < node.numMaps:
return node
return None
def getIdleNodesMap(self):
ret = []
for nodeId in sorted(self.nodes):
node = self.nodes[nodeId]
if node.status == 'ON' and len(node.maps) < node.numMaps:
ret.append(node)
return ret
# Check if there is any idle node for reduces
def getIdleNodeRed(self):
for nodeId in sorted(self.nodes):
node = self.nodes[nodeId]
if node.status == 'ON' and len(node.reds) < node.numReds:
return node
return None
def getIdleNodesRed(self):
ret = []
for nodeId in sorted(self.nodes):
node = self.nodes[nodeId]
if node.status == 'ON' and len(node.reds) < node.numReds:
ret.append(node)
return ret
def getWakingNodes(self):
ret = 0
for nodeId in self.nodes:
node = self.nodes[nodeId]
if node.status.startswith('WAKING-'):
ret += 1
return ret
# Get a queued map
def getMapTask(self):
for jobId in self.jobsQueue:
job = self.jobs[jobId]
if self.t >= job.submit:
mapTask = job.getMapTask()
if mapTask != None:
return mapTask
return None
# Get a queued reduce
def getRedTask(self):
for jobId in self.jobsQueue:
job = self.jobs[jobId]
if self.t >= job.submit:
redTask = job.getRedTask()
if redTask != None:
return redTask
return None
# Check if there is a map queued
def mapQueued(self):
ret = 0
for jobId in self.jobsQueue:
job = self.jobs[jobId]
if self.t >= job.submit:
ret += job.mapQueued()
return ret
# Check if the node is required: running job or providing data for a job
def isNodeRequired(self, nodeId):
node = self.nodes[nodeId]
# Check if the node is in the covering subset (data) or is running
if node.covering or node.isRunning():
return True
# Check if it has executed tasks from active tasks
for jobId in self.jobsQueue:
job = self.jobs[jobId]
if job.isRunning() and nodeId in job.getNodes():
return True
return False
# Check if there is a reduce queued
def redQueued(self):
ret = 0
for jobId in self.jobsQueue:
job = self.jobs[jobId]
if self.t >= job.submit:
ret += job.redQueued()
return ret
def getNodesUtilization(self):
utilizations = []
for nodeId in self.nodes:
node = self.nodes[nodeId]
if node.status == 'ON':
utilization = 1.0*len(node.maps)/node.numMaps
utilizations.append(utilization)
return sum(utilizations)/len(utilizations) if len(utilizations)>0 else 1.0
def getNodesRunning(self):
ret = 0
for nodeId in self.nodes:
node = self.nodes[nodeId]
if node.status == 'ON':
ret += 1
return ret
# Energy in Wh
def getEnergy(self):
# J = Ws -> Wh
return self.energy/3600.0
# Average time to run per job in seconds
def getPerformance(self):
ret = None
if len(self.jobs) > 0:
ret = 0.0
for jobId in self.jobs:
job = self.jobs[jobId]
ret += job.getFinish()
ret = ret / len(self.jobs)
return ret
# Average quality per job in %
def getQuality(self):
ret = []
for jobId in self.jobs:
job = self.jobs[jobId]
ret.append(job.getQuality())
return sum(ret)/len(ret) if len(ret)>0 else 0.0
def isTimeLimit(self):
return not (self.maxTime==None or self.t < self.maxTime)
# Run simulation
def run(self):
self.energy = 0.0
# Log initial node status
for nodeId in self.nodes:
node = self.nodes[nodeId]
self.history.logNodeStatus(self.t, node)
# Iterate every X seconds
while len(self.jobsQueue) > 0 and not self.isTimeLimit():
# Run running tasks
# =====================================================
completedAttempts = []
for node in self.nodes.values():
completedAttempts += node.progress(self.STEP) # progress 1 second at a time
# Mark completed maps
completedJobs = []
for attempt in completedAttempts:
attempt.finish = self.t
# Check if we finish the jobs
completedJobs += attempt.getJob().completeAttempt(attempt)
# Log
self.history.logAttempt(attempt)
for job in completedJobs:
job.finish = self.t
job.status = Job.Status.SUCCEEDED
# Update queues
self.jobsQueue.remove(job.jobId)
self.jobsDone.append(job.jobId)
# Log
self.history.logJob(job)
# Check which nodes are available to run tasks
# =====================================================
# Maps
while self.mapQueued()>0 and self.getIdleNodeMap() != None:
# Get a map that needs to be executed and assign it to a node
idleNode = self.getIdleNodeMap()
# TODO policy to decide when to approximate
#mapAttempt = self.getMapTask(approx=True if self.getNodesUtilization() > 1.8 else False)
mapAttempt = self.getMapTask()
mapAttempt.start = self.t
if mapAttempt.getJob().isMapDropping():
mapAttempt.drop()
mapAttempt.finish = self.t
mapAttempt.approx = False
completedJobs += mapAttempt.getJob().dropAttempt(mapAttempt)
# Log
self.history.logAttempt(mapAttempt)
else:
# Start running in a node
idleNode.assignMap(mapAttempt)
# Reduces
while self.redQueued()>0 and self.getIdleNodeRed() != None:
# Get a map that needs to be executed and assign it to a node
idleNode = self.getIdleNodeRed()
redAttempt = self.getRedTask()
redAttempt.start = self.t
if redAttempt.getJob().isRedDropping():
redAttempt.drop()
redAttempt.finish = self.t
# Log
self.history.logAttempt(redAttempt)
else:
idleNode.assignRed(redAttempt)
# Node management
# =====================================================
# Check if we need less nodes. Idle nodes.
if self.nodeManagement:
lessNodes = 0
lessNodes = min(len(self.getIdleNodesMap()), len(self.getIdleNodesRed()))
# Check if we need more nodes. Size of the queues.
moreNodes = 0
if lessNodes == 0:
moreNodesMaps = math.ceil(1.0*self.mapQueued() / 3) - self.getWakingNodes()
moreNodesReds = math.ceil(self.redQueued() / 1) - self.getWakingNodes()
moreNodes = max(moreNodesMaps, moreNodesReds, 0)
# Change node status
for node in self.nodes.values():
if node.status == 'ON' and not self.isNodeRequired(node.nodeId) and lessNodes > 0:
lessNodes -= 1
seconds = node.timeSleep
if isRealistic():
seconds = random.gauss(seconds, 0.1*seconds) #+/-10%
node.status = 'SLEEPING-%d' % seconds
self.history.logNodeStatus(self.t, node)
elif node.status == 'SLEEP' and moreNodes > 0:
moreNodes -= 1
seconds = node.timeWake
if isRealistic():
seconds = random.gauss(seconds, 0.1*seconds) #+/-10%
node.status = 'WAKING-%d' % seconds
self.history.logNodeStatus(self.t, node)
# Transition status
elif node.status.startswith('SLEEPING-'):
seconds = int(node.status[len('SLEEPING-'):]) - 1
if seconds <= 0:
node.status = 'SLEEP'
self.history.logNodeStatus(self.t, node)
else:
node.status = 'SLEEPING-%d' % seconds
elif node.status.startswith('WAKING-'):
seconds = int(node.status[len('WAKING-'):]) - 1
if seconds <= 0:
node.status = 'ON'
self.history.logNodeStatus(self.t, node)
else:
node.status = 'WAKING-%d' % seconds
# Account for power
power = 0.0
for node in self.nodes.values():
power += node.getPower()
self.history.logPower(self.t, power)
self.energy += 1.0*power # s x W = J
# Progress to next period
self.t += self.STEP
# Log final output
if self.logfile != None:
self.history.close()
viewer = HistoryViewer(self.history.getFilename())
viewer.generate()
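# Usage sketch (hypothetical): the Node and Job constructor arguments below
# are assumptions for illustration; only Simulator's own methods come from
# the class above.
#
# sim = Simulator(logfile='history.log')
# sim.nodes['node0'] = Node(...)   # populate the cluster
# sim.addJob(Job(...))             # queue a job
# sim.run()
# print sim.getEnergy(), sim.getPerformance(), sim.getQuality()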
| apache-2.0 | 5,428,418,715,652,537,000 | 27.21021 | 93 | 0.653928 | false |
garcia/simfile | simfile/tests/test_simfile.py | 1 | 5138 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement, unicode_literals
import codecs
import decimal
from fractions import Fraction
import os
import unittest
import simfile
from simfile.timing import BeatValues
from simfile.sm import *
def get_simfile(filename, cache={}):
if not filename in cache:
cache[filename] = simfile.open(os.path.join('testdata', filename))
return cache[filename]
def get_default_chart():
return SMChart.from_str(
'dance-single:'
'Brackets:'
'Edit:'
'12:'
'0.793,1.205,0.500,0.298,0.961:'
'0000\n0000\n0000\n0000'
)
class TestChart(unittest.TestCase):
def test_init(self):
chart = get_default_chart()
self.assertEqual(chart.stepstype, 'dance-single')
self.assertEqual(chart.description, 'Brackets')
self.assertEqual(chart.difficulty, 'Edit')
self.assertEqual(chart.meter, '12')
self.assertEqual(chart.radarvalues, '0.793,1.205,0.500,0.298,0.961')
self.assertEqual(chart.notes, '0000\n0000\n0000\n0000')
def test_repr(self):
chart = get_default_chart()
self.assertEqual(repr(chart), '<SMChart: dance-single Edit 12>')
class TestCharts(unittest.TestCase):
def test_repr(self):
charts = get_simfile('Tribal Style.sm').charts
repr_charts = repr(charts)
self.assertTrue(repr_charts.startswith('SMCharts([<SMChart:'))
self.assertTrue(repr_charts.endswith('>])'))
class TestBeatEvents(unittest.TestCase):
def test_bpms(self):
bpms = BeatValues.from_str(get_simfile('Robotix.sm')['BPMS'])
self.assertIsInstance(bpms, BeatValues)
self.assertEqual(bpms[0][0], 0)
self.assertEqual(bpms[0][1], 150)
self.assertEqual(bpms[1][0], 144)
self.assertEqual(bpms[1][1], decimal.Decimal('170.001'))
def test_stops(self):
stops = BeatValues.from_str(get_simfile('Robotix.sm')['STOPS'])
self.assertIsInstance(stops, BeatValues)
self.assertEqual(stops[0][0], 313)
self.assertEqual(stops[0][1], decimal.Decimal('0.400'))
self.assertEqual(stops[1][0], 344)
self.assertEqual(stops[1][1], decimal.Decimal('0.400'))
class TestSimfile(unittest.TestCase):
def test_init_file(self):
with open('testdata/Tribal Style.sm', 'r', encoding='utf-8') as infile:
sm = SMSimfile(file=infile)
# Check that basic properties were parsed
self.assertEqual(sm['TITLE'], 'Tribal Style')
self.assertEqual(sm['ARTIST'], 'KaW')
def test_from_string(self):
with codecs.open('testdata/Tribal Style.sm', 'r', 'utf-8') as infile:
sm1 = SMSimfile(file=infile)
# String input should be identical to filename input
with codecs.open('testdata/Tribal Style.sm', 'r', 'utf-8') as infile:
sm2 = SMSimfile(string=infile.read())
self.assertEqual(sm1, sm2)
# Empty string argument is valid
blank = SMSimfile(string='')
self.assertEqual(blank, simfile.loads(''))
def test_eq(self):
# Equality is indirectly tested in other methods, but it has subtleties
# that need to be specifically tested that don't fit in elsewhere.
sm = simfile.loads('#TITLE:A;#SUBTITLE:B;')
sm_outside_ws = simfile.loads(' #TITLE:A;\r\n\t#SUBTITLE:B; \r\n\r\n')
sm_inside_ws = simfile.loads('#TITLE:\tA\n;#\r\rSUBTITLE:\nB\t\n;')
sm_order = simfile.loads('#SUBTITLE:B;#TITLE:A;')
sm_identifier_case = simfile.loads('#Title:A;#subtitle:B;')
sm_value_case = simfile.loads('#TITLE:a;#SUBTITLE:b;')
sm_chart = simfile.loads('#TITLE:A;#SUBTITLE:B;#NOTES::::1::;')
sm_chart_2 = simfile.loads('#TITLE:A;#SUBTITLE:B;#NOTES::::2::;')
self.assertEqual(sm, sm_outside_ws)
self.assertNotEqual(sm, sm_inside_ws)
self.assertNotEqual(sm, sm_order)
self.assertEqual(sm, sm_identifier_case)
self.assertNotEqual(sm, sm_value_case)
self.assertNotEqual(sm, sm_chart)
self.assertNotEqual(sm_chart, sm_chart_2)
def test_parameter_properties(self):
sm = get_simfile('Tribal Style.sm')
self.assertEqual(sm['TITLE'], 'Tribal Style')
self.assertEqual(sm['SUBTITLE'], '')
self.assertEqual(sm['ARTIST'], 'KaW')
self.assertEqual(sm['SAMPLESTART'], '41.060')
self.assertEqual(sm['SAMPLELENGTH'], '13.840')
self.assertIsInstance(BeatValues.from_str(sm['BPMS']), BeatValues)
self.assertIsInstance(BeatValues.from_str(sm['STOPS']), BeatValues)
def test_repr(self):
sm = get_simfile('Tribal Style.sm', {})
self.assertEqual(repr(sm), '<SMSimfile: Tribal Style>')
sm['SUBTITLE'] = 'Subtitle'
self.assertEqual(repr(sm), '<SMSimfile: Tribal Style Subtitle>')
sm['SUBTITLE'] = '(Subtitle)'
self.assertEqual(repr(sm), '<SMSimfile: Tribal Style (Subtitle)>')
del sm['SUBTITLE']
del sm['TITLE']
self.assertEqual(repr(sm), '<SMSimfile>')
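# Usage sketch (mirrors the helpers above): reading a simfile directly.
#   sm = simfile.open('testdata/Tribal Style.sm')
#   sm['TITLE']   # -> 'Tribal Style'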
if __name__ == '__main__':
unittest.main()
| mit | 1,968,587,014,820,314,400 | 36.231884 | 79 | 0.628455 | false |
fizyk/pyramid_localize | src/pyramid_localize/tools.py | 1 | 3166 | # Copyright (c) 2013-2014 by pyramid_localize authors and contributors <see AUTHORS file>
#
# This module is part of pyramid_localize and is released under
# the MIT License (MIT): http://opensource.org/licenses/MIT
"""methods in this module are tools, thank to which pyramid_localize works most of its magic."""
import sys
import os
import logging
from translationstring import _interp_regex
from pyramid.i18n import make_localizer
from pyramid.i18n import TranslationString
from pyramid.asset import resolve_asset_spec
from pyramid.path import package_path
from pyramid.interfaces import ILocalizer
from pyramid.interfaces import ITranslationDirectories
log = logging.getLogger(__name__)
def set_localizer(request, reset=False):
"""
Set localizer and auto_translate methods for request.
:param pyramid.request.Request request: request object
:param bool reset: flag that directs resetting localizer within app
"""
if reset:
for locale in request.registry["localize"]["locales"]["available"]:
log.debug("Resetting %s localizator", locale)
tdirs = request.registry.queryUtility(ITranslationDirectories, default=[])
localizer = make_localizer(locale, tdirs)
request.registry.registerUtility(localizer, ILocalizer, name=locale)
def auto_translate(*args, **kwargs):
# lets pass default domain, so we don't have to determine it with
# each _() function in apps.
if len(args) <= 1 and "domain" not in kwargs:
kwargs["domain"] = request.registry["localize"]["domain"]
# unlike in examples we use TranslationString, to make sure we always
# use appropriate domain
return request.localizer.translate(TranslationString(*args, **kwargs))
request._ = auto_translate
def destination_path(request):
"""
Return absolute path of the translation destination.
:param pyramid.request.Request request: a request object
:returns: A combined translation destination path
:rtype: str
"""
package_name, filename = resolve_asset_spec(request.registry["localize"]["translation"]["destination"])
if package_name is None: # absolute filename
directory = filename
else:
__import__(package_name)
package = sys.modules[package_name]
directory = os.path.join(package_path(package), filename)
return directory
def dummy_autotranslate(msgid, domain=None, default=None, mapping=None): # pylint:disable=unused-argument
"""
Simulate autotranslate.
:param str msgid: Message or message id
:param str domain: Translation domain
:param str default: Default message
:param dict mapping: Mapping dictionary for message variables
:returns: *translated* string
:rtype: str
"""
# Try to return defaults first:
tstr = None
if default:
tstr = default
else:
tstr = msgid
if mapping and tstr:
def replace(match):
whole, param1, param2 = match.groups()
return str(mapping.get(param1 or param2, whole))
tstr = _interp_regex.sub(replace, tstr)
return tstr
| mit | 6,348,952,720,227,318,000 | 31.639175 | 107 | 0.692356 | false |
gdin-netmanage/pynm | htmlresolve.py | 1 | 1090 | from bs4 import BeautifulSoup
# from list[43] is the real list
def resolve_list(raw_list):
start = 0
end = 6
lists = []
soup = BeautifulSoup(raw_list, 'html.parser')
soup_list = soup.text.split()
soup_list = soup_list[43:-3]
#soup_tuple = tuple(soup_list)
for msg in soup_list:
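        # '西区' ("West District") marks tokens that were split across list
        # entries; rejoin them with the following token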
if '西区' in msg:
n = soup_list.index(msg)
soup_list[n] = soup_list[n] + soup_list[n + 1]
del soup_list[n + 1]
#print(soup_list)
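    # collect the first 6 of every 8 tokens; the trailing 2 per record are skipped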
while end < len(soup_list):
_list = []
for msg in soup_list[start:end]:
_list.append(msg)
lists.append(_list)
start += 8
end += 8
# pass
return lists
def resolve_detail(raw_detail, name):
soup = BeautifulSoup(raw_detail, 'html.parser')
#print(soup.findAll('a'))
for value in soup.findAll('a'):
if name in value:
return value.get('href')
def get_phone_num(num_text):
soup = BeautifulSoup(num_text, 'html.parser')
raw_phone_msg = soup.find(id='handset')
return raw_phone_msg.get('value')
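# Usage sketch (follows directly from the parsing above):
#   get_phone_num('<input id="handset" value="13800000000"/>')
#   -> '13800000000'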
| gpl-2.0 | 3,583,873,395,166,290,400 | 26.846154 | 58 | 0.570902 | false |
nickgentoo/scikit-learn-graph | skgraph/utils/countminsketch_TABLESrandomprojectionNEWLinear.py | 1 | 7826 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 7 16:23:52 2016
Copyright 2015 Nicolo' Navarin
This file is part of count-mean-sketch based on https://github.com/rafacarrascosa/countminsketch.
count-mean-sketch is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
count-mean-sketch is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with count-mean-sketch. If not, see <http://www.gnu.org/licenses/>.
"""
import hashlib
import os
#import array
import itertools
import numpy as np
import string
from numpy import median
import numpy.matlib
import copy
from itertools import izip
from numpy import random, sqrt, log, sin, cos, pi
from scipy.sparse import csr_matrix, linalg
#from joblib import Parallel, delayed
#import multiprocessing
import scipy
import tables as tb
def processInput(i, m, rs):
numpy.random.seed(i + (rs * 10000))
v = numpy.random.normal(0, 1, m)
v = numpy.multiply(sqrt(m), v)
row = [idx for idx in xrange(m)]
col = [i for idx in xrange(m)]
data = v
return (row, col, data)
class CountMinSketch(object):
"""
A class for counting hashable items using the Count-min Sketch strategy.
It fulfills a similar purpose than `itertools.Counter`.
The Count-min Sketch is a randomized data structure that uses a constant
amount of memory and has constant insertion and lookup times at the cost
of an arbitrarily small overestimation of the counts.
It has two parameters:
- `m` the size of the hash tables, larger implies smaller overestimation
- `d` the amount of hash tables, larger implies lower probability of
overestimation.
    An example usage of the upstream counting API (note: this variant exposes
    transform()/removetmp() for random projection instead of add()/lookup):
from countminsketch import CountMinSketch
sketch = CountMinSketch(1000, 10) # m=1000, d=10
sketch.add("oh yeah")
sketch.add(tuple())
sketch.add(1, value=123)
print sketch["oh yeah"] # prints 1
print sketch[tuple()] # prints 1
print sketch[1] # prints 123
print sketch["non-existent"] # prints 0
Note that this class can be used to count *any* hashable type, so it's
possible to "count apples" and then "ask for oranges". Validation is up to
the user.
"""
def __init__(self, m, samplesize,rs):
""" sizes is an array of hash dimensions.
"""
if not m:
raise ValueError("Table size (m) and amount of hash functions (d)"
" must be non-zero")
self.n = 0
self.m=m
self.samplesize=samplesize
self.rs=rs
self.mus=numpy.asarray([0.0] *m).reshape(self.m,1)
print "mus", self.mus.shape
#self.tables = numpy.matlib.zeros(shape=(m,samplesize))
#self.tables=numpy.random.normal(size=(m,samplesize))
# for _ in xrange(d):
# table = array.array("d", (0.0 for _ in xrange(m)))
# self.tables.append(table)
#inizialize projection matrix
import random as rnd
#numpy.random.seed(self.rs * 10000)
filename=''.join(rnd.choice(string.ascii_uppercase + string.digits) for _ in range(16))
#filename= "test"
self.filename=filename+'.h5'
h5file = tb.open_file(self.filename, mode='w', title="Random Projection Matrix")
root = h5file.root
self.x = h5file.create_carray(root, 'x', tb.Float64Atom(), shape=(self.samplesize, self.m))
print "generating matrix of shape", self.samplesize, self.m
for i in range(self.samplesize):
numpy.random.seed(i + (self.rs * 10000))
#v = numpy.random.normal(0, 1, self.m)
self.x[i, :self.m] = numpy.random.normal(0, 1, self.m) # Now put in some data
print "Random projection matrix saved on file", filename+'.h5'
def transform(self, vector):
#mus is a vector of the means
#print "example size", vector.shape
#print "transformation size", self.tables.shape
#tables=csr_matrix ((self.m,self.samplesize))
#num_cores = multiprocessing.cpu_count()
indices=vector.nonzero()[0]
#print vector.shape
norm=scipy.sparse.linalg.norm(vector,1)
#print norm
# results = Parallel(n_jobs=num_cores)(delayed(processInput)(i,self.m,self.rs) for i in indices)
# parrow = []
# parcol = []
# pardata = []
# for (row,col,v) in results:
# parrow.extend(row)
# parcol.extend(col)
# pardata.extend(v)
row=[]
col=[]
data=[]
data_nobias=[]
vbias=[]
#print indices
#print indices
#RPM=self.x[indices,:self.m]
#print RPM
data_nobias=self.x[indices,:self.m].ravel()
#data_nobias=list(itertools.chain.from_iterable([self.x[i,:self.m] for i in indices]))
#print data_nobias
data=np.tile(numpy.multiply(norm, self.mus).ravel(),len(indices))
#data=list(itertools.chain.from_iterable([numpy.multiply(norm, self.mus).ravel()]*len(indices)))
#print data
row=np.tile(range(self.m),len(indices))
#row=range(self.m)*len(indices)
#print row
col=np.repeat(indices, self.m)
#col=np.tile([i]* self.m,len(indices))
#col=list(itertools.chain.from_iterable([[i]* self.m for i in indices]))
#print col
# print data_nobias
# for i in indices:
# #numpy.random.seed(i+(self.rs*10000))
# v=self.x[i,:self.m].reshape(self.m,1)
# #v=numpy.multiply(sqrt(self.m),v).reshape(self.m,1)
# #print "v", v.shape
# #print "munorm", (self.mus*norm).shape
# #vbias.extend(numpy.multiply(norm, self.mu))
# #print "vbias", vbias.shape
# row.extend(range(self.m))
# col.extend([i]* self.m)
# data.extend(numpy.multiply(norm, self.mus).ravel()) #considero il bias
# data_nobias.extend(v.ravel())
#print data
tables_nobias=csr_matrix ((data_nobias,(row,col)), shape=(self.m,self.samplesize))
tables_nobias=scipy.sparse.csr_matrix.multiply(tables_nobias,sqrt(self.m))
#vbias.extend(numpy.multiply(norm,self.mu))
toadd=csr_matrix ((data,(row,col)), shape=(self.m,self.samplesize))
tables=tables_nobias+ toadd #csr_matrix ((data,(row,col)), shape=(self.m,self.samplesize))
transformation= np.multiply(tables,vector).todense()
#print transformation.shape
#assert(parrow==row)
#assert(parcol==col)
#assert(pardata==data)
        # TODO: return a vector whose i-th entry is (1 - tanh(R_i * phi(g) + norm * mu_i)^2) * norm;
        # then multiply each entry by y * w_i to get the gradient
#self.norm=norm
#val2= self.norm*self.mus
#print "val2", val2.shape
#print "tablesnobias", tables_nobias.shape
#print "vector", vector.shape
#self.Rphix= (np.multiply(tables_nobias,vector)).todense()
#val3=self.Rphix+val2
#print "val3",val3.shape
#ones = np.ones(self.m).reshape(self.m,1)
#print "ones", ones.shape
#derivative= np.multiply((ones-numpy.square(val3)),norm)
#print derivative
return transformation # Probably I'll need to return v (to compute the bs)
def removetmp(self):
os.remove(self.filename)
print "removed temporary file"
| gpl-3.0 | -4,687,570,474,477,212,000 | 36.806763 | 104 | 0.622412 | false |
kstreee/infer | infer/lib/python/inferlib/capture/make.py | 1 | 2445 | # Copyright (c) 2015 - present Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import logging
import os
import subprocess
import traceback
import util
from inferlib import config, utils
MODULE_NAME = 'make/cc/clang/gcc'
MODULE_DESCRIPTION = '''Run analysis of code built with commands like:
make [target]
clang [compiler_options] <filename>
gcc [compiler_options] <filename>
cc [compiler_options] <filename>
Analysis examples:
infer -- make all
infer -- clang -c srcfile.m
infer -- gcc -c srcfile.c'''
def gen_instance(*args):
return MakeCapture(*args)
create_argparser = \
util.clang_frontend_argparser(MODULE_DESCRIPTION, MODULE_NAME)
class MakeCapture:
def __init__(self, args, cmd):
self.args = args
self.cmd = [os.path.basename(cmd[0])] + cmd[1:]
def get_envvars(self):
env_vars = dict(os.environ)
wrappers_path = config.WRAPPERS_DIRECTORY
env_vars['INFER_OLD_PATH'] = env_vars['PATH']
env_vars['PATH'] = '{wrappers}{sep}{path}'.format(
wrappers=wrappers_path,
sep=os.pathsep,
path=env_vars['PATH'],
)
frontend_env_vars = util.get_clang_frontend_envvars(self.args)
env_vars.update(frontend_env_vars)
return env_vars
def capture(self):
try:
env = self.get_envvars()
logging.info('Running command %s with env:\n%s' % (self.cmd, env))
subprocess.check_call(self.cmd, env=env)
capture_dir = os.path.join(self.args.infer_out, 'captured')
if len(os.listdir(capture_dir)) < 1:
# Don't return with a failure code unless we're
# running make. It could be normal to have captured
# nothing (eg, empty source file). Further output will
# alert the user that there was nothing to analyze.
if self.cmd[0] == 'make':
# reuse code from gradle, etc. integration
return util.run_compilation_commands([], 'make clean')
return os.EX_OK
except subprocess.CalledProcessError as exc:
if self.args.debug:
traceback.print_exc()
return exc.returncode
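# Usage sketch (hypothetical; assumes create_argparser above is a standard
# argparse parser instance as returned by the frontend factory):
#
# args = create_argparser.parse_args([])        # defaults; illustrative only
# gen_instance(args, ['make', 'all']).capture()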
| bsd-3-clause | 5,882,811,356,160,687,000 | 32.040541 | 78 | 0.625767 | false |
bgris/ODL_bgris | lib/python3.5/site-packages/spyder/config/main.py | 1 | 30624 | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder configuration options
Note: Leave this file free of Qt related imports, so that it can be used to
quickly load a user config file
"""
import os
import sys
import os.path as osp
# Local import
from spyder.config.base import (CHECK_ALL, EXCLUDED_NAMES, get_home_dir,
SUBFOLDER, TEST)
from spyder.config.fonts import BIG, MEDIUM, MONOSPACE, SANS_SERIF
from spyder.config.user import UserConfig
from spyder.config.utils import IMPORT_EXT
from spyder.utils import codeanalysis
#==============================================================================
# Main constants
#==============================================================================
# Find in files exclude patterns
EXCLUDE_PATTERNS = [r'\.pyc$|\.pyo$|\.orig$|\.hg|\.svn|\bbuild\b',
r'\.pyc$|\.pyo$|\.orig$|\.hg|\.svn']
# Extensions that should be visible in Spyder's file/project explorers
SHOW_EXT = ['.py', '.ipynb', '.txt', '.dat', '.pdf', '.png', '.svg']
# Extensions supported by Spyder (Editor or Variable explorer)
USEFUL_EXT = IMPORT_EXT + SHOW_EXT
# Name filters for file/project explorers (excluding files without extension)
NAME_FILTERS = ['README', 'INSTALL', 'LICENSE', 'CHANGELOG'] + \
['*' + _ext for _ext in USEFUL_EXT if _ext]
# Port used to detect if there is a running instance and to communicate with
# it to open external files
OPEN_FILES_PORT = 21128
# OS Specific
WIN = os.name == 'nt'
MAC = sys.platform == 'darwin'
CTRL = "Meta" if MAC else "Ctrl"
# Run cell shortcuts
if sys.platform == 'darwin':
RUN_CELL_SHORTCUT = 'Meta+Return'
else:
RUN_CELL_SHORTCUT = 'Ctrl+Return'
RUN_CELL_AND_ADVANCE_SHORTCUT = 'Shift+Return'
# =============================================================================
# Defaults
# =============================================================================
DEFAULTS = [
('main',
{
'icon_theme': 'spyder 3',
'single_instance': True,
'open_files_port': OPEN_FILES_PORT,
'tear_off_menus': False,
'high_dpi_scaling': False,
'vertical_dockwidget_titlebars': False,
'vertical_tabs': False,
'animated_docks': True,
'prompt_on_exit': False,
'panes_locked': True,
'window/size': (1260, 740),
'window/position': (10, 10),
'window/is_maximized': True,
'window/is_fullscreen': False,
'window/prefs_dialog_size': (745, 411),
'show_status_bar': True,
'memory_usage/enable': True,
'memory_usage/timeout': 2000,
'cpu_usage/enable': False,
'cpu_usage/timeout': 2000,
'use_custom_margin': True,
'custom_margin': 0,
'show_internal_console_if_traceback': True,
'check_updates_on_startup': True,
'toolbars_visible': True,
# Global Spyder fonts
'font/family': MONOSPACE,
'font/size': MEDIUM,
'font/italic': False,
'font/bold': False,
'rich_font/family': SANS_SERIF,
'rich_font/size': BIG,
'rich_font/italic': False,
'rich_font/bold': False,
'cursor/width': 2,
'completion/size': (300, 180),
}),
('quick_layouts',
{
'place_holder': '',
'names': ['Matlab layout', 'Rstudio layout', 'Vertical split', 'Horizontal split'],
'order': ['Matlab layout', 'Rstudio layout', 'Vertical split', 'Horizontal split'],
'active': ['Matlab layout', 'Rstudio layout', 'Vertical split', 'Horizontal split'],
}),
('internal_console',
{
'max_line_count': 300,
'working_dir_history': 30,
'working_dir_adjusttocontents': False,
'wrap': True,
'calltips': True,
'codecompletion/auto': False,
'codecompletion/enter_key': True,
'codecompletion/case_sensitive': True,
'external_editor/path': 'SciTE',
'external_editor/gotoline': '-goto:',
'light_background': True,
}),
('main_interpreter',
{
'default': True,
'custom': False,
'umr/enabled': True,
'umr/verbose': True,
'umr/namelist': [],
}),
('console',
{
'max_line_count': 500,
'wrap': True,
'single_tab': True,
'calltips': True,
'codecompletion/auto': True,
'codecompletion/enter_key': True,
'codecompletion/case_sensitive': True,
'show_elapsed_time': False,
'show_icontext': False,
'monitor/enabled': True,
'qt/api': 'default',
'matplotlib/backend/value': 0,
'light_background': True,
'merge_output_channels': os.name != 'nt',
'colorize_sys_stderr': os.name != 'nt',
'pythonstartup/default': True,
'pythonstartup/custom': False,
'ets_backend': 'qt4'
}),
('ipython_console',
{
'show_banner': True,
'completion_type': 0,
'use_pager': False,
'show_calltips': True,
'ask_before_closing': False,
'buffer_size': 500,
'pylab': True,
'pylab/autoload': False,
'pylab/backend': 0,
'pylab/inline/figure_format': 0,
'pylab/inline/resolution': 72,
'pylab/inline/width': 6,
'pylab/inline/height': 4,
'startup/run_lines': '',
'startup/use_run_file': False,
'startup/run_file': '',
'greedy_completer': False,
'autocall': 0,
'symbolic_math': False,
'in_prompt': '',
'out_prompt': '',
'light_color': True,
'dark_color': False
}),
('variable_explorer',
{
'autorefresh': False,
'autorefresh/timeout': 2000,
'check_all': CHECK_ALL,
'dataframe_format': '.3g', # no percent sign to avoid problems
# with ConfigParser's interpolation
'excluded_names': EXCLUDED_NAMES,
'exclude_private': True,
'exclude_uppercase': True,
'exclude_capitalized': False,
'exclude_unsupported': True,
'truncate': True,
'minmax': False
}),
('editor',
{
'printer_header/font/family': SANS_SERIF,
'printer_header/font/size': MEDIUM,
'printer_header/font/italic': False,
'printer_header/font/bold': False,
'wrap': False,
'wrapflag': True,
'code_analysis/pyflakes': True,
'code_analysis/pep8': False,
'todo_list': True,
'realtime_analysis': True,
'realtime_analysis/timeout': 2500,
'outline_explorer': True,
'line_numbers': True,
'blank_spaces': False,
'edge_line': True,
'edge_line_column': 79,
'toolbox_panel': True,
'calltips': True,
'go_to_definition': True,
'close_parentheses': True,
'close_quotes': False,
'add_colons': True,
'auto_unindent': True,
'indent_chars': '* *',
'tab_stop_width_spaces': 4,
'codecompletion/auto': True,
'codecompletion/enter_key': True,
'codecompletion/case_sensitive': True,
'check_eol_chars': True,
'tab_always_indent': False,
'intelligent_backspace': True,
'highlight_current_line': True,
'highlight_current_cell': True,
'occurrence_highlighting': True,
'occurrence_highlighting/timeout': 1500,
'always_remove_trailing_spaces': False,
'fullpath_sorting': True,
'show_tab_bar': True,
'max_recent_files': 20,
'save_all_before_run': True,
'focus_to_editor': True,
'onsave_analysis': False
}),
('historylog',
{
'enable': True,
'max_entries': 100,
'wrap': True,
'go_to_eof': True,
}),
('help',
{
'enable': True,
'max_history_entries': 20,
'wrap': True,
'connect/editor': False,
'connect/python_console': False,
'connect/ipython_console': False,
'math': True,
'automatic_import': True,
}),
('onlinehelp',
{
'enable': True,
'zoom_factor': .8,
'max_history_entries': 20,
}),
('outline_explorer',
{
'enable': True,
'show_fullpath': False,
'show_all_files': False,
'show_comments': True,
}),
('project_explorer',
{
'name_filters': NAME_FILTERS,
'show_all': True,
'show_hscrollbar': True
}),
('explorer',
{
'enable': True,
'wrap': True,
'name_filters': NAME_FILTERS,
'show_hidden': True,
'show_all': True,
'show_icontext': False,
}),
('find_in_files',
{
'enable': True,
'supported_encodings': ["utf-8", "iso-8859-1", "cp1252"],
'include': '',
'include_regexp': True,
'exclude': EXCLUDE_PATTERNS,
'exclude_regexp': True,
'search_text_regexp': True,
'search_text': [''],
'search_text_samples': [codeanalysis.TASKS_PATTERN],
'in_python_path': False,
'more_options': False,
}),
('workingdir',
{
'editor/open/browse_scriptdir': True,
'editor/open/browse_workdir': False,
'editor/new/browse_scriptdir': False,
'editor/new/browse_workdir': True,
'editor/open/auto_set_to_basedir': False,
'editor/save/auto_set_to_basedir': False,
'working_dir_adjusttocontents': False,
'working_dir_history': 20,
'startup/use_last_directory': True,
}),
('shortcuts',
{
# ---- Global ----
# -- In app/spyder.py
'_/close pane': "Shift+Ctrl+F4",
'_/lock unlock panes': "Shift+Ctrl+F5",
'_/use next layout': "Shift+Alt+PgDown",
'_/use previous layout': "Shift+Alt+PgUp",
'_/preferences': "Ctrl+Alt+Shift+P",
'_/maximize pane': "Ctrl+Alt+Shift+M",
'_/fullscreen mode': "F11",
'_/save current layout': "Shift+Alt+S",
'_/layout preferences': "Shift+Alt+P",
'_/show toolbars': "Alt+Shift+T",
'_/spyder documentation': "F1",
'_/restart': "Shift+Alt+R",
'_/quit': "Ctrl+Q",
# -- In plugins/editor
'_/file switcher': 'Ctrl+P',
'_/symbol finder': 'Ctrl+Alt+P',
'_/debug': "Ctrl+F5",
'_/debug step over': "Ctrl+F10",
'_/debug continue': "Ctrl+F12",
'_/debug step into': "Ctrl+F11",
'_/debug step return': "Ctrl+Shift+F11",
'_/debug exit': "Ctrl+Shift+F12",
'_/run': "F5",
'_/configure': "Ctrl+F6",
'_/re-run last script': "F6",
# -- In plugins/init
'_/switch to help': "Ctrl+Shift+H",
'_/switch to outline_explorer': "Ctrl+Shift+O",
'_/switch to editor': "Ctrl+Shift+E",
'_/switch to historylog': "Ctrl+Shift+L",
'_/switch to onlinehelp': "Ctrl+Shift+D",
'_/switch to project_explorer': "Ctrl+Shift+P",
'_/switch to console': "Ctrl+Shift+C",
'_/switch to ipython_console': "Ctrl+Shift+I",
'_/switch to variable_explorer': "Ctrl+Shift+V",
'_/switch to find_in_files': "Ctrl+Shift+F",
'_/switch to explorer': "Ctrl+Shift+X",
# -- In widgets/findreplace.py
'_/find text': "Ctrl+F",
'_/find next': "F3",
'_/find previous': "Shift+F3",
'_/replace text': "Ctrl+R",
'_/hide find and replace': "Escape",
# ---- Editor ----
# -- In widgets/sourcecode/codeeditor.py
'editor/code completion': CTRL+'+Space',
'editor/duplicate line': "Ctrl+Alt+Up" if WIN else \
"Shift+Alt+Up",
'editor/copy line': "Ctrl+Alt+Down" if WIN else \
"Shift+Alt+Down",
'editor/delete line': 'Ctrl+D',
'editor/transform to uppercase': 'Ctrl+Shift+U',
'editor/transform to lowercase': 'Ctrl+U',
'editor/move line up': "Alt+Up",
'editor/move line down': "Alt+Down",
'editor/go to definition': "Ctrl+G",
'editor/toggle comment': "Ctrl+1",
'editor/blockcomment': "Ctrl+4",
'editor/unblockcomment': "Ctrl+5",
'editor/start of line': "Meta+A",
'editor/end of line': "Meta+E",
'editor/previous line': "Meta+P",
'editor/next line': "Meta+N",
'editor/previous char': "Meta+B",
'editor/next char': "Meta+F",
'editor/previous word': "Meta+Left",
'editor/next word': "Meta+Right",
'editor/kill to line end': "Meta+K",
'editor/kill to line start': "Meta+U",
'editor/yank': 'Meta+Y',
'editor/rotate kill ring': 'Shift+Meta+Y',
'editor/kill previous word': 'Meta+Backspace',
'editor/kill next word': 'Meta+D',
'editor/start of document': 'Ctrl+Up',
'editor/end of document': 'Ctrl+Down',
'editor/undo': 'Ctrl+Z',
'editor/redo': 'Ctrl+Shift+Z',
'editor/cut': 'Ctrl+X',
'editor/copy': 'Ctrl+C',
'editor/paste': 'Ctrl+V',
'editor/delete': 'Delete',
'editor/select all': "Ctrl+A",
# -- In widgets/editor.py
'editor/inspect current object': 'Ctrl+I',
'editor/breakpoint': 'F12',
'editor/conditional breakpoint': 'Shift+F12',
'editor/run selection': "F9",
'editor/go to line': 'Ctrl+L',
'editor/go to previous file': 'Ctrl+Tab',
'editor/go to next file': 'Ctrl+Shift+Tab',
'editor/new file': "Ctrl+N",
'editor/open last closed':"Ctrl+Shift+T",
'editor/open file': "Ctrl+O",
'editor/save file': "Ctrl+S",
'editor/save all': "Ctrl+Alt+S",
'editor/save as': 'Ctrl+Shift+S',
'editor/close all': "Ctrl+Shift+W",
'editor/last edit location': "Ctrl+Alt+Shift+Left",
'editor/previous cursor position': "Ctrl+Alt+Left",
'editor/next cursor position': "Ctrl+Alt+Right",
'editor/zoom in 1': "Ctrl++",
'editor/zoom in 2': "Ctrl+=",
'editor/zoom out': "Ctrl+-",
'editor/zoom reset': "Ctrl+0",
'editor/close file 1': "Ctrl+W",
'editor/close file 2': "Ctrl+F4",
'editor/run cell': RUN_CELL_SHORTCUT,
'editor/run cell and advance': RUN_CELL_AND_ADVANCE_SHORTCUT,
# -- In plugins/editor.py
'editor/show/hide outline': "Ctrl+Alt+O",
# -- In Breakpoints
'_/switch to breakpoints': "Ctrl+Shift+B",
# ---- Consoles (in widgets/shell) ----
'console/inspect current object': "Ctrl+I",
'console/clear shell': "Ctrl+L",
'console/clear line': "Shift+Escape",
# ---- In Pylint ----
'pylint/run analysis': "F8",
# ---- In Profiler ----
'profiler/run profiler': "F10",
# ---- In widgets/ipythonconsole/shell.py ----
'ipython_console/new tab': "Ctrl+T",
'ipython_console/reset namespace': "Ctrl+Alt+R",
'ipython_console/restart kernel': "Ctrl+.",
# ---- In widgets/arraybuider.py ----
'array_builder/enter array inline': "Ctrl+Alt+M",
'array_builder/enter array table': "Ctrl+M",
# ---- In widgets/variableexplorer/aarayeditor.py ----
'variable_explorer/copy': 'Ctrl+C',
}),
('color_schemes',
{
'names': ['emacs', 'idle', 'monokai', 'pydev', 'scintilla',
'spyder', 'spyder/dark', 'zenburn', 'solarized/light',
'solarized/dark'],
'selected': 'spyder',
# ---- Emacs ----
'emacs/name': "Emacs",
# Name Color Bold Italic
'emacs/background': "#000000",
'emacs/currentline': "#2b2b43",
'emacs/currentcell': "#1c1c2d",
'emacs/occurrence': "#abab67",
'emacs/ctrlclick': "#0000ff",
'emacs/sideareas': "#555555",
'emacs/matched_p': "#009800",
'emacs/unmatched_p': "#c80000",
'emacs/normal': ('#ffffff', False, False),
'emacs/keyword': ('#3c51e8', False, False),
'emacs/builtin': ('#900090', False, False),
'emacs/definition': ('#ff8040', True, False),
'emacs/comment': ('#005100', False, False),
'emacs/string': ('#00aa00', False, True),
'emacs/number': ('#800000', False, False),
'emacs/instance': ('#ffffff', False, True),
# ---- IDLE ----
'idle/name': "IDLE",
# Name Color Bold Italic
'idle/background': "#ffffff",
'idle/currentline': "#f2e6f3",
'idle/currentcell': "#feefff",
'idle/occurrence': "#e8f2fe",
'idle/ctrlclick': "#0000ff",
'idle/sideareas': "#efefef",
'idle/matched_p': "#99ff99",
'idle/unmatched_p': "#ff9999",
'idle/normal': ('#000000', False, False),
'idle/keyword': ('#ff7700', True, False),
'idle/builtin': ('#900090', False, False),
'idle/definition': ('#0000ff', False, False),
'idle/comment': ('#dd0000', False, True),
'idle/string': ('#00aa00', False, False),
'idle/number': ('#924900', False, False),
'idle/instance': ('#777777', True, True),
# ---- Monokai ----
'monokai/name': "Monokai",
# Name Color Bold Italic
'monokai/background': "#2a2b24",
'monokai/currentline': "#484848",
'monokai/currentcell': "#3d3d3d",
'monokai/occurrence': "#666666",
'monokai/ctrlclick': "#0000ff",
'monokai/sideareas': "#2a2b24",
'monokai/matched_p': "#688060",
'monokai/unmatched_p': "#bd6e76",
'monokai/normal': ("#ddddda", False, False),
'monokai/keyword': ("#f92672", False, False),
'monokai/builtin': ("#ae81ff", False, False),
'monokai/definition': ("#a6e22e", False, False),
'monokai/comment': ("#75715e", False, True),
'monokai/string': ("#e6db74", False, False),
'monokai/number': ("#ae81ff", False, False),
'monokai/instance': ("#ddddda", False, True),
# ---- Pydev ----
'pydev/name': "Pydev",
# Name Color Bold Italic
'pydev/background': "#ffffff",
'pydev/currentline': "#e8f2fe",
'pydev/currentcell': "#eff8fe",
'pydev/occurrence': "#ffff99",
'pydev/ctrlclick': "#0000ff",
'pydev/sideareas': "#efefef",
'pydev/matched_p': "#99ff99",
'pydev/unmatched_p': "#ff99992",
'pydev/normal': ('#000000', False, False),
'pydev/keyword': ('#0000ff', False, False),
'pydev/builtin': ('#900090', False, False),
'pydev/definition': ('#000000', True, False),
'pydev/comment': ('#c0c0c0', False, False),
'pydev/string': ('#00aa00', False, True),
'pydev/number': ('#800000', False, False),
'pydev/instance': ('#000000', False, True),
# ---- Scintilla ----
'scintilla/name': "Scintilla",
# Name Color Bold Italic
'scintilla/background': "#ffffff",
'scintilla/currentline': "#e1f0d1",
'scintilla/currentcell': "#edfcdc",
'scintilla/occurrence': "#ffff99",
'scintilla/ctrlclick': "#0000ff",
'scintilla/sideareas': "#efefef",
'scintilla/matched_p': "#99ff99",
'scintilla/unmatched_p': "#ff9999",
'scintilla/normal': ('#000000', False, False),
'scintilla/keyword': ('#00007f', True, False),
'scintilla/builtin': ('#000000', False, False),
'scintilla/definition': ('#007f7f', True, False),
'scintilla/comment': ('#007f00', False, False),
'scintilla/string': ('#7f007f', False, False),
'scintilla/number': ('#007f7f', False, False),
'scintilla/instance': ('#000000', False, True),
# ---- Spyder ----
'spyder/name': "Spyder",
# Name Color Bold Italic
'spyder/background': "#ffffff",
'spyder/currentline': "#f7ecf8",
'spyder/currentcell': "#fdfdde",
'spyder/occurrence': "#ffff99",
'spyder/ctrlclick': "#0000ff",
'spyder/sideareas': "#efefef",
'spyder/matched_p': "#99ff99",
'spyder/unmatched_p': "#ff9999",
'spyder/normal': ('#000000', False, False),
'spyder/keyword': ('#0000ff', False, False),
'spyder/builtin': ('#900090', False, False),
'spyder/definition': ('#000000', True, False),
'spyder/comment': ('#adadad', False, True),
'spyder/string': ('#00aa00', False, False),
'spyder/number': ('#800000', False, False),
'spyder/instance': ('#924900', False, True),
# ---- Spyder/Dark ----
'spyder/dark/name': "Spyder Dark",
# Name Color Bold Italic
'spyder/dark/background': "#131926",
'spyder/dark/currentline': "#2b2b43",
'spyder/dark/currentcell': "#31314e",
'spyder/dark/occurrence': "#abab67",
'spyder/dark/ctrlclick': "#0000ff",
'spyder/dark/sideareas': "#282828",
'spyder/dark/matched_p': "#009800",
'spyder/dark/unmatched_p': "#c80000",
'spyder/dark/normal': ('#ffffff', False, False),
'spyder/dark/keyword': ('#558eff', False, False),
'spyder/dark/builtin': ('#aa00aa', False, False),
'spyder/dark/definition': ('#ffffff', True, False),
'spyder/dark/comment': ('#7f7f7f', False, False),
'spyder/dark/string': ('#11a642', False, True),
'spyder/dark/number': ('#c80000', False, False),
'spyder/dark/instance': ('#be5f00', False, True),
# ---- Zenburn ----
'zenburn/name': "Zenburn",
# Name Color Bold Italic
'zenburn/background': "#3f3f3f",
'zenburn/currentline': "#333333",
'zenburn/currentcell': "#2c2c2c",
'zenburn/occurrence': "#7a738f",
'zenburn/ctrlclick': "#0000ff",
'zenburn/sideareas': "#3f3f3f",
'zenburn/matched_p': "#688060",
'zenburn/unmatched_p': "#bd6e76",
'zenburn/normal': ('#dcdccc', False, False),
'zenburn/keyword': ('#dfaf8f', True, False),
'zenburn/builtin': ('#efef8f', False, False),
'zenburn/definition': ('#efef8f', False, False),
'zenburn/comment': ('#7f9f7f', False, True),
'zenburn/string': ('#cc9393', False, False),
'zenburn/number': ('#8cd0d3', False, False),
'zenburn/instance': ('#dcdccc', False, True),
# ---- Solarized Light ----
'solarized/light/name': "Solarized Light",
# Name Color Bold Italic
'solarized/light/background': '#fdf6e3',
'solarized/light/currentline': '#f5efdB',
'solarized/light/currentcell': '#eee8d5',
              'solarized/light/occurrence': '#839496',
'solarized/light/ctrlclick': '#d33682',
'solarized/light/sideareas': '#eee8d5',
'solarized/light/matched_p': '#586e75',
'solarized/light/unmatched_p': '#dc322f',
'solarized/light/normal': ('#657b83', False, False),
'solarized/light/keyword': ('#859900', False, False),
'solarized/light/builtin': ('#6c71c4', False, False),
'solarized/light/definition': ('#268bd2', True, False),
'solarized/light/comment': ('#93a1a1', False, True),
'solarized/light/string': ('#2aa198', False, False),
'solarized/light/number': ('#cb4b16', False, False),
'solarized/light/instance': ('#b58900', False, True),
# ---- Solarized Dark ----
'solarized/dark/name': "Solarized Dark",
# Name Color Bold Italic
'solarized/dark/background': '#002b36',
'solarized/dark/currentline': '#083f4d',
'solarized/dark/currentcell': '#073642',
              'solarized/dark/occurrence': '#657b83',
'solarized/dark/ctrlclick': '#d33682',
'solarized/dark/sideareas': '#073642',
'solarized/dark/matched_p': '#93a1a1',
'solarized/dark/unmatched_p': '#dc322f',
'solarized/dark/normal': ('#839496', False, False),
'solarized/dark/keyword': ('#859900', False, False),
'solarized/dark/builtin': ('#6c71c4', False, False),
'solarized/dark/definition': ('#268bd2', True, False),
'solarized/dark/comment': ('#586e75', False, True),
'solarized/dark/string': ('#2aa198', False, False),
'solarized/dark/number': ('#cb4b16', False, False),
'solarized/dark/instance': ('#b58900', False, True)
})
]
#==============================================================================
# Config instance
#==============================================================================
# IMPORTANT NOTES:
# 1. If you want to *change* the default value of a current option, you need to
# do a MINOR update in config version, e.g. from 3.0.0 to 3.1.0
# 2. If you want to *remove* options that are no longer needed in our codebase,
# or if you want to *rename* options, then you need to do a MAJOR update in
# version, e.g. from 3.0.0 to 4.0.0
# 3. You don't need to touch this value if you're just adding a new option
CONF_VERSION = '32.0.0'
# Main configuration instance
try:
CONF = UserConfig('spyder', defaults=DEFAULTS, load=(not TEST),
version=CONF_VERSION, subfolder=SUBFOLDER, backup=True,
raw_mode=True)
except:
CONF = UserConfig('spyder', defaults=DEFAULTS, load=False,
version=CONF_VERSION, subfolder=SUBFOLDER, backup=True,
raw_mode=True)
# Removing old .spyder.ini location:
old_location = osp.join(get_home_dir(), '.spyder.ini')
if osp.isfile(old_location):
os.remove(old_location)
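# Usage sketch (illustrative): reading an option through the shared instance.
#   from spyder.config.main import CONF
#   CONF.get('main', 'window/size')   # -> (1260, 740) unless the user changed it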
| gpl-3.0 | -3,498,848,067,270,148,600 | 43.434718 | 98 | 0.452079 | false |
ju1ius/clisnips | clisnips/importers/clisnips.py | 1 | 1272 | import time
from textwrap import dedent
from typing import Callable, TextIO
from clisnips.database.snippets_db import SnippetsDatabase
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
from xml.etree import ElementTree
def import_xml(db: SnippetsDatabase, file: TextIO, log: Callable):
start_time = time.time()
log(('info', f'Importing snippets from {file.name}...'))
db.insertmany(_parse_snippets(file))
log(('info', 'Rebuilding & optimizing search index...'))
db.rebuild_index()
db.optimize_index()
elapsed_time = time.time() - start_time
log(('success', f'Success: imported in {elapsed_time:.1f} seconds.'))
def _parse_snippets(file):
now = int(time.time())
for event, el in ElementTree.iterparse(file):
if el.tag != 'snippet':
continue
row = {
'title': el.findtext('title').strip(),
'tag': el.findtext('tag').strip(),
'cmd': dedent(el.findtext('command')),
'doc': dedent(el.findtext('doc').strip()),
'created_at': el.attrib.get('created-at', now),
'last_used_at': el.attrib.get('last-used-at', now),
'usage_count': el.attrib.get('usage-count', 0)
}
yield row
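# Usage sketch (hypothetical; the SnippetsDatabase.open() call is an
# assumption for illustration):
#   db = SnippetsDatabase.open('snippets.sqlite')
#   with open('export.xml') as fp:
#       import_xml(db, fp, log=print)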
| gpl-3.0 | 8,562,715,914,577,953,000 | 30.8 | 73 | 0.617138 | false |
graphistry/pygraphistry | graphistry/tests/test_tigergraph.py | 1 | 2720 | # -*- coding: utf-8 -*-
import graphistry
from common import NoAuthTestCase
class TestTiger(NoAuthTestCase):
def test_tg_init_plain(self):
tg = graphistry.tigergraph()
self.assertTrue(type(tg) == graphistry.plotter.Plotter)
def test_tg_init_many(self):
tg = graphistry.tigergraph(
protocol = 'https',
server = '127.0.0.1',
web_port = 10000,
api_port = 11000,
db = 'z',
user = 'tigergraph1',
pwd = 'tigergraph2',
verbose = False
)
self.assertTrue(type(tg) == graphistry.plotter.Plotter)
def test_tg_endpoint_url_simple(self):
tg = graphistry.tigergraph(
protocol = 'https',
server = '127.0.0.1',
web_port = 10000,
api_port = 11000,
db = 'z',
user = 'tigergraph1',
pwd = 'tigergraph2',
verbose = False
)
self.assertEqual(
tg.gsql_endpoint('x', dry_run = True),
'https://tigergraph1:[email protected]:11000/query/z/x'
)
def test_tg_endpoint_url_1_arg(self):
tg = graphistry.tigergraph(
protocol = 'https',
server = '127.0.0.1',
web_port = 10000,
api_port = 11000,
db = 'z',
user = 'tigergraph1',
pwd = 'tigergraph2',
verbose = False
)
self.assertEqual(
tg.gsql_endpoint('x', {'f': 1}, dry_run = True),
'https://tigergraph1:[email protected]:11000/query/z/x?f=1'
)
def test_tg_endpoint_url_3_arg(self):
tg = graphistry.tigergraph(
protocol = 'https',
server = '127.0.0.1',
web_port = 10000,
api_port = 11000,
db = 'z',
user = 'tigergraph1',
pwd = 'tigergraph2',
verbose = False
)
#27 does not preserve order
self.assertEqual(
len(tg.gsql_endpoint('x', {'f': 1, 'ggg': 2, 'h': 33}, dry_run = True)),
len('https://tigergraph1:[email protected]:11000/query/z/x?f=1&ggg=2&h=33')
)
def test_tg_gsql(self):
tg = graphistry.tigergraph(
protocol = 'https',
server = '127.0.0.1',
web_port = 10000,
api_port = 11000,
db = 'z',
user = 'tigergraph1',
pwd = 'tigergraph2',
verbose = False
)
self.assertEqual(
tg.gsql('x', dry_run = True),
'https://tigergraph1:[email protected]:10000/gsqlserver/interpreted_query'
)
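# Usage sketch (mirrors the dry-run expectations above): with dry_run=True,
# gsql_endpoint() returns the query URL without issuing a request.
#   tg = graphistry.tigergraph(protocol='https', server='127.0.0.1',
#                              web_port=10000, api_port=11000, db='z',
#                              user='tigergraph1', pwd='tigergraph2')
#   tg.gsql_endpoint('x', dry_run=True)
#   -> 'https://tigergraph1:[email protected]:11000/query/z/x'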
| bsd-3-clause | 7,975,139,469,498,783,000 | 30.264368 | 91 | 0.483824 | false |
hckrtst/learnpython | py3_essential_training/19 Projects/Extras/RSS/tkrss.py | 1 | 8047 | #!/usr/bin/python3
# template.py by Bill Weinman [http://bw.org/]
# created for Python 3 Essential Training on lynda.com
# Copyright 2010 The BearHeart Gorup, LLC
# standard libraries
import sys
import tkinter
import webbrowser
# BW libraries
from rssdb import rssDB
from rss import RSS
# for exception symbols
import sqlite3
import urllib
import xml.parsers.expat
TITLE = 'RSS Sandbox'
class hyperlinkManager:
''' manager for hyperlinks in tk text widgets '''
def __init__(self, text):
self.text = text
self.text.tag_bind('hyper', '<Enter>', self._enter)
self.text.tag_bind('hyper', '<Leave>', self._leave)
self.text.tag_bind('hyper', '<Button-1>', self._click)
self.links = {}
def add(self, url):
'''
add an action to the manager.
return tags to use in text wiget
'''
tag = 'hyper-{}'.format(len(self.links))
self.links[tag] = url
return 'hyper', tag
def _enter(self, event):
self.text.config(cursor='hand2') # set the cursor ('hand2' is standard, 'trek' is fun)
def _leave(self, event):
self.text.config(cursor='') # cursor back to standard
def _click(self, event):
for tag in self.text.tag_names(tkinter.CURRENT):
if tag[:6] == 'hyper-':
webbrowser.open(self.links[tag])
break
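# Usage sketch: tagging a span of text as a clickable link (illustrative):
#   links = hyperlinkManager(text_widget)
#   text_widget.insert('end', 'project page', links.add('http://bw.org/'))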
class mainWindow(tkinter.Frame):
def __init__(self, master = None, **kwargs):
tkinter.Frame.__init__(self, master)
self._db = rssDB()
self.master.title( TITLE )
self.createWidgets()
self.grid()
def createWidgets(self):
# default font
self.defaultFont = ('Helvetica', '16', 'roman')
# URL text box
self.labelURL = tkinter.Label(text = 'URL')
self.varURL = tkinter.StringVar()
self.entryURL = tkinter.Entry(textvariable = self.varURL, width = 40)
self.buttonURLGo = tkinter.Button (text = 'Go', command = self.go)
self.buttonAdd = tkinter.Button (text = 'Add', command = self.addFeed)
# Listbox for feeds
self.listBox = tkinter.Listbox()
self.buttonListGo = tkinter.Button (text = 'Go', command = self.listGo)
self.buttonListDel = tkinter.Button (text = 'Del', command = self.listDel)
self.listBox.grid(row = 2, column = 0, rowspan = 4, columnspan = 2, padx = 10, pady = 3)
# scrollbar for listBox - must have same grid options as listBox
self.textScroll = tkinter.Scrollbar(self.master)
self.textScroll.grid(row = 2, column = 0, columnspan = 2, rowspan = 4, pady = 3, sticky='nse')
self.textScroll.config(command=self.listBox.yview)
self.listBox.config(yscrollcommand=self.textScroll.set)
# fill the listbox from the database
self.fillListBox()
# set up the rest of the grid
self.labelURL.grid(row = 0, column = 0, sticky = 'e')
self.entryURL.grid(row = 0, column = 1, pady = 3)
self.buttonURLGo.grid(row = 0, padx = 2, column = 2)
self.buttonAdd.grid(row = 0, padx = 2, column = 3)
self.buttonListDel.grid(row = 4, column = 2, padx = 2, sticky = 'sw')
self.buttonListGo.grid(row = 4, column = 3, padx = 2, sticky = 'sw')
def fillListBox(self):
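        # _db_index maps listbox row numbers to database record ids (used by listGo/listDel)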
        self._db_index = []
self.listBox.config(listvariable = tkinter.StringVar(), width = 40)
for r in self._db.list():
self.listBox.insert(tkinter.END, r['title'])
self._db_index.append(str(r['id']))
def go(self, url = None):
if url is None: url = self.varURL.get()
contentWindow = tkinter.Toplevel()
textContainer = tkinter.Text(contentWindow, wrap = 'word', height = 25, width = 100)
contentClose = tkinter.Button(contentWindow, text = 'Close', command = contentWindow.destroy)
textContainer.tag_add('default', '0.0')
textContainer.tag_config('default', font = self.defaultFont)
textContainer.tag_config('hyper', foreground='blue', underline = 1, font = self.defaultFont)
contentWindow.title('RSS Feed')
contentWindow.grid()
textContainer.grid(row = 0, column = 0, columnspan = 2, padx = 10, pady = 10)
contentClose.grid(row = 1, column = 1, pady = 5)
# scrollbar for textContainer - must have same grid options as parent
textScroll = tkinter.Scrollbar(contentWindow)
textScroll.grid(row = 0, column = 0, columnspan = 2, sticky='nse')
textScroll.config(command=textContainer.yview)
textContainer.config(yscrollcommand=textScroll.set)
hyperlink = hyperlinkManager(textContainer)
try:
feedString = ''
feed = RSS(url)
contentWindow.title(feed.feedTitle)
separator = '--------------------\n'
for r in feed.records():
textContainer.insert(tkinter.INSERT, separator, 'default')
                if r['title'] and r['link']:
textContainer.insert(tkinter.INSERT, r['title'] + '\n', hyperlink.add(r['link']))
else:
if r['title']: textContainer.insert(tkinter.INSERT, r['title'] + '\n', 'default')
                    if r['link']: textContainer.insert(tkinter.INSERT, r['link'] + '\n', hyperlink.add(r['link']))
if r['description']: textContainer.insert(tkinter.INSERT, r['description'] + '\n', 'default')
except urllib.error.HTTPError as e: self.errorBox(e, contentWindow.destroy)
except urllib.error.URLError as e: self.errorBox(e, contentWindow.destroy)
except ValueError as e:
if url: self.errorBox(e, contentWindow.destroy)
else: contentWindow.destroy()
except xml.parsers.expat.ExpatError as e: self.errorBox(e, contentWindow.destroy)
def listGo(self):
        try: recno = self.listBox.curselection()[0]
        except IndexError: self.errorBox('No feed selected')
else:
rec = self._db.getById(self._db_index[int(recno)])
self.varURL.set('')
self.go(rec['url'])
    def listDel(self):
        try: recno = self.listBox.curselection()[0]
        except IndexError: return self.errorBox('No feed selected')
itemText = self.listBox.get(recno)
self._db.delById(self._db_index[int(recno)])
self.fillListBox()
self.messageBox('Deleted from list: {}.'.format(itemText))
def addFeed(self):
url = self.varURL.get()
try:
feed = RSS(url)
rec = {
'title': feed.feedTitle.strip(),
'url': url.strip(),
'description': feed.feedDescription.strip()
}
try:
self._db.insert(rec)
except sqlite3.IntegrityError: # duplicate key - update instead
self._db.update(rec)
                self.messageBox('Updated in list: {}.'.format(rec['title']))
else:
self.fillListBox()
self.messageBox('Added to list: {}.'.format(rec['title']))
except urllib.error.HTTPError as e: self.errorBox(e)
except urllib.error.URLError as e: self.errorBox(e)
except ValueError as e: self.errorBox(e)
except xml.parsers.expat.ExpatError as e: self.errorBox(e)
self.varURL.set('') # clear the URL box
def errorBox(self, message, callback = None):
self.messageBox(message, title = 'Error')
if callback is not None: callback()
def messageBox(self, message, **kwargs):
mTitle = kwargs['title'] if 'title' in kwargs else 'Message'
messageWindow = tkinter.Toplevel()
textContainer = tkinter.Message(messageWindow, width = 500, text = message)
messageClose = tkinter.Button(messageWindow, text = 'Close', command = messageWindow.destroy)
messageWindow.title(TITLE + ' - ' + mTitle)
messageWindow.grid()
textContainer.grid(sticky = 'ew')
messageClose.grid()
if __name__=='__main__':
app = mainWindow()
app.mainloop()
| mit | 4,962,287,881,522,683,000 | 38.836634 | 115 | 0.595129 | false |
levilucio/SyVOLT | ECore_Copier_MM/properties/positive/HEC_prop2_CompleteLHS.py | 1 | 15178 | from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HEC_prop2_CompleteLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HEC_prop2_CompleteLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HEC_prop2_CompleteLHS, self).__init__(name='HEC_prop2_CompleteLHS', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = []
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'EC_prop2')
# Nodes that represent match classes
# match class EClass() node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["MT_dirty__"] = False
self.vs[0]["mm__"] = """MT_pre__EClass"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class EStructuralFeature() node
self.add_node()
self.vs[1]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["MT_dirty__"] = False
self.vs[1]["mm__"] = """MT_pre__EStructuralFeature"""
self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
#Nodes that represent apply classes
# match class EClass() node
self.add_node()
self.vs[2]["MT_subtypeMatching__"] = False
self.vs[2]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["MT_subtypes__"] = []
self.vs[2]["MT_dirty__"] = False
self.vs[2]["mm__"] = """MT_pre__EClass"""
self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class EStructuralFeature() node
self.add_node()
self.vs[3]["MT_subtypeMatching__"] = False
self.vs[3]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[3]["MT_label__"] = """4"""
self.vs[3]["MT_subtypes__"] = []
self.vs[3]["MT_dirty__"] = False
self.vs[3]["mm__"] = """MT_pre__EStructuralFeature"""
self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# Nodes that represent the match associations of the property.
# match association EClass--eStructuralFeatures-->EStructuralFeature node
self.add_node()
self.vs[4]["MT_subtypeMatching__"] = False
self.vs[4]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "eStructuralFeatures"
"""
self.vs[4]["MT_label__"] = """5"""
self.vs[4]["MT_subtypes__"] = []
self.vs[4]["MT_dirty__"] = False
self.vs[4]["mm__"] = """MT_pre__directLink_S"""
self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc4')
# Nodes that represent the apply associations of the property.
# apply association EClass--eStructuralFeatures-->EStructuralFeature node
self.add_node()
self.vs[5]["MT_subtypeMatching__"] = False
self.vs[5]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "eStructuralFeatures"
"""
self.vs[5]["MT_label__"] = """6"""
self.vs[5]["MT_subtypes__"] = []
self.vs[5]["MT_dirty__"] = False
self.vs[5]["mm__"] = """MT_pre__directLink_T"""
self.vs[5]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc5')
# Nodes that represent trace relations
# backward association EStructuralFeature---->EStructuralFeature node
self.add_node()
self.vs[6]["MT_subtypeMatching__"] = False
self.vs[6]["MT_label__"] = """7"""
self.vs[6]["MT_subtypes__"] = []
self.vs[6]["MT_dirty__"] = False
self.vs[6]["mm__"] = """MT_pre__trace_link"""
self.vs[6]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'blink6')
# Add the edges
self.add_edges([
(3,6), # apply_class EStructuralFeature() -> backward_association
(6,1), # backward_association -> apply_class EStructuralFeature()
(2,5), # apply_class EClass() -> association eStructuralFeatures
(5,3), # association eStructuralFeatures -> apply_class EStructuralFeature()
(0,4), # match_class EClass() -> association eStructuralFeatures
(4,1) # association eStructuralFeatures -> match_class EStructuralFeature()
])
# Add the attribute equations
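        # equate the 'name' attribute of the apply-side EClass (node 2) with that of the match-side EClass (node 0)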
self["equations"] = [((2,'name'),(0,'name')), ]
def eval_attr11(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr12(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr15(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "eStructuralFeatures"
def eval_attr13(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr14(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr16(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "eStructuralFeatures"
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
| mit | -6,822,914,805,518,198,000 | 51.885017 | 125 | 0.463104 | false |
Netflix/sketchy | sketchy/controllers/tasks.py | 1 | 16979 | # Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto
import lxml.html as LH
import lxml.html.clean as clean
import os
import re
import json
import requests
from requests.exceptions import ConnectionError
from requests import post
from boto.s3.key import Key
from boto.s3.connection import OrdinaryCallingFormat
from subprocess32 import PIPE
from collections import defaultdict
from sketchy import db, app, celery
from sketchy.models.capture import Capture
from sketchy.models.static import Static
from sketchy.controllers.validators import grab_domain
import subprocess32
import socket
import netaddr
@celery.task(name='check_url', bind=True)
def check_url(self, capture_id=0, retries=0, model='capture'):
"""
    Check that a URL responds and record its HTTP status code.
    (A plain GET is issued without following redirects.)
"""
# check for env variable for session cookies
cookies = {}
try:
cookies = dict(item.split("=") for item in os.getenv('phantomjs_cookies').split(" "))
except:
pass
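    # e.g. phantomjs_cookies="sessionid=abc csrftoken=xyz" -> {'sessionid': 'abc', 'csrftoken': 'xyz'} (hypothetical values)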
capture_record = Capture.query.filter(Capture.id == capture_id).first()
capture_record.job_status = 'STARTED'
# Write the number of retries to the capture record
db.session.add(capture_record)
capture_record.retry = retries
db.session.commit()
    # Issue the request without following redirects and record the response code
try:
response = ""
verify_ssl = app.config['SSL_HOST_VALIDATION']
response = requests.get(capture_record.url, verify=verify_ssl, allow_redirects=False, timeout=5, headers={"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:28.0) Gecko/20100101 Firefox/28.0"}, cookies=cookies)
capture_record.url_response_code = response.status_code
if capture_record.status_only:
capture_record.job_status = 'COMPLETED'
capture_record.capture_status = '%s HTTP STATUS CODE' % (response.status_code)
if capture_record.callback:
finisher(capture_record)
else:
capture_record.capture_status = '%s HTTP STATUS CODE' % (response.status_code)
# If URL doesn't return a valid status code or times out, raise an exception
except Exception as err:
capture_record.job_status = 'RETRY'
capture_record.capture_status = str(err)
capture_record.url_response_code = 0
check_url.retry(kwargs={'capture_id': capture_id, 'retries': capture_record.retry + 1, 'model': model}, exc=err, countdown=app.config['COOLDOWN'], max_retries=app.config['MAX_RETRIES'])
    # Always persist status changes to the capture record
finally:
db.session.commit()
return str(response.status_code)
def do_capture(status_code, the_record, base_url, model='capture', phantomjs_timeout=app.config['PHANTOMJS_TIMEOUT']):
"""
Create a screenshot, text scrape, from a provided html file.
This depends on phantomjs and an associated javascript file to perform the captures.
In the event an error occurs, an exception is raised and handled by the celery task
or the controller that called this method.
"""
# Make sure the the_record
db.session.add(the_record)
# If the capture is for static content, use a different PhantomJS config file
if model == 'static':
capture_name = the_record.filename
service_args = [
app.config['PHANTOMJS'],
'--ssl-protocol=any',
'--ignore-ssl-errors=yes',
os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/assets/static.js',
app.config['LOCAL_STORAGE_FOLDER'],
capture_name]
content_to_parse = os.path.join(app.config['LOCAL_STORAGE_FOLDER'], capture_name)
else:
capture_name = grab_domain(the_record.url) + '_' + str(the_record.id)
service_args = [
app.config['PHANTOMJS'],
'--ssl-protocol=any',
'--ignore-ssl-errors=yes',
os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/assets/capture.js',
the_record.url,
os.path.join(app.config['LOCAL_STORAGE_FOLDER'], capture_name)]
content_to_parse = os.path.join(app.config['LOCAL_STORAGE_FOLDER'], capture_name + '.html')
# Using subprocess32 backport, call phantom and if process hangs kill it
pid = subprocess32.Popen(service_args, stdout=PIPE, stderr=PIPE)
try:
stdout, stderr = pid.communicate(timeout=phantomjs_timeout)
except subprocess32.TimeoutExpired:
pid.kill()
stdout, stderr = pid.communicate()
app.logger.error('PhantomJS Capture timeout at {} seconds'.format(phantomjs_timeout))
raise subprocess32.TimeoutExpired('phantomjs capture',phantomjs_timeout)
    # PhantomJS is expected to be silent on success; any output indicates an error
if stderr or stdout:
raise Exception("{}{}".format(stdout, stderr))
# Strip tags and parse out all text
ignore_tags = ('script', 'noscript', 'style')
with open(content_to_parse, 'r') as content_file:
content = content_file.read()
cleaner = clean.Cleaner()
content = cleaner.clean_html(content)
doc = LH.fromstring(content)
output = ""
for elt in doc.iterdescendants():
if elt.tag in ignore_tags:
continue
text = elt.text or ''
tail = elt.tail or ''
wordz = " ".join((text, tail)).strip('\t')
if wordz and len(wordz) >= 2 and not re.match("^[ \t\n]*$", wordz):
output += wordz.encode('utf-8')
# Since the filename format is different for static captures, update the filename
# This will ensure the URLs are pointing to the correct resources
if model == 'static':
capture_name = capture_name.split('.')[0]
# Wite our html text that was parsed into our capture folder
parsed_text = open(os.path.join(app.config['LOCAL_STORAGE_FOLDER'], capture_name + '.txt'), 'wb')
parsed_text.write(output)
# Update the sketch record with the local URLs for the sketch, scrape, and html captures
the_record.sketch_url = base_url + '/files/' + capture_name + '.png'
the_record.scrape_url = base_url + '/files/' + capture_name + '.txt'
the_record.html_url = base_url + '/files/' + capture_name + '.html'
# Create a dict that contains what files may need to be written to S3
files_to_write = defaultdict(list)
files_to_write['sketch'] = capture_name + '.png'
files_to_write['scrape'] = capture_name + '.txt'
files_to_write['html'] = capture_name + '.html'
# If we are not writing to S3, update the capture_status that we are completed.
if not app.config['USE_S3']:
the_record.job_status = "COMPLETED"
the_record.capture_status = "LOCAL_CAPTURES_CREATED"
else:
the_record.capture_status = "LOCAL_CAPTURES_CREATED"
db.session.commit()
return files_to_write
def s3_save(files_to_write, the_record):
"""
Write a sketch, scrape, and html file to S3
"""
db.session.add(the_record)
# These are the content-types for the files S3 will be serving up
response_types = {'sketch': 'image/png', 'scrape': 'text/plain', 'html': 'text/html'}
# Iterate through each file we need to write to s3
for capture_type, file_name in files_to_write.items():
# Connect to S3, generate Key, set path based on capture_type, write file to S3
conn = boto.s3.connect_to_region(
region_name = app.config.get('S3_BUCKET_REGION_NAME'),
calling_format = boto.s3.connection.OrdinaryCallingFormat()
)
key = Key(conn.get_bucket(app.config.get('S3_BUCKET_PREFIX')))
path = "sketchy/{}/{}".format(capture_type, file_name)
key.key = path
key.set_contents_from_filename(app.config['LOCAL_STORAGE_FOLDER'] + '/' + file_name)
# Generate a URL for downloading the files
url = conn.generate_url(
app.config.get('S3_LINK_EXPIRATION'),
'GET',
bucket=app.config.get('S3_BUCKET_PREFIX'),
key=key.key,
response_headers={
'response-content-type': response_types[capture_type],
'response-content-disposition': 'attachment; filename=' + file_name
})
# Generate appropriate url based on capture_type
if capture_type == 'sketch':
the_record.sketch_url = str(url)
if capture_type == 'scrape':
the_record.scrape_url = str(url)
if capture_type == 'html':
the_record.html_url = str(url)
# Remove local files if we are saving to S3
os.remove(os.path.join(app.config['LOCAL_STORAGE_FOLDER'], files_to_write['sketch']))
os.remove(os.path.join(app.config['LOCAL_STORAGE_FOLDER'], files_to_write['scrape']))
os.remove(os.path.join(app.config['LOCAL_STORAGE_FOLDER'], files_to_write['html']))
    # If there is no callback, saving to S3 completes the job
if the_record.callback:
the_record.capture_status = 'S3_ITEMS_SAVED'
else:
the_record.capture_status = 'S3_ITEMS_SAVED'
the_record.job_status = 'COMPLETED'
db.session.commit()
def finisher(the_record):
"""
POST finished chain to a callback URL provided
"""
db.session.add(the_record)
verify_ssl = app.config['SSL_HOST_VALIDATION']
# Set the correct headers for the postback
headers = {'Content-type': 'application/json', 'Accept': 'text/plain', 'Connection': 'close'}
#proxy = {"http": "127.0.0.1:8080"}
try:
# Blacklist IP addresses
ip_addr = socket.gethostbyname(grab_domain(the_record.url))
if app.config['IP_BLACKLISTING']:
if netaddr.all_matching_cidrs(ip_addr, app.config['IP_BLACKLISTING_RANGE'].split(',')):
the_record.capture_status = "IP BLACKLISTED:{} - ".format(ip_addr) + the_record.capture_status
except:
pass
req = post(the_record.callback, verify=verify_ssl, data=json.dumps(the_record.as_dict()), headers=headers)
# If a 4xx or 5xx status is received, raise an exception
req.raise_for_status()
# Update capture_record and save to database
the_record.job_status = 'COMPLETED'
# Removed to propagate blacklist message
#the_record.capture_status = 'CALLBACK_SUCCEEDED'
db.session.add(the_record)
db.session.commit()
@celery.task(name='celery_static_capture', ignore_result=True, bind=True)
def celery_static_capture(self, base_url, capture_id=0, retries=0, model="static"):
"""
Celery task used to create a sketch and scrape with a provided static HTML file.
Task also writes files to S3 or posts a callback depending on configuration file.
"""
static_record = Static.query.filter(Static.id == capture_id).first()
# Write the number of retries to the capture record
db.session.add(static_record)
static_record.retry = retries
db.session.commit()
# First perform the captures, then either write to S3, perform a callback, or neither
try:
# call the main capture function to retrieve sketches and scrapes
files_to_write = do_capture(0, static_record, base_url, model='static')
# Call the s3 save function if s3 is configured, and perform callback if configured.
if app.config['USE_S3']:
if static_record.callback:
s3_save(files_to_write, static_record)
finisher(static_record)
else:
s3_save(files_to_write, static_record)
elif static_record.callback:
finisher(static_record)
# Only execute retries on ConnectionError exceptions, otherwise fail immediately
except ConnectionError as err:
app.logger.error(err)
static_record.job_status = 'RETRY'
static_record.capture_status = str(err)
static_record.retry = retries + 1
db.session.commit()
raise celery_static_capture.retry(args=[base_url],
kwargs={'capture_id' :capture_id, 'retries': static_record.retry + 1, 'model': 'static'}, exc=err,
countdown=app.config['COOLDOWN'],
max_retries=app.config['MAX_RETRIES'])
# Catch exceptions raised by any functions called
except Exception as err:
app.logger.error(err)
static_record.job_status = 'FAILURE'
if str(err):
static_record.capture_status = str(err)
raise Exception
finally:
db.session.commit()
@celery.task(name='celery_capture', ignore_result=True, bind=True)
def celery_capture(self, status_code, base_url, capture_id=0, retries=0, model="capture", phantomjs_timeout=app.config['PHANTOMJS_TIMEOUT']):
"""
Celery task used to create sketch, scrape, html.
Task also writes files to S3 or posts a callback depending on configuration file.
"""
capture_record = Capture.query.filter(Capture.id == capture_id).first()
# Write the number of retries to the capture record
db.session.add(capture_record)
capture_record.retry = retries
db.session.commit()
try:
# Perform a callback or complete the task depending on error code and config
if capture_record.url_response_code > 400 and app.config['CAPTURE_ERRORS'] == False:
if capture_record.callback:
finisher(capture_record)
else:
capture_record.job_status = 'COMPLETED'
return True
# Only execute retries on ConnectionError exceptions, otherwise fail immediately
except ConnectionError as err:
app.logger.error(err)
capture_record.job_status = 'RETRY'
capture_record.capture_status = str(err)
capture_record.retry = retries + 1
raise celery_capture.retry(args=[status_code, base_url],
kwargs = { 'capture_id' :capture_id, 'retries': capture_record.retry + 1, 'model': 'capture'}, exc=err,
countdown=app.config['COOLDOWN'],
max_retries=app.config['MAX_RETRIES'])
except Exception as err:
app.logger.error(err)
capture_record.job_status = 'FAILURE'
        if str(err):
            capture_record.capture_status = str(err)
finally:
db.session.commit()
# First perform the captures, then either write to S3, perform a callback, or neither
try:
# call the main capture function to retrieve sketches, scrapes, and html
files_to_write = do_capture(status_code, capture_record, base_url, model='capture', phantomjs_timeout=phantomjs_timeout)
# Call the s3 save function if s3 is configured, and perform callback if configured.
if app.config['USE_S3']:
if capture_record.callback:
s3_save(files_to_write, capture_record)
finisher(capture_record)
else:
s3_save(files_to_write, capture_record)
elif capture_record.callback:
finisher(capture_record)
# If the screenshot generation timed out, try to render again
except subprocess32.TimeoutExpired as err:
app.logger.error(err)
capture_record.job_status = 'RETRY'
capture_record.capture_status = str(err)
capture_record.retry = retries + 1
raise celery_capture.retry(args=[status_code, base_url],
kwargs={'capture_id' :capture_id, 'retries': capture_record.retry, 'model': 'capture', 'phantomjs_timeout': (capture_record.retry * 5) + phantomjs_timeout}, exc=err,
countdown=app.config['COOLDOWN'],
max_retries=app.config['MAX_RETRIES'])
# Retry on connection error exceptions
except ConnectionError as err:
app.logger.error(err)
capture_record.job_status = 'RETRY'
capture_record.capture_status = str(err)
capture_record.retry = retries + 1
raise celery_capture.retry(args=[status_code, base_url],
kwargs={'capture_id' :capture_id, 'retries': capture_record.retry, 'model': 'capture'}, exc=err,
countdown=app.config['COOLDOWN'],
max_retries=app.config['MAX_RETRIES'])
# For all other exceptions, fail immediately
except Exception as err:
app.logger.error(err)
if str(err):
capture_record.capture_status = str(err)
capture_record.job_status = 'FAILURE'
raise Exception
finally:
db.session.commit()
| apache-2.0 | -4,491,304,202,966,124,500 | 42.647815 | 230 | 0.65104 | false |
mdobrychlop/pyry3d_chimera_extension | PyRy3D_input_generator.py | 1 | 14149 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
# www.genesilico.pl
#
#creates ranked 3D models of macromoleular complexes
#based on experimental restraints and a whole complex shape.
__author__ = "Joanna M. Kasprzak"
__copyright__ = "Copyright 2010, The PyRy3D Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__version__ = "0.1.0"
__maintainer__ = "Joanna Kasprzak"
__email__ = "[email protected]"
__status__ = "Prototype"
import sys, os, glob, tarfile
from shutil import rmtree
#Internal imports
#BioPython
from Bio import PDB
from Bio.PDB import PDBParser, PDBIO
from Bio.PDB.Atom import Atom
from Bio.PDB.Residue import Residue
from Bio.PDB.Chain import Chain
from Bio.PDB.Model import Model
from Bio.PDB.Structure import Structure
import tkMessageBox
#from Pyry_cleanPDB import run_cleanPDB
RESNAMES = {"ALA": "A", "ARG": "R", "ASP": "D", "ASN": "N", "CYS": "C",\
"GLU": "E", "GLY": "G", "GLN": "Q", "HIS": "H", \
"ILE": "I", "LEU": "L", "LYS": "K", "MET": "M", "MSE": "M",\
"PHE": "F", "PRO": "P", "SER": "S", "THR": "T",\
"TRP": "W", "TYR": "Y", "VAL": "V", \
"CYT": "C", "THY": "T", "GUA": "G", "ADE": "A", "URA": "U"}
"""
This module is created to enable PyRy3D users to create input files automatically
1. it takes a folder with structures
2. it renames chains, renumber residues, remove hetatoms
3. creates tarred archive with structures in PyRy3D format
4. automatically creates fasta file with structure sequences
5. automatically creates config file with simulation parameters set into default values
The module will become:
1. a part of PyRy3D program
2. a part of PyRy3D Chimera plugin
Future:
1. features enabling users to decide on parameters for config file
2. features enabling users to decide on numeration/chain_names for structures
"""
class PyRy3D_IG_Error(Exception): pass
class PyRy3D_InputGenerator(object):
def __init__(self):
pass
def __str__(self):
pass
def generate_pyry_infiles(self):
pass
def print_run_command(self):
pass
class InStructure(object):
def __init__(self, biostruct, filename):
self.biostruct = biostruct
self.filename = filename
class InStructures(object):
"""
stores information and methods to create default PyRy3D structure folder
"""
def __init__(self):
self.structures = [] #list of Bio.PDB structures provided by the user
self.taken_chains = [] #list of chain names already assigned
self.alphabet = ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","W","X","Y","Z",\
"a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","w","x","y","z",\
"1","2","3","4","5","6","7","8","9","0","-","+","_","=","~","`","!","@","#","$","%","^","&","*","(",\
")","{","}","[","]","|"]
#here add other chars and digits
self.outname = "" #outfolder name
def __str__(self):
pass
def generate_pyry_instructures(self, input_folder, output_folder, rankname = ""):
"""
"""
self.extract_structures(input_folder)
self.create_outfolder(output_folder)
self.prepare_instructures()
self.archive_structures(rankname)
def prepare_instructures(self):
"""
        cleans each structure, assigns a unique chain id where needed
        and writes the renamed structures to the output folder
"""
for struc in self.structures:
chain_name = ""
for model in struc.biostruct:
for chain in model:
chain_name += chain.id
self.clean_structures(chain)
if (chain_name in self.taken_chains) or (chain_name == ""):
self.rename_chains(chain, self.alphabet[0])
self.taken_chains.append(self.alphabet[0])
self.alphabet.pop(0)
elif (chain_name not in self.taken_chains):
self.taken_chains.append(chain_name)
if chain_name in self.alphabet:
self.alphabet.remove(chain_name)
self.write_pdb(struc.biostruct, self.outname+"/"+str(struc.filename))
def create_outfolder(self, outname):
"""
        records the output folder that will hold the prepared structures' files
        (directory creation itself is currently commented out)
"""
#if os.path.exists(str(outname)) == True:
# rmtree(str(outname))
self.outname = outname
#os.mkdir(str(outname))
def extract_structures(self, infolder):
"""
takes all files from outfolder and stores in self.structures list of objects
"""
#os.system("python cleanPDB.py -q -d "+str(infolder))
#run_cleanPDB(str(filename), self.shape_file)
pdb_files = glob.glob(str(infolder)+'/*.pdb')
#if len(pdb_files) == 0: raise PyRy3D_IG_Error("The files you provided are not pdb files")
for pdbfile in pdb_files:
parser = PDBParser()
structure = parser.get_structure(str(pdbfile), pdbfile)
pdbfile=pdbfile.replace("\\","/")
#print "POREPLACE", pdbfile
filename = pdbfile.split("/")[-1]
#print "DALEJ", filename
struc = InStructure(structure,filename)
if len(list(structure.get_residues())) == 0:
raise PyRy3D_IG_Error("The file you provided for structure %s is not a valid pdb file"%(structure.id))
self.structures.append(struc)
def clean_structures(self, chain):
"""
remove hetatms, ions, ligands etc which are not parsed by Bio.PDB
"""
print "Cleaning", chain.id
for resi in chain:
if resi.id[0] != ' ':
#print "!!!!!!!!", chain.id, resi.id, resi.resname
#if resi.resname == "MSE":
# resi.resname = "MET"
#resi.id[0] = " "
#else:
#print "DETACH", resi.id, resi.resname, chain.id
chain.detach_child(resi.id)
def rename_chains(self, chain, chain_name):
"""
renames chains in structures, as a result each structure has
a different chain name (A, B,......, Z)
"""
#what if more than 24 chains?
chain.id = chain_name
def renumber_residues(self, chain):
"""
renumbers residues from 1 to ...
"""
i = 1
for resi in chain:
resi.id = (' ', i, ' ')
i += 1
#def renumber_residues_start_stop(struct, start_id, stop_id, ren_type = None):
# """
# method for renumbering residues according to user defined order
# """
# i = start_id
# for model in struct:
# for chain in model:
# chain.id = 'P'
# for residue in chain:
# if ren_type != None:
# if residue.id[2] != ' ':
# residue.id = (' ', i, ' ')
# i += 1
# elif i <= stop_id:
# residue.id = (' ', i, ' ')
# i += 1
# return struct
def write_pdb(self, structure, filename):
"""
        Writes the structure to a pdb file, saving the changed coordinates
"""
fp=open(filename, "w")
io=PDBIO(1)
io.set_structure(structure)
io.save(fp)
def archive_structures(self,rankname):
"""
creates tar archive with structures - final input for PyRy3D
"""
if rankname != "":
#rankname = "input"
tar = tarfile.open(self.outname+"/packs/"+rankname+".tar", "w:")
tarname=rankname
tar.add(self.outname,arcname=tarname,recursive=False)
files = glob.glob(self.outname+"/*.pdb")
for f in files:
fn = f.split("/")[-1]
tar.add(f,arcname=tarname+"/"+fn)
tar.close()
else:
rankname = "input"
tar = tarfile.open(self.outname+"/"+rankname+".tar", "w:")
#tarname=self.outname.split("/")[-1]
tarname=rankname
tar.add(self.outname,arcname=tarname)
tar.close()
class InSequences(object):
"""
stores information and methods to create default PyRy3D mfasta file
"""
def __init__(self):
pass
def __str__(self):
pass
def generate_pyry_insequences(self, fastafile, structures):
"""
create multi fasta file in format:
>A
seq_A
>B
seq_B
Parameters:
-------------
fastafile : output fasta file name
structures : list of all structures as Bio.PDB objects
"""
self.create_fasta_file(fastafile)
self.get_sequences(structures)
def create_fasta_file(self, filename):
if os.path.exists(str(filename)) == True:
os.remove(str(filename))
fh = open(filename, "a")
self.fasta_file = fh
def get_sequences(self, structures):
"""
retrieves struct sequence as one letter code
Parameters:
-----------
structures: all structures from infolder as a list of Bio.PDB objects
"""
for struct in structures:
sequence, chains_names = "", []
for ch in struct.biostruct.get_chains():
chains_names.append(ch.id)
for resi in struct.biostruct.get_residues():
resi_name = ''
resi_name += resi.resname.strip()
#for 3letter residue names like "ALA"
if len(resi_name) == 3:
resi_name = self.get_1letter_resname(resi_name, struct.biostruct, ch)
resi.resname = resi_name
#for dna names like "DC"
elif len(resi_name) == 2:
resi_name = resi_name[1]
resi.resname = resi_name
sequence += resi_name
self.add_sequence(sequence, chains_names)
self.fasta_file.close()
def add_sequence(self, sequence, chains):
"""
adds sequence to fasta file
"""
chains_ids = ";".join(chains)
self.fasta_file.write(">"+str(chains_ids)+"\n")
self.fasta_file.write(sequence+"\n")
def get_1letter_resname(self, resname, struct, chain):
"""
returns 1letter code of given residue eg. A for ALA
Parameters:
-----------
resname : residue name in any notation eg ALA, URI or A, U
struct : structure for which the function works at the moment
Returns:
---------
resname in 1letter notation e.g. A, U
Raises:
--------
        nothing; an unknown residue name yields an empty string instead
"""
if resname in RESNAMES.keys() : return RESNAMES[resname]
else:
#print "There is no residue %s %s"%(resname, chain.id)
return ""
class InConfig(object):
"""
stores information and methods to create default PyRy3D cofig file
"""
def __init__(self):
pass
def __str__(self):
pass
def generate_pyry_inconfig(self, filename):
"""
generates config file with all values set into default
"""
self.create_config_file(str(filename))
self.add_default_data()
def create_config_file(self, conffile):
"""
"""
if os.path.exists(str(conffile)) == True:
os.remove(str(conffile))
self.confile = open(conffile, "a")
def add_default_data(self):
"""
"""
content = """
SIMMETHOD x #genetic or sa for simulated annealing (default)
#REDUCTMETHOD roulette #Roulette,Tournament,Cutoff
ANNTEMP 10 #from range X to Y
STEPS 10 #default 100; how many simulation steps to perform?; maximum 1000
MAP_OUT 1 #default 1; can be in range from 0 to 10
BOX_OUT 1 #default 1; can be in range from 0 to 10
MAP_FREE_SPACE 1 #default 1; can be in range from 0 to 10
COLLISION 1 #default 1.5; can be in range from 0 to 10
RESTRAINTS 1 #default 1.3; can be in range from 0 to 10
MAXROT 10 #default is 5
MAXTRANS 10 10 10 #default is [5, 5, 5]
KVOL 1 #kvol default is 10, max is 50; how many complex volumes will describe density map
#THRESHOLD 1.6 #float value existing in map file, default is 0
SIMBOX 1.2 #default simulation box diameter
GRIDRADIUS 1.0 #default is 1.0
GRIDTYPE X
PARAMSCALINGRANGES 0 25 50 #default 0 25 50; at what point of simulation should parameter scaling ranges kick in
PARAMSCALINGR1 50 100 #default 50 100
PARAMSCALINGR2 25 50 #default 25 50
PARAMSCALINGR3 0 25 #default 0 25
WRITE_N_ITER 1 # default 1, minimum 1 max=STRUCT_NR
#OUT_STEPS FIRST LAST #default one struct with best score; which steps in output data?
STRUCT_NR 1 #default 0.1SIMUL_NR; number ot out structure with best scores
"""
self.confile.write(content)
self.confile.close()
def add_user_defined_data(self):
"""
method takes params from the user (from command line) and adds to config file
"""
pass
if __name__=='__main__':
doc = """
PyRy3D_Input_Generator
easy generator of input files for PyRy3D program
(c) 2010 by Joanna M. Kasprzak
usage: python pyry3d.py
"""
print doc
#config = InConfig().generate_pyry_inconfig("config.txt")
instr = InStructures()
instr.generate_pyry_instructures("problems", "problems")
inseq = InSequences().generate_pyry_insequences("problems.fasta", instr.structures)
#print get_pyry_command()
| gpl-3.0 | 8,014,325,520,493,498,000 | 32.449173 | 126 | 0.541452 | false |
nico01f/z-pec | ZimbraServer/src/python/pylibs/conf.py | 1 | 2933 | #
# ***** BEGIN LICENSE BLOCK *****
# Zimbra Collaboration Suite Server
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Zimbra, Inc.
#
# The contents of this file are subject to the Zimbra Public License
# Version 1.3 ("License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.zimbra.com/license.
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
# ***** END LICENSE BLOCK *****
#
import os
class Config:
def __init__(self):
self.config = {}
self.progname = "zmconfigd"
if (os.getenv("zimbra_server_hostname") is not None):
self.hostname = os.getenv("zimbra_server_hostname")
else:
self.hostname = os.popen("/opt/zimbra/bin/zmhostname").readline().strip()
if (self.hostname is None or self.hostname == ""):
os._exit(1)
self.wd_all = False
self.debug = False
self.baseDir = "/opt/zimbra"
self.logStatus = {
4 : "Debug",
3 : "Info",
2 : "Warning",
1 : "Error",
0 : "Fatal"
}
self.configFile = self.baseDir+"/conf/zmconfigd.cf";
self.logFile = self.baseDir+"/log/"+self.progname+".log";
self.pidFile = self.baseDir+"/log/"+self.progname+".pid";
self.interval = 60
if self.debug:
self.interval = 10
self.restartconfig = False
self.watchdog = True
self.wd_list = [ "antivirus" ]
self.loglevel = 3
def __setitem__(self, key, val):
self.config[key] = val
def __getitem__(self, key):
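        # missing keys return None instead of raising KeyError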
try:
return self.config[key]
except Exception, e:
return None
def setVals(self, state):
self.ldap_is_master = state.localconfig["ldap_is_master"]
self.ldap_root_password = state.localconfig["ldap_root_password"]
self.ldap_master_url = state.localconfig["ldap_master_url"]
self.loglevel = 3
if state.localconfig["ldap_starttls_required"] is not None:
self.ldap_starttls_required = (state.localconfig["ldap_starttls_required"].upper() != "FALSE")
if state.localconfig["zmconfigd_log_level"] is not None:
self.loglevel = int(state.localconfig["zmconfigd_log_level"])
self.interval = 60
if state.localconfig["zmconfigd_interval"] is not None and state.localconfig["zmconfigd_interval"] != "":
self.interval = int(state.localconfig["zmconfigd_interval"])
self.debug = False
if state.localconfig["zmconfigd_debug"] is not None:
self.debug = state.localconfig["zmconfigd_debug"]
if state.localconfig["zmconfigd_watchdog"] is not None:
self.watchdog = (state.localconfig["zmconfigd_watchdog"].upper() != "FALSE")
if state.localconfig["zmconfigd_enable_config_restarts"] is not None:
self.restartconfig = (state.localconfig["zmconfigd_enable_config_restarts"].upper() != "FALSE")
if state.localconfig["zmconfigd_watchdog_services"] is not None:
self.wd_list = state.localconfig["zmconfigd_watchdog_services"].split()
| mit | 2,484,072,969,233,789,000 | 36.126582 | 107 | 0.682919 | false |
astorfi/TensorFlow-World | codes/3-neural_networks/convolutional-neural-network/code/net_structure/net.py | 1 | 3884 | #####################################
# With some tiny modification, this code is the one used by Tensorflow slim at:
# https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim
# Please refer to the link for further explanations.
### The difference is that this architecture is written in a fully-convolutional fashion.
### The advantage is that this model can be used for larger image sizes with some average pooling in the last layer.
import tensorflow as tf
slim = tf.contrib.slim
def net_architecture(images, num_classes=10, is_training=False,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='Net'):
"""Creates a variant of the Net model.
Args:
images: The batch of `Tensors`: size [batch_size, height, width, channels].
num_classes: Total number of classes.
is_training: Training/Validation.
dropout_keep_prob: The percentage of activation values: Only active in training mode!
scope: Variable_scope.
Returns:
logits: the pre-softmax activations of size [batch_size, `num_classes`]
end_points: The dictionary for the layers outputs.
"""
# Create empty dictionary
end_points = {}
with tf.variable_scope(scope, 'Net', [images, num_classes]) as sc:
end_points_collection = sc.name + '_end_points'
# Collect outputs for conv2d and max_pool2d.
with tf.contrib.framework.arg_scope([tf.contrib.layers.conv2d, tf.contrib.layers.max_pool2d],
outputs_collections=end_points_collection):
# Layer-1
net = tf.contrib.layers.conv2d(images, 32, [5, 5], scope='conv1')
net = tf.contrib.layers.max_pool2d(net, [2, 2], 2, scope='pool1')
# Layer-2
net = tf.contrib.layers.conv2d(net, 64, [5, 5], scope='conv2')
net = tf.contrib.layers.max_pool2d(net, [2, 2], 2, scope='pool2')
# Layer-3
net = tf.contrib.layers.conv2d(net, 1024, [7, 7], padding='VALID', scope='fc3')
net = tf.contrib.layers.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout3')
# Last layer which is the logits for classes
logits = tf.contrib.layers.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='fc4')
# Return the collections as a dictionary
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
# Squeeze spatially to eliminate extra dimensions.(embedding layer)
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='fc4/squeezed')
end_points[sc.name + '/fc4'] = logits
return logits, end_points
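# Example usage (sketch; variable names are illustrative):
#   with slim.arg_scope(net_arg_scope(is_training=True)):
#       logits, end_points = net_architecture(images, num_classes=10, is_training=True)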
def net_arg_scope(weight_decay=0.0005, is_training=False):
"""Defines the default network argument scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
Returns:
An `arg_scope` to use for the model.
"""
if is_training:
with tf.contrib.framework.arg_scope(
[tf.contrib.layers.conv2d],
padding='SAME',
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG',
uniform=False, seed=None,
dtype=tf.float32),
activation_fn=tf.nn.relu) as sc:
return sc
else:
with tf.contrib.framework.arg_scope(
[tf.contrib.layers.conv2d],
padding='SAME',
activation_fn=tf.nn.relu) as sc:
return sc
| mit | 468,183,706,509,463,600 | 40.763441 | 117 | 0.579815 | false |
scality/scality-manila-utils | scality_manila_utils/smb_helper.py | 1 | 11130 | # Copyright (c) 2015 Scality
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Collection of functions for
- Addition and removal of exports
- Management of client permissions on export locations
"""
import errno
import functools
import io
import json
import logging
import os
import subprocess
import time
try:
import ConfigParser as configparser
except ImportError:
import configparser
from scality_manila_utils import utils
from scality_manila_utils.exceptions import (ClientExistsException,
ClientNotFoundException,
EnvironmentException,
ExportException,
ExportAlreadyExists,
ExportNotFoundException,
ExportHasGrantsException)
log = logging.getLogger(__name__)
# From http://prosseek.blogspot.fr/2012/10/
# reading-ini-file-into-dictionary-in.html
class SmbConfParser(configparser.ConfigParser):
def as_dict(self):
d = dict(self._sections)
for k in d:
d[k] = dict(self._defaults, **d[k])
d[k].pop('__name__', None)
return d
def _get_defined_exports():
"""Retrieve all defined exports from the Samba registry."""
with utils.elevated_privileges():
cmd = ['net', 'conf', 'list']
msg = ("Something went wrong while dumping the Samba "
"registry: stdout='{stdout}', stderr='{stderr}'")
stdout, stderr = utils.execute(cmd, msg)
config = SmbConfParser()
output = stdout.replace('\t', '')
config.readfp(io.StringIO(output))
return config.as_dict()
def verify_environment(root_export):
"""
Preliminary checks for installed binaries and running services.
:param root_export: SOFS directory which holds the export points exposed
through manila
:type root_export: string (unicode)
:raises:
:py:class:`scality_manila_utils.exceptions.EnvironmentException`
if the environment is not ready
"""
if not utils.is_stored_on_sofs(root_export):
raise EnvironmentException("%s doesn't seem to be stored on a SOFS "
"filesystem" % root_export)
env_path = os.getenv('PATH').split(':')
for binary in ('net', 'sfused'):
utils.binary_check(binary, env_path)
for process in ('sfused', 'smbd'):
utils.process_check(process)
with io.open('/etc/samba/smb.conf') as f:
# We can't use `for line in f` here because it seems unmockable...
for line in f.readlines():
if line.strip() == 'registry shares = yes':
break
else:
msg = ("You must enable 'registry shares' in your Samba "
"configuration: add 'registry shares = yes' in the [global]"
" section.")
raise EnvironmentException(msg)
def ensure_environment(f):
"""
Decorator function which verifies that expected services are running etc.
"""
@functools.wraps(f)
def wrapper(root_export, *args, **kwargs):
verify_environment(root_export)
return f(root_export=root_export, *args, **kwargs)
return wrapper
def ensure_export_exists(f):
"""
Decorator function which verifies that a given export exists and pass
the `dict` of all defined exports to the decorated function.
"""
@functools.wraps(f)
def wrapper(export_name, *args, **kwargs):
exports = _get_defined_exports()
if export_name not in exports:
msg = "Share '{0:s}' not found in Samba registry.".format(
export_name)
raise ExportNotFoundException(msg)
return f(export_name=export_name, exports=exports, *args, **kwargs)
return wrapper
@ensure_environment
@ensure_export_exists
def get_export(export_name, exports, *args, **kwargs):
"""
Retrieve client details of an export.
:param export_name: name of export
:type export_name: string (unicode)
:param exports: all the defined shares in the Samba registry
:type exports: dictionary
:returns: string with export client details in json format
"""
export = exports[export_name]
clients = dict((host, ["rw"]) for host in export['hosts allow'].split())
return json.dumps(clients)
@ensure_environment
def add_export(root_export, export_name, *args, **kwargs):
"""
Add an export.
:param root_export: SOFS directory which holds the export points exposed
through manila
:type root_export: string (unicode)
:param export_name: name of export to add
:type export_name: string (unicode)
"""
if not export_name or '/' in export_name:
raise ExportException('Invalid export name')
export_point = os.path.join(root_export, export_name)
create_cmd = [
'net', 'conf', 'addshare', export_name, export_point,
'writeable=y', 'guest_ok=y',
]
parameters = {
'browseable': 'yes',
'create mask': '0755',
'hosts deny': '0.0.0.0/0', # deny all by default
'hosts allow': '127.0.0.1',
'read only': 'no',
}
set_of_commands = [['net', 'conf', 'setparm', export_name,
param, value] for param, value in parameters.items()]
with utils.elevated_privileges():
try:
os.mkdir(export_point)
# On some systems, the `mode` argument of mkdir is ignored.
# So be safe, and do an explicit chmod.
os.chmod(export_point, 0o0777)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
else:
log.debug("The share/directory %s already exists on SOFS",
export_name)
exports = _get_defined_exports()
if export_name in exports:
msg = ("Share '{0:s}' already defined in Samba "
"registry.".format(export_name))
raise ExportAlreadyExists(msg)
subprocess.check_call(create_cmd)
for cmd in set_of_commands:
subprocess.check_call(cmd)
@ensure_environment
@ensure_export_exists
def wipe_export(root_export, export_name, exports):
"""
Remove an export.
The export point is not actually removed, but renamed with the prefix
"TRASH-".
:param root_export: SOFS directory which holds the export points exposed
through manila
:type root_export: string (unicode)
:param export_name: name of export to remove
:type export_name: string (unicode)
:param exports: all the defined shares in the Samba registry
:type exports: dictionary
"""
export = exports[export_name]
export_path = os.path.join(root_export, export_name)
# Wipe export if and only if no "external host" has access to it
if export['hosts allow'] not in ['', '127.0.0.1']:
raise ExportHasGrantsException('Unable to remove export with grants')
# We need to introduce a "variable" part (i.e a date)
# in case an export with the same name is deleted twice
tombstone = u'TRASH-{0:s}-{1:s}'.format(export_name,
time.strftime("%Y-%b-%d-%X-%Z"))
tombstone_path = os.path.join(root_export, tombstone)
with utils.elevated_privileges():
log.info("Deleting the export '%s' from the Samba registry",
export_name)
cmd = ['net', 'conf', 'delshare', export_name]
msg = ("Something went wrong while deleting the export {0:s}: "
"stdout={{stdout}}, stderr={{stderr}}").format(export_name)
utils.execute(cmd, msg)
log.info("Renaming export '%s' to '%s'", export_name, tombstone)
try:
os.rename(export_path, tombstone_path)
except OSError as exc:
log.error("Unable to rename '%s' for removal : %r",
export_name, exc)
# Two concurrent wipe_export could happen at the same time so
# the loser of the race could see a ENOENT.
if exc.errno != errno.ENOENT:
raise
# Persisting the parent of the moved directory is required, as
# it keeps track of its contents.
utils.fsync_path(root_export)
def _set_hosts_allow(export_name, hosts_allow):
"""
Set the `hosts_allow` parameter for a given share.
:param export_name: name of export to grant access to
:type export_name: string (unicode)
:param hosts_allow: hosts allowed on this share
:type hosts_allow: iterable of `str`
"""
cmd = ['net', 'conf', 'setparm', export_name,
'hosts allow', ' '.join(hosts_allow)]
msg = ("Something went wrong while setting '{0!r}' as "
"the list of 'hosts allow' for share '{1:s}': stdout={{stdout}}, "
"stderr={{stderr}}").format(hosts_allow, export_name)
with utils.elevated_privileges():
utils.execute(cmd, msg)
@ensure_environment
@ensure_export_exists
def grant_access(export_name, host, exports, *args, **kwargs):
"""
Grant access for a host to an export.
:param export_name: name of export to grant access to
:type export_name: string (unicode)
:param host: host to grant access for
:type host: string (unicode)
:param exports: all the defined shares in the Samba registry
:type exports: dictionary
"""
hosts_allow = exports[export_name]['hosts allow'].split()
if host in hosts_allow:
msg = "Host '{0:s}' already allowed on share '{1:s}'".format(
host, export_name)
raise ClientExistsException(msg)
hosts_allow.append(host)
_set_hosts_allow(export_name, hosts_allow)
@ensure_environment
@ensure_export_exists
def revoke_access(export_name, host, exports, *args, **kwargs):
"""
Revoke access for a host to an export.
:param export_name: name of export for revocation
:type export_name: string (unicode)
:param host: host to revoke access for
:type host: string (unicode)
:param exports: all the defined shares in the Samba registry
:type exports: dictionary
"""
hosts_allow = exports[export_name]['hosts allow'].split()
if host not in hosts_allow:
raise ClientNotFoundException("'{0:s}' has no access defined on share "
"'{1:s}'".format(host, export_name))
hosts_allow.remove(host)
_set_hosts_allow(export_name, hosts_allow)
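# A minimal usage sketch (assumptions: a configured Samba registry, and the
# `ensure_environment`/`ensure_export_exists` decorators injecting the
# `exports` mapping; `ROOT_EXPORT` and the share name are hypothetical):
#
#   grant_access('share01', '10.0.0.5')
#   revoke_access('share01', '10.0.0.5')
#   wipe_export(ROOT_EXPORT, 'share01')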
| apache-2.0 | 762,380,520,090,251,500 | 32.223881 | 79 | 0.614555 | false |
eblot/miscripts | Python/embeddev/prepbr.py | 1 | 2923 | #!/usr/bin/env python
# A project-specific tool to prune unwanted components when preparing a branch
from __future__ import with_statement
import os
import re
import sys
from optparse import OptionParser
from subprocess import Popen, PIPE, STDOUT
TRUE_VALUES = ['yes', 'ok', 'true']
FALSE_VALUES = ['no', 'none', 'false']
def parse(filename):
if not filename or not os.path.isfile(filename):
raise AssertionError("Invalid filename: %s" % filename)
states = '|'.join(TRUE_VALUES + FALSE_VALUES)
compcre = re.compile(r'^\s*\*\s+(n[adsl]_\w+)\s+(' + states + ')\s*$',
re.IGNORECASE)
add_comp = []
del_comp = []
with open(filename, 'rt') as f:
for line in f.readlines():
mo = compcre.match(line)
if mo:
comp = mo.group(1)
status = mo.group(2)
if status in TRUE_VALUES:
add_comp.append(comp)
elif status in FALSE_VALUES:
del_comp.append(comp)
return (add_comp, del_comp)
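# Hypothetical input excerpt that parse() accepts (inferred from the regular
# expression above; component names are illustrative):
#
#   * na_core    yes
#   * nd_uart    no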
def proceed(add, rem, stop_on_error=False):
MAP = { 'na' : 'neoasl',
'nd' : 'neodrv',
'nl' : 'neolib',
'ns' : 'neosys' }
for order, info in enumerate([('sdk', ''), ('sdktests', '_test')]):
for fn in add:
libdir = MAP[fn.split('_')[0]]
reldir = os.path.join(info[0], libdir, fn + info[1])
if order == 0 and not os.path.isdir(reldir):
err = "Missing required component '%s'" % reldir
if stop_on_error:
raise AssertionError(err)
else:
print >> sys.stderr, "Warning: %s" % err
del_list = []
for fn in rem:
libdir = MAP[fn.split('_')[0]]
reldir = os.path.join(info[0], libdir, fn + info[1])
if os.path.isdir(reldir):
del_list.append(reldir)
if del_list:
args = ['svn', 'rm']
args.extend(del_list)
child = Popen(args)
ret = child.wait()
if ret and stop_on_error:
raise AssertionError('Error: SVN cannot remove files')
if __name__ == '__main__':
try:
usage = 'Usage: %prog [options]\n'\
' prepare a project branch'
optparser = OptionParser(usage=usage)
optparser.add_option('-i', '--input', dest='input',
help='input file')
optparser.add_option('-k', '--keep-going', dest='keepgo',
action='store_true',
help='Keep going on error')
(options, args) = optparser.parse_args(sys.argv[1:])
(add, rem) = parse(options.input)
proceed(add, rem, stop_on_error=not options.keepgo)
except AssertionError, e:
print >> sys.stderr, "Error: %s" % e.args[0]
exit(-1)
| mit | -231,721,042,177,944,200 | 34.646341 | 74 | 0.509066 | false |
TrabalhoRedesUfrj/NEOM | NEOM.py | 1 | 14411 | ##+"""
##+ Main function of the client with GUI
##+ Author: Ex7755
##+"""
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from random import randint
import time
import socket, select, string, sys, ssl, pprint
from Protocol import MessageHandler
ssl_certfile = "./keys/server.crt"
class ClientThread(QThread):
progressEvent = pyqtSignal(QString)
serChato = pyqtSignal()
geraImg = pyqtSignal()
def __init__(self,contaTe,mensTe, textEnv, ssl_sock,username,w):
QThread.__init__(self)
self.contaTe = contaTe
self.mensTe = mensTe
self.textEnv = textEnv
self.ssl_sock = ssl_sock
self.sent = 0
self.mens = ""
self.username = username
self.w = w
def recieve(self,mens):
self.mens = mens
self.sent = 21
def sendvib(self):
self.sent = 22
def sendfil(self,filee):
self.filee = filee
self.sent = 23
def sendimage(self,imgf):
self.imgf = imgf
self.sent = 24
def close(self):
self.runT = False
def run(self):
self.runT = True
while self.runT:
socket_list = [self.sent,self.ssl_sock]
# Get the list sockets which are readable
read_sockets, write_sockets, error_sockets = select.select(socket_list, [], [])
for sock in read_sockets:
# incoming message from remote server
if sock == self.ssl_sock:
data = sock.recv(262144)
if not data:
print '\nDisconnected from chat server'
self.w.close()
self.runT = False
else:
indata = MessageHandler()
indata.receiveMessage(data)
cmd = indata.readOther()
msg = indata.readMessage()
user = indata.readName()
if cmd == None:
self.mensTe.append("\r%s:\n%s"%(user,msg))
elif cmd[0] == "userIn":
self.mensTe.append("\r%s:\n%s"%(user,msg))
self.contaTe.append(msg.split(" ")[0]+"\n")
elif cmd[0] == "userOut":
self.mensTe.append("\r%s:\n%s"%(user,msg))
tempCont = self.contaTe.toPlainText()
tempCont.replace('\n'+msg.split(" ")[1]+'\n',"")
self.progressEvent.emit(tempCont)
elif cmd[0] == "newFile":
self.mensTe.append("\r%s:\n%s"%(user,msg))
indata.readDocument("savedDocuments/")
elif cmd[0] == "newPicture":
self.mensTe.append("\r%s:\n%s"%(user,msg))
self.imgPa = indata.readDocument("savedDocuments/")
self.geraImg.emit()
elif cmd[0] == "chato":
self.serChato.emit()
else:
self.mensTe.append("\r%s:\n%s"%(user,msg))
# user entered a message
else:
if self.sent == 21:
out = MessageHandler()
out.addMessage(self.mens)
out.addName(self.username)
self.sent = 0
self.ssl_sock.send(out.sendMessage())
elif self.sent == 22:
out = MessageHandler()
out.addOther("chato")
out.addMessage(" ")
out.addName(self.username)
self.sent = 0
self.ssl_sock.send(out.sendMessage())
elif self.sent == 23:
out = MessageHandler()
out.addMessage("enviou um arquivo.")
out.addOther("newFile")
out.addName(self.username)
out.addDocument(self.filee)
self.sent = 0
self.mensTe.append("\r%s:\n%s"%(self.username,"enviou um arquivo."))
self.ssl_sock.send(out.sendMessage())
elif self.sent == 24:
out = MessageHandler()
out.addDocument(self.imgf)
out.addMessage("enviou uma imagem.")
out.addOther("newPicture")
out.addName(self.username)
self.mensTe.append("\r%s:\n%s"%(self.username,"enviou uma imagem."))
self.ssl_sock.send(out.sendMessage())
self.sent = 0
out = MessageHandler()
out.addMessage("QUIT")
out.addName(self.username)
self.ssl_sock.send(out.sendMessage())
class ChatJan(QWidget):
def defineThre(self,thre):
self.thre = thre
def closeEvent(self,event):
self.thre.close()
def chat(myName,serverIp,serverPort,app,geo, ssl_sock,users):
def tremer():
xOri = w.geometry().x()
yOri = w.geometry().y()
w.move(0,0)
xD = w.geometry().x()
yD = w.geometry().y()
xOri = xOri - xD
yOri = yOri - yD
for i in range(1,100):
w.move(xOri,yOri)
xt = randint(-5,5)
yt = randint(-5,5)
w.move(xOri+xt,yOri+yt)
app.processEvents()
time.sleep(0.01)
w.move(xOri,yOri)
def bAten_clicked():
client.sendvib()
tremer()
def bEnv_clicked():
mensagem = str(textEnv.toPlainText())
client.recieve(mensagem)
mensTe.append("\r%s:\n%s\n"%(myName,mensagem))
textEnv.clear()
def bEnvFile_clicked():
fileDiag = QFileDialog()
fileDiag.setFilter(fileDiag.filter() | QDir.Hidden)
fileDiag.setDefaultSuffix('*')
fileDiag.setAcceptMode(QFileDialog().AcceptSave)
fileDiag.setNameFilters(['*(*.*)'])
filename = str(fileDiag.getOpenFileName(w,'Open File','/'))
if fileDiag.selectedFiles() and filename != '':
client.sendfil(filename)
def showImg(filename):
print filename
showImg = QDialog()
palette = QPalette()
palette.setBrush(QPalette.Background,QBrush(QPixmap(filename)))
showImg.setPalette(palette)
showImg.setFixedSize(QPixmap(filename).width(),QPixmap(filename).height())
showImg.exec_()
def bEnvImg_clicked():
fileDiag = QFileDialog()
fileDiag.setNameFilters(["Imagens (*.png *jpg)"])
filename = str(fileDiag.getOpenFileName(w,'Open File','/'))
if fileDiag.selectedFiles() and filename != '':
client.sendimage(filename)
showImg(filename)
def onResize(event):
palette = QPalette()
palette.setBrush(QPalette.Background,QBrush(QPixmap("fk-neon.jpg").scaled(w.size())))
w.setPalette(palette)
def remakeCont(newCont):
contaTe.clear()
contaTe.append(newCont)
def keyEven(event):
if event.key() == Qt.Key_Return:
bEnv_clicked()
def receberImg():
showImg(str(client.imgPa))
w = ChatJan()
w.resizeEvent = onResize
userT = QLabel(w)
userT.setText("Usuario: " + myName)
userT.setStyleSheet("color: white")
conneT = QLabel(w)
conneT.setText("Conectado a: "+ serverIp +":")
conneT.setStyleSheet("color: white")
mensTi = QLabel(w)
mensTi.setText("Mensagens")
mensTi.setStyleSheet("color: white")
mensTe = QTextEdit(w)
mensTe.setReadOnly(True)
contaTi = QLabel(w)
contaTi.setText("Usuarios conectados")
contaTi.setStyleSheet("color: white")
contaTe = QTextEdit(w)
contaTe.setReadOnly(True)
contaTe.append(myName+"\n")
if (users != "None"):
contaTe.append(users+"\n")
textEnv = QTextEdit(w)
textEnv.keyReleaseEvent = keyEven
bAten = QPushButton(w)
bAten.setText("Chamar a atencao")
bAten.clicked.connect(bAten_clicked)
bEnvFile = QPushButton(w)
bEnvFile.setText("Enviar arquvo")
bEnvFile.clicked.connect(bEnvFile_clicked)
bEnv = QPushButton(w)
bEnv.setText("Enviar")
bEnv.clicked.connect(bEnv_clicked)
bEnvImg = QPushButton(w)
bEnvImg.setText("Enviar imagem")
bEnvImg.clicked.connect(bEnvImg_clicked)
grid1 = QGridLayout()
grid1.addWidget(contaTi,1,1,Qt.AlignCenter)
grid1.addWidget(contaTe,2,1,-1,2)
grid1.addWidget(mensTi,1,3,Qt.AlignCenter)
grid1.addWidget(mensTe,2,3)
grid2 = QGridLayout()
grid2.addWidget(textEnv,3,1,4,1)
grid2.addWidget(bAten,3,2)
grid2.addWidget(bEnvFile,4,2)
grid2.addWidget(bEnvImg,5,2)
grid2.addWidget(bEnv,6,2)
hbox1 = QHBoxLayout()
hbox1.addStretch()
hbox1.addWidget(userT)
hbox1.addStretch()
hbox2 = QHBoxLayout()
hbox2.addStretch()
hbox2.addWidget(conneT)
hbox2.addStretch()
vbox = QVBoxLayout()
vbox.addLayout(hbox1)
vbox.addLayout(hbox2)
vbox.addLayout(grid1)
vbox.addLayout(grid2)
w.setLayout(vbox)
client = ClientThread(contaTe,mensTe, textEnv, ssl_sock,myName,w)
client.progressEvent.connect(remakeCont)
client.serChato.connect(tremer)
client.geraImg.connect(receberImg)
palette = QLabel()
w.defineThre(client)
w.setGeometry(geo.x(),geo.y(),800,500)
w.setMinimumSize(800,500)
palette = QPalette()
palette.setBrush(QPalette.Background,QBrush(QPixmap("fk-neon.jpg").scaled(w.size())))
w.setPalette(palette)
w.setWindowTitle("NEOM")
w.show()
client.start()
def start(app):
def bCo_clicked(new):
try:
temp = False
errMens = None
try:
serverIp = str(textT.text())
serverPort = int(textTP.text())
myName = str(textTU.text())
myPass = str(textTUS.text())
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
ssl_sock=ssl.wrap_socket(s,ca_certs=ssl_certfile,cert_reqs=ssl.CERT_REQUIRED)
try:
ssl_sock.connect((serverIp,serverPort))
except:
errMens = "Falha ao tentar conectar no servidor"
except:
print "dados invalidos"
auth = MessageHandler()
ssl_sock.send(auth.sendAuthentication(myName, myPass, new=new))
ans = False
socket_list = [ssl_sock]
read_sockets, write_sockets, error_sockets = select.select(socket_list, [], [])
while not ans:
for sock in read_sockets:
if sock == ssl_sock:
data = sock.recv(262144)
if data:
auth.cleanAll()
auth.receiveMessage(data)
commands = auth.readOther()
if "authenticate" in commands:
if "ok" in commands:
ans = True
temp = True
users = str(auth.readMessage())
users = users.replace(',','\n')
break
elif "fail" in commands:
text = auth.readMessage()
errMens = "Nao foi possivel executar comando:\n%s" % (text)
ans = True
break
errMens = "Resposta nao pode ser executada."
ans = True
break
except:
errMens = "Servidor nao encontrado"
print errMens
ans = True
if (temp):
w.close()
chat(myName,serverIp,serverPort,app,w.geometry(), ssl_sock,users)
else:
errMensq = QMessageBox(None)
errMensq.setIcon(QMessageBox.Warning)
errMensq.setText(errMens)
errMensq.exec_()
def bRes_clicked():
new = True
bCo_clicked(new)
def onResize(event):
palette = QPalette()
palette.setBrush(QPalette.Background,QBrush(QPixmap("abstract-neon.jpg").scaled(w.size())))
w.setPalette(palette)
w = QWidget()
w.resizeEvent = onResize
subT = QLabel(w)
subT.setText("Digite o ip do servidor:")
subT.setStyleSheet("color: white")
subTP = QLabel(w)
subTP.setText("Digite a porta do servidor:")
subTP.setStyleSheet("color: white")
subTU = QLabel(w)
subTU.setText("Digite o nome de usuario:")
subTU.setStyleSheet("color: white")
subTUS = QLabel(w)
subTUS.setText("Digite a senha:")
subTUS.setStyleSheet("color: white")
textT = QLineEdit(w)
textTP = QLineEdit(w)
textTU = QLineEdit(w)
textTUS = QLineEdit(w)
textTUS.setEchoMode(QLineEdit.Password)
bCo = QPushButton(w)
bCo.setText("Conectar")
bCo.clicked.connect(bCo_clicked)
bRes = QPushButton(w)
bRes.setText("Registrar")
bRes.clicked.connect(bRes_clicked)
vbox = QVBoxLayout()
vbox.addWidget(subTU)
vbox.addWidget(textTU)
vbox.addWidget(subTUS)
vbox.addWidget(textTUS)
vbox.addWidget(subT)
vbox.addWidget(textT)
vbox.addWidget(subTP)
vbox.addWidget(textTP)
vbox.addWidget(bCo)
vbox.addWidget(bRes)
vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addStretch(1)
hbox.addLayout(vbox)
hbox.addStretch(1)
w.setLayout(hbox)
new = False
w.setGeometry(200,200,200,300)
w.setMinimumSize(200,350)
palette = QPalette()
palette.setBrush(QPalette.Background,QBrush(QPixmap("abstract-neon.jpg").scaled(w.size())))
w.setPalette(palette)
w.setWindowTitle("NEOM")
w.show()
sys.exit(app.exec_())
if __name__ == '__main__':
app = QApplication(sys.argv)
start(app)
sys.exit()
| gpl-3.0 | 7,031,653,115,401,569,000 | 34.495074 | 99 | 0.526334 | false |
amirgeva/coide | qutepart/indenter/python.py | 1 | 2214 | from qutepart.indenter.base import IndentAlgBase
class IndentAlgPython(IndentAlgBase):
"""Indenter for Python language.
"""
def computeSmartIndent(self, block, char):
prevIndent = self._prevNonEmptyBlockIndent(block)
prevNonEmptyBlock = self._prevNonEmptyBlock(block)
prevLineStripped = prevNonEmptyBlock.text().strip() # empty text from invalid block is ok
# for:
if prevLineStripped.endswith(':'):
return self._increaseIndent(prevIndent)
""" Generally, when a brace is on its own at the end of a regular line
(i.e a data structure is being started), indent is wanted.
For example:
dictionary = {
'foo': 'bar',
}
"""
if prevLineStripped.endswith('{') or \
prevLineStripped.endswith('['):
return self._increaseIndent(prevIndent)
"""Check hanging indentation
call_func(x,
y,
z
"""
try:
foundBlock, foundColumn = self.findAnyBracketBackward(prevNonEmptyBlock,
prevNonEmptyBlock.length())
except ValueError:
pass
else:
return self._makeIndentFromWidth(foundColumn + 1)
"""Unindent if hanging indentation finished
"""
if prevLineStripped and \
prevLineStripped[-1] in ')]}':
try:
foundBlock, foundColumn = self.findBracketBackward(prevNonEmptyBlock,
len(prevNonEmptyBlock.text().rstrip()) - 1,
prevLineStripped[-1])
except ValueError:
pass
else:
return self._blockIndent(foundBlock)
        # finally, continue, break, pass, raise, and return should unindent
if prevLineStripped in ('continue', 'break', 'pass', 'raise', 'return') or \
prevLineStripped.startswith('raise ') or \
prevLineStripped.startswith('return '):
return self._decreaseIndent(prevIndent)
return prevIndent
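# Behaviour sketch (illustrative): given the previous non-empty line on the
# left, computeSmartIndent() yields the indent on the right:
#
#   "for item in items:"  -> one level deeper (trailing ':')
#   "x = f(a,"            -> hanging indent, aligned after the "("
#   "    return x"        -> one level shallower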
| gpl-2.0 | -3,886,822,592,496,036,400 | 35.9 | 110 | 0.542005 | false |
hakanozadam/bal | bal/reference/prepare.py | 1 | 5583 | #!/bin/env python3
# AUTHORS:
# Hakan Ozadam
# Rachel Brown
#
# Moore Laboratory
# UMASS Medical School / HHMI
# RNA Therapeutics Institute
# Albert Sherman Center, ASC4-1009
# 368 Plantation Street
# Worcester, MA 01605
# USA
#
#################################################################
import argparse
import os
from shutil import which
from sys import platform as _os
#################################################################
def get_commandline_arguments():
''' Parse and return the command line arguments'''
parser = argparse.ArgumentParser(description=
'''
BAL Reference Prepare
This script creates bowtie2 and HISAT references for BAL.
    In order to prepare HISAT and bowtie2 references,
    BAL needs a whole-genome reference in fasta format, with exon annotation in a GTF file.
    BAL locally aligns the reads against the first N nucleotides of the introns.
    By default, N = 20, but this can be changed with the -N parameter.
''')
parser.add_argument("-g" ,
metavar = 'gtf file' ,
help = "GTF file annotating the exons in the genome of interest." ,
required = True ,
type = str)
parser.add_argument("-f" ,
metavar = 'Genomic Fasta File' ,
help = "The fasta file that contains the genomic sequence" ,
required = True ,
type = str)
parser.add_argument("-N" ,
metavar = 'Number of five prime intron nucleotides' ,
help = "This is the number of five prime nucleotides in the intron where the reaqds are going to "
"be locally aligned against." ,
required = False ,
default = 20,
type = int)
parser.add_argument("-o" ,
metavar = 'Output Directory' ,
help = "Output directory" ,
required = True ,
type = str)
return parser.parse_args()
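# Example invocation (hypothetical file names):
#
#   python prepare.py -g annotation.gtf -f genome.fa -N 20 -o ./bal_reference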
#################################################################################
def check_HISAT_files(ref_base):
''' TODO: Check for the existence of other files as well'''
result = list()
suffixes = ('.1.bt2', '.2.bt2', '.3.bt2', '.4.bt2', '.rev.1.bt2', '.rev.2.bt2')
for suffix in suffixes:
if (not os.path.isfile(ref_base + suffix) ) and\
(not os.path.isfile(ref_base + suffix + "l")):
result.append("Couldn't find the HISAT reference: " + ref_base + suffix + " or " +
ref_base + suffix + "l")
return result
#################################################################################
def process_commandline_arguments(cmd_args):
''' Check if the input files exist or not and do some consistency checks '''
error_messages = list()
if not os.path.isfile(cmd_args.f):
error_messages.append("Couldn't find the fasta file " + cmd_args.f)
if not os.path.isfile(cmd_args.g):
error_messages.append("Couldn't find the gtf file " + cmd_args.g)
if error_messages:
print("Error!\nThe following error(s) occurred:")
for error in enumerate(error_messages):
print("{n}) {e}".format(n = error[0] + 1, e = error[1]))
exit(1)
return cmd_args
##################################################################################
def get_arguments():
return process_commandline_arguments(get_commandline_arguments())
###################################################################################
###################################################################################
def get_executables(bin_directory):
''' Check the existence of executables: hisat, bowtie2
Put their paths in a dictionary and return it'''
#check the os and define bin variables for executables accordingly
if _os == "linux" or _os == "linux2":
hisat_relative_path = 'bal/bin/hisat/linux_x86_64'
bowtie2_relative_path = 'bal/bin/bowtie2/linux_x86_64'
bowtie2_build_relative_path = 'bal/bin/bowtie2/linux_x86_64/bowtie2-build'
elif _os == "darwin":
hisat_relative_path = 'bal/bin/hisat/mac_os_x_x86_64'
bowtie2_relative_path = 'bal/bin/bowtie2/mac_os_x_x86_64'
bowtie2_build_relative_path = 'bal/bin/bowtie2/mac_os_x_x86_64/bowtie2-build'
    else:
        # Neither Linux nor Mac OS X: the bundled binaries below would be
        # undefined, so fail early with a clear message.
        print("Error!\nUnsupported platform: {0:s}".format(_os))
        exit(1)
print(bowtie2_build_relative_path)
executables = dict()
error_messages = list()
executables['hisat'] = os.path.join(bin_directory, hisat_relative_path, 'hisat')
executables['hisat-build'] = os.path.join(bin_directory, hisat_relative_path, 'hisat-build')
executables['hisat_extract_splice_sites'] = os.path.join(bin_directory, hisat_relative_path,\
'extract_splice_sites.py')
executables['bowtie2'] = os.path.join(bin_directory, bowtie2_relative_path,'bowtie2')
executables['bowtie2-build'] = os.path.join(bin_directory, bowtie2_build_relative_path)
for executable, path in executables.items():
if not which(path):
error_messages.append("Couldn't find the {executable} executable at {path}"\
.format(executable = executable, path = path))
if(error_messages):
        print('The following executable(s) are missing. If you have the files in the indicated path, '
              'make sure that the files are executable.')
print("\n".join(error_messages))
exit(1)
return executables
| gpl-2.0 | 3,486,771,046,450,948,000 | 37.770833 | 118 | 0.547376 | false |
ebrelsford/growing_cities | growing_cities/team/migrations/0001_initial.py | 1 | 1453 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TeamMember'
db.create_table(u'team_teammember', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
('bio', self.gf('django.db.models.fields.TextField')()),
('original_image', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
))
db.send_create_signal(u'team', ['TeamMember'])
def backwards(self, orm):
# Deleting model 'TeamMember'
db.delete_table(u'team_teammember')
models = {
u'team.teammember': {
'Meta': {'object_name': 'TeamMember'},
'bio': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'original_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['team'] | gpl-3.0 | -6,899,176,736,712,854,000 | 37.263158 | 102 | 0.582932 | false |
carlos-jenkins/confspec | lib/confspec/validation.py | 1 | 10924 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Carlos Jenkins <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Module for validation functions.
"""
from __future__ import absolute_import, division, print_function
# -----------------------------------------------------------------------------
# Integer and float validation
# -----------------------------------------------------------------------------
def positive():
"""
    Validate that the given number is positive. Note that zero is neither
    positive nor negative.
>>> f = positive()
>>> f(0)
False
>>> f(5)
True
>>> f(-5)
False
>>> f(45.5)
True
:rtype: A validator function.
"""
def validator(num):
return num > 0
return validator
def negative():
"""
    Validate that the given number is negative. Note that zero is neither
    positive nor negative.
>>> f = negative()
>>> f(0)
False
>>> f(5)
False
>>> f(-5)
True
>>> f(45.5)
False
:rtype: A validator function.
"""
def validator(num):
return num < 0
return validator
def greater_than(lower):
"""
Validate that the given number is greater than a given lower number.
>>> f = greater_than(10)
>>> f(10)
False
>>> f(20)
True
>>> f(5)
False
>>> f(-5)
False
>>> f(45.5)
True
:param lower: The lower bound to compare against.
:type lower: int or float
:rtype: A validator function.
"""
def validator(num):
return num > lower
return validator
def greater_than_eq(lower):
"""
    Validate that the given number is greater than or equal to a given lower
    number.
>>> f = greater_than_eq(10)
>>> f(10)
True
>>> f(20)
True
>>> f(5)
False
>>> f(-5)
False
>>> f(45.5)
True
:param lower: The lower bound to compare against.
:type lower: int or float
:rtype: A validator function.
"""
def validator(num):
return num >= lower
return validator
def lower_than(upper):
"""
Validate that the given number is less than a given upper number.
>>> f = lower_than(10)
>>> f(10)
False
>>> f(20)
False
>>> f(5)
True
>>> f(-5)
True
>>> f(45.5)
False
:param upper: The upper bound to compare against.
:type upper: int or float
:rtype: A validator function.
"""
def validator(num):
return num < upper
return validator
def lower_than_eq(upper):
"""
    Validate that the given number is less than or equal to a given upper number.
>>> f = lower_than_eq(10)
>>> f(10)
True
>>> f(20)
False
>>> f(5)
True
>>> f(-5)
True
>>> f(45.5)
False
:param upper: The upper bound to compare against.
:type upper: int or float
:rtype: A validator function.
"""
def validator(num):
return num <= upper
return validator
def in_range(bottom, top):
"""
Validate that a number is in the given range.
>>> f = in_range(-10, 100)
>>> f(-10)
True
>>> f(100)
True
>>> f(50)
True
>>> f(200)
False
>>> f(-20)
False
>>> f(55.85)
True
:param bottom: bottom interval delimiter.
:type bottom: int or float
:param top: top interval delimiter.
:type top: int or float
:rtype: A validator function.
"""
def validator(num):
if bottom <= num <= top:
return True
return False
return validator
def multiple_of(multi):
"""
    Validate that the given number is a multiple of the given base number.
>>> f = multiple_of(10)
>>> f(10)
True
>>> f(100)
True
>>> f(20)
True
>>> f(35)
False
>>> f(4)
False
>>> f = multiple_of(5.2)
>>> f(10.4)
True
:param multi: Multiple to check against.
:type multi: int or float
:rtype: A validator function.
"""
def validator(num):
return (num % multi) == 0
return validator
def is_even():
"""
Validate that the given number is even.
>>> f = is_even()
>>> f(10)
True
>>> f(2)
True
>>> f(0)
True
>>> f(-1)
False
>>> f(3)
False
>>> f(2.0)
True
:rtype: A validator function.
"""
def validator(num):
return (num % 2) == 0
return validator
def is_odd():
"""
Validate that the given number is odd.
>>> f = is_odd()
>>> f(3)
True
>>> f(-1)
True
>>> f(10)
False
>>> f(2)
False
>>> f(0)
False
>>> f(2.0)
False
:rtype: A validator function.
"""
def validator(num):
return (num % 2) == 1
return validator
# -----------------------------------------------------------------------------
# Collection validation
# -----------------------------------------------------------------------------
def is_one_of(options):
"""
    Validate that the given attribute is a member of the given list.
>>> f = is_one_of(['foo', 'bar'])
>>> f('ham')
False
>>> f('foo')
True
>>> f('Foo')
False
>>> f = is_one_of([10, 15, 20])
>>> f(20)
True
:param list options: The options that the attribute can be.
:rtype: A validator function.
"""
def validator(item):
return item in options
return validator
def is_subset_of(main):
"""
    Validate that the given set is a subset of the given main set.
>>> f = is_subset_of(set(['a', 'b', 'c', 'd']))
>>> f(set(['b', 'd']))
True
>>> f(set(['a', 'b', 'c', 'd']))
True
>>> f(set(['a', 'f']))
False
:param set main: The main set to compare to.
:rtype: A validator function.
"""
def validator(sub):
return sub <= main
return validator
def all_validate_to(func):
"""
    Validate that all elements in the given list pass validation by the
    given validator function.
>>> f = all_validate_to(positive())
>>> f([10, 20, 30])
True
>>> f([10, 0, 30])
False
>>> f([10, 10, -100])
False
>>> f = all_validate_to(in_range(10, 20))
>>> f([10, 11, 12, 18, 19, 20])
True
>>> f([10, 20, 30])
False
    :param function func: A validator function used to validate each one of
        the elements in the list.
:rtype: A validator function.
"""
def validator(elements):
return all(map(func, elements))
return validator
def empty():
"""
Validate that the given list is empty.
>>> f = empty()
>>> f([])
True
>>> f([1, 2])
False
:rtype: A validator function.
"""
def validator(elements):
return len(elements) == 0
return validator
def non_empty():
"""
Validate that the given list is NOT empty.
>>> f = non_empty()
>>> f([])
False
>>> f([1, 2])
True
:rtype: A validator function.
"""
def validator(elements):
return len(elements) > 0
return validator
# -----------------------------------------------------------------------------
# String validation
# -----------------------------------------------------------------------------
def has_substring(string):
"""
Validate that the given substring is part of the given string.
>>> f = has_substring('foobarhamjam')
>>> f('arham')
True
>>> f('barham')
True
>>> f('FOO')
False
>>> f('JAMHAM')
False
:param str string: Main string to compare against.
:rtype: A validator function.
"""
def validator(substring):
return substring in string
return validator
def has_substring_igncase(string):
"""
Validate that the given substring is part of the given string but ignoring
case.
>>> f = has_substring_igncase('foobarhamjam')
>>> f('ArHaM')
True
>>> f('BARham')
True
>>> f('FOO')
True
>>> f('JAMHAM')
False
:param str string: Main string to compare against.
:rtype: A validator function.
"""
string = string.lower()
def validator(substring):
return substring.lower() in string
return validator
def startswith(prefix):
"""
Validate that the given string has the given prefix.
>>> f = startswith('_p')
>>> f('_parameter')
True
>>> f('_program')
True
>>> f('_peter')
True
>>> f('john')
False
>>> f('disk')
False
:param str prefix: The prefix to verify.
:rtype: A validator function.
"""
def validator(string):
return string.startswith(prefix)
return validator
def startswith_igncase(prefix):
"""
Validate that the given string has the given prefix but ignoring case.
>>> f = startswith_igncase('_p')
>>> f('_Parameter')
True
>>> f('_Program')
True
>>> f('_peter')
True
>>> f('john')
False
>>> f('disk')
False
:param str prefix: The prefix to verify.
:rtype: A validator function.
"""
prefix = prefix.lower()
def validator(string):
return string.lower().startswith(prefix)
return validator
def endswith(suffix):
"""
Validate that the given string has the given suffix.
>>> f = endswith('ix_')
>>> f('My prefix_')
True
>>> f('My suffix_')
True
>>> f('Other thing')
False
:param str suffix: The suffix to verify.
:rtype: A validator function.
"""
def validator(string):
return string.endswith(suffix)
return validator
def endswith_igncase(suffix):
"""
Validate that the given string has the given suffix but ignoring case.
>>> f = endswith_igncase('ix_')
>>> f('My PREFIX_')
True
>>> f('My suffix_')
True
>>> f('Other THING')
False
:param str suffix: The suffix to verify.
:rtype: A validator function.
"""
suffix = suffix.lower()
def validator(string):
return string.lower().endswith(suffix)
return validator
__all__ = [
'positive',
'negative',
'greater_than',
'greater_than_eq',
'lower_than',
'lower_than_eq',
'in_range',
'multiple_of',
'is_even',
'is_odd',
'is_one_of',
'is_subset_of',
'all_validate_to',
'empty',
'non_empty',
'has_substring',
'has_substring_igncase',
'startswith',
'startswith_igncase',
'endswith',
'endswith_igncase'
]
| apache-2.0 | -175,904,754,583,558,660 | 18.647482 | 79 | 0.53149 | false |
CommonsCloud/Core-API | CommonsCloudAPI/models/statistic.py | 1 | 9805 |
"""
For CommonsCloud copyright information please see the LICENSE document
(the "License") included with this software package. This file may not
be used in any manner except in compliance with the License
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Import Python Dependencies
"""
import json
from datetime import datetime
"""
Import Flask Dependencies
"""
from flask import abort
"""
Import Commons Cloud Dependencies
"""
from CommonsCloudAPI.models.base import CommonsModel
from CommonsCloudAPI.models.template import Template
from CommonsCloudAPI.models.field import Field
from CommonsCloudAPI.extensions import db
from CommonsCloudAPI.extensions import logger
from CommonsCloudAPI.extensions import sanitize
from CommonsCloudAPI.extensions import status as status_
"""
Define our individual models
"""
class Statistic(db.Model, CommonsModel):
__public__ = {'default': ['id', 'name', 'units', 'function', 'created', 'status', 'field_id']}
__tablename__ = 'statistic'
__table_args__ = {
'extend_existing': True
}
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
units = db.Column(db.String(24))
function = db.Column(db.String(24))
created = db.Column(db.DateTime)
status = db.Column(db.Boolean)
field_id = db.Column(db.Integer, db.ForeignKey('field.id'))
def __init__(self, name="", units="", function="SUM", created=datetime.now(), status=True, field_id=""):
self.name = name
self.units = units
self.function = function
self.created = created
self.status = status
self.field_id = field_id
"""
Create a new statistic in the CommonsCloudAPI
@param (object) self
@param (dictionary) request_object
The content that is being submitted by the user
"""
def statistic_create(self, template_id, request_object):
"""
Make sure that some data was submitted before proceeding
"""
if not request_object.data:
logger.error('User %d new Statistic request failed because they didn\'t submit any `data` with their request', \
self.current_user.id)
return status_.status_400('You didn\'t include any `data` with your request.'), 400
"""
Make sure we can use the request data as json
"""
statistic_content = json.loads(request_object.data)
field_id = sanitize.sanitize_integer(statistic_content.get('field_id', ''))
explicitly_allowed_fields_ = self.explicitly_allowed_fields()
template_fields_ = self.template_field_list(template_id)
if not field_id in explicitly_allowed_fields_ or \
not field_id in template_fields_:
logger.error('User %d new Statistic request failed because they are\'t allowed to modify the associated field', \
self.current_user.id)
return status_.status_400('You are\'t allowed to modify the field you\'re trying to add a statistic to'), 400
new_statistic = {
'name': sanitize.sanitize_string(statistic_content.get('name', '')),
'units': sanitize.sanitize_string(statistic_content.get('units', '')),
'function': sanitize.sanitize_string(statistic_content.get('function', '')),
'field_id': field_id
}
statistic_ = Statistic(**new_statistic)
db.session.add(statistic_)
db.session.commit()
return statistic_
def statistic_get(self, template_id, statistic_id):
explicitly_allowed_templates_ = self.explicitly_allowed_templates('is_admin')
if not template_id in explicitly_allowed_templates_:
logger.error('User %d view Statistic request failed because they are\'t allowed to admin the template', \
self.current_user.id)
return status_.status_401('You are\'t allowed to view this statistic'), 401
statistic_ = Statistic.query.get(statistic_id)
return statistic_
def statistic_list(self, template_id):
explicitly_allowed_fields_ = self.explicitly_allowed_fields()
template_fields_ = self.template_field_list(template_id)
field_id_list_ = set(explicitly_allowed_fields_) & set(template_fields_)
statistics_ = Statistic.query.filter(Statistic.field_id.in_(field_id_list_)).all()
return statistics_
"""
Update an existing statistic in the CommonsCloudAPI
@param (object) self
@param (dictionary) request_object
The content that is being submitted by the user
"""
def statistic_update(self, template_id, statistic_id, request_object):
explicitly_allowed_templates_ = self.explicitly_allowed_templates()
if not template_id in explicitly_allowed_templates_:
logger.error('User %d update Statistic request failed because they are\'t allowed to modify the associated Template', \
self.current_user.id)
return status_.status_401('You are\'t allowed to modify the Template you\'re trying to add a statistic to'), 401
"""
Make sure that some data was submitted before proceeding
"""
if not request_object.data:
logger.error('User %d update Statistic request failed because they didn\'t submit any `data` with their request', \
self.current_user.id)
return status_.status_400('You didn\'t include any `data` with your request.'), 400
"""
Make sure we can use the request data as json
"""
statistic_ = Statistic.query.get(statistic_id)
if not statistic_.id:
logger.error('User %d Statistic request failed because Statistic does\'t exist', \
self.current_user.id)
return status_.status_404('The Statistic you\'re looking for doesn\'t exist'), 404
statistic_content = json.loads(request_object.data)
if hasattr(statistic_, 'name'):
statistic_.name = sanitize.sanitize_string(statistic_content.get('name', statistic_.name))
if hasattr(statistic_, 'units'):
statistic_.units = sanitize.sanitize_string(statistic_content.get('units', statistic_.units))
if hasattr(statistic_, 'function'):
statistic_.function = sanitize.sanitize_string(statistic_content.get('function', statistic_.function))
if hasattr(statistic_, 'field_id'):
statistic_.field_id = sanitize.sanitize_integer(statistic_content.get('field_id', statistic_.field_id))
if hasattr(statistic_, 'status'):
statistic_.status = sanitize.sanitize_boolean(statistic_content.get('status', statistic_.status))
db.session.commit()
return statistic_
"""
Delete an existing Statistic from the CommonsCloudAPI
@param (object) self
@param (int) statistic_id
The unique ID of the Statistic to be retrieved from the system
@return (bool)
A boolean to indicate if the deletion was succesful
"""
def statistic_delete(self, template_id, statistic_id):
explicitly_allowed_templates_ = self.explicitly_allowed_templates()
if not template_id in explicitly_allowed_templates_:
logger.error('User %d delete Statistic request failed because they are\'t allowed to modify the associated Template', \
self.current_user.id)
return status_.status_401('You are\'t allowed to modify the Template you\'re trying to add a statistic to'), 401
statistic_ = Statistic.query.get(statistic_id)
if not statistic_.id:
logger.error('User %d delete Statistic request failed because Statistic does\'t exist', \
self.current_user.id)
return status_.status_404('The Statistic you\'re looking for doesn\'t exist'), 404
db.session.delete(statistic_)
db.session.commit()
return True
"""
Get a list of template ids from the current user and convert
them into a list of numbers so that our SQLAlchemy query can
understand what's going on
"""
def explicitly_allowed_templates(self, permission_type='read'):
templates_ = []
if not hasattr(self.current_user, 'id'):
logger.warning('User did\'t submit their information %s', \
self.current_user)
return status_.status_401('You need to be logged in to access applications'), 401
for template in self.current_user.templates:
if permission_type and getattr(template, permission_type):
templates_.append(template.template_id)
return templates_
"""
Get a list of template ids from the current user and convert
them into a list of numbers so that our SQLAlchemy query can
understand what's going on
"""
def explicitly_allowed_fields(self, permission_type='write'):
fields_ = []
if not hasattr(self.current_user, 'id'):
logger.warning('User did\'t submit their information %s', \
self.current_user)
return status_.status_401('You need to be logged in to access applications'), 401
for field in self.current_user.fields:
if permission_type and getattr(field, permission_type):
fields_.append(field.field_id)
return fields_
def template_field_list(self, template_id):
template_ = Template.query.get(template_id)
fields_ = []
for field_ in template_.fields:
fields_.append(field_.id)
return fields_
| agpl-3.0 | -501,611,573,143,487,740 | 33.403509 | 129 | 0.65895 | false |
thombashi/tcconfig | tcconfig/shaper/_interface.py | 1 | 7593 | """
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""
import abc
import subprocrunner
import typepy
from humanreadable import ParameterError
from .._common import run_command_helper
from .._const import TcSubCommand, TrafficDirection
from .._iptables import IptablesMangleMarkEntry
from .._logger import logger
from .._network import get_anywhere_network
from .._shaping_rule_finder import TcShapingRuleFinder
class ShaperInterface(metaclass=abc.ABCMeta):
@abc.abstractproperty
def algorithm_name(self): # pragma: no cover
pass
@abc.abstractmethod
def set_shaping(self): # pragma: no cover
pass
class AbstractShaper(ShaperInterface):
@property
def _tc_device(self):
return "{:s}".format(self._tc_obj.get_tc_device())
@property
def _dev(self):
return "dev {:s}".format(self._tc_device)
@property
def _existing_parent(self):
if self.__existing_parent:
return self.__existing_parent
self.__existing_parent = self._shaping_rule_finder.find_parent()
return self.__existing_parent
@property
def _shaping_rule_finder(self):
if self.__shaping_rule_finder:
return self.__shaping_rule_finder
self.__shaping_rule_finder = TcShapingRuleFinder(logger=logger, tc=self._tc_obj)
return self.__shaping_rule_finder
def __init__(self, tc_obj):
self._tc_obj = tc_obj
self.__shaping_rule_finder = None
self.__existing_parent = None
def _set_netem(self):
base_command = self._tc_obj.get_tc_command(TcSubCommand.QDISC)
parent = self._get_tc_parent(
"{:s}:{:d}".format(self._tc_obj.qdisc_major_id_str, self._get_qdisc_minor_id())
)
handle = self._get_tc_handle(
"{:x}:".format(self._get_netem_qdisc_major_id(self._tc_obj.qdisc_major_id))
)
command_item_list = [
base_command,
"dev {:s}".format(self._tc_obj.get_tc_device()),
"parent {:s}".format(parent),
"handle {:s}".format(handle),
self._tc_obj.netem_param.make_netem_command_parts(),
]
return run_command_helper(
" ".join(command_item_list),
ignore_error_msg_regexp=self._tc_obj.REGEXP_FILE_EXISTS,
notice_msg=self._tc_obj.EXISTS_MSG_TEMPLATE.format(
"failed to '{command:s}': netem qdisc already exists "
"(dev={dev:s}, parent={parent:s}, handle={handle:s})".format(
command=base_command,
dev=self._tc_obj.get_tc_device(),
parent=parent,
handle=handle,
)
),
)
    def _get_filter_prio(self, is_exclude_filter: bool) -> int:
        # Exclude filters must match before the shaping filters, so they
        # get the smaller (i.e. higher-priority) prio values.
        offset = 4
        if is_exclude_filter:
            offset = 0
if self._tc_obj.protocol == "ip":
return 1 + offset
if self._tc_obj.protocol == "ipv6":
return 2 + offset
return 3 + offset
def _add_filter(self):
if self._tc_obj.is_change_shaping_rule:
return 0
command_item_list = [
self._tc_obj.get_tc_command(TcSubCommand.FILTER),
self._dev,
"protocol {:s}".format(self._tc_obj.protocol),
"parent {:s}:".format(self._tc_obj.qdisc_major_id_str),
"prio {:d}".format(self._get_filter_prio(is_exclude_filter=False)),
]
if self._is_use_iptables():
command_item_list.append("handle {:d} fw".format(self._get_unique_mangle_mark_id()))
else:
if typepy.is_null_string(self._tc_obj.dst_network):
dst_network = get_anywhere_network(self._tc_obj.ip_version)
else:
dst_network = self._tc_obj.dst_network
command_item_list.extend(
[
"u32",
"match {:s} {:s} {:s}".format(self._tc_obj.protocol_match, "dst", dst_network),
]
)
if typepy.is_not_null_string(self._tc_obj.src_network):
command_item_list.append(
"match {:s} {:s} {:s}".format(
self._tc_obj.protocol_match, "src", self._tc_obj.src_network
)
)
if self._tc_obj.src_port:
command_item_list.append(
"match {:s} sport {:d} 0xffff".format(
self._tc_obj.protocol_match, self._tc_obj.src_port
)
)
if self._tc_obj.dst_port:
command_item_list.append(
"match {:s} dport {:d} 0xffff".format(
self._tc_obj.protocol_match, self._tc_obj.dst_port
)
)
command_item_list.append(
"flowid {:s}:{:d}".format(self._tc_obj.qdisc_major_id_str, self._get_qdisc_minor_id())
)
return subprocrunner.SubprocessRunner(" ".join(command_item_list)).run()
def _add_exclude_filter(self):
pass
def _is_use_iptables(self):
return all(
[self._tc_obj.is_enable_iptables, self._tc_obj.direction == TrafficDirection.OUTGOING]
)
@abc.abstractmethod
def _get_qdisc_minor_id(self): # pragma: no cover
pass
@abc.abstractmethod
def _get_netem_qdisc_major_id(self, base_id): # pragma: no cover
pass
def _get_network_direction_str(self):
if self._tc_obj.direction == TrafficDirection.OUTGOING:
return "dst"
if self._tc_obj.direction == TrafficDirection.INCOMING:
return "src"
raise ParameterError(
"unknown direction", expected=TrafficDirection.LIST, value=self._tc_obj.direction
)
def _get_tc_handle(self, default_handle):
handle = None
if self._tc_obj.is_change_shaping_rule:
handle = self._shaping_rule_finder.find_qdisc_handle(self._get_tc_parent(None))
if not handle:
handle = default_handle
return handle
def _get_tc_parent(self, default_parent):
parent = None
if self._tc_obj.is_change_shaping_rule:
parent = self._existing_parent
if not parent:
parent = default_parent
return parent
def _get_unique_mangle_mark_id(self):
mark_id = self._tc_obj.iptables_ctrl.get_unique_mark_id()
self.__add_mangle_mark(mark_id)
return mark_id
@abc.abstractmethod
def _make_qdisc(self): # pragma: no cover
pass
@abc.abstractmethod
def _add_rate(self): # pragma: no cover
pass
def __add_mangle_mark(self, mark_id):
dst_network = None
src_network = None
if self._tc_obj.direction == TrafficDirection.OUTGOING:
dst_network = self._tc_obj.dst_network
if typepy.is_null_string(self._tc_obj.src_network):
chain = "OUTPUT"
else:
src_network = self._tc_obj.src_network
chain = "PREROUTING"
elif self._tc_obj.direction == TrafficDirection.INCOMING:
src_network = self._tc_obj.dst_network
chain = "INPUT"
self._tc_obj.iptables_ctrl.add(
IptablesMangleMarkEntry(
ip_version=self._tc_obj.ip_version,
mark_id=mark_id,
source=src_network,
destination=dst_network,
chain=chain,
)
)
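# Sketch of how a concrete shaper plugs into AbstractShaper (illustrative
# only; the real subclasses live in sibling modules and may differ):
#
#   class ExampleShaper(AbstractShaper):
#       @property
#       def algorithm_name(self):
#           return 'example'
#       def _get_qdisc_minor_id(self): ...
#       def _get_netem_qdisc_major_id(self, base_id): ...
#       def _make_qdisc(self): ...
#       def _add_rate(self): ...
#       def set_shaping(self):
#           self._make_qdisc()
#           self._set_netem()
#           self._add_filter()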
| mit | 4,832,686,497,575,263,000 | 29.991837 | 99 | 0.550507 | false |
gautamMalu/XenInBox | pyanaconda/ui/tui/spokes/user.py | 1 | 6705 | # User creation text spoke
#
# Copyright (C) 2013-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Martin Sivak <[email protected]>
# Chris Lumens <[email protected]>
#
from pyanaconda.ui.tui.spokes import EditTUISpoke
from pyanaconda.ui.tui.spokes import EditTUISpokeEntry as Entry
from pyanaconda.ui.common import FirstbootSpokeMixIn
from pyanaconda.users import guess_username
from pyanaconda.flags import flags
from pyanaconda.i18n import N_, _
from pykickstart.constants import FIRSTBOOT_RECONFIG
from pyanaconda.constants import ANACONDA_ENVIRON, FIRSTBOOT_ENVIRON
from pyanaconda.regexes import GECOS_VALID, USERNAME_VALID, GROUPLIST_SIMPLE_VALID
__all__ = ["UserSpoke"]
class UserSpoke(FirstbootSpokeMixIn, EditTUISpoke):
title = N_("Create user")
category = "password"
edit_fields = [
Entry("Create user", "_create", EditTUISpoke.CHECK, True),
Entry("Fullname", "gecos", GECOS_VALID, lambda self,args: args._create),
Entry("Username", "name", USERNAME_VALID, lambda self,args: args._create),
Entry("Use password", "_use_password", EditTUISpoke.CHECK, lambda self,args: args._create),
Entry("Password", "_password", EditTUISpoke.PASSWORD, lambda self,args: args._use_password and args._create),
Entry("Administrator", "_admin", EditTUISpoke.CHECK, lambda self,args: args._create),
Entry("Groups", "_groups", GROUPLIST_SIMPLE_VALID, lambda self,args: args._create)
]
@classmethod
def should_run(cls, environment, data):
        # the user spoke should always run in anaconda; in firstboot it runs
        # only when doing reconfig or if no user was created during installation
if environment == ANACONDA_ENVIRON:
return True
elif environment == FIRSTBOOT_ENVIRON and data is None:
# cannot decide, stay in the game and let another call with data
# available (will come) decide
return True
elif environment == FIRSTBOOT_ENVIRON and data and \
(data.firstboot.firstboot == FIRSTBOOT_RECONFIG or \
len(data.user.userList) == 0):
return True
else:
return False
def __init__(self, app, data, storage, payload, instclass):
FirstbootSpokeMixIn.__init__(self)
EditTUISpoke.__init__(self, app, data, storage, payload, instclass)
if self.data.user.userList:
self.args = self.data.user.userList[0]
self.args._create = True
else:
self.args = self.data.UserData()
self.args._create = False
self.args._use_password = self.args.isCrypted or self.args.password
# Keep the password separate from the kickstart data until apply()
# so that all of the properties are set at once
self.args._password = ""
def refresh(self, args = None):
self.args._admin = "wheel" in self.args.groups
self.args._groups = ", ".join(self.args.groups)
return EditTUISpoke.refresh(self, args)
@property
def completed(self):
""" Verify a user is created; verify pw is set if option checked. """
if len(self.data.user.userList) > 0:
if self.args._use_password and not bool(self.args.password or self.args.isCrypted):
return False
else:
return True
else:
return False
@property
def showable(self):
return not (self.completed and flags.automatedInstall)
@property
def mandatory(self):
""" Only mandatory if the root pw hasn't been set in the UI
            e.g. not mandatory if the root account was locked in a kickstart
"""
return not self.data.rootpw.password and not self.data.rootpw.lock
@property
def status(self):
if len(self.data.user.userList) == 0:
return _("No user will be created")
elif self.args._use_password and not bool(self.args.password or self.args.isCrypted):
return _("You must set a password")
elif "wheel" in self.data.user.userList[0].groups:
return _("Administrator %s will be created") % self.data.user.userList[0].name
else:
return _("User %s will be created") % self.data.user.userList[0].name
def apply(self):
        if self.args.gecos and not self.args.name:
            username = guess_username(self.args.gecos)
            if USERNAME_VALID.match(username):
                self.args.name = username
self.args.groups = [g.strip() for g in self.args._groups.split(",") if g]
# Add or remove the user from wheel group
if self.args._admin and "wheel" not in self.args.groups:
self.args.groups.append("wheel")
elif not self.args._admin and "wheel" in self.args.groups:
self.args.groups.remove("wheel")
# Add or remove the user from userlist as needed
if self.args._create and (self.args not in self.data.user.userList):
self.data.user.userList.append(self.args)
elif (not self.args._create) and (self.args in self.data.user.userList):
self.data.user.userList.remove(self.args)
# encrypt and store password only if user entered anything; this should
# preserve passwords set via kickstart
if self.args._use_password and len(self.args._password) > 0:
self.args.password = self.args._password
self.args.isCrypted = True
self.args.password_kickstarted = False
# clear pw when user unselects to use pw
else:
self.args.password = ""
self.args.isCrypted = False
self.args.password_kickstarted = False
| gpl-2.0 | 1,448,730,758,010,517,000 | 43.403974 | 117 | 0.656078 | false |
EndyKaufman/django-postgres-angularjs-blog | app/file/migrations/0002_fill_from_mock.py | 1 | 1054 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-31 16:13
from __future__ import unicode_literals
from django.db import migrations
from project import helpers, settings
import json
import os
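# Expected shape of mock/file/list.json (an assumption inferred from the
# loop below):
#
#   [{"id": 1, "src": "uploads/example.png", "comment": "..."}]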
def fill_from_mock(apps, schema_editor):
helpers.mkdir_recursive(settings.MEDIA_ROOT)
helpers.copy_dir_recursive(os.path.join('mock', 'file', 'media'), settings.MEDIA_ROOT, remove_if_exists=True)
try:
with open(os.path.join('mock', 'file', 'list.json')) as f:
content = f.read()
f.close()
except IOError:
content = '[]'
records = json.loads(content)
File = apps.get_model("file", "File")
for record in records:
file, created = File.objects.get_or_create(pk=record['id'], src=record['src'])
file.comment = record['comment']
file.save()
helpers.get_thumbnail(record['src'])
class Migration(migrations.Migration):
dependencies = [
('file', '0001_initial'),
]
operations = [
migrations.RunPython(fill_from_mock),
]
| mit | -2,137,496,670,613,839,400 | 26.736842 | 113 | 0.627135 | false |
google/edward2 | edward2/tensorflow/layers/made.py | 1 | 9581 | # coding=utf-8
# Copyright 2021 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Masked autoencoder for distribution estimation."""
import numpy as np
import tensorflow as tf
class MADE(tf.keras.Model):
"""Masked autoencoder for distribution estimation (Germain et al., 2015).
MADE takes as input a real Tensor of shape [..., length, channels] and returns
a Tensor of shape [..., length, units] and same dtype. It masks layer weights
to satisfy autoregressive constraints with respect to the length dimension. In
particular, for a given ordering, each input dimension of length can be
reconstructed from previous dimensions.
The output's units dimension captures per-time-step representations. For
example, setting units to 2 can parameterize the location and log-scale of an
autoregressive Gaussian distribution.
"""
def __init__(self,
units,
hidden_dims,
input_order='left-to-right',
hidden_order='left-to-right',
activation=None,
use_bias=True,
**kwargs):
"""Constructs network.
Args:
units: Positive integer, dimensionality of the output space.
hidden_dims: list with the number of hidden units per layer. It does not
include the output layer; those number of units will always be set to
the input dimension multiplied by `num_heads`. Each hidden unit size
must be at least the size of length (otherwise autoregressivity is not
possible).
input_order: Order of degrees to the input units: 'random',
'left-to-right', 'right-to-left', or an array of an explicit order.
For example, 'left-to-right' builds an autoregressive model
p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
hidden_order: Order of degrees to the hidden units: 'random',
'left-to-right'. If 'left-to-right', hidden units are allocated equally
(up to a remainder term) to each degree.
activation: Activation function.
use_bias: Whether to use a bias.
**kwargs: Keyword arguments of parent class.
"""
super(MADE, self).__init__(**kwargs)
self.units = int(units)
self.hidden_dims = hidden_dims
self.input_order = input_order
self.hidden_order = hidden_order
self.activation = tf.keras.activations.get(activation)
self.use_bias = use_bias
self.network = tf.keras.Sequential([])
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape)
length = input_shape[-2]
channels = input_shape[-1]
if length is None or channels is None:
      raise ValueError('The last two dimensions of the inputs to '
                       '`MADE` should be defined. Found `None`.')
masks = create_masks(input_dim=length,
hidden_dims=self.hidden_dims,
input_order=self.input_order,
hidden_order=self.hidden_order)
# Input-to-hidden layer: [..., length, channels] -> [..., hidden_dims[0]].
self.network.add(tf.keras.layers.Reshape([length * channels]))
# Tile the mask so each element repeats contiguously; this is compatible
    # with the autoregressive constraints unlike naive tiling.
mask = masks[0]
mask = tf.tile(mask[:, tf.newaxis, :], [1, channels, 1])
mask = tf.reshape(mask, [mask.shape[0] * channels, mask.shape[-1]])
if self.hidden_dims:
layer = tf.keras.layers.Dense(
self.hidden_dims[0],
kernel_initializer=make_masked_initializer(mask),
kernel_constraint=make_masked_constraint(mask),
activation=self.activation,
use_bias=self.use_bias)
self.network.add(layer)
# Hidden-to-hidden layers: [..., hidden_dims[l-1]] -> [..., hidden_dims[l]].
for l in range(1, len(self.hidden_dims)):
layer = tf.keras.layers.Dense(
self.hidden_dims[l],
kernel_initializer=make_masked_initializer(masks[l]),
kernel_constraint=make_masked_constraint(masks[l]),
activation=self.activation,
use_bias=self.use_bias)
self.network.add(layer)
# Hidden-to-output layer: [..., hidden_dims[-1]] -> [..., length, units].
# Tile the mask so each element repeats contiguously; this is compatible
    # with the autoregressive constraints unlike naive tiling.
if self.hidden_dims:
mask = masks[-1]
mask = tf.tile(mask[..., tf.newaxis], [1, 1, self.units])
mask = tf.reshape(mask, [mask.shape[0], mask.shape[1] * self.units])
layer = tf.keras.layers.Dense(
length * self.units,
kernel_initializer=make_masked_initializer(mask),
kernel_constraint=make_masked_constraint(mask),
activation=None,
use_bias=self.use_bias)
self.network.add(layer)
self.network.add(tf.keras.layers.Reshape([length, self.units]))
self.built = True
def call(self, inputs):
return self.network(inputs)
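# Example usage (an illustrative sketch, not part of the original module):
# assuming TensorFlow 2.x, a MADE with units=2 maps inputs of shape
# [batch, length, channels] to per-position location/log-scale parameters:
#
#   made = MADE(units=2, hidden_dims=[64, 64])
#   x = tf.random.uniform([8, 10, 1])  # batch of 8 sequences of length 10
#   params = made(x)                   # shape [8, 10, 2]
#   loc, log_scale = tf.unstack(params, axis=-1)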
def create_degrees(input_dim,
hidden_dims,
input_order='left-to-right',
hidden_order='left-to-right'):
"""Returns a list of degree vectors, one for each input and hidden layer.
  A hidden unit with degree d can only receive input from units with degree
  <= d; an output unit with degree d only from units with degree < d. Output
  units always have the same degree as their associated input unit.
Args:
input_dim: Number of inputs.
    hidden_dims: list with the number of hidden units per layer. It does not
      include the output layer. Each hidden layer size must be at least
      `input_dim` (otherwise autoregressivity is not possible).
input_order: Order of degrees to the input units: 'random', 'left-to-right',
'right-to-left', or an array of an explicit order. For example,
'left-to-right' builds an autoregressive model
p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
hidden_order: Order of degrees to the hidden units: 'random',
'left-to-right'. If 'left-to-right', hidden units are allocated equally
(up to a remainder term) to each degree.
"""
if (isinstance(input_order, str) and
input_order not in ('random', 'left-to-right', 'right-to-left')):
raise ValueError('Input order is not valid.')
if hidden_order not in ('random', 'left-to-right'):
raise ValueError('Hidden order is not valid.')
degrees = []
if isinstance(input_order, str):
input_degrees = np.arange(1, input_dim + 1)
if input_order == 'right-to-left':
input_degrees = np.flip(input_degrees, 0)
elif input_order == 'random':
np.random.shuffle(input_degrees)
else:
input_order = np.array(input_order)
    if np.any(np.sort(input_order) != np.arange(1, input_dim + 1)):
raise ValueError('invalid input order')
input_degrees = input_order
degrees.append(input_degrees)
for units in hidden_dims:
if hidden_order == 'random':
min_prev_degree = min(np.min(degrees[-1]), input_dim - 1)
hidden_degrees = np.random.randint(
low=min_prev_degree, high=input_dim, size=units)
elif hidden_order == 'left-to-right':
hidden_degrees = (np.arange(units) % max(1, input_dim - 1) +
min(1, input_dim - 1))
degrees.append(hidden_degrees)
return degrees
def create_masks(input_dim,
hidden_dims,
input_order='left-to-right',
hidden_order='left-to-right'):
"""Returns a list of binary mask matrices respecting autoregressive ordering.
Args:
input_dim: Number of inputs.
    hidden_dims: list with the number of hidden units per layer. It does not
      include the output layer; the number of output units is set to
      input_dim downstream. Each hidden layer size must be at least
      `input_dim` (otherwise autoregressivity is not possible).
input_order: Order of degrees to the input units: 'random', 'left-to-right',
'right-to-left', or an array of an explicit order. For example,
'left-to-right' builds an autoregressive model
p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
hidden_order: Order of degrees to the hidden units: 'random',
'left-to-right'. If 'left-to-right', hidden units are allocated equally
(up to a remainder term) to each degree.
"""
degrees = create_degrees(input_dim, hidden_dims, input_order, hidden_order)
masks = []
# Create input-to-hidden and hidden-to-hidden masks.
for input_degrees, output_degrees in zip(degrees[:-1], degrees[1:]):
mask = tf.cast(input_degrees[:, np.newaxis] <= output_degrees, tf.float32)
masks.append(mask)
# Create hidden-to-output mask.
mask = tf.cast(degrees[-1][:, np.newaxis] < degrees[0], tf.float32)
masks.append(mask)
return masks
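# Worked example (illustrative): with input_dim=3, hidden_dims=[4] and the
# default left-to-right ordering, create_degrees returns
#   [array([1, 2, 3]), array([1, 2, 1, 2])],
# so the input-to-hidden mask lets hidden unit j see input i exactly when
# degree(i) <= degree(j) (the degree-3 input feeds no hidden unit), while the
# hidden-to-output mask uses the strict inequality degree(hidden) < degree(out).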
def make_masked_initializer(mask):
initializer = tf.keras.initializers.GlorotUniform()
def masked_initializer(shape, dtype=None):
return mask * initializer(shape, dtype)
return masked_initializer
def make_masked_constraint(mask):
constraint = tf.identity
def masked_constraint(x):
return mask * constraint(x)
return masked_constraint
| apache-2.0 | -7,966,163,768,912,077,000 | 40.656522 | 80 | 0.659639 | false |
dietrichc/streamline-ppc-reports | examples/dfp/v201405/audience_segment_service/populate_first_party_audience_segments.py | 1 | 2571 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example populates a specific first party audience segment.
To determine which first party audience segments exist, run
get_first_party_audience_segments.py.
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
AUDIENCE_SEGMENT_ID = 'INSERT_AUDIENCE_SEGMENT_ID_HERE'
def main(client, audience_segment_id):
# Initialize client object.
client = dfp.DfpClient.LoadFromStorage()
# Initialize appropriate service.
audience_segment_service = client.GetService(
'AudienceSegmentService', version='v201405')
# Create statement object to get the specified first party audience segment.
values = (
[{'key': 'type',
'value': {
'xsi_type': 'TextValue',
'value': 'FIRST_PARTY'
}
},
{'key': 'audience_segment_id',
'value': {
'xsi_type': 'NumberValue',
'value': AUDIENCE_SEGMENT_ID
}
}])
query = 'WHERE Type = :type AND Id = :audience_segment_id'
statement = dfp.FilterStatement(query, values, 1)
response = audience_segment_service.getAudienceSegmentsByStatement(
statement.ToStatement())
if 'results' in response:
segments = response['results']
for segment in segments:
print ('Audience segment with id \'%s\' and name \'%s\' will be populated.'
% (segment['id'], segment['name']))
action = {
'xsi_type': 'PopulateAudienceSegments'
}
populated_audience_segments = (
audience_segment_service.performAudienceSegmentAction(
action, statement.ToStatement()))
print ('%s audience segment populated' %
populated_audience_segments['numChanges'])
else:
print 'No Results Found'
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, AUDIENCE_SEGMENT_ID)
| apache-2.0 | 3,754,272,147,470,007,000 | 29.607143 | 81 | 0.669778 | false |
cpcloud/ibis | ibis/sql/postgres/tests/conftest.py | 1 | 2161 | # Copyright 2015 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import ibis
PG_USER = os.environ.get(
'IBIS_TEST_POSTGRES_USER', os.environ.get('PGUSER', 'postgres')
)
PG_PASS = os.environ.get(
'IBIS_TEST_POSTGRES_PASSWORD', os.environ.get('PGPASSWORD', 'postgres')
)
PG_HOST = os.environ.get(
'IBIS_TEST_POSTGRES_HOST', os.environ.get('PGHOST', 'localhost')
)
PG_PORT = os.environ.get(
'IBIS_TEST_POSTGRES_PORT', os.environ.get('PGPORT', 5432)
)
IBIS_TEST_POSTGRES_DB = os.environ.get(
'IBIS_TEST_POSTGRES_DATABASE', os.environ.get('PGDATABASE', 'ibis_testing')
)
@pytest.fixture(scope='session')
def con():
return ibis.postgres.connect(
host=PG_HOST,
user=PG_USER,
password=PG_PASS,
database=IBIS_TEST_POSTGRES_DB,
port=PG_PORT,
)
@pytest.fixture(scope='module')
def db(con):
return con.database()
@pytest.fixture(scope='module')
def alltypes(db):
return db.functional_alltypes
@pytest.fixture(scope='module')
def geotable(con):
return con.table('geo')
@pytest.fixture(scope='module')
def df(alltypes):
return alltypes.execute()
@pytest.fixture(scope='module')
def gdf(geotable):
return geotable.execute()
@pytest.fixture(scope='module')
def at(alltypes):
return alltypes.op().sqla_table
@pytest.fixture(scope='module')
def intervals(con):
return con.table("intervals")
@pytest.fixture
def translate():
from ibis.sql.postgres.compiler import PostgreSQLDialect
dialect = PostgreSQLDialect()
context = dialect.make_context()
return lambda expr: dialect.translator(expr, context).get_result()
| apache-2.0 | 1,272,161,993,730,689,300 | 22.747253 | 79 | 0.707543 | false |
bspaans/python-mingus | tests/unit/core/test_meter.py | 1 | 2500 | from __future__ import absolute_import
import doctest
import unittest
from six.moves import range
import mingus.core.meter as meter
class test_meter(unittest.TestCase):
def setUp(self):
self.simple_meters = [
(2, 4),
(3, 4),
(4, 4),
(6, 4),
(8, 4),
(5, 4),
(2, 2),
(1, 2),
(6, 4),
]
self.compound_meters = [
(6, 4),
(9, 4),
(12, 4),
(6, 8),
(9, 8),
(12, 8),
(6, 16),
(9, 16),
(12, 16),
]
self.asymmetrical_meters = [
(3, 4),
(5, 4),
(7, 4),
(11, 4),
(1, 8),
(3, 8),
(5, 8),
(7, 8),
(3, 16),
(11, 16),
(15, 16),
(17, 16),
]
def test_valid_beat_duration(self):
for x in [
1,
2,
4,
8,
16,
32,
64,
128,
256,
512,
1024,
2048,
]:
self.assertTrue(
meter.valid_beat_duration(x), "%d should be a valid beat duration" % x
)
def test_invalid_beat_duration(self):
for x in [0, 3, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15,] + list(range(17, 31)):
self.assertTrue(
not meter.valid_beat_duration(x),
"%d should not be a valid beat duration" % x,
)
def test_is_compound(self):
for x in self.compound_meters:
self.assertTrue(
meter.is_compound(x), "%d/%d should be a compound meter" % x
)
def test_is_simple(self):
for x in self.simple_meters:
self.assertTrue(meter.is_simple(x), "%d/%d should be a simple meter" % x)
def test_is_valid_meter(self):
for x in self.compound_meters + self.simple_meters:
self.assertTrue(meter.is_valid(x), "%d/%d should be a valid meter" % x)
def test_is_asymmetrical(self):
for x in self.asymmetrical_meters:
self.assertTrue(
meter.is_asymmetrical(x), "%d/%d should be a asymmetrical meter" % x
)
def test_is_full(self):
pass
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(meter))
return tests
| gpl-3.0 | -2,183,450,939,416,553,700 | 23.509804 | 86 | 0.4248 | false |
skosukhin/spack | var/spack/repos/builtin/packages/r-desolve/package.py | 1 | 1817 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RDesolve(RPackage):
"""Functions that solve initial value problems of a system of first-order
ordinary differential equations ('ODE'), of partial differential
equations ('PDE'), of differential algebraic equations ('DAE'), and of
delay differential equations."""
homepage = "https://cran.r-project.org/package=deSolve"
url = "https://cran.r-project.org/src/contrib/deSolve_1.20.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/deSolve"
version('1.20', '85c6a2d8568944ae8eef27ac7c35fb25')
| lgpl-2.1 | 4,684,682,502,985,198,000 | 46.815789 | 78 | 0.681893 | false |
ssjssh/algorithm | src/ssj/queue/list_queue.py | 1 | 1674 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
A bounded queue implemented on top of a fixed-size Python list (ring buffer).
"""
class Queue(object):
"""docstring for Queue"""
class EmptyNode(object):
"""
        EmptyNode is a sentinel object marking an empty slot in the backing list.
"""
pass
def __init__(self, cap):
super(Queue, self).__init__()
self.__empty = self.EmptyNode()
self.__cap = cap
if cap < 0:
raise ValueError("cap of queue can not be negative")
self.__value = [self.__empty for x in xrange(0, cap)]
        # head points to the next element to dequeue; tail points to the slot
        # where the next element will be inserted. Fixing these pointer
        # conventions up front keeps the wrap-around logic simple.
self.__head = 0
self.__tail = 0
def enter(self, x):
if self.__tail == self.__head and self.__value[self.__head] is not self.__empty:
raise IndexError("queue is full")
self.__value[self.__tail] = x
self.__tail += 1
if self.__tail >= self.__cap:
self.__tail = 0
def exit(self):
if self.__tail == self.__head and self.__value[self.__head] is self.__empty:
raise IndexError("queue is empty")
v = self.__value[self.__head]
self.__head += 1
if self.__head >= self.__cap:
self.__head = 0
return v
def __len__(self):
if self.__tail > self.__head:
return self.__tail - self.__head
elif self.__head > self.__tail:
return self.__cap - (self.__head - self.__tail)
else:
if self.__value[self.__head] is self.__empty:
return 0
else:
return self.__cap
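# Example usage (illustrative only):
#   q = Queue(3)
#   q.enter(1)
#   q.enter(2)
#   assert len(q) == 2
#   assert q.exit() == 1  # FIFO order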
| gpl-2.0 | 1,703,777,181,144,225,300 | 27.703704 | 88 | 0.499355 | false |
jonathf/chaospy | chaospy/quadrature/newton_cotes.py | 1 | 4349 | """
Example usage
-------------
Generate Newton-Cotes quadrature rules::
>>> distribution = chaospy.Uniform(0, 1)
>>> for order in range(5):
... abscissas, weights = chaospy.generate_quadrature(
... order, distribution, rule="newton_cotes")
... print(order, abscissas.round(3), weights.round(3))
0 [[0.5]] [1.]
1 [[0. 1.]] [0.5 0.5]
2 [[0. 0.5 1. ]] [0.167 0.667 0.167]
3 [[0. 0.333 0.667 1. ]] [0.125 0.375 0.375 0.125]
4 [[0. 0.25 0.5 0.75 1. ]] [0.078 0.356 0.133 0.356 0.078]
The first few orders with exponential growth rule::
>>> for order in range(4): # doctest: +NORMALIZE_WHITESPACE
... abscissas, weights = chaospy.generate_quadrature(
... order, distribution, rule="newton_cotes", growth=True)
... print(order, abscissas.round(3), weights.round(3))
0 [[0.5]] [1.]
1 [[0. 0.5 1. ]] [0.167 0.667 0.167]
2 [[0. 0.25 0.5 0.75 1. ]] [0.078 0.356 0.133 0.356 0.078]
3 [[0. 0.125 0.25 0.375 0.5 0.625 0.75 0.875 1. ]]
[ 0.035 0.208 -0.033 0.37 -0.16 0.37 -0.033 0.208 0.035]
Applying Smolyak sparse grid on Newton-Cotes::
>>> distribution = chaospy.Iid(chaospy.Uniform(0, 1), 2)
>>> abscissas, weights = chaospy.generate_quadrature(
... 2, distribution, rule="newton_cotes",
... growth=True, sparse=True)
>>> abscissas.round(3)
array([[0. , 0. , 0. , 0.25, 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.75, 1. ,
1. , 1. ],
[0. , 0.5 , 1. , 0.5 , 0. , 0.25, 0.5 , 0.75, 1. , 0.5 , 0. ,
0.5 , 1. ]])
>>> weights.round(3)
array([ 0.028, 0.022, 0.028, 0.356, 0.022, 0.356, -0.622, 0.356,
0.022, 0.356, 0.028, 0.022, 0.028])
"""
from __future__ import division
try:
from functools import lru_cache
except ImportError: # pragma: no cover
from functools32 import lru_cache
import numpy
from scipy import integrate
from .hypercube import hypercube_quadrature
def newton_cotes(order, domain=(0, 1), growth=False, segments=1):
"""
Generate the abscissas and weights in Newton-Cotes quadrature.
Newton-Cotes quadrature, are a group of formulas for numerical integration
based on evaluating the integrand at equally spaced points.
Args:
order (int, numpy.ndarray:):
Quadrature order.
        domain (:class:`chaospy.Distribution`, :class:`numpy.ndarray`):
Either distribution or bounding of interval to integrate over.
growth (bool):
If True sets the growth rule for the quadrature rule to only
include orders that enhances nested samples.
segments (int):
Split intervals into N subintervals and create a patched
quadrature based on the segmented quadrature. Can not be lower than
`order`. If 0 is provided, default to square root of `order`.
Nested samples only exist when the number of segments are fixed.
Returns:
(numpy.ndarray, numpy.ndarray):
abscissas:
The quadrature points for where to evaluate the model function
with ``abscissas.shape == (len(dist), N)`` where ``N`` is the
number of samples.
weights:
The quadrature weights with ``weights.shape == (N,)``.
Examples:
>>> abscissas, weights = chaospy.quadrature.newton_cotes(4)
>>> abscissas.round(4)
array([[0. , 0.25, 0.5 , 0.75, 1. ]])
>>> weights.round(4)
array([0.0778, 0.3556, 0.1333, 0.3556, 0.0778])
>>> abscissas, weights = chaospy.quadrature.newton_cotes(4, segments=2)
>>> abscissas.round(4)
array([[0. , 0.25, 0.5 , 0.75, 1. ]])
>>> weights.round(4)
array([0.0833, 0.3333, 0.1667, 0.3333, 0.0833])
"""
order = numpy.asarray(order)
order = numpy.where(growth, numpy.where(order, 2**order, 0), order)
return hypercube_quadrature(
_newton_cotes,
order=order,
domain=domain,
segments=segments,
)
@lru_cache(None)
def _newton_cotes(order):
"""Backend for Newton-Cotes quadrature rule."""
if order == 0:
return numpy.full((1, 1), 0.5), numpy.ones(1)
return numpy.linspace(0, 1, order+1), integrate.newton_cotes(order)[0]/order
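# Sanity check (illustrative): for order 2 the backend reproduces Simpson's
# rule on the unit interval:
#   abscissas, weights = _newton_cotes(2)
#   # abscissas -> array([0. , 0.5, 1. ]); weights -> array([1/6, 2/3, 1/6])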
| mit | 1,887,471,795,179,704,600 | 36.817391 | 80 | 0.574155 | false |
avalentino/gsdview | gsdview/qtsupport.py | 1 | 32996 | # GSDView - Geo-Spatial Data Viewer
# Copyright (C) 2008-2021 Antonio Valentino <[email protected]>
#
# This module is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this module; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Utility functions and classes for Qt5 applications."""
import os
import csv
import math
import logging
from io import StringIO
from configparser import ConfigParser
from qtpy import QtCore, QtWidgets, QtGui, QtSvg, QtPrintSupport, uic
from gsdview import utils
_log = logging.getLogger(__name__)
# @COMPATIBILITY: PySide
# Credit:
# http://stackoverflow.com/questions/4442286/python-code-genration-with-pyside-uic/14195313#14195313
if not hasattr(uic, 'loadUiType'):
def loadUiType(uifile, from_imports=False):
"""Load a .ui file and return the generated form class and
the Qt base class.
The "loadUiType" command convert the ui file to py code
in-memory first and then execute it in a special frame to
retrieve the form_class.
"""
from io import StringIO
from xml.etree.ElementTree import ElementTree
import qtpy
from qtpy import QtWidgets
if qtpy.PYSIDE2:
from pyside2uic import compileUi
elif qtpy.PYSIDE:
from pysideuic import compileUi
else:
raise RuntimeError(f'unexpected qtpy.API: {qtpy.API!r}')
# Parse the UI file
etree = ElementTree()
ui = etree.parse(uifile)
widget_class = ui.find('widget').get('class')
form_class = ui.find('class').text
with open(uifile) as fd:
code_stream = StringIO()
frame = {}
compileUi(fd, code_stream, indent=0, from_imports=from_imports)
pyc = compile(code_stream.getvalue(), '<string>', 'exec')
exec(pyc, frame)
# Fetch the base_class and form class based on their type in the
# xml from designer
form_class = frame['Ui_%s' % form_class]
base_class = getattr(QtWidgets, widget_class)
return form_class, base_class
else:
loadUiType = uic.loadUiType
# Menus and toolbars helpers ###############################################
def actionGroupToMenu(actionGroup, label, mainwin):
menu = QtWidgets.QMenu(label, mainwin)
menu.addActions(actionGroup.actions())
return menu
def actionGroupToToolbar(actionGroup, label, name=None):
if name is None:
# get camel case name
parts = str(label).title().split()
parts[0] = parts[0].lower()
name = ''.join(parts)
toolbar = QtWidgets.QToolBar(label)
toolbar.addActions(actionGroup.actions())
if name:
toolbar.setObjectName(name)
return toolbar
# Application cursor helpers ###############################################
def overrideCursor(func):
def aux(*args, **kwargs):
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
try:
return func(*args, **kwargs)
finally:
QtWidgets.QApplication.restoreOverrideCursor()
return aux
def callExpensiveFunc(func, *args, **kwargs):
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
try:
return func(*args, **kwargs)
finally:
QtWidgets.QApplication.restoreOverrideCursor()
# Table model/view helpers ##################################################
def clearTable(tablewidget):
"""Remove contents from a table widget preserving labels. """
labels = [
str(tablewidget.horizontalHeaderItem(col).text())
for col in range(tablewidget.columnCount())
]
tablewidget.clear()
tablewidget.setHorizontalHeaderLabels(labels)
tablewidget.setRowCount(0)
def selectAllItems(itemview):
"""Select all items in an QAbstractItemView."""
model = itemview.model()
topleft = model.index(0, 0)
try:
bottomright = model.index(model.rowCount() - 1,
model.columnCount() - 1)
except (TypeError, AttributeError):
# columnCount is a private method in QAbstractListModel
# assume it is a list
bottomright = model.index(model.rowCount() - 1)
selection = QtCore.QItemSelection(topleft, bottomright)
itemview.selectionModel().select(selection,
QtCore.QItemSelectionModel.Select)
# @QtCore.Slot(QtWidgets.QWidget) # @TODO: check
def copySelectedItems(itemview):
"""Copy selected items of an QAbstractItemView to the clipboard and
also return copied data."""
selection = itemview.selectionModel().selection()
lines = []
for itemrange in selection:
model = itemrange.model()
parent = itemrange.parent()
for row in range(itemrange.top(), itemrange.bottom() + 1):
parts = []
for col in range(itemrange.left(), itemrange.right() + 1):
index = model.index(row, col, parent)
parts.append(str(model.data(index)))
line = '\t'.join(parts)
lines.append(line)
data = '\n'.join(lines)
if data:
clipboard = QtWidgets.QApplication.clipboard()
clipboard.setText(data, QtGui.QClipboard.Clipboard)
clipboard.setText(data, QtGui.QClipboard.Selection)
# @TODO: check
# data = QtCore.QByteArray()
# data.append('\n'.join(lines))
# mimedata = QtCore.QMimeData()
# mimedata.setData('text/csv', data)
# clipboard = QtWidgets.QApplication.clipboard()
# clipboard.setMimeData(mimedata, QtGui.QClipboard.Clipboard)
# clipboard.setMimeData(mimedata, QtGui.QClipboard.Selection)
return data
def modelToIni(model, section=None, cfg=None):
assert model.columnCount() == 2
if cfg is None:
cfg = ConfigParser()
for row in range(model.rowCount()):
name = model.index(row, 0).data()
value = model.index(row, 1).data()
cfg.set(section, name, value)
return cfg
def modelToCsv(model, dialect='excel'):
fp = StringIO()
writer = csv.writer(fp, dialect)
try:
ncols = model.columnCount()
except TypeError:
# columnCount is a private method in QAbstractListModel
ncols = 1
for row in range(model.rowCount()):
line = []
for col in range(ncols):
line.append(model.index(row, col).data())
writer.writerow(line)
return fp.getvalue()
def modelToTextDocument(model, doc=None):
if doc is None:
doc = QtGui.QTextDocument()
cursor = QtGui.QTextCursor(doc)
cursor.movePosition(QtGui.QTextCursor.End)
cursor.beginEditBlock()
format_ = QtGui.QTextTableFormat()
format_.setCellPadding(5)
format_.setCellSpacing(0)
format_.setBorderStyle(QtGui.QTextFrameFormat.BorderStyle_Solid)
format_.setHeaderRowCount(1)
nrows = model.rowCount()
try:
ncols = model.columnCount()
except TypeError:
# columnCount is a private method in QAbstractListModel
ncols = 1
table = cursor.insertTable(nrows, ncols, format_)
# textformat = QtWidgets.QTextFormat()
for row in range(nrows):
for col in range(ncols):
text = model.index(row, col).data()
if text is None:
text = ''
else:
text = str(text)
cell = table.cellAt(row, col)
cellCursor = cell.firstCursorPosition()
cellCursor.insertText(text) # , textformat)
# headers style
headerformat = QtGui.QTextCharFormat()
headerformat.setFontWeight(QtGui.QFont.Bold)
brush = headerformat.background()
brush.setColor(QtCore.Qt.lightGray)
brush.setStyle(QtCore.Qt.SolidPattern)
headerformat.setBackground(brush)
# horizontal header
headers = [
model.headerData(col, QtCore.Qt.Horizontal) for col in range(ncols)
]
if any(headers):
table.insertRows(0, 1)
for col, text in enumerate(headers):
if text is None:
text = ''
else:
text = str(text)
cell = table.cellAt(0, col)
cell.setFormat(headerformat)
cellCursor = cell.firstCursorPosition()
cellCursor.insertText(text)
# vertical header
headers = [
model.headerData(row, QtCore.Qt.Vertical) for row in range(nrows)
]
if any(headers):
table.insertColumns(0, 1)
for row, text in enumerate(headers):
if text is None:
text = ''
else:
text = str(text)
cell = table.cellAt(row + 1, 0)
cell.setFormat(headerformat)
cellCursor = cell.firstCursorPosition()
cellCursor.insertText(text, headerformat)
cursor.endEditBlock()
return doc
def exportTable(model, parent=None):
filters = [
'CSV file (*.csv)',
'CSV TAB-delimited file (*.csv)',
'HTML file (*.html)',
'All files (*)',
]
try:
ncols = model.columnCount()
except TypeError:
# columnCount is a private method in QAbstractListModel
ncols = 1
if ncols == 1:
filters.insert(0, 'Text file (*.txt)')
target = os.path.join(utils.default_workdir(), 'data.txt')
    elif ncols == 2:
filters.insert(0, 'INI file format (*.ini)')
target = os.path.join(utils.default_workdir(), 'data.ini')
else:
target = os.path.join(utils.default_workdir(), 'data.csv')
# @TODO: check
if parent is None:
try:
parent = model.window()
except AttributeError:
parent = None
filename, filter_ = QtWidgets.QFileDialog.getSaveFileName(
parent, model.tr('Save'), target, ';;'.join(filters))
if filename:
ext = os.path.splitext(filename)[-1]
ext = ext.lower()
if ext == '.csv' or ext == '.txt':
if 'TAB' in filter_:
dialect = 'excel-tab'
else:
dialect = 'excel'
data = modelToCsv(model, dialect)
elif ext == '.ini':
cfg = modelToIni(model)
fp = StringIO()
cfg.write(fp)
data = fp.getvalue()
elif ext == '.html':
doc = modelToTextDocument(model)
data = doc.toHtml()
else:
# default
data = modelToCsv(model, 'excel-tab')
with open(filename, 'w') as fd:
fd.write(data)
def setViewContextActions(widget):
assert (widget.contextMenuPolicy() == QtCore.Qt.ActionsContextMenu), \
'menu policy is not "QtCore.Qt.ActionsContextMenu"'
# if widget.contextMenuPolicy() != QtCore.Qt.ActionsContextMenu:
# widget.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
icon = geticon('copy.svg', __name__)
action = QtWidgets.QAction(
icon, widget.tr('&Copy'), widget,
objectName='copyAction',
shortcut=widget.tr('Ctrl+C'),
toolTip=widget.tr('Copy selected items'),
triggered=lambda: copySelectedItems(widget))
widget.addAction(action)
icon = QtGui.QIcon.fromTheme('edit-select-all')
action = QtWidgets.QAction(
icon, widget.tr('Select &All'), widget,
objectName='selectAllAction',
# shortcut=widget.tr('Ctrl+A'),
toolTip=widget.tr('Select all items'),
triggered=lambda: selectAllItems(widget))
widget.addAction(action)
icon = widget.style().standardIcon(QtWidgets.QStyle.SP_DialogSaveButton)
action = QtWidgets.QAction(
icon, widget.tr('&Save As'), widget,
objectName='saveAsAction',
shortcut=widget.tr('Ctrl+S'),
statusTip=widget.tr('Save as'),
triggered=lambda: exportTable(widget.model()))
widget.addAction(action)
icon = QtGui.QIcon.fromTheme('printer')
action = QtWidgets.QAction(
icon, widget.tr('&Print'), widget,
objectName='printAction',
shortcut=widget.tr('Ctrl+P'),
statusTip=widget.tr('Print'),
triggered=lambda: printObject(widget))
widget.addAction(action)
# icon = QtGui.QIcon(
# ':/trolltech/styles/commonstyle/images/filecontents-128.png')
# action = QtWidgets.QAction(icon, widget.tr('Print Preview'), widget,
# objectName='printPreviewAction',
# statusTip=widget.tr('Print Preview'))#,
# #triggered=tablePrintPreview)
# # @TODO: tablePrintPreview
# widget.addAction(action)
# Printing helpers ##########################################################
def coreprint(obj, printer):
painter = QtGui.QPainter(printer)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
obj.render(painter)
painter.end()
def printObject(obj, printer=None, parent=None):
if printer is None:
printer = QtPrintSupport.QPrinter(
QtPrintSupport.QPrinter.PrinterResolution)
        # printer.setOutputFileName(os.path.join(utils.default_workdir(),
        #                                        'filename.pdf'))
# @TODO: check
if parent is None:
try:
parent = obj.window()
except AttributeError:
parent = None
    # dialog = QtPrintSupport.QPrintDialog(printer)
    # try:
    #     window = obj.window()
    # except AttributeError:
    #     window = None
    # preview = QtPrintSupport.QPrintPreviewWidget(printer, window)
    # preview.paintRequested.connect(coreprint)
    # dialog.setOptionTabs([preview])
    # ret = dialog.exec_()
ret = QtPrintSupport.QPrintDialog(printer, parent).exec_()
if ret == QtWidgets.QDialog.Accepted:
if isinstance(obj, (QtGui.QTextDocument, QtWidgets.QTextEdit)):
obj.print_(printer)
elif hasattr(obj, 'model'):
model = obj.model()
doc = modelToTextDocument(model)
doc.print_(printer)
elif isinstance(obj, QtCore.QAbstractItemModel):
doc = modelToTextDocument(obj)
doc.print_(printer)
else:
coreprint(obj, printer)
def printPreview(obj, printer=None, parent=None):
if printer is None:
printer = QtPrintSupport.QPrinter(
QtPrintSupport.QPrinter.PrinterResolution)
# @TODO: check
if parent is None:
try:
parent = obj.window()
except AttributeError:
parent = None
    dialog = QtPrintSupport.QPrintPreviewDialog(printer, parent)
    dialog.paintRequested.connect(lambda p: coreprint(obj, p))
ret = dialog.exec_()
# @WARNING: duplicate code
ret = QtPrintSupport.QPrintDialog(printer, parent).exec_()
if ret == QtWidgets.QDialog.Accepted:
if isinstance(obj, (QtGui.QTextDocument, QtWidgets.QTextEdit)):
obj.print_(printer)
        elif hasattr(obj, 'model'):
            model = obj.model()
            doc = modelToTextDocument(model)
            doc.print_(printer)
elif isinstance(obj, QtCore.QAbstractItemModel):
doc = modelToTextDocument(obj)
doc.print_(printer)
else:
coreprint(obj, printer)
# QImage helpers ###########################################################
import numpy as np
GRAY_COLORTABLE = [QtGui.QColor(i, i, i).rgba() for i in range(256)]
RED_COLORTABLE = [QtGui.QColor(i, 0, 0).rgba() for i in range(256)]
GREEN_COLORTABLE = [QtGui.QColor(0, i, 0).rgba() for i in range(256)]
BLUE_COLORTABLE = [QtGui.QColor(0, 0, i).rgba() for i in range(256)]
JET_COLORTABLE = [QtGui.QColor(r, g, b).rgba() for r, g, b in [
[ 0, 0, 128],
[ 0, 0, 132],
[ 0, 0, 137],
[ 0, 0, 141],
[ 0, 0, 146],
[ 0, 0, 150],
[ 0, 0, 155],
[ 0, 0, 159],
[ 0, 0, 164],
[ 0, 0, 168],
[ 0, 0, 173],
[ 0, 0, 178],
[ 0, 0, 182],
[ 0, 0, 187],
[ 0, 0, 191],
[ 0, 0, 196],
[ 0, 0, 200],
[ 0, 0, 205],
[ 0, 0, 209],
[ 0, 0, 214],
[ 0, 0, 218],
[ 0, 0, 223],
[ 0, 0, 227],
[ 0, 0, 232],
[ 0, 0, 237],
[ 0, 0, 241],
[ 0, 0, 246],
[ 0, 0, 250],
[ 0, 0, 255],
[ 0, 0, 255],
[ 0, 0, 255],
[ 0, 0, 255],
[ 0, 0, 255],
[ 0, 4, 255],
[ 0, 8, 255],
[ 0, 12, 255],
[ 0, 16, 255],
[ 0, 20, 255],
[ 0, 24, 255],
[ 0, 28, 255],
[ 0, 32, 255],
[ 0, 36, 255],
[ 0, 40, 255],
[ 0, 44, 255],
[ 0, 48, 255],
[ 0, 52, 255],
[ 0, 56, 255],
[ 0, 60, 255],
[ 0, 64, 255],
[ 0, 68, 255],
[ 0, 72, 255],
[ 0, 76, 255],
[ 0, 80, 255],
[ 0, 84, 255],
[ 0, 88, 255],
[ 0, 92, 255],
[ 0, 96, 255],
[ 0, 100, 255],
[ 0, 104, 255],
[ 0, 108, 255],
[ 0, 112, 255],
[ 0, 116, 255],
[ 0, 120, 255],
[ 0, 124, 255],
[ 0, 128, 255],
[ 0, 132, 255],
[ 0, 136, 255],
[ 0, 140, 255],
[ 0, 144, 255],
[ 0, 148, 255],
[ 0, 152, 255],
[ 0, 156, 255],
[ 0, 160, 255],
[ 0, 164, 255],
[ 0, 168, 255],
[ 0, 172, 255],
[ 0, 176, 255],
[ 0, 180, 255],
[ 0, 184, 255],
[ 0, 188, 255],
[ 0, 192, 255],
[ 0, 196, 255],
[ 0, 200, 255],
[ 0, 204, 255],
[ 0, 208, 255],
[ 0, 212, 255],
[ 0, 216, 255],
[ 0, 220, 254],
[ 0, 224, 251],
[ 0, 228, 248],
[ 2, 232, 244],
[ 6, 236, 241],
[ 9, 240, 238],
[ 12, 244, 235],
[ 15, 248, 231],
[ 19, 252, 228],
[ 22, 255, 225],
[ 25, 255, 222],
[ 28, 255, 219],
[ 31, 255, 215],
[ 35, 255, 212],
[ 38, 255, 209],
[ 41, 255, 206],
[ 44, 255, 202],
[ 48, 255, 199],
[ 51, 255, 196],
[ 54, 255, 193],
[ 57, 255, 190],
[ 60, 255, 186],
[ 64, 255, 183],
[ 67, 255, 180],
[ 70, 255, 177],
[ 73, 255, 173],
[ 77, 255, 170],
[ 80, 255, 167],
[ 83, 255, 164],
[ 86, 255, 160],
[ 90, 255, 157],
[ 93, 255, 154],
[ 96, 255, 151],
[ 99, 255, 148],
[102, 255, 144],
[106, 255, 141],
[109, 255, 138],
[112, 255, 135],
[115, 255, 131],
[119, 255, 128],
[122, 255, 125],
[125, 255, 122],
[128, 255, 119],
[131, 255, 115],
[135, 255, 112],
[138, 255, 109],
[141, 255, 106],
[144, 255, 102],
[148, 255, 99],
[151, 255, 96],
[154, 255, 93],
[157, 255, 90],
[160, 255, 86],
[164, 255, 83],
[167, 255, 80],
[170, 255, 77],
[173, 255, 73],
[177, 255, 70],
[180, 255, 67],
[183, 255, 64],
[186, 255, 60],
[190, 255, 57],
[193, 255, 54],
[196, 255, 51],
[199, 255, 48],
[202, 255, 44],
[206, 255, 41],
[209, 255, 38],
[212, 255, 35],
[215, 255, 31],
[219, 255, 28],
[222, 255, 25],
[225, 255, 22],
[228, 255, 19],
[231, 255, 15],
[235, 255, 12],
[238, 255, 9],
[241, 252, 6],
[244, 248, 2],
[248, 245, 0],
[251, 241, 0],
[254, 237, 0],
[255, 234, 0],
[255, 230, 0],
[255, 226, 0],
[255, 222, 0],
[255, 219, 0],
[255, 215, 0],
[255, 211, 0],
[255, 208, 0],
[255, 204, 0],
[255, 200, 0],
[255, 196, 0],
[255, 193, 0],
[255, 189, 0],
[255, 185, 0],
[255, 182, 0],
[255, 178, 0],
[255, 174, 0],
[255, 171, 0],
[255, 167, 0],
[255, 163, 0],
[255, 159, 0],
[255, 156, 0],
[255, 152, 0],
[255, 148, 0],
[255, 145, 0],
[255, 141, 0],
[255, 137, 0],
[255, 134, 0],
[255, 130, 0],
[255, 126, 0],
[255, 122, 0],
[255, 119, 0],
[255, 115, 0],
[255, 111, 0],
[255, 108, 0],
[255, 104, 0],
[255, 100, 0],
[255, 96, 0],
[255, 93, 0],
[255, 89, 0],
[255, 85, 0],
[255, 82, 0],
[255, 78, 0],
[255, 74, 0],
[255, 71, 0],
[255, 67, 0],
[255, 63, 0],
[255, 59, 0],
[255, 56, 0],
[255, 52, 0],
[255, 48, 0],
[255, 45, 0],
[255, 41, 0],
[255, 37, 0],
[255, 34, 0],
[255, 30, 0],
[255, 26, 0],
[255, 22, 0],
[255, 19, 0],
[250, 15, 0],
[246, 11, 0],
[241, 8, 0],
[237, 4, 0],
[232, 0, 0],
[228, 0, 0],
[223, 0, 0],
[218, 0, 0],
[214, 0, 0],
[209, 0, 0],
[205, 0, 0],
[200, 0, 0],
[196, 0, 0],
[191, 0, 0],
[187, 0, 0],
[182, 0, 0],
[178, 0, 0],
[173, 0, 0],
[168, 0, 0],
[164, 0, 0],
[159, 0, 0],
[155, 0, 0],
[150, 0, 0],
[146, 0, 0],
[141, 0, 0],
[137, 0, 0],
[132, 0, 0],
[128, 0, 0],
]]
def _aligned(data, nbyes=4):
h, w = data.shape
fact = nbyes / data.itemsize
# math.ceil return int
shape = (h, math.ceil(w / fact) * nbyes)
if shape != data.shape:
# build aligned matrix
image = np.zeros(shape, data.dtype)
image[:, 0:w] = data[:, 0:w]
else:
image = np.require(data, data.dtype, 'CO') # 'CAO'
return image
def numpy2qimage(data, colortable=GRAY_COLORTABLE):
"""Convert a numpy array into a QImage.
.. note:: requires sip >= 4.7.5.
"""
has_colortable = False
if data.dtype in (np.uint8, np.ubyte, np.byte):
if data.ndim == 2:
h, w = data.shape
image = _aligned(data)
format_ = QtGui.QImage.Format_Indexed8
has_colortable = True
elif data.ndim == 3 and data.shape[2] == 3:
h, w = data.shape[:2]
image = np.zeros((h, w, 4), data.dtype)
image[:, :, 2::-1] = data
image[..., -1] = 255
format_ = QtGui.QImage.Format_RGB32
elif data.ndim == 3 and data.shape[2] == 4:
h, w = data.shape[:2]
image = np.require(data, np.uint8, 'CO') # 'CAO'
format_ = QtGui.QImage.Format_ARGB32
else:
raise ValueError('unable to convert data: shape=%s, '
'dtype="%s"' % (data.shape,
np.dtype(data.dtype)))
elif data.dtype == np.uint16 and data.ndim == 2:
# @TODO: check
h, w = data.shape
image = _aligned(data)
format_ = QtGui.QImage.Format_RGB16
elif data.dtype == np.uint32 and data.ndim == 2:
h, w = data.shape
image = np.require(data, data.dtype, 'CO') # 'CAO'
# format_ = QtGui.QImage.Format_ARGB32
format_ = QtGui.QImage.Format_RGB32
else:
raise ValueError(
f'unable to convert data: shape={data.shape}, '
f'dtype="{np.dtype(data.dtype)}"')
result = QtGui.QImage(image.data, w, h, format_)
result.ndarray = image
if has_colortable:
result.setColorTable(colortable)
return result
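# Example (a minimal sketch): converting an 8-bit grayscale array into an
# indexed QImage, optionally with a false-color table:
#
#   data = np.arange(256, dtype=np.uint8).reshape(16, 16)
#   img = numpy2qimage(data)                      # Format_Indexed8, gray ramp
#   img_jet = numpy2qimage(data, JET_COLORTABLE)  # same data, jet palette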
# Resources helpers #########################################################
def getuifile(name, package=None):
"""Return the ui file path.
    It is assumed that Qt UI files are located in the "ui" subfolder of
the package.
.. seealso:: :func:`gsdview.utils.getresource`
"""
return utils.getresource(os.path.join('ui', name), package)
def getuiform(name, package=None):
"""Return the ui form class.
    If a pre-built python module is available, the form class is
    imported from it (assuming that the module contains a single UI
    class having a name that starts with `Ui_`).
    If no pre-built python module is available then the form class is
    loaded directly from the ui file using the PyQt5.uic helper module.
    .. note:: if the pyside2 package is used to provide bindings for Qt5
              then the uic module is not available and only pre-built
              modules are searched.
              When pyside2 is used an :exc:`ImportError` is raised
              if pre-built forms are not available.
    .. note:: like :func:`gsdview.qtsupport.getuifile` this
              function assumes that pre-built form modules and Qt UI
              files are located in the "ui" subfolder of the package.
.. seealso:: :func:`gsdview.utils.getresource`,
:func:`gsdview.qtsupport.getuifile`
"""
try:
fromlist = package.rsplit('.')[:-1]
fromlist.append('ui')
modname = '.'.join(fromlist + [name])
module = __import__(modname, fromlist=fromlist)
formnames = [
key for key in module.__dict__.keys() if key.startswith('Ui_')
]
formname = formnames[0]
FormClass = getattr(module, formname)
_log.debug('load "%s" form base class from pre-compiled python module',
formname)
except ImportError:
uifile = getuifile(name + '.ui', package)
FormClass, QtBaseClass = loadUiType(uifile)
_log.debug('load "%s" form class from ui file', FormClass.__name__)
return FormClass
def geticonfile(name, package=None):
"""Return the icon file path.
It is assumed that icon files are located in the "images" subfolder
of the package.
.. seealso:: :func:`gsdview.utils.getresource`
"""
return utils.getresource(os.path.join('images', name), package)
def geticon(name, package=None):
"""Build and return requested icon.
It is assumed that icon files are located in the "images" subfolder
of the package.
.. seealso:: :func:`gsdview.utils.getresource`
"""
iconfile = utils.getresource(os.path.join('images', name), package)
return QtGui.QIcon(iconfile)
# Misc helpers ##############################################################
def cfgToTextDocument(cfg, doc=None):
if doc is None:
doc = QtGui.QTextDocument()
cursor = QtGui.QTextCursor(doc)
cursor.movePosition(QtGui.QTextCursor.End)
# table style
tableformat = QtGui.QTextTableFormat()
tableformat.setTopMargin(10)
tableformat.setBottomMargin(10)
tableformat.setCellPadding(5)
tableformat.setCellSpacing(0)
tableformat.setBorderStyle(QtGui.QTextFrameFormat.BorderStyle_Solid)
tableformat.setHeaderRowCount(1)
# headers style
titleblockformat = QtGui.QTextBlockFormat()
titleblockformat.setTopMargin(20)
titleblockformat.setBottomMargin(10)
titleformat = QtGui.QTextCharFormat()
titleformat.setFontWeight(QtGui.QFont.Bold)
    # titleformat.setFontPointSize(12)
# headers style
headerformat = QtGui.QTextCharFormat()
headerformat.setFontWeight(QtGui.QFont.Bold)
brush = headerformat.background()
brush.setColor(QtCore.Qt.lightGray)
brush.setStyle(QtCore.Qt.SolidPattern)
headerformat.setBackground(brush)
for section in cfg.sections():
items = sorted(cfg.items(section))
if not items:
continue
cursor.beginEditBlock()
cursor.movePosition(QtGui.QTextCursor.End)
# title
cursor.insertBlock(titleblockformat)
cursor.insertText(section, titleformat)
nrows = len(items)
ncols = 2
table = cursor.insertTable(nrows, ncols, tableformat)
# textformat = QtWidgets.QTextFormat()
for index, (key, value) in enumerate(items):
cell = table.cellAt(index, 0)
cellCursor = cell.firstCursorPosition()
cellCursor.insertText(key)
cell = table.cellAt(index, 1)
cellCursor = cell.firstCursorPosition()
cellCursor.insertText(value)
# horizontal header
headers = [doc.tr('Key'), doc.tr('Value')]
table.insertRows(0, 1)
for col, text in enumerate(headers):
cell = table.cellAt(0, col)
cell.setFormat(headerformat)
cellCursor = cell.firstCursorPosition()
cellCursor.insertText(text)
# vertical header
table.insertColumns(0, 1)
for row in range(1, nrows + 1):
text = str(row)
cell = table.cellAt(row, 0)
cell.setFormat(headerformat)
cellCursor = cell.firstCursorPosition()
cellCursor.insertText(text, headerformat)
cursor.endEditBlock()
return doc
def imgexport(obj, parent=None):
filters = [
obj.tr('All files (*)'),
obj.tr('Simple Vector Graphics file (*.svg)'),
obj.tr('PDF file (*.pdf)'),
obj.tr('PostScript file (*.ps)'),
]
filters.extend('{} file (*.{})'.format(
str(fmt).upper(), str(fmt))
for fmt in QtGui.QImageWriter.supportedImageFormats())
formats = {
str(fmt).lower()
for fmt in QtGui.QImageWriter.supportedImageFormats()
}
formats.update(('svg', 'pdf', 'ps'))
# @TODO: check
if parent is None:
try:
parent = obj.window()
except AttributeError:
parent = None
target = os.path.join(utils.default_workdir(), 'image.jpeg')
filename, filter_ = QtWidgets.QFileDialog.getSaveFileName(
parent, obj.tr('Save picture'), target, ';;'.join(filters))
ext = 'unknown'
while filename and (ext not in formats):
ext = os.path.splitext(filename)[1]
if ext:
ext = ext[1:].lower()
if ext in formats:
break
else:
QtWidgets.QMessageBox.information(
parent, obj.tr('Unknown file format'),
obj.tr('Unknown file format "%s".\nPlease retry.') % ext)
filename, filter_ = QtWidgets.QFileDialog.getSaveFileName(
parent, obj.tr('Save draw'), filename, ';;'.join(filters),
filter_)
else:
ext = 'unknown'
if filename:
if hasattr(obj, 'viewport'):
srcsize = obj.viewport().rect().size()
elif hasattr(obj, 'sceneRect'):
# QGraphicsViews also has a viewport method so they should be
# trapped by the previous check
srcsize = obj.sceneRect().toRect().size()
else:
            srcsize = QtCore.QSize(800, 600)
if ext in ('pdf', 'ps'):
device = QtPrintSupport.QPrinter(
QtPrintSupport.QPrinter.HighResolution)
device.setOutputFileName(filename)
if ext == 'pdf':
device.setOutputFormat(QtPrintSupport.QPrinter.PdfFormat)
else:
# ext == 'ps'
device.setOutputFormat(
QtPrintSupport.QPrinter.PostScriptFormat)
elif ext == 'svg':
device = QtSvg.QSvgGenerator()
device.setFileName(filename)
device.setSize(srcsize)
# device.setViewBox(obj.sceneRect().toRect())
# device.setTitle(obj.tr('Graphics Draw'))
# device.setDescription(obj.tr('Qt SVG drawing.'))
else:
device = QtGui.QPixmap(srcsize)
# @TODO: check
device.fill(QtCore.Qt.white)
painter = QtGui.QPainter()
if painter.begin(device):
# painter.setRenderHint(QtGui.QPainter.Antialiasing)
obj.render(painter)
painter.end()
if hasattr(device, 'save'):
device.save(filename)
else:
QtWidgets.QMessageBox.warning(
parent,
obj.tr('Warning'),
obj.tr('Unable initialize painting device.'))
# Qt info
def format_qt_info():
qlocale = QtCore.QLocale()
supported_image_formats = [
fmt.data().decode('utf-8')
for fmt in QtGui.QImageReader.supportedImageFormats()
]
qt_info = [
'Qt system locale: %s\n' % qlocale.system().name(),
'Qt locale name: %s\n' % qlocale.name(),
'Qt locale country: %s\n' % qlocale.countryToString(qlocale.country()),
'Qt locale language: %s\n' % qlocale.languageToString(
qlocale.language()),
'Qt locale decimal point: "%s"\n' % qlocale.decimalPoint(),
'Qt UI languages: %s\n' % qlocale.uiLanguages(),
'Qt supported image formats: %s\n' % ', '.join(
supported_image_formats),
]
return qt_info
| gpl-2.0 | 8,448,601,159,089,923,000 | 28.355872 | 100 | 0.552128 | false |
pombredanne/django-rest-framework-api-server | api/tests/test_login.py | 1 | 1037 | from django_webtest import WebTest
from django.contrib.auth.models import User
class TestLogInAndGetUserList(WebTest):
def testLoginAnGetUsers(self):
User.objects.create_user("prairiedogg", **{"password": "my_$pecial_password"})
username_and_password = {"username": "prairiedogg",
"password": "my_$pecial_password"}
login_response = self.app.post_json('/api/auth/token/',
username_and_password,
status=200)
token = login_response.json_body['token']
headers = {'Authorization': str('Token %s' % token)}
users_response = self.app.get('/api/users/',
headers=headers,
status=200)
number_of_users = len(users_response.json)
self.assertEqual(number_of_users, 1);
first_user = users_response.json[0]
self.assertEqual(first_user["username"], "prairiedogg")
| bsd-2-clause | 8,711,859,397,793,313,000 | 38.884615 | 86 | 0.544841 | false |
camptocamp/QGIS | python/plugins/processing/gui/MessageBarProgress.py | 1 | 2019 | # -*- coding: utf-8 -*-
"""
***************************************************************************
SilentProgress.py
---------------------
Date : April 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'April 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing import interface
from PyQt4.QtCore import *
from PyQt4 import QtGui
class MessageBarProgress():
def __init__(self):
self.progressMessageBar = interface.iface.messageBar().createMessage("Executing algorithm")
self.progress = QtGui.QProgressBar()
self.progress.setMaximum(100)
self.progress.setAlignment(Qt.AlignLeft|Qt.AlignVCenter)
self.progressMessageBar.layout().addWidget(self.progress)
interface.iface.messageBar().pushWidget(self.progressMessageBar, interface.iface.messageBar().INFO)
def setText(self, text):
pass
def setPercentage(self, i):
self.progress.setValue(i)
def setInfo(self, _):
pass
def setCommand(self, _):
pass
def setDebugInfo(self, _):
pass
def setConsoleInfo(self, _):
pass
def close(self):
interface.iface.messageBar().clearWidgets()
| gpl-2.0 | 3,237,296,405,584,719,400 | 32.098361 | 107 | 0.505201 | false |
ThreatCentral/blackberries | src/ThreatCentral/transforms/EmailToActors.py | 1 | 6520 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright [2016] Hewlett Packard Enterprise Development LP Licensed under
# the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable
# law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
from canari.maltego.utils import debug
from canari.framework import configure
from canari.maltego.entities import EmailAddress
from common.entities import Actor
from canari.maltego.message import Label, UIMessage
from common.client import search_actor, encode_to_utf8, lower, ThreatCentralError
__author__ = 'Bart Otten'
__copyright__ = '(c) Copyright [2016] Hewlett Packard Enterprise Development LP'
__credits__ = []
__license__ = 'Apache 2.0'
__version__ = '1'
__maintainer__ = 'Bart Otten'
__email__ = '[email protected]'
__status__ = 'Development'
__all__ = [
'dotransform'
]
@configure(
label='Search Email Address in Actors',
description='Searches Email Address on Threat Central',
uuids=['threatcentral.v2.EmailToActors'],
inputs=[('Threat Central', EmailAddress)],
debug=False,
remote=False
)
def dotransform(request, response, config):
i = 0
for actor in search_actor(request.value):
try:
rtype = lower(actor.get('type'))
if actor.get('tcScore'):
weight = int(actor.get('tcScore'))
else:
weight = 1
actor = actor.get('resource')
# actor_name = actor.get('name', '').split('\n')
            if len(actor) != 0:
if rtype == 'actor':
if actor.get('name'):
e = Actor(encode_to_utf8(actor.get('name')), weight=weight)
e.name = encode_to_utf8(actor.get('name'))
e.actor = encode_to_utf8(actor.get('name'))
elif actor.get('title'):
e = Actor(encode_to_utf8(actor.get('title')))
e.title = encode_to_utf8(actor.get('title'))
e.resourceId = actor.get('resourceId')
if actor.get('organization'):
e.organization = encode_to_utf8(actor.get('organization'))
if actor.get('aliases'):
e.aliases = actor.get('aliases')
if actor.get('country'):
e.country = encode_to_utf8(actor.get('country', dict()).get('displayName'))
if actor.get('score'):
e.score = actor.get('score')
if actor.get('links'):
e += Label('Links', '<br/>'.join(['<a href="{}">{}</a>'.format(_.get('href'),
_.get('href'))
for _ in actor.get('links')]))
if actor.get('hyperlinks'):
e += Label('Hyperlinks', '<br/>'.join(['<a href="{}">{}</a>'.format(_.get('url'),
_.get('title'))
for _ in actor.get('hyperlinks')]))
if actor.get('title'):
e += Label('Title', encode_to_utf8(actor.get('title')))
if actor.get('resourceId'):
e += Label('ResourceID', actor.get('resourceId'))
if actor.get('aliases'):
e += Label('Aliases', '<br/>'.join([encode_to_utf8(_) for _ in actor.get('aliases', '')]))
if actor.get('description'):
e += Label('Description', '<br/>'.join(encode_to_utf8(actor.get('description', '')
).split('\n')))
if actor.get('country'):
e += Label('Country', encode_to_utf8(actor.get('country', dict()).get('displayName')))
if actor.get('organization'):
e += Label('Organization', encode_to_utf8(actor.get('organization')))
if actor.get('types'):
e += Label('Types', '<br/>'.join([encode_to_utf8(_.get('displayName'))
for _ in actor.get('types')]))
if actor.get('motivations'):
e += Label('Motivations', '<br/>'.join([encode_to_utf8(_.get('displayName'))
for _ in actor.get('motivations')]))
if actor.get('intendedEffects'):
e += Label('Intended Effects', '<br/>'.join([encode_to_utf8(_.get('displayName'))
for _ in actor.get('intendedEffects')]))
if actor.get('sophistication'):
e += Label('Sophistication', actor.get('sophistication', dict()).get('displayName'))
if actor.get('socialMediaText'):
e += Label('Social Media', '<br/>'.join(encode_to_utf8(actor.get('socialMediaText',
'')).split('\n')))
if actor.get('moreInfo'):
e += Label('More Info', '<br/>'.join(encode_to_utf8(actor.get('moreInfo', '')
).split('\n')))
if actor.get('score'):
e += Label('Score', actor.get('score'))
if i < 1:
i += 1
e.linkcolor = "0xf90000"
response += e
except AttributeError as err:
response += UIMessage(err, type='PartialError')
continue
except ThreatCentralError as err:
response += UIMessage(err.value, type='PartialError')
except TypeError:
return response
return response
| apache-2.0 | -68,039,911,885,579,130 | 45.241135 | 114 | 0.468712 | false |
npdoty/pywikibot | pywikibot/families/wikisource_family.py | 1 | 3906 | # -*- coding: utf-8 -*-
"""Family module for Wikisource."""
#
# (C) Pywikibot team, 2004-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
from pywikibot import family
__version__ = '$Id$'
# The Wikimedia family that is known as Wikisource
class Family(family.SubdomainFamily, family.WikimediaFamily):
"""Family class for Wikisource."""
name = 'wikisource'
closed_wikis = [
# https://noc.wikimedia.org/conf/highlight.php?file=closed.dblist
'ang', 'ht',
]
removed_wikis = [
# https://noc.wikimedia.org/conf/highlight.php?file=deleted.dblist
'tokipona',
]
def __init__(self):
"""Constructor."""
self.languages_by_size = [
'en', 'pl', 'ru', 'de', 'fr', 'zh', 'he', 'it', 'es', 'ar', 'cs',
'pt', 'fa', 'hu', 'www', 'ml', 'ko', 'sv', 'gu', 'sl', 'bn', 'te',
'sa', 'sr', 'ro', 'el', 'fi', 'uk', 'vi', 'ja', 'th', 'hy', 'az',
'ca', 'hr', 'br', 'ta', 'nl', 'is', 'la', 'no', 'vec', 'eo', 'tr',
'be', 'et', 'mk', 'yi', 'id', 'da', 'bg', 'li', 'mr', 'as', 'kn',
'or', 'bs', 'sah', 'lt', 'gl', 'cy', 'sk', 'zh-min-nan', 'fo',
'pa',
]
super(Family, self).__init__()
# All requests to 'mul.wikisource.org/*' are redirected to
# the main page, so using 'wikisource.org'
self.langs['mul'] = self.domain
self.languages_by_size.append('mul')
# Global bot allowed languages on
# https://meta.wikimedia.org/wiki/BPI#Current_implementation
self.cross_allowed = [
'ca', 'el', 'fa', 'it', 'ko', 'no', 'pl', 'vi', 'zh',
]
self.authornamespaces = {
'_default': [0],
'ar': [102],
'be': [102],
'bg': [100],
'ca': [106],
'cs': [100],
'da': [102],
'en': [102],
'eo': [102],
'et': [106],
'fa': [102],
'fr': [102],
'he': [108],
'hr': [100],
'hu': [100],
'hy': [100],
'it': [102],
'ko': [100],
'la': [102],
'nl': [102],
'no': [102],
'pl': [104],
'pt': [102],
'ro': [102],
'sv': [106],
'tr': [100],
'vi': [102],
'zh': [102],
}
# Subpages for documentation.
# TODO: List is incomplete, to be completed for missing languages.
# TODO: Remove comments for appropriate pages
self.doc_subpages = {
'_default': ((u'/doc', ),
['ar', 'as', 'az', 'bn', 'en', 'es',
'et', 'gu', 'hu', 'it', 'ja', 'kn', 'ml',
'mk', 'mr', 'pt', 'ro', 'sa', 'sah', 'ta',
'te', 'th', 'vi']
),
'be': (u'/Дакументацыя', ),
'bn': (u'/নথি', ),
'br': (u'/diellerezh', ),
'de': (u'/Doku', u'/Meta'),
'el': (u'/τεκμηρίωση', ),
            'eo': (u'/dokumentado', ),
# 'fa': (u'/صفحه الگو', ),
# 'fa': (u'/فضاینام توضیحات', ),
# 'fa': (u'/آغاز جعبه', ),
# 'fa': (u'/پایان جعبه۲', ),
# 'fa': (u'/آغاز جعبه۲', ),
# 'fa': (u'/پایان جعبه', ),
# 'fa': (u'/توضیحات', ),
'fr': (u'/documentation', ),
'id': (u'/dok', ),
'ko': (u'/설명문서', ),
'no': (u'/dok', ),
'ru': (u'/Документация', ),
'sl': (u'/dok', ),
'sv': (u'/dok', ),
'uk': (u'/документація', ),
}
| mit | 5,903,984,611,172,784,000 | 31.025424 | 78 | 0.387933 | false |
PPC64/folly | folly/build/generate_varint_tables.py | 1 | 3152 | #!/usr/bin/env python
#
# Generate tables for GroupVarint32
# Copyright 2011 Facebook
#
# @author Tudor Bosman ([email protected])
#
# Reference: http://www.stepanovpapers.com/CIKM_2011.pdf
#
# From 17 encoded bytes, we may use between 5 and 17 bytes to encode 4
# integers. The first byte is a key that indicates how many bytes each of
# the 4 integers takes:
#
# bit 0..1: length-1 of first integer
# bit 2..3: length-1 of second integer
# bit 4..5: length-1 of third integer
# bit 6..7: length-1 of fourth integer
#
# The value of the first byte is used as the index in a table which returns
# a mask value for the SSSE3 PSHUFB instruction, which takes an XMM register
# (16 bytes) and shuffles bytes from it into a destination XMM register
# (optionally setting some of them to 0)
#
# For example, if the key has value 4, that means that the first integer
# uses 1 byte, the second uses 2 bytes, the third and fourth use 1 byte each,
# so we set the mask value so that
#
# r[0] = a[0]
# r[1] = 0
# r[2] = 0
# r[3] = 0
#
# r[4] = a[1]
# r[5] = a[2]
# r[6] = 0
# r[7] = 0
#
# r[8] = a[3]
# r[9] = 0
# r[10] = 0
# r[11] = 0
#
# r[12] = a[4]
# r[13] = 0
# r[14] = 0
# r[15] = 0
import os
from optparse import OptionParser
OUTPUT_FILE = "GroupVarintTables.cpp"
def generate(f):
f.write("""
#include <folly/Portability.h>
#include <stdint.h>
#if (FOLLY_X64 || defined(__i386__)) && (FOLLY_SSE >= 2)
#include <x86intrin.h>
#endif
namespace folly {
namespace detail {
#if (FOLLY_X64 || defined(__i386__)) && (FOLLY_SSE >= 2)
extern const __m128i groupVarintSSEMasks[] = {
""")
# Compute SSE masks
for i in range(0, 256):
offset = 0
vals = [0, 0, 0, 0]
for j in range(0, 4):
d = 1 + ((i >> (2 * j)) & 3)
# the j'th integer uses d bytes, consume them
for k in range(0, d):
vals[j] |= offset << (8 * k)
offset += 1
# set remaining bytes in result to 0
# 0xff: set corresponding byte in result to 0
for k in range(d, 4):
vals[j] |= 0xff << (8 * k)
f.write(" {{static_cast<int64_t>(0x{1:08x}{0:08x}), "
"static_cast<int64_t>(0x{3:08x}{2:08x})}},\n".format(*vals))
f.write("};\n"
"#endif /*#if (FOLLY_X64 || defined(__i386__)) && (FOLLY_SSE >= 2)*/\n"
"\n"
"extern const uint8_t groupVarintLengths[] = {\n")
# Also compute total encoded lengths, including key byte
for i in range(0, 256):
offset = 1 # include key byte
for j in range(0, 4):
d = 1 + ((i >> (2 * j)) & 3)
offset += d
f.write(" {0},\n".format(offset))
f.write("""
};
} // namespace detail
} // namespace folly
""")
def main():
parser = OptionParser()
parser.add_option("--install_dir", dest="install_dir", default=".",
help="write output to DIR", metavar="DIR")
parser.add_option("--fbcode_dir")
(options, args) = parser.parse_args()
f = open(os.path.join(options.install_dir, OUTPUT_FILE), "w")
generate(f)
f.close()
if __name__ == "__main__":
main()
| apache-2.0 | 2,212,862,077,880,834,300 | 25.711864 | 83 | 0.56948 | false |
ROCmSoftwarePlatform/Tensile | Tensile/Utilities/merge.py | 1 | 19273 | ################################################################################
# Copyright 2020-2021 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
# ies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
# PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
# CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
################################################################################
import yaml
import os
import sys
import argparse
from copy import deepcopy
from enum import IntEnum
verbosity = 1
def ensurePath(path):
if not os.path.exists(path):
os.makedirs(path)
return path
def allFiles(startDir):
    # recurse into subdirectories and collect every .yaml file; filtering on
    # the .yaml extension before the isdir() check would skip directories
    # entirely and make the recursive branch unreachable
    files = []
    for filename in os.listdir(startDir):
        fullPath = os.path.join(startDir, filename)
        if os.path.isdir(fullPath):
            files = files + allFiles(fullPath)
        elif os.path.splitext(filename)[-1].lower() == '.yaml':
            files.append(fullPath)
    return files
def reindexSolutions(data):
for i, _ in enumerate(data[5]):
data[5][i]["SolutionIndex"] = i
return data
def fixSizeInconsistencies(sizes, fileType):
duplicates = list()
for i in range(0,len(sizes)):
currSize = sizes[i][0]
# >= so size will be trimmed when a SolutionTag is included
if len(currSize) >= 8:
currSize = currSize[:-4]
if currSize in (item for index in sizes for item in index):
duplicates.append(i-len(duplicates))
else:
sizes[i][0] = currSize
sizes_ = deepcopy(sizes)
if len(duplicates) > 0:
for i in duplicates:
sizes_.pop(i)
verbose(len(duplicates), "duplicate size(s) removed from", fileType, "logic file")
return sizes_, len(sizes_)
# remove dict key "SolutionIndex" from dict
def cmpHelper(sol):
return {k:v for k, v in sol.items() if k!="SolutionIndex"}
def addKernel(solutionPool, solution):
for item in solutionPool:
if cmpHelper(item) == cmpHelper(solution):
index = item["SolutionIndex"]
debug("...Reuse previously existed kernel", end="")
break
else:
index = len(solutionPool)
_solution = deepcopy(solution) # if we don't we will see some subtle errors
_solution["SolutionIndex"] = index
solutionPool.append(_solution)
debug("...A new kernel has been added", end="")
debug("({}) {}".format(index, solutionPool[index]["SolutionNameMin"] if "SolutionNameMin" in solutionPool[index] else "(SolutionName N/A)"))
return solutionPool, index
def removeUnusedKernels(origData, prefix=""):
origNumSolutions = len(origData[5])
kernelsInUse = [ index for _, [index, _] in origData[7] ]
for i, solution in enumerate(origData[5]):
solutionIndex = solution["SolutionIndex"]
origData[5][i]["__InUse__"] = True if solutionIndex in kernelsInUse else False
# debug prints
for o in [o for o in origData[5] if o["__InUse__"]==False]:
debug("{}Solution ({}) {} is unused".format(
prefix,
o["SolutionIndex"],
o["SolutionNameMin"] if "SolutionNameMin" in o else "(SolutionName N/A)"))
# filter out dangling kernels
origData[5] = [ {k: v for k, v in o.items() if k != "__InUse__"}
for o in origData[5] if o["__InUse__"]==True ]
# reindex solutions
idMap = {} # new = idMap[old]
for i, solution in enumerate(origData[5]):
idMap[solution["SolutionIndex"]] = i
origData[5][i]["SolutionIndex"] = i
for i, [size, [oldSolIndex, eff]] in enumerate(origData[7]):
origData[7][i] = [size, [idMap[oldSolIndex], eff]]
numInvalidRemoved = origNumSolutions - len(origData[5])
return origData, numInvalidRemoved
def loadData(filename):
try:
stream = open(filename, "r")
except IOError:
print("Cannot open file: ", filename)
sys.stdout.flush()
sys.exit(-1)
data = yaml.load(stream, yaml.SafeLoader)
return data
# This matches the behavior of the legacy merge script, where the incremental
# logic file always replaces the base logic file even if it is slower -
# in the future the default force-merge policy may become False
def defaultForceMergePolicy(incFile):
if "arcturus" in incFile:
forceMerge = False
else:
forceMerge = True
return forceMerge
def msg(*args, **kwargs):
for i in args: print(i, end=" ")
print(**kwargs)
def verbose(*args, **kwargs):
if verbosity < 1: return
msg(*args, **kwargs)
def debug(*args, **kwargs):
if verbosity < 2: return
msg(*args, **kwargs)
# Tags distinguishing solution types
# Can be added to size key to allow solutions of each type to be present
# in logic file for a given size
class MfmaTag(IntEnum):
VALU = 0
MFMA = 1
def __str__(self):
return ["VALU", "MFMA"][self]
def __repr__(self):
return str(self)
class AlphaValueTag(IntEnum):
ANY = 0
ONE = 1
NEG_ONE = 2
ZERO = 3
def __str__(self):
return "Alpha="+["Any", "1", "-1", "0"][self]
def __repr__(self):
return str(self)
class BetaValueTag(IntEnum):
ANY = 0
ONE = 1
NEG_ONE = 2
ZERO = 3
def __str__(self):
return "Beta="+["Any", "1", "-1", "0"][self]
def __repr__(self):
return str(self)
def strToScalarValueTag(Class, value):
if value == "Any":
return Class.ANY
if value == 1:
return Class.ONE
if value == -1:
return Class.NEG_ONE
if value == 0:
return Class.ZERO
else:
raise RuntimeError("Unsupported value for Alpha/Beta scalar value")
class CEqualsDTag(IntEnum):
C_EQ_D = 0
C_NEQ_D = 1
def __str__(self):
return ["C=D", "C!=D"][self]
def __repr__(self):
return str(self)
# Tag of form (MFMATag, AlphaValueTag, BetaValueTag, CEqualsDTag)
def getSolutionTag(solution):
tagTuple = ()
if solution.get("EnableMatrixInstruction", False) or solution.get("MatrixInstruction", False):
tagTuple = tagTuple + (MfmaTag.MFMA,)
else:
tagTuple = tagTuple + (MfmaTag.VALU,)
tagTuple = tagTuple + (strToScalarValueTag(AlphaValueTag, solution.get("AssertAlphaValue", "Any")),)
tagTuple = tagTuple + (strToScalarValueTag(BetaValueTag, solution.get("AssertBetaValue", "Any")),)
tagTuple = tagTuple + (CEqualsDTag.C_EQ_D if solution.get("AssertCEqualsD", False) else CEqualsDTag.C_NEQ_D ,)
return tagTuple
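# Illustrative example (the solution dict below is hypothetical): an MFMA
# kernel asserting Beta=0 and C==D is tagged as
#
#   getSolutionTag({"MatrixInstruction": [32, 32, 2, 1],
#                   "AssertBetaValue": 0,
#                   "AssertCEqualsD": True})
#   # -> (MFMA, Alpha=Any, Beta=0, C=D)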
def findSolutionWithIndex(solutionData, solIndex):
# Check solution at the index corresponding to solIndex first
if solIndex < len(solutionData) and solutionData[solIndex]["SolutionIndex"] == solIndex:
return solutionData[solIndex]
else:
debug("Searching for index...")
solution = [s for s in solutionData if s["SolutionIndex"]==solIndex]
assert(len(solution) == 1)
return solution[0]
def addSolutionTagToKeys(solutionMap, solutionPool):
return [[[getSolutionTag(findSolutionWithIndex(solutionPool, idx))] + keys, [idx, eff]]
for [keys, [idx, eff]] in solutionMap]
def removeSolutionTagFromKeys(solutionMap):
return [[keys[1:], [idx, incEff]] for keys, [idx, incEff] in solutionMap]
# Used with addSolutionTags to allow faster general solutions to supersede slower specific ones
def findFastestCompatibleSolution(origDict, sizeMapping):
tags = sizeMapping[0]
# Tag of form (MFMATag, AlphaValueTag, BetaValueTag, CEqualsDTag)
compatibleTagList = [tags]
# Add all compatible tags to the list
if tags[1] != AlphaValueTag.ANY:
compatibleTagList = compatibleTagList + [(t[0], AlphaValueTag.ANY) + t[2:] for t in compatibleTagList]
if tags[2] != BetaValueTag.ANY:
compatibleTagList = compatibleTagList + [t[:2] + (BetaValueTag.ANY,) + t[3:] for t in compatibleTagList]
if tags[3] != CEqualsDTag.C_NEQ_D:
compatibleTagList = compatibleTagList + [t[:3] + (CEqualsDTag.C_NEQ_D,) + t[4:] for t in compatibleTagList]
#Find the fastest efficiency of all compatible tags
maxEfficiency = 0
for tag in compatibleTagList:
result = origDict.get((tag,) + sizeMapping[1:], None)
if result:
_, eff = origDict[(tag,) + sizeMapping[1:]]
maxEfficiency = max(maxEfficiency, eff)
return maxEfficiency
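# Illustrative example (tags hypothetical): for a size keyed with
# (MFMA, Alpha=1, Beta=0, C=D) the loop above checks 2 x 2 x 2 = 8 compatible
# variants -- every combination with Alpha=Any, Beta=Any and C!=D relaxed --
# and keeps the best efficiency found among them.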
# returns merged logic data as list
def mergeLogic(origData, incData, forceMerge, trimSize=True, addSolutionTags=False):
origNumSizes = len(origData[7])
origNumSolutions = len(origData[5])
incNumSizes = len(incData[7])
incNumSolutions = len(incData[5])
verbose(origNumSizes, "sizes and", origNumSolutions, "kernels in base logic file")
verbose(incNumSizes, "sizes and", incNumSolutions, "kernels in incremental logic file")
# Add SolutionTag to distinguish solutions with different requirements
origTaggedSizes = addSolutionTagToKeys(origData[7], origData[5])
incTaggedSizes = addSolutionTagToKeys(incData[7], incData[5])
if addSolutionTags:
origData[7] = origTaggedSizes
incData[7] = incTaggedSizes
# Print warning if addSolutionTags=False results in removed sizes
else:
origSet = {tuple(size) for size, [_, _] in origData[7]}
origTaggedSet = {tuple(size) for size, [_, _] in origTaggedSizes}
incSet = {tuple(size) for size, [_, _] in incData[7]}
incTaggedSet = {tuple(size) for size, [_, _] in incTaggedSizes}
if len(origSet) != len(origTaggedSet):
verbose("Warning:", len(origTaggedSet) - len(origSet), "duplicate sizes are present in base logic",
"that may not be handled correctly unless --add_solution_tags is used")
if len(incSet) != len(incTaggedSet):
verbose("Warning:", len(incTaggedSet) - len(incSet), "duplicate sizes are present in incremental logic",
"that may not be handled correctly unless --add_solution_tags is used")
if trimSize:
# trim 8-tuple gemm size format to 4-tuple [m, n, b, k]
# TODO future gemm size could include dictionary format so need robust preprocessing
[origData[7], origNumSizes] = fixSizeInconsistencies(origData[7], "base")
[incData[7], incNumSizes] = fixSizeInconsistencies(incData[7], "incremental")
origData, numOrigRemoved = removeUnusedKernels(origData, "Base logic file: ")
incData, numIncRemoved = removeUnusedKernels(incData, "Inc logic file: ")
solutionPool = deepcopy(origData[5])
solutionMap = deepcopy(origData[7])
origDict = {tuple(origSize): [i, origEff] for i, [origSize, [origIndex, origEff]] in enumerate(origData[7])}
for incSize, [incIndex, incEff] in incData[7]:
incSolution = findSolutionWithIndex(incData[5], incIndex)
try:
j, origEff = origDict[tuple(incSize)]
if incEff > origEff or forceMerge:
if incEff > origEff:
verbose("[O]", incSize, "already exists and has improved in performance.", end="")
elif forceMerge:
verbose("[!]", incSize, "already exists but does not improve in performance.", end="")
verbose("Efficiency:", origEff, "->", incEff, "(force_merge=True)" if forceMerge else "")
solutionPool, index = addKernel(solutionPool, incSolution)
solutionMap[j][1] = [index, incEff]
else:
verbose("[X]", incSize, "already exists but does not improve in performance.", end="")
verbose("Efficiency:", origEff, "->", incEff)
except KeyError:
if addSolutionTags and findFastestCompatibleSolution(origDict, tuple(incSize)) > incEff:
verbose("[X]", incSize, "has been rejected because a compatible solution already exists with higher performance")
else:
verbose("[-]", incSize, "has been added to solution table, Efficiency: N/A ->", incEff)
solutionPool, index = addKernel(solutionPool, incSolution)
solutionMap.append([incSize,[index, incEff]])
verbose(numOrigRemoved, "unused kernels removed from base logic file")
verbose(numIncRemoved, "unused kernels removed from incremental logic file")
# Remove SolutionTag for yaml output
if addSolutionTags:
solutionMap = removeSolutionTagFromKeys(solutionMap)
mergedData = deepcopy(origData)
mergedData[5] = solutionPool
mergedData[7] = solutionMap
mergedData, numReplaced = removeUnusedKernels(mergedData, "Merged data: ")
numSizesAdded = len(solutionMap)-len(origData[7])
numSolutionsAdded = len(solutionPool)-len(origData[5])
numSolutionsRemoved = numReplaced+numOrigRemoved # incremental file not counted
return [mergedData, numSizesAdded, numSolutionsAdded, numSolutionsRemoved]
def avoidRegressions(originalDir, incrementalDir, outputPath, forceMerge, trimSize=True, addSolutionTags=False):
originalFiles = allFiles(originalDir)
incrementalFiles = allFiles(incrementalDir)
ensurePath(outputPath)
# filter the incremental logic files that have the corresponding base file
incrementalFiles = [ i for i in incrementalFiles
if os.path.split(i)[-1] in [os.path.split(o)[-1] for o in originalFiles] ]
for incFile in incrementalFiles:
basename = os.path.split(incFile)[-1]
origFile = os.path.join(originalDir, basename)
forceMerge = defaultForceMergePolicy(incFile) if forceMerge is None else forceMerge
msg("Base logic file:", origFile, "| Incremental:", incFile, "| Merge policy: %s"%("Forced" if forceMerge else "Winner"), "| Trim size:", trimSize,
"| Add solution tags:", addSolutionTags)
origData = loadData(origFile)
incData = loadData(incFile)
# So far "SolutionIndex" in logic yamls has zero impact on actual 1-1 size mapping (but the order of the Solution does)
# since mergeLogic() takes that value very seriously so we reindex them here so it doesn't choke on duplicated SolutionIndex
origData = reindexSolutions(origData)
incData = reindexSolutions(incData)
mergedData, *stats = mergeLogic(origData, incData, forceMerge, trimSize, addSolutionTags)
msg(stats[0], "size(s) and", stats[1], "kernel(s) added,", stats[2], "kernel(s) removed")
with open(os.path.join(outputPath, basename), "w") as outFile:
yaml.safe_dump(mergedData,outFile,default_flow_style=None)
msg("File written to", os.path.join(outputPath, basename))
msg("------------------------------")
# partialLogicFilePaths: list of full paths to partial logic files
# outputDir: directory to write the final result to
# forceMerge: merge policy forwarded to mergeLogic() (None selects the default)
# trimSize: forwarded to mergeLogic(); trims 8-tuple sizes to [m, n, b, k]
# Expects: all the partial logic files have the same base name,
# but are located in different folders.
# Provides: one final logic file that is the merged result of all
# partial files.
# This is useful when a tuning task is shared between multiple
# machines, each of which provides a partial result.
def mergePartialLogics(partialLogicFilePaths, outputDir, forceMerge, trimSize=True, addSolutionTags=False):
logicFiles = deepcopy(partialLogicFilePaths)
ensurePath(outputDir)
baseLogicFile = logicFiles.pop(0)
baseLogicData = loadData(baseLogicFile)
msg("Base logic file:", baseLogicFile)
for f in logicFiles:
forceMerge = defaultForceMergePolicy(f) if forceMerge is None else forceMerge
msg("Incremental file:", f, "| Merge policy: %s"%("Forced" if forceMerge else "Winner"), "| Trim size:", trimSize)
incLogicData = loadData(f)
# So far "SolutionIndex" in logic yamls has zero impact on actual 1-1 size mapping (but the order of the Solution does)
# since mergeLogic() takes that value very seriously so we reindex them here so it doesn't choke on duplicated SolutionIndex
baseLogicData = reindexSolutions(baseLogicData)
incLogicData = reindexSolutions(incLogicData)
mergedData, *stats = mergeLogic(baseLogicData, incLogicData, forceMerge, trimSize, addSolutionTags)
msg(stats[0], "size(s) and", stats[1], "kernel(s) added,", stats[2], "kernel(s) removed")
# Use the merged data as the base data for the next partial logic file
baseLogicData = deepcopy(mergedData)
baseFileName = os.path.basename(baseLogicFile)
outputFilePath = os.path.join(outputDir, baseFileName)
with open(outputFilePath, "w") as outFile:
yaml.safe_dump(baseLogicData, outFile, default_flow_style=None)
msg("File written to", outputFilePath)
msg("------------------------------")
if __name__ == "__main__":
argParser = argparse.ArgumentParser()
argParser.add_argument("original_dir", help="The library logic directory without tuned sizes")
argParser.add_argument("incremental_dir", help="The incremental logic directory")
argParser.add_argument("output_dir", help="The output logic directory")
argParser.add_argument("-v", "--verbosity", help="0: summary, 1: verbose, 2: debug", default=1, type=int)
argParser.add_argument("--force_merge", help="Merge previously known sizes unconditionally. Default behavior if not arcturus", default="none")
argParser.add_argument("--notrim", help="Do not trim long size format down to short format (m,n,b,k). Default is --trim", action="store_false")
argParser.add_argument("--add_solution_tags", help="Add tags to the size key for solution properies, allowing for solutions with different requirements "
"to exist for the same size. Default doesn't add this tag.", action="store_true")
args = argParser.parse_args(sys.argv[1:])
originalDir = args.original_dir
incrementalDir = args.incremental_dir
outputPath = args.output_dir
verbosity = args.verbosity
forceMerge = args.force_merge.lower()
trimSize = args.notrim
add_solution_tags = args.add_solution_tags
if forceMerge in ["none"]: forceMerge=None
elif forceMerge in ["true", "1"]: forceMerge=True
elif forceMerge in ["false", "0"]: forceMerge=False
avoidRegressions(originalDir, incrementalDir, outputPath, forceMerge, trimSize, add_solution_tags)
| mit | -3,096,896,397,483,536,000 | 41.639381 | 157 | 0.660613 | false |
ruohoruotsi/librosa | tests/test_dtw.py | 1 | 2995 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import librosa
import numpy as np
from test_core import srand
import warnings
warnings.resetwarnings()
warnings.simplefilter('always')
def test_dtw_global():
# Example taken from:
# Meinard Mueller, Fundamentals of Music Processing
X = np.array([[1, 3, 3, 8, 1]])
Y = np.array([[2, 0, 0, 8, 7, 2]])
gt_D = np.array([[1., 2., 3., 10., 16., 17.],
[2., 4., 5., 8., 12., 13.],
[3., 5., 7., 10., 12., 13.],
[9., 11., 13., 7., 8., 14.],
[10, 10., 11., 14., 13., 9.]])
mut_D, _ = librosa.dtw(X, Y)
assert np.array_equal(gt_D, mut_D)
def test_dtw_global_diagonal():
# query is a linear ramp
X = np.linspace(0.1, 1, 10)
Y = X
gt_wp = list(zip(list(range(10)), list(range(10))))[::-1]
mut_D, mut_wp = librosa.dtw(X, Y, subseq=True, metric='cosine',
step_sizes_sigma=np.array([[1, 1]]),
weights_mul=np.array([1, ]))
assert np.array_equal(np.asarray(gt_wp), np.asarray(mut_wp))
def test_dtw_subseq():
srand()
# query is a linear ramp
X = np.linspace(0, 1, 100)
# database is query surrounded by noise
noise_len = 200
noise = np.random.rand(noise_len)
Y = np.concatenate((noise, noise, X, noise))
_, mut_wp = librosa.dtw(X, Y, subseq=True)
# estimated sequence has to match original sequence
# note the +1 due to python indexing
mut_X = Y[mut_wp[-1][1]:mut_wp[0][1]+1]
assert np.array_equal(X, mut_X)
def test_dtw_fill_off_diagonal_8_8():
# Case 1: Square matrix (N=M)
mut_x = np.ones((8, 8))
librosa.fill_off_diagonal(mut_x, 0.25)
gt_x = np.array([[1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1]])
assert np.array_equal(mut_x, gt_x)
assert np.array_equal(mut_x, gt_x.T)
def test_dtw_fill_off_diagonal_8_12():
# Case 2a: N!=M
mut_x = np.ones((8, 12))
librosa.fill_off_diagonal(mut_x, 0.25)
gt_x = np.array([[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
assert np.array_equal(mut_x, gt_x)
# Case 2b: (N!=M).T
mut_x = np.ones((8, 12)).T
librosa.fill_off_diagonal(mut_x, 0.25)
assert np.array_equal(mut_x, gt_x.T)
| isc | -4,646,562,285,603,238,000 | 28.362745 | 68 | 0.451753 | false |
petry/django-pressroom-googlecode | src/pressroom/models.py | 1 | 4225 | # python imports
from datetime import datetime
import os
# django imports
from django.conf import settings
from django.contrib.comments.moderation import CommentModerator, moderator
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
# other imports
from photologue.models import Gallery, Photo
# Get relative media path
try:
PRESSROOM_DIR = settings.PRESSROOM_DIR
except:
PRESSROOM_DIR = 'pressroom'
# define the models
class ArticleManager(models.Manager):
def get_published(self):
        return self.filter(publish=True, pub_date__lte=datetime.now())  # call now(): filter lookups do not evaluate callables
def get_drafts(self):
return self.filter(publish=False)
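# Hypothetical usage of the custom manager (illustrative only):
#   Article.objects.get_published()  # articles that are live right now
#   Article.objects.get_drafts()     # articles still marked as drafts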
class Article(models.Model):
pub_date = models.DateTimeField(_("publish date"), default=datetime.now)
headline = models.CharField(_("headline"),max_length=200)
slug = models.SlugField(help_text=_('A "Slug" is a unique URL-friendly title for an object.'),
unique_for_date="pub_date")
summary = models.TextField(help_text=_("A single paragraph summary or preview of the article."))
body = models.TextField(_("body text"))
author = models.CharField(_("author"), max_length=100)
publish = models.BooleanField(_("publish on site"), default=True,
help_text=_('Articles will not appear on the site until their "publish date".'))
sections = models.ManyToManyField('Section', related_name='articles', verbose_name=_('sections'))
photos = models.ManyToManyField(Photo, related_name='articles', null=True, blank=True, verbose_name=_('photos'))
documents = models.ManyToManyField('Document', related_name='articles', null=True, blank=True, verbose_name=_('documents'))
enable_comments = models.BooleanField(_('enable comments'),default=True)
# Custom article manager
objects = ArticleManager()
class Meta:
ordering = ['-pub_date']
get_latest_by = 'pub_date'
verbose_name = _('article')
verbose_name_plural = _('articles')
def __unicode__(self):
return self.headline
def get_absolute_url(self):
args = self.pub_date.strftime("%Y/%b/%d").lower().split("/") + [self.slug]
return reverse('pr-article-detail', args=args)
class ArticleCommentModerator(CommentModerator):
email_notification = True
enable_field = 'enable_comments'
def moderate(self, comment, content_object, request):
return True
if Article not in moderator._registry:
moderator.register(Article, ArticleCommentModerator)
class Document(models.Model):
file = models.FileField(_("document"), upload_to=PRESSROOM_DIR+"/documents/%Y/%b/%d")
pub_date = models.DateTimeField(_("date published"), default=datetime.now)
title = models.CharField(_('title'), max_length=200)
slug = models.SlugField(_('slug'))
summary = models.TextField(_('summary'))
class Meta:
ordering = ['-pub_date']
get_latest_by = 'pub_date'
verbose_name = _('document')
verbose_name_plural = _('documents')
def __unicode__(self):
return self.title
def get_absolute_url(self):
args = self.pub_date.strftime("%Y/%b/%d").lower().split("/") + [self.slug]
return reverse('pr-document-detail', args=args)
def doc_dir(self):
doc_dir = None
if self.file is not None:
doc_dir = os.path.dirname(self.file.path)
return doc_dir
def delete(self):
doc_dir = self.doc_dir()
super(Document, self).delete()
if doc_dir is not None:
if os.listdir(doc_dir) == []:
os.removedirs(doc_dir)
class Section(models.Model):
title = models.CharField(_('title'), max_length=80, unique=True)
slug = models.SlugField(_('slug'))
class Meta:
ordering = ['title']
verbose_name = _('section')
verbose_name_plural = _('sections')
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse('pr-section', args=[self.slug])
| bsd-3-clause | -5,970,563,566,317,768,000 | 34.111111 | 127 | 0.633373 | false |
xhair/TopOdoo_Addons | ToproERP_Expense/tests/test_expense_account.py | 1 | 5078 | # -*- coding: utf-8 -*-
from odoo.tests.common import TransactionCase
from odoo import exceptions
class TestExpenseAccount(TransactionCase):
"""This class extends the base TransactionCase, test ToproERP_Expense
"""
post_install = True
at_install = False
def setUp(self):
super(TestExpenseAccount, self).setUp()
        self.set_expense_account = self.env['set.expense.account'].create({  # expense type
'name': u'食宿'
})
        self.relevance_account = self.env['relevance.account'].create({  # bank account
'name': 'testAccount',
'relevance_account': 'testBank'
})
        expense_account_details = [(0, 0, {  # expense detail lines
'details_name': 'things1',
'details_expenses': 199.00,
'details_remark': 'remark1',
}), (0, 0, {
'details_name': 'thing2',
'details_expenses': 200.00,
'details_remark': 'remark2',
})]
        self.expense_account = self.env['expense.account'].create({  # main expense record
'type_ids': self.set_expense_account.id,
'relevance_account_ids': self.relevance_account.id,
'details_ids': expense_account_details,
'state': 'draft'
})
self.confirm = self.env['confirm'].create({
'actual_pay': 200.00,
'should_pay': 200.00,
})
self.enterprise_conf = self.env['wechat.enterprise.config'].create({
'name': u'test',
'corp_id': u'test',
'corp_secret': u'test'
})
def test_enterprise_conf(self):
self.assertEquals(self.enterprise_conf.name, u'test')
self.assertEquals(self.enterprise_conf.corp_id, u'test')
self.assertEquals(self.enterprise_conf.corp_secret, u'test')
def test_expenses(self):
        self.assertEquals(self.expense_account.expenses_sum, 399.00)  # check the total amount
        self.assertEquals(self.expense_account.type_ids.name, u'食宿')  # check the expense type
        self.assertEquals(self.expense_account.relevance_account_ids.name, 'testAccount')  # check the bank account
    def test_submit(self):  # test the submit method
if self.expense_account.state == 'draft':
self.expense_account.button_submit()
submit_value = self.env['syt.oa.gld'].search(
                [('relevant_documents', '=', self.expense_account.name)]  # check the expense record was created as a gld work order
)
if submit_value:
self.assertEquals(submit_value.state, 'pending')
                self.assertEquals(self.expense_account.state, 'pending_approval')  # the expense record should now be pending approval
else:
raise exceptions.ValidationError(u"submit_value不能为null!")
else:
raise exceptions.ValidationError(u"当前状态不为draft!")
def test_recall(self): # 测试追回方法
self.expense_account.button_submit()
recall_value = self.env['syt.oa.gld'].search(
[('relevant_documents', '=', self.expense_account.name)])
if recall_value:
if self.expense_account.state == 'pending_approval':
self.expense_account.button_recall()
self.assertEquals(recall_value.state, 'cancel')
                self.assertEquals(self.expense_account.state, 'draft')  # the expense record should be back to draft
else:
raise exceptions.ValidationError(u"当前状态不为pending_approval!")
else:
raise exceptions.ValidationError(u"recall_value不能为null!")
def test_confirm(self): # 测试确认付款
self.expense_account.button_submit()
confirm_value = self.env['syt.oa.gld'].search(
[('relevant_documents', '=', self.expense_account.name)])
agree = self.env['syt.oa.gld.opinion'].with_context(gld_id=confirm_value.id, check_state=u'通过').create({
# 'opinion': u'通过'
})
agree.save_opinion()
if confirm_value:
if self.expense_account.state == 'approval_pass':
confirm = self.env['confirm'].create({
'actual_pay': 200.00,
'confirm_id': self.expense_account.id
})
confirm.confirm()
self.assertEquals(self.expense_account.state, 'advanced')
else:
raise exceptions.ValidationError(u"当前状态不为approval_pass!")
else:
raise exceptions.ValidationError(u"confirm_value不能为null!")
def test_approval_reject(self): # 测试审批驳回
self.expense_account.button_submit()
return_value = self.env['syt.oa.gld'].search([(
'relevant_documents', '=', self.expense_account.name
)])
if return_value:
return_value.gld_state_cancel()
self.assertEquals(self.expense_account.state, 'approval_reject')
else:
raise exceptions.ValidationError(U"return_value不能为null!")
| agpl-3.0 | -4,623,067,432,592,819,000 | 40.179487 | 112 | 0.581777 | false |
ch-aurich/lint_eagle | lint_eagle/lint_eagle.py | 1 | 1332 | from lxml import objectify
import argparse
import eagle_lbr
import eagle_brd
#import helper
import sys
import lint_framework
#TODO: state contains a list of issues structured like that: $object_name(str), $object_type(component, symbol, package, library, schematic, board, etc.), $issue_type(E1, E2, E3, W1...)
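# e.g. a single issue entry could look like (illustrative only):
#   ("R5", "component", "W1")   # (object_name, object_type, issue_type)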
parser = argparse.ArgumentParser(description='Check eagle files for good style. It checks that you use eagle in a sane way instead stressing your luck and hoping that everything will be ok.')
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="count", default=0,help="increase output verbosity")
group.add_argument("-q", "--quiet", action="store_true", default=0,help="quiet")
parser.add_argument('file', action="store", nargs='+', type=argparse.FileType('rb'))
args = parser.parse_args(sys.argv[1:])
#print (args)
for file_handle in args.file:
issues = []
eagle_xml_string = file_handle.read()
eagle_object = objectify.fromstring(eagle_xml_string)
issues += eagle_lbr.lbr_check_name_and_value_package(eagle_object, args)
issues += eagle_brd.brd_check_testpad_on_all_nets(eagle_object, args)
print ("testing file: \"" + str(file_handle.name) + "\"\n")
for issue in issues:
print("\t" + str(issue))
if issues == []:
sys.exit(0)
else:
sys.exit(1)
| gpl-2.0 | -4,695,327,184,534,536,000 | 32.3 | 191 | 0.713213 | false |
Neill3d/MoPlugs | PythonScripts/Startup/CmdLineRenderingTool.py | 1 | 11975 |
#########################################################
##
## Prepare command line to run a rendering processing from *.bat
##
## Github repo - https://github.com/Neill3d/MoPlugs
## Licensed under BSD 3-clause
## https://github.com/Neill3d/MoPlugs/blob/master/LICENSE
##
## Author Sergey Solokhin (Neill3d) 2017
## e-mail to: [email protected]
## www.neill3d.com
##
#########################################################
from pyfbsdk import *
from pyfbsdk_additions import *
import os
gDEVELOPMENT = False
gCmdLineToolVersion = 0.31
lApp = FBApplication()
lSystem = FBSystem()
gMoBuPath = FBEdit()
gButtonAA = FBButton()
gButtonOffscreen = FBButton()
gListCamera = FBList()
gListRenderer = FBList()
gEditStartTime = FBEditNumber()
gEditStopTime = FBEditNumber()
gEditStepTime = FBEditNumber()
gEditPath = FBEdit()
gListPictureFormat = FBList()
gListExt = FBList()
gListCodec = FBList()
gExtensionKeys = [ 'AVI',
'MOV',
'JPG',
'TGA',
'TIF',
'TIFF',
'YUV',
'SWF (Flash)' ]
gExtensions = {'AVI' : '.avi',
'MOV' : '.mov',
'JPG' : '.jpg',
'TGA' : '.tga',
'TIF' : '.tif',
'TIFF' : '.tif',
'YUV' : 'yuv',
'SWF (Flash)' : '.swf'}
gPictureFormats = {
'From Camera' : 'FromCamera',
'(720 x 486) D1 NTSC' : 'D1_NTSC',
'(640 x 480) NTSC' : 'NTSC',
'(570 x 486) PAL' : 'PAL',
'(720 x 576) D1 PAL' : 'D1_PAL',
'(1920 x 1080) HD' : 'HD',
'(640 x 480) 640x480' : '640x480',
'(320 x 200) 320x200' : '320x200',
'(320 x 240) 320x240' : '320x240',
'(128 x 128) 128x128' : '128x128',
'Full Screen' : 'FullScreen' }
def OnButtonChooseFile(control, event):
print "choose file"
lDialog = FBFilePopup()
lDialog.Style = FBFilePopupStyle.kFBFilePopupSave
lDialog.Filter = '*.avi'
lDialog.FileName = gEditPath.Text
if lDialog.Execute():
gEditPath.Text = lDialog.FullFilename
def GenerateCmdLine():
fbxscenePath = lApp.FBXFileName
cmdline = 'start /d "' + gMoBuPath.Text + '\\"' + ' motionbuilder.exe'
cmdline = cmdline + ' -console -suspendMessages --sdk-begin'
# DONE: choose 'window' or 'offline' render modes
if gButtonOffscreen.State == 1:
cmdline = cmdline + ' --r offscreen'
else:
cmdline = cmdline + ' --r window'
# antialiasing
if gButtonAA.State == 1:
cmdline = cmdline + ' --aa 1'
else:
cmdline = cmdline + ' --aa 0'
# choose renderer
currRendererName = gListRenderer.Items[gListRenderer.ItemIndex]
cmdline = cmdline + ' --rr \"' + currRendererName + '\"'
# render camera
if gListCamera.ItemIndex > 0:
cameraName = gListCamera.Items[gListCamera.ItemIndex]
cmdline = cmdline + ' --cam \"' + cameraName + '\"'
else:
currCamera = lSystem.Renderer.CurrentCamera
if isinstance(currCamera, FBCameraSwitcher):
currCamera = currCamera.CurrentCamera
if None != currCamera:
cmdline = cmdline + ' --cam \"' + currCamera.LongName + '\"'
# render time range
cmdline = cmdline + ' --stt ' + str(int(gEditStartTime.Value))
cmdline = cmdline + ' --stp ' + str(int(gEditStopTime.Value))
cmdline = cmdline + ' --step ' + str(int(gEditStepTime.Value))
# output picture format
pictureFormatKey = gListPictureFormat.Items[gListPictureFormat.ItemIndex]
pictureFormat = gPictureFormats[pictureFormatKey]
cmdline = cmdline + ' --pctFrm ' + pictureFormat
# output file format
formatKey = gListExt.Items[gListExt.ItemIndex]
outformat = gExtensions[formatKey]
cmdline = cmdline + ' --oFmt ' + outformat
codecName = ''
if gListCodec.ItemIndex >= 0:
codecName = gListCodec.Items[gListCodec.ItemIndex]
cmdline = cmdline + ' --c ' + codecName
# output file
cmdline = cmdline + ' --of \"' + gEditPath.Text + '\"'
# fin
cmdline = cmdline + ' --close'
cmdline = cmdline + ' --sdk-end '
cmdline = cmdline + fbxscenePath
return cmdline
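# A generated command line looks roughly like this (all values illustrative):
#   start /d "C:\MotionBuilder\bin\x64\" motionbuilder.exe -console
#       -suspendMessages --sdk-begin --r offscreen --aa 1
#       --rr "Default Renderer" --cam "Producer Perspective"
#       --stt 0 --stp 100 --step 1 --pctFrm HD --oFmt .avi --c mjpeg
#       --of "D:\render\out.avi" --close --sdk-end scene.fbx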
def OnButtonMakeCmd(control, event):
print "make cmd"
lDialog = FBFilePopup()
lDialog.Style = FBFilePopupStyle.kFBFilePopupSave
lDialog.Filter = '*.bat'
if lDialog.Execute():
filePath = lDialog.FullFilename
f = open(filePath, 'wt')
f.write( GenerateCmdLine() )
f.close()
def CheckOutputFileName():
currExtKey = gExtensionKeys[gListExt.ItemIndex]
currExt = gExtensions[currExtKey]
thisFile = gEditPath.Text
base = os.path.splitext(thisFile)[0]
middle = ''
if gListExt.ItemIndex > 1 and gListExt.ItemIndex < len(gExtensionKeys)-1:
if len(base) > 4 and base[-4:] == '####':
middle = ''
else:
middle = '####'
else:
if len(base) > 4 and base[-4:] == '####':
middle = ''
base = base[:-4]
gEditPath.Text = base + middle + currExt
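# e.g. with TGA selected, "D:\render\out.avi" becomes "D:\render\out####.tga";
# switching back to AVI or MOV strips the "####" frame padding again
# (paths illustrative).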
def UpdateCodecList():
currFormat = gListExt.Items[gListExt.ItemIndex]
codecMan = FBVideoCodecManager()
codecList = []
codecList = codecMan.GetCodecIdList(currFormat)
gListCodec.Items.removeAll()
for codec in codecList:
if codec != '\x0b':
gListCodec.Items.append(codec)
def OnOutputFormatChange(control, event):
CheckOutputFileName()
UpdateCodecList()
def OnOutputFileChange(control, event):
CheckOutputFileName()
def OnButtonAbout(control, event):
FBMessageBox( 'CmdLine Rendering Tool', ' Version: ' + str(gCmdLineToolVersion) + '\n\n Author Sergey Solokhin (c) 2017', 'Ok' )
def PopulateTool(t):
# Create Main region frame:
x = FBAddRegionParam(5,FBAttachType.kFBAttachLeft,"")
y = FBAddRegionParam(5,FBAttachType.kFBAttachTop,"")
w = FBAddRegionParam(-5,FBAttachType.kFBAttachRight,"")
h = FBAddRegionParam(-5,FBAttachType.kFBAttachBottom,"")
main = FBVBoxLayout()
t.AddRegion("main","main", x, y, w, h)
t.SetControl("main",main)
#
mainOptions = FBHBoxLayout()
mainTitles = FBVBoxLayout()
mainValues = FBVBoxLayout()
main.Add(mainOptions, 365)
mainOptions.AddRelative(mainTitles, 0.25)
mainOptions.AddRelative(mainValues, 0.75)
b = FBLabel()
b.Caption = 'MoBu Path:'
mainTitles.Add(b, 35)
gMoBuPath.Text = lSystem.ApplicationPath # + '\\motionbuilder.exe'
mainValues.Add(gMoBuPath, 35)
b = FBLabel()
b.Caption = ''
mainTitles.Add(b, 25)
gButtonAA.Style = FBButtonStyle.kFBCheckbox
gButtonAA.Caption = 'Antialiasing'
gButtonAA.State = 0
mainValues.Add(gButtonAA, 25)
b = FBLabel()
b.Caption = ''
mainTitles.Add(b, 25)
gButtonOffscreen.Style = FBButtonStyle.kFBCheckbox
gButtonOffscreen.Caption = 'Offscreen Render'
gButtonOffscreen.State = 1
mainValues.Add(gButtonOffscreen, 25)
# choose camera
# TODO: we should use current scene camera instead
b = FBLabel()
b.Caption = 'Camera:'
mainTitles.Add(b, 35)
gListCamera.Style = FBListStyle.kFBDropDownList
mainValues.Add(gListCamera, 35)
# choose render callback
b = FBLabel()
b.Caption = 'Render Using:'
mainTitles.Add(b, 35)
gListRenderer.Style = FBListStyle.kFBDropDownList
mainValues.Add(gListRenderer, 35)
# let's choose a picture format
b = FBLabel()
b.Caption = 'Picture Format:'
mainTitles.Add(b, 35)
gListPictureFormat.Style = FBListStyle.kFBDropDownList
gListPictureFormat.Items.append( 'From Camera' )
gListPictureFormat.Items.append( '(720 x 486) D1 NTSC' )
gListPictureFormat.Items.append( '(640 x 480) NTSC' )
gListPictureFormat.Items.append( '(570 x 486) PAL' )
gListPictureFormat.Items.append( '(720 x 576) D1 PAL' )
gListPictureFormat.Items.append( '(1920 x 1080) HD' )
gListPictureFormat.Items.append( '(640 x 480) 640x480' )
gListPictureFormat.Items.append( '(320 x 200) 320x200' )
gListPictureFormat.Items.append( '(320 x 240) 320x240' )
gListPictureFormat.Items.append( '(128 x 128) 128x128' )
gListPictureFormat.Items.append( 'Full Screen' )
mainValues.Add(gListPictureFormat, 35)
# output filename and path
lGrabber = FBVideoGrabber()
lGrabberOptions = lGrabber.GetOptions()
b = FBLabel()
b.Caption = 'Output Format:'
mainTitles.Add(b, 35)
gListExt.Style = FBListStyle.kFBDropDownList
for name in gExtensionKeys:
gListExt.Items.append(name)
gListExt.OnChange.Add( OnOutputFormatChange )
mainValues.Add(gListExt, 35)
# output file codec
b = FBLabel()
b.Caption = 'Output Video Codec:'
mainTitles.Add(b, 35)
gListCodec.Style = FBListStyle.kFBDropDownList
UpdateCodecList()
mainValues.Add(gListCodec, 35)
# output filename
b = FBLabel()
b.Caption = 'Output File:'
mainTitles.Add(b, 25)
gEditPath.Text = lGrabberOptions.OutputFileName
gEditPath.OnChange.Add( OnOutputFileChange )
mainValues.Add(gEditPath, 25)
#
CheckOutputFileName()
b = FBButton()
b.Caption = "Choose an output file..."
b.OnClick.Add( OnButtonChooseFile )
mainValues.Add(b, 35)
timeLayout = FBHBoxLayout()
b = FBLabel()
b.Caption = 'Start:'
timeLayout.AddRelative(b, 0.1)
gEditStartTime.Value = 0.0
timeLayout.AddRelative(gEditStartTime, 0.25)
b = FBLabel()
b.Caption = 'Stop:'
timeLayout.AddRelative(b, 0.1)
gEditStopTime.Value = 0.0
timeLayout.AddRelative(gEditStopTime, 0.25)
b = FBLabel()
b.Caption = 'Step:'
timeLayout.AddRelative(b, 0.1)
gEditStepTime.Value = 1.0
timeLayout.AddRelative(gEditStepTime, 0.25)
main.Add(timeLayout, 35)
b = FBLabel()
b.Caption = ''
main.Add(b, 25)
toolbox = FBHBoxLayout()
b = FBButton()
b.Caption = "Make a Command Line"
b.OnClick.Add( OnButtonMakeCmd )
toolbox.AddRelative(b, 0.75)
b = FBButton()
b.Caption = "About"
b.OnClick.Add( OnButtonAbout )
toolbox.AddRelative(b, 0.25)
main.Add(toolbox, 25)
def RefreshUI():
# list of scene cameras
gListCamera.Items.removeAll()
gListCamera.Items.append( 'Current Camera' )
for lCamera in lSystem.Scene.Cameras:
gListCamera.Items.append( lCamera.LongName )
# list of renderer callbacks
gListRenderer.Items.removeAll()
gListRenderer.Items.append( "Default Renderer" )
for lRenderCallback in lSystem.Renderer.RendererCallbacks:
gListRenderer.Items.append( lRenderCallback.Name )
def EventFileChange(control, event):
RefreshUI()
def EventShowTool(control, event):
if True == event.Shown:
RefreshUI()
lApp.OnFileNewCompleted.Add(EventFileChange)
lApp.OnFileOpenCompleted.Add(EventFileChange)
lApp.OnFileMerge.Add(EventFileChange)
else:
lApp.OnFileNewCompleted.Remove(EventFileChange)
lApp.OnFileOpenCompleted.Add(EventFileChange)
lApp.OnFileMerge.Remove(EventFileChange)
def CreateTool():
t = None
try:
t = FBCreateUniqueTool("CmdLine Rendering Tool")
except NameError:
t = CreateUniqueTool("CmdLine Rendering Tool")
print "supporting MoBu 2010"
if t:
t.StartSizeX = 400
t.StartSizeY = 500
t.OnShow.Add( EventShowTool )
PopulateTool(t)
if gDEVELOPMENT:
ShowTool(t)
CreateTool() | bsd-3-clause | 5,143,914,506,032,319,000 | 26.531034 | 132 | 0.605762 | false |
tamasgal/django-tornado | run_tornado.py | 1 | 1230 | #!/usr/bin/env python
#
# Runs a Tornado web server with a django project
# Make sure to edit the DJANGO_SETTINGS_MODULE to point to your settings.py
#
# http://localhost:8080/hello-tornado
# http://localhost:8080
import sys
import os
from tornado.options import options, define, parse_command_line
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.wsgi
from django.core.wsgi import get_wsgi_application
define('port', type=int, default=8080)
class HelloHandler(tornado.web.RequestHandler):
def get(self):
self.write('Hello from tornado')
def main():
os.environ['DJANGO_SETTINGS_MODULE'] = 'demosite.settings' # TODO: edit this
sys.path.append('./demosite') # path to your project if needed
parse_command_line()
wsgi_app = get_wsgi_application()
container = tornado.wsgi.WSGIContainer(wsgi_app)
tornado_app = tornado.web.Application(
[
('/hello-tornado', HelloHandler),
('.*', tornado.web.FallbackHandler, dict(fallback=container)),
])
server = tornado.httpserver.HTTPServer(tornado_app)
server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
main()
| mit | 3,685,607,962,132,046,300 | 23.6 | 80 | 0.695122 | false |
renatoGarcia/tangram | test/manual/imshow.py | 1 | 1279 | # Copyright 2017 The Tangram Developers. See the AUTHORS file at the
# top-level directory of this distribution and at
# https://github.com/renatoGarcia/tangram/blob/master/AUTHORS.
#
# This file is part of Tangram.
#
# Tangram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tangram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Tangram in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
import sys
sys.path.insert(0, '../../')
import os
from scipy import misc
import tangram as tg
from tangram import recipes
def main():
dir_path = os.path.dirname(os.path.realpath(__file__))
imdata = misc.imread(dir_path + '/../data/bluebell.jpg')
tg.recipes.imshow([imdata, imdata, imdata])
# tg.recipes.imshow(imdata)
if __name__ == '__main__':
main()
| lgpl-3.0 | -278,874,658,528,156,960 | 30.975 | 74 | 0.73104 | false |
sserrot/champion_relationships | venv/Lib/site-packages/networkx/algorithms/shortest_paths/astar.py | 1 | 5469 | # -*- coding: utf-8 -*-
# Copyright (C) 2004-2019 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
#
# Authors: Salim Fadhley <[email protected]>
# Matteo Dell'Amico <[email protected]>
"""Shortest paths and path lengths using the A* ("A star") algorithm.
"""
from heapq import heappush, heappop
from itertools import count
import networkx as nx
from networkx.utils import not_implemented_for
__all__ = ['astar_path', 'astar_path_length']
@not_implemented_for('multigraph')
def astar_path(G, source, target, heuristic=None, weight='weight'):
"""Returns a list of nodes in a shortest path between source and target
using the A* ("A-star") algorithm.
There may be more than one shortest path. This returns only one.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path
target : node
Ending node for path
heuristic : function
       A function to evaluate the estimate of the distance
       from a node to the target.  The function takes
       two node arguments and must return a number.
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight.
Raises
------
NetworkXNoPath
If no path exists between source and target.
Examples
--------
>>> G = nx.path_graph(5)
>>> print(nx.astar_path(G, 0, 4))
[0, 1, 2, 3, 4]
>>> G = nx.grid_graph(dim=[3, 3]) # nodes are two-tuples (x,y)
>>> nx.set_edge_attributes(G, {e: e[1][0]*2 for e in G.edges()}, 'cost')
>>> def dist(a, b):
... (x1, y1) = a
... (x2, y2) = b
... return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
>>> print(nx.astar_path(G, (0, 0), (2, 2), heuristic=dist, weight='cost'))
[(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)]
See Also
--------
shortest_path, dijkstra_path
"""
if source not in G or target not in G:
msg = 'Either source {} or target {} is not in G'
raise nx.NodeNotFound(msg.format(source, target))
if heuristic is None:
# The default heuristic is h=0 - same as Dijkstra's algorithm
def heuristic(u, v):
return 0
push = heappush
pop = heappop
# The queue stores priority, node, cost to reach, and parent.
# Uses Python heapq to keep in priority order.
# Add a counter to the queue to prevent the underlying heap from
# attempting to compare the nodes themselves. The hash breaks ties in the
# priority and is guaranteed unique for all nodes in the graph.
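    # Each queue entry is a 5-tuple: (g + h, tiebreak id, node, g, parent);
    # the start node below is pushed as (0, 0, source, 0, None).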
c = count()
queue = [(0, next(c), source, 0, None)]
# Maps enqueued nodes to distance of discovered paths and the
# computed heuristics to target. We avoid computing the heuristics
# more than once and inserting the node into the queue too many times.
enqueued = {}
# Maps explored nodes to parent closest to the source.
explored = {}
while queue:
# Pop the smallest item from queue.
_, __, curnode, dist, parent = pop(queue)
if curnode == target:
path = [curnode]
node = parent
while node is not None:
path.append(node)
node = explored[node]
path.reverse()
return path
if curnode in explored:
# Do not override the parent of starting node
if explored[curnode] is None:
continue
# Skip bad paths that were enqueued before finding a better one
qcost, h = enqueued[curnode]
if qcost < dist:
continue
explored[curnode] = parent
for neighbor, w in G[curnode].items():
ncost = dist + w.get(weight, 1)
if neighbor in enqueued:
qcost, h = enqueued[neighbor]
# if qcost <= ncost, a less costly path from the
# neighbor to the source was already determined.
# Therefore, we won't attempt to push this neighbor
# to the queue
if qcost <= ncost:
continue
else:
h = heuristic(neighbor, target)
enqueued[neighbor] = ncost, h
push(queue, (ncost + h, next(c), neighbor, ncost, curnode))
raise nx.NetworkXNoPath("Node %s not reachable from %s" % (target, source))
def astar_path_length(G, source, target, heuristic=None, weight='weight'):
"""Returns the length of the shortest path between source and target using
the A* ("A-star") algorithm.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path
target : node
Ending node for path
heuristic : function
       A function to evaluate the estimate of the distance
       from a node to the target.  The function takes
       two node arguments and must return a number.
Raises
------
NetworkXNoPath
If no path exists between source and target.
See Also
--------
astar_path
"""
if source not in G or target not in G:
msg = 'Either source {} or target {} is not in G'
raise nx.NodeNotFound(msg.format(source, target))
path = astar_path(G, source, target, heuristic, weight)
return sum(G[u][v].get(weight, 1) for u, v in zip(path[:-1], path[1:]))
| mit | 4,869,253,740,673,929,000 | 30.073864 | 79 | 0.59115 | false |
diegocortassa/TACTIC | src/tactic/ui/container/wizard_wdg.py | 1 | 19950 | ############################################################
#
# Copyright (c) 2005-2011, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ["WizardWdg", "TestWizardWdg"]
from pyasm.common import Common
from pyasm.web import *
from pyasm.widget import IconWdg, IconButtonWdg, SelectWdg, ProdIconButtonWdg, TextWdg
from tactic.ui.common import BaseRefreshWdg
class WizardWdg(BaseRefreshWdg):
ARGS_KEYS = {
'submit_title': {
'description': 'Title shown on submit button',
'values': 'true|false',
'category': 'Display'
},
'command': {
'description': 'Python command class to run on submit',
'category': 'Display'
},
'script': {
'description': 'Python script path to run on submit',
'category': 'Display'
},
'jsscript': {
'description': 'Javascript path to run on submit',
'category': 'Display'
},
'views': {
'description': 'List of views to display for each page',
'category': 'Display'
}
}
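    # Minimal usage sketch (view names and command path are assumptions):
    #
    #   wizard = WizardWdg(title="Setup Wizard",
    #                      views="setup.page1|setup.page2",
    #                      titles="First|Second",
    #                      command="tactic.command.MyWizardCmd")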
def __init__(self, **kwargs):
super(WizardWdg, self).__init__(**kwargs)
self.submit_button = None
def add_submit_button(self, button):
self.submit_button = button
def get_display(self):
top = DivWdg()
top.add_class("spt_wizard_top")
self.height = self.kwargs.get("height")
width = self.kwargs.get("width")
if not width:
width = ""
self.width = width
inner = DivWdg()
top.add(inner)
inner.add_style("width: %s" % width)
title = self.kwargs.get("title")
if not title:
title = "none"
if title != "none":
title_wdg = DivWdg()
inner.add(title_wdg)
title_wdg.add(title)
title_wdg.add_style("font-size: 16px")
title_wdg.add_style("font-weight: bold")
inner.add("<br/>")
self.titles = self.kwargs.get("titles")
if isinstance(self.titles, basestring):
self.titles = self.titles.split("|")
if not self.titles:
self.titles = []
views = self.kwargs.get("views")
if views:
from tactic.ui.panel import CustomLayoutWdg
if isinstance(views, basestring):
views = views.split("|")
            for i, view in enumerate(views):
                # build the page widget first so its name can serve as a
                # fallback title (previously `widget` was read before it
                # was assigned on the first pass)
                widget = CustomLayoutWdg(view=view)
                if i < len(self.titles):
                    title = self.titles[i]
                else:
                    title = widget.get_name()
                title = title.replace(".", " ")
                title = Common.get_display_title(title)
                self.add(widget, title)
header_wdg = self.get_header_wdg()
inner.add(header_wdg)
#header_wdg.add_color("background", "background", -5)
header_wdg.add_class("spt_popup_header")
inner.add("<br/>")
inner.add("<hr/>")
pages_div = DivWdg()
pages_div.add_class("spt_popup_body")
inner.add(pages_div)
pages_div.add_style("overflow-y: auto")
for i, widget in enumerate(self.widgets):
page_div = DivWdg()
page_div.add_class("spt_wizard_page")
pages_div.add(page_div)
page_div.add_style("padding: 10px")
page_div.add_style("min-height: 300px")
if self.height:
page_div.add_style("height: %s" % self.height)
page_div.add_style("overflow-y: auto")
if i != 0:
page_div.add_style("display: none")
else:
page_div.add_class("spt_wizard_selected")
page_div.add(widget)
pages_div.add("<hr/>")
bottom_wdg = self.get_bottom_wdg()
bottom_wdg.add_class("spt_popup_footer")
inner.add(bottom_wdg)
return top
def add(self, widget, name):
widget.set_name(name)
super(WizardWdg, self).add(widget, name)
def get_header_wdg(self):
div = DivWdg()
div.add_style("text-align: center")
div.add_style("width: %s" % self.width)
div.add("<hr/>")
dots_div = DivWdg()
#dots_div.add_style("margin: -28px auto 0px auto")
dots_div.add_style("margin: -28px auto 0px auto")
div.add(dots_div)
left = 50
width = 50
dots_div.add_style("width", (left+width)*len(self.widgets)+left)
for i, widget in enumerate(self.widgets):
on_dot = DivWdg()
on_dot.add_style("width: 20px")
on_dot.add_style("height: 18px")
on_dot.add_style("padding-top: 2px")
on_dot.add_style("border-radius: 20px")
on_dot.add_style("background: rgba(188,215,207,1.0)")
on_dot.add_style("margin: 6 auto")
#on_dot.add(" ")
on_dot.add_border()
#on_dot = IconWdg("", IconWdg.DOT_GREEN)
on_dot.add_class("spt_wizard_on_dot")
off_dot = DivWdg()
off_dot.add_style("width: 12px")
off_dot.add_style("height: 10px")
off_dot.add_style("padding-top: 2px")
off_dot.add_style("border-radius: 10px")
#off_dot.add_style("background: rgba(215,188,207,1.0)")
off_dot.add_style("background: #DDD")
off_dot.add_style("margin: 11 auto 12 auto")
#off_dot.add(" ")
off_dot.add_border()
#off_dot = IconWdg("", IconWdg.DOT_GREY)
off_dot.add_class("spt_wizard_off_dot")
if i == 0:
off_dot.add_style("display: none")
else:
on_dot.add_style("display: none")
dots_div.add_style("position: relative")
dot_div = DivWdg()
dot_div.add_style("text-align: center")
dot_div.add_attr("spt_selected_index", i)
dot_div.add_class("spt_wizard_link")
dot_div.add_class("hand")
dots_div.add(dot_div)
dot_div.add(on_dot)
dot_div.add(off_dot)
dot_div.add_style("width: %spx" % width)
dot_div.add_style("float: left")
dot_div.add_style("margin-left: %spx" % left)
dot_div.add_style("text-align: center")
on_dot.add("%s" % (i+1))
off_dot.add("%s" % (i+1))
off_dot.add_style("font-size: 0.6em")
on_dot.add_style("text-align: center")
off_dot.add_style("text-align: center")
name_div = DivWdg()
dot_div.add(name_div)
if i < len(self.titles):
title = self.titles[i]
else:
title = widget.get_name()
title = title.replace(".", " ")
title = Common.get_display_title(title)
#title = "%d %s" % (i+1, title)
name_div.add(title)
name_div.add_style("font-weight: bold")
name_div.add_style("width: 80px")
name_div.add_style("margin-left: -17px")
div.add_relay_behavior( {
'type': 'mouseup',
'bvr_match_class': 'spt_wizard_link',
'cbjs_action': '''
var top = bvr.src_el.getParent(".spt_wizard_top");
var top = bvr.src_el.getParent(".spt_wizard_top");
var pages = top.getElements(".spt_wizard_page");
var on_dots = top.getElements(".spt_wizard_on_dot");
var off_dots = top.getElements(".spt_wizard_off_dot");
var selected_index = parseInt( bvr.src_el.getAttribute("spt_selected_index"));
for (var i = 0; i < pages.length; i++) {
var page = pages[i];
var on_dot = on_dots[i];
var off_dot = off_dots[i];
if (page.hasClass("spt_wizard_selected")) {
page.removeClass("spt_wizard_selected");
}
page.setStyle("display", "none");
on_dot.setStyle("display", "none");
off_dot.setStyle("display", "");
}
var back = top.getElement(".spt_wizard_back");
var next = top.getElement(".spt_wizard_next");
next.setStyle("display", "");
back.setStyle("display", "");
if (selected_index == 0) {
back.setStyle("display", "none");
}
else if (selected_index == pages.length-1) {
next.setStyle("display", "none");
}
var page = pages[selected_index];
page.setStyle("display", "");
page.addClass("spt_wizard_selected");
var on_dot = on_dots[selected_index];
var off_dot = off_dots[selected_index];
on_dot.setStyle("display", "");
off_dot.setStyle("display", "none");
'''
} )
"""
for i, widget in enumerate(self.widgets):
name_div = DivWdg()
div.add(name_div)
name_div.add_class("spt_wizard_link")
name_div.add_attr("spt_selected_index", i)
name_div.add_class("hand")
name_div.add_style("float: left")
name_div.add_style("margin-left: %spx" % left)
name = widget.get_name()
name_div.add(name)
name_div.add_style("width: %spx" % width)
name_div.add_style("text-align: center")
"""
div.add("<br clear='all'/>")
return div
def get_bottom_wdg(self):
from tactic.ui.widget import ActionButtonWdg
div = DivWdg()
div.add_style("margin-top: 10px")
back = ActionButtonWdg(title="< Back", tip="Go back to last page")
div.add(back)
back.add_class("spt_wizard_back")
back.add_style("float: left")
        # FIXME: hide via a load behavior because set_style and add_class
        # do not target the same underlying element on this button widget
back.add_behavior( {
'type': 'load',
'cbjs_action': '''
var top = bvr.src_el.getParent(".spt_wizard_top");
var back = top.getElement(".spt_wizard_back");
back.setStyle("display", "none");
'''
} )
back.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var top = bvr.src_el.getParent(".spt_wizard_top");
var pages = top.getElements(".spt_wizard_page");
var on_dots = top.getElements(".spt_wizard_on_dot");
var off_dots = top.getElements(".spt_wizard_off_dot");
// check boundary
if (pages[0].hasClass("spt_wizard_selected")) {
return;
}
var selected_index = 0;
for (var i = 0; i < pages.length; i++) {
var page = pages[i];
var on_dot = on_dots[i];
var off_dot = off_dots[i];
if (page.hasClass("spt_wizard_selected")) {
page.removeClass("spt_wizard_selected");
selected_index = i;
}
page.setStyle("display", "none");
on_dot.setStyle("display", "none");
off_dot.setStyle("display", "");
}
if (selected_index == 1) {
var back = top.getElement(".spt_wizard_back");
back.setStyle("display", "none");
}
if (selected_index == pages.length-1) {
var next = top.getElement(".spt_wizard_next");
next.setStyle("display", "");
}
var page = pages[selected_index-1];
page.setStyle("display", "");
page.addClass("spt_wizard_selected");
var on_dot = on_dots[selected_index-1];
var off_dot = off_dots[selected_index-1];
on_dot.setStyle("display", "");
off_dot.setStyle("display", "none");
'''
} )
if self.submit_button:
submit = self.submit_button
else:
submit_title = self.kwargs.get("submit_title")
command = self.kwargs.get("command")
script = self.kwargs.get("script")
jsscript = self.kwargs.get("jsscript")
if not submit_title:
submit_title = "Submit"
submit = ActionButtonWdg(title="%s >>" % submit_title, tip=submit_title)
submit.add_class("spt_wizard_submit")
submit.add_behavior( {
'type': 'click_up',
'command': command,
'script': script,
'jsscript': jsscript,
'cbjs_action': '''
var top = bvr.src_el.getParent(".spt_wizard_top");
var values = spt.api.Utility.get_input_values(top);
var server = TacticServerStub.get();
try {
if (bvr.command) {
spt.app_busy.show("Executing ...", "");
server.execute_cmd(bvr.command, values);
}
else if (bvr.jsscript) {
var values = spt.api.get_input_values(top, null, false);
spt.CustomProject.run_script_by_path(bvr.jsscript, values);
}
else if (bvr.script) {
var values = spt.api.get_input_values(top, null, false);
server.execute_python_script(bvr.script, {values:values});
}
else {
alert("No script or command defined");
}
}
catch(e) {
console.log(e);
var xml = spt.parse_xml(e);
var node = xml.getElementsByTagName("string")[0];
if (node) {
var error = node.textContent;
spt.error("Error: " + error);
spt.app_busy.hide();
}
else {
alert(e);
}
throw(e);
}
spt.app_busy.hide();
'''
} )
div.add(submit)
submit.add_style("float: right")
next = ActionButtonWdg(title="Next >", tip="Go to next page")
div.add(next)
next.add_class("spt_wizard_next")
next.add_style("float: right")
next.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var top = bvr.src_el.getParent(".spt_wizard_top");
var pages = top.getElements(".spt_wizard_page");
var on_dots = top.getElements(".spt_wizard_on_dot");
var off_dots = top.getElements(".spt_wizard_off_dot");
// check boundary
if (pages[pages.length-1].hasClass("spt_wizard_selected")) {
return;
}
var selected_index = 0;
for (var i = 0; i < pages.length; i++) {
var page = pages[i];
var on_dot = on_dots[i];
var off_dot = off_dots[i];
if (page.hasClass("spt_wizard_selected")) {
page.removeClass("spt_wizard_selected");
selected_index = i;
}
page.setStyle("display", "none");
on_dot.setStyle("display", "none");
off_dot.setStyle("display", "");
}
if (selected_index == pages.length-2) {
var next = top.getElement(".spt_wizard_next");
next.setStyle("display", "none");
}
if (selected_index == 0) {
var back = top.getElement(".spt_wizard_back");
back.setStyle("display", "");
}
var page = pages[selected_index+1];
page.setStyle("display", "");
page.addClass("spt_wizard_selected");
var on_dot = on_dots[selected_index+1];
var off_dot = off_dots[selected_index+1];
on_dot.setStyle("display", "");
off_dot.setStyle("display", "none");
'''
} )
div.add("<br clear='all'/>")
return div
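# Editorial note (not in the original source): the back/next handlers above
# implement the wizard's page protocol. Every page is a .spt_wizard_page
# element and exactly one carries .spt_wizard_selected; the paired
# .spt_wizard_on_dot / .spt_wizard_off_dot elements mirror which page is
# visible. Each click shifts the selection by -1/+1 and hides the back or
# next button when a boundary page is reached.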
class TestWizardWdg(BaseRefreshWdg):
def get_display(self):
top = DivWdg()
top.add_color("color", "color")
top.add_color("background", "background")
top.add_style("padding: 10px")
top.add_border()
wizard = WizardWdg(title="Project Creation Wizard")
top.add(wizard)
page = DivWdg()
first = self.get_first_page()
first.add_style("width: 500px")
first.add_style("height: 300px")
page.add(first)
wizard.add(page, "First")
page = DivWdg()
second = self.get_second_page()
second.add_style("width: 500px")
second.add_style("height: 300px")
page.add(second)
wizard.add(page, "Second")
page = DivWdg()
third = DivWdg()
third.add_style("width: 500px")
third.add_style("height: 300px")
third.add("Third Page")
page.add(third)
wizard.add(page, "Third")
page = DivWdg()
fourth = DivWdg()
fourth.add_style("width: 500px")
fourth.add_style("height: 300px")
fourth.add("Fourth Page")
page.add(fourth)
wizard.add(page, "Fourth")
return top
def get_first_page(self):
div = DivWdg()
div.add("First Page")
div.add("<br/>")
div.add("<br/>")
div.add("Project Name: ")
div.add(TextWdg("project_name"))
div.add("<br/>")
div.add("<br/>")
div.add("Project Title: ")
div.add(TextWdg("project_title"))
div.add("<br/>")
return div
def get_second_page(self):
div = DivWdg()
div.add("Second Page")
div.add("<br/>")
div.add("<br/>")
div.add("Column1: ")
div.add(TextWdg("column1"))
div.add("<br/>")
div.add("<br/>")
div.add("Column2: ")
div.add(TextWdg("column2"))
div.add("<br/>")
return div
__all__.append("ProjectWizardWdg")
class ProjectWizardWdg(BaseRefreshWdg):
def get_display(self):
top = DivWdg()
top.add_color("color", "color")
top.add_color("background", "background")
top.add_style("padding: 15px")
top.add_border()
wizard = WizardWdg(title="Project Creation Wizard")
top.add(wizard)
page = DivWdg()
first = self.get_first_page()
first.add_style("width: 500px")
first.add_style("height: 300px")
page.add(first)
wizard.add(page, "Project Title")
page = DivWdg()
first = self.get_second_page()
first.add_style("width: 500px")
first.add_style("height: 300px")
page.add(first)
wizard.add(page, "Foo")
page = DivWdg()
page.add("Hello world!!!")
text = TextWdg("wow")
page.add(text)
wizard.add(page, "Hello!!")
return top
def get_first_page(self):
div = DivWdg()
div.add("<br/>")
div.add("Project Title: ")
text = TextWdg("project_title")
div.add(text)
div.add("<br/>"*2)
div.add("The project title can be more descriptive and contain spaces")
div.add("<br/><br/><hr/><br/>")
div.add("Project Code: ")
text = TextWdg("project_code")
div.add(text)
div.add("<br/>"*2)
div.add('''* Note: the project code must contain only alphanumeric characters [A-Z]/[0-9] and only an '_' as a separator''')
return div
def get_second_page(self):
div = DivWdg()
div.add("Import Template")
div.add("<br/>" * 2)
div.add('''Import a template''')
div.add("<br/>" * 2)
div.add('''Copy from project ...''')
return div
| epl-1.0 | -4,576,897,553,243,658,000 | 28.599407 | 132 | 0.501955 | false |
cidadania/e-cidadania | src/apps/ecidadania/news/admin.py | 1 | 1440 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Clione Software
# Copyright (c) 2010-2013 Cidadania S. Coop. Galega
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from guardian.admin import GuardedModelAdmin
from apps.ecidadania.news.models import Post
class PostAdmin(GuardedModelAdmin):
"""
Administration view for news.
"""
list_display = ('title', 'pub_date', 'post_lastup', 'author',
'space', 'pub_index')
search_fields = ('title', 'author', 'space', 'pub_index')
fieldsets = [
(None, {'fields':
['title', 'description']}),
(_('Other data'), {'fields':
['space', 'pub_index']})
]
def save_model(self, request, obj, form, change):
if not change:
obj.author = request.user
obj.save()
admin.site.register(Post, PostAdmin)
| apache-2.0 | -7,985,490,908,025,688,000 | 29.638298 | 74 | 0.663194 | false |
EnriquePS/ProtocoloPokemon | cliente/cliente.py | 1 | 2612 | import socket
import sys
import struct
import webbrowser
# This method reads the trivia question, downloads the image and receives the user's answer
def read_trivia(data):
print 'pokemon id:'+ str(data[1])
size = struct.unpack('I',data[2:6])
#print 'image size:'+ str(size[0])
print 'QUIEN ES ESTE POKEMON?'
image = data[51:51+size[0]]
f = open('test.png','wb')
f.write(image)
f.close()
webbrowser.open('test.png')
answer = 0
while answer not in {1,2,3}:
print 'opcion 1:'+ str(data[6:21])
print 'opcion 2:'+ str(data[21:36])
print 'opcion 3:'+ str(data[36:51])
answer = input("ingresa el numero de la opcion valido: ")
return answer
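# Editorial note: packet layout inferred from the parsing above (the protocol
# spec itself is not part of this file):
#   byte 0       : opcode (20 = trivia question, 21 = answer evaluation)
#   byte 1       : pokemon id
#   bytes 2-5    : image size, 4-byte unsigned int (struct 'I', native order)
#   bytes 6-20   : option 1 (15 bytes)
#   bytes 21-35  : option 2 (15 bytes)
#   bytes 36-50  : option 3 (15 bytes)
#   bytes 51-... : PNG image data of the announced size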
def read_eval(data):
if data[1]:
print 'lo adivinaste'
else:
print 'no lo adivinaste'
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#sock.settimeout(3)
# Connect the socket to the port where the server is listening
server_address = (sys.argv[1], int(sys.argv[2]))
print >>sys.stderr, 'connecting to %s port %s' % server_address
sock.connect(server_address)
try:
# Send data
s = bytearray()
    # request the trivia question
s.append(10)
sock.sendall(s)
data = bytearray()
count = 0
answer = 0
size = None
while True:
aux = sock.recv(16)
if aux:
data += bytearray(aux)
            # keep track of when to stop receiving data
count +=16
if data[0] == 20:
                if size is None:
size = struct.unpack('I',data[2:6])
elif size[0]+ 51<= count:
answer = read_trivia(data)
break
else:
print 'no se esta mandando la trivia correctament'
else:
break
print 'evaluando la respuesta'
    # the answer will now be sent
s = bytearray()
    # building the answer packet (opcode 11)
s.append(11)
s.append(data[1])
s.append(answer)
sock.sendall(s)
data = bytearray()
count = 0
while True:
aux = sock.recv(16)
if aux:
data += bytearray(aux)
#keeping a track on when to stop recieving info
count +=16
if data[0] == 21:
print "antes de read eval "
answer = read_eval(data)
print 'despues del'
break
else:
print 'error de comunicacion'
else:
print 'en el brak'
break
finally:
print 'closing socket'
sock.close()
| mit | 6,914,311,003,820,070,000 | 24.115385 | 79 | 0.546708 | false |
tommasoberlose/p2p_bittorrent | DaemonDownload.py | 1 | 3554 | import threading
import sys
import socket
import Constant as const
import Function as func
import Package as pack
from threading import *
import TextFunc as tfunc
import SocketFunc as sfunc
import Upload as upl
import PartFunc as pfunc
import Download as dnl
mutex = Lock()
class DaemonDownload(Thread):
def __init__(self, host, t_host, sessionID, fileName, md5, partN, peer, listPartOwned, lenFile, lenPart):
Thread.__init__(self)
self.host = host
self.t_host = t_host
self.port = const.PORT
self.sessionID = sessionID
self.fileName = fileName
self.peer = peer
self.md5 = md5
self.partN = partN
self.listPartOwned = listPartOwned
self.lenFile = lenFile
self.lenPart = lenPart
def run(self):
global mutex
sP = sfunc.create_socket_client(func.roll_the_dice(self.peer[0]), self.peer[1])
if sP is None:
#tfunc.error('Error: could not open socket in download')
var = "" # giusto per fargli fare qualcosa
else:
try:
if mutex.acquire(timeout = const.TIME_TO_UPDATE):
dnl.update_own_memory(self.md5, self.partN, self.listPartOwned, "2")
mutex.release()
#tfunc.gtext("Start download della parte " + str(self.partN) + " da " + str(self.peer[0], "ascii"))
pk = pack.request_download(self.md5, self.partN)
sP.sendall(pk)
ricevutoByte = sP.recv(const.LENGTH_HEADER)
if str(ricevutoByte[0:4], "ascii") == pack.CODE_ANSWER_DOWNLOAD:
nChunk = int(ricevutoByte[4:10])
ricevutoByte = b''
i = 0
while i != nChunk:
ricevutoLen = sP.recv(const.LENGTH_NCHUNK)
while (len(ricevutoLen) < const.LENGTH_NCHUNK):
ricevutoLen = ricevutoLen + sP.recv(const.LENGTH_NCHUNK - len(ricevutoLen))
buff = sP.recv(int(ricevutoLen))
while(len(buff) < int(ricevutoLen)):
buff = buff + sP.recv(int(ricevutoLen) - len(buff))
ricevutoByte = ricevutoByte + buff
i = i + 1
sP.close()
                        # Write the part just downloaded into the file; create the file if it does not exist (e.g. b'00000')
dnl.create_part(ricevutoByte, self.fileName, self.partN, self.lenFile, self.lenPart)
if mutex.acquire(timeout = const.TIME_TO_UPDATE):
                            # Update my own part bookkeeping
dnl.update_own_memory(self.md5, self.partN, self.listPartOwned, "1")
mutex.release()
pfunc.part_all(self.listPartOwned[self.md5][0])
                            # Send the update to the tracker
send_update(self.t_host, self.sessionID, self.md5, self.partN, self.listPartOwned, self.peer)
else:
raise Exception("Error Download Code")
else:
raise Exception("Error Download Code")
else:
raise Exception("Error Download Code")
except Exception as e:
#tfunc.write_daemon_error(self.name, str(self.peer[0], "ascii"), "ERRORE DOWNLOAD: {0}".format(e))
dnl.update_own_memory(self.md5, self.partN, self.listPartOwned, "0")
# >> PEER
def send_update(t_host, sessionID, md5, partN, listPartOwned, peer):
s = sfunc.create_socket_client(func.roll_the_dice(t_host[0]), t_host[1])
if s is None:
tfunc.error('Error: could not open socket to update Tracker')
else:
pk = pack.request_update_tracker(sessionID, md5, partN)
s.sendall(pk)
ricevutoByte = s.recv(const.LENGTH_PACK)
if str(ricevutoByte[0:4], "ascii") == pack.CODE_ANSWER_UPDATE_PART:
tfunc.dnl_success("Download eseguito della parte " + str(partN) + " da " + str(peer[0], "ascii") + "\nAttualmente in possesso di " + str(int(ricevutoByte[4:])) + "/" + str(len(listPartOwned[md5][0])) + " parti del file.")
s.close()
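# Illustrative usage sketch (editorial addition, not from the original
# source); every name below is a placeholder for data normally obtained
# from the tracker:
#
#   worker = DaemonDownload(host, tracker_host, session_id, file_name, md5,
#                           part_number, (peer_ip, peer_port),
#                           list_part_owned, len_file, len_part)
#   worker.start()  # Thread.start() runs self.run() in the background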
| mit | 224,405,308,559,656,350 | 33.173077 | 224 | 0.669668 | false |
markkorput/py2030 | py2030/app.py | 1 | 1195 | #!/usr/bin/env python
import time
from optparse import OptionParser
from .component_manager import ComponentManager
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('-p', '--profile', dest='profile', default=None)
# parser.add_option('-f', '--file', dest='file', default=None)
parser.add_option('-v', '--verbose', dest='verbose', action="store_true", default=False)
parser.add_option('-y', '--yml', '--yaml', '--config-file', dest='config_file', default=None)
opts, args = parser.parse_args()
if opts.profile == None:
import socket
opts.profile = socket.gethostname().replace('.', '_')
del socket
options = {
'verbose': opts.verbose,
'profile': opts.profile,
'config_file': opts.config_file
}
while True:
cm = ComponentManager(options)
cm.setup()
try:
while cm.running:
cm.update()
except KeyboardInterrupt:
print('KeyboardInterrupt. Quitting.')
cm.destroy()
if not cm.restart:
print(cm.shutdown_message)
break
print('restarting...')
time.sleep(1.0)
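# Example invocation (an assumption for illustration; the real entry point
# depends on how the package is installed):
#   python py2030/app.py --profile living_room --yml config.yml -v
# When --profile is omitted, the local hostname (dots replaced by
# underscores) is used as the profile name.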
| mit | 3,093,308,684,881,002,000 | 26.790698 | 97 | 0.574059 | false |
Strayer/dev-services-launcher | services/PHP.py | 1 | 1039 | import subprocess
from tools import win32_kill
import settings
class PHP(object):
def __init__(self, addresses):
self.path = settings.PHP_CWD
self.executable = settings.PHP_EXECUTABLE
self.ini = settings.PHP_INI
self.addresses = addresses
self.processes = []
def start(self):
for address in self.addresses:
self.processes.append(
subprocess.Popen(
args = [self.executable, "-b", address, "-c", self.ini],
cwd = self.path
)
)
def __str__(self):
args = [self.executable, "--version", "-c", self.ini]
proc = subprocess.Popen(args=args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
stdout_lines = stdout.splitlines()
return stdout_lines[0].decode("utf-8")
def stop(self):
for process in self.processes:
win32_kill(process.pid)
self.processes = []
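# Minimal usage sketch (editorial addition; assumes settings.PHP_* point at
# a working php-cgi install and that addresses use php-cgi's "-b" format):
#
#   php = PHP(["127.0.0.1:9000", "127.0.0.1:9001"])
#   php.start()   # spawns one php-cgi process per address
#   print(php)    # first line of `php --version` output
#   php.stop()    # terminates the spawned processes via win32_kill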
| gpl-2.0 | 1,621,063,283,698,960,100 | 29.558824 | 113 | 0.573628 | false |
SonyCSL/CSLAIER | src/common/utils.py | 1 | 6890 | # -*- coding: utf-8 -*-
import sys
import os
import random
import datetime
import pkg_resources
import subprocess
from .nvidia_devices_info import get_devices_info
from xml.etree import ElementTree
import nkf
def get_python_version():
v = sys.version_info
return str(v[0]) + '.' + str(v[1]) + '.' + str(v[2])
def get_chainer_version():
try:
from chainer import __version__ as version
except ImportError:
return '---'
return version
def get_tensorflow_version():
try:
from tensorflow import __version__ as version
except ImportError:
return '---'
return version
def get_disk_info():
try:
df = subprocess.check_output(['df', '-h'])
except:
return None
disks = df[:-1].split('\n')
titles = disks[0].split()
filesystem_index = None
mounted_on_index = None
for i, title in enumerate(titles):
if title.startswith('Filesystem'):
filesystem_index = i
elif title.startswith('Mounted'):
mounted_on_index = i
disk_info = []
for disk in disks:
row = disk.split()
if row[filesystem_index].startswith('/'):
st = os.statvfs(row[mounted_on_index])
disk_info.append({
'mount': row[mounted_on_index],
'size': calculate_human_readable_filesize(st.f_frsize * st.f_blocks),
'used': calculate_human_readable_filesize(st.f_frsize * (st.f_blocks-st.f_bfree)),
'avail': calculate_human_readable_filesize(st.f_frsize * st.f_favail)
})
return disk_info
def get_gpu_info(nvidia_smi_cmd='nvidia-smi'):
# return get_devices_info()
try:
xml = subprocess.check_output([nvidia_smi_cmd, '-q', '-x'])
except:
return None
ret = {}
elem = ElementTree.fromstring(xml)
ret['driver_version'] = elem.find('driver_version').text
gpus = elem.findall('gpu')
ret_gpus = []
for g in gpus:
info = {
'product_name': g.find('product_name').text,
'uuid': g.find('uuid').text,
'fan': g.find('fan_speed').text,
'minor_number': g.find('minor_number').text
}
temperature = g.find('temperature')
info['temperature'] = temperature.find('gpu_temp').text
power = g.find('power_readings')
info['power_draw'] = power.find('power_draw').text
info['power_limit'] = power.find('power_limit').text
memory = g.find('fb_memory_usage')
info['memory_total'] = memory.find('total').text
info['memory_used'] = memory.find('used').text
utilization = g.find('utilization')
info['gpu_util'] = utilization.find('gpu_util').text
ret_gpus.append(info)
ret_gpus.sort(cmp=lambda x, y: cmp(int(x['minor_number']), int(y['minor_number'])))
ret['gpus'] = ret_gpus
return ret
def get_system_info():
return {
'python_version': get_python_version(),
'chainer_version': get_chainer_version(),
'tensorflow_version': get_tensorflow_version(),
'disk_info': get_disk_info(),
'gpu_info': get_gpu_info()
}
def is_module_available(module_name):
for dist in pkg_resources.working_set:
if dist.project_name.lower().find(module_name.lower()) > -1:
return True
return False
def get_timestamp():
return datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
def find_all_files(directory):
for root, dirs, files in os.walk(directory):
files.sort()
for f in files:
if f.startswith('__MACOSX') or f.startswith('.DS_Store'):
continue
yield os.path.join(root, f)
def find_all_directories(directory):
for root, dirs, files in os.walk(directory):
dirs.sort()
if len(dirs) == 0:
yield root
def count_categories(path):
ch = os.listdir(path)
count = 0
    if len(ch) == 1:
if os.path.isdir(path + os.sep + ch[0]):
count += count_categories(path + os.sep + ch[0])
else:
for c in ch:
if os.path.isdir(path + os.sep + c):
count += 1
return count
def get_file_size_all(path):
size = 0
for f in find_all_files(path):
size += os.path.getsize(f)
return size
def calculate_human_readable_filesize(byte):
if byte / 1024 < 1:
return str(byte) + 'bytes'
elif byte / (1024 ** 2) < 1:
return str(byte / 1024) + 'k bytes'
elif byte / (1024 ** 3) < 1:
return str(byte / (1024 ** 2)) + 'M bytes'
else:
return str(byte / (1024 ** 3)) + 'G Bytes'
def count_files(path):
ch = os.listdir(path)
counter = 0
for c in ch:
if os.path.isdir(path + os.sep + c):
counter += count_files(path + os.sep + c)
else:
counter += 1
return counter
def get_files_in_random_order(path, num):
"""
    Randomly pick num files (images) from under path.
    If path contains only directories, descend into them and pick from there.
"""
children_files = []
for cf in os.listdir(path):
if os.path.isdir(path + os.sep + cf):
if len(os.listdir(path + os.sep + cf)) != 0:
children_files.append(cf)
else:
children_files.append(cf)
children_files_num = len(children_files)
    if children_files_num == 0:
return []
    elif children_files_num == 1:
if os.path.isdir(path + os.sep + children_files[0]):
path = path + os.sep + children_files[0]
temp_file_num = len(os.listdir(path))
if temp_file_num < num:
num = temp_file_num
else:
num = 1
elif children_files_num < num:
num = children_files_num
files = []
candidates = random.sample(map(lambda n: path + os.sep + n, os.listdir(path)), num)
for f in candidates:
if os.path.isdir(f):
files.extend(get_files_in_random_order(f, 1))
else:
files.append(f)
return files
def get_texts_in_random_order(path, num, character_num=-1):
files = get_files_in_random_order(path, num)
ret = []
for f in files:
if os.path.exists(f):
ret.append(get_text_sample(f, character_num))
return ret
def get_images_in_random_order(path, num):
files = get_files_in_random_order(path, num)
ret = []
for f in files:
(name, ext) = os.path.splitext(f)
ext = ext.lower()
if ext in ('.png', '.jpg', '.jpeg', 'gif'):
ret.append(f)
return ret
def get_text_sample(path, character_num=-1):
raw_text = open(path).read()
encoding = nkf.guess(raw_text)
text = raw_text.decode(encoding)
if character_num > -1:
return text[0:character_num]
else:
return text
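# Illustrative sketch (editorial addition): the sampling helpers above
# descend a level when a directory holds a single sub-directory, so both
# flat and nested dataset layouts work. The paths are placeholders:
#
#   images = get_images_in_random_order('/data/images', 4)
#   texts = get_texts_in_random_order('/data/corpus', 2, character_num=200)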
| mit | 5,650,222,858,885,279,000 | 27.554622 | 98 | 0.568128 | false |
falcondai/batch_geocode | geocode_csv.py | 1 | 1165 | #!/usr/bin/python
# geocode_csv.py
# A script to geocode the "address" column in a CSV and output the result
# into a new CSV with "latitude" and "longitude" columns
#
# Author: Falcon Dai
# Date: 4/7/2013
# License: MIT License
if __name__ == '__main__':
import sys, csv, time
from batch_geocode import batch_geocode_csv
def print_result(r, j):
print r['address'], j['status']
if len(sys.argv) < 2:
print 'Usage: %s <in-csv-filename> [<out-csv-filename>]' % sys.argv[0]
exit(1)
# output csv file name
if sys.argv[1].endswith('.csv'):
out_cn = sys.argv[1].replace('.csv', '.geocoded.csv')
else:
out_cn = sys.argv[1]+'.geocoded'
if len(sys.argv) > 2:
out_cn = sys.argv[2]
t0 = time.time()
with open(sys.argv[1], 'r') as ic:
with open(out_cn, 'wb') as oc:
r = csv.DictReader(ic)
w = csv.DictWriter(oc, r.fieldnames+['latitude', 'longitude'])
w.writeheader()
batch_geocode_csv(r, w, process_func=print_result)
l, dt = r.line_num - 1, time.time() - t0
print 'Done geocoding %d addresses in %.2fs, average %.2f geocode/s' % (l, dt, l/dt)
print 'Saved to file: %s' % out_cn | mit | -6,442,824,608,657,742,000 | 25.785714 | 85 | 0.612876 | false |
EmreAtes/spack | var/spack/repos/builtin/packages/abyss/package.py | 1 | 2512 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Abyss(AutotoolsPackage):
"""ABySS is a de novo, parallel, paired-end sequence assembler
that is designed for short reads. The single-processor version
is useful for assembling genomes up to 100 Mbases in size."""
homepage = "http://www.bcgsc.ca/platform/bioinfo/software/abyss"
url = "https://github.com/bcgsc/abyss/archive/2.0.2.tar.gz"
version('2.0.2', 'bb3f8cebf121312bf81789d963b4ecc5')
version('1.5.2', '10d6d72d1a915e618d41a5cbbcf2364c')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('mpi')
depends_on('boost@:1.50.0,1.53.0:', when='@2.0.2:')
depends_on('boost@:1.50.0,1.53.0:1.59.0', when='@:1.5.2')
depends_on('sparsehash')
depends_on('sqlite')
depends_on('libtool')
conflicts('^intel-mpi')
conflicts('^intel-parallel-studio+mpi')
conflicts('^mvapich2')
conflicts('^spectrum-mpi')
def configure_args(self):
args = ['--with-boost=%s' % self.spec['boost'].prefix,
'--with-sqlite=%s' % self.spec['sqlite'].prefix,
'--with-mpi=%s' % self.spec['mpi'].prefix]
if self.spec['mpi'].name == 'mpich':
args.append('--enable-mpich')
return args
| lgpl-2.1 | 3,412,379,314,919,958,500 | 40.866667 | 78 | 0.64371 | false |
jotterbach/dstk | DSTK/tests/tests_boosted_selectors/test_boselectors.py | 1 | 3316 | from __future__ import division
import pandas as pd
import sklearn.datasets as ds
import DSTK.BoostedFeatureSelectors.boselector as bs
import numpy as np
cancer_ds = ds.load_breast_cancer()
cancer_df = pd.DataFrame(cancer_ds['data'], columns=cancer_ds['feature_names'])
targets = pd.Series(cancer_ds['target'])
def test_bolasso():
b = bs.Bolasso(bootstrap_fraction=0.5, Cs=np.logspace(-1, 1, 3), random_state=42, random_seed=13)
b.fit(cancer_df, targets, epochs=5)
assert b.get_feature_stats().columns.tolist() == ['coef_mean', 'coef_std', 'frac_occurence', 'num_occurence']
assert b.coeff_df.shape == (5, 30)
assert b.coeff_df.isnull().any().sum() == False
def test_botree():
b = bs.Botree(bootstrap_fraction=0.5, random_state=42, random_seed=13)
b.fit(cancer_df, targets, epochs=5)
assert b.get_feature_stats().columns.tolist() == ['coef_mean', 'coef_std', 'frac_occurence', 'num_occurence']
assert b.coeff_df.shape == (5, 30)
assert b.coeff_df.isnull().any().sum() == False
def test_boforest():
b = bs.Boforest(bootstrap_fraction=0.5, random_state=42, random_seed=13)
b.fit(cancer_df, targets, epochs=5)
assert b.get_feature_stats().columns.tolist() == ['coef_mean', 'coef_std', 'frac_occurence', 'num_occurence']
assert b.coeff_df.shape == (5, 30)
assert b.coeff_df.isnull().any().sum() == False
def test_sgdbolasso_with_cv():
b = bs.SGDBolasso(bootstrap_fraction=0.5, random_state=42, random_seed=13)
means = cancer_df.as_matrix().mean(axis=0)
std = cancer_df.as_matrix().std(axis=0)
scaled_data = pd.DataFrame((cancer_df.as_matrix() - means) / std, columns=cancer_df.columns)
cv_params = {'alpha': np.logspace(-3, 2, 10),
'n_iter': np.arange(5, 15, 1),
'eta0': np.arange(0.1, 1.1, 0.1)}
estim, rscv = b.fit_cv(scaled_data, targets, cv_params=cv_params, epochs=1000, cv=2, verbose=0, n_jobs=4)
np.testing.assert_almost_equal(rscv.best_score_, [0.95079086116], decimal=6)
assert_dict = {'alpha': 0.001, 'eta0': 0.10000000000000001, 'n_iter': 5}
for key, val in assert_dict.iteritems():
np.testing.assert_almost_equal(rscv.best_params_[key], val)
stats_df = estim.get_feature_stats()
assert stats_df[stats_df.frac_occurence == 1].index.tolist() == ['area error',
'radius error',
'worst symmetry',
'worst radius',
'worst perimeter',
'worst concave points',
'worst area',
'worst texture',
'mean concave points']
np.testing.assert_almost_equal(stats_df[stats_df.frac_occurence == 1].coef_mean.values,
[-9.49728727, -12.17192604, -9.11384232, -10.06477512, -8.41949524,
-10.16794578, -8.9595186, -12.84921728, -8.13303665])
| mit | 2,077,573,988,005,891,600 | 45.055556 | 113 | 0.537394 | false |
levilucio/SyVOLT | UMLRT2Kiltera_MM/Properties/from_thesis/HMM4_then1_ConnectedLHS.py | 1 | 2650 | from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HMM4_then1_ConnectedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HMM4_then1_ConnectedLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HMM4_then1_ConnectedLHS, self).__init__(name='HMM4_then1_ConnectedLHS', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'MM4_then1')
# Set the node attributes
# Nodes that represent the edges of the property.
# Add the edges
self.add_edges([
])
# Add the attribute equations
self["equations"] = []
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
| mit | 5,102,672,214,256,245,000 | 42.442623 | 125 | 0.47434 | false |
jbzdak/data-base-checker | bdchecker/api.py | 1 | 9195 | from operator import itemgetter
import re
from sqlalchemy.orm import sessionmaker
__author__ = 'jb'
import uuid
from .db_utils import *
import os
import unittest
import io
import logging
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
def create_engine_for(user, password, database, echo=False, host=None):
if host is None:
host = settings.SCHEMA_CHECKER_HOST
if host is None:
host = 'localhost'
from sqlalchemy import create_engine
url = 'postgresql+psycopg2://{user}:{password}@{host}/{database}'.format(
user=user,
password=password,
database=database,
host=host)
return create_engine(
url
, echo=echo
)
@contextmanager
def capture():
import sys
from io import StringIO
# oldout, olderr = sys.stdout, sys.stderr
out=[StringIO()]
handler = logging.StreamHandler(out[0])
logging.getLogger('sqlalchemy.engine').addHandler(handler)
try:
# sys.stdout, sys.stderr = out
yield out
finally:
logging.getLogger('sqlalchemy.engine').removeHandler(handler)
out[0] = out[0].getvalue()
out[0] = re.sub(r"\\\\n", "\\n", out[0])
class BaseTaskChecker(object):
TestSuite = None
display_failure_cause = True
display_stdout = False
def __init__(self, *args, **kwargs):
super().__init__()
self.args = args
self.kwargs = kwargs
def create_test_suite(self):
suite = self.TestSuite
suite.args = self.args
suite.kwargs = self.kwargs
return type("LocalTestSuite", (suite,), {})
def dispose_test_suite(self, suite):
pass
def grade_result(self, result):
if result.wasSuccessful():
return True, 10
else:
return False, 2
def perform_grading(self, result):
passes, mark = self.grade_result(result)
return passes, mark, result
def perform_test(self):
Suite = self.create_test_suite()
try:
with capture() as captured_streams:
suite = unittest.TestLoader().loadTestsFromTestCase(Suite)
stream = io.StringIO()
result = unittest.TextTestRunner(stream=stream, verbosity=2).run(suite)
passes, mark, result = self.perform_grading(result)
if not self.display_failure_cause:
return passes, mark, ''
if Suite.additional_output_list:
for it in Suite.additional_output_list:
stream.write(it)
if self.display_stdout:
stream.write("=" * 30)
stream.write("\ncaptured stdout\n")
stream.write("=" * 30 + "\n")
stream.write(captured_streams[0])
stream.write("=" * 30)
stream.write("\nend captured stdout\n")
stream.write("=" * 30 + "\n")
stream.seek(0)
return passes, mark, stream.read()
#except Exception as e:
# logging.exception("While executing tests")
finally:
self.dispose_test_suite(Suite)
class DatabaseTaskChecker(BaseTaskChecker):
engine = None
def create_test_suite(self):
suite = super(DatabaseTaskChecker, self).create_test_suite()
suite.engine = self.engine
suite.session = sessionmaker(bind=self.engine)()
return suite
def dispose_test_suite(self, suite):
suite.session.close()
class NewDatabaseTaskChecker(BaseTaskChecker):
ECHO = False
DISPOSE = True
PREFIX = "drop-me"
def create_test_suite(self):
self.db_name = self.PREFIX + str(uuid.uuid4())
self.db_pass = self.db_name
create_user(self.db_name, self.db_pass)
create_database(self.db_name, self.db_name)
self.engine = create_engine_for(self.db_name,
self.db_pass, self.db_name,
self.ECHO)
suite = super().create_test_suite()
suite.db_name = self.db_name
suite.engine = self.engine
return suite
def dispose_test_suite(self, suite):
super().dispose_test_suite(suite)
self.engine.dispose()
self.engine.pool = None
self.engine = None
#dispose = getattr(suite, 'tearDownClass', None)
#if dispose:
# dispose()
if self.DISPOSE:
drop_database(self.db_name)
drop_user(self.db_name)
class BDTester(unittest.TestCase):
additional_output_list = []
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.additional_output_list = []
def assertListEqual(self, list1, list2, msg=None):
if max(len(list1), len(list2)) >= 100:
self.assertTrue(list1 == list2, msg)
super(BDTester, self).assertListEqual(list1, list2, msg)
class SessionTest(BDTester):
ROLLBACK = True
HAS_TABLE_QUERY = "SELECT COUNT(*) FROM pg_tables WHERE tablename = :table"
SELECT_TABLES_QUERY = "select tablename from pg_tables " \
"WHERE schemaname = 'public' ORDER BY tablename;"
SELECT_COLUMNS_QUERY = """
SELECT column_name
FROM information_schema.columns
WHERE table_name = :table AND table_schema=:schema
ORDER BY column_name;
"""
def assert_has_table(self, table_name, msg = None):
if msg is None:
msg = u"Table {} not found".format(table_name)
self.assertEqual(
self.session.execute(self.HAS_TABLE_QUERY, {'table' : table_name}).scalar(), 1, msg
)
def assert_tables_are(self, table_list, msg=None):
"""
:param table_list:
:param msg:
"""
self.assertEqual(
list(map(itemgetter(0), self.session.execute(self.SELECT_TABLES_QUERY))),
sorted(table_list),
msg
)
def assert_table_columns(self, table, columns, msg=None, schema='public'):
rs = self.session.execute(self.SELECT_COLUMNS_QUERY,
{'table': table, 'schema': schema})
self.assertEqual(
list(map(itemgetter(0), rs)),
sorted(columns),
msg
)
@property
def list_procedures(self):
return list(map(itemgetter(0), self.session.execute("select proname from pg_proc where proowner <> 10;")))
def assert_has_procedure(self, procedures):
if isinstance(procedures, str):
procedures = [procedures]
db_proc = self.list_procedures
for p in procedures:
self.assertIn(p, db_proc)
def __init__(self, methodName='runTest'):
super().__init__(methodName)
self.session = None
def reset_session(self):
if self.session:
self.session.close()
self.session = self.sessionmaker()
def close_session(self):
self.session.close()
self.session = None
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.sessionmaker = sessionmaker(bind=cls.engine)
def setUp(self):
self.reset_session()
def tearDown(self):
if self.ROLLBACK:
self.session.rollback()
else:
self.session.commit()
self.session.close()
super().tearDownClass()
class MultiUserSessionTest(SessionTest):
"""
    Test that allows logging in to the database under many roles.
"""
ROLES = {}
"""
    Dictionary that maps arbitrary keys to lists of strings. Each item
    represents a user with the given list of roles, so:

    .. code-block::

        ROLES = {
            "foo": ["bar", "baz"]
        }

    will create a user with a random username that is assigned to the
    roles "bar" and "baz" (we assume these roles exist).

    You'll be able to obtain a session to the database using:

        self.get_session("foo")
"""
__ROLE_USERS = {}
__ROLE_ENGINES = {}
@classmethod
def setUpClass(cls):
super().setUpClass()
for key_name, role_list in cls.ROLES.items():
user = uuid.uuid4()
create_user(user, user, role_list)
cls.__ROLE_USERS[key_name] = user
cls.__ROLE_ENGINES[key_name] = create_engine_for(user, user, cls.db_name)
@classmethod
def tearDownClass(cls):
for engine in cls.__ROLE_ENGINES.values():
engine.dispose()
for user in cls.__ROLE_USERS.values():
drop_user(user, drop_owned_by=True)
cls.__ROLE_USERS = {}
cls.__ROLE_ENGINES = {}
super().tearDownClass()
def get_session(self, name):
if name in self.sessions:
return self.sessions[name]
engine = self.__ROLE_ENGINES[name]
session = sessionmaker(bind=engine)()
self.sessions[name] = session
return session
def setUp(self):
super().setUp()
self.sessions = {}
def tearDown(self):
super().tearDown()
for session in self.sessions.values():
session.rollback()
session.close()
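# Illustrative sketch (editorial addition, not part of the original API):
# a concrete task checker pairs a checker class with a test suite, e.g.:
#
#   class MyTests(SessionTest):
#       def test_has_table(self):
#           self.assert_has_table('students')
#
#   class MyChecker(NewDatabaseTaskChecker):
#       TestSuite = MyTests
#
#   passes, mark, report = MyChecker().perform_test()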
| gpl-3.0 | -821,261,610,025,321,200 | 24.470914 | 114 | 0.576835 | false |
mikehankey/fireball_camera | move-files-1tb.py | 1 | 5925 | #!/usr/bin/python3
import time
import glob
import ephem
import subprocess
from pathlib import Path
import os
from amscommon import read_config
video_dir = "/mnt/ams2/SD/"
hd_video_dir = "/mnt/ams2/HD/"
def parse_date (this_file):
el = this_file.split("/")
file_name = el[-1]
file_name = file_name.replace("_", "-")
file_name = file_name.replace(".", "-")
fnel = file_name.split("-")
#print("FILE:", file_name, len(fnel))
if len(fnel) == 11:
xyear, xmonth, xday, xhour, xmin, xsec, xcam_num, ftype,fnum,fst,xext = fnel
if len(fnel) == 10:
xyear, xmonth, xday, xhour, xmin, xsec, xcam_num, ftype,fnum,xext = fnel
if len(fnel) == 9:
xyear, xmonth, xday, xhour, xmin, xsec, xcam_num, ftype, xext = fnel
if len(fnel) == 8:
xyear, xmonth, xday, xhour, xmin, xsec, xcam_num, xext = fnel
cam_num = xcam_num.replace("cam", "")
date_str = xyear + "-" + xmonth + "-" + xday + " " + xhour + ":" + xmin + ":" + xsec
capture_date = date_str
return(cam_num, date_str, xyear, xmonth, xday, xhour, xmin, xsec)
def day_or_night(config, capture_date):
obs = ephem.Observer()
obs.pressure = 0
obs.horizon = '-0:34'
obs.lat = config['device_lat']
obs.lon = config['device_lng']
obs.date = capture_date
sun = ephem.Sun()
sun.compute(obs)
(sun_alt, x,y) = str(sun.alt).split(":")
saz = str(sun.az)
(sun_az, x,y) = saz.split(":")
if int(sun_alt) < -1:
sun_status = "night"
else:
sun_status = "day"
return(sun_status)
def move_old_school_files():
cmd = 'find /mnt/ams2/SD/ | grep .mp4'
output = subprocess.check_output(cmd, shell=True).decode("utf-8")
files = output.split('\n')
for file in files:
el = file.split("/")
file_name = el[-1]
if "trim" in file:
cmd = "mv " + file + " /mnt/ams2/SD/meteors/" + file_name
os.system(cmd)
continue
(cam_num, date_str, xyear, xmonth, xday, xhour, xmin, xsec) = parse_date(file)
sun_status = day_or_night(conf, date_str)
date_dir = "/mnt/ams2/SD/proc/" + xyear + "-" + xmonth + "-" + xday
file_exists = Path(date_dir)
skip = 0
if (file_exists.is_dir() is False):
os.system("mkdir " + date_dir)
if sun_status == "day":
cmd = "mv " + file + " /mnt/ams2/SD/proc/daytime/" + file_name
os.system(cmd)
else:
cmd = "mv " + file + " " + date_dir + "/" + file_name
os.system(cmd)
print (cmd)
def purge_sd_files():
files = glob.glob(video_dir + "proc/daytime/*")
for file in files:
(cam_num, date_str, xyear, xmonth, xday, xhour, xmin, xsec) = parse_date(file)
sun_status = day_or_night(conf, date_str)
st = os.stat(file)
cur_time = int(time.time())
mtime = st.st_mtime
tdiff = cur_time - mtime
tdiff = tdiff / 60 / 60 / 24
if sun_status == 'day' and tdiff > 2:
print ("File is daytime and this many days old", tdiff, file)
os.system("rm " + file)
#else:
# print ("File is nighttime and this many days old", tdiff, file)
def purge_hd_files():
files = glob.glob(hd_video_dir + "*")
for file in files:
(cam_num, date_str, xyear, xmonth, xday, xhour, xmin, xsec) = parse_date(file)
sun_status = day_or_night(conf, date_str)
st = os.stat(file)
cur_time = int(time.time())
mtime = st.st_mtime
tdiff = cur_time - mtime
tdiff = tdiff / 60 / 60 / 24
if sun_status == 'day' and tdiff > 2:
print ("File is daytime and this many days old", tdiff, file)
print("rm " + file)
os.system("rm " + file)
elif tdiff > 2:
print ("File is nighttime and this many days old will be purged.", tdiff, file)
print("rm " + file)
os.system("rm " + file)
def purge_SD_proc_dir():
files = glob.glob(video_dir + "proc/*")
for file in files:
st = os.stat(file)
cur_time = int(time.time())
mtime = st.st_mtime
tdiff = cur_time - mtime
tdiff = tdiff / 60 / 60 / 24
print (file, tdiff)
        if tdiff >= 25 and not file.endswith('daytime'):  # compare the basename, not the full path
            print ("We should delete this dir in the archive. It is this many days old:", tdiff)
cmd = "rm -rf " + file
os.system(cmd)
print(cmd)
def move_processed_SD_files():
files = glob.glob(video_dir + "*.jpg")
#print("SUN:", sun_status)
for file in files:
el = file.split("/")
if "-stack.jpg" in file:
video_file = file.replace("-stack.jpg", ".mp4")
else:
video_file = file.replace("-stacked.jpg", ".mp4")
file_name = el[-1]
vel = video_file.split("/")
video_file_name = vel[-1]
print ("Stack File:", file)
print ("Video File:", video_file)
(cam_num, date_str, xyear, xmonth, xday, xhour, xmin, xsec) = parse_date(file)
sun_status = day_or_night(conf, date_str)
print("SUN:", sun_status)
date_dir = "/mnt/ams2/SD/proc/" + xyear + "-" + xmonth + "-" + xday
file_exists = Path(date_dir)
skip = 0
if (file_exists.is_dir() is False):
os.system("mkdir " + date_dir)
if sun_status == "day":
cmd = "mv " + file + " /mnt/ams2/SD/proc/daytime/" + file_name
print(cmd)
os.system(cmd)
cmd = "mv " + video_file + " /mnt/ams2/SD/proc/daytime/" + video_file_name
print(cmd)
os.system(cmd)
else:
if "-stacked" not in file_name:
file_name = file_name.replace("stack", "stacked")
cmd = "mv " + file + " " + date_dir + "/" + file_name
print(cmd)
os.system(cmd)
cmd = "mv " + video_file + " " + date_dir + "/" + video_file_name
print(cmd)
os.system(cmd)
conf = read_config("conf/config-1.txt")
purge_hd_files()
purge_sd_files()
move_processed_SD_files()
purge_SD_proc_dir()
| gpl-3.0 | -9,104,365,942,159,208,000 | 29.859375 | 94 | 0.553418 | false |
stvstnfrd/edx-platform | common/lib/xmodule/xmodule/tests/test_poll.py | 1 | 2336 | # -*- coding: utf-8 -*-
"""Test for Poll Xmodule functional logic."""
from mock import Mock
from xmodule.poll_module import PollDescriptor
from . import LogicTest
from .test_import import DummySystem
class PollModuleTest(LogicTest):
"""Logic tests for Poll Xmodule."""
descriptor_class = PollDescriptor
raw_field_data = {
'poll_answers': {'Yes': 1, 'Dont_know': 0, 'No': 0},
'voted': False,
'poll_answer': ''
}
def test_bad_ajax_request(self):
# Make sure that answer for incorrect request is error json.
response = self.ajax_request('bad_answer', {})
self.assertDictEqual(response, {'error': 'Unknown Command!'})
def test_good_ajax_request(self):
# Make sure that ajax request works correctly.
response = self.ajax_request('No', {})
poll_answers = response['poll_answers']
total = response['total']
callback = response['callback']
self.assertDictEqual(poll_answers, {'Yes': 1, 'Dont_know': 0, 'No': 1})
assert total == 2
self.assertDictEqual(callback, {'objectName': 'Conditional'})
assert self.xmodule.poll_answer == 'No'
def test_poll_export_with_unescaped_characters_xml(self):
"""
Make sure that poll_module will export fine if its xml contains
unescaped characters.
"""
module_system = DummySystem(load_error_modules=True)
id_generator = Mock()
id_generator.target_course_id = self.xmodule.course_id
sample_poll_xml = '''
<poll_question display_name="Poll Question">
<p>How old are you?</p>
<answer id="less18">18</answer>
</poll_question>
'''
output = PollDescriptor.from_xml(sample_poll_xml, module_system, id_generator)
# Update the answer with invalid character.
invalid_characters_poll_answer = output.answers[0]
# Invalid less-than character.
invalid_characters_poll_answer['text'] = '< 18'
output.answers[0] = invalid_characters_poll_answer
output.save()
xml = output.definition_to_xml(None)
# Extract texts of all children.
child_texts = xml.xpath('//text()')
# Last index of child_texts contains text of answer tag.
assert child_texts[(- 1)] == '< 18'
| agpl-3.0 | -2,279,210,706,724,800,500 | 33.865672 | 86 | 0.61601 | false |
jamesni/zanata-python-client | zanataclient/zanatalib/projectservice.py | 1 | 7020 | #vim:set et sts=4 sw=4:
#
# Zanata Python Client
#
# Copyright (c) 2011 Jian Ni <[email protected]>
# Copyright (c) 2011 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
__all__ = (
"ProjectService",
)
try:
import json
except ImportError:
import simplejson as json
from rest.client import RestClient
from project import Project
from project import Iteration
from error import ProjectExistException
from error import NoSuchProjectException
from error import UnAuthorizedException
from error import BadRequestException
from error import NotAllowedException
class ProjectService:
"""
    Provides services to interact with Project resources; handles the list, create and retrieve operations
"""
def __init__(self, base_url, usrname, apikey):
self.restclient = RestClient(base_url)
self.iterations = IterationService(base_url, usrname, apikey)
self.username = usrname
self.apikey = apikey
def list(self):
"""
List the Project Resources on the Flies server
@return: list of Project object
"""
res, content = self.restclient.request_get('/seam/resource/restv1/projects')
if res['status'] == '200':
projects = []
projects_json = json.loads(content)
for p in projects_json:
projects.append(Project(p))
return projects
def get(self, projectid):
"""
Retrieve a specified Project Resource on Flies server
@param projectid: Id of Project Resource
@return: Project object
@raise NoSuchProjectException:
"""
res, content = self.restclient.request_get('/seam/resource/restv1/projects/p/%s'%projectid)
if res['status'] == '200' or res['status'] == '304':
server_return = json.loads(content)
if server_return.has_key('status'):
if server_return['status'] == "Retired":
print "Warning: The project %s is retired!"%projectid
project = Project(server_return)
project.set_iteration(self.iterations)
return project
elif res['status'] == '404':
raise NoSuchProjectException('Error 404', content)
def create(self, project):
"""
Create a Project Resource on Flies Server
@param project: Project object
@return: Success if status of response is 201
@raise ProjectExistException:
@raise NoSuchProjectException:
@raise UnAuthorizedException:
@raise BadRequestException:
"""
headers = {}
headers['X-Auth-User'] = self.username
headers['X-Auth-Token'] = self.apikey
body ='''{"name":"%s","id":"%s","description":"%s","type":"IterationProject"}'''%(project.name,project.id,project.desc)
res, content = self.restclient.request_put('/seam/resource/restv1/projects/p/%s'%project.id, args=body, headers=headers)
if res['status'] == '201':
return "Success"
elif res['status'] == '200':
            raise ProjectExistException('Status 200', "The project already exists on the server")
elif res['status'] == '404':
raise NoSuchProjectException('Error 404', content)
elif res['status'] == '401':
raise UnAuthorizedException('Error 401', 'This operation is not authorized, please check username and apikey')
elif res['status'] == '400':
raise BadRequestException('Error 400', content)
def delete(self):
pass
def status(self):
pass
class IterationService:
"""
    Provides services to interact with Project Iterations; handles the list, create and retrieve operations
"""
def __init__(self, base_url, usrname = None, apikey = None):
self.restclient = RestClient(base_url)
self.username = usrname
self.apikey = apikey
def get(self, projectid, iterationid):
"""
Retrieve a specified Iteration Resource on Flies server
@param projectid: Id of Project Resource
@param iterationid: Id of Iteration Resource
@return: Iteration object
@raise NoSuchProjectException:
"""
res, content = self.restclient.request_get('/seam/resource/restv1/projects/p/%s/iterations/i/%s'%(projectid,iterationid))
if res['status'] == '200' or res['status'] == '304':
server_return = json.loads(content)
if server_return.has_key('status'):
if server_return['status'] == "Retired":
print "Warning: The version %s is retired!"%iterationid
return Iteration(server_return)
elif res['status'] == '404':
raise NoSuchProjectException('Error 404', content)
def create(self, projectid, iteration):
"""
Create a Iteration Resource on Flies Server
@param projectid: Id of Project Resource
@param iteration: Iteration object
@return: Success if status of response is 201
@raise ProjectExistException:
@raise NoSuchProjectException:
@raise UnAuthorizedException:
@raise BadRequestException:
"""
headers = {}
headers['X-Auth-User'] = self.username
headers['X-Auth-Token'] = self.apikey
body = '''{"name":"%s","id":"%s","description":"%s"}'''%(iteration.name, iteration.id, iteration.desc)
res, content = self.restclient.request_put('/seam/resource/restv1/projects/p/%s/iterations/i/%s'%(projectid,iteration.id), args=body, headers=headers)
if res['status'] == '201':
return "Success"
elif res['status'] == '200':
raise ProjectExistException('Status 200', "The version is already exist on server")
elif res['status'] == '404':
raise NoSuchProjectException('Error 404', content)
elif res['status'] == '401':
raise UnAuthorizedException('Error 401', 'This operation is not authorized, please check username and apikey')
elif res['status'] == '405':
raise NotAllowedException('Error 405', 'The requested method is not allowed')
def delete(self):
pass
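# Illustrative usage sketch (editorial addition; the URL is a placeholder
# and Project is assumed to expose the name/id/desc fields used above):
#
#   service = ProjectService('http://localhost:8080/zanata', 'admin', 'apikey')
#   for p in service.list():
#       print p.id
#   project = service.get('my-project')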
| gpl-3.0 | 8,598,788,224,025,806,000 | 38.886364 | 158 | 0.628205 | false |
beproud/bp-cron | src/utils/holiday.py | 1 | 2425 | import logging
from datetime import date, datetime, timedelta
from dateutil import parser
from dateutil.rrule import DAILY, rrule
from src.utils.google_api import get_service
CALENDAR_ID = "ja.japanese#[email protected]"
# 日本の祝日を入れておくセット
holiday_set = set()
# 年末年始休暇の開始日と終了日
START = date(2016, 12, 29)
END = date(2017, 1, 4)
logger = logging.getLogger()
def update_japanese_holiday():
"""
日本の祝日情報を更新する
"""
logger.info("Update japanese holiday")
holiday_set = set()
# 年末年始休暇を設定
newyear_rule = rrule(freq=DAILY, dtstart=START, until=END)
holiday_set.update(x.date() for x in newyear_rule)
# カレンダーの検索範囲は今日から一年後まで
today = date.today()
next_year = today + timedelta(days=365)
today_str = "{:%Y-%m-%d}T00:00:00+09:00".format(today)
next_year_str = "{:%Y-%m-%d}T00:00:00+09:00".format(next_year)
# カレンダーAPIに接続
service = get_service("calendar", "v3")
# 日本の祝日カレンダーにある予定を取得する
event_results = (
service.events()
.list(calendarId=CALENDAR_ID, timeMin=today_str, timeMax=next_year_str)
.execute()
)
for event in event_results.get("items", []):
holiday = parser.parse(event["start"]["date"]).date()
holiday_set.add(holiday)
return holiday_set
def is_holiday(date_data=date.today()):
"""
指定された日付が休日かどうかを返す
:param date: 日付(文字列、datetime、date型のいずれか)
:reutrn: True - 祝日、False - 平日
"""
global holiday_set
if not holiday_set:
holiday_set = update_japanese_holiday()
if isinstance(date_data, datetime):
# datetime は date に変換する
date_data = date_data.date()
elif isinstance(date_data, str):
# 文字列の場合も date に変換する
date_data = parser.parse(date_data).date()
elif not isinstance(date_data, date):
# TODO: 日付以外の場合は例外を返す予定
return False
# 土日だったら True を返す
if date_data.weekday() in (5, 6):
return True
# 日本の祝日カレンダーで祝日なら True を返す
if date_data in holiday_set:
return True
return False
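# A minimal demo (editorial addition; assumes Google Calendar API credentials
# are configured for src.utils.google_api, since the first call populates
# holiday_set from the network):
if __name__ == "__main__":
    print(is_holiday("2017-01-01"))        # True: New Year break (and a Sunday)
    print(is_holiday(date(2017, 1, 10)))   # False: an ordinary Tuesday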
| mit | 6,716,281,637,206,581,000 | 23.590361 | 79 | 0.641842 | false |
katerina7479/pypdflite | pypdflite/pdfobjects/pdftable.py | 1 | 3779 | from .pdfrow import PDFRow
from .pdfcolumn import PDFColumn
class PDFTable(object):
def __init__(self, session, page, rows, cols, cursor, def_font):
self.session = session
self.page = page
self.font = def_font
self.number_of_rows = rows
self.number_of_columns = cols
self.cursor = cursor
self.text_cursor = self.cursor.copy()
self.border_cursor = self.cursor.copy()
self._initiate_cells()
def _initiate_cells(self):
self.rows = []
self.columns = []
for x in range(self.number_of_columns):
self.columns.append(PDFColumn(parent=self))
for x in range(self.number_of_rows):
self.rows.append(PDFRow(self, x, self.number_of_columns,
self.text_cursor, self.border_cursor))
for x in range(self.number_of_rows):
for y in range(self.number_of_columns):
self.columns[y].cells.append(self.rows[x].cells[y])
# Public Methods, called to create table
def write(self, row, col, text, format=None):
cell = self.rows[row][col]
cell._set_text(text)
if format is not None:
cell._set_format(format)
def write_row(self, row, col_start, data, format):
i = 0
for c in range(col_start, col_start + len(data)):
self.write(row, c, data[i], format)
i += 1
def write_column(self, row_start, col, data, format):
i = 0
for r in range(row_start, row_start + len(data)):
self.write(r, col, data[i], format)
i += 1
def set_format(self, row, col, format):
cell = self.rows[row][col]
cell._set_format(format)
def set_format_row(self, row, col_start, format):
for c in range(col_start, self.number_of_columns):
self.set_format(row, c, format)
def set_format_column(self, row_start, col, format):
for r in range(row_start, self.number_of_rows):
self.set_format(r, col, format)
def set_column_width(self, column, value):
self.columns[column]._set_max_width(value)
def set_row_height(self, row, value):
self.rows[row]._set_max_height(value)
# Private methods to build table
def _draw(self):
""" Don't use this, use document.draw_table """
self._compile()
self.rows[0]._advance_first_row()
self._set_borders()
self._draw_fill()
self._draw_borders()
self._draw_text()
self._set_final_cursor()
def _draw_text(self):
for i in range(len(self.rows)):
self.rows[i]._draw_text()
self.text_cursor.x_reset()
if (i + 1) < len(self.rows):
self.text_cursor.y_plus(self.rows[i + 1].max_height)
def _set_borders(self):
for i in range(len(self.rows)):
self.rows[i]._set_borders()
self.border_cursor.x_reset()
if (i + 1) < len(self.rows):
self.border_cursor.y_plus(self.rows[i].max_height)
def _draw_borders(self):
for i in range(len(self.rows)):
self.rows[i]._draw_borders()
def _draw_fill(self):
for i in range(len(self.rows)):
self.rows[i]._draw_fill()
def _set_final_cursor(self):
if self.text_cursor > self.border_cursor:
self.cursor = self.text_cursor
else:
self.cursor = self.border_cursor
def _compile(self):
for row in self.rows:
for cell in row:
cell._compile()
for col in self.columns:
col._finish()
for row in self.rows:
row._finish()
for cell in row:
cell._finish()
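# Illustrative usage sketch (editorial addition): tables are meant to be
# driven through the document object. draw_table is referenced by the _draw
# docstring above; add_table and header_format are assumed placeholder names:
#
#   table = document.add_table(rows=3, cols=2)
#   table.write_row(0, 0, ['Name', 'Score'], header_format)
#   table.set_column_width(1, 40)
#   document.draw_table(table)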
| mit | 5,406,865,965,786,356,000 | 30.231405 | 74 | 0.552263 | false |
ActiveState/code | recipes/Python/579113_defdict/recipe-579113.py | 1 | 9898 | #!/usr/bin/env python
# This file is licensed under the GNU General Public License found at <http://www.gnu.org/licenses>
# email: Mark Janssen <[email protected]>
"""Dictionary with default values, collision function, and sorted string output."""
import exceptions
import copy
class KeyAlreadyExists(exceptions.LookupError): pass
class _use_default_:
"""Dummy class used as value in function parameters to indicate
no default value specified; i.e. use the value in DefDict._default.
Created special class to avoid clashing with possible value passed
by user."""
#XXX perhaps should have instantiate default which takes arbitrary
# number of parameters that will be passed to value stored in DefDict._default
# to allow object creation. DefDict.add would check if isinstance(default, _use_default_)
#define some useful collision functions
#May wish to return an expression instead of setting ddict directly
# to allow lambda functions to be passed as collision functions.
# may sacrifice speed for cases when value modified in place or when old value lookup not needed.
_OVERWRITE_ = None #i.e. will use standard dict overwrite semantics
def _RETAIN_(ddict, key, new_value): pass #do nothing to the value
def _RAISE_(ddict, key, new_value): raise KeyAlreadyExists, repr(key)
def _ADD_(ddict, key, new_value): ddict[key] += new_value
def _MAX_(ddict, key, new_value): ddict[key] = max(ddict[key], new_value)
def _MIN_(ddict, key, new_value): ddict[key] = min(ddict[key], new_value)
def _OUTPUT_KEY_(ddict, key, new_value): print key #should probably send to stderr
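# Editorial sketch: any callable with the (ddict, key, new_value) signature
# can serve as a collision function, e.g. one that collects duplicates:
#
#   def _COLLECT_(ddict, key, new_value):
#       old = ddict[key]
#       ddict[key] = old + [new_value] if isinstance(old, list) else [old, new_value]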
class DefDict(dict):
"""Extends standard dictionary type by allowing user to
specify a default value when a key is inserted.
A 'collision' function can be provided to specify what should
be done to the value when a key is added that already exists.
User-defined collision function should take
(defdict, key, new_value) as parameters.
"""
#XXX may wish to have collision method instead of constantly passing as parameter
__slots__ = ['_default']
def __init__(self, init={}, default=None, collision=_OVERWRITE_):
"""Create dictionary and initialize with mapping or list of (key, value) pairs, if given.
Sets a default value for the keys when no other specified.
Initialization interface similar to standard dictionary:
>>> dd = DefDict() #create empty DefDict
>>> print dd
{}
>>> print DefDict({1: 1, 2: 4, 3: 9}) #initialize with dict type
{1: 1, 2: 4, 3: 9}
Initialize with list of (key, value) pairs:
>>> print DefDict([('a', 2), ('b', 3), ('c', 1), ('a', 5)], collision=_ADD_)
{'a': 7, 'b': 3, 'c': 1}
A sequence of (key, value) pairs may contain duplicate keys, like above.
The resulting value of such "key collisions" can be specified
by providing a collision function. The collision function will
be called with parameters (self, key, new_value) and should set the
value of self[key] as desired.
This module defines a few useful collision functions:
_OVERWRITE_: the default--standard dictionary semantics;
i.e. if key already exists, new value overwrites existing
key value.
_RETAIN_: value remains unchanged if key already exists.
        _RAISE_: raise 'KeyAlreadyExists' exception if key already
exists. Value remains unchanged.
_ADD_: sets value to existing value + new value
_MAX_: sets to greater of old, new values
_MIN_: sets to lesser of old, new values
"""
self._default = default
dict.__init__(self)
if isinstance(init, dict): #don't use dict(init) since derives classes have special setitem behavior
init = init.iteritems()
#list of (key, value) pairs may contain duplicates
for key, value in init:
self.setdefault(key, value, collision)
def fromkeys(cls, iterable, default=None, collision=_OVERWRITE_):
"""Create dictionary from iterable with optional default value.
One can initialize defdict with a sequence of keys.
The dictionary values of the keys will be determined by the default value,
if given, otherwise defaults to None.
>>> print DefDict.fromkeys(["apple", "banana", "carrot"])
{'apple': None, 'banana': None, 'carrot': None}
>>> dd = DefDict.fromkeys(range(4), 0) #initialize with values = 0
>>> print dd
{0: 0, 1: 0, 2: 0, 3: 0}
        >>> dd.update([4])  #was the stored default value retained?
        >>> print dd[4]     #yes: the new key got the default, 0
        0
>>> print DefDict.fromkeys("abacab", 1, _ADD_)
{'a': 3, 'b': 2, 'c': 1}
"""
dd = cls(default=default)
for key in iterable:
dd.setdefault(key, default, collision)
return dd
fromkeys = classmethod(fromkeys)
def update(self, other, collision=_OVERWRITE_):
"""Updates defdict from mapping type or iterable with optional collision function.
>>> d = DefDict.fromkeys('ab', 1)
>>> d.update({'a': 0, 'c': 2})
>>> print d
{'a': 0, 'b': 1, 'c': 2}
As seen above, when updating a key which already exists and no
collision function is specified, update defaults to standard dictionary
OVERWRITE semantics. This behavior can be modified by passing a
collision function.
>>> d.update({'b': 3, 'd': 9}, _ADD_)
>>> print d
{'a': 0, 'b': 4, 'c': 2, 'd': 9}
>>> d._default = 5 #manually change default value
>>> d.update(['b','c']) #update from non-dict
>>> d._default = 1 #set back to original
        >>> d.update('abd') #strings are iterated over
>>> print d
{'a': 1, 'b': 1, 'c': 5, 'd': 1}
>>> d.update('de', _ADD_)
>>> print d
{'a': 1, 'b': 1, 'c': 5, 'd': 2, 'e': 1}
        NOTE: if the collision function raises an exception, the DefDict may
        be left in a partially-updated state.
"""
        #perhaps should catch any exceptions raised by the collision function
        # and store the aberrant keys in the exception to be reported later.
if isinstance(other, dict):
for key, value in other.iteritems():
self.setdefault(key, value, collision)
else: #given iterable
for key in other:
self.setdefault(key, self._default, collision)
    def setdefault(self, key, value=_use_default_, collision=_RETAIN_):
"""Behaves like standard dict.setdefault, but uses value in _default attribute
if no default parameter specified.
>>> dd = DefDict(default=5)
>>> dd.setdefault('a', 10), dd.setdefault('b'), dd.setdefault('b', 11)
(10, 5, 5)
>>> print dd
{'a': 10, 'b': 5}
A collision function can also be passed to override setdefault's
standard RETAIN semantics.
>>> dd.setdefault('a', collision=_OVERWRITE_), dd.setdefault('b', 6, _ADD_)
(5, 11)
>>> dd.setdefault('b', 12, _MAX_), dd.setdefault('b', 10, _MAX_)
(12, 12)
>>> dd.setdefault('c', 10, _RAISE_)
10
>>> dd.setdefault('c', 10, _RAISE_)
Traceback (most recent call last):
KeyAlreadyExists: 'c'
>>> print dd
{'a': 5, 'b': 12, 'c': 10}
        The default value is NOT copied if it is a non-simple type (e.g. list, dict).
        If values must be distinct objects, then you must subclass and override
        this method or __setitem__() to create a copy of the default (see the
        illustrative CopyDefDict sketch after this class).
>>> dd = DefDict(default=[])
>>> dd.setdefault(1), dd.setdefault(2)
([], [])
        >>> dd[1] is dd[2] #keys 1 and 2 share the SAME list object
True
>>> dd[2].append(42) #will also change value in dd[1]!!!
>>> print dd
{1: [42], 2: [42]}
"""
        key_absent = key not in self #fail immediately if key is unhashable
        if value is _use_default_: value = self._default #XXX should make copy of a non-simple default, or rely on __setitem__() to make the copy?
        if collision is _OVERWRITE_ or key_absent:
            self[key] = value #note: a subclass may override __setitem__, so the value may be modified
else:
collision(self, key, value)
return dict.__getitem__(self, key) #may wish to allow dd[key] to insert key in dd with default value
def get(self, key, *args):
"""Behaves like standard dict.get, but uses value in _default attribute
if no default parameter specified.
>>> dd = DefDict({'a': 10}, 0)
>>> dd.get('a'), dd.get('b'), dd.get('b', 11)
(10, 0, 11)
>>> print dd
{'a': 10}
"""
if not args: args = (self._default,)
return dict.get(self, key, *args)
def copy(self):
"""Return shallow copy of dictionary.
>>> dd = DefDict.fromkeys(range(5), 5)
>>> ddcopy = dd.copy()
>>> print ddcopy._default, isinstance(ddcopy, DefDict); print ddcopy
5 True
{0: 5, 1: 5, 2: 5, 3: 5, 4: 5}
>>> ddcopy[0] = 7
>>> print dd[0], ddcopy[0]
5 7
"""
return self.__class__(self, self._default)
__copy__ = copy
def __str__(self):
"""Convert self to string with keys in sorted order.
>>> str(DefDict())
'{}'
>>> str(DefDict({9: 0, 'test': 0, 'a': 0, 0: 0}))
"{0: 0, 9: 0, 'a': 0, 'test': 0}"
"""
if not self: return '{}' #nothing to sort
keys = self.keys()
keys.sort()
return '{' + ', '.join(["%r: %s" % (k, self[k]) for k in keys]) + '}'
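
#Illustrative sketch (not part of the original module): the setdefault
# docstring notes that a non-simple default is shared between keys unless a
# subclass copies it. A minimal subclass doing so might look like this; the
# name CopyDefDict is hypothetical, and deep-copy semantics are assumed to
# be acceptable for the default value.
import copy #would normally live at the top of the module

class CopyDefDict(DefDict):
    """DefDict variant that deep-copies the default value on insertion,
    so each key gets a distinct object.
    >>> dd = CopyDefDict(default=[])
    >>> dd.setdefault(1), dd.setdefault(2)
    ([], [])
    >>> dd[1] is dd[2]   #keys 1 and 2 now hold distinct list objects
    False
    >>> dd[2].append(42) #does not affect dd[1]
    >>> print dd
    {1: [], 2: [42]}
    """
    __slots__ = []
    def setdefault(self, key, value=_use_default_, collision=_RETAIN_):
        if value is _use_default_:
            value = copy.deepcopy(self._default) #copy so keys never share the default object
        return DefDict.setdefault(self, key, value, collision)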
if __name__ == "__main__":
import doctest
print doctest.testmod()