code | repo_name | path | language | license | size
stringlengths 3-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int64 3-1.05M
---|---|---|---|---|---|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Post'
db.create_table(u'blog_post', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('changed', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('uuid', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('pubdate', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2013, 5, 18, 0, 0))),
('status', self.gf('django.db.models.fields.CharField')(max_length=1)),
('body', self.gf('django.db.models.fields.TextField')()),
('drupal_id', self.gf('django.db.models.fields.IntegerField')(unique=True, null=True, blank=True)),
))
db.send_create_signal(u'blog', ['Post'])
# Adding model 'Image'
db.create_table(u'blog_image', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('changed', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('uuid', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('alt', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('status', self.gf('django.db.models.fields.BooleanField')(default=True)),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
('post', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['blog.Post'])),
))
db.send_create_signal(u'blog', ['Image'])
def backwards(self, orm):
# Deleting model 'Post'
db.delete_table(u'blog_post')
# Deleting model 'Image'
db.delete_table(u'blog_image')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'blog.image': {
'Meta': {'ordering': "['-post__pubdate', '-created']", 'object_name': 'Image'},
'alt': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'changed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['blog.Post']"}),
'status': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'blog.post': {
'Meta': {'ordering': "['-pubdate']", 'object_name': 'Post'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'body': ('django.db.models.fields.TextField', [], {}),
'changed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'drupal_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pubdate': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 5, 18, 0, 0)'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['blog']
| megaprojectske/megaprojects.co.ke | megaprojects/blog/migrations/0001_initial.py | Python | apache-2.0 | 7,726 |
from linecache import getline, clearcache, checkcache
from datetime import datetime
import os
'''
with open('fort.63', 'r') as f:
for i in range(0,5):
print f.readline()
with open('fort.63', 'r') as f:
end = len(f.readlines())
'''
#---enter # of nodes in mesh
nnodes = 165290
#----nodes of interest
node_list = [60643, 63290]
#---number of recordings
tsteps = 289
#directories to loop through if desired (one only should work fine)
dirs = ['P:\\02\\LakeOntario\\Storm\\20080203','P:\\02\\LakeOntario\\Storm\\19701120',
'P:\\02\\LakeOntario\\Storm\\19710210','P:\\02\\LakeOntario\\Storm\\19731103',
'P:\\02\\LakeOntario\\Storm\\19710124']
clearcache()
a = datetime.now()
for i, d in enumerate(dirs):
print d
os.chdir(d)
#----Read fort.63
clearcache()
for cur_node in node_list:
name = str(cur_node)
with open('fort'+str(i) +'_'+ name +'.txt', 'w') as f:
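            # Assumed fort.63 layout (ADCIRC full-domain output; an assumption
            # not stated in this script): two global header lines, then for
            # each timestep one record-header line followed by nnodes value
            # lines. Stepping by nnodes+1 with a +3 offset therefore lands on
            # the same node at every timestep.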
for j in range(cur_node,(tsteps-1)*nnodes,nnodes+1):
if j == cur_node:
f.write(getline('fort.63',j+3).rstrip() + '\n')
else:
#print i, getline('fort.63',i+3).rstrip()
f.write(getline('fort.63',j+3).rstrip() +'\n')
print os.getcwd(), 'maxele'
clearcache()
'''
#----Read swan_HS.63
for cur_node in node_list:
with open('C:\Users\slawler\Desktop\\HS'+str(i) +'_'+ name +'.txt', 'w') as f:
            for j in range(cur_node,(tsteps-1)*nnodes,nnodes+1):
                if j == cur_node:
f.write(getline('swan_HS.63',j+3).rstrip() + '\n')
else:
#print i, getline('swan_HS.63',j+3).rstrip()
f.write(getline('swan_HS.63',j+3).rstrip() +'\n')
print os.getcwd(), 'HS'
clearcache()
#----Read swan_TP.63
for cur_node in node_list:
with open('C:\Users\slawler\Desktop\\TP'+str(i) +'_'+ name +'.txt', 'w') as f:
            for j in range(cur_node,(tsteps-1)*nnodes,nnodes+1):
                if j == cur_node:
f.write(getline('swan_TP.63',j+3).rstrip() + '\n')
else:
#print i, getline('swan_TP.63',j+3).rstrip()
f.write(getline('swan_TP.63',j+3).rstrip() +'\n')
print os.getcwd(), 'TP'
clearcache()
'''
b = datetime.now()
print b-a
#---For 5 storms: 0:09:14.214000
| jdorvi/CSHORE | extract_adcirc_63/extract63.py | Python | apache-2.0 | 2,541 |
#
# File : utils.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2015, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2015-01-20 Bernard Add copyright information
#
import sys
import os
def splitall(loc):
"""
Return a list of the path components in loc. (Used by relpath_).
The first item in the list will be either ``os.curdir``, ``os.pardir``, empty,
    or the root directory of loc (for example, ``/`` or ``C:\\``).
The other items in the list will be strings.
Adapted from *path.py* by Jason Orendorff.
"""
parts = []
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = os.path.split(prev)
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def _make_path_relative(origin, dest):
"""
Return the relative path between origin and dest.
If it's not possible return dest.
If they are identical return ``os.curdir``
Adapted from `path.py <http://www.jorendorff.com/articles/python/path/>`_ by Jason Orendorff.
"""
origin = os.path.abspath(origin).replace('\\', '/')
dest = os.path.abspath(dest).replace('\\', '/')
#
orig_list = splitall(os.path.normcase(origin))
# Don't normcase dest! We want to preserve the case.
dest_list = splitall(dest)
#
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
#
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
#
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
return os.curdir
else:
# return os.path.join(*segments).replace('\\', '/')
return os.path.join(*segments)
def xml_indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
xml_indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
source_ext = ["c", "h", "s", "S", "cpp", "xpm"]
source_list = []
def walk_children(child):
global source_list
global source_ext
# print child
full_path = child.rfile().abspath
file_type = full_path.rsplit('.',1)[1]
#print file_type
if file_type in source_ext:
if full_path not in source_list:
source_list.append(full_path)
children = child.all_children()
if children != []:
for item in children:
walk_children(item)
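
# --- Illustrative usage sketch (not part of the original RT-Thread file). ---
# The paths below are hypothetical; they only demonstrate the behaviour
# described in the _make_path_relative() docstring.
if __name__ == '__main__':
    print(_make_path_relative('/rt-thread/bsp/stm32f10x', '/rt-thread/src/clock.c'))
    # -> '../../src/clock.c' (the separator depends on the host OS)
    print(_make_path_relative('/rt-thread/src', '/rt-thread/src'))
    # -> '.' (identical paths collapse to os.curdir)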
| aozima/rt-thread | tools/utils.py | Python | gpl-2.0 | 3,953 |
#
# pyip is a Python package offering assembling/disassembling of raw ip packet
# including ip, udp, and icmp. Also it includes 2 utilities based on raw ip,
# traceroute and ping.
#
# pyip is released under PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2, and is
# a project inspired by 'ping' written by Jeremy Hylton.
#
# Author: Kenneth Jiang, [email protected]
#
"""Internet packet basic
Simple operations like performing checksums and swapping byte orders.
"""
#from _ip import *
import array
import struct
from socket import htons, ntohs
def cksum(s):
if len(s) & 1:
s = s + '\0'
words = array.array('h', s)
sum = 0
for word in words:
sum = sum + (word & 0xffff)
hi = sum >> 16
lo = sum & 0xffff
sum = hi + lo
sum = sum + (sum >> 16)
return (~sum) & 0xffff
# Should generalize from the *h2net patterns
# This python code is suboptimal because it is based on C code where
# it doesn't cost much to take a raw buffer and treat a section of it
# as a u_short.
# ntohs on Solaris has problem when MSB is set. Replace it with
# struct 'H' and '!H' format
def __ntohs(s):
return struct.pack('H', struct.unpack('!H', s)[0])
def __htons(s):
return struct.pack('!H', struct.unpack('H', s)[0])
def iph2net(s):
return s[:2] + __htons(s[2:4]) + __htons(s[4:6]) + __htons(s[6:8]) + s[8:]
def net2iph(s):
return s[:2] + __ntohs(s[2:4]) + __ntohs(s[4:6]) + __ntohs(s[6:8]) + s[8:]
def udph2net(s):
return __htons(s[0:2]) + __htons(s[2:4]) + __htons(s[4:6]) + s[6:]
def net2updh(s):
return __ntohs(s[0:2]) + __ntohs(s[2:4]) + __ntohs(s[4:6]) + s[6:]
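
# --- Illustrative sketch (not part of the original pyip module). ---
# Demonstrates the one's-complement checksum helper on an arbitrary packed
# buffer; the values below are made up purely for illustration.
if __name__ == '__main__':
    sample = struct.pack('!HHHH', 0x4500, 0x003c, 0x1c46, 0x4000)
    print('checksum = 0x%04x' % cksum(sample))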
| Zulan/PBStats | tests/fix_upload_bug/Linux/python3-pyip-0.7/inetutils.py | Python | gpl-2.0 | 1,635 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import string
from typing import Any
from typing import Generator
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from ._compat import chr
from ._compat import decode
from ._utils import RFC_3339_LOOSE
from ._utils import _escaped
from ._utils import parse_rfc3339
from .container import Container
from .exceptions import EmptyKeyError
from .exceptions import EmptyTableNameError
from .exceptions import InternalParserError
from .exceptions import InvalidCharInStringError
from .exceptions import InvalidControlChar
from .exceptions import InvalidDateError
from .exceptions import InvalidDateTimeError
from .exceptions import InvalidNumberError
from .exceptions import InvalidTimeError
from .exceptions import InvalidUnicodeValueError
from .exceptions import ParseError
from .exceptions import UnexpectedCharError
from .exceptions import UnexpectedEofError
from .items import AoT
from .items import Array
from .items import Bool
from .items import BoolType
from .items import Comment
from .items import Date
from .items import DateTime
from .items import Float
from .items import InlineTable
from .items import Integer
from .items import Item
from .items import Key
from .items import KeyType
from .items import Null
from .items import String
from .items import StringType
from .items import Table
from .items import Time
from .items import Trivia
from .items import Whitespace
from .source import Source
from .toml_char import TOMLChar
from .toml_document import TOMLDocument
CTRL_I = 0x09 # Tab
CTRL_J = 0x0A # Line feed
CTRL_M = 0x0D # Carriage return
CTRL_CHAR_LIMIT = 0x1F
CHR_DEL = 0x7F
class Parser:
"""
Parser for TOML documents.
"""
def __init__(self, string): # type: (str) -> None
# Input to parse
self._src = Source(decode(string))
self._aot_stack = []
@property
def _state(self):
return self._src.state
@property
def _idx(self):
return self._src.idx
@property
def _current(self):
return self._src.current
@property
def _marker(self):
return self._src.marker
def extract(self): # type: () -> str
"""
Extracts the value between marker and index
"""
return self._src.extract()
def inc(self, exception=None): # type: (Optional[ParseError.__class__]) -> bool
"""
Increments the parser if the end of the input has not been reached.
Returns whether or not it was able to advance.
"""
return self._src.inc(exception=exception)
def inc_n(self, n, exception=None): # type: (int, Optional[ParseError]) -> bool
"""
Increments the parser by n characters
if the end of the input has not been reached.
"""
return self._src.inc_n(n=n, exception=exception)
def consume(self, chars, min=0, max=-1):
"""
        Consume chars until min/max is satisfied.
"""
return self._src.consume(chars=chars, min=min, max=max)
def end(self): # type: () -> bool
"""
Returns True if the parser has reached the end of the input.
"""
return self._src.end()
def mark(self): # type: () -> None
"""
Sets the marker to the index's current position
"""
self._src.mark()
def parse_error(self, exception=ParseError, *args):
"""
Creates a generic "parse error" at the current position.
"""
return self._src.parse_error(exception, *args)
def parse(self): # type: () -> TOMLDocument
body = TOMLDocument(True)
# Take all keyvals outside of tables/AoT's.
while not self.end():
# Break out if a table is found
if self._current == "[":
break
# Otherwise, take and append one KV
item = self._parse_item()
if not item:
break
key, value = item
if key is not None and key.is_dotted():
# We actually have a table
self._handle_dotted_key(body, key, value)
elif not self._merge_ws(value, body):
body.append(key, value)
self.mark()
while not self.end():
key, value = self._parse_table()
if isinstance(value, Table) and value.is_aot_element():
# This is just the first table in an AoT. Parse the rest of the array
# along with it.
value = self._parse_aot(value, key.key)
body.append(key, value)
body.parsing(False)
return body
def _merge_ws(self, item, container): # type: (Item, Container) -> bool
"""
Merges the given Item with the last one currently in the given Container if
both are whitespace items.
Returns True if the items were merged.
"""
last = container.last_item()
if not last:
return False
if not isinstance(item, Whitespace) or not isinstance(last, Whitespace):
return False
start = self._idx - (len(last.s) + len(item.s))
container.body[-1] = (
container.body[-1][0],
Whitespace(self._src[start : self._idx]),
)
return True
def _is_child(self, parent, child): # type: (str, str) -> bool
"""
Returns whether a key is strictly a child of another key.
AoT siblings are not considered children of one another.
"""
parent_parts = tuple(self._split_table_name(parent))
child_parts = tuple(self._split_table_name(child))
if parent_parts == child_parts:
return False
return parent_parts == child_parts[: len(parent_parts)]
def _split_table_name(self, name): # type: (str) -> Generator[Key]
in_name = False
current = ""
t = KeyType.Bare
parts = 0
for c in name:
c = TOMLChar(c)
if c == ".":
if in_name:
current += c
continue
if not current:
raise self.parse_error()
yield Key(current.strip(), t=t, sep="", original=current)
parts += 1
current = ""
t = KeyType.Bare
continue
elif c in {"'", '"'}:
if in_name:
if (
t == KeyType.Literal
and c == '"'
or t == KeyType.Basic
and c == "'"
):
current += c
continue
if c != t.value:
raise self.parse_error()
in_name = False
else:
if (
current.strip()
and TOMLChar(current[-1]).is_spaces()
and not parts
):
raise self.parse_error()
in_name = True
t = KeyType.Literal if c == "'" else KeyType.Basic
continue
elif in_name or c.is_bare_key_char():
current += c
elif c.is_spaces():
# A space is only valid at this point
# if it's in between parts.
# We store it for now and will check
# later if it's valid
current += c
continue
else:
raise self.parse_error()
if current.strip():
yield Key(current.strip(), t=t, sep="", original=current)
def _parse_item(self): # type: () -> Optional[Tuple[Optional[Key], Item]]
"""
Attempts to parse the next item and returns it, along with its key
if the item is value-like.
"""
self.mark()
with self._state as state:
while True:
c = self._current
if c == "\n":
# Found a newline; Return all whitespace found up to this point.
self.inc()
return None, Whitespace(self.extract())
elif c in " \t\r":
# Skip whitespace.
if not self.inc():
return None, Whitespace(self.extract())
elif c == "#":
# Found a comment, parse it
indent = self.extract()
cws, comment, trail = self._parse_comment_trail()
return None, Comment(Trivia(indent, cws, comment, trail))
elif c == "[":
# Found a table, delegate to the calling function.
return
else:
                    # Beginning of a KV pair.
# Return to beginning of whitespace so it gets included
# as indentation for the KV about to be parsed.
state.restore = True
break
return self._parse_key_value(True)
def _parse_comment_trail(self): # type: () -> Tuple[str, str, str]
"""
Returns (comment_ws, comment, trail)
If there is no comment, comment_ws and comment will
simply be empty.
"""
if self.end():
return "", "", ""
comment = ""
comment_ws = ""
self.mark()
while True:
c = self._current
if c == "\n":
break
elif c == "#":
comment_ws = self.extract()
self.mark()
self.inc() # Skip #
# The comment itself
while not self.end() and not self._current.is_nl():
code = ord(self._current)
if code == CHR_DEL or code <= CTRL_CHAR_LIMIT and code != CTRL_I:
raise self.parse_error(InvalidControlChar, code, "comments")
if not self.inc():
break
comment = self.extract()
self.mark()
break
elif c in " \t\r":
self.inc()
else:
raise self.parse_error(UnexpectedCharError, c)
if self.end():
break
while self._current.is_spaces() and self.inc():
pass
if self._current == "\r":
self.inc()
if self._current == "\n":
self.inc()
trail = ""
if self._idx != self._marker or self._current.is_ws():
trail = self.extract()
return comment_ws, comment, trail
def _parse_key_value(self, parse_comment=False): # type: (bool) -> (Key, Item)
# Leading indent
self.mark()
while self._current.is_spaces() and self.inc():
pass
indent = self.extract()
# Key
key = self._parse_key()
self.mark()
found_equals = self._current == "="
while self._current.is_kv_sep() and self.inc():
if self._current == "=":
if found_equals:
raise self.parse_error(UnexpectedCharError, "=")
else:
found_equals = True
pass
if not key.sep:
key.sep = self.extract()
else:
key.sep += self.extract()
# Value
val = self._parse_value()
# Comment
if parse_comment:
cws, comment, trail = self._parse_comment_trail()
meta = val.trivia
if not meta.comment_ws:
meta.comment_ws = cws
meta.comment = comment
meta.trail = trail
else:
val.trivia.trail = ""
val.trivia.indent = indent
return key, val
def _parse_key(self): # type: () -> Key
"""
Parses a Key at the current position;
WS before the key must be exhausted first at the callsite.
"""
if self._current in "\"'":
return self._parse_quoted_key()
else:
return self._parse_bare_key()
def _parse_quoted_key(self): # type: () -> Key
"""
Parses a key enclosed in either single or double quotes.
"""
quote_style = self._current
key_type = None
dotted = False
for t in KeyType:
if t.value == quote_style:
key_type = t
break
if key_type is None:
raise RuntimeError("Should not have entered _parse_quoted_key()")
self.inc()
self.mark()
while self._current != quote_style and self.inc():
pass
key = self.extract()
if self._current == ".":
self.inc()
dotted = True
key += "." + self._parse_key().as_string()
key_type = KeyType.Bare
else:
self.inc()
return Key(key, key_type, "", dotted)
def _parse_bare_key(self): # type: () -> Key
"""
Parses a bare key.
"""
key_type = None
dotted = False
self.mark()
while (
self._current.is_bare_key_char() or self._current.is_spaces()
) and self.inc():
pass
original = self.extract()
key = original.strip()
if not key:
# Empty key
raise self.parse_error(ParseError, "Empty key found")
if " " in key:
# Bare key with spaces in it
raise self.parse_error(ParseError, 'Invalid key "{}"'.format(key))
if self._current == ".":
self.inc()
dotted = True
original += "." + self._parse_key().as_string()
key = original.strip()
key_type = KeyType.Bare
return Key(key, key_type, "", dotted, original=original)
def _handle_dotted_key(
self, container, key, value
): # type: (Union[Container, Table], Key, Any) -> None
names = tuple(self._split_table_name(key.as_string()))
name = names[0]
name._dotted = True
if name in container:
if not isinstance(value, Table):
table = Table(Container(True), Trivia(), False, is_super_table=True)
_table = table
for i, _name in enumerate(names[1:]):
if i == len(names) - 2:
_name.sep = key.sep
_table.append(_name, value)
else:
_name._dotted = True
_table.append(
_name,
Table(
Container(True),
Trivia(),
False,
is_super_table=i < len(names) - 2,
),
)
_table = _table[_name]
value = table
container.append(name, value)
return
else:
table = Table(Container(True), Trivia(), False, is_super_table=True)
if isinstance(container, Table):
container.raw_append(name, table)
else:
container.append(name, table)
for i, _name in enumerate(names[1:]):
if i == len(names) - 2:
_name.sep = key.sep
table.append(_name, value)
else:
_name._dotted = True
if _name in table.value:
table = table.value[_name]
else:
table.append(
_name,
Table(
Container(True),
Trivia(),
False,
is_super_table=i < len(names) - 2,
),
)
table = table[_name]
def _parse_value(self): # type: () -> Item
"""
Attempts to parse a value at the current position.
"""
self.mark()
c = self._current
trivia = Trivia()
if c == StringType.SLB.value:
return self._parse_basic_string()
elif c == StringType.SLL.value:
return self._parse_literal_string()
elif c == BoolType.TRUE.value[0]:
return self._parse_true()
elif c == BoolType.FALSE.value[0]:
return self._parse_false()
elif c == "[":
return self._parse_array()
elif c == "{":
return self._parse_inline_table()
elif c in "+-" or self._peek(4) in {
"+inf",
"-inf",
"inf",
"+nan",
"-nan",
"nan",
}:
# Number
while self._current not in " \t\n\r#,]}" and self.inc():
pass
raw = self.extract()
item = self._parse_number(raw, trivia)
if item is not None:
return item
raise self.parse_error(InvalidNumberError)
elif c in string.digits:
# Integer, Float, Date, Time or DateTime
while self._current not in " \t\n\r#,]}" and self.inc():
pass
raw = self.extract()
m = RFC_3339_LOOSE.match(raw)
if m:
if m.group(1) and m.group(5):
# datetime
try:
dt = parse_rfc3339(raw)
return DateTime(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dt.tzinfo,
trivia,
raw,
)
except ValueError:
raise self.parse_error(InvalidDateTimeError)
if m.group(1):
try:
dt = parse_rfc3339(raw)
date = Date(dt.year, dt.month, dt.day, trivia, raw)
self.mark()
while self._current not in "\t\n\r#,]}" and self.inc():
pass
time_raw = self.extract()
if not time_raw.strip():
trivia.comment_ws = time_raw
return date
dt = parse_rfc3339(raw + time_raw)
return DateTime(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dt.tzinfo,
trivia,
raw + time_raw,
)
except ValueError:
raise self.parse_error(InvalidDateError)
if m.group(5):
try:
t = parse_rfc3339(raw)
return Time(
t.hour,
t.minute,
t.second,
t.microsecond,
t.tzinfo,
trivia,
raw,
)
except ValueError:
raise self.parse_error(InvalidTimeError)
item = self._parse_number(raw, trivia)
if item is not None:
return item
raise self.parse_error(InvalidNumberError)
else:
raise self.parse_error(UnexpectedCharError, c)
def _parse_true(self):
return self._parse_bool(BoolType.TRUE)
def _parse_false(self):
return self._parse_bool(BoolType.FALSE)
def _parse_bool(self, style): # type: (BoolType) -> Bool
with self._state:
style = BoolType(style)
# only keep parsing for bool if the characters match the style
# try consuming rest of chars in style
for c in style:
self.consume(c, min=1, max=1)
return Bool(style, Trivia())
def _parse_array(self): # type: () -> Array
# Consume opening bracket, EOF here is an issue (middle of array)
self.inc(exception=UnexpectedEofError)
elems = [] # type: List[Item]
prev_value = None
while True:
# consume whitespace
mark = self._idx
self.consume(TOMLChar.SPACES)
newline = self.consume(TOMLChar.NL)
indent = self._src[mark : self._idx]
if newline:
elems.append(Whitespace(indent))
continue
# consume comment
if self._current == "#":
cws, comment, trail = self._parse_comment_trail()
elems.append(Comment(Trivia(indent, cws, comment, trail)))
continue
# consume indent
if indent:
elems.append(Whitespace(indent))
continue
# consume value
if not prev_value:
try:
elems.append(self._parse_value())
prev_value = True
continue
except UnexpectedCharError:
pass
# consume comma
if prev_value and self._current == ",":
self.inc(exception=UnexpectedEofError)
elems.append(Whitespace(","))
prev_value = False
continue
# consume closing bracket
if self._current == "]":
# consume closing bracket, EOF here doesn't matter
self.inc()
break
raise self.parse_error(UnexpectedCharError, self._current)
try:
res = Array(elems, Trivia())
except ValueError:
pass
else:
return res
def _parse_inline_table(self): # type: () -> InlineTable
# consume opening bracket, EOF here is an issue (middle of array)
self.inc(exception=UnexpectedEofError)
elems = Container(True)
trailing_comma = None
while True:
# consume leading whitespace
mark = self._idx
self.consume(TOMLChar.SPACES)
raw = self._src[mark : self._idx]
if raw:
elems.add(Whitespace(raw))
if not trailing_comma:
# None: empty inline table
# False: previous key-value pair was not followed by a comma
if self._current == "}":
# consume closing bracket, EOF here doesn't matter
self.inc()
break
if (
trailing_comma is False
or trailing_comma is None
and self._current == ","
):
# Either the previous key-value pair was not followed by a comma
# or the table has an unexpected leading comma.
raise self.parse_error(UnexpectedCharError, self._current)
else:
# True: previous key-value pair was followed by a comma
if self._current == "}" or self._current == ",":
raise self.parse_error(UnexpectedCharError, self._current)
key, val = self._parse_key_value(False)
if key.is_dotted():
self._handle_dotted_key(elems, key, val)
else:
elems.add(key, val)
# consume trailing whitespace
mark = self._idx
self.consume(TOMLChar.SPACES)
raw = self._src[mark : self._idx]
if raw:
elems.add(Whitespace(raw))
# consume trailing comma
trailing_comma = self._current == ","
if trailing_comma:
# consume closing bracket, EOF here is an issue (middle of inline table)
self.inc(exception=UnexpectedEofError)
return InlineTable(elems, Trivia())
def _parse_number(self, raw, trivia): # type: (str, Trivia) -> Optional[Item]
# Leading zeros are not allowed
sign = ""
if raw.startswith(("+", "-")):
sign = raw[0]
raw = raw[1:]
if (
len(raw) > 1
and raw.startswith("0")
and not raw.startswith(("0.", "0o", "0x", "0b", "0e"))
):
return
if raw.startswith(("0o", "0x", "0b")) and sign:
return
digits = "[0-9]"
base = 10
if raw.startswith("0b"):
digits = "[01]"
base = 2
elif raw.startswith("0o"):
digits = "[0-7]"
base = 8
elif raw.startswith("0x"):
digits = "[0-9a-f]"
base = 16
# Underscores should be surrounded by digits
clean = re.sub("(?i)(?<={})_(?={})".format(digits, digits), "", raw)
if "_" in clean:
return
if clean.endswith("."):
return
try:
return Integer(int(sign + clean, base), trivia, sign + raw)
except ValueError:
try:
return Float(float(sign + clean), trivia, sign + raw)
except ValueError:
return
def _parse_literal_string(self): # type: () -> String
with self._state:
return self._parse_string(StringType.SLL)
def _parse_basic_string(self): # type: () -> String
with self._state:
return self._parse_string(StringType.SLB)
def _parse_escaped_char(self, multiline):
if multiline and self._current.is_ws():
# When the last non-whitespace character on a line is
# a \, it will be trimmed along with all whitespace
# (including newlines) up to the next non-whitespace
# character or closing delimiter.
# """\
# hello \
# world"""
tmp = ""
while self._current.is_ws():
tmp += self._current
# consume the whitespace, EOF here is an issue
# (middle of string)
self.inc(exception=UnexpectedEofError)
continue
# the escape followed by whitespace must have a newline
# before any other chars
if "\n" not in tmp:
raise self.parse_error(InvalidCharInStringError, self._current)
return ""
if self._current in _escaped:
c = _escaped[self._current]
# consume this char, EOF here is an issue (middle of string)
self.inc(exception=UnexpectedEofError)
return c
if self._current in {"u", "U"}:
# this needs to be a unicode
u, ue = self._peek_unicode(self._current == "U")
if u is not None:
# consume the U char and the unicode value
self.inc_n(len(ue) + 1)
return u
raise self.parse_error(InvalidUnicodeValueError)
raise self.parse_error(InvalidCharInStringError, self._current)
def _parse_string(self, delim): # type: (StringType) -> String
# only keep parsing for string if the current character matches the delim
if self._current != delim.unit:
raise self.parse_error(
InternalParserError,
"Invalid character for string type {}".format(delim),
)
# consume the opening/first delim, EOF here is an issue
# (middle of string or middle of delim)
self.inc(exception=UnexpectedEofError)
if self._current == delim.unit:
# consume the closing/second delim, we do not care if EOF occurs as
# that would simply imply an empty single line string
if not self.inc() or self._current != delim.unit:
# Empty string
return String(delim, "", "", Trivia())
# consume the third delim, EOF here is an issue (middle of string)
self.inc(exception=UnexpectedEofError)
delim = delim.toggle() # convert delim to multi delim
self.mark() # to extract the original string with whitespace and all
value = ""
# A newline immediately following the opening delimiter will be trimmed.
if delim.is_multiline() and self._current == "\n":
# consume the newline, EOF here is an issue (middle of string)
self.inc(exception=UnexpectedEofError)
        escaped = False # whether the previous char was ESCAPE
while True:
code = ord(self._current)
if (
delim.is_singleline()
and not escaped
and (code == CHR_DEL or code <= CTRL_CHAR_LIMIT and code != CTRL_I)
):
raise self.parse_error(InvalidControlChar, code, "strings")
elif (
delim.is_multiline()
and not escaped
and (
code == CHR_DEL
or code <= CTRL_CHAR_LIMIT
and code not in [CTRL_I, CTRL_J, CTRL_M]
)
):
raise self.parse_error(InvalidControlChar, code, "strings")
elif not escaped and self._current == delim.unit:
# try to process current as a closing delim
original = self.extract()
close = ""
if delim.is_multiline():
# Consume the delimiters to see if we are at the end of the string
close = ""
while self._current == delim.unit:
close += self._current
self.inc()
if len(close) < 3:
# Not a triple quote, leave in result as-is.
# Adding back the characters we already consumed
value += close
continue
if len(close) == 3:
# We are at the end of the string
return String(delim, value, original, Trivia())
if len(close) >= 6:
raise self.parse_error(InvalidCharInStringError, self._current)
value += close[:-3]
original += close[:-3]
return String(delim, value, original, Trivia())
else:
# consume the closing delim, we do not care if EOF occurs as
# that would simply imply the end of self._src
self.inc()
return String(delim, value, original, Trivia())
elif delim.is_basic() and escaped:
# attempt to parse the current char as an escaped value, an exception
# is raised if this fails
value += self._parse_escaped_char(delim.is_multiline())
# no longer escaped
escaped = False
elif delim.is_basic() and self._current == "\\":
# the next char is being escaped
escaped = True
# consume this char, EOF here is an issue (middle of string)
self.inc(exception=UnexpectedEofError)
else:
# this is either a literal string where we keep everything as is,
# or this is not a special escaped char in a basic string
value += self._current
# consume this char, EOF here is an issue (middle of string)
self.inc(exception=UnexpectedEofError)
def _parse_table(
self, parent_name=None, parent=None
): # type: (Optional[str], Optional[Table]) -> Tuple[Key, Union[Table, AoT]]
"""
Parses a table element.
"""
if self._current != "[":
raise self.parse_error(
InternalParserError, "_parse_table() called on non-bracket character."
)
indent = self.extract()
self.inc() # Skip opening bracket
if self.end():
raise self.parse_error(UnexpectedEofError)
is_aot = False
if self._current == "[":
if not self.inc():
raise self.parse_error(UnexpectedEofError)
is_aot = True
# Consume any whitespace
self.mark()
while self._current.is_spaces() and self.inc():
pass
ws_prefix = self.extract()
# Key
if self._current in [StringType.SLL.value, StringType.SLB.value]:
delimiter = (
StringType.SLL
if self._current == StringType.SLL.value
else StringType.SLB
)
name = self._parse_string(delimiter)
name = "{delimiter}{name}{delimiter}".format(
delimiter=delimiter.value, name=name
)
self.mark()
while self._current != "]" and self.inc():
if self.end():
raise self.parse_error(UnexpectedEofError)
pass
ws_suffix = self.extract()
name += ws_suffix
else:
self.mark()
while self._current != "]" and self.inc():
if self.end():
raise self.parse_error(UnexpectedEofError)
pass
name = self.extract()
name = ws_prefix + name
if not name.strip():
raise self.parse_error(EmptyTableNameError)
key = Key(name, sep="")
name_parts = tuple(self._split_table_name(name))
if any(" " in part.key.strip() and part.is_bare() for part in name_parts):
raise self.parse_error(ParseError, 'Invalid table name "{}"'.format(name))
missing_table = False
if parent_name:
parent_name_parts = tuple(self._split_table_name(parent_name))
else:
parent_name_parts = tuple()
if len(name_parts) > len(parent_name_parts) + 1:
missing_table = True
name_parts = name_parts[len(parent_name_parts) :]
values = Container(True)
self.inc() # Skip closing bracket
if is_aot:
# TODO: Verify close bracket
self.inc()
cws, comment, trail = self._parse_comment_trail()
result = Null()
table = Table(
values,
Trivia(indent, cws, comment, trail),
is_aot,
name=name,
display_name=name,
)
if len(name_parts) > 1:
if missing_table:
# Missing super table
# i.e. a table initialized like this: [foo.bar]
# without initializing [foo]
#
# So we have to create the parent tables
table = Table(
Container(True),
Trivia(indent, cws, comment, trail),
is_aot and name_parts[0].key in self._aot_stack,
is_super_table=True,
name=name_parts[0].key,
)
result = table
key = name_parts[0]
for i, _name in enumerate(name_parts[1:]):
if _name in table:
child = table[_name]
else:
child = Table(
Container(True),
Trivia(indent, cws, comment, trail),
is_aot and i == len(name_parts[1:]) - 1,
is_super_table=i < len(name_parts[1:]) - 1,
name=_name.key,
display_name=name if i == len(name_parts[1:]) - 1 else None,
)
if is_aot and i == len(name_parts[1:]) - 1:
table.append(_name, AoT([child], name=table.name, parsed=True))
else:
table.append(_name, child)
table = child
values = table.value
else:
if name_parts:
key = name_parts[0]
while not self.end():
item = self._parse_item()
if item:
_key, item = item
if not self._merge_ws(item, values):
if _key is not None and _key.is_dotted():
self._handle_dotted_key(table, _key, item)
else:
table.raw_append(_key, item)
else:
if self._current == "[":
is_aot_next, name_next = self._peek_table()
if self._is_child(name, name_next):
key_next, table_next = self._parse_table(name, table)
table.raw_append(key_next, table_next)
# Picking up any sibling
while not self.end():
_, name_next = self._peek_table()
if not self._is_child(name, name_next):
break
key_next, table_next = self._parse_table(name, table)
table.raw_append(key_next, table_next)
break
else:
raise self.parse_error(
InternalParserError,
"_parse_item() returned None on a non-bracket character.",
)
if isinstance(result, Null):
result = table
if is_aot and (not self._aot_stack or name != self._aot_stack[-1]):
result = self._parse_aot(result, name)
return key, result
def _peek_table(self): # type: () -> Tuple[bool, str]
"""
Peeks ahead non-intrusively by cloning then restoring the
initial state of the parser.
Returns the name of the table about to be parsed,
as well as whether it is part of an AoT.
"""
# we always want to restore after exiting this scope
with self._state(save_marker=True, restore=True):
if self._current != "[":
raise self.parse_error(
InternalParserError,
"_peek_table() entered on non-bracket character",
)
# AoT
self.inc()
is_aot = False
if self._current == "[":
self.inc()
is_aot = True
self.mark()
while self._current != "]" and self.inc():
table_name = self.extract()
return is_aot, table_name
def _parse_aot(self, first, name_first): # type: (Table, str) -> AoT
"""
Parses all siblings of the provided table first and bundles them into
an AoT.
"""
payload = [first]
self._aot_stack.append(name_first)
while not self.end():
is_aot_next, name_next = self._peek_table()
if is_aot_next and name_next == name_first:
_, table = self._parse_table(name_first)
payload.append(table)
else:
break
self._aot_stack.pop()
return AoT(payload, parsed=True)
def _peek(self, n): # type: (int) -> str
"""
Peeks ahead n characters.
n is the max number of characters that will be peeked.
"""
# we always want to restore after exiting this scope
with self._state(restore=True):
buf = ""
for _ in range(n):
if self._current not in " \t\n\r#,]}":
buf += self._current
self.inc()
continue
break
return buf
def _peek_unicode(
self, is_long
): # type: (bool) -> Tuple[Optional[str], Optional[str]]
"""
Peeks ahead non-intrusively by cloning then restoring the
initial state of the parser.
        Returns the unicode value if it's a valid one, else None.
"""
# we always want to restore after exiting this scope
with self._state(save_marker=True, restore=True):
if self._current not in {"u", "U"}:
raise self.parse_error(
InternalParserError, "_peek_unicode() entered on non-unicode value"
)
self.inc() # Dropping prefix
self.mark()
if is_long:
chars = 8
else:
chars = 4
if not self.inc_n(chars):
value, extracted = None, None
else:
extracted = self.extract()
if extracted[0].lower() == "d" and extracted[1].strip("01234567"):
return None, None
try:
value = chr(int(extracted, 16))
except (ValueError, OverflowError):
value = None
return value, extracted
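
# --- Illustrative sketch (not part of the vendored tomlkit source). ---
# A minimal round trip through the Parser on a hand-written TOML snippet; the
# document content is made up, and this guard is only reachable when the
# module is executed as part of its package (the relative imports above
# require it).
if __name__ == "__main__":
    doc = Parser('title = "example"\n\n[owner]\nname = "tom"\n').parse()
    print(doc["title"])  # -> example
    print(doc["owner"]["name"])  # -> tom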
| python-poetry/poetry-core | src/poetry/core/_vendor/tomlkit/parser.py | Python | mit | 42,075 |
from setuptools import setup, find_packages
setup(
name="python-turbovote",
version="0.0.2",
author="Fight for the Future",
author_email="",
packages=find_packages(),
license="LICENSE.txt",
description="Python wrapper for the TurboVote API",
long_description=open("README.txt").read(),
install_requires=[
"requests >= 0.10.4"],
)
| fightforthefuture/python-turbovote | setup.py | Python | gpl-3.0 | 375 |
import pytest
import pymasapi.client as client
class Test_Asset_Liability:
def setup_class(cls):
cls.c = client.Client()
def teardown_method(cls):
cls.c = None
@pytest.mark.parametrize("period,limit", [("m", 5), ("y", 5)])
def test_mas_asset_liability(cls, period, limit):
''' testing monthly/annual MAS Asset/Liability stats '''
data = cls.c.mas_asset_liability(period, limit)
assert data is not None, "data should not be None"
| laisee/pymasapi | test/test_pymaspi_mas_assetliab.py | Python | mit | 489 |
# -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use sendgrid to send emails
- Use MEMCACHIER on Heroku
'''
from configurations import values
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
    # TODO: Fix this; this import is attempted even when running in Dev.
pass
from .common import Common
class Production(Common):
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# INSTALLED_APPS
INSTALLED_APPS = Common.INSTALLED_APPS
# END INSTALLED_APPS
# SECRET KEY
SECRET_KEY = values.SecretValue()
# END SECRET KEY
# django-secure
INSTALLED_APPS += ("djangosecure", )
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
SECURE_FRAME_DENY = values.BooleanValue(True)
SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
SESSION_COOKIE_SECURE = values.BooleanValue(False)
SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
SECURE_SSL_REDIRECT = values.BooleanValue(True)
# end django-secure
# SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = values.SecretValue()
AWS_SECRET_ACCESS_KEY = values.SecretValue()
AWS_STORAGE_BUCKET_NAME = values.SecretValue()
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
# see: https://github.com/antonagestam/collectfast
AWS_PRELOAD_METADATA = True
INSTALLED_APPS += ('collectfast', )
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
AWS_HEADERS = {
'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY)
}
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# END STORAGE CONFIGURATION
# EMAIL
DEFAULT_FROM_EMAIL = values.Value('portfolio <[email protected]>')
EMAIL_HOST = values.Value('smtp.sendgrid.com')
EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
EMAIL_SUBJECT_PREFIX = values.Value('[portfolio] ', environ_name="EMAIL_SUBJECT_PREFIX")
EMAIL_USE_TLS = True
SERVER_EMAIL = EMAIL_HOST_USER
# END EMAIL
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
# END TEMPLATE CONFIGURATION
# CACHING
    # Only do this here because, thanks to django-pylibmc-sasl and pylibmc,
    # memcacheify is painful to install on Windows.
try:
# See: https://github.com/rdegges/django-heroku-memcacheify
from memcacheify import memcacheify
CACHES = memcacheify()
except ImportError:
CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
# END CACHING
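    # Hedged note (not from the original settings): with django-configurations,
    # the values.SecretValue() entries above are read from environment
    # variables, by default prefixed with DJANGO_, e.g. DJANGO_SECRET_KEY,
    # DJANGO_AWS_ACCESS_KEY_ID, DJANGO_AWS_SECRET_ACCESS_KEY and
    # DJANGO_AWS_STORAGE_BUCKET_NAME, plus SENDGRID_USERNAME and
    # SENDGRID_PASSWORD, which are mapped explicitly above.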
    # Your production stuff: Below this line define 3rd party library settings
| Innogator/portfolio | portfolio/config/production.py | Python | bsd-3-clause | 4,349 |
prediction = simple_seq2seq(first_test_sequence).numpy()
print("prediction shape:", prediction.shape)
# Let's use `argmax` to extract the predicted token ids at each step:
predicted_token_ids = prediction[0].argmax(-1)
print("prediction token ids:", predicted_token_ids)
# We can use the shared reverse vocabulary to map
# this back to the string representation of the tokens,
# as well as removing Padding and EOS symbols
predicted_numbers = [rev_shared_vocab[token_id] for token_id in predicted_token_ids
if token_id not in (shared_vocab[PAD], shared_vocab[EOS])]
print("predicted number:", "".join(predicted_numbers))
print("test number:", num_test[0])
# The model successfully predicted the test sequence.
# However, we provided the full sequence as input, including the whole
# solution (except for the last number). Under real test conditions, one
# wouldn't have the full input sequence, but only what comes before the "GO"
# symbol.
| m2dsupsdlclass/lectures-labs | labs/07_seq2seq/solutions/interpret_output.py | Python | mit | 969 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras regularizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
DATA_DIM = 5
NUM_CLASSES = 2
class KerasRegularizersTest(keras_parameterized.TestCase):
def create_model(self, kernel_regularizer=None, activity_regularizer=None):
model = keras.models.Sequential()
model.add(keras.layers.Dense(NUM_CLASSES,
kernel_regularizer=kernel_regularizer,
activity_regularizer=activity_regularizer,
input_shape=(DATA_DIM,)))
return model
def get_data(self):
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=10,
test_samples=10,
input_shape=(DATA_DIM,),
num_classes=NUM_CLASSES)
y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)
return (x_train, y_train), (x_test, y_test)
@parameterized.named_parameters([
('l1', keras.regularizers.l1()),
('l2', keras.regularizers.l2()),
('l1_l2', keras.regularizers.l1_l2()),
])
def test_kernel_regularization(self, regularizer):
with self.cached_session():
(x_train, y_train), _ = self.get_data()
model = self.create_model(kernel_regularizer=regularizer)
model.compile(loss='categorical_crossentropy', optimizer='sgd')
assert len(model.losses) == 1
model.fit(x_train, y_train, batch_size=10,
epochs=1, verbose=0)
@parameterized.named_parameters([
('l1', keras.regularizers.l1()),
('l2', keras.regularizers.l2()),
('l2_zero', keras.regularizers.l2(0.)),
])
@test_util.deprecated_graph_mode_only
def test_activity_regularization(self, regularizer):
with self.cached_session():
(x_train, y_train), _ = self.get_data()
model = self.create_model(activity_regularizer=regularizer)
model.compile(loss='categorical_crossentropy', optimizer='sgd')
assert len(model.losses) == 1
model.fit(x_train, y_train, batch_size=10,
epochs=1, verbose=0)
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_zero_regularization(self):
# Verifies that training with zero regularization works.
x, y = np.ones((10, 10)), np.ones((10, 3))
model = testing_utils.get_model_from_layers(
[keras.layers.Dense(3, kernel_regularizer=keras.regularizers.l2(0))],
input_shape=(10,))
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, batch_size=5, epochs=1)
if __name__ == '__main__':
test.main()
| kevin-coder/tensorflow-fork | tensorflow/python/keras/regularizers_test.py | Python | apache-2.0 | 3,702 |
#! /usr/bin/env python
'''
Demonstrates how a simple group can be used as a one-to-many
relationship using a column family (here, a time-ordered event log)
'''
import util
from pycassa.columnfamily import ColumnFamily
from pycassa.types import *
import time
from threading import Thread
import json
import datetime
import random
# Load event data from data/events.csv
def loadData():
events = util.readCSV('data/events.csv')
eventsList = []
for event in events:
e = {
'user': event[0],
'page': event[1],
'event': event[2],
'element': event[3]
}
eventsList.append(e)
for i in range(2):
t = Thread(target=randomEvent, args=(i, eventsList))
t.start()
def randomEvent(i, events):
event = random.choice(events)
con = util.getConnection()
eventsCF = ColumnFamily(con, 'event_log')
for j in range(50):
event = random.choice(events)
rowkey = event['user']
timestamp = datetime.datetime.utcnow()
colval = json.dumps(event)
print '[Thread:{3}] Inserting: [{0}=> {{{1}:{2}}}]'.format(rowkey, timestamp, colval, i)
eventsCF.insert(rowkey, {timestamp: colval})
time.sleep(0.1) #100 milliseconds
    print 'finished insertion.'
con.dispose()
def getByTag(tag):
print '''-- MOVIES GROUPED BY USER FOR A GIVE TAG --'''
print '''tag: {}'''.format(tag)
con = util.getConnection()
tagCF = ColumnFamily(con, 'tag_videos_composite')
movies = tagCF.get(tag.strip().lower())
for key, val in movies.iteritems():
compositeCol = key
print '([{0}],[{1}]) => {2}'.format(compositeCol[0], compositeCol[1], val)
movieSlice = tagCF.get(tag.strip().lower(), column_start=("Kara", "The Croods:Kara"), column_finish=("Sally","Gx" ))
#movieSlice = tagCF.get(tag.strip().lower(), column_start=("Kara", ), column_finish=(("Leo Scott",False),))
print '-- SLICES --'
for key, val in movieSlice.iteritems():
compositeCol = key
print '([{0}],[{1}]) => {2}'.format(compositeCol[0], compositeCol[1], val)
con.dispose()
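
def getRecentEvents(user, count=10):
    '''Illustrative sketch (not part of the original example): reads back the
    most recent events for a user by slicing the time-ordered event_log row
    in reverse. The user key and count are arbitrary.'''
    con = util.getConnection()
    eventsCF = ColumnFamily(con, 'event_log')
    # Columns are keyed by timestamp, so a reversed slice yields newest first.
    events = eventsCF.get(user, column_count=count, column_reversed=True)
    for ts, payload in events.iteritems():
        print '{0} => {1}'.format(ts, payload)
    con.dispose()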
if __name__ == '__main__':
loadData()
#getByTag('action')
| naishe/mastering_cassandra | chapter_3/ch3_2.3.1_time_series1.py | Python | apache-2.0 | 2,069 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_bert import BertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
"bert-base-chinese",
"bert-base-german-cased",
"bert-large-uncased-whole-word-masking",
"bert-large-cased-whole-word-masking",
"bert-large-uncased-whole-word-masking-finetuned-squad",
"bert-large-cased-whole-word-masking-finetuned-squad",
"bert-base-cased-finetuned-mrpc",
"bert-base-german-dbmdz-cased",
"bert-base-german-dbmdz-uncased",
"cl-tohoku/bert-base-japanese",
"cl-tohoku/bert-base-japanese-whole-word-masking",
"cl-tohoku/bert-base-japanese-char",
"cl-tohoku/bert-base-japanese-char-whole-word-masking",
"TurkuNLP/bert-base-finnish-cased-v1",
"TurkuNLP/bert-base-finnish-uncased-v1",
"wietsedv/bert-base-dutch-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
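# A minimal illustrative sketch (not part of the upstream module) of how the TF-checkpoint
# loader above can be driven directly. The checkpoint path is a placeholder assumption, not
# a file shipped with this code, and TensorFlow must be installed for the call to succeed.
def _sketch_load_tf_checkpoint(tf_checkpoint_path="/path/to/bert_model.ckpt"):
    config = BertConfig()  # default BERT-base hyperparameters
    model = BertForPreTraining(config)  # defined further down in this module
    # Copies every matching TF variable into the freshly initialized PyTorch parameters.
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    return model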
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
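# A minimal illustrative sketch (not part of the upstream module): pushing token ids through
# BertEmbeddings with an arbitrary tiny configuration to show the shape contract
# (batch_size, seq_len) -> (batch_size, seq_len, hidden_size).
def _sketch_bert_embeddings_shapes():
    config = BertConfig(
        vocab_size=100, hidden_size=16, max_position_embeddings=32,
        type_vocab_size=2, hidden_dropout_prob=0.0, pad_token_id=0,
    )
    embeddings = BertEmbeddings(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 8))  # batch of 2, sequence length 8
    output = embeddings(input_ids=input_ids)
    assert output.shape == (2, 8, config.hidden_size)
    return output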
class BertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
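# A minimal illustrative sketch (not part of the upstream module): running BertSelfAttention
# on random hidden states with an arbitrary tiny configuration. The context output keeps the
# (batch, seq_len, hidden_size) shape while the attention probabilities come back as
# (batch, num_heads, seq_len, seq_len).
def _sketch_self_attention_shapes():
    config = BertConfig(hidden_size=32, num_attention_heads=4, attention_probs_dropout_prob=0.0)
    attention = BertSelfAttention(config)
    hidden_states = torch.randn(2, 8, config.hidden_size)
    context, probs = attention(hidden_states, output_attentions=True)
    assert context.shape == (2, 8, 32)
    assert probs.shape == (2, 4, 8, 8)
    return context, probs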
class BertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
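# A minimal illustrative sketch (not part of the upstream module): pruning a single head from
# a BertAttention block, which is what _prune_heads on the full model does per layer. With an
# arbitrary 4-head, 32-dim configuration, removing one head leaves 3 heads and a 24-dim projection.
def _sketch_prune_one_head():
    config = BertConfig(hidden_size=32, num_attention_heads=4)
    attention = BertAttention(config)
    attention.prune_heads([0])
    assert attention.self.num_attention_heads == 3
    assert attention.self.all_head_size == 3 * attention.self.attention_head_size
    return attention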
class BertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
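# A minimal illustrative sketch (not part of the upstream module): stacking two BertLayer
# blocks via BertEncoder with an arbitrary tiny configuration and checking that the final
# hidden states keep the (batch, seq_len, hidden_size) shape.
def _sketch_encoder_shapes():
    config = BertConfig(
        hidden_size=32, num_attention_heads=4, intermediate_size=64, num_hidden_layers=2,
    )
    encoder = BertEncoder(config)
    hidden_states = torch.randn(2, 8, config.hidden_size)
    outputs = encoder(hidden_states, return_dict=True)
    assert outputs.last_hidden_state.shape == (2, 8, config.hidden_size)
    return outputs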
class BertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = BertConfig
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@dataclass
class BertForPreTrainingOutput(ModelOutput):
"""
Output type of :class:`~transformers.BertForPreTraining`.
Args:
loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sentence prediction
(classification) loss.
prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
            Prediction scores of the next sentence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: torch.FloatTensor = None
seq_relationship_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
BERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING,
)
class BertModel(BertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
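# A minimal illustrative sketch (not part of the upstream module), mirroring the doctest
# examples used elsewhere in this file: encoding one sentence with the bare BertModel. It
# assumes the `bert-base-uncased` weights can be downloaded; BertTokenizer lives in a
# sibling module of this package.
def _sketch_bert_model_usage():
    from transformers import BertTokenizer  # local import keeps the sketch self-contained
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model = BertModel.from_pretrained("bert-base-uncased")
    inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state  # (batch, seq_len, hidden_size)
    pooled_output = outputs.pooler_output          # (batch, hidden_size), tanh of the [CLS] state
    return last_hidden_state, pooled_output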
@add_start_docstrings(
"""
Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
sentence prediction (classification)` head.
""",
BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
next_sentence_label=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
            Labels for computing the next sentence prediction (classification) loss. Input should be a sequence pair
(see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Returns:
Example::
>>> from transformers import BertTokenizer, BertForPreTraining
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = BertForPreTraining.from_pretrained('bert-base-uncased')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
total_loss = None
if labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
if not return_dict:
output = (prediction_scores, seq_relationship_score) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return BertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""Bert Model with a `language modeling` head on top for CLM fine-tuning. """, BERT_START_DOCSTRING
)
class BertLMHeadModel(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`")
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
Returns:
Example::
>>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
>>> config = BertConfig.from_pretrained("bert-base-cased")
>>> config.is_decoder = True
>>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
effective_batch_size = input_shape[0]
# add a dummy token
assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
dummy_token = torch.full(
(effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
"""Bert Model with a `next sentence prediction (classification)` head on top. """,
BERT_START_DOCSTRING,
)
class BertForNextSentencePrediction(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the next sentence prediction (classification) loss. Input should be a sequence pair
(see ``input_ids`` docstring). Indices should be in ``[0, 1]``:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Returns:
Example::
>>> from transformers import BertTokenizer, BertForNextSentencePrediction
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt')
>>> outputs = model(**encoding, labels=torch.LongTensor([1]))
>>> logits = outputs.logits
>>> assert logits[0, 0] < logits[0, 1] # next sentence was random
"""
if "next_sentence_label" in kwargs:
warnings.warn(
"The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("next_sentence_label")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
seq_relationship_scores = self.cls(pooled_output)
next_sentence_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
if not return_dict:
output = (seq_relationship_scores,) + outputs[2:]
return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
return NextSentencePredictorOutput(
loss=next_sentence_loss,
logits=seq_relationship_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
""",
BERT_START_DOCSTRING,
)
class BertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss).
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
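# A minimal illustrative sketch (not part of the upstream module): attaching a freshly
# initialized 2-way classification head to the pretrained encoder and computing the
# single-label cross-entropy loss. The label value is arbitrary, and the head is untrained,
# so the logits are not meaningful until the model is fine-tuned.
def _sketch_sequence_classification_usage():
    from transformers import BertTokenizer
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
    inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
    labels = torch.tensor([1])  # one label per sequence in the batch
    outputs = model(**inputs, labels=labels)
    return outputs.loss, outputs.logits  # logits shape: (batch, num_labels)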
@add_start_docstrings(
"""
Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
BERT_START_DOCSTRING,
)
class BertForMultipleChoice(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
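# A minimal illustrative sketch (not part of the upstream module): shaping inputs for
# BertForMultipleChoice. Each choice is tokenized against the same prompt and the result is
# reshaped to (batch_size, num_choices, seq_len); the prompt/choice strings are arbitrary and
# the choice-scoring head is untrained on the base checkpoint.
def _sketch_multiple_choice_usage():
    from transformers import BertTokenizer
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model = BertForMultipleChoice.from_pretrained("bert-base-uncased")
    prompt = "The cat sat on the mat because"
    choices = ["it was tired.", "the mat sat on the cat."]
    encoding = tokenizer([prompt] * len(choices), choices, return_tensors="pt", padding=True)
    inputs = {key: tensor.unsqueeze(0) for key, tensor in encoding.items()}  # add the batch dimension
    logits = model(**inputs).logits  # (batch_size, num_choices)
    return logits.argmax(dim=-1)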
@add_start_docstrings(
"""
Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
BERT_START_DOCSTRING,
)
class BertForTokenClassification(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
BERT_START_DOCSTRING,
)
class BertForQuestionAnswering(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
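# qa_outputs emits two scores per token; split them into span-start and span-end logits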
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, the position tensors may carry an extra dimension; squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| huggingface/pytorch-transformers | src/transformers/models/bert/modeling_bert.py | Python | apache-2.0 | 77,484 |
import json
import os
import time
from urllib.parse import quote
def test_standard_requests():
""" Tests app controller methods. These tests should be
compartmentalized. Also these methods should be made to not retest
the behavior of the associated Manager class. """
from .test_utils import test_pulsar_app
with test_pulsar_app(test_conf={"extra_environ": {"REMOTE_ADDR": "127.101.101.98"}}) as app:
staging_directory = app.app.staging_directory
setup_response = app.post("/jobs?job_id=12345")
setup_config = json.loads(setup_response.body.decode("utf-8"))
assert setup_config["working_directory"].startswith(staging_directory)
outputs_directory = setup_config["outputs_directory"]
assert outputs_directory.startswith(staging_directory)
assert setup_config["path_separator"] == os.sep
job_id = setup_config["job_id"]
def test_upload(upload_type):
url = "/jobs/{}/files?name=input1&type={}".format(job_id, upload_type)
upload_input_response = app.post(url, "Test Contents")
upload_input_config = json.loads(upload_input_response.body.decode("utf-8"))
staged_input_path = upload_input_config["path"]
staged_input = open(staged_input_path)
try:
assert staged_input.read() == "Test Contents"
finally:
staged_input.close()
test_upload("input")
test_upload("tool")
test_output = open(os.path.join(outputs_directory, "test_output"), "w")
try:
test_output.write("Hello World!")
finally:
test_output.close()
download_response = app.get("/jobs/%s/files?name=test_output&type=output" % job_id)
assert download_response.body.decode("utf-8") == "Hello World!"
try:
app.get("/jobs/%s/files?name=test_output2&type=output" % job_id)
raise AssertionError() # Should throw exception
except Exception:
pass
command_line = quote("""python -c "import sys; sys.stdout.write('test_out')" """)
launch_response = app.post("/jobs/{}/submit?command_line={}".format(job_id, command_line))
assert launch_response.body.decode("utf-8") == 'OK'
# Hack: Call twice to ensure postprocessing occurs and has time to
# complete. Monitor thread should get this.
time.sleep(.3)
check_response = app.get("/jobs/%s/status" % job_id)
time.sleep(.3)
check_response = app.get("/jobs/%s/status" % job_id)
check_config = json.loads(check_response.body.decode("utf-8"))
assert check_config['returncode'] == 0
assert check_config['stdout'] == "test_out"
assert check_config['stderr'] == ""
kill_response = app.put("/jobs/%s/cancel" % job_id)
assert kill_response.body.decode("utf-8") == 'OK'
clean_response = app.delete("/jobs/%s" % job_id)
assert clean_response.body.decode("utf-8") == 'OK'
assert os.listdir(staging_directory) == []
| galaxyproject/pulsar | test/wsgi_app_test.py | Python | apache-2.0 | 3,083 |
# -*- coding: utf-8 -*-
'''
Flixnet Add-on
Copyright (C) 2016 Flixnet
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,json,urlparse,base64,random
from resources.lib.modules import client
from resources.lib.modules import control
class trailer:
def __init__(self):
self.base_link = 'http://www.youtube.com'
self.key_link = random.choice(['QUl6YVN5RDd2aFpDLTYta2habTVuYlVyLTZ0Q0JRQnZWcnFkeHNz', 'QUl6YVN5Q2RiNEFNenZpVG0yaHJhSFY3MXo2Nl9HNXBhM2ZvVXd3'])
self.key_link = '&key=%s' % base64.urlsafe_b64decode(self.key_link)
self.search_link = 'https://www.googleapis.com/youtube/v3/search?part=snippet&type=video&maxResults=5&q=%s'
self.youtube_search = 'https://www.googleapis.com/youtube/v3/search?q='
self.youtube_watch = 'http://www.youtube.com/watch?v=%s'
def play(self, name, url=None):
try:
url = self.worker(name, url)
if url == None: return
title = control.infoLabel('listitem.title')
if title == '': title = control.infoLabel('listitem.label')
icon = control.infoLabel('listitem.icon')
item = control.item(path=url, iconImage=icon, thumbnailImage=icon)
try: item.setArt({'icon': icon})
except: pass
item.setInfo(type='Video', infoLabels = {'title': title})
control.player.play(url, item)
except:
pass
def worker(self, name, url):
try:
if url.startswith(self.base_link):
url = self.resolve(url)
if url == None: raise Exception()
return url
elif not url.startswith('http://'):
url = self.youtube_watch % url
url = self.resolve(url)
if url == None: raise Exception()
return url
else:
raise Exception()
except:
query = name + ' Flixnet trailer'
query = self.youtube_search + query
url = self.search(query)
if url == None: return
return url
def search(self, url):
try:
query = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
apiLang = control.apiLanguage()['youtube']
if apiLang != 'en':
query += ' %s' % control.apiLanguage(True)['youtube']
url += "&relevanceLanguage=%s" % apiLang
url = self.search_link % urllib.quote_plus(query) + self.key_link
result = client.request(url)
items = json.loads(result)['items']
items = [(i['id']['videoId']) for i in items]
for url in items:
url = self.resolve(url)
if not url is None: return url
except:
return
def resolve(self, url):
try:
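# extract the video id from a '?v=' or path-style YouTube URL, dropping any trailing query parameters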
id = url.split('?v=')[-1].split('/')[-1].split('?')[0].split('&')[0]
result = client.request('http://www.youtube.com/watch?v=%s' % id)
message = client.parseDOM(result, 'div', attrs = {'id': 'unavailable-submessage'})
message = ''.join(message)
alert = client.parseDOM(result, 'div', attrs = {'id': 'watch7-notification-area'})
if len(alert) > 0: raise Exception()
if re.search('[a-zA-Z]', message): raise Exception()
url = 'plugin://plugin.video.youtube/play/?video_id=%s' % id
return url
except:
return
| azumimuo/family-xbmc-addon | plugin.video.showboxarize/resources/lib/modules/trailer.py | Python | gpl-2.0 | 4,137 |
# encoding: utf-8
# 2009 © Václav Šmilauer <[email protected]>
"""
Core functionality (Scene in c++), such as accessing bodies, materials, interactions. Specific functionality tests should go to engines.py or elsewhere, not here.
"""
import woo
import unittest
import random
import os
from minieigen import *
from math import *
import woo
from woo import utils
from woo.core import *
from woo.dem import *
from woo.pre import *
try: from woo.sparc import *
except: pass
try: from woo.gl import *
except: pass
try: from woo.voro import *
except: pass
try: from woo.cld import *
except: pass
try: from woo.qt import *
except: pass
import woo
woo.master.usesApi=10104
## TODO tests
class TestInteractions(unittest.TestCase): pass
class TestForce(unittest.TestCase): pass
class TestTags(unittest.TestCase): pass
class TestScene(unittest.TestCase):
def setUp(self):
self.scene=woo.core.Scene()
#def _testTags(self):
# 'Core: Scene.tags are str (not unicode) objects'
# S=self.scene
# S.tags['str']='asdfasd'
# S.tags['uni']=u'→ Σ'
# self.assertTrue(type(S.tags['str']==unicode))
# def tagError(S): S.tags['error']=234
# self.assertTrue(type(S.tags['uni']==unicode))
# self.assertRaises(TypeError,lambda: tagError(S))
class TestObjectInstantiation(unittest.TestCase):
def setUp(self):
self.t=woo.core.WooTestClass()
# usually disabled and will be removed completely
if hasattr(woo.core,'WooTestClassStatic'): self.ts=woo.core.WooTestClassStatic()
def testClassCtors(self):
"Core: correct types are instantiated"
# correct instances created with Foo() syntax
import woo.system
for r in woo.system.childClasses(woo.core.Object):
obj=r();
self.assertTrue(obj.__class__==r,'Failed for '+r.__name__)
def testRootDerivedCtors_attrs_few(self):
"Core: class ctor's attributes"
# attributes passed when using the Foo(attr1=value1,attr2=value2) syntax
gm=Shape(color=1.); self.assertTrue(gm.color==1.)
def testDeepcopy(self):
'Core: Object.deepcopy'
t=woo.core.WooTestClass()
t.mass=0.
t2=t.deepcopy(mass=1.0)
self.assertTrue(t2.mass==1.0 and t.mass==0.)
def testPyRunnerCtor(self):
"Core: PyRunner with positional args"
p1,p2,p3=PyRunner('one'),PyRunner(2,'two'),PyRunner(3,'three')
self.assertEqual((p1.command,p1.stepPeriod),('one',1))
self.assertEqual((p2.command,p2.stepPeriod),('two',2))
self.assertEqual((p3.command,p3.stepPeriod),('three',3))
self.assertRaises(ValueError,lambda: PyRunner(111)) # must be a string
self.assertRaises(ValueError,lambda: PyRunner(123,456)) # must be string and number
self.assertRaises(ValueError,lambda: PyRunner('ab','cd'))
self.assertRaises(ValueError,lambda: PyRunner(1,2,3))
def testDispatcherCtor(self):
"Core: dispatcher ctors with functors"
# dispatchers take list of their functors in the ctor
# same functors are collapsed in one
cld1=LawDispatcher([Law2_L6Geom_FrictPhys_IdealElPl(),Law2_L6Geom_FrictPhys_IdealElPl()]); self.assertTrue(len(cld1.functors)==1)
# two different make two different
cld2=LawDispatcher([Law2_L6Geom_FrictPhys_IdealElPl(),Law2_L6Geom_FrictPhys_LinEl6()]); self.assertTrue(len(cld2.functors)==2)
def testContactLoopCtor(self):
"Core: ContactLoop special ctor"
# ContactLoop takes 3 lists
id=ContactLoop([Cg2_Facet_Sphere_L6Geom(),Cg2_Sphere_Sphere_L6Geom()],[Cp2_FrictMat_FrictPhys()],[Law2_L6Geom_FrictPhys_IdealElPl()],)
self.assertTrue(len(id.geoDisp.functors)==2)
self.assertTrue(id.geoDisp.__class__==CGeomDispatcher)
self.assertTrue(id.phyDisp.functors[0].__class__==Cp2_FrictMat_FrictPhys)
self.assertTrue(id.lawDisp.functors[0].__class__==Law2_L6Geom_FrictPhys_IdealElPl)
def testParallelEngineCtor(self):
"Core: ParallelEngine special ctor"
pe=ParallelEngine([InsertionSortCollider(),[BoundDispatcher(),ForceResetter()]])
self.assertTrue(pe.slaves[0].__class__==InsertionSortCollider)
self.assertTrue(len(pe.slaves[1])==2)
pe.slaves=[]
self.assertTrue(len(pe.slaves)==0)
##
## testing incorrect operations that should raise exceptions
##
def testWrongFunctorType(self):
"Core: dispatcher and functor type mismatch is detected"
# dispatchers accept only correct functors
# pybind11: RuntimeError
self.assertRaises((TypeError,RuntimeError),lambda: LawDispatcher([Bo1_Sphere_Aabb()]))
def testInvalidAttr(self):
'Core: invalid attribute access raises AttributeError'
# accessing invalid attributes raises AttributeError
self.assertRaises(AttributeError,lambda: Sphere(attributeThatDoesntExist=42))
self.assertRaises(AttributeError,lambda: Sphere().attributeThatDoesntExist)
###
### shared ownership semantics
###
def testShared(self):
"Core: shared_ptr really shared"
m=woo.utils.defaultMaterial()
s1,s2=woo.utils.sphere((0,0,0),1,mat=m),woo.utils.sphere((0,0,0),2,mat=m)
s1.mat.young=2342333
self.assertTrue(s1.mat.young==s2.mat.young)
def testSharedAfterReload(self):
"Core: shared_ptr preserved when saving/loading"
S=Scene(fields=[DemField()])
m=woo.utils.defaultMaterial()
S.dem.par.add([woo.utils.sphere((0,0,0),1,mat=m),woo.utils.sphere((0,0,0),2,mat=m)])
S.saveTmp(quiet=True); S=Scene.loadTmp()
S.dem.par[0].mat.young=9087438484
self.assertTrue(S.dem.par[0].mat.young==S.dem.par[1].mat.young)
##
## attribute flags
##
def testNoSave(self):
'Core: Attr::noSave'
# update bound of the particle
t=self.t
i0=t.noSaveAttr
t.noSaveAttr=i0+10
t.saveTmp('t')
t2=woo.core.WooTestClass.loadTmp('t')
# loaded copy has the default value
self.assertTrue(t2.noSaveAttr==i0)
@unittest.skipIf(not hasattr(woo.core,'WooTestClassStatic'),'Built without static attrs')
def testNoSave_static(self):
'Core: Attr::noSave (static)'
ts=self.ts
ts.noSave=344
ts.namedEnum='one'
ts.saveTmp('ts')
ts.noSave=123
self.assertTrue(ts.noSave==123)
ts2=woo.core.WooTestClassStatic.loadTmp('ts')
self.assertTrue(ts2.noSave==123) # was not saved
self.assertTrue(ts2.namedEnum=='one') # was saved
# since it is static, self.ts is changed as well now
self.assertTrue(ts.namedEnum==ts2.namedEnum)
self.assertTrue(id(ts.namedEnum)==id(ts2.namedEnum))
def testReadonly(self):
'Core: Attr::readonly'
self.assertTrue(self.t.meaning42==42)
self.assertRaises(AttributeError,lambda: setattr(self.t,'meaning42',43))
@unittest.skipIf(not hasattr(woo.core,'WooTestClassStatic'),'Built without static attrs')
def testReadonly_static(self):
'Core: Attr::readonly (static)'
self.assertRaises(AttributeError,lambda: setattr(self.ts,'readonly',2))
def testTriggerPostLoad(self):
'Core: postLoad & Attr::triggerPostLoad'
WTC=woo.core.WooTestClass
# stage right after construction
self.assertEqual(self.t.postLoadStage,WTC.postLoad_ctor)
baz0=self.t.baz
self.t.foo_incBaz=1 # assign whatever, baz should be incremented
self.assertEqual(self.t.baz,baz0+1)
self.assertEqual(self.t.postLoadStage,WTC.postLoad_foo)
self.t.foo_incBaz=1 # again
self.assertEqual(self.t.baz,baz0+2)
self.t.bar_zeroBaz=1 # assign to reset baz
self.assertEqual(self.t.baz,0)
self.assertEqual(self.t.postLoadStage,WTC.postLoad_bar)
# assign to baz to test postLoad again
self.t.baz=-1
self.assertTrue(self.t.postLoadStage==WTC.postLoad_baz)
def testUnicodeStrConverter(self):
'Core: std::string attributes can be assigned unicode string'
t=woo.core.WooTestClass(strVar=u'abc')
t.strVar=u'abc'
self.assertTrue(t.strVar=='abc')
def testNamedEnum(self):
'Core: Attr::namedEnum'
t=woo.core.WooTestClass()
self.assertRaises(ValueError,lambda: setattr(t,'namedEnum','nonsense'))
self.assertRaises(ValueError,lambda: setattr(t,'namedEnum',-2))
self.assertRaises(TypeError,lambda: setattr(t,'namedEnum',[]))
t.namedEnum='zero'
self.assertTrue(t.namedEnum=='zero')
# try with unicode string
t.namedEnum=u'zero'
self.assertTrue(t.namedEnum=='zero')
t.namedEnum='nothing'
self.assertTrue(t.namedEnum=='zero')
t.namedEnum=0
self.assertTrue(t.namedEnum=='zero')
# ctor
self.assertRaises(ValueError,lambda: woo.core.WooTestClass(namedEnum='nonsense'))
tt=woo.core.WooTestClass(namedEnum='single')
self.assertTrue(tt.namedEnum=='one')
@unittest.skipIf(not hasattr(woo.core,'WooTestClassStatic'),'Built without static attrs')
def testNamedEnum_static(self):
'Core: Attr::namedEnum (static)'
ts=self.ts
self.assertRaises(ValueError,lambda: setattr(ts,'namedEnum','nonsense'))
self.assertRaises(ValueError,lambda: setattr(ts,'namedEnum',-2))
self.assertRaises(TypeError,lambda: setattr(ts,'namedEnum',[]))
ts.namedEnum='zero'
self.assertTrue(ts.namedEnum=='zero')
ts.namedEnum=-1
self.assertTrue(ts.namedEnum=='minus one')
# test passing it in the ctor
tt=woo.core.WooTestClass(namedEnum='NULL') # use the alternative name
self.assertTrue(tt.namedEnum=='zero')
tt=woo.core.WooTestClass(namedEnum=1) # use number
self.assertTrue(tt.namedEnum=='one')
def testBits(self):
'Core: AttrTrait.bits accessors'
t=self.t
# flags and bits read-write
t.bits=1
self.assertTrue(t.bit0)
t.bit2=True
self.assertTrue(t.bits==5)
# flags read-only, bits modifiable
self.assertRaises(AttributeError,lambda: setattr(t,'bitsRw',1))
t.bit2rw=True
t.bit3rw=True
self.assertTrue(t.bitsRw==12)
# both flags and bits read-only
self.assertRaises(AttributeError,lambda: setattr(t,'bitsRo',1))
self.assertRaises(AttributeError,lambda: setattr(t,'bit1ro',True))
self.assertTrue(t.bitsRo==3)
self.assertTrue((t.bit0ro,t.bit1ro,t.bit2ro)==(True,True,False))
def testDeprecated(self):
'Core: AttrTrait.deprecated raises exception on access'
self.assertRaises(ValueError,lambda: getattr(self.t,'deprecatedAttr'))
self.assertRaises(ValueError,lambda: setattr(self.t,'deprecatedAttr',1))
## not (yet) supported for static attributes
# def testBits_static(self):
def testHidden(self):
'Core: Attr::hidden'
# hidden attributes are not wrapped in python at all
self.assertTrue(not hasattr(self.t,'hiddenAttr'))
@unittest.skipIf(not hasattr(woo.core,'WooTestClassStatic'),'Built without static attrs')
def testHidden_static(self):
'Core: Attr::hidden (static)'
self.assertRaises(AttributeError,lambda: getattr(self.ts,'hidden'))
def testNotifyDead(self):
'Core: PeriodicEngine::notifyDead'
e=woo.core.WooTestPeriodicEngine()
self.assertTrue(e.deadCounter==0)
prev=e.deadCounter
e.dead=True
self.assertTrue(e.deadCounter>prev) # ideally, this should be 1, not 4 by now!!
prev=e.deadCounter
e.dead=True
self.assertTrue(e.deadCounter>prev)
prev=e.deadCounter
e.dead=False
self.assertTrue(e.deadCounter>prev)
def testNodeDataCtorAssign(self):
'Core: assign node data using shorthands in the ctor'
n=woo.core.Node(pos=(1,2,3),dem=woo.dem.DemData(mass=100))
self.assertTrue(n.dem.mass==100)
# pybind11: RuntimeError
self.assertRaises((TypeError,RuntimeError),lambda: woo.core.Node(dem=1))
if 'gl' in woo.config.features:
# type mismatch
self.assertRaises(RuntimeError,lambda: woo.core.Node(dem=woo.gl.GlData()))
class TestLoop(unittest.TestCase):
def setUp(self):
woo.master.reset()
woo.master.scene.fields=[DemField()]
woo.master.scene.dt=1e-8
def testSubstepping(self):
'Loop: substepping'
S=woo.master.scene
S.engines=[PyRunner(1,'pass'),PyRunner(1,'pass'),PyRunner(1,'pass')]
# value outside the loop
self.assertTrue(S.subStep==-1)
# O.subStep is meaningful when substepping
S.subStepping=True
S.one(); self.assertTrue(S.subStep==0)
S.one(); self.assertTrue(S.subStep==1)
# when substepping is turned off in the middle of the loop, the next step finishes the loop
S.subStepping=False
S.one(); self.assertTrue(S.subStep==-1)
# subStep==0 inside the loop without substepping
S.engines=[PyRunner(1,'if scene.subStep!=0: raise RuntimeError("scene.subStep!=0 inside the loop with Scene.subStepping==False!")')]
S.one()
def testEnginesModificationInsideLoop(self):
'Loop: Scene.engines can be modified inside the loop transparently.'
S=woo.master.scene
S.engines=[
PyRunner(stepPeriod=1,command='from woo.core import *; from woo.dem import *; scene.engines=[ForceResetter(),ForceResetter(),Leapfrog(reset=False)]'), # change engines here
ForceResetter() # useless engine
]
S.subStepping=True
# run prologue and the first engine, which modifies O.engines
S.one(); S.one(); self.assertTrue(S.subStep==1)
self.assertTrue(len(S.engines)==3) # gives modified engine sequence transparently
self.assertTrue(len(S._nextEngines)==3)
self.assertTrue(len(S._currEngines)==2)
S.one(); S.one(); # run the 2nd ForceResetter, and epilogue
self.assertTrue(S.subStep==-1)
# start the next step, nextEngines should replace engines automatically
S.one()
self.assertTrue(S.subStep==0)
self.assertTrue(len(S._nextEngines)==0)
self.assertTrue(len(S.engines)==3)
self.assertTrue(len(S._currEngines)==3)
def testDead(self):
'Loop: dead engines are not run'
S=woo.master.scene
S.engines=[PyRunner(1,'pass',dead=True)]
S.one(); self.assertTrue(S.engines[0].nDone==0)
def testPausedContext(self):
'Loop: "with Scene.paused()" context manager'
import time
S=woo.master.scene
S.engines=[]
S.run()
with S.paused():
i=S.step
time.sleep(.1)
self.assertTrue(i==S.step) # check there was no advance during those .1 secs
self.assertTrue(S.running) # running should return true nevertheless
time.sleep(.1)
self.assertTrue(i<S.step) # check we run during those .1 secs again
S.stop()
def testNoneEngine(self):
'Loop: None engine raises exception.'
S=woo.master.scene
self.assertRaises(RuntimeError,lambda: setattr(S,'engines',[ContactLoop(),None,ContactLoop()]))
def testStopAtStep(self):
'Loop: S.stopAtStep and S.run(steps=...)'
S=woo.core.Scene(dt=1.)
S.stopAtStep=100 # absolute value
S.run(wait=True)
self.assertEqual(S.step,100)
S.run(steps=100,wait=True) # relative value
self.assertEqual(S.step,200)
def testStopAtTime(self):
'Loop: S.stopAtTime and S.run(time=...)'
S=woo.core.Scene(dt=1e-3)
S.stopAtTime=1.0001 # absolute value
S.run(wait=True)
self.assertAlmostEqual(S.time,1.001,delta=1e-3)
S.run(time=.5,wait=True) # relative value
self.assertAlmostEqual(S.time,1.501,delta=1e-3)
def testStopAtHook(self):
'Loop: S.stopAtHook'
S=woo.core.Scene(dt=1e-3)
# both of them should trigger stopAtHook
S.stopAtTime=10e-3
S.stopAtStep=1000
S.lab.a=1
S.lab._setWritable('a')
S.stopAtHook='S.lab.a+=1'
S.run(wait=True) # stopAtTime applies first
self.assertEqual(S.lab.a,2)
S.run(wait=True) # stopAtStep applies now
self.assertEqual(S.lab.a,3)
def testWait(self):
'Loop: Scene.wait() returns only after the current step finished'
S=woo.core.Scene(dt=1e-3,engines=[PyRunner(1,'import time; S.stop(); time.sleep(.3); S.lab.aa=True')])
S.run(wait=True)
self.assertTrue(hasattr(S.lab,'aa'))
def testWaitTimeout(self):
'Loop: Scene.wait(timeout=...)'
S=woo.core.Scene(dt=1,engines=[PyRunner(1,'import time; time.sleep(1)')])
S.run()
self.assertRaises(RuntimeError,lambda: S.wait(timeout=.01))
def testWaitException(self):
'Loop: Scene.wait() returns on exception'
# silence loggers
ll=dict([(l,woo.log.level(l)) for l in ('Engine','Scene')])
for l in ll.keys(): woo.log.setLevel(l,woo.log.OFF)
S=woo.core.Scene(dt=1,engines=[PyRunner(1,'raise ValueError("Some error error")')])
self.assertRaises(RuntimeError,lambda: S.run(wait=True))
for l,lev in ll.items(): woo.log.setLevel(l,lev)
def testWaitForScenes(self):
'Loop: Master.waitForScenes correctly handles reassignment of the master scene'
S=woo.master.scene=woo.core.Scene(dt=1e-3,engines=[PyRunner(1,'import time; S.stop(); time.sleep(.3); woo.master.scene=woo.core.Scene(dt=1e-4,engines=[woo.core.PyRunner(1,"S.stop()")])')])
S.run()
woo.master.waitForScenes()
self.assertTrue(woo.master.scene.dt==1e-4) # check master scene is the second one
class TestIO(unittest.TestCase):
def testSaveAllClasses(self):
'I/O: All classes can be saved and loaded with boost::serialization'
import woo.system
failed=set()
for c in woo.system.childClasses(woo.core.Object):
t=woo.core.WooTestClass()
try:
t.any=c()
t.saveTmp(quiet=True)
woo.master.loadTmpAny()
except (RuntimeError,ValueError):
print(20*'*'+' error with class '+c.__name__)
import traceback
traceback.print_exc()
failed.add(c.__name__)
failed=list(failed); failed.sort()
if failed:
print(80*'#'+'\nFailed classes were: '+' '.join(failed))
self.assertTrue(len(failed)==0,'Failed classes were: '+' '.join(failed)+'\n'+80*'#')
class TestContact(unittest.TestCase):
def setUp(self):
self.S=S=woo.core.Scene(fields=[DemField()],engines=DemField.minimalEngines())
for i in range(0,10):
S.dem.par.add(woo.dem.Sphere.make((0,0,i),1.1))
S.one()
def testForceSign(self):
'Contact: forceSign'
S=self.S
c45=S.dem.con[4,5]
inc=(c45.id1==4)
self.assertTrue(c45.forceSign(4)==(1 if inc else -1))
self.assertTrue(c45.forceSign(5)==(-1 if inc else 1))
self.assertRaises(RuntimeError,lambda: c45.forceSign(6))
self.assertTrue(c45.forceSign(S.dem.par[4])==(1 if inc else -1))
self.assertTrue(c45.forceSign(S.dem.par[5])==(-1 if inc else 1))
self.assertRaises(RuntimeError,lambda: c45.forceSign(S.dem.par[6]))
class TestParticles(unittest.TestCase):
def setUp(self):
woo.master.reset()
woo.master.scene.fields=[DemField()]
S=woo.master.scene
self.count=100
S.dem.par.add([utils.sphere([random.random(),random.random(),random.random()],random.random()) for i in range(0,self.count)])
random.seed()
def testIterate(self):
"Particles: iteration"
counted=0
S=woo.master.scene
for b in S.dem.par: counted+=1
self.assertTrue(counted==self.count)
def testLen(self):
"Particles: len(S.dem.par)"
S=woo.master.scene
self.assertTrue(len(S.dem.par)==self.count)
def testRemove(self):
"Particles: accessing removed particles raises IndexError"
S=woo.master.scene
S.dem.par.remove(0)
self.assertRaises(IndexError,lambda: S.dem.par[0])
def testRemoveShrink(self):
"Particles: removing particles shrinks storage size"
S=woo.master.scene
for i in range(self.count-1,-1,-1):
S.dem.par.remove(i)
self.assertTrue(len(S.dem.par)==i)
def testNegativeIndex(self):
"Particles: negative index counts backwards (like python sequences)."
S=woo.master.scene
self.assertTrue(S.dem.par[-1]==S.dem.par[self.count-1])
def testRemovedIterate(self):
"Particles: iterator silently skips erased particles"
S=woo.master.scene
removed,counted=0,0
for i in range(0,10):
id=random.randint(0,self.count-1)
if S.dem.par.exists(id): S.dem.par.remove(id); removed+=1
for b in S.dem.par: counted+=1
self.assertTrue(counted==self.count-removed)
class TestArrayAccu(unittest.TestCase):
def setUp(self):
self.t=woo.core.WooTestClass()
self.N=woo.master.numThreads
#def testRead(self):
# 'OpenMP array accu: implicit conversion to python list'
# print(self.t.aaccu)
def testResize(self):
'OpenMP array accu: resizing'
self.assertEqual(len(self.t.aaccuRaw),0) # initial zero size
for sz in (4,8,16,32,33):
self.t.aaccuRaw=[i for i in range(0,sz)]
r=self.t.aaccuRaw
for i in range(0,sz):
self.assertTrue(r[i][0]==i) # first thread is assigned the value
for n in range(1,self.N): self.assertTrue(r[i][n]==0) # other threads are reset
def testPreserveResize(self):
'OpenMP array accu: preserve old data on resize'
self.t.aaccuRaw=(0,1)
self.t.aaccuWriteThreads(2,[2]) # write whatever to index 2: resizes, but should preserve 0,1
self.assertEqual(self.t.aaccuRaw[0][0],0)
self.assertEqual(self.t.aaccuRaw[1][0],1)
def testThreadWrite(self):
'OpenMP array accu: concurrent writes'
self.t.aaccuWriteThreads(0,list(range(self.N)))
for i in range(0,self.N):
self.assertEqual(self.t.aaccuRaw[0][i],i) # each thread has written its own index
class _TestPyClass(woo.core.Object,woo.pyderived.PyWooObject):
'Sample pure-python class integrated into woo (mainly used with preprocessors), for use with :obj:`TestPyDerived`.'
PAT=woo.pyderived.PyAttrTrait
_attrTraits=[
PAT(float,'aF',1.,'float attr'),
PAT([float,],'aFF',[0.,1.,2.],'list of floats attr'),
PAT(Vector2,'aV2',(0.,1.),'vector2 attr'),
PAT([Vector2,],'aVV2',[(0.,0.),(1.,1.)],'list of vector2 attr'),
PAT(woo.core.Node,'aNode',woo.core.Node(pos=(1,1,1)),'node attr'),
PAT(woo.core.Node,'aNodeNone',None,'node attr, uninitialized'),
PAT([woo.core.Node,],'aNNode',[woo.core.Node(pos=(1,1,1)),woo.core.Node(pos=(2,2,2))],'List of nodes'),
PAT(float,'aF_trigger',1.,triggerPostLoad=True,doc='Float triggering postLoad, copying its value to aF'),
PAT(int,'postLoadCounter',0,doc='counter for postLoad (readonly). Incremented by 1 after construction, incremented by 10 when assigning to aF_trigger.'),
PAT(int,'deprecAttr',-1,deprecated=True,doc='deprecated, here should be the explanation.'),
PAT(str,'sChoice','aa',choice=['aa','bb','cc'],doc='String choice attribute')
]
def postLoad(self,I):
if I is None:
self.postLoadCounter+=1
elif I=='aF_trigger':
# print 'postLoad / aF_trigger'
self.postLoadCounter+=10
self.aF=self.aF_trigger
else: raise RuntimeError(self.__class__.__name__+'.postLoad called with unknown attribute id %s'%I)
def __new__(klass,**kw):
self=super().__new__(klass)
self.wooPyInit(klass,woo.core.Object,**kw)
return self
def __init__(self,**kw):
woo.core.Object.__init__(self)
self.wooPyInit(_TestPyClass,woo.core.Object,**kw)
class _TestPyClass2(_TestPyClass):
'Python class deriving from python base class (which in turn derives from c++).'
_PAT=woo.pyderived.PyAttrTrait
def postLoad(self,I):
if I=='f2' or I is None: self.f2counter+=1
else: super(_TestPyClass2,self).postLoad(I)
_attrTraits=[
_PAT(int,'f2',0,triggerPostLoad=True,doc='Float attr in derived class'),
_PAT(int,'f2counter',0,doc='Count how many times was f2 manipulated (to test triggerPostLoad in class with python parent)'),
]
def __new__(klass,**kw):
self=super().__new__(klass)
self.wooPyInit(klass,_TestPyClass,**kw)
return self
def __init__(self,**kw):
_TestPyClass.__init__(self)
self.wooPyInit(_TestPyClass2,_TestPyClass,**kw)
class TestPyDerived(unittest.TestCase):
import woo.pyderived, woo.core
def setUp(self):
self.t=_TestPyClass()
self.t2=_TestPyClass2()
def testTrigger(self):
'PyDerived: postLoad triggers'
# print 'postLoadCounter after ctor:',self.t.postLoadCounter
self.assertTrue(self.t.postLoadCounter==1)
self.t.aF_trigger=514.
self.assertTrue(self.t.aF_trigger==514.)
self.assertTrue(self.t.aF==514.)
self.assertTrue(self.t.postLoadCounter==11)
def testPickle(self):
'PyDerived: deepcopy'
self.assertTrue(self.t.aF==1.)
self.assertTrue(self.t.aNode.pos==Vector3(1,1,1))
self.t.aF=2.
self.t.aNode.pos=Vector3(0,0,0)
self.assertTrue(self.t.aF==2.)
self.assertTrue(self.t.aNode.pos==Vector3(0,0,0))
# pickle needs the class to be found in the module itself
# PicklingError: Can't pickle <class 'woo.tests.core._TestPyClass'>: it's not found as woo.tests.core._TestPyClass
# this is fixed now the class is defined at the module level
t2=self.t.deepcopy()
self.assertTrue(t2.aF==2.)
self.assertTrue(t2.aNode.pos==Vector3(0,0,0))
self.t.aF=0.
t3=self.t.deepcopy(aF=1.0) # with kw arg
self.assertTrue(t3.aF==1. and self.t.aF==0.)
def testTypeCoerceFloats(self):
'PyDerived: type coercion (primitive types)'
# numbers and number sequences
self.assertRaises(TypeError,lambda:setattr(self.t,'aF','asd'))
self.assertRaises(TypeError,lambda:setattr(self.t,'aF','123')) # disallow conversion from strings
self.assertRaises(TypeError,lambda:setattr(self.t,'aFF',(1,2,'ab')))
self.assertRaises(TypeError,lambda:setattr(self.t,'aFF','ab'))
self.assertRaises(TypeError,lambda:setattr(self.t,'aFF',[(1,2,3),(4,5,6)]))
try: self.t.aFF=[]
except: self.fail("Empty list not accepted for list of floats")
try: self.t.aFF=Vector3(1,2,3)
except: self.fail("Vector3 not accepted for list of floats")
try: self.t.aV2=(0,1.)
except: self.fail("2-tuple not accepted for Vector2")
def testTypeCoerceObject(self):
'PyDerived: type coercion (woo.core.Object)'
# c++ objects
try: self.t.aNode=None
except: self.fail("None not accepted as woo.core.Node")
self.assertRaises(TypeError,lambda:setattr(self.t,'aNode',woo.core.Scene()))
# list of c++ objects
self.assertRaises(TypeError,lambda:setattr(self.t,'aNNode',(woo.core.Node(),woo.core.Scene())))
try: self.t.aNNode=[None,woo.core.Node()]
except: self.fail("[None,Node] not accepted for list of Nodes")
def testTypeCoerceCtor(self):
'PyDerived: type coercion (ctor)'
self.assertRaises(TypeError,lambda:_TestPyClass(aF='abc'))
def testTraits(self):
'PyDerived: PyAttrTraits'
self.assertTrue(self.t._attrTraits[0].ini==1.)
self.assertTrue(self.t._attrTraits[0].pyType==float)
def testIniDefault(self):
'PyDerived: default initialization'
self.assertTrue(self.t.aF==1.)
self.assertTrue(self.t.aFF==[0.,1.,2.])
self.assertTrue(self.t.aNodeNone==None)
def testIniUser(self):
'PyDerived: user initialization'
t2=_TestPyClass(aF=2.)
self.assertTrue(t2.aF==2.)
self.assertRaises(AttributeError,lambda: _TestPyClass(nonsense=123))
def testStrValidation(self):
'PyDerived: string choice is validated'
try: self.t.sChoice='bb'
except: self.fail("Valid choice value not accepted as new attribute value")
self.assertRaises(ValueError, lambda: setattr(self.t,'sChoice','abc'))
self.assertRaises(ValueError, lambda: _TestPyClass(sChoice='abc'))
try: _TestPyClass(sChoice='bb')
except: self.fail("Valid choice value not accepted in ctor")
def testBoostSaveError(self):
'PyDerived: refuses to save via Object::save (data loss; dump must be used instead)'
self.assertRaises(IOError,lambda: self.t.save('whatever'))
self.assertRaises(IOError,lambda: self.t2.save('whatever'))
def testBoostDumpError(self):
'PyDerived: refuses to dump with boost::serialization format (data loss)'
self.assertRaises(IOError,lambda: self.t.dump('whatever.xml'))
self.assertRaises(IOError,lambda: self.t2.dump('whatever.xml'))
def testDeprecated(self):
'PyDerived: deprecated attribute raises ValueError on access'
self.assertRaises(ValueError,lambda: getattr(self.t,'deprecAttr'))
self.assertRaises(ValueError,lambda: setattr(self.t,'deprecAttr',1))
def testPickle2(self):
'PyDerived: deepcopy (python parent)'
self.t2.f2=3
self.t2.aF=0
tt=self.t2.deepcopy()
self.assertTrue(tt.aF==0)
self.assertTrue(tt.f2==3)
def testIniUser2(self):
'PyDerived: user initialization (python parent)'
t2=_TestPyClass2(aF=0,f2=3)
self.assertTrue(t2.aF==0)
self.assertTrue(t2.f2==3)
def testTrigger2(self):
'PyDerived: postLoad trigger (python parent)'
c1=self.t2.postLoadCounter
c2=self.t2.f2counter
self.t2.f2=4.
self.t2.aF_trigger=5.
self.assertTrue(self.t2.f2counter>c2)
self.assertTrue(self.t2.postLoadCounter>c1)
| woodem/woo | py/tests/test_core.py | Python | gpl-2.0 | 30,561 |
from HTMLParser import HTMLParser
import simplejson as json
import database
import re
class MyHTMLParser(HTMLParser):
def array(self):
self.dataArray = []
def handle_data(self, data):
self.dataArray.append(data)
def htmlFormat(json_dump):
html = "<p><img src=\"https://graph.facebook.com/%s/picture\" > Name: %s - Link: <a href=\"%s\">facebook profile</a> - Gender: %s -Locale: %s</p>" %(json_dump['username'],json_dump['name'], json_dump['link'], json_dump['gender'], json_dump['locale'])
return html
def parceros(json_dump):
parser = MyHTMLParser()
parser.array()
names = []
userIds = []
try:
to_parse = str(json_dump['domops'][0][3]['__html'])
parser.feed(to_parse)
except:
print 'Error in json dump or parser.feed'
i = 0
while True:
if ((parser.dataArray[i] == 'Test Users') or (parser.dataArray[i] == 'Delete') or (parser.dataArray[i] == 'Add') or
(parser.dataArray[i] == 'Name') or (parser.dataArray[i] == 'User ID') or (parser.dataArray[i] == 'Email') or
(parser.dataArray[i] == 'Edit') or (parser.dataArray[i] == 'tfbnw.net')):
del parser.dataArray[i]
else:
i += 1
if i == len(parser.dataArray):
break
i = 0
while i < (len(parser.dataArray) - 2):
names.append(parser.dataArray[i])
userIds.append(parser.dataArray[i+1])
i = i + 3
if ( userIds!=[] and names!=[]) and (parser.dataArray[0] != 'This app has no Test Users.'):
database.insertTestUsersDev(userIds,names)
return 1
else:
return -1
def parseData(dataRaw):
parser = MyHTMLParser()
parser.array()
names = []
userIds = []
emails = []
passwords = []
for data in dataRaw:
if data=="":
continue
text = data.strip("for (;;);")
json_dump = json.loads(text)
try:
to_parse = str(json_dump['jsmods']['markup'][0][1]['__html'])
parser.feed(to_parse)
except:
print 'Error in json dump or parser.feed'
for i in range(len(parser.dataArray)):
if parser.dataArray[i] == 'Name':
names.append(parser.dataArray[i+1])
continue
if parser.dataArray[i] == 'User ID':
userIds.append(parser.dataArray[i+1])
continue
if parser.dataArray[i] == 'Login email':
emails.append(parser.dataArray[i+1]+'@'+parser.dataArray[i+2])
continue
if parser.dataArray[i] == 'Login password':
passwords.append(parser.dataArray[i+1])
continue
if ( userIds!=[] and names!=[] and emails!=[] and passwords!=[] ):
database.insertTestUsers(userIds,names,emails,passwords)
return 1
else:
return -1
'''
try:
for i in range(len(names)):
print names[i] + ' ' + userIds[i] + ' ' + emails[i] + ' ' + passwords[i] + ' '
except:
print 'for error in MyParser'
'''
def parseOnline(data):
buddies = []
start = 0
while True:
match = re.search("fbid=", data[start:])
if match is not None:
start += match.end()
matchBis = re.search("&",data[start:])
if matchBis is not None:
end = matchBis.end() + start
buddies.append(str(data[start:end-1]))
start = end
end = 0
else:
break
return buddies
def parseFriends(data):
start = 0
end = 0
lines = []
friends = []
while True:
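# friend entries appear as anchors of the form href="/<name>?fref=fr_tab"; collect each match and continue scanning after it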
match = re.search(r'href="/([a-zA-Z]*[0-9]*[\.]*)+(\?fref=fr_tab)',data[start:])
if match is not None:
lines.append(match.group())
start += match.end()
else:
break
for linea in lines:
name = linea.split('/')[1].split('?')[0]
friends.append(name)
match = re.search("[a-zA-Z]+\?v=friends&mutual&startindex=[0-9]+",data)
if match is not None:
raw = match.group()
next = raw.replace("&","&")
else:
match = re.search("[a-zA-Z]+/friends\?([a-zA-Z]+=[0-9]+)+(&)*([a-zA-Z]+=[0-9]+)*",data)
if match is not None:
raw = match.group()
next = raw.replace("&","&")
else:
next = -1
return friends,next
def parsePending():
response = open("respuesta.html","r")
struct = []
aux = []
while True:
linea = response.readline()
if not linea: break
match = re.search('/ajax/reqs.php'+'(.+)',str(linea))
if match is not None:
struct.append(re.search('/ajax/reqs.php'+'(.+)',str(linea)).group())
for lines in struct:
start = 0
while True:
match = re.search('[0-9]{15}',str(lines)[start:])
if match is not None:
if str(lines)[start + match.start():start + match.end()] not in aux:
aux.append(str(lines)[start + match.start():start + match.end()])
start += match.end()
else:
break;
return aux | chinoogawa/fbht | MyParser.py | Python | bsd-2-clause | 5,245 |
"""Super useful module"""
def print_num(number):
print(number)
| brooth/far.vim | test/very long directory name with spaces/very long file name also with spaces.py | Python | mit | 68 |
import fuse
| hubearth/odoo_fuse | models/__init__.py | Python | gpl-3.0 | 14 |
from classifip.models.qda import EuclideanDiscriminant, LinearDiscriminant, QuadraticDiscriminant, NaiveDiscriminant
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
import random, numpy as np, pandas as pd, sys
from sklearn.model_selection import KFold
from collections import Counter
from numpy import linalg
INFIMUM, SUPREMUM = "inf", "sup"
class BaseEstimator:
def __init__(self, store_covariance=False):
self._data, self._N, self._p = None, 0, 0
self._clazz, self._nb_clazz = None, None
self._means, self._prior = dict(), dict()
self._icov, self._dcov = dict(), dict()
def fit(self, X, y):
self._N, self._p = X.shape
self._data = pd.concat([pd.DataFrame(X, dtype="float64"), pd.Series(y, dtype="category")], axis=1)
columns = ["x" + i for i in map(str, range(self._p))]
columns.extend('y')
self._data.columns = columns
self._clazz = np.array(self._data.y.cat.categories.tolist())
self._nb_clazz = len(self._clazz)
def pdf(self, query, mean, inv_cov, det_cov):
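# multivariate Gaussian density: (2*pi)^(-p/2) * |cov|^(-1/2) * exp(-0.5 * (x-mu)^T cov^-1 (x-mu))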
_exp = -0.5 * ((query - mean).T @ inv_cov @ (query - mean))
_const = np.power(det_cov, -0.5) / np.power(2 * np.pi, self._p / 2)
return _const * np.exp(_exp)
def predict(self, queries):
predict_clazz = list()
for query in queries:
pbs = np.array(
[self.pdf(query, self._means[clazz], self._icov[clazz], self._dcov[clazz]) * self._prior[clazz] \
for clazz in self._clazz])
predict_clazz.append(self._clazz[pbs.argmax()])
return predict_clazz
class EuclideanDiscriminantPrecise(BaseEstimator):
def fit(self, X, y):
super(EuclideanDiscriminantPrecise, self).fit(X, y)
for clazz in self._clazz:
self._means[clazz] = self._data[self._data.y == clazz].iloc[:, :-1].mean().as_matrix()
self._prior[clazz] = len(self._data[self._data.y == clazz]) / self._N
self._icov[clazz] = np.identity(self._p)
self._dcov[clazz] = 1
class NaiveDiscriminantPrecise(BaseEstimator):
def fit(self, X, y):
super(NaiveDiscriminantPrecise, self).fit(X, y)
# cov_total = np.diag(np.var(self._data.iloc[:, :-1]))  # naive variant with a single global variance
for clazz in self._clazz:
self._means[clazz] = self._data[self._data.y == clazz].iloc[:, :-1].mean().as_matrix()
self._prior[clazz] = len(self._data[self._data.y == clazz]) / self._N
cov_clazz = np.diag(np.var(self._data[self._data.y == clazz].iloc[:, :-1]))
if linalg.cond(cov_clazz) < 1 / sys.float_info.epsilon:
self._icov[clazz] = linalg.inv(cov_clazz)
self._dcov[clazz] = linalg.det(cov_clazz)
else: # computing pseudo inverse/determinant to a singular covariance matrix
self._icov[clazz] = linalg.pinv(cov_clazz)
eig_values, _ = linalg.eig(cov_clazz)
self._dcov[clazz] = np.product(eig_values[(eig_values > 1e-12)])
MODEL_TYPES = {'ieda': EuclideanDiscriminant, 'ilda': LinearDiscriminant,
'iqda': QuadraticDiscriminant, 'inda': NaiveDiscriminant}
def __factory_model(model_type, **kwargs):
try:
return MODEL_TYPES[model_type.lower()](**kwargs)
except Exception as _:
raise Exception("Selected model does not exist")
MODEL_TYPES_PRECISE = {'lda': LinearDiscriminantAnalysis, 'qda': QuadraticDiscriminantAnalysis,
'eda': EuclideanDiscriminantPrecise, 'nda': NaiveDiscriminantPrecise}
def __factory_model_precise(model_type, **kwargs):
try:
if model_type == 'lda': kwargs["solver"] = "svd";
return MODEL_TYPES_PRECISE[model_type.lower()](**kwargs)
except Exception as _:
raise Exception("Selected model does not exist")
def generate_seeds(nb_seeds):
return [random.randrange(pow(2, 20)) for _ in range(nb_seeds)]
def generate_sample_cross_validation(data_labels, nb_fold_cv=2, minimum_by_label=1):
nb_by_label = Counter(data_labels)
# int(xxx*(1-1/nb_fold_cv)) split minimum 2 training and other testing
if len(nb_by_label) > 0 and minimum_by_label > int(min(nb_by_label.values()) * (1 - 1 / nb_fold_cv)):
raise Exception('It is not possible to split a minimum number %s of labels for training '
' and others for testing.' % minimum_by_label)
while True:
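# redraw the folds until every training split keeps at least minimum_by_label samples of each label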
kf = KFold(n_splits=nb_fold_cv, random_state=None, shuffle=True)
splits, is_minimum_OK = list([]), True
for idx_train, idx_test in kf.split(data_labels):
splits.append((idx_train, idx_test))
nb_by_label = Counter(data_labels[idx_train])
if len(nb_by_label) > 0 and minimum_by_label > min(nb_by_label.values()):
is_minimum_OK = False
break
if is_minimum_OK:
break
return splits
| sdestercke/classifip | experiments/classification/qda/qda_common.py | Python | gpl-2.0 | 4,995 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-01 11:42
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('website', '0028_auto_20170601_1436'),
]
operations = [
migrations.DeleteModel(
name='Github_user',
),
]
| hiezust/teask | website/migrations/0029_delete_github_user.py | Python | gpl-3.0 | 365 |
# reference: https://tkipf.github.io/graph-convolutional-networks/
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
from scipy.special import expit
# np.random.seed(1)
G = nx.karate_club_graph()
A = nx.adjacency_matrix(G)
A = np.array(A.todense())
# parameters for synthtization
n = 50 # size of matrix
size_cluster = 10
number_cluster = n // size_cluster  # integer division so range() below also works under Python 3
noise_ratio = 0.01
# relation table R
R = np.zeros((n, n), dtype=np.float)
for i in range(number_cluster):
R[i*size_cluster:(i+1)*size_cluster, i*size_cluster:(i+1)*size_cluster] = 1
# add some noise
# for i in range(int(n*n*noise_ratio)):
# x = np.random.randint(0, n)
# y = np.random.randint(0, n)
# R[x, y] = 1 - R[x, y]
# apply permutation
# R_ = R[np.random.permutation(n)].T[np.random.permutation(n)].T
R_ = R
# Display R
# fig, axes = plt.subplots(1,1)
# axes.imshow(R_, interpolation='None', cmap='Greys')
# plt.show()
# Construct adjacency matrix
A = np.vstack((np.hstack((np.zeros((50, 50), dtype=np.float), R_)), np.hstack((R_.T, np.zeros((50, 50), dtype=np.float)))))
D = np.diag(np.sum(A, axis=1)**-0.5)
DAD = np.dot(np.dot(D, (A + np.identity(A.shape[0], dtype=np.float))), D)
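# symmetric normalization with self-loops, D^-1/2 (A + I) D^-1/2 (GCN renormalization trick); note D is computed from A before the identity is added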
# propagation
W = np.random.rand(100, 100) * 2 - 1
H = np.tanh(np.dot(DAD, W))
H = H / np.sum(H, axis=1)[:, np.newaxis]
for i in range(20):
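# each pass applies an untrained random weight matrix and tanh over the normalized graph, then row-normalizes the features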
W = np.random.rand(100, 100) * 2 - 1 # initialize weight
H = np.tanh(np.dot(np.dot(DAD, H), W))
H = H / np.sum(H, axis=1)[:, np.newaxis]
W = np.random.rand(100, 2) * 2 - 1
H = np.tanh(np.dot(np.dot(DAD, H), W))
# print DAD
fig, ax = plt.subplots()
for color, i in zip(['red', 'green', 'blue', 'grey', 'brown'], range(number_cluster)):
x, y = H[i*size_cluster:(i+1)*size_cluster,0], H[i*size_cluster:(i+1)*size_cluster,1]
ax.scatter(x, y, c=color, label=color, edgecolors='none')
# ax.legend()
ax.grid(True)
# plt.scatter(H4[:,0], H4[:,1], marker='x')
plt.show() | acgacgacgacg/biclustering | test_WL.py | Python | agpl-3.0 | 1,934 |
#
# Proximate - Peer-to-peer social networking
#
# Copyright (c) 2008-2011 Nokia Corporation
#
# All rights reserved.
#
# This software is licensed under The Clear BSD license.
# See the LICENSE file for more details.
#
from errno import EAGAIN, EINTR
from gobject import io_add_watch, IO_IN
import socket
from random import randint
from bencode import fmt_bdecode, bencode
from ioutils import create_udp_socket, send_broadcast, TCP_Queue
from plugins import Plugin, get_plugins, get_plugin_by_type
from support import warning, info, debug, get_debug_mode
from proximateprotocol import DEFAULT_COMMUNITY_NAME, FRIENDS_COMMUNITY_NAME, \
BLACKLIST_COMMUNITY_NAME, USER_STATUS_LIST, \
TP_HELLO, TP_QUIT, TP_SCAN_INTERVAL, TP_CONNECT_TIMEOUT, \
PROXIMATE_PROTOCOL_VERSION, valid_community, \
valid_nick, valid_port, valid_uid, valid_protocol_version, \
PLUGIN_TYPE_COMMUNITY, PLUGIN_TYPE_FETCHER, \
PLUGIN_TYPE_NOTIFICATION, PLUGIN_TYPE_SCHEDULER, PROFILE_ICON_CHANGED, \
PLUGIN_TYPE_NETWORK_CONTROL, \
PLUGIN_TYPE_SETTINGS, DEFAULT_PROXIMATE_PORT, TP_MIN_PORT, TP_MAX_PORT
from proximatestate import create_community, find_communities, get_community_dir, \
get_broadcast_ports, delete_community, \
seek_community_icon_name, get_myself, get_ordinary_community, get_user, \
get_users, get_user_dir, save_user, \
save_community_icon, save_communities, save_face, seek_face_name, \
create_user, delete_face, create_user_communities, delete_community_icon, \
normal_traffic_mode
from typevalidator import validate, ZERO_OR_MORE
from utils import read_file_contents, Rate_Limiter
from pathname import get_path, FRIEND_COMMUNITY_ICON
from meta import is_unsigned_int
from user import User
from communitymeta import Community
community = None
REQUEST_INTERVAL = 300 // TP_SCAN_INTERVAL
ICON_PUSH_INTERVAL = 60
# Avoid icon transfers with huge number of online users
MAX_ICON_ACTIVE = 30
class Community_Plugin(Plugin):
IP_NETWORK = 0
def __init__(self, options):
self.register_plugin(PLUGIN_TYPE_COMMUNITY)
self.register_server(TP_HELLO, Hello_Server)
self.fetcher = None
self.fetchhandlers = {
TP_HELLO: self.handle_hello,
'uprofile': self.handle_user_profile_fetch,
'iconrequest': self.handle_icon_request,
'iconpush': self.handle_icon_push,
'cprofile': self.handle_community_profile_fetch,
'cprofiles': self.handle_community_profiles_fetch,
'cinvite': self.handle_invite_community,
}
self.notify = None
self.net = None
self.community_gui = None
self.req_counter = 0
self.activeport = options.activeport
# Note ipactive is not dependent on udp_listen and udp_send variables
self.ipactive = True
self.activeusers = {}
self.remoteusers = {}
for user in self.get_users(False):
remotes = user.get('remotes')
if remotes != None and len(remotes) > 0:
self.remoteusers[user] = 0
self.myself = get_myself()
self.myuid = self.myself.get('uid')
self.udp_listen = (options.udpmode & 1) != 0
self.udp_send = (options.udpmode & 2) != 0
if not self.udp_listen or not self.udp_send:
info('UDP broadcast listen: %s send: %s\n' % (self.udp_listen, self.udp_send))
self.blacklist = {}
self.blistcom = self.create_community(BLACKLIST_COMMUNITY_NAME, peer=False, public=False)
self.blistcom.set('invisible', True)
self.iconfetchlimiters = {'user': Rate_Limiter(ICON_PUSH_INTERVAL)}
self.personal_communities = options.personal_communities
# Create a community of friends, if it doesn't already exist
friends = self.get_friend_community()
if friends == None:
friends = self.create_community(FRIENDS_COMMUNITY_NAME, peer=False, public=False, desc='My friends')
self.set_community_icon(friends, get_path(FRIEND_COMMUNITY_ICON))
def register_ui(self, ui):
self.community_gui = ui
def add_friend(self, user):
assert(isinstance(user, User))
self.get_friend_community().add_member(user)
self.announce_user_change(user)
def add_member(self, com, user):
""" Add member to a personal community. """
assert(isinstance(com, Community))
assert(isinstance(user, User))
if com.get('peer'):
warning('Can not add member to peer community\n')
return False
if com == self.blistcom:
self.blacklist[user] = None
com.add_member(user)
self.save_communities([com])
self.announce_user_change(user, allowme=True)
self.notify.notify('%s added to community %s' % (user.tag(), com.get('name')))
return True
def add_or_update_user(self, uid, updatelist, profileversion, ip, port, profile=None):
user = get_user(uid)
newuser = (user == None)
if newuser:
user = create_user(uid)
if not user:
warning('community: Unable to create a new user %s\n' % uid)
return
if ip != None:
user.set('ip', ip)
user.set('port', port)
if newuser or user.get('v') != profileversion:
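# unknown user or outdated profile version: apply the advertised attributes, then use the embedded profile or fetch the full one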
user.update_attributes(updatelist, user.get('v'))
if profile != None:
self.got_user_profile(user, profile, None)
elif not user.inprogress:
debug('Fetching new profile from user %s\n' % user.tag())
request = {'t': 'uprofile'}
if self.fetcher.fetch(user, PLUGIN_TYPE_COMMUNITY, request, self.got_user_profile):
user.inprogress = True
elif not user.present and not user.inprogress:
# User appears and user profile is already up-to-date
self.request_user_icon(user)
self.fetch_community_profiles(user)
if user.update_presence(True):
self.announce_user(user)
def broadcast(self, msg):
if not self.get_network_state(self.IP_NETWORK):
# Act like there is no IP network
return
for (dev, state) in self.net.get_interfaces().items():
(ip, bcast) = state
if ip == None:
continue
for port in get_broadcast_ports():
send_broadcast(bcast, port, msg)
def cleanup(self):
self.log_users(0, None)
if self.udp_send:
self.broadcast(self.gen_rpc_bye())
for user in self.activeusers.keys():
self.depart_user(user)
def create_community(self, name, peer=True, public=True, desc=None):
existing = self.find_communities(name, peer=peer, public=public)
if len(existing) > 0:
assert(len(existing) == 1)
community = existing[0]
else:
community = create_community(name)
community.set('peer', peer)
community.set('public', public)
community.set('creator', self.myself.get('nick'))
community.set('creatoruid', self.myself.get('uid'))
community.set('description', desc)
self.save_communities([community])
if peer == False:
self.announce_user_change(self.myself, allowme=True)
return community
def create_udp_listener(self):
if self.activeport != None:
# Port specified in the command line
port = self.activeport
else:
port = self.listener_port_setting.value
rfd = create_udp_socket('', port, False, reuse = True)
if rfd == None:
warning('Can not listen to UDP broadcasts on port %d\n' % port)
return
info('Listening to UDP broadcasts on port %d\n' % port)
rfd.setblocking(False)
io_add_watch(rfd, IO_IN, self.udp_listener_read)
def depart_user(self, user):
""" This is called when user is denounced or the program quits.
In the latter case this method is called for all active users. """
user.update_presence(False)
def denounce_user(self, user):
try:
self.activeusers.pop(user)
except KeyError:
# we got a false bye-bye message
return
if not user.is_present():
return
if get_debug_mode():
self.notify.user_notify(user, 'disappears')
user.set('ip', None)
user.set('port', None)
self.depart_user(user)
if self.community_gui != None:
self.community_gui.user_disappears(user)
for plugin in get_plugins():
plugin.user_disappears(user)
if user.dirty:
self.save_user(user)
def announce_community_change(self, com):
if self.community_gui != None:
self.community_gui.community_changes(com)
for plugin in get_plugins():
plugin.community_changes(com)
def announce_user_change(self, user, allowme=False, what=None):
""" Report a modified user to plugins and subsystems """
if allowme == False and user == self.myself:
return
if self.community_gui != None:
self.community_gui.user_changes(user, what)
for plugin in get_plugins():
plugin.user_changes(user, what)
def announce_user(self, user):
""" Report a new user to plugins and subsystems """
if user == self.myself:
self.notify.notify('Announce bug, not announcing myself')
return
self.activeusers[user] = None
if get_debug_mode() or user.get('friend'):
appearsmsg = 'appears'
hops = user.get('hops')
if hops != None:
appearsmsg += ' at %d hops distance' % hops
self.notify.user_notify(user, appearsmsg)
if self.community_gui != None:
self.community_gui.user_appears(user)
for plugin in get_plugins():
plugin.user_appears(user)
def fetch_community_profiles(self, user):
cnames = []
versions = []
for com in self.get_user_communities(user):
if com.get('name') != DEFAULT_COMMUNITY_NAME:
cnames.append(com.get('name'))
versions.append(com.get('v'))
if len(cnames) == 0:
return
request = {'t': 'cprofiles', 'cname': cnames, 'version': versions}
self.fetcher.fetch(user, PLUGIN_TYPE_COMMUNITY, request, self.got_community_profiles)
def find_communities(self, name=None, peer=None, public=None):
""" Search for communities given 3 criteria.
'name' is the name of a community to search for, or None.
name == None means all community names.
'peer' and 'public' have 3 possible values: None, False and True.
None means both False and True."""
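# e.g. find_communities(None, peer=True) returns every peer community, public or private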
return find_communities(name, peer, public)
def gen_rpc_hello(self):
return {'t': TP_HELLO,
'v': PROXIMATE_PROTOCOL_VERSION,
'pv': self.myself.get('v'),
'port': self.myself.get('port'),
'nick': self.myself.get('nick'),
'uid': self.myuid,
}
def gen_rpc_bye(self):
return bencode({'t': TP_QUIT, 'uid': self.myuid})
def get_community_dir(self, community):
return get_community_dir(community)
def get_community_members(self, community):
""" Get users belonging to the community """
if not community.get('peer'):
members = self.personal_community_members(community)
return filter(lambda user: user.is_present(), members)
users = self.activeusers.keys()
cname = community.get('name')
return filter(lambda user: cname in user.get('communities'), users)
def get_default_community(self):
return self.get_ordinary_community(DEFAULT_COMMUNITY_NAME)
def get_friend_community(self):
l = self.find_communities(FRIENDS_COMMUNITY_NAME, peer=False, public=False)
assert(len(l) <= 1)
if len(l) == 0:
return None
return l[0]
def get_friends(self):
return self.get_friend_community().get('members')
def get_myself(self):
return self.myself
def get_myuid(self):
return self.myuid
def get_network_state(self, network):
assert(network == self.IP_NETWORK)
if network == self.IP_NETWORK:
return self.ipactive
return False
def get_ordinary_community(self, cname):
return get_ordinary_community(cname)
def get_user(self, uid):
return get_user(uid)
def get_user_communities(self, user):
""" Get list of peer communities the user is member of. """
communities = []
for cname in user.get('communities'):
community = self.get_ordinary_community(cname)
if community != None:
communities.append(community)
return communities
def get_user_dir(self, user=None):
""" If user == None, use myself """
assert(user == None or isinstance(user, User))
if user == None:
user = self.myself
return get_user_dir(user)
def get_users(self, active):
if active:
return self.activeusers.keys()
else:
return get_users()
def got_community_profiles(self, user, reply, ctx):
if reply == None:
return
validator = {
'cname': [ZERO_OR_MORE, str],
'profile': [ZERO_OR_MORE, {}]
}
if not validate(validator, reply):
            warning('Invalid community profiles reply: %s\n' % str(reply))
return
communities = self.get_user_communities(user)
for (cname, profile) in zip(reply['cname'], reply['profile']):
if cname == DEFAULT_COMMUNITY_NAME:
continue
com = self.get_ordinary_community(cname)
if com in communities:
self.update_community_profile(com, user, profile)
communities.remove(com)
# Do icon requests for the rest of communities
for com in communities:
if com.get('name') != DEFAULT_COMMUNITY_NAME:
self.request_com_icon(user, com)
def got_user_profile(self, user, reply, ctx):
""" This is called when other person's profile has been received """
user.inprogress = False
if reply == None:
return
profile = reply.get('uprofile')
if profile == None:
warning('Invalid user profile: %s\n' % str(reply))
return
uid = profile.get('uid')
if not valid_uid(uid):
warning('Invalid uid: %s\n' % str(uid))
return
if uid == self.myuid or uid != user.get('uid'):
warning('uid treason detected. Message from %s: %s\n' % (user.get('uid'), str(profile)))
return
oldstatus = (user.get('status'), user.get('status_icon'))
if not user.unserialize(profile):
warning('Invalid user profile: %s\n' % str(profile))
return
# Now we know the profile is valid
create_user_communities(user)
self.announce_user_change(user)
self.save_user(user)
if oldstatus != (user.get('status'), user.get('status_icon')):
self.show_status_change(user)
# The user profile is now up-to-date. Now we can fetch everything else.
self.request_user_icon(user)
self.fetch_community_profiles(user)
def handle_icon_push(self, user, request):
""" This is called when is received. Save the icon image. """
validator = {'icon': str,
'iconid': str,
'version': lambda i: is_unsigned_int('version', i)
}
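        # iconid convention used below: 'user' is the sender's own face icon,
        # 'c:<community name>' is a community icon.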
if not validate(validator, request):
return None
icon = request['icon']
iconid = request['iconid']
if iconid == 'user':
if user.get('faceversion') != request['version']:
# This is an outdated version of the icon..
return None
if icon == '':
# if we got an empty string, user removed the icon
# giving None to save_face removes the picture
delete_face(user)
elif not save_face(user, icon):
warning('Could not save face for %s: %d bytes\n' % (user.tag(), len(icon)))
return None
user.set('myfaceversion', request['version'])
self.announce_user_change(user, what=(PROFILE_ICON_CHANGED, None))
elif iconid.startswith('c:'):
cname = iconid[2:]
if cname == DEFAULT_COMMUNITY_NAME:
return None
com = self.get_ordinary_community(cname)
if com == None:
return None
if com.get('iconversion') != request['version']:
# This is an outdated version of the icon..
return None
if com.get('iconlocked'):
return None
if icon == '':
delete_community_icon(com)
elif not save_community_icon(com, icon):
warning('Failed to update community icon: %s\n' % cname)
return None
com.set('myiconversion', request['version'])
self.announce_community_change(com)
return None
def handle_community_profile_fetch(self, user, request):
cname = request.get('cname')
if type(cname) != str:
            warning('Invalid community profile fetch: %s\n' % str(request))
return None
community = self.get_ordinary_community(cname)
if community == None:
return None
return {'cprofile': community.serialize()}
def handle_community_profiles_fetch(self, user, request):
validator = {
'cname': [ZERO_OR_MORE, str],
'version': [ZERO_OR_MORE, lambda i: is_unsigned_int('version', i)]
}
if not validate(validator, request):
            warning('Invalid community profiles fetch: %s\n' % str(request))
return None
cnames = []
profiles = []
for (cname, version) in zip(request['cname'], request['version']):
com = self.get_ordinary_community(cname)
if com == None:
continue
if version < com.get('v'):
cnames.append(cname)
profiles.append(com.serialize())
debug('Sending %s community profile to %s\n' %
(com.get('name'), user.get('nick')))
return {'cname': cnames, 'profile': profiles}
def handle_request(self, user, request):
handler = self.fetchhandlers.get(request['t'])
if handler == None:
warning('Community not handling request: %s\n' % str(request))
return None
return handler(user, request)
def handle_hello(self, user, hello):
self.remoteusers[user] = 0
self.got_hello(hello, None)
return {}
def handle_invite_community(self, user, request):
cname = request.get('cname')
if cname == None:
return None
community = self.get_ordinary_community(cname)
if community == None:
warning('Got invite to unknown community: %s\n' % cname)
return None
if community in self.get_user_communities(self.myself):
warning('Got invite to community I am already in: %s\n' % cname)
return None
self.notify.notify_with_response('%s invited you to community %s. Join the community?' %
(user.tag(), community.get('name')), \
self.invite_response, community)
return {}
def invite_response(self, response, msg, community):
if response == self.notify.RESPONSE_ACTIVATED:
self.join_community(community)
return True
return False
def handle_icon_request(self, user, request):
iconid = request.get('iconid')
if iconid == None or type(iconid) != str:
return None
debug('Icon request from %s: %s\n' % (user.get('nick'), iconid))
if iconid == 'user':
icon = read_file_contents(seek_face_name(self.myself))
version = self.myself.get('faceversion')
limiter = self.iconfetchlimiters['user']
elif iconid.startswith('c:'):
cname = iconid[2:]
if not valid_community(cname):
return None
if cname not in self.myself.get('communities'):
return None
com = self.get_ordinary_community(cname)
if com == None:
return None
if com.get('myiconversion') != com.get('iconversion'):
# Do not reply with a old version of the icon!
return
icon = read_file_contents(seek_community_icon_name(com))
version = com.get('iconversion')
limiter = self.iconfetchlimiters.get(iconid)
if limiter == None:
limiter = Rate_Limiter(ICON_PUSH_INTERVAL)
self.iconfetchlimiters[iconid] = limiter
else:
return None
if icon == None:
icon = ''
if version == None:
version = 0
request = {'t': 'iconpush', 'iconid': iconid, 'icon': icon, 'version': version}
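        # In normal traffic mode the icon is pushed directly to the requester;
        # otherwise it is broadcast to the default community, rate-limited per
        # icon id so repeated requests do not flood the network.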
if normal_traffic_mode():
self.fetcher.fetch(user, PLUGIN_TYPE_COMMUNITY, request, None, ack=False)
elif limiter == None or limiter.check():
self.fetcher.fetch_community(self.get_default_community(), PLUGIN_TYPE_COMMUNITY, request, None, ack=False)
return {}
def handle_user_profile_fetch(self, user, request):
return {'uprofile': self.myself.serialize()}
def is_blacklisted(self, user):
return self.blacklist.has_key(user)
def is_me(self, user):
return user == self.myself
def is_member(self, user, com, allowtemporary=True):
""" Test if a user belongs to a community """
if user == self.myself and com.get('peer') == False:
return True
return user.in_community(com, allowtemporary=allowtemporary)
def is_my_friend(self, user):
assert(isinstance(user, User))
return user.get('uid') in self.get_friends()
def invite_member(self, com, user, cb):
""" Invite user to a peer community. """
request = {'t': 'cinvite', 'cname': com.get('name')}
return self.fetcher.fetch(user, PLUGIN_TYPE_COMMUNITY, request, self.invite_sent, cb, retries=1)
def invite_sent(self, user, reply, cb):
success = (reply != None)
cb(success)
def join_community(self, community, temporary=False):
""" Join to a peer community. """
if not community.get('peer'):
warning('Can not join to personal communities\n')
return False
self.myself.join_community(community, temporary)
self.announce_user_change(self.myself, allowme=True)
self.notify.notify('Joined to community %s' % community.get('name'))
return True
def leave_community(self, community):
""" Leave a peer community """
if not community.get('peer'):
warning('Can not leave a personal community\n')
return False
self.myself.leave_community(community)
self.announce_user_change(self.myself, allowme=True)
self.notify.notify('Left community %s' % community.get('name'))
return True
def log_users(self, t, ctx):
users = {}
for user in self.activeusers.keys():
d = {}
for attr, val in user.d.items():
if attr in ['uid', 'v', 'fscounter', 'faceversion', 'status_icon']:
continue
if val == None:
continue
ma = user.metaattributes.get(attr)
if ma == None or ma.public == False:
continue
if type(val) == list or type(val) == str:
x = len(val)
else:
x = 1
d[attr] = x
users[user.get('uid')] = d
def delete_personal_community(self, community):
# Don't delete friends community
if community.get('peer') or community == self.get_friend_community():
return False
for uid in community.get('members'):
self.remove_member(community, self.get_user(uid))
delete_community(community)
self.announce_user_change(self.myself, allowme=True)
return True
def request_user_icon(self, user):
if user.get('myfaceversion') != user.get('faceversion') and \
len(self.activeusers) < MAX_ICON_ACTIVE:
request = {'t': 'iconrequest', 'iconid': 'user'}
self.fetcher.fetch(user, PLUGIN_TYPE_COMMUNITY, request, None, ack=False)
def request_com_icon(self, user, com):
if com.get('myiconversion') != com.get('iconversion') and \
not com.get('iconlocked') and len(self.activeusers) < MAX_ICON_ACTIVE:
iconid = 'c:' + com.get('name')
request = {'t': 'iconrequest', 'iconid': iconid}
self.fetcher.fetch(user, PLUGIN_TYPE_COMMUNITY, request, None, com, ack=False)
def remote_discovery(self):
""" remote discovery keeps remote connections open in each possible
        direction to bypass one-sided firewalls. """
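        # Timing note (inferred from the 15 s / 30 s figures below, not stated
        # explicitly here): this runs from periodic_event() every
        # TP_SCAN_INTERVAL seconds, so the modulo-3 and modulo-6 counters fire
        # every third and sixth tick respectively.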
if self.get_network_state(self.IP_NETWORK) == False:
return
hello = None
benhello = None
for user in self.remoteusers:
counter = self.remoteusers[user]
if self.activeusers.has_key(user):
# Hello period is 15s
self.remoteusers[user] = (counter + 1) % 3
if counter != 0:
continue
if hello == None:
hello = self.gen_rpc_hello()
self.fetcher.fetch(user, PLUGIN_TYPE_COMMUNITY, hello, None)
continue
addresses = user.get('remotes')
if addresses == None or len(addresses) == 0:
continue
# Try connection period is 30s -> 2880 connections/day
self.remoteusers[user] = (counter + 1) % 6
if counter != 0:
continue
if benhello == None:
if hello == None:
hello = self.gen_rpc_hello()
benhello = bencode(hello)
for address in addresses:
port = address[1]
if port == None:
port = DEFAULT_PROXIMATE_PORT
Hello_Client((address[0], port), benhello)
def periodic_event(self, t, ctx):
if self.udp_send:
self.broadcast(bencode(self.gen_rpc_hello()))
for user in self.activeusers.keys():
if user.timeout():
self.denounce_user(user)
self.remote_discovery()
self.req_counter += 1
if self.req_counter >= REQUEST_INTERVAL:
for user in self.activeusers.keys():
self.request_user_icon(user)
self.req_counter = 0
if self.myself.dirty:
self.save_user(self.myself)
return True
def personal_community_members(self, community):
assert(community.get('peer') == False)
members = community.get('members')
assert(type(members) == list)
return filter(lambda u: u != None, map(lambda uid: self.get_user(uid), members))
def ready(self):
global community
community = self
self.fetcher = get_plugin_by_type(PLUGIN_TYPE_FETCHER)
self.notify = get_plugin_by_type(PLUGIN_TYPE_NOTIFICATION)
self.net = get_plugin_by_type(PLUGIN_TYPE_NETWORK_CONTROL)
self.fetcher.register_handler(PLUGIN_TYPE_COMMUNITY, self.handle_request, 'community fetch')
settings = get_plugin_by_type(PLUGIN_TYPE_SETTINGS)
self.default_rpc_port_setting = settings.register('community.rpc_port', int, 'TCP listening port; 0 means a random port.\nTakes effect after restart', default=0, validator=valid_port)
self.listener_port_setting = settings.register('community.listener_port', int, 'Peer discovery (UDP) listening port.\nTakes effect after restart', default=DEFAULT_PROXIMATE_PORT, validator=valid_port)
# The command line setting has the highest priority, then comes
# the config file port, and random port has the least priority.
if self.activeport != None:
self.myself.set('port', self.activeport)
else:
port = self.default_rpc_port_setting.value
if port != 0:
self.myself.set('port', port)
else:
self.gen_port()
if self.udp_listen:
self.create_udp_listener()
sch = get_plugin_by_type(PLUGIN_TYPE_SCHEDULER)
sch.call_periodic(TP_SCAN_INTERVAL * sch.SECOND, self.periodic_event, callnow=True)
# Set periodic active user logging
sch.call_periodic(15 * 60 * sch.SECOND, self.log_users)
def remove_friend(self, user):
assert(isinstance(user, User))
self.get_friend_community().remove_member(user)
self.announce_user_change(user)
def remove_member(self, com, user):
""" Remove member from a personal community. """
assert(isinstance(com, Community))
assert(isinstance(user, User))
if com.get('peer'):
warning('Can not remove member from peer community\n')
return False
if com == self.blistcom:
self.blacklist.pop(user, None)
com.remove_member(user)
self.save_communities([com])
self.announce_user_change(user, allowme=True)
self.notify.notify('%s removed from community %s' % (user.tag(), com.get('name')))
return True
def get_rpc_port(self):
return self.myself.get('port')
def gen_port(self):
port = randint(TP_MIN_PORT, TP_MAX_PORT)
self.myself.set('port', port)
def got_rpc_msg(self, data, address):
if not self.ipactive:
return
d = fmt_bdecode({'t': str}, data)
if d == None:
return
if d['t'] == TP_HELLO:
self.got_hello(d, address)
elif d['t'] == TP_QUIT:
self.got_bye(d, address)
else:
info('Invalid RPC hello type: %s\n' % d['t'])
def got_hello(self, d, address):
""" Check validity of Proximate hello, and register the other party. """
validator = {
'v': valid_protocol_version,
'pv': lambda x: type(x) == int and x >= 0,
'port': valid_port,
'nick': valid_nick,
'uid': lambda s: valid_uid(s) and s != self.myuid,
}
if not validate(validator, d):
if type(d) != dict or d.get('uid') != self.myuid:
info('Rejecting signature: %s\n' % str(d))
return
updatelist = [('nick', d['nick']), ('protocolversion', d['v'])]
if address != None:
ip = address[0]
else:
ip = None
self.add_or_update_user(d['uid'], updatelist, d['pv'], ip, d['port'])
def got_bye(self, d, address):
""" User quit, denounce """
validator = {
'uid': lambda s: valid_uid(s) and s != self.myuid,
}
if not validate(validator, d):
return
user = self.safe_get_user(d.get('uid'), address[0])
if user == None:
info('Rejecting quit message from uid %s\n' % d.get('uid'))
else:
self.denounce_user(user)
def safe_get_user(self, uid, ip):
if valid_uid(uid) == False or uid == self.myuid:
return None
user = self.get_user(uid)
if user == None:
# Create a minimal user object for the peer so that we can reply
return create_user(uid)
oldip = user.get('ip')
if ip != None and oldip != None and ip != oldip:
return None
if self.is_blacklisted(user):
return None
return user
def save_communities(self, communities):
save_communities(communities)
def save_user(self, user):
save_user(user)
def set_community_icon(self, com, icon_fname):
if icon_fname == None:
delete_community_icon(com)
else:
icon = read_file_contents(icon_fname)
if icon == None:
warning('Can not set community icon from %s\n' % icon_fname)
return False
if not save_community_icon(com, icon):
warning('Could not save community icon from %s\n' % icon_fname)
return False
# New icon version so other users will be notified. Random number
# because this is a distributed system
version = randint(0, 1 << 32 - 1)
com.set('iconversion', version)
com.set('myiconversion', version)
self.announce_community_change(com)
return True
def set_network_state(self, network, state):
""" set_network_state() is used to disable networks """
assert(network == self.IP_NETWORK)
if self.ipactive == state:
return
self.ipactive = state
if state == False:
# Close IP network: rpc.py will react indirectly
msg = 'IP networking disabled'
self.fetcher.close_ip_connections(msg)
self.notify.notify(msg + ' (this is a fake disable)', highpri=True)
else:
self.notify.notify('IP networking enabled', highpri=True)
def set_my_face(self, face_fname):
""" Set new profile picture for given user. Should be myself! """
if not face_fname:
delete_face(self.myself)
else:
face = read_file_contents(face_fname)
if face == None:
warning('Can not set user face from %s\n' % face_fname)
return False
if not save_face(self.myself, face):
warning('Could not save user face from %s\n' % face_fname)
return False
if self.myself.get('faceversion') == None:
self.myself.set('faceversion', 0)
else:
self.myself.set('faceversion', self.myself.get('faceversion') + 1)
self.announce_user_change(self.myself, allowme=True)
return True
def udp_listener_read(self, rfd, condition):
""" Receive packet from listening socket and check Proximate hello """
try:
data, address = rfd.recvfrom(1024)
except socket.error, (errno, strerror):
ret = (errno == EAGAIN or errno == EINTR)
if not ret:
warning('WLAN UDP Listen: Socket error(%s): %s\n' % (errno, strerror))
return ret
self.got_rpc_msg(data, address)
return True
def update_community_profile(self, com, user, profile):
if com.get('name') != profile.get('name'):
warning('Name mismatch in community profile: %s vs %s\n' % (com.get('name'), str(profile)))
return
if not com.unserialize(profile):
warning('At least part of the community profile failed: %s\n' % profile)
return
self.announce_community_change(com)
self.save_communities([com])
self.request_com_icon(user, com)
def get_user_personal_communities(self, user):
uid = user.get('uid')
coms = []
for com in self.find_communities(peer=False):
members = com.get('members')
assert(type(members) == list)
if uid in members:
coms.append(com)
return coms
def show_status_change(self, user):
status = user.get('status')
status_icon = user.get('status_icon')
text = 'changed status to '
if status:
text += status
if status_icon:
if status:
text += ' (%s)' % status_icon
else:
text += status_icon
self.notify.user_notify(user, text)
class Hello_Client:
def __init__(self, address, benhello):
self.address = address
self.q = TCP_Queue(self.msghandler)
if not self.q.connect(address, TP_CONNECT_TIMEOUT):
return
prefix = TP_HELLO + '\n'
self.q.write(prefix, writelength=False)
self.q.write(benhello)
self.q.set_timeout(TP_CONNECT_TIMEOUT)
def msghandler(self, q, data, ctx):
community.got_rpc_msg(data, self.address)
return False
class Hello_Server:
def __init__(self, address, sock, data):
self.q = TCP_Queue(self.msghandler)
self.q.set_timeout(TP_CONNECT_TIMEOUT)
self.address = address
self.q.append_input(data)
self.q.initialize(sock)
def msghandler(self, q, benhello, ctx):
community.got_rpc_msg(benhello, self.address)
self.q.write(bencode(community.gen_rpc_hello()))
self.q.close_after_send()
return True
def init(options):
Community_Plugin(options)
| proximate/proximate | community.py | Python | bsd-3-clause | 37,731 |
#!/usr/bin/env python
import rospy
import math
import time
from sensor_msgs.msg import Range
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist
from sonar_data_aggregator import SonarDataAggregator
from laser_data_aggregator import LaserDataAggregator
from navigation import Navigation
# Class for assigning the robot speeds
class RobotController:
# Constructor
def __init__(self):
# Debugging purposes
self.print_velocities = rospy.get_param('print_velocities')
# Where and when should you use this?
self.stop_robot = False
# Create the needed objects
self.sonar_aggregation = SonarDataAggregator()
self.laser_aggregation = LaserDataAggregator()
self.navigation = Navigation()
self.linear_velocity = 0
self.angular_velocity = 0
# Check if the robot moves with target or just wanders
self.move_with_target = rospy.get_param("calculate_target")
# The timer produces events for sending the speeds every 110 ms
rospy.Timer(rospy.Duration(0.11), self.publishSpeeds)
self.velocity_publisher = rospy.Publisher(\
rospy.get_param('speeds_pub_topic'), Twist,\
queue_size = 10)
# This function publishes the speeds and moves the robot
def publishSpeeds(self, event):
# Produce speeds
self.produceSpeeds()
# Create the commands message
twist = Twist()
twist.linear.x = self.linear_velocity
twist.linear.y = 0
twist.linear.z = 0
twist.angular.x = 0
twist.angular.y = 0
twist.angular.z = self.angular_velocity
# Send the command
self.velocity_publisher.publish(twist)
        # Print the speeds for debugging purposes
if self.print_velocities == True:
print "[L,R] = [" + str(twist.linear.x) + " , " + \
str(twist.angular.z) + "]"
# Produces speeds from the laser
def produceSpeedsLaser(self):
scan = self.laser_aggregation.laser_scan
linear = 0
angular = 0
############################### NOTE QUESTION ############################
# Check what laser_scan contains and create linear and angular speeds
# for obstacle avoidance
##########################################################################
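        # A minimal sketch of one possible approach, kept as a comment so the
        # exercise below stays open (assumption: laser_scan is a flat list of
        # range readings, so min(scan) is the closest obstacle):
        #   closest = min(scan)
        #   linear = 0.3 * (1 - math.exp(-closest))   # slow down near obstacles
        #   left = sum(scan[len(scan) // 2:])          # readings on one half
        #   right = sum(scan[:len(scan) // 2])         # readings on the other
        #   angular = 0.001 * (left - right)           # turn toward the freer side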
return [linear, angular]
# Combines the speeds into one output using a motor schema approach
def produceSpeeds(self):
# Produce target if not existent
if self.move_with_target == True and \
self.navigation.target_exists == False:
# Create the commands message
twist = Twist()
twist.linear.x = 0
twist.linear.y = 0
twist.linear.z = 0
twist.angular.x = 0
twist.angular.y = 0
twist.angular.z = 0
# Send the command
self.velocity_publisher.publish(twist)
self.navigation.selectTarget()
# Get the submodule's speeds
[l_laser, a_laser] = self.produceSpeedsLaser()
# You must fill these
self.linear_velocity = 0
self.angular_velocity = 0
if self.move_with_target == True:
[l_goal, a_goal] = self.navigation.velocitiesToNextSubtarget()
############################### NOTE QUESTION ############################
# You must combine the two sets of speeds. You can use motor schema,
            # subsumption or whatever suits you better.
##########################################################################
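            # One hedged possibility (motor-schema style weighted sum; the
            # 0.7 / 0.3 weights are illustrative, not prescribed here):
            #   self.linear_velocity = 0.7 * l_goal + 0.3 * l_laser
            #   self.angular_velocity = 0.7 * a_goal + 0.3 * a_laser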
else:
############################### NOTE QUESTION ############################
# Implement obstacle avoidance here using the laser speeds.
# Hint: Subtract them from something constant
pass
##########################################################################
# Assistive functions
def stopRobot(self):
self.stop_robot = True
def resumeRobot(self):
self.stop_robot = False
| etsardou/intelligent_robot_systems_2016 | art_autonomous_exploration/src/speeds_assignment.py | Python | gpl-3.0 | 4,002 |
# This library is free software, distributed under the terms of
# the GNU Lesser General Public License Version 3, or any later version.
# See the COPYING file included in this archive
""" Entangled DHT and distributed tuple space
The distributed hash table (DHT) used by Entangled is based on Kademlia,
and can be accessed via the C{entangled.kademlia} package, or by simply
instantiating/subclassing the exposed C{KademliaNode} in the main C{entangled}
package.
On top of this Kademlia node Entangled provides some extra functionality
in the form of a "C{DELETE}" RPC and keyword-based search operations; these
functions are accessible via the C{EntangledNode} class in the main
C{entangled} package.
The Entangled distributed tuple space is exposed as the
C{DistributedTupleSpacePeer} class, accessible via the main C{entangled}
package or its C{dtuple} module.
"""
from kademlia.node import Node as KademliaNode
from node import EntangledNode
from dtuple import DistributedTupleSpacePeer
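# A minimal usage sketch (not part of this package; the constructor argument,
# the joinNetwork() call and the use of the Twisted reactor are assumptions
# based on the docstring above):
#
#   from twisted.internet import reactor
#   node = EntangledNode(udpPort=4000)
#   node.joinNetwork([('192.168.0.2', 4000)])  # bootstrap contact is hypothetical
#   reactor.run()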
| anupcshan/buddyfs | entangled/__init__.py | Python | mit | 992 |
"""
Test PEP 0448 -- Additional Unpacking Generalizations
https://www.python.org/dev/peps/pep-0448/
"""
# pylint: disable=superfluous-parens, unnecessary-comprehension
UNPACK_TUPLE = (*range(4), 4)
UNPACK_LIST = [*range(4), 4]
UNPACK_SET = {*range(4), 4}
UNPACK_DICT = {'a': 1, **{'b': '2'}}
UNPACK_DICT2 = {**UNPACK_DICT, "x": 1, "y": 2}
UNPACK_DICT3 = {**{'a': 1}, 'a': 2, **{'a': 3}}
UNPACK_IN_COMP = {elem for elem in (*range(10))} # [star-needs-assignment-target]
| ruchee/vimrc | vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/star/star_needs_assignment_target_py35.py | Python | mit | 472 |
# _*_ coding:utf-8 _*_
"""
Created on 2015-06-13
@author: lujin
"""
from django.conf.urls import url
from app.setup import views
urlpatterns = [
url(r'^website/$', views.website),
url(r'^personal/$', views.personal),
] | myangeline/rorobot | app/setup/urls.py | Python | apache-2.0 | 228 |
'''
>>> p = Portfolio()
>>> stocks = (('APPL', 1000, 251.80, 252.73),
... ('CSCO', 5000, 23.09, 23.74),
... ('GOOG', 500, 489.23, 491.34),
... ('MSFT', 2000, 24.63, 25.44))
...
>>> for stock in stocks:
... p.add(Investment(*stock))
>>> print p['APPL']
1000 shares of APPL worth 252730.00
>>> p['GOOG'].quantity
500
>>> p['GOOG'].close
491.33999999999997
>>> p['GOOG'].open
489.23000000000002
>>> for stock in p:
... print stock
1000 shares of APPL worth 252730.00
5000 shares of CSCO worth 118700.00
500 shares of GOOG worth 245670.00
2000 shares of MSFT worth 50880.00
>>> for stock in p.sorted('open'):
... print stock.name
CSCO
MSFT
APPL
GOOG
>>> p['MSFT'].gain
0.81000000000000227
>>> p['CSCO'].total_gain
3249.9999999999927
>>> 'GOOG' in p
True
>>> 'YHOO' in p
False
'''
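# The Portfolio and Investment classes exercised by the doctest above are not
# defined in this file. The sketch below is a hypothetical minimal
# implementation inferred from the doctest (assumptions: gain = close - open,
# total_gain = gain * quantity, and str() reports quantity * close); exact
# float reprs in the doctest may still vary between Python versions.
class Investment(object):
    def __init__(self, name, quantity, open, close):
        self.name = name
        self.quantity = quantity
        self.open = open
        self.close = close
    @property
    def gain(self):
        # per-share gain
        return self.close - self.open
    @property
    def total_gain(self):
        # gain over the whole position
        return self.gain * self.quantity
    def __str__(self):
        return '%d shares of %s worth %.2f' % (
            self.quantity, self.name, self.quantity * self.close)
class Portfolio(object):
    def __init__(self):
        self._stocks = {}
    def add(self, investment):
        self._stocks[investment.name] = investment
    def __getitem__(self, name):
        return self._stocks[name]
    def __contains__(self, name):
        return name in self._stocks
    def __iter__(self):
        # iterate in name order, matching the doctest output
        return iter(sorted(self._stocks.values(), key=lambda s: s.name))
    def sorted(self, attr):
        return sorted(self._stocks.values(), key=lambda s: getattr(s, attr))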
if __name__ == '__main__':
import doctest
doctest.testmod()
| csuttles/utils | python/py-book/oop/oop-3-portfolio.py | Python | apache-2.0 | 887 |
# -*- coding: utf-8 -*-
# © 2014 Elico Corp (https://www.elico-corp.com)
# Licence AGPL-3.0 or later(http://www.gnu.org/licenses/agpl.html)
from openerp.addons.connector.unit.mapper import (ExportMapper, ExportMapChild)
class ICOPSExportMapChild(ExportMapChild):
""" :py:class:`MapChild` for the Exports """
def _child_mapper(self):
mapper = self.get_connector_unit_for_model(
ExportMapper, self.model._name)
mapper._icops = self._icops
mapper._backend_to = self._backend_to
return mapper
def format_items(self, items_values):
items = super(ICOPSExportMapChild, self).format_items(items_values)
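        # (5, 0) clears any existing one2many/many2many links and each
        # (0, 0, data) tuple creates a new child record, following the OpenERP
        # ORM write-command convention.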
return [(5, 0)] + [(0, 0, data) for data in items]
class ICOPSExportMapper(ExportMapper):
_map_child_class = ICOPSExportMapChild
def __init__(self, environment):
"""
:param environment: current environment (backend, session, ...)
:type environment: :py:class:`connector.connector.Environment`
"""
super(ICOPSExportMapper, self).__init__(environment)
self._icops = None
self._backend_to = None
def _get_map_child_unit(self, model_name):
mapper = super(ICOPSExportMapper, self)._get_map_child_unit(model_name)
mapper._icops = self._icops
mapper._backend_to = self._backend_to
return mapper
def _get_mapping(self, name, record):
res = {}
for method in dir(self):
if method.startswith('%s_' % name):
new_dict = getattr(self, method)(record)
res = dict(res.items() + new_dict.items())
return res
| Elico-Corp/openerp-7.0 | base_intercompany/unit/mapper.py | Python | agpl-3.0 | 1,643 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Crazyflie radio bootloader for flashing firmware.
"""
import logging
import time
import struct
import math
import random
import cflib.crtp
from cflib.crtp.crtpstack import CRTPPacket, CRTPPort
from .boottypes import TargetTypes, Target
__author__ = 'Bitcraze AB'
__all__ = ['Cloader']
logger = logging.getLogger(__name__)
class Cloader:
"""Bootloader utility for the Crazyflie"""
def __init__(self, link, info_cb=None, in_boot_cb=None):
"""Init the communication class by starting to communicate with the
link given. clink is the link address used after resetting to the
bootloader.
The device is actually considered in firmware mode.
"""
self.link = None
self.uri = link
self.in_loader = False
self.page_size = 0
self.buffer_pages = 0
self.flash_pages = 0
self.start_page = 0
self.cpuid = "N/A"
self.error_code = 0
self.protocol_version = 0xFF
self._info_cb = info_cb
self._in_boot_cb = in_boot_cb
self.targets = {}
self.mapping = None
self._available_boot_uri = ("radio://0/110/2M", "radio://0/0/2M")
def close(self):
""" Close the link """
if self.link:
self.link.close()
def scan_for_bootloader(self):
link = cflib.crtp.get_link_driver("radio://0")
ts = time.time()
res = ()
while len(res) == 0 and (time.time() - ts) < 10:
res = link.scan_selected(self._available_boot_uri)
link.close()
if len(res) > 0:
return res[0]
return None
def reset_to_bootloader(self, target_id):
retry_counter = 5
pk = CRTPPacket()
pk.set_header(0xFF, 0xFF)
pk.data = (target_id, 0xFF)
self.link.send_packet(pk)
pk = self.link.receive_packet(1)
while ((not pk or pk.header != 0xFF or
struct.unpack("<BB", pk.data[0:2]) != (target_id, 0xFF)
) and retry_counter >= 0):
pk = self.link.receive_packet(1)
retry_counter -= 1
if pk:
new_address = (0xb1,) + struct.unpack("<BBBB", pk.data[2:6][::-1])
pk = CRTPPacket()
pk.set_header(0xFF, 0xFF)
pk.data = (target_id, 0xF0, 0x00)
self.link.send_packet(pk)
addr = int(struct.pack("B" * 5, *new_address).encode('hex'), 16)
time.sleep(0.2)
self.link.close()
time.sleep(0.2)
self.link = cflib.crtp.get_link_driver(
"radio://0/0/2M/{}".format(addr))
return True
else:
return False
def reset_to_bootloader1(self, cpu_id):
""" Reset to the bootloader
The parameter cpuid shall correspond to the device to reset.
Return true if the reset has been done and the contact with the
bootloader is established.
"""
# Send an echo request and wait for the answer
# Mainly aim to bypass a bug of the crazyflie firmware that prevents
# reset before normal CRTP communication
pk = CRTPPacket()
pk.port = CRTPPort.LINKCTRL
pk.data = (1, 2, 3) + cpu_id
self.link.send_packet(pk)
pk = None
while True:
pk = self.link.receive_packet(2)
if not pk:
return False
if pk.port == CRTPPort.LINKCTRL:
break
# Send the reset to bootloader request
pk = CRTPPacket()
pk.set_header(0xFF, 0xFF)
pk.data = (0xFF, 0xFE) + cpu_id
self.link.send_packet(pk)
# Wait to ack the reset ...
pk = None
while True:
pk = self.link.receive_packet(2)
if not pk:
return False
if pk.port == 0xFF and tuple(pk.data) == (0xFF, 0xFE) + cpu_id:
pk.data = (0xFF, 0xF0) + cpu_id
self.link.send_packet(pk)
break
time.sleep(0.1)
self.link.close()
self.link = cflib.crtp.get_link_driver(self.clink_address)
# time.sleep(0.1)
return self._update_info()
def reset_to_firmware(self, target_id):
""" Reset to firmware
        The parameter target_id shall correspond to the device to reset.
Return true if the reset has been done
"""
        # The fake CPU ID is legacy from the Crazyflie 1.0.
        # In order to reset, the CPU id had to be sent, but this was removed
        # before launch. The length check is still in the bootloader though,
        # so to work around it some extra data needs to be sent.
fake_cpu_id = (1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12)
# Send the reset to bootloader request
pk = CRTPPacket()
pk.set_header(0xFF, 0xFF)
pk.data = (target_id, 0xFF) + fake_cpu_id
self.link.send_packet(pk)
# Wait to ack the reset ...
pk = None
while True:
pk = self.link.receive_packet(2)
if not pk:
return False
if (pk.header == 0xFF and struct.unpack(
"B" * len(pk.data), pk.data)[:2] == (target_id, 0xFF)):
# Difference in CF1 and CF2 (CPU ID)
if target_id == 0xFE:
pk.data = (target_id, 0xF0, 0x01)
else:
pk.data = (target_id, 0xF0) + fake_cpu_id
self.link.send_packet(pk)
break
time.sleep(0.1)
def open_bootloader_uri(self, uri=None):
if self.link:
self.link.close()
if uri:
self.link = cflib.crtp.get_link_driver(uri)
else:
self.link = cflib.crtp.get_link_driver(self.clink_address)
def check_link_and_get_info(self, target_id=0xFF):
"""Try to get a connection with the bootloader by requesting info
        5 times. This leaves roughly 10 seconds to boot the copter ..."""
for _ in range(0, 5):
if self._update_info(target_id):
if self._in_boot_cb:
self._in_boot_cb.call(True, self.targets[
target_id].protocol_version)
if self._info_cb:
self._info_cb.call(self.targets[target_id])
if self.protocol_version != 1:
return True
# Set radio link to a random address
addr = [0xbc] + [random.randint(0, 255) for x in range(4)]
return self._set_address(addr)
return False
def _set_address(self, new_address):
""" Change copter radio address.
This function works only with crazyradio CRTP link.
"""
logging.debug("Setting bootloader radio address to"
" {}".format(new_address))
if len(new_address) != 5:
raise Exception("Radio address should be 5 bytes long")
self.link.pause()
for _ in range(10):
logging.debug("Trying to set new radio address")
self.link.cradio.set_address((0xE7,) * 5)
pkdata = (0xFF, 0xFF, 0x11) + tuple(new_address)
self.link.cradio.send_packet(pkdata)
self.link.cradio.set_address(tuple(new_address))
if self.link.cradio.send_packet((0xff,)).ack:
logging.info("Bootloader set to radio address"
" {}".format(new_address))
self.link.restart()
return True
self.link.restart()
return False
def request_info_update(self, target_id):
if target_id not in self.targets:
self._update_info(target_id)
if self._info_cb:
self._info_cb.call(self.targets[target_id])
return self.targets[target_id]
def _update_info(self, target_id):
""" Call the command getInfo and fill up the information received in
the fields of the object
"""
# Call getInfo ...
pk = CRTPPacket()
pk.set_header(0xFF, 0xFF)
pk.data = (target_id, 0x10)
self.link.send_packet(pk)
# Wait for the answer
pk = self.link.receive_packet(2)
if (pk and pk.header == 0xFF and struct.unpack("<BB", pk.data[0:2]) ==
(target_id, 0x10)):
tab = struct.unpack("BBHHHH", pk.data[0:10])
cpuid = struct.unpack("B" * 12, pk.data[10:22])
if target_id not in self.targets:
self.targets[target_id] = Target(target_id)
self.targets[target_id].addr = target_id
if len(pk.data) > 22:
self.targets[target_id].protocol_version = pk.datat[22]
self.protocol_version = pk.datat[22]
self.targets[target_id].page_size = tab[2]
self.targets[target_id].buffer_pages = tab[3]
self.targets[target_id].flash_pages = tab[4]
self.targets[target_id].start_page = tab[5]
self.targets[target_id].cpuid = "%02X" % cpuid[0]
for i in cpuid[1:]:
self.targets[target_id].cpuid += ":%02X" % i
if (self.protocol_version == 0x10 and
target_id == TargetTypes.STM32):
self._update_mapping(target_id)
return True
return False
def _update_mapping(self, target_id):
pk = CRTPPacket()
pk.set_header(0xff, 0xff)
pk.data = (target_id, 0x12)
self.link.send_packet(pk)
pk = self.link.receive_packet(2)
if (pk and pk.header == 0xFF and struct.unpack("<BB", pk.data[0:2]) ==
(target_id, 0x12)):
m = pk.datat[2:]
if (len(m) % 2) != 0:
raise Exception("Malformed flash mapping packet")
self.mapping = []
page = 0
for i in range(int(len(m) / 2)):
for j in range(m[2 * i]):
self.mapping.append(page)
page += m[(2 * i) + 1]
def upload_buffer(self, target_id, page, address, buff):
"""Upload data into a buffer on the Crazyflie"""
# print len(buff)
count = 0
pk = CRTPPacket()
pk.set_header(0xFF, 0xFF)
pk.data = struct.pack("=BBHH", target_id, 0x14, page, address)
for i in range(0, len(buff)):
pk.data.append(buff[i])
count += 1
if count > 24:
self.link.send_packet(pk)
count = 0
pk = CRTPPacket()
pk.set_header(0xFF, 0xFF)
pk.data = struct.pack("=BBHH", target_id, 0x14, page,
i + address + 1)
self.link.send_packet(pk)
def read_flash(self, addr=0xFF, page=0x00):
"""Read back a flash page from the Crazyflie and return it"""
buff = bytearray()
page_size = self.targets[addr].page_size
for i in range(0, int(math.ceil(page_size / 25.0))):
pk = None
retry_counter = 5
while ((not pk or pk.header != 0xFF or
struct.unpack("<BB", pk.data[0:2]) != (addr, 0x1C)) and
retry_counter >= 0):
pk = CRTPPacket()
pk.set_header(0xFF, 0xFF)
pk.data = struct.pack("<BBHH", addr, 0x1C, page, (i * 25))
self.link.send_packet(pk)
pk = self.link.receive_packet(1)
retry_counter -= 1
if (retry_counter < 0):
return None
else:
buff += pk.data[6:]
# For some reason we get one byte extra here...
return buff[0:page_size]
def write_flash(self, addr, page_buffer, target_page, page_count):
"""Initiate flashing of data in the buffer to flash."""
# print "Write page", flashPage
# print "Writing page [%d] and [%d] forward" % (flashPage, nPage)
pk = None
retry_counter = 5
# print "Flasing to 0x{:X}".format(addr)
while ((not pk or pk.header != 0xFF or
struct.unpack("<BB", pk.data[0:2]) != (addr, 0x18)) and
retry_counter >= 0):
pk = CRTPPacket()
pk.set_header(0xFF, 0xFF)
pk.data = struct.pack("<BBHHH", addr, 0x18, page_buffer,
target_page, page_count)
self.link.send_packet(pk)
pk = self.link.receive_packet(1)
retry_counter -= 1
if retry_counter < 0:
self.error_code = -1
return False
self.error_code = pk.data[3]
return pk.data[2] == 1
def decode_cpu_id(self, cpuid):
"""Decode the CPU id into a string"""
ret = ()
for i in cpuid.split(':'):
ret += (eval("0x" + i),)
return ret
| manojngb/Crazyfly_simple_lift | src/cflib/cflib/bootloader/cloader.py | Python | gpl-2.0 | 14,059 |
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.f5networks.f5_modules.plugins.modules.bigip_gtm_monitor_tcp import (
ApiParameters, ModuleParameters, ModuleManager, ArgumentSpec
)
from ansible_collections.f5networks.f5_modules.tests.unit.compat import unittest
from ansible_collections.f5networks.f5_modules.tests.unit.compat.mock import Mock, patch
from ansible_collections.f5networks.f5_modules.tests.unit.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
parent='/Common/my-tcp',
send='the send string',
receive='the receive string',
ip='1.1.1.1',
port='80',
interval='10',
timeout='20',
ignore_down_response=True,
transparent=False,
probe_timeout='30',
reverse=True
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.parent == '/Common/my-tcp'
assert p.send == 'the send string'
assert p.receive == 'the receive string'
assert p.destination == '1.1.1.1:80'
assert p.ip == '1.1.1.1'
assert p.port == 80
assert p.interval == 10
assert p.timeout == 20
assert p.ignore_down_response is True
assert p.transparent is False
assert p.probe_timeout == 30
assert p.reverse is True
def test_api_parameters(self):
args = load_fixture('load_gtm_monitor_tcp_1.json')
p = ApiParameters(params=args)
assert p.name == 'foo'
assert p.parent == '/Common/tcp'
assert p.send == 'the send string'
assert p.receive == 'the receive string'
assert p.destination == '1.1.1.1:80'
assert p.ip == '1.1.1.1'
assert p.port == 80
assert p.interval == 30
assert p.timeout == 120
assert p.ignore_down_response is False
assert p.transparent is True
assert p.probe_timeout == 5
assert p.reverse is True
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.p1 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_gtm_monitor_tcp.module_provisioned')
self.m1 = self.p1.start()
self.m1.return_value = True
self.p2 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_gtm_monitor_tcp.tmos_version')
self.p3 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_gtm_monitor_tcp.send_teem')
self.m2 = self.p2.start()
self.m2.return_value = '14.1.0'
self.m3 = self.p3.start()
self.m3.return_value = True
def tearDown(self):
self.p1.stop()
self.p2.stop()
self.p3.stop()
def test_create_monitor(self, *args):
set_module_args(dict(
name='foo',
ip='10.10.10.10',
port=80,
interval=20,
timeout=30,
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
mm.module_provisioned = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
def test_change_ip(self, *args):
set_module_args(dict(
name='foo',
ip='10.10.10.10',
port=80,
interval=20,
timeout=30,
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = ApiParameters(params=load_fixture('load_gtm_monitor_tcp_1.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[True, True])
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.module_provisioned = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['ip'] == '10.10.10.10'
def test_change_ignore_down_response(self, *args):
set_module_args(dict(
name='foo',
ignore_down_response=True,
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = ApiParameters(params=load_fixture('load_gtm_monitor_tcp_1.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[True, True])
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.module_provisioned = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['ignore_down_response'] is True
| F5Networks/f5-ansible-modules | ansible_collections/f5networks/f5_modules/tests/unit/modules/network/f5/test_bigip_gtm_monitor_tcp.py | Python | mit | 6,578 |
from __future__ import division
from sympy.external import import_module
from sympy.plotting.intervalmath import (
Abs, acos, acosh, And, asin, asinh, atan, atanh, ceil, cos, cosh,
exp, floor, imax, imin, interval, log, log10, Or, sin, sinh, sqrt,
tan, tanh,
)
np = import_module('numpy')
if not np:
disabled = True
# Requires NumPy, hence these tests are included with the interval functions.
def test_interval_pow():
a = 2**interval(1, 2) == interval(2, 4)
assert a == (True, True)
a = interval(1, 2)**interval(1, 2) == interval(1, 4)
assert a == (True, True)
a = interval(-1, 1)**interval(0.5, 2)
assert a.is_valid is None
a = interval(-2, -1) ** interval(1, 2)
assert a.is_valid is False
a = interval(-2, -1) ** (1 / 2)
assert a.is_valid is False
a = interval(-1, 1)**(1 / 2)
assert a.is_valid is None
a = interval(-1, 1)**(1 / 3) == interval(-1, 1)
assert a == (True, True)
a = interval(-1, 1)**2 == interval(0, 1)
assert a == (True, True)
a = interval(-1, 1) ** (1 / 29) == interval(-1, 1)
assert a == (True, True)
a = -2**interval(1, 1) == interval(-2, -2)
assert a == (True, True)
a = interval(1, 2, is_valid=False)**2
assert a.is_valid is False
a = (-3)**interval(1, 2)
assert a.is_valid is False
a = (-4)**interval(0.5, 0.5)
assert a.is_valid is False
assert ((-3)**interval(1, 1) == interval(-3, -3)) == (True, True)
a = interval(8, 64)**(2 / 3)
assert abs(a.start - 4) < 1e-10 # eps
assert abs(a.end - 16) < 1e-10
a = interval(-8, 64)**(2 / 3)
assert abs(a.start - 4) < 1e-10 # eps
assert abs(a.end - 16) < 1e-10
def test_exp():
a = exp(interval(-np.inf, 0))
assert a.start == np.exp(-np.inf)
assert a.end == np.exp(0)
a = exp(interval(1, 2))
assert a.start == np.exp(1)
assert a.end == np.exp(2)
a = exp(1)
assert a.start == np.exp(1)
assert a.end == np.exp(1)
def test_log():
a = log(interval(1, 2))
assert a.start == 0
assert a.end == np.log(2)
a = log(interval(-1, 1))
assert a.is_valid is None
a = log(interval(-3, -1))
assert a.is_valid is False
a = log(-3)
assert a.is_valid is False
a = log(2)
assert a.start == np.log(2)
assert a.end == np.log(2)
def test_log10():
a = log10(interval(1, 2))
assert a.start == 0
assert a.end == np.log10(2)
a = log10(interval(-1, 1))
assert a.is_valid is None
a = log10(interval(-3, -1))
assert a.is_valid is False
a = log10(-3)
assert a.is_valid is False
a = log10(2)
assert a.start == np.log10(2)
assert a.end == np.log10(2)
def test_atan():
a = atan(interval(0, 1))
assert a.start == np.arctan(0)
assert a.end == np.arctan(1)
a = atan(1)
assert a.start == np.arctan(1)
assert a.end == np.arctan(1)
def test_sin():
a = sin(interval(0, np.pi / 4))
assert a.start == np.sin(0)
assert a.end == np.sin(np.pi / 4)
a = sin(interval(-np.pi / 4, np.pi / 4))
assert a.start == np.sin(-np.pi / 4)
assert a.end == np.sin(np.pi / 4)
a = sin(interval(np.pi / 4, 3 * np.pi / 4))
assert a.start == np.sin(np.pi / 4)
assert a.end == 1
a = sin(interval(7 * np.pi / 6, 7 * np.pi / 4))
assert a.start == -1
assert a.end == np.sin(7 * np.pi / 6)
a = sin(interval(0, 3 * np.pi))
assert a.start == -1
assert a.end == 1
a = sin(interval(np.pi / 3, 7 * np.pi / 4))
assert a.start == -1
assert a.end == 1
a = sin(np.pi / 4)
assert a.start == np.sin(np.pi / 4)
assert a.end == np.sin(np.pi / 4)
a = sin(interval(1, 2, is_valid=False))
assert a.is_valid is False
def test_cos():
a = cos(interval(0, np.pi / 4))
assert a.start == np.cos(np.pi / 4)
assert a.end == 1
a = cos(interval(-np.pi / 4, np.pi / 4))
assert a.start == np.cos(-np.pi / 4)
assert a.end == 1
a = cos(interval(np.pi / 4, 3 * np.pi / 4))
assert a.start == np.cos(3 * np.pi / 4)
assert a.end == np.cos(np.pi / 4)
a = cos(interval(3 * np.pi / 4, 5 * np.pi / 4))
assert a.start == -1
assert a.end == np.cos(3 * np.pi / 4)
a = cos(interval(0, 3 * np.pi))
assert a.start == -1
assert a.end == 1
a = cos(interval(- np.pi / 3, 5 * np.pi / 4))
assert a.start == -1
assert a.end == 1
a = cos(interval(1, 2, is_valid=False))
assert a.is_valid is False
def test_tan():
a = tan(interval(0, np.pi / 4))
assert a.start == 0
assert a.end == np.tan(np.pi / 4)
a = tan(interval(np.pi / 4, 3 * np.pi / 4))
#discontinuity
assert a.is_valid is None
def test_sqrt():
a = sqrt(interval(1, 4))
assert a.start == 1
assert a.end == 2
a = sqrt(interval(0.01, 1))
assert a.start == np.sqrt(0.01)
assert a.end == 1
a = sqrt(interval(-1, 1))
assert a.is_valid is None
a = sqrt(interval(-3, -1))
assert a.is_valid is False
a = sqrt(4)
assert (a == interval(2, 2)) == (True, True)
a = sqrt(-3)
assert a.is_valid is False
def test_imin():
a = imin(interval(1, 3), interval(2, 5), interval(-1, 3))
assert a.start == -1
assert a.end == 3
a = imin(-2, interval(1, 4))
assert a.start == -2
assert a.end == -2
a = imin(5, interval(3, 4), interval(-2, 2, is_valid=False))
assert a.start == 3
assert a.end == 4
def test_imax():
a = imax(interval(-2, 2), interval(2, 7), interval(-3, 9))
assert a.start == 2
assert a.end == 9
a = imax(8, interval(1, 4))
assert a.start == 8
assert a.end == 8
a = imax(interval(1, 2), interval(3, 4), interval(-2, 2, is_valid=False))
assert a.start == 3
assert a.end == 4
def test_sinh():
a = sinh(interval(-1, 1))
assert a.start == np.sinh(-1)
assert a.end == np.sinh(1)
a = sinh(1)
assert a.start == np.sinh(1)
assert a.end == np.sinh(1)
def test_cosh():
a = cosh(interval(1, 2))
assert a.start == np.cosh(1)
assert a.end == np.cosh(2)
a = cosh(interval(-2, -1))
assert a.start == np.cosh(-1)
assert a.end == np.cosh(-2)
a = cosh(interval(-2, 1))
assert a.start == 1
assert a.end == np.cosh(-2)
a = cosh(1)
assert a.start == np.cosh(1)
assert a.end == np.cosh(1)
def test_tanh():
a = tanh(interval(-3, 3))
assert a.start == np.tanh(-3)
assert a.end == np.tanh(3)
a = tanh(3)
assert a.start == np.tanh(3)
assert a.end == np.tanh(3)
def test_asin():
a = asin(interval(-0.5, 0.5))
assert a.start == np.arcsin(-0.5)
assert a.end == np.arcsin(0.5)
a = asin(interval(-1.5, 1.5))
assert a.is_valid is None
a = asin(interval(-2, -1.5))
assert a.is_valid is False
a = asin(interval(0, 2))
assert a.is_valid is None
a = asin(interval(2, 5))
assert a.is_valid is False
a = asin(0.5)
assert a.start == np.arcsin(0.5)
assert a.end == np.arcsin(0.5)
a = asin(1.5)
assert a.is_valid is False
def test_acos():
a = acos(interval(-0.5, 0.5))
assert a.start == np.arccos(0.5)
assert a.end == np.arccos(-0.5)
a = acos(interval(-1.5, 1.5))
assert a.is_valid is None
a = acos(interval(-2, -1.5))
assert a.is_valid is False
a = acos(interval(0, 2))
assert a.is_valid is None
a = acos(interval(2, 5))
assert a.is_valid is False
a = acos(0.5)
assert a.start == np.arccos(0.5)
assert a.end == np.arccos(0.5)
a = acos(1.5)
assert a.is_valid is False
def test_ceil():
a = ceil(interval(0.2, 0.5))
assert a.start == 1
assert a.end == 1
a = ceil(interval(0.5, 1.5))
assert a.start == 1
assert a.end == 2
assert a.is_valid is None
a = ceil(interval(-5, 5))
assert a.is_valid is None
a = ceil(5.4)
assert a.start == 6
assert a.end == 6
def test_floor():
a = floor(interval(0.2, 0.5))
assert a.start == 0
assert a.end == 0
a = floor(interval(0.5, 1.5))
assert a.start == 0
assert a.end == 1
assert a.is_valid is None
a = floor(interval(-5, 5))
assert a.is_valid is None
a = floor(5.4)
assert a.start == 5
assert a.end == 5
def test_asinh():
a = asinh(interval(1, 2))
assert a.start == np.arcsinh(1)
assert a.end == np.arcsinh(2)
a = asinh(0.5)
assert a.start == np.arcsinh(0.5)
assert a.end == np.arcsinh(0.5)
def test_acosh():
a = acosh(interval(3, 5))
assert a.start == np.arccosh(3)
assert a.end == np.arccosh(5)
a = acosh(interval(0, 3))
assert a.is_valid is None
a = acosh(interval(-3, 0.5))
assert a.is_valid is False
a = acosh(0.5)
assert a.is_valid is False
a = acosh(2)
assert a.start == np.arccosh(2)
assert a.end == np.arccosh(2)
def test_atanh():
a = atanh(interval(-0.5, 0.5))
assert a.start == np.arctanh(-0.5)
assert a.end == np.arctanh(0.5)
a = atanh(interval(0, 3))
assert a.is_valid is None
a = atanh(interval(-3, -2))
assert a.is_valid is False
a = atanh(0.5)
assert a.start == np.arctanh(0.5)
assert a.end == np.arctanh(0.5)
a = atanh(1.5)
assert a.is_valid is False
def test_Abs():
assert (Abs(interval(-0.5, 0.5)) == interval(0, 0.5)) == (True, True)
assert (Abs(interval(-3, -2)) == interval(2, 3)) == (True, True)
assert (Abs(-3) == interval(3, 3)) == (True, True)
def test_And():
args = [(True, True), (True, False), (True, None)]
assert And(*args) == (True, False)
args = [(False, True), (None, None), (True, True)]
assert And(*args) == (False, None)
def test_Or():
args = [(True, True), (True, False), (False, None)]
assert Or(*args) == (True, True)
args = [(None, None), (False, None), (False, False)]
assert Or(*args) == (None, None)
| wolfram74/numerical_methods_iserles_notes | venv/lib/python2.7/site-packages/sympy/plotting/intervalmath/tests/test_interval_functions.py | Python | mit | 9,816 |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['RelativeDifference'] , ['MovingAverage'] , ['Seasonal_Minute'] , ['AR'] ); | antoinecarme/pyaf | tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_MovingAverage_Seasonal_Minute_AR.py | Python | bsd-3-clause | 170 |
"""Script to build the xpi add-in for firefox
Usage : python build-implicit-wait.py "x.x.x.x"
"""
import os, re, sys, shutil, datetime, zipfile, glob
CD = os.path.dirname(os.path.abspath(__file__))
SRC_DIR = CD + r'\implicit-wait'
OUT_DIR = CD + r'\bin'
RDF_PATH = CD + r'\implicit-wait\install.rdf'
def main(args):
arg_version = args and args[0]
set_working_dir(CD)
last_modified_time = get_file_mtime(RDF_PATH, '%Y-%m-%d %H:%M:%S')
current_version = find_in_file(RDF_PATH, r'version>([.\d]+)<');
print __doc__
print 'Last compilation : ' + (last_modified_time or 'none')
print 'Current Version : ' + current_version
new_version = arg_version or get_input_version(current_version)
print 'New version : ' + new_version + '\n'
print 'Update version number ...'
replace_in_file(RDF_PATH, r'(?<=version>)[.\d]+(?=<)', new_version)
    print 'Build implicit-wait xpi ...'
make_dir(OUT_DIR)
set_working_dir(SRC_DIR)
with ZipFile(OUT_DIR + r'\implicit-wait.xpi', 'w') as zip:
zip.add(r'*')
print '\nDone'
def set_working_dir(directory):
make_dir(directory)
os.chdir(directory)
def make_dir(directory):
if not os.path.isdir(directory):
os.makedirs(directory)
def clear_dir(directory):
if os.path.isdir(directory):
shutil.rmtree(directory)
os.makedirs(directory)
def get_file_mtime(filepath, format=None):
if(not os.path.isfile(filepath)):
return None
dt = datetime.datetime.fromtimestamp(os.path.getmtime(filepath))
if format:
return dt.strftime(format)
return dt
def delete_file(filepath):
if(os.path.isfile(filepath)):
os.remove(filepath)
def find_in_file(filepath, pattern):
with open(filepath, 'r') as f:
result = re.search(pattern, f.read())
return result.group(result.re.groups)
def replace_in_file(filepath, pattern, replacement):
with open(filepath, 'r') as f:
text = re.sub(pattern, replacement, f.read())
with open(filepath, 'w') as f:
f.write(text)
def get_input(message):
try: return raw_input(message)
except NameError: return input(message)
def get_input_version(version):
while True:
input = get_input('Digit to increment [w.x.y.z] or version [0.0.0.0] or skip [s] ? ').strip()
if re.match(r's|w|x|y|z', input) :
idx = {'s': 99, 'w': 0, 'x': 1, 'y': 2, 'z': 3}[input]
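            # Increment the chosen digit and zero out everything after it,
            # e.g. '1.2.3.4' with 'x' -> '1.3.0.0'; 's' (idx 99) keeps the
            # version unchanged.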
return '.'.join([str((int(v)+(i == idx))*(i <= idx)) for i, v in enumerate(version.split('.'))])
elif re.match(r'\d+\.\d+\.\d+\.\d+', input):
return input
class ZipFile(zipfile.ZipFile):
def __init__(cls, file, mode):
zipfile.ZipFile.__init__(cls, file, mode)
def add(self, path):
for item in glob.glob(path):
if os.path.isdir(item):
self.add(item + r'\*');
else:
self.write(item)
if __name__ == '__main__':
main(sys.argv[1:])
| florentbr/SeleniumBasic | FirefoxAddons/build-implicit-wait.py | Python | bsd-3-clause | 3,005 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>
# (c) 2012, Jayson Vantuyl <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: apt_key
author: "Jayson Vantuyl & others (@jvantuyl)"
version_added: "1.0"
short_description: Add or remove an apt key
description:
- Add or remove an I(apt) key, optionally downloading it
notes:
- doesn't download the key unless it really needs it
- as a sanity check, downloaded key id must match the one specified
- best practice is to specify the key id and the url
options:
id:
required: false
default: none
description:
- identifier of key. Including this allows check mode to correctly report the changed state.
- "If specifying a subkey's id be aware that apt-key does not understand how to remove keys via a subkey id. Specify the primary key's id instead."
data:
required: false
default: none
description:
- keyfile contents to add to the keyring
file:
required: false
default: none
description:
- path to a keyfile to add to the keyring
keyring:
required: false
default: none
description:
- path to specific keyring file in /etc/apt/trusted.gpg.d
version_added: "1.3"
url:
required: false
default: none
description:
- url to retrieve key from.
keyserver:
version_added: "1.6"
required: false
default: none
description:
- keyserver to retrieve key from.
state:
required: false
choices: [ absent, present ]
default: present
description:
- used to specify if key is being added or revoked
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Add an apt key by id from a keyserver
- apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9
# Add an Apt signing key, uses whichever key is at the URL
- apt_key: url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=present
# Add an Apt signing key, will not download if present
- apt_key: id=473041FA url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=present
# Remove an Apt signing key, uses whichever key is at the URL
- apt_key: url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=absent
# Remove an Apt-specific signing key, leading 0x is valid
- apt_key: id=0x473041FA state=absent
# Add a key from a file on the Ansible server
- apt_key: data="{{ lookup('file', 'apt.gpg') }}" state=present
# Add an Apt signing key to a specific keyring file
- apt_key: id=473041FA url=https://ftp-master.debian.org/keys/archive-key-6.0.asc keyring=/etc/apt/trusted.gpg.d/debian.gpg state=present
'''
# FIXME: standardize into module_common
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import fetch_url
apt_key_bin = None
def find_needed_binaries(module):
global apt_key_bin
apt_key_bin = module.get_bin_path('apt-key', required=True)
### FIXME: Is there a reason that gpg and grep are checked? Is it just
# cruft or does the apt .deb package not require them (and if they're not
# installed, /usr/bin/apt-key fails?)
module.get_bin_path('gpg', required=True)
module.get_bin_path('grep', required=True)
def parse_key_id(key_id):
"""validate the key_id and break it into segments
:arg key_id: The key_id as supplied by the user. A valid key_id will be
8, 16, or more hexadecimal chars with an optional leading ``0x``.
:returns: The portion of key_id suitable for apt-key del, the portion
suitable for comparisons with --list-public-keys, and the portion that
can be used with --recv-key. If key_id is long enough, these will be
the last 8 characters of key_id, the last 16 characters, and all of
key_id. If key_id is not long enough, some of the values will be the
same.
* apt-key del <= 1.10 has a bug with key_id != 8 chars
* apt-key adv --list-public-keys prints 16 chars
* apt-key adv --recv-key can take more chars
"""
# Make sure the key_id is valid hexadecimal
int(key_id, 16)
key_id = key_id.upper()
if key_id.startswith('0X'):
key_id = key_id[2:]
key_id_len = len(key_id)
if (key_id_len != 8 and key_id_len != 16) and key_id_len <= 16:
raise ValueError('key_id must be 8, 16, or 16+ hexadecimal characters in length')
short_key_id = key_id[-8:]
fingerprint = key_id
if key_id_len > 16:
fingerprint = key_id[-16:]
return short_key_id, fingerprint, key_id
def all_keys(module, keyring, short_format):
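    # Illustrative output line this parses (exact format varies with the gpg version):
    #   'pub   4096R/7638D0442B90D010 2014-11-21 [expires: 2022-11-19]' -> '7638D0442B90D010'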
if keyring:
cmd = "%s --keyring %s adv --list-public-keys --keyid-format=long" % (apt_key_bin, keyring)
else:
cmd = "%s adv --list-public-keys --keyid-format=long" % apt_key_bin
(rc, out, err) = module.run_command(cmd)
results = []
lines = to_native(out).split('\n')
for line in lines:
if line.startswith("pub") or line.startswith("sub"):
tokens = line.split()
code = tokens[1]
(len_type, real_code) = code.split("/")
results.append(real_code)
if short_format:
results = shorten_key_ids(results)
return results
def shorten_key_ids(key_id_list):
"""
Takes a list of key ids, and converts them to the 'short' format,
by reducing them to their last 8 characters.
"""
short = []
for key in key_id_list:
short.append(key[-8:])
return short
def download_key(module, url):
# FIXME: move get_url code to common, allow for in-memory D/L, support proxies
# and reuse here
if url is None:
module.fail_json(msg="needed a URL but was not specified")
try:
rsp, info = fetch_url(module, url)
if info['status'] != 200:
module.fail_json(msg="Failed to download key at %s: %s" % (url, info['msg']))
return rsp.read()
except Exception:
module.fail_json(msg="error getting key id from url: %s" % url, traceback=format_exc())
def import_key(module, keyring, keyserver, key_id):
if keyring:
cmd = "%s --keyring %s adv --keyserver %s --recv %s" % (apt_key_bin, keyring, keyserver, key_id)
else:
cmd = "%s adv --keyserver %s --recv %s" % (apt_key_bin, keyserver, key_id)
for retry in range(5):
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
(rc, out, err) = module.run_command(cmd, environ_update=lang_env)
if rc == 0:
break
else:
# Out of retries
if rc == 2 and 'not found on keyserver' in out:
msg = 'Key %s not found on keyserver %s' % (key_id, keyserver)
module.fail_json(cmd=cmd, msg=msg)
else:
msg = "Error fetching key %s from keyserver: %s" % (key_id, keyserver)
module.fail_json(cmd=cmd, msg=msg, rc=rc, stdout=out, stderr=err)
return True
def add_key(module, keyfile, keyring, data=None):
if data is not None:
if keyring:
cmd = "%s --keyring %s add -" % (apt_key_bin, keyring)
else:
cmd = "%s add -" % apt_key_bin
(rc, out, err) = module.run_command(cmd, data=data, check_rc=True, binary_data=True)
else:
if keyring:
cmd = "%s --keyring %s add %s" % (apt_key_bin, keyring, keyfile)
else:
cmd = "%s add %s" % (apt_key_bin, keyfile)
(rc, out, err) = module.run_command(cmd, check_rc=True)
return True
def remove_key(module, key_id, keyring):
# FIXME: use module.run_command, fail at point of error and don't discard useful stdin/stdout
if keyring:
cmd = '%s --keyring %s del %s' % (apt_key_bin, keyring, key_id)
else:
cmd = '%s del %s' % (apt_key_bin, key_id)
(rc, out, err) = module.run_command(cmd, check_rc=True)
return True
def main():
module = AnsibleModule(
argument_spec=dict(
id=dict(required=False, default=None),
url=dict(required=False),
data=dict(required=False),
file=dict(required=False, type='path'),
key=dict(required=False),
keyring=dict(required=False, type='path'),
validate_certs=dict(default='yes', type='bool'),
keyserver=dict(required=False),
state=dict(required=False, choices=['present', 'absent'], default='present')
),
supports_check_mode=True,
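        # Note: the option declared above is 'file' (not 'filename'), so the
        # 'filename' entry below never matches that parameter as written.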
mutually_exclusive=(('filename', 'keyserver', 'data', 'url'),),
)
key_id = module.params['id']
url = module.params['url']
data = module.params['data']
filename = module.params['file']
keyring = module.params['keyring']
state = module.params['state']
keyserver = module.params['keyserver']
changed = False
fingerprint = short_key_id = key_id
short_format = False
if key_id:
try:
short_key_id, fingerprint, key_id = parse_key_id(key_id)
except ValueError:
module.fail_json(msg='Invalid key_id', id=key_id)
if len(fingerprint) == 8:
short_format = True
find_needed_binaries(module)
keys = all_keys(module, keyring, short_format)
return_values = {}
if state == 'present':
if fingerprint and fingerprint in keys:
module.exit_json(changed=False)
elif fingerprint and fingerprint not in keys and module.check_mode:
### TODO: Someday we could go further -- write keys out to
# a temporary file and then extract the key id from there via gpg
# to decide if the key is installed or not.
module.exit_json(changed=True)
else:
if not filename and not data and not keyserver:
data = download_key(module, url)
if filename:
add_key(module, filename, keyring)
elif keyserver:
import_key(module, keyring, keyserver, key_id)
else:
add_key(module, "-", keyring, data)
changed = False
keys2 = all_keys(module, keyring, short_format)
if len(keys) != len(keys2):
                changed = True
if fingerprint and fingerprint not in keys2:
module.fail_json(msg="key does not seem to have been added", id=key_id)
module.exit_json(changed=changed)
elif state == 'absent':
if not key_id:
module.fail_json(msg="key is required")
if fingerprint in keys:
if module.check_mode:
module.exit_json(changed=True)
# we use the "short" id: key_id[-8:], short_format=True
# it's a workaround for https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1481871
if remove_key(module, short_key_id, keyring):
keys = all_keys(module, keyring, short_format)
if fingerprint in keys:
module.fail_json(msg="apt-key del did not return an error but the key was not removed (check that the id is correct and *not* a subkey)", id=key_id)
changed = True
else:
# FIXME: module.fail_json or exit-json immediately at point of failure
module.fail_json(msg="error removing key_id", **return_values)
module.exit_json(changed=changed, **return_values)
if __name__ == '__main__':
main()
| Jmainguy/ansible-modules-core | packaging/os/apt_key.py | Python | gpl-3.0 | 12,627 |
from __future__ import (absolute_import, division, print_function)
from PyQt4 import QtGui
import Muon.GUI.Common.message_box as message_box
from qtpy import PYQT4
# determine whether the interface is opened from within Mantid or not
# (outside of Mantid we cannot use the "Manage user directories" functionality)
STANDALONE_EXEC = True
try:
from mantidqtpython import MantidQt
except:
STANDALONE_EXEC = False
if PYQT4:
IN_MANTIDPLOT = False
try:
from pymantidplot import proxies
IN_MANTIDPLOT = True
except ImportError:
# We are not in MantidPlot e.g. testing
pass
class HelpWidgetView(QtGui.QWidget):
@staticmethod
def warning_popup(message):
message_box.warning(str(message))
def __init__(self, parent=None):
super(HelpWidgetView, self).__init__(parent)
self.setup_interface_layout()
def setup_interface_layout(self):
self.setObjectName("HelpWidget")
self.resize(500, 100)
self.help_label = QtGui.QLabel(self)
self.help_label.setObjectName("helpLabel")
self.help_label.setText("Help : ")
self.help_button = QtGui.QToolButton(self)
self.help_button.setObjectName("helpButton")
self.help_button.setText("?")
self.manage_user_dir_button = QtGui.QPushButton(self)
self.manage_user_dir_button.setObjectName("manageUserDirectoriesButton")
self.manage_user_dir_button.setText("Manage User Directories")
self.horizontal_layout = QtGui.QHBoxLayout()
self.horizontal_layout.setObjectName("horizontalLayout")
self.horizontal_layout.addWidget(self.help_label)
self.horizontal_layout.addWidget(self.help_button)
self.horizontal_layout.addStretch(0)
self.horizontal_layout.addWidget(self.manage_user_dir_button)
self.setLayout(self.horizontal_layout)
def getLayout(self):
return self.horizontal_layout
def on_manage_user_directories_clicked(self, slot):
self.manage_user_dir_button.clicked.connect(slot)
def on_help_button_clicked(self, slot):
self.help_button.clicked.connect(slot)
def show_directory_manager(self):
if STANDALONE_EXEC:
MantidQt.API.ManageUserDirectories.openUserDirsDialog(self)
else:
self.warning_popup("Cannot open user directories dailog outside MantidPlot.")
    def _on_help_button_clicked(self):
        # proxies is only imported when running inside MantidPlot, so guard on both flags.
        if PYQT4 and IN_MANTIDPLOT:
            proxies.showCustomInterfaceHelp('Frequency Domain Analysis')
| mganeva/mantid | scripts/Muon/GUI/Common/help_widget/help_widget_view.py | Python | gpl-3.0 | 2,535 |
# coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import setup, find_packages
import unittest
def all_tests():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('.', pattern='*_test.py')
return test_suite
setup(
name="google-pasta",
version="0.2.0",
packages=find_packages(),
# metadata for upload to PyPI
author="Nick Smith",
author_email="[email protected]",
description="pasta is an AST-based Python refactoring library",
license="Apache 2.0",
keywords="python refactoring ast",
url="https://github.com/google/pasta",
test_suite='setup.all_tests',
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"License :: OSI Approved :: Apache Software License",
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
install_requires=[
'six',
],
)
| google/pasta | setup.py | Python | apache-2.0 | 1,865 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"Fully test this module's functionality through the use of fixtures."
from megacosm.generators import Gem
import unittest2 as unittest
import fakeredis
from config import TestConfiguration
import fixtures
class TestGem(unittest.TestCase):
def setUp(self):
""" """
self.redis = fakeredis.FakeRedis()
fixtures.gem.import_fixtures(self)
def tearDown(self):
self.redis.flushall()
def test_random_gem(self):
""" """
gem = Gem(self.redis)
self.assertNotEquals('', gem.text)
self.assertNotEquals('', gem.count)
self.assertNotEquals('', gem.color)
self.assertNotEquals('', str(gem))
def test_static_gem(self):
""" """
gem = Gem(self.redis, {'text':'foo bar', 'count':3, 'color':'green'})
self.assertEqual('Foo bar', gem.text)
self.assertEqual(3, gem.count)
self.assertEqual('green', gem.color)
| CityGenerator/Megacosm-Generator | tests/test_gem.py | Python | gpl-2.0 | 980 |
"""
Object containing dynamic simulation state.
"""
import opiniongame.IO as og_io
import opiniongame.coupling as og_coupling
import opiniongame.opinions as og_opinions
import opiniongame.adjacency as og_adj
import numpy as np
class WorldState:
def __init__(self, adj, couplingWeights, initialOpinions, initialHistorySize=100, historyGrowthScale=2):
self.adj = adj
self.couplingWeights = couplingWeights
self.initialOpinions = initialOpinions
self.individualCount = np.size(initialOpinions, 0)
self.initialHistorySize = initialHistorySize
self.historyGrowthScale = historyGrowthScale
self.initializeHistory()
self.iterCount = 0
@classmethod
def fromCmdlineArguments(cls, cmdline, config):
#
# check optional arguments and generate defaults if missing
#
weights = None
initialOpinions = None
adj = None
if cmdline.args.weights is not None:
weights = og_io.loadNamedMatrix(cmdline.args.weights, 'weights')
else:
weights = og_coupling.weights_no_coupling(config.popSize, config.ntopics)
if cmdline.args.initialOpinions is not None:
initialOpinions = og_io.loadNamedMatrix(cmdline.args.initialOpinions, 'initialOpinions')
else:
initialOpinions = og_opinions.initialize_opinions(config.popSize, config.ntopics)
if cmdline.args.adjacency is not None:
adj = og_io.loadNamedMatrix(cmdline.args.adjacency, 'adjacency')
else:
adj = og_adj.make_adj(config.popSize, 'full')
state = cls(adj, weights, initialOpinions)
state.validate()
#
# set popsize and ntopics based on current state. warn if config
# disagrees with loaded files.
#
wPopsize = np.shape(weights)[0]
wNtopics = np.shape(weights)[1]
if wPopsize != config.popSize:
print("WARNING: popsize from data files disagrees with cfg.")
config.popSize = wPopsize
if wNtopics != config.ntopics:
print("WARNING: ntopics from data files disagrees with cfg.")
config.ntopics = wNtopics
return state
def initializeHistory(self):
self.individualCount = np.size(self.initialOpinions, 0)
self.numTopics = np.size(self.initialOpinions, 1)
self.history = np.zeros([self.initialHistorySize, self.individualCount, self.numTopics])
self.history[0,:,:] = self.initialOpinions[:,:]
self.nextHistoryIndex = 1
def appendToHistory(self, newOpinions):
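        # Growth policy as I read it: when the preallocated buffer is full it is
        # reallocated at historyGrowthScale times its size (with the defaults,
        # 100 -> 200 -> 400 slots) and the existing history is copied across.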
curHistorySize = np.size(self.history,0)
if curHistorySize == self.nextHistoryIndex:
newhist = np.zeros([int(curHistorySize * self.historyGrowthScale), self.individualCount, self.numTopics])
newhist[0:curHistorySize, :, :] = self.history
self.history = newhist
self.history[self.nextHistoryIndex,:,:] = newOpinions[:,:]
self.nextHistoryIndex += 1
def reset(self):
self.history[:,:,:] = 0
self.nextHistoryIndex = 0
self.iterCount = 0
def currentOpinions(self):
return self.history[self.nextHistoryIndex-1]
# def previousOpinions(self):
# return self.history[self.nextHistoryIndex-2]
def validate(self):
# validation of data sizes
print("WEIGHT SHAPE : "+str(np.shape(self.couplingWeights)))
print("OPINION SHAPE : "+str(np.shape(self.initialOpinions)))
print("ADJACENCY SHAPE: "+str(np.shape(self.adj)))
wPopsize = np.shape(self.couplingWeights)[0]
wNtopics1 = np.shape(self.couplingWeights)[1]
wNtopics2 = np.shape(self.couplingWeights)[2]
oPopsize = np.shape(self.initialOpinions)[0]
oNtopics = np.shape(self.initialOpinions)[1]
aPopsize1 = np.shape(self.adj)[0]
aPopsize2 = np.shape(self.adj)[1]
if aPopsize1 != aPopsize2:
raise ValueError("Adjacency matrix must be square.")
if wNtopics1 != wNtopics2:
raise ValueError("Per-topic weight matrix must be square.")
if wPopsize != oPopsize or wPopsize != aPopsize1 or aPopsize1 != oPopsize:
raise ValueError("Weight tensor, opinion state, and adjacency matrix disagree on population size.")
if oNtopics != wNtopics1:
raise ValueError("Weight tensor and opinion state disagree on topic count.")
print("==> World state validation passed.")
print("")
| HNoorazar/PyOpinionGame | opiniongame/state.py | Python | gpl-3.0 | 4,551 |
"""Unit tests for reviewboard.reviews.views.ReviewRequestDetailView."""
from datetime import timedelta
from django.contrib.auth.models import User
from django.test.html import parse_html
from djblets.extensions.hooks import TemplateHook
from djblets.extensions.models import RegisteredExtension
from djblets.siteconfig.models import SiteConfiguration
from kgb import SpyAgency
from reviewboard.extensions.base import Extension, get_extension_manager
from reviewboard.reviews.detail import InitialStatusUpdatesEntry, ReviewEntry
from reviewboard.reviews.fields import get_review_request_fieldsets
from reviewboard.reviews.models import Comment, GeneralComment, Review
from reviewboard.site.urlresolvers import local_site_reverse
from reviewboard.testing import TestCase
class ReviewRequestDetailViewTests(SpyAgency, TestCase):
"""Unit tests for reviewboard.reviews.views.ReviewRequestDetailView."""
fixtures = ['test_users', 'test_scmtools', 'test_site']
def test_get(self):
"""Testing ReviewRequestDetailView.get"""
review_request = self.create_review_request(publish=True)
response = self.client.get('/r/%d/' % review_request.id)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['review_request'].pk,
review_request.pk)
def test_context(self):
"""Testing ReviewRequestDetailView context variables"""
# Make sure this request is made while logged in, to catch the
# login-only pieces of the review_detail view.
self.client.login(username='admin', password='admin')
username = 'admin'
summary = 'This is a test summary'
description = 'This is my description'
testing_done = 'Some testing'
review_request = self.create_review_request(
publish=True,
submitter=username,
summary=summary,
description=description,
testing_done=testing_done)
response = self.client.get('/r/%s/' % review_request.pk)
self.assertEqual(response.status_code, 200)
review_request = response.context['review_request']
self.assertEqual(review_request.submitter.username, username)
self.assertEqual(review_request.summary, summary)
self.assertEqual(review_request.description, description)
self.assertEqual(review_request.testing_done, testing_done)
self.assertEqual(review_request.pk, review_request.pk)
def test_diff_comment_ordering(self):
"""Testing ReviewRequestDetailView and ordering of diff comments on a
review
"""
comment_text_1 = 'Comment text 1'
comment_text_2 = 'Comment text 2'
comment_text_3 = 'Comment text 3'
review_request = self.create_review_request(create_repository=True,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
# Create the users who will be commenting.
user1 = User.objects.get(username='doc')
user2 = User.objects.get(username='dopey')
# Create the master review.
main_review = self.create_review(review_request, user=user1)
main_comment = self.create_diff_comment(main_review, filediff,
text=comment_text_1)
main_review.publish()
# First reply
reply1 = self.create_reply(
main_review,
user=user1,
timestamp=(main_review.timestamp + timedelta(days=1)))
self.create_diff_comment(reply1, filediff, text=comment_text_2,
reply_to=main_comment)
# Second reply
reply2 = self.create_reply(
main_review,
user=user2,
timestamp=(main_review.timestamp + timedelta(days=2)))
self.create_diff_comment(reply2, filediff, text=comment_text_3,
reply_to=main_comment)
# Publish them out of order.
reply2.publish()
reply1.publish()
# Make sure they published in the order expected.
self.assertTrue(reply1.timestamp > reply2.timestamp)
# Make sure they're looked up in the order expected.
comments = list(
Comment.objects
.filter(review__review_request=review_request)
.order_by('timestamp')
)
self.assertEqual(len(comments), 3)
self.assertEqual(comments[0].text, comment_text_1)
self.assertEqual(comments[1].text, comment_text_3)
self.assertEqual(comments[2].text, comment_text_2)
# Now figure out the order on the page.
response = self.client.get('/r/%d/' % review_request.pk)
self.assertEqual(response.status_code, 200)
entries = response.context['entries']
initial_entries = entries['initial']
self.assertEqual(len(initial_entries), 1)
self.assertIsInstance(initial_entries[0], InitialStatusUpdatesEntry)
main_entries = entries['main']
self.assertEqual(len(main_entries), 1)
entry = main_entries[0]
self.assertIsInstance(entry, ReviewEntry)
comments = entry.comments['diff_comments']
self.assertEqual(len(comments), 1)
self.assertEqual(comments[0].text, comment_text_1)
replies = comments[0].public_replies()
self.assertEqual(len(replies), 2)
self.assertEqual(replies[0].text, comment_text_3)
self.assertEqual(replies[1].text, comment_text_2)
def test_general_comment_ordering(self):
"""Testing ReviewRequestDetailView and ordering of general comments on
a review
"""
comment_text_1 = 'Comment text 1'
comment_text_2 = 'Comment text 2'
comment_text_3 = 'Comment text 3'
review_request = self.create_review_request(create_repository=True,
publish=True)
# Create the users who will be commenting.
user1 = User.objects.get(username='doc')
user2 = User.objects.get(username='dopey')
# Create the master review.
main_review = self.create_review(review_request, user=user1)
main_comment = self.create_general_comment(main_review,
text=comment_text_1)
main_review.publish()
# First reply
reply1 = self.create_reply(
main_review,
user=user1,
timestamp=(main_review.timestamp + timedelta(days=1)))
self.create_general_comment(reply1, text=comment_text_2,
reply_to=main_comment)
# Second reply
reply2 = self.create_reply(
main_review,
user=user2,
timestamp=(main_review.timestamp + timedelta(days=2)))
self.create_general_comment(reply2, text=comment_text_3,
reply_to=main_comment)
# Publish them out of order.
reply2.publish()
reply1.publish()
# Make sure they published in the order expected.
self.assertTrue(reply1.timestamp > reply2.timestamp)
# Make sure they're looked up in the order expected.
comments = list(
GeneralComment.objects
.filter(review__review_request=review_request)
.order_by('timestamp')
)
self.assertEqual(len(comments), 3)
self.assertEqual(comments[0].text, comment_text_1)
self.assertEqual(comments[1].text, comment_text_3)
self.assertEqual(comments[2].text, comment_text_2)
def test_file_attachments_visibility(self):
"""Testing ReviewRequestDetailView default visibility of file
attachments
"""
caption_1 = 'File Attachment 1'
caption_2 = 'File Attachment 2'
caption_3 = 'File Attachment 3'
comment_text_1 = 'Comment text 1'
comment_text_2 = 'Comment text 2'
user1 = User.objects.get(username='doc')
review_request = self.create_review_request()
# Add two file attachments. One active, one inactive.
file1 = self.create_file_attachment(review_request, caption=caption_1)
file2 = self.create_file_attachment(review_request, caption=caption_2,
active=False)
review_request.publish(user1)
# Create a third file attachment on a draft.
self.create_file_attachment(review_request, caption=caption_3,
draft=True)
# Create the review with comments for each screenshot.
review = Review.objects.create(review_request=review_request,
user=user1)
review.file_attachment_comments.create(file_attachment=file1,
text=comment_text_1)
review.file_attachment_comments.create(file_attachment=file2,
text=comment_text_2)
review.publish()
# Check that we can find all the objects we expect on the page.
self.client.login(username='doc', password='doc')
response = self.client.get('/r/%d/' % review_request.pk)
self.assertEqual(response.status_code, 200)
file_attachments = response.context['file_attachments']
self.assertEqual(len(file_attachments), 2)
self.assertEqual(file_attachments[0].caption, caption_1)
self.assertEqual(file_attachments[1].caption, caption_3)
# Make sure that other users won't see the draft one.
self.client.logout()
response = self.client.get('/r/%d/' % review_request.pk)
self.assertEqual(response.status_code, 200)
file_attachments = response.context['file_attachments']
self.assertEqual(len(file_attachments), 1)
self.assertEqual(file_attachments[0].caption, caption_1)
# Make sure we loaded the reviews and all data correctly.
entries = response.context['entries']
initial_entries = entries['initial']
self.assertEqual(len(initial_entries), 1)
self.assertIsInstance(initial_entries[0], InitialStatusUpdatesEntry)
main_entries = entries['main']
self.assertEqual(len(main_entries), 1)
entry = main_entries[0]
self.assertIsInstance(entry, ReviewEntry)
comments = entry.comments['file_attachment_comments']
self.assertEqual(len(comments), 2)
self.assertEqual(comments[0].text, comment_text_1)
self.assertEqual(comments[1].text, comment_text_2)
def test_screenshots_visibility(self):
"""Testing ReviewRequestDetailView default visibility of screenshots"""
caption_1 = 'Screenshot 1'
caption_2 = 'Screenshot 2'
caption_3 = 'Screenshot 3'
comment_text_1 = 'Comment text 1'
comment_text_2 = 'Comment text 2'
user1 = User.objects.get(username='doc')
review_request = self.create_review_request()
# Add two screenshots. One active, one inactive.
screenshot1 = self.create_screenshot(review_request, caption=caption_1)
screenshot2 = self.create_screenshot(review_request, caption=caption_2,
active=False)
review_request.publish(user1)
# Add a third screenshot on a draft.
self.create_screenshot(review_request, caption=caption_3, draft=True)
# Create the review with comments for each screenshot.
user1 = User.objects.get(username='doc')
review = Review.objects.create(review_request=review_request,
user=user1)
review.screenshot_comments.create(screenshot=screenshot1,
text=comment_text_1,
x=10,
y=10,
w=20,
h=20)
review.screenshot_comments.create(screenshot=screenshot2,
text=comment_text_2,
x=0,
y=0,
w=10,
h=10)
review.publish()
# Check that we can find all the objects we expect on the page.
self.client.login(username='doc', password='doc')
response = self.client.get('/r/%d/' % review_request.pk)
self.assertEqual(response.status_code, 200)
screenshots = response.context['screenshots']
self.assertEqual(len(screenshots), 2)
self.assertEqual(screenshots[0].caption, caption_1)
self.assertEqual(screenshots[1].caption, caption_3)
# Make sure that other users won't see the draft one.
self.client.logout()
response = self.client.get('/r/%d/' % review_request.pk)
self.assertEqual(response.status_code, 200)
screenshots = response.context['screenshots']
self.assertEqual(len(screenshots), 1)
self.assertEqual(screenshots[0].caption, caption_1)
entries = response.context['entries']
initial_entries = entries['initial']
self.assertEqual(len(initial_entries), 1)
self.assertIsInstance(initial_entries[0], InitialStatusUpdatesEntry)
main_entries = entries['main']
self.assertEqual(len(main_entries), 1)
entry = main_entries[0]
self.assertIsInstance(entry, ReviewEntry)
# Make sure we loaded the reviews and all data correctly.
comments = entry.comments['screenshot_comments']
self.assertEqual(len(comments), 2)
self.assertEqual(comments[0].text, comment_text_1)
self.assertEqual(comments[1].text, comment_text_2)
def test_with_anonymous_and_requires_site_wide_login(self):
"""Testing ReviewRequestDetailView with anonymous user and site-wide
login required
"""
with self.siteconfig_settings({'auth_require_sitewide_login': True},
reload_settings=False):
self.create_review_request(publish=True)
response = self.client.get('/r/1/')
self.assertEqual(response.status_code, 302)
def test_etag_with_issues(self):
"""Testing ReviewRequestDetailView ETags with issue status toggling"""
self.client.login(username='doc', password='doc')
# Some objects we need.
user = User.objects.get(username='doc')
review_request = self.create_review_request(create_repository=True,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
# Create a review.
review = self.create_review(review_request, user=user)
comment = self.create_diff_comment(review, filediff,
issue_opened=True)
review.publish()
# Get the etag
response = self.client.get(review_request.get_absolute_url())
self.assertEqual(response.status_code, 200)
etag1 = response['ETag']
self.assertNotEqual(etag1, '')
# Change the issue status
comment.issue_status = Comment.RESOLVED
comment.save()
# Check the etag again
response = self.client.get(review_request.get_absolute_url())
self.assertEqual(response.status_code, 200)
etag2 = response['ETag']
self.assertNotEqual(etag2, '')
# Make sure they're not equal
self.assertNotEqual(etag1, etag2)
def test_review_request_box_template_hooks(self):
"""Testing ReviewRequestDetailView template hooks for the review
request box
"""
class ContentTemplateHook(TemplateHook):
def initialize(self, name, content):
super(ContentTemplateHook, self).initialize(name)
self.content = content
def render_to_string(self, request, context):
return self.content
class TestExtension(Extension):
registration = RegisteredExtension.objects.create(
class_name='test-extension',
name='test-extension',
enabled=True,
installed=True)
extension = TestExtension(get_extension_manager())
review_request = self.create_review_request(publish=True)
hooks = []
for name in ('before-review-request-summary',
'review-request-summary-pre',
'review-request-summary-post',
'after-review-request-summary-post',
'before-review-request-fields',
'after-review-request-fields',
'before-review-request-extra-panes',
'review-request-extra-panes-pre',
'review-request-extra-panes-post',
'after-review-request-extra-panes'):
hooks.append(ContentTemplateHook(extension, name,
'[%s here]' % name))
# Turn off some parts of the page, to simplify the resulting HTML
# and shorten render/parse times.
self.spy_on(get_review_request_fieldsets,
call_fake=lambda *args, **kwargs: [])
response = self.client.get(
local_site_reverse('review-request-detail',
args=[review_request.display_id]))
self.assertEqual(response.status_code, 200)
parsed_html = str(parse_html(response.content.decode('utf-8')))
self.assertIn(
'<div class="review-request-body">\n'
'[before-review-request-summary here]',
parsed_html)
self.assertIn(
'<div class="review-request-section review-request-summary">\n'
'[review-request-summary-pre here]',
parsed_html)
self.assertIn(
'</time>\n</p>[review-request-summary-post here]\n</div>',
parsed_html)
self.assertIn(
'[before-review-request-fields here]'
'<table class="review-request-section"'
' id="review-request-details">',
parsed_html)
self.assertIn(
'</div>'
'[after-review-request-fields here] '
'[before-review-request-extra-panes here]'
'<div id="review-request-extra">\n'
'[review-request-extra-panes-pre here]',
parsed_html)
self.assertIn(
'</div>[review-request-extra-panes-post here]\n'
'</div>[after-review-request-extra-panes here]\n'
'</div>',
parsed_html)
| reviewboard/reviewboard | reviewboard/reviews/tests/test_review_request_detail_view.py | Python | mit | 18,961 |
# Copyright 2012 Christoph Reiter <[email protected]>,
# 2012,2014 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from tests.plugin import PluginTestCase
from quodlibet import config
class TTitlecase(PluginTestCase):
def setUp(self):
globals().update(vars(self.modules["Title Case"]))
config.init()
self.plugin = self.plugins["Title Case"].cls
def test_all_caps(self):
self.plugin.config_set("allow_all_caps", True)
p = self.plugin("", "")
self.failUnlessEqual(p.activated("", "foo bar")[0][1], "Foo Bar")
self.failUnlessEqual(p.activated("", "FOO BAR")[0][1], "FOO BAR")
def test_no_all_caps(self):
self.plugin.config_set("allow_all_caps", False)
p = self.plugin("", "")
self.failUnlessEqual(p.activated("", "foo bar")[0][1], "Foo Bar")
self.failUnlessEqual(p.activated("", "FOO BAR")[0][1], "Foo Bar")
def test_humanise(self):
self.plugin.config_set("human_title_case", True)
self.plugin.config_set("allow_all_caps", False)
p = self.plugin("", "")
self.failUnlessEqual(p.activated("", "foo bar")[0][1], "Foo Bar")
self.failUnlessEqual(p.activated("", "FOO the bAR")[0][1],
"Foo the Bar")
def tearDown(self):
config.quit()
| Mellthas/quodlibet | tests/plugin/test_titlecase.py | Python | gpl-2.0 | 1,547 |
#!/usr/bin/env python3
import os
import gzip
import sys
# Run this script under the 33208_Vertebrate folder, mirroring the following site.
# http://eggnog5.embl.de/download/eggnog_5.0/per_tax_level/33208/
dirname_curr = os.path.dirname(os.path.realpath(__file__))
dirname_output = 'MODtree_ENOG50.raw_alg'
filename_out_base = 'MODtree_ENOG50'
# Run under 33208_Metazoa with the following file.
filename_members = '33208_members.tsv.gz'
# Directory generated from 33208_raw_algs.tar
dirname_align = '33208'
# Make it by grep 'BLAST_UniProt_GN' e5.sequence_aliases.tsv
filename_GN = os.path.join(dirname_curr, 'MODtree_ENOG50.gene_names.tsv.gz')
filename_species = os.path.join(dirname_curr, 'MODtree_species.txt')
# UniProt_ID UP_taxId EN_taxId sp_code sp_name GOA_name
# UP000005640 9606 9606 HUMAN homo_sapiens 25.H_sapiens.goa
species_list = dict()
f_species = open(filename_species, 'r')
for line in f_species:
if line.startswith('#'):
continue
tokens = line.strip().split("\t")
tax_id = tokens[2]
sp_code = tokens[3]
species_list[tax_id] = sp_code
f_species.close()
sys.stderr.write('Read gene names...')
gene_names = dict()
# 9541.XP_005587739.1 RFX2 BLAST_KEGG_NAME BLAST_UniProt_GN RefSeq_gene
f_GN = gzip.open(filename_GN, 'rt')
for line in f_GN:
tokens = line.strip().split("\t")
seq_id = tokens[0]
tmp_name = tokens[1]
tax_id = seq_id.split('.')[0]
if tax_id in species_list:
gene_names[seq_id] = tmp_name
f_GN.close()
sys.stderr.write('Done.\n')
f_out_members = open('%s.members.tsv' % filename_out_base, 'w')
f_out_members.write('# %s\t%s\t%s\t%s\t%s\t%s\n' %
('EN_Id', 'TotalSpecies', 'TotalSeqs',
'MODtreeSpecies', 'MODtreeSeqs', 'MODtreeSpeciesList'))
family2seq = dict()
exclude_family = dict()
f_members = open(filename_members, 'r')
if filename_members.endswith('.gz'):
f_members = gzip.open(filename_members, 'rt')
for line in f_members:
tokens = line.strip().split("\t")
family_id = tokens[1]
total_seqs = int(tokens[2])
total_species = int(tokens[3])
seq_list = []
sp_code_list = []
for tmp_id in tokens[4].split(','):
tmp_tax_id = tmp_id.split('.')[0]
if tmp_tax_id in species_list:
sp_code_list.append(species_list[tmp_tax_id])
seq_list.append(tmp_id)
count_seqs = len(seq_list)
if count_seqs == 0:
continue
sp_code_list = sorted(list(set(sp_code_list)))
count_species = len(sp_code_list)
species_str = ','.join(sp_code_list)
f_out_members.write('%s\t%d\t%d\t%d\t%d\t%s\n' %
(family_id, total_species, total_seqs,
count_species, count_seqs, species_str))
if count_seqs > 1:
family2seq[family_id] = seq_list
if count_seqs > 150:
exclude_family[family_id] = 1
f_members.close()
sys.stderr.write('Processed members.tsv.\n')
f_out_combined = open('%s.combined.faa' % filename_out_base, 'w')
for tmp_family_id in family2seq.keys():
tmp_filename_fa = os.path.join(dirname_align,
'%s.raw_alg.faa.gz' % tmp_family_id)
tmp_seq_list = dict()
f_fa = gzip.open(tmp_filename_fa, 'rt')
for line in f_fa:
if line.startswith('>'):
tmp_h = line.strip().lstrip('>')
tmp_seq_list[tmp_h] = []
else:
tmp_seq_list[tmp_h].append(line.strip())
f_fa.close()
tmp_filename_out = os.path.join(dirname_output,
'%s.raw_alg.faa' % tmp_family_id)
if tmp_family_id in exclude_family:
tmp_filename_out = os.path.join('excluded_%s.raw_alg.faa' %
tmp_family_id)
f_fa_out = open(tmp_filename_out, 'w')
for tmp_seq_id in family2seq[tmp_family_id]:
tmp_tax_id = tmp_seq_id.split('.')[0]
if tmp_seq_id not in tmp_seq_list:
sys.stderr.write('%s has no sequences. (%s)\n' %
(tmp_seq_id, tmp_filename_fa))
continue
tmp_sp_code = species_list[tmp_tax_id]
tmp_name = 'NotAvail'
if tmp_seq_id in gene_names:
tmp_name = gene_names[tmp_seq_id]
# Refine the gene name
# because of DANRE name like si:ch211-151m7.6
tmp_name = tmp_name.replace(':', '_')
tmp_name = tmp_name.replace(' ', '_')
# because of CIOIN name like zf(cchc)-22
tmp_name = tmp_name.replace('(', '_').replace(')', '_')
tmp_name = tmp_name.replace('/', '_')
tmp_id = tmp_seq_id.split('.')[1]
tmp_new_h = '%s|%s|%s|%s' % (tmp_name, tmp_sp_code,
tmp_family_id, tmp_id)
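        # Hypothetical example of the rewritten header (all values illustrative):
        #   '>RFX2|MACFA|<family_id>|XP_005587739'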
tmp_seq = ''.join(tmp_seq_list[tmp_seq_id])
tmp_new_seq = tmp_seq.replace('-', '')
if tmp_family_id in exclude_family:
tmp_new_h = '%s|excluded' % tmp_new_h
f_fa_out.write('>%s\n%s\n' % (tmp_new_h, tmp_seq))
f_out_combined.write('>%s\n%s\n' % (tmp_new_h, tmp_new_seq))
f_fa_out.close()
f_out_combined.close()
| taejoonlab/NuevoTx | MODtree/eggnog/ENOG50-to-MODtree.py | Python | apache-2.0 | 5,092 |
# Copyright (c) 2015 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi, Sophie Jazwiecki
import os
import time
from zope.interface import Interface, implementer
from elasticsearch.client import Elasticsearch
from arango import Arango
from dace.util import utility
from lac import log
ELASTICSEARCH_PORT = os.getenv('ELASTICSEARCH_PORT', 'localhost:9200')
es = Elasticsearch(ELASTICSEARCH_PORT)
ARANGO_HOST, ARANGO_PORT = os.getenv(
'ARANGO_PORT', 'localhost:8529').split(':')
ARANGO_ROOT_PASSWORD = os.getenv('ARANGO_ROOT_PASSWORD', '')
arango_server = Arango(
host=ARANGO_HOST, port=ARANGO_PORT,
password=ARANGO_ROOT_PASSWORD)
class IResourceManager(Interface):
def add_entry(self, key, value, mapping={}, id=None):
pass
def set_entry(self, key, value, id):
pass
def get_entry(self, id):
pass
def get_entries(self, key=None, query={"match_all": {}},
params={}, sort={}, fields=[]):
pass
def remove_entries(self, key=None, query={}):
pass
def remove_entry(self, id):
pass
def normalize_value_types(value):
if isinstance(value, dict):
return {key: normalize_value_types(v)
for key, v in value.items()}
if isinstance(value, (list, tuple, set)):
return [normalize_value_types(v) for v in value]
return value
def normalize_value(values):
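    # Flattening sketch with illustrative values: {'dates': {'start': '2020'}, 'tags': {'a'}}
    # becomes {'dates_start': '2020', 'tags': ['a']}; keys ending in 'suggest' keep their
    # nested value (with types normalized).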
result = {}
for key, value in values.items():
if key.endswith('suggest'):
result[key] = normalize_value_types(value)
elif isinstance(value, dict):
for key_value in value:
new_key = key + "_" + key_value
new_value = value[key_value]
if isinstance(new_value, set):
new_value = list(new_value)
result[new_key] = new_value
else:
if isinstance(value, set):
value = list(value)
result[key] = value
return normalize_value_types(result)
@utility(name='elasticsearch_resource_manager')
@implementer(IResourceManager)
class ElasticSearchResourceManager(object):
def create_index(self):
try:
try:
exists = es.indices.exists('lac')
except Exception:
# wait elasticsearch start and check again
time.sleep(5)
exists = es.indices.exists('lac')
if not exists:
es.indices.create(
index='lac',
body={'settings': {
'number_of_replicas': 0,
'number_of_shards': 1,
}},
ignore=400)
# RequestError: TransportError(400, u'IndexAlreadyExistsException[[lac] already exists]')
return False
except Exception as e:
log.warning(e)
return True
def remove_index(self):
try:
es.indices.delete(index='lac', ignore=[400, 404])
except Exception as e:
# NotFoundError: TransportError(404, u'IndexMissingException[[lac] missing]')
log.warning(e)
@property
def index(self):
return es
def add_entry(self, key, value, mapping={}, id=None):
"""Send entry to elasticsearch."""
result = normalize_value(value)
if not es.indices.exists_type('lac', key):
es.indices.put_mapping(
index='lac',
doc_type=key,
body={
key: {
'properties': mapping
}
}
)
return es.index(index='lac', doc_type=key,
body=result, id=id,
refresh=True)
def set_entry(self, key, value, id):
result = normalize_value(value)
return es.index(index='lac', doc_type=key,
body=result, id=id,
refresh=True)
def get_entry(self, id):
try:
results = es.get(index='lac', id=id)
except Exception as e:
log.warning(e)
return None
if results:
return results['_source']
return None
def get_entries(self, key=None, query={"match_all": {}},
params={}, sort={}, fields=[]):
try:
body = {'query': query,
'sort': sort
}
if fields:
body['fields'] = fields
results = es.search(index='lac',
doc_type=key,
params=params,
body=body)
except Exception as e:
log.warning(e)
return None, 0
total = results['hits']['total']
return results['hits']['hits'], total
def remove_entries(self, key=None, query={}):
pass
def remove_entry(self, key, id):
try:
results = es.delete(index='lac',
doc_type=key,
id=id,
refresh=True)
except Exception as e:
log.warning(e)
default_resourcemanager = 'elasticsearch_resource_manager'
def arango_db__check():
try:
arango_server.create_database("lac")
except Exception:
pass
arango_db__check()
def create_collection(db, id_):
try:
db.create_collection(id_)
except Exception:
pass
return db.col(id_)
| ecreall/lagendacommun | lac/content/resources.py | Python | agpl-3.0 | 5,692 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-21 23:11
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('image', models.CharField(max_length=500)),
('content', models.CharField(max_length=255)),
('upvotes', models.IntegerField(default=0)),
('downvotes', models.IntegerField(default=0)),
('score', models.IntegerField(default=0)),
('published', models.DateTimeField(auto_now_add=True, verbose_name='time published')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| Mxious/Mxious | posts/migrations/0001_initial.py | Python | gpl-2.0 | 1,210 |
#!/usr/bin/env python
import unittest
class TestGood(unittest.TestCase):
def test_zero(self):
self.assertEqual(0, 0)
if __name__ == '__main__':
unittest.main()
| davetcoleman/catkin_tools | tests/integrated/resources/pkg_with_test/test_good.py | Python | apache-2.0 | 180 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from airflow.models import Connection
from airflow.providers.apache.spark.hooks.spark_jdbc import SparkJDBCHook
from airflow.utils import db
class TestSparkJDBCHook(unittest.TestCase):
_config = {
'cmd_type': 'spark_to_jdbc',
'jdbc_table': 'tableMcTableFace',
'jdbc_driver': 'org.postgresql.Driver',
'metastore_table': 'hiveMcHiveFace',
'jdbc_truncate': False,
'save_mode': 'append',
'save_format': 'parquet',
'batch_size': 100,
'fetch_size': 200,
'num_partitions': 10,
'partition_column': 'columnMcColumnFace',
'lower_bound': '10',
'upper_bound': '20',
'create_table_column_types': 'columnMcColumnFace INTEGER(100), name CHAR(64),'
'comments VARCHAR(1024)'
}
# this config is invalid because if one of [partitionColumn, lowerBound, upperBound]
# is set, all of the options must be enabled (enforced by Spark)
_invalid_config = {
'cmd_type': 'spark_to_jdbc',
'jdbc_table': 'tableMcTableFace',
'jdbc_driver': 'org.postgresql.Driver',
'metastore_table': 'hiveMcHiveFace',
'jdbc_truncate': False,
'save_mode': 'append',
'save_format': 'parquet',
'batch_size': 100,
'fetch_size': 200,
'num_partitions': 10,
'partition_column': 'columnMcColumnFace',
'upper_bound': '20',
'create_table_column_types': 'columnMcColumnFace INTEGER(100), name CHAR(64),'
'comments VARCHAR(1024)'
}
def setUp(self):
db.merge_conn(
Connection(
conn_id='spark-default', conn_type='spark',
host='yarn://yarn-master',
extra='{"queue": "root.etl", "deploy-mode": "cluster"}')
)
db.merge_conn(
Connection(
conn_id='jdbc-default', conn_type='postgres',
host='localhost', schema='default', port=5432,
login='user', password='supersecret',
extra='{"conn_prefix":"jdbc:postgresql://"}'
)
)
def test_resolve_jdbc_connection(self):
# Given
hook = SparkJDBCHook(jdbc_conn_id='jdbc-default')
expected_connection = {
'url': 'localhost:5432',
'schema': 'default',
'conn_prefix': 'jdbc:postgresql://',
'user': 'user',
'password': 'supersecret'
}
# When
connection = hook._resolve_jdbc_connection()
# Then
self.assertEqual(connection, expected_connection)
def test_build_jdbc_arguments(self):
# Given
hook = SparkJDBCHook(**self._config)
# When
cmd = hook._build_jdbc_application_arguments(hook._resolve_jdbc_connection())
# Then
expected_jdbc_arguments = [
'-cmdType', 'spark_to_jdbc',
'-url', 'jdbc:postgresql://localhost:5432/default',
'-user', 'user',
'-password', 'supersecret',
'-metastoreTable', 'hiveMcHiveFace',
'-jdbcTable', 'tableMcTableFace',
'-jdbcDriver', 'org.postgresql.Driver',
'-batchsize', '100',
'-fetchsize', '200',
'-numPartitions', '10',
'-partitionColumn', 'columnMcColumnFace',
'-lowerBound', '10',
'-upperBound', '20',
'-saveMode', 'append',
'-saveFormat', 'parquet',
'-createTableColumnTypes', 'columnMcColumnFace INTEGER(100), name CHAR(64),'
'comments VARCHAR(1024)'
]
self.assertEqual(expected_jdbc_arguments, cmd)
def test_build_jdbc_arguments_invalid(self):
# Given
hook = SparkJDBCHook(**self._invalid_config)
# Expect Exception
hook._build_jdbc_application_arguments(hook._resolve_jdbc_connection())
| wooga/airflow | tests/providers/apache/spark/hooks/test_spark_jdbc.py | Python | apache-2.0 | 4,769 |
from __future__ import print_function
from itertools import chain
import ast
import cgi
import re
import sys
from . import codegen
from . import runtime
PY35 = sys.version_info >= (3, 5, 0)
class Base(object):
def __init__(self):
self.inline_child = None
self.children = []
def iter_all_children(self):
'''Return an iterator that yields every node which is a child of this one.
This includes inline children, and control structure `else` clauses.
'''
if self.inline_child:
yield self.inline_child
for x in self.children:
yield x
def add_child(self, node, inline=False):
if inline:
self.inline_child = node
else:
self.children.append(node)
def consume_sibling(self, node):
return False
def render(self, engine):
return chain(
self.render_start(engine),
self.render_content(engine),
self.render_end(engine),
)
def render_start(self, engine):
return []
def render_content(self, engine):
to_chain = []
if self.inline_child:
to_chain.append(self.inline_child.render(engine))
for child in self.children:
to_chain.append(child.render(engine))
return chain(*to_chain)
def render_end(self, engine):
return []
def __repr__(self):
return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
def print_tree(self, _depth=0, _inline=False):
if _inline:
print('-> ' + repr(self), end='')
else:
print('| ' * _depth + repr(self), end='')
_depth += int(not _inline)
if self.inline_child:
self.inline_child.print_tree(_depth, True)
else:
print()
for child in self.children:
child.print_tree(_depth)
class FilterBase(Base):
def __init__(self, *args, **kwargs):
super(FilterBase, self).__init__(*args, **kwargs)
self._content = []
def add_line(self, indent, content):
self._content.append((indent, content))
def iter_dedented(self):
indent_to_remove = None
for indent, content in self._content:
if indent_to_remove is None:
yield content
if content:
indent_to_remove = len(indent)
else:
yield (indent + content)[indent_to_remove:]
class GreedyBase(Base):
def __init__(self, *args, **kwargs):
super(GreedyBase, self).__init__(*args, **kwargs)
self._greedy_root = self
def add_child(self, child, *args):
super(GreedyBase, self).add_child(child, *args)
child._greedy_root = self._greedy_root
class Document(Base):
def render_start(self, engine):
yield engine.start_document()
class Content(Base):
def __init__(self, content):
super(Content, self).__init__()
self.content = content
def render_start(self, engine):
yield engine.indent()
yield self.content
yield engine.endl
yield engine.inc_depth
def render_end(self, engine):
yield engine.dec_depth
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.content)
class Expression(Content, GreedyBase):
def __init__(self, content, filters=''):
super(Expression, self).__init__(content)
self.filters = filters
def render_start(self, engine):
if self.content.strip():
yield engine.indent()
filters = self._greedy_root.filters
yield '${%s%s}' % (self.content.strip(), ('|' + filters if filters else ''))
yield engine.endl
yield engine.inc_depth # This is countered by the Content.render_end
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.content, self.filters)
class Tag(Base):
self_closing_names = set('''
br
hr
img
input
link
meta
'''.strip().split())
def __init__(self, name, id, class_,
kwargs_expr=None,
object_reference=None,
object_reference_prefix=None,
self_closing=False,
strip_inner=False,
strip_outer=False,
):
super(Tag, self).__init__()
self.name = (name or 'div').lower()
self.id = id
self.class_ = (class_ or '').replace('.', ' ').strip()
self.kwargs_expr = kwargs_expr
self.object_reference = object_reference
self.object_reference_prefix = object_reference_prefix
self.self_closing = self_closing
self.strip_inner = strip_inner
self.strip_outer = strip_outer
def render_start(self, engine):
const_attrs = {}
if self.id:
const_attrs['id'] = self.id
if self.class_:
const_attrs['class'] = self.class_
kwargs_expr = self.kwargs_expr or ''
# Object references are actually handled by the attribute formatting
# function.
if self.object_reference:
kwargs_expr += (', ' if kwargs_expr else '') + '__obj_ref=' + self.object_reference
if self.object_reference_prefix:
kwargs_expr += ', __obj_ref_pre=' + self.object_reference_prefix
# Mako tags should not convert camel case.
if self.name.startswith('%'):
const_attrs['__adapt_camelcase'] = False
if kwargs_expr:
# If all of the kwargs are literals, then lets convert them up front.
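            # e.g. (my sketch): kwargs_expr "href='/x', **{'data-id': 1}" parses to
            # literals only, so it is folded into const_attrs and rendered statically;
            # anything dynamic falls through to the runtime attribute_str() path.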
try:
root = ast.parse('func(%s)' % kwargs_expr)
except SyntaxError:
valid = False
else:
valid = True
func = root.body[0].value
literal_attrs = {}
# Python 3.5 changed how this works, so our logic is a little
# wacky here.
if PY35:
args = []
starargs = []
for x in func.args:
if isinstance(x, ast.Starred):
starargs.append(x)
else:
args.append(x)
kwargs = []
starkwargs = []
for x in func.keywords:
if x.arg is None:
starkwargs.append(x.value)
else:
kwargs.append(x)
else:
args = func.args
starargs = [func.starargs] if func.starargs else []
kwargs = func.keywords
starkwargs = [func.kwargs] if func.kwargs else []
if valid:
for x in args:
try:
value = ast.literal_eval(x)
except ValueError:
valid = False
break
else:
literal_attrs.update(value)
if valid:
for x in kwargs:
try:
value = ast.literal_eval(x.value)
except ValueError:
valid = False
break
else:
literal_attrs[x.arg] = value
if valid:
for x in starargs:
try:
value = ast.literal_eval(x)
except ValueError:
valid = False
break
else:
for map_ in value:
literal_attrs.update(map_)
if valid:
for x in starkwargs:
try:
value = ast.literal_eval(x)
except ValueError:
valid = False
break
else:
literal_attrs.update(value)
if valid:
const_attrs.update(literal_attrs)
kwargs_expr = None
if not kwargs_expr:
attr_str = runtime.attribute_str(const_attrs)
elif not const_attrs:
attr_str = '<%% __M_writer(__HAML.attribute_str(%s)) %%>' % kwargs_expr
else:
attr_str = '<%% __M_writer(__HAML.attribute_str(%r, %s)) %%>' % (const_attrs, kwargs_expr)
if self.strip_outer:
yield engine.lstrip
yield engine.indent()
if self.self_closing or self.name in self.self_closing_names:
yield '<%s%s />' % (self.name, attr_str)
if self.strip_outer:
yield engine.rstrip
else:
yield engine.endl
else:
yield '<%s%s>' % (self.name, attr_str)
if self.children:
if self.strip_inner or self.inline_child:
yield engine.rstrip
else:
yield engine.endl
yield engine.inc_depth
def render_content(self, engine):
if self.inline_child:
return chain(
[engine.lstrip, engine.rstrip],
super(Tag, self).render_content(engine),
[engine.lstrip, engine.rstrip],
)
else:
return super(Tag, self).render_content(engine)
def render_end(self, engine):
if self.strip_inner or self.inline_child:
yield engine.lstrip
if not (self.self_closing or self.name in self.self_closing_names):
if self.children:
yield engine.dec_depth
yield engine.indent()
yield '</%s>' % self.name
if self.strip_outer:
yield engine.rstrip
yield engine.endl
elif self.strip_outer:
yield engine.rstrip
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__,
', '.join('%s=%r' % (k, getattr(self, k)) for k in (
'name', 'id', 'class_', 'kwargs_expr',
'strip_inner', 'strip_outer'
) if getattr(self, k))
)
class MixinDef(Tag):
def __init__(self, name, argspec):
super(MixinDef, self).__init__(
'%def', # tag name
None, # ID
None, # class
'name=%r' % ('%s(%s)' % (name, argspec or '')), # kwargs expr
strip_inner=True,
)
class MixinCall(Tag):
def __init__(self, name, argspec):
super(MixinCall, self).__init__(
'%call', # tag name
None, # ID
None, # class
'expr=%r' % ('%s(%s)' % (name, argspec or '')), # kwargs expr
)
class HTMLComment(Base):
def __init__(self, inline_content, IE_condition=''):
super(HTMLComment, self).__init__()
self.inline_content = inline_content
self.IE_condition = IE_condition
def render_start(self, engine):
yield engine.indent()
yield '<!--'
if self.IE_condition:
yield self.IE_condition
yield '>'
if self.inline_content:
yield ' '
yield self.inline_content
yield ' '
if self.children:
yield engine.inc_depth
yield engine.endl
def render_end(self, engine):
if self.children:
yield engine.dec_depth
yield engine.indent()
if self.IE_condition:
yield '<![endif]'
yield '-->'
yield engine.endl
def __repr__(self):
return '%s()' % self.__class__.__name__
class Control(Base):
def __init__(self, type, test):
super(Control, self).__init__()
self.type = type
self.test = test
self.elifs = []
self.else_ = None
def iter_all_children(self):
for x in super(Control, self).iter_all_children():
yield x
for x in self.elifs:
yield x
if self.else_:
            yield self.else_
def consume_sibling(self, node):
if not isinstance(node, Control):
return False
if node.type == 'elif':
self.elifs.append(node)
return True
if node.type == 'else' and self.else_ is None:
self.else_ = node
return True
def print_tree(self, depth, inline=False):
super(Control, self).print_tree(depth)
for node in self.elifs:
node.print_tree(depth)
if self.else_ is not None:
self.else_.print_tree(depth)
def render(self, engine):
to_chain = [self.render_start(engine), self.render_content(engine)]
for node in self.elifs:
to_chain.append(node.render(engine))
if self.else_:
to_chain.append(self.else_.render(engine))
to_chain.append(self.render_end(engine))
return chain(*to_chain)
def render_start(self, engine):
yield engine.line_continuation
yield engine.indent(-1)
if self.test is not None:
yield '%% %s %s: ' % (self.type, self.test)
else:
yield '%% %s: ' % (self.type)
yield engine.no_strip(engine.endl)
def render_end(self, engine):
if self.type in ('else', 'elif'):
return
yield engine.line_continuation
yield engine.indent(-1)
yield '%% end%s' % self.type
yield engine.no_strip(engine.endl)
def __repr__(self):
if self.test is not None:
return '%s(type=%r, test=%r)' % (
self.__class__.__name__,
self.type,
self.test
)
else:
return '%s(type=%r)' % (self.__class__.__name__, self.type)
class Python(FilterBase):
def __init__(self, content, module=False):
super(Python, self).__init__()
if content.strip():
self.add_line('', content)
self.module = module
def render(self, engine):
if self.module:
yield '<%! '
else:
yield '<% '
yield engine.endl
for line in self.iter_dedented():
yield line
yield engine.endl
yield '%>'
yield engine.endl_no_break
def __repr__(self):
return '%s(%r%s)' % (
self.__class__.__name__,
self._content,
', module=True' if self.module else ''
)
class Filter(FilterBase):
def __init__(self, content, filter):
super(Filter, self).__init__()
if content and content.strip():
self.add_line('', content)
self.filter = filter
def _escape_expressions(self, source):
parts = re.split(r'(\${.*?})', source)
for i in range(0, len(parts), 2):
parts[i] = parts[i] and ('<%%text>%s</%%text>' % parts[i])
return ''.join(parts)
def render(self, engine):
# Hopefully this chain respects proper scope resolution.
yield '<%%block filter="locals().get(%r) or globals().get(%r) or getattr(__HAML.filters, %r, UNDEFINED)">' % (self.filter, self.filter, self.filter)
yield engine.endl_no_break
yield self._escape_expressions(engine.endl.join(self.iter_dedented()).strip())
yield '</%block>'
yield engine.endl
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self._content,
self.filter)
class HAMLComment(Base):
def __init__(self, comment):
super(HAMLComment, self).__init__()
self.comment = comment
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
self.comment
)
def render(self, engine):
return []
class Doctype(Base):
doctypes = {
'xml': {
None: """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">""",
"strict": """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">""",
"frameset": """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">""",
"5": """<!DOCTYPE html>""",
"1.1": """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">""",
"basic": """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML Basic 1.1//EN" "http://www.w3.org/TR/xhtml-basic/xhtml-basic11.dtd">""",
"mobile": """<!DOCTYPE html PUBLIC "-//WAPFORUM//DTD XHTML Mobile 1.2//EN" "http://www.openmobilealliance.org/tech/DTD/xhtml-mobile12.dtd">""",
"rdfa": """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML+RDFa 1.0//EN" "http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd">""",
}, 'html': {
None: """<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">""",
"strict": """<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">""",
"frameset": """<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd">""",
"5": """<!DOCTYPE html>""",
}}
def __init__(self, name=None, charset=None):
super(Doctype, self).__init__()
self.name = name.lower() if name else None
self.charset = charset
def __repr__(self):
return '%s(%r, %r)' % (
self.__class__.__name__,
self.name,
self.charset
)
def render_start(self, engine):
if self.name in ('xml', 'html'):
mode = self.name
engine.node_data['Doctype.mode'] = mode
else:
mode = engine.node_data.get('Doctype.mode', 'html')
if self.name == 'xml':
charset = self.charset or 'utf-8'
yield "<?xml version='1.0' encoding='%s' ?>" % charset
yield engine.no_strip('\n')
return
yield self.doctypes[mode][self.name]
yield engine.no_strip('\n')
| mikeboers/PyHAML | haml/nodes.py | Python | bsd-3-clause | 18,376 |
import functools
from pyp2rpm import settings
def memoize_by_args(func):
"""Memoizes return value of a func based on args."""
memory = {}
@functools.wraps(func)
def memoized(*args):
        if args not in memory:
value = func(*args)
memory[args] = value
return memory[args]
return memoized
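# Illustrative usage sketch for memoize_by_args (not part of the original
# pyp2rpm module): the decorated function runs once per distinct argument
# tuple and later calls are served from the cache. The helper below is
# hypothetical and is never called at import time.
def _memoize_by_args_example():
    calls = []
    @memoize_by_args
    def square(x):
        calls.append(x)
        return x * x
    assert square(3) == 9
    assert square(3) == 9  # second call hits the cache
    assert calls == [3]    # the wrapped function only ran once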
def license_from_trove(trove):
"""Finds out license from list of trove classifiers.
Args:
trove: list of trove classifiers
Returns:
Fedora name of the package license or empty string, if no licensing information is found in trove classifiers.
"""
license = []
for classifier in trove:
if classifier is None: continue
        if 'License' in classifier:
stripped = classifier.strip()
# if taken from EGG-INFO, begins with Classifier:
stripped = stripped[stripped.find('License'):]
if stripped in settings.TROVE_LICENSES:
license.append(settings.TROVE_LICENSES[stripped])
return ' and '.join(license)
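# Illustrative call sketch for license_from_trove (not part of the original
# module). The exact return value depends on the settings.TROVE_LICENSES
# mapping, so nothing is asserted here; a classifier such as
# 'License :: OSI Approved :: MIT License' would typically map to 'MIT'.
def _license_from_trove_example():
    classifiers = [
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
    ]
    return license_from_trove(classifiers)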
| henrysher/spec4pypi | pyp2rpm/utils.py | Python | mit | 1,076 |
# Author: Mr_Orange <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import traceback
import urllib, urllib2
import re
import sickbeard
import generic
from sickbeard.common import Quality, Overview
from sickbeard.name_parser.parser import NameParser, InvalidNameException
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import helpers
from sickbeard.show_name_helpers import allPossibleShowNames, sanitizeSceneName
from sickbeard.exceptions import ex
from sickbeard import encodingKludge as ek
from sickbeard import clients
from lib import requests
from bs4 import BeautifulSoup
from lib.unidecode import unidecode
class KATProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "KickAssTorrents")
self.supportsBacklog = True
self.cache = KATCache(self)
self.url = 'http://katproxy.com/'
self.searchurl = self.url+'usearch/%s/?field=seeders&sorder=desc' #order by seed
def isEnabled(self):
return sickbeard.KAT
def imageName(self):
return 'kat.png'
def getQuality(self, item):
quality = Quality.sceneQuality(item[0])
return quality
def _reverseQuality(self, quality):
quality_string = ''
if quality == Quality.SDTV:
quality_string = 'HDTV x264'
if quality == Quality.SDDVD:
quality_string = 'DVDRIP'
elif quality == Quality.HDTV:
quality_string = '720p HDTV x264'
elif quality == Quality.FULLHDTV:
quality_string = '1080p HDTV x264'
elif quality == Quality.RAWHDTV:
quality_string = '1080i HDTV mpeg2'
elif quality == Quality.HDWEBDL:
quality_string = '720p WEB-DL h264'
elif quality == Quality.FULLHDWEBDL:
quality_string = '1080p WEB-DL h264'
elif quality == Quality.HDBLURAY:
quality_string = '720p Bluray x264'
elif quality == Quality.FULLHDBLURAY:
quality_string = '1080p Bluray x264'
return quality_string
def _find_season_quality(self,title, torrent_link, ep_number):
""" Return the modified title of a Season Torrent with the quality found inspecting torrent file list """
mediaExtensions = ['avi', 'mkv', 'wmv', 'divx',
                           'vob', 'dvr-ms', 'wtv', 'ts',
'ogv', 'rar', 'zip', 'mp4']
quality = Quality.UNKNOWN
fileName = None
data = self.getURL(torrent_link)
if not data:
return None
try:
soup = BeautifulSoup(data, features=["html5lib", "permissive"])
file_table = soup.find('table', attrs = {'class': 'torrentFileList'})
if not file_table:
return None
files = [x.text for x in file_table.find_all('td', attrs = {'class' : 'torFileName'} )]
videoFiles = filter(lambda x: x.rpartition(".")[2].lower() in mediaExtensions, files)
#Filtering SingleEpisode/MultiSeason Torrent
if len(videoFiles) < ep_number or len(videoFiles) > float(ep_number * 1.1 ):
                logger.log(u"Result " + title + " seems to be a single-episode or multi-season torrent, skipping result...", logger.DEBUG)
return None
for fileName in videoFiles:
quality = Quality.sceneQuality(os.path.basename(fileName))
if quality != Quality.UNKNOWN: break
if fileName!=None and quality == Quality.UNKNOWN:
quality = Quality.assumeQuality(os.path.basename(fileName))
if quality == Quality.UNKNOWN:
logger.log(u"Unable to obtain a Season Quality for " + title, logger.DEBUG)
return None
try:
myParser = NameParser()
parse_result = myParser.parse(fileName)
except InvalidNameException:
return None
logger.log(u"Season quality for "+title+" is "+Quality.qualityStrings[quality], logger.DEBUG)
if parse_result.series_name and parse_result.season_number:
title = parse_result.series_name+' S%02d' % int(parse_result.season_number)+' '+self._reverseQuality(quality)
return title
except Exception, e:
logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)
def _get_season_search_strings(self, show, season=None):
search_string = {'Episode': []}
if not show:
return []
self.show = show
seasonEp = show.getAllEpisodes(season)
wantedEp = [x for x in seasonEp if show.getOverview(x.status) in (Overview.WANTED, Overview.QUAL)]
#If Every episode in Season is a wanted Episode then search for Season first
if wantedEp == seasonEp and not show.air_by_date:
search_string = {'Season': [], 'Episode': []}
for show_name in set(allPossibleShowNames(show)):
ep_string = show_name +' S%02d' % int(season) + ' -S%02d' % int(season) + 'E' + ' category:tv' #1) ShowName SXX -SXXE
search_string['Season'].append(ep_string)
ep_string = show_name+' Season '+str(season)+' -Ep*' + ' category:tv' #2) ShowName Season X
search_string['Season'].append(ep_string)
#Building the search string with the episodes we need
for ep_obj in wantedEp:
search_string['Episode'] += self._get_episode_search_strings(ep_obj)[0]['Episode']
#If no Episode is needed then return an empty list
if not search_string['Episode']:
return []
return [search_string]
def _get_episode_search_strings(self, ep_obj):
search_string = {'Episode': []}
if not ep_obj:
return []
self.show = ep_obj.show
if ep_obj.show.air_by_date:
for show_name in set(allPossibleShowNames(ep_obj.show)):
ep_string = sanitizeSceneName(show_name) +' '+ str(ep_obj.airdate)
search_string['Episode'].append(ep_string)
else:
for show_name in set(allPossibleShowNames(ep_obj.show)):
ep_string = sanitizeSceneName(show_name) +' '+ \
sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.season, 'episodenumber': ep_obj.episode} +'|'+\
sickbeard.config.naming_ep_type[0] % {'seasonnumber': ep_obj.season, 'episodenumber': ep_obj.episode} +'|'+\
                    sickbeard.config.naming_ep_type[3] % {'seasonnumber': ep_obj.season, 'episodenumber': ep_obj.episode} + ' category:tv'
search_string['Episode'].append(ep_string)
return [search_string]
def _doSearch(self, search_params):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
for mode in search_params.keys():
for search_string in search_params[mode]:
if mode != 'RSS':
searchURL = self.searchurl %(urllib.quote(unidecode(search_string)))
logger.log(u"Search string: " + searchURL, logger.DEBUG)
else:
searchURL = self.url + 'tv/?field=time_add&sorder=desc'
logger.log(u"KAT cache update URL: "+ searchURL, logger.DEBUG)
html = self.getURL(searchURL)
if not html:
continue
try:
soup = BeautifulSoup(html, features=["html5lib", "permissive"])
torrent_table = soup.find('table', attrs = {'class' : 'data'})
torrent_rows = torrent_table.find_all('tr') if torrent_table else []
if not torrent_rows:
# logger.log(u"The Data returned from " + self.name + " do not contains any torrent", logger.ERROR)
continue
for tr in torrent_rows[1:]:
link = self.url + (tr.find('div', {'class': 'torrentname'}).find_all('a')[1])['href']
id = tr.get('id')[-7:]
title = (tr.find('div', {'class': 'torrentname'}).find_all('a')[1]).text
url = tr.find('a', 'imagnet')['href']
verified = True if tr.find('a', 'iverify') else False
trusted = True if tr.find('img', {'alt': 'verified'}) else False
seeders = int(tr.find_all('td')[-2].text)
leechers = int(tr.find_all('td')[-1].text)
if mode != 'RSS' and seeders == 0:
continue
if sickbeard.KAT_VERIFIED and not verified:
logger.log(u"KAT Provider found result "+title+" but that doesn't seem like a verified result so I'm ignoring it",logger.DEBUG)
continue
if mode == 'Season' and Quality.sceneQuality(title) == Quality.UNKNOWN:
ep_number = int(len(search_params['Episode']) / len(set(allPossibleShowNames(self.show))))
title = self._find_season_quality(title, link, ep_number)
if not title:
continue
item = title, url, id, seeders, leechers
items[mode].append(item)
except Exception, e:
                    logger.log(u"Failed parsing " + self.name + (" Exceptions: " + str(e) if e else ''), logger.ERROR)
#For each search mode sort all the items by seeders
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def _get_title_and_url(self, item):
title, url, id, seeders, leechers = item
if url:
            url = url.replace('&amp;', '&')
return (title, url)
def getURL(self, url, headers=None):
try:
r = requests.get(url)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
logger.log(u"Error loading "+self.name+" URL: " + str(sys.exc_info()) + " - " + ex(e), logger.ERROR)
return None
if r.status_code != 200:
logger.log(self.name + u" page requested with url " + url +" returned status code is " + str(r.status_code) + ': ' + clients.http_error_code[r.status_code], logger.WARNING)
return None
return r.content
def downloadResult(self, result):
"""
Save the result to disk.
"""
torrent_hash = re.findall('urn:btih:([\w]{32,40})', result.url)[0].upper()
if not torrent_hash:
logger.log("Unable to extract torrent hash from link: " + ex(result.url), logger.ERROR)
return False
try:
r = requests.get('http://torcache.net/torrent/' + torrent_hash + '.torrent')
except Exception, e:
logger.log("Unable to connect to Torcache: " + ex(e), logger.ERROR)
return False
if not r.status_code == 200:
return False
magnetFileName = ek.ek(os.path.join, sickbeard.TORRENT_DIR, helpers.sanitizeFileName(result.name) + '.' + self.providerType)
magnetFileContent = r.content
try:
fileOut = open(magnetFileName, 'wb')
fileOut.write(magnetFileContent)
fileOut.close()
helpers.chmodAsParent(magnetFileName)
except IOError, e:
logger.log("Unable to save the file: " + ex(e), logger.ERROR)
return False
logger.log(u"Saved magnet link to " + magnetFileName + " ", logger.MESSAGE)
return True
class KATCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
        # only poll KickAssTorrents every 20 minutes max
self.minTime = 20
def updateCache(self):
if not self.shouldUpdate():
return
search_params = {'RSS': ['rss']}
rss_results = self.provider._doSearch(search_params)
if rss_results:
self.setLastUpdate()
else:
return []
logger.log(u"Clearing " + self.provider.name + " cache and updating with new information")
self._clearCache()
for result in rss_results:
item = (result[0], result[1])
self._parseItem(item)
def _parseItem(self, item):
(title, url) = item
if not title or not url:
return
logger.log(u"Adding item to cache: "+title, logger.DEBUG)
self._addCacheEntry(title, url)
provider = KATProvider()
| fernandog/Sick-Beard | sickbeard/providers/kat.py | Python | gpl-3.0 | 14,136 |
from django_filters.filters import MultipleChoiceFilter, ModelMultipleChoiceFilter
from django_filters.filterset import FilterSet
from account.models import School, Profile
from tag.models import TreeTag
class SchoolGenericFilterSet(FilterSet):
id = MultipleChoiceFilter(name='id',
# here extra parameter will be passed to field_class
choices=School.objects.values_list('id', 'name'))
class Meta:
model = School
class ProfileGenericFilterSet(FilterSet):
id = MultipleChoiceFilter(name='id',
choices=Profile.objects.values_list('id', 'phone_num'))
tags = ModelMultipleChoiceFilter(name='tags', queryset=TreeTag.objects.all())
class Meta:
model = Profile
| ace-han/onedegree | admin/account/api/v1/filtersets.py | Python | bsd-3-clause | 822 |
# -*- coding: utf-8 -*-
"""Docx renderer module."""
import sys
import bleach
if not sys.version_info >= (3, 0):
from pydocx.parsers import Docx2Html
from mfr import RenderResult
def render_docx(fp, *args, **kwargs):
"""Generate an html representation of the docx file using PyDocx
:param fp: File pointer
:return: RenderResult object containing the content html
"""
bleach.ALLOWED_TAGS.append(u'h1')
bleach.ALLOWED_TAGS.append(u'span')
content = Docx2Html(fp).parsed_without_head
content = bleach.clean(content, styles=None, strip=True)
return RenderResult(content=content.encode('ascii', 'ignore'), assets={})
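# Illustrative call sketch (not part of the original mfr module): render_docx
# expects an open binary file pointer and returns a RenderResult whose
# `content` attribute holds the sanitized HTML. The file name below is
# hypothetical.
def _render_docx_example():
    with open('example.docx', 'rb') as fp:
        result = render_docx(fp)
    return result.content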
| chrisseto/modular-file-renderer | mfr/ext/docx/render.py | Python | apache-2.0 | 699 |
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure LTCG is working properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('ltcg.gyp', chdir=CHDIR)
# Here we expect LTCG is able to inline functions beyond compile unit.
# Note: This marker is embedded in 'inline_test_main.cc'
INLINE_MARKER = '==== inlined ===='
# test 'LinkTimeCodeGenerationOptionDefault'
test.build('ltcg.gyp', 'test_ltcg_off', chdir=CHDIR)
test.run_built_executable('test_ltcg_off', chdir=CHDIR)
test.must_not_contain_any_line(test.stdout(), [INLINE_MARKER])
# test 'LinkTimeCodeGenerationOptionUse'
test.build('ltcg.gyp', 'test_ltcg_on', chdir=CHDIR)
test.must_contain_any_line(test.stdout(), ['Generating code'])
test.run_built_executable('test_ltcg_on', chdir=CHDIR)
test.must_contain_any_line(test.stdout(), [INLINE_MARKER])
test.pass_test()
| Jet-Streaming/gyp | test/win/gyptest-link-ltcg.py | Python | bsd-3-clause | 1,133 |
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2012, Machinalis S.R.L.
# This file is part of quepy and is distributed under the Modified BSD License.
# You should have received a copy of license in the LICENSE file.
#
# Authors: Rafael Carrascosa <[email protected]>
# Gonzalo Garcia Berrotaran <[email protected]>
"""
This file implements the ``Expression`` class.
``Expression`` is the base class for all the semantic representations in quepy.
It's meant to carry all the information necessary to build a database query in
an abstract form.
By design it's aimed specifically to represent a SPARQL query, but it should
be able to represent queries in other database languages too.
A (simple) SPARQL query can be thought as a subgraph that has to match into a
larger graph (the database). Each node of the subgraph is a variable and every
edge a relation. So in order to represent a query, ``Expression`` implements a
this subgraph using adjacency lists.
Also, ``Expression`` instances are meant to be combined with each other somehow
to make complex queries out of simple ones (this is one of the main objectives
of quepy).
To do that, every ``Expression`` has a special node called the ``head``, which
is the target node (variable) of the represented query. All operations over
``Expression`` instances work on the ``head`` node, leaving the rest of the
nodes intact.
So ``Expression`` graphs are not built by explicitly adding nodes and edges
like any other normal graph. Instead they are built by a combination of the
following basic operations:
- ``__init__``: When a ``Expression`` is instantiated a single solitary
node is created in the graph.
- ``decapitate``: Creates a blank node and makes it the new ``head`` of the
``Expression``. Then it adds an edge (a relation) linking
this new head to the old one. So in a single operation a
node and an edge are added. Used to represent stuff like
``?x rdf:type ?y``.
- ``add_data``: Adds a relation into some constant data from the ``head``
node of the ``Expression``. Used to represent stuff like
``?x rdf:label "John Von Neumann"``.
- ``merge``: Given two ``Expressions``, it joins their graphs preserving
every node and every edge intact except for their ``head``
nodes.
The ``head`` nodes are merged into a single node that is the
new ``head`` and shares all the edges of the previous heads.
This is used to combine two graphs like this:
::
A = ?x rdf:type ?y
B = ?x rdf:label "John Von Neumann"
Into a new one:
::
A + B = ?x rdf:type ?y;
?x rdf:label "John Von Neumann"
You might be saying "Why?! oh gosh why you did it like this?!".
The reasons are:
- It allows other parts of the code to build queries in a super
      intuitive language, like ``IsPerson() + HasKeyword("Russell")``.
Go and see the DBpedia example.
- You can only build connected graphs (ie, no useless variables in query).
- You cannot have variable name clashes.
- You cannot build cycles into the graph (could be a con to some, a
      plus to others (it's a plus to me))
- There are just 3 really basic operations and their semantics are defined
      concisely without special cases (if you care for that kind of stuff
(I do)).
"""
from collections import defaultdict
from copy import deepcopy
def isnode(x):
return isinstance(x, int)
class Expression(object):
def __init__(self):
"""
Creates a new graph with a single solitary blank node.
"""
self.nodes = []
self.head = self._add_node()
def _add_node(self):
"""
Adds a blank node to the graph and returns it's index (a unique
identifier).
"""
i = len(self.nodes)
self.nodes.append([])
return i
def get_head(self):
"""
Returns the index (the unique identifier) of the head node.
"""
return self.head
def merge(self, other):
"""
Given other Expression, it joins their graphs preserving every
node and every edge intact except for the ``head`` nodes.
The ``head`` nodes are merged into a single node that is the new
``head`` and shares all the edges of the previous heads.
"""
translation = defaultdict(self._add_node)
translation[other.head] = self.head
for node in other.iter_nodes():
for relation, dest in other.iter_edges(node):
xs = self.nodes[translation[node]]
if isnode(dest):
dest = translation[dest]
xs.append((relation, dest))
def decapitate(self, relation, reverse=False):
"""
Creates a new blank node and makes it the ``head`` of the
Expression. Then it adds an edge (a ``relation``) linking the
the new head to the old one. So in a single operation a
node and an edge are added.
If ``reverse`` is ``True`` then the ``relation`` links the old head to
the new head instead of the opposite (some relations are not
commutative).
"""
oldhead = self.head
self.head = self._add_node()
if reverse:
self.nodes[oldhead].append((relation, self.head))
else:
self.nodes[self.head].append((relation, oldhead))
def add_data(self, relation, value):
"""
Adds a ``relation`` to some constant ``value`` to the ``head`` of the
Expression.
        ``value`` is recommended to be of type:
- ``unicode``
- ``str`` and can be decoded using the default encoding (settings.py)
- A custom class that implements a ``__unicode__`` method.
- It can *NEVER* be an ``int``.
You should not use this to relate nodes in the graph, only to add
data fields to a node.
To relate nodes in a graph use a combination of merge and decapitate.
"""
assert not isnode(value)
self.nodes[self.head].append((relation, value))
def iter_nodes(self):
"""
Iterates the indexes (the unique identifiers) of the Expression nodes.
"""
return xrange(len(self.nodes))
def iter_edges(self, node):
"""
Iterates over the pairs: ``(relation, index)`` which are the neighbors
of ``node`` in the expression graph, where:
- ``node`` is the index of the node (the unique identifier).
- ``relation`` is the label of the edge between the nodes
- ``index`` is the index of the neighbor (the unique identifier).
"""
return iter(self.nodes[node])
def __add__(self, other):
"""
Merges ``self`` and ``other`` in a new Expression instance.
Ie, ``self`` and ``other`` are not modified.
"""
new = deepcopy(self)
new.merge(other)
return new
def __iadd__(self, other):
"""
Merges ``self`` and ``other`` into ``self``
``other`` is not modified but the original data in ``self`` is lost.
"""
self.merge(other)
return self
def __len__(self):
"""
Amount of nodes in the graph.
"""
return len(self.nodes)
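# Illustrative usage sketch (not part of the original quepy module): it walks
# through the basic operations described in the module docstring. The relation
# labels "rdf:label" and "rdf:type" are just example strings.
def _expression_usage_sketch():
    # ?x rdf:label "John Von Neumann"
    person = Expression()
    person.add_data(u"rdf:label", u"John Von Neumann")
    # decapitate: a fresh head ?y is created with the edge ?y rdf:type ?x
    typed = Expression()
    typed.decapitate(u"rdf:type")
    # merge (through __add__): both heads collapse into a single node that
    # carries the rdf:label data and the rdf:type edge, so two nodes remain.
    combined = person + typed
    assert len(combined) == 2
    return combined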
def make_canonical_expression(e):
i = 0
q = [e.get_head()]
seen = set()
while i != len(q):
node = q[i]
i += 1
assert node not in seen, "Nouuu, expression is cyclic!"
for relation, child in e.iter_edges(node):
if isnode(child):
q.append(child)
q.reverse()
canon = {}
for node in q:
childs = []
for label, child in e.iter_edges(node):
if isnode(child):
child = canon[child]
childs.append((label, child))
childs.sort()
canon[node] = tuple(childs)
return canon[e.get_head()]
if __name__ == "__main__":
from AIML.CAML.semiotics.quepy.quepy.printout import expression_to_dot, expression_to_sparql
def HasKeyword(x):
e = Expression()
e.add_data("Keyword", x)
return e
def HasTopic(e, reverse=False):
e.decapitate("HasTopic", reverse)
return e
def WasBornIn(e, reverse=False):
e.decapitate("WasBornIn", reverse)
return e
poet = HasKeyword("poet") + HasKeyword("famous")
drama = HasKeyword("drama")
germany = HasKeyword("germany")
E = poet + HasTopic(drama) + WasBornIn(germany)
print expression_to_dot(E)
print expression_to_sparql(E)[1]
from pprint import pprint
pprint(make_canonical_expression(E))
| mpetyx/pychatbot | AIML/CAML/semiotics/quepy/quepy/expression.py | Python | apache-2.0 | 8,937 |
#!/usr/bin/python2.4
#
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This Script will prompt the user if portable server is running."""
import os
import sys
import urllib
import portable_config
import Tkinter
import tkMessageBox
def IsServerRunning(port):
"""Returns whether server is already running."""
try:
fp = urllib.urlopen('http://localhost:%s/ping'% port)
fp.close()
  # Ok, if there isn't a server running.
except:
return False
return True
def main():
"""Display Error Message, if the server is running."""
port = portable_config.PortableConfig().Port()
root = Tkinter.Tk()
root.title('Uninstall portable server')
root.withdraw()
uninstaller_flag = os.getenv('GEE_PORTABLE_SILENT_UNINSTALLER_DELETE_DATA')
unistall_msg = ('Portable Server is Running. \n'
'Please Stop the server for Uninstallation.')
if IsServerRunning(port):
if uninstaller_flag is None:
tkMessageBox.showerror('Uninstallation Failed!', '%s'% unistall_msg)
else:
print 'Uninstallation Failed!\n%s '% unistall_msg
sys.exit(1)
if __name__ == '__main__':
main()
| iparanza/earthenterprise | earth_enterprise/legacy_installer/Scripts/portable/windows-installer/check_server_running.py | Python | apache-2.0 | 1,661 |
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.core import signals
from indico.core.db import db
from indico.modules.events.agreements.models.agreements import Agreement
from indico.modules.events.agreements.notifications import notify_agreement_new
from indico.util.signals import named_objects_from_signal
def get_agreement_definitions():
return named_objects_from_signal(signals.agreements.get_definitions.send(), plugin_attr='plugin')
def send_new_agreements(event, name, people, email_body, cc_addresses, from_address):
"""Create and send agreements for a list of people on a given event.
:param event: The `Event` associated with the agreement
    :param name: The agreement type matching a :class:`AgreementDefinition` name
:param people: The list of people for whom agreements will be created
:param email_body: The body of the email
:param cc_addresses: Email addresses to send CCs to
:param from_address: Email address of the sender
"""
agreements = []
for person in people.values():
agreement = Agreement.create_from_data(event=event, type_=name, person=person)
db.session.add(agreement)
agreements.append(agreement)
db.session.flush()
for agreement in agreements:
notify_agreement_new(agreement, email_body, cc_addresses, from_address)
return agreements
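# Illustrative call sketch (not part of Indico): shows the expected argument
# shapes for send_new_agreements. `event` is an Event and `people` maps
# identifiers to person objects, both supplied by the caller; the definition
# name 'speaker-release' is hypothetical and must match a registered
# AgreementDefinition.
def _send_new_agreements_sketch(event, people):
    return send_new_agreements(
        event=event,
        name='speaker-release',
        people=people,
        email_body='Please sign the attached agreement.',
        cc_addresses=['organizers@example.com'],
        from_address='no-reply@example.com',
    )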
| indico/indico | indico/modules/events/agreements/util.py | Python | mit | 1,533 |
from django.core.urlresolvers import reverse
from testbase.unit import UnitTestCase
class TestAssertContextValueEqual(UnitTestCase):
def test_raisesNoErrorIfContextContainsExpectedValue(self):
response = self.get('home')
self.assertContextValueEqual(response, 'context_var', 'expected')
def test_raisesAssertionErrorIfValueIsIncorrect(self):
response = self.get('home')
self.assertRaises(AssertionError, self.assertContextValueEqual, response, 'context_var', self.randStr())
def test_raisesAssertionErrorIfContextValueNotPresent(self):
response = self.get('home')
self.assertRaises(AssertionError, self.assertContextValueEqual, response, self.randStr(), self.randStr())
def test_lastContextConsidersContextFromLastResponse(self):
self.get('home')
self.assertLastContextValueEqual('context_var', 'expected')
| tctimmeh/django-testing-base | testsite/testapp/tests/unit/testAssertContextValueEqual.py | Python | mit | 892 |
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import sys
import collections
import numbers
from six import string_types
import warnings
import numpy as np
# If numba is installed, import jit. Otherwise, define an empty decorator with
# the same name.
HAS_NUMBA = False
try:
from numba import jit
HAS_NUMBA = True
except ImportError:
warnings.warn("Numba not installed. Faking it")
class jit(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, func):
def wrapped_f(*args, **kwargs):
return func(*args, **kwargs)
return wrapped_f
def _root_squared_mean(array):
return np.sqrt(np.sum(array ** 2)) / len(array)
def simon(message, **kwargs):
"""The Statistical Interpretation MONitor.
A warning system designed to always remind the user that Simon
is watching him/her.
Parameters
----------
message : string
The message that is thrown
kwargs : dict
The rest of the arguments that are passed to warnings.warn
"""
warnings.warn("SIMON says: {0}".format(message), **kwargs)
def rebin_data(x, y, dx_new, yerr=None, method='sum', dx=None):
"""Rebin some data to an arbitrary new data resolution. Either sum
the data points in the new bins or average them.
Parameters
----------
x: iterable
        The independent variable with some resolution dx_old = x[1]-x[0]
y: iterable
        The dependent variable to be binned
dx_new: float
The new resolution of the dependent variable x
Other parameters
----------------
yerr: iterable, optional
The uncertainties of y, to be propagated during binning.
method: {"sum" | "average" | "mean"}, optional, default "sum"
The method to be used in binning. Either sum the samples y in
each new bin of x, or take the arithmetic mean.
dx: float
The old resolution (otherwise, calculated from median diff)
Returns
-------
xbin: numpy.ndarray
The midpoints of the new bins in x
ybin: numpy.ndarray
The binned quantity y
ybin_err: numpy.ndarray
The uncertainties of the binned values of y.
step_size: float
The size of the binning step
"""
y = np.asarray(y)
yerr = np.asarray(assign_value_if_none(yerr, np.zeros_like(y)))
dx_old = assign_value_if_none(dx, np.median(np.diff(x)))
if dx_new < dx_old:
raise ValueError("New frequency resolution must be larger than "
"old frequency resolution.")
step_size = dx_new / dx_old
output = []
outputerr = []
for i in np.arange(0, y.shape[0], step_size):
total = 0
totalerr = 0
int_i = int(i)
prev_frac = int_i + 1 - i
prev_bin = int_i
total += prev_frac * y[prev_bin]
totalerr += prev_frac * (yerr[prev_bin] ** 2)
if i + step_size < len(x):
# Fractional part of next bin:
next_frac = i + step_size - int(i + step_size)
next_bin = int(i + step_size)
total += next_frac * y[next_bin]
totalerr += next_frac * (yerr[next_bin] ** 2)
        total += sum(y[int(i + 1):int(i + step_size)])
        totalerr += sum(yerr[int(i + 1):int(i + step_size)] ** 2)
output.append(total)
outputerr.append(np.sqrt(totalerr))
output = np.asarray(output)
outputerr = np.asarray(outputerr)
if method in ['mean', 'avg', 'average', 'arithmetic mean']:
ybin = output / np.float(step_size)
ybinerr = outputerr / np.sqrt(np.float(step_size))
elif method == "sum":
ybin = output
ybinerr = outputerr
else:
raise ValueError("Method for summing or averaging not recognized. "
"Please enter either 'sum' or 'mean'.")
tseg = x[-1] - x[0] + dx_old
if (tseg / dx_new % 1) > 0:
ybin = ybin[:-1]
ybinerr = ybinerr[:-1]
new_x0 = (x[0] - (0.5 * dx_old)) + (0.5 * dx_new)
xbin = np.arange(ybin.shape[0]) * dx_new + new_x0
return xbin, ybin, ybinerr, step_size
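# Illustrative sketch (not part of the original stingray module): rebinning a
# uniformly sampled series from dx = 1 to dx_new = 2 with method='sum' merges
# pairs of samples, so 8 input points give 4 output bins of value 2.
def _rebin_data_example():
    x = np.arange(8)
    y = np.ones(8)
    xbin, ybin, ybinerr, step = rebin_data(x, y, dx_new=2, method='sum')
    assert step == 2
    assert len(xbin) == 4
    assert np.allclose(ybin, 2.0)
    return xbin, ybin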
def rebin_data_log(x, y, f, y_err=None, dx=None):
"""Logarithmic rebin of the periodogram.
The new frequency depends on the previous frequency modified by a factor f:
dnu_j = dnu_{j-1}*(1+f)
Parameters
----------
x: iterable
        The independent variable with some resolution dx_old = x[1]-x[0]
y: iterable
        The dependent variable to be binned
f: float
The factor of increase of each bin wrt the previous one.
Other Parameters
----------------
    y_err: iterable, optional
        The uncertainties of y, to be propagated during binning.
dx: float, optional
The binning step of the initial xs
Returns
-------
xbin: numpy.ndarray
The midpoints of the new bins in x
ybin: numpy.ndarray
The binned quantity y
ybin_err: numpy.ndarray
The uncertainties of the binned values of y.
step_size: float
The size of the binning step
"""
import scipy
dx_init = assign_value_if_none(dx, np.median(np.diff(x)))
y = np.asarray(y)
y_err = np.asarray(assign_value_if_none(y_err, np.zeros_like(y)))
minx = x[1] * 0.5 # frequency to start from
maxx = x[-1] # maximum frequency to end
binx = [minx, minx + dx_init] # first
dx = x[1] # the frequency resolution of the first bin
# until we reach the maximum frequency, increase the width of each
# frequency bin by f
while binx[-1] <= maxx:
binx.append(binx[-1] + dx * (1.0 + f))
dx = binx[-1] - binx[-2]
# compute the mean of the ys that fall into each new frequency bin.
# we cast to np.double due to scipy's bad handling of longdoubles
biny, bin_edges, binno = scipy.stats.binned_statistic(
x.astype(np.double), y.astype(np.double),
statistic="mean", bins=binx)
biny_err, bin_edges, binno = scipy.stats.binned_statistic(
x.astype(np.double), y_err.astype(np.double),
statistic=_root_squared_mean, bins=binx)
# compute the number of powers in each frequency bin
nsamples = np.array([len(binno[np.where(binno == i)[0]])
for i in range(np.max(binno))])
return binx, biny, biny_err, nsamples
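# Illustrative sketch (not part of the original stingray module): logarithmic
# rebinning makes each bin wider than the previous one by a factor (1 + f),
# so a long uniform grid collapses into far fewer bins. Requires scipy.
def _rebin_data_log_example():
    freq = np.linspace(0.01, 10.0, 1000)
    power = np.ones_like(freq)
    binf, binp, binp_err, nsamples = rebin_data_log(freq, power, f=0.1)
    assert len(binf) < len(freq)  # bin edges grow geometrically
    return binf, binp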
def assign_value_if_none(value, default):
return default if value is None else value
def look_for_array_in_array(array1, array2):
return next((i for i in array1 if i in array2), None)
def is_string(s): # pragma : no cover
"""Portable function to answer this question."""
PY2 = sys.version_info[0] == 2
if PY2:
return isinstance(s, basestring) # NOQA
else:
return isinstance(s, str) # NOQA
def is_iterable(stuff):
"""Test if stuff is an iterable."""
return isinstance(stuff, collections.Iterable)
def order_list_of_arrays(data, order):
if hasattr(data, 'items'):
data = dict([(key, value[order])
for key, value in data.items()])
elif is_iterable(data):
data = [i[order] for i in data]
else:
data = None
return data
def optimal_bin_time(fftlen, tbin):
"""Vary slightly the bin time to have a power of two number of bins.
Given an FFT length and a proposed bin time, return a bin time
slightly shorter than the original, that will produce a power-of-two number
of FFT bins.
"""
return fftlen / (2 ** np.ceil(np.log2(fftlen / tbin)))
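# Illustrative sketch (not part of the original stingray module): for a 512 s
# segment and a requested 1.1 s bin, 512 / 1.1 is about 465 samples, the next
# power of two is 512, and the bin time is nudged down to exactly 1 s.
def _optimal_bin_time_example():
    assert optimal_bin_time(512, 1.1) == 1.0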
def contiguous_regions(condition):
"""Find contiguous True regions of the boolean array "condition".
Return a 2D array where the first column is the start index of the region
and the second column is the end index.
Parameters
----------
condition : boolean array
Returns
-------
idx : [[i0_0, i0_1], [i1_0, i1_1], ...]
A list of integer couples, with the start and end of each True blocks
in the original array
Notes
-----
From : http://stackoverflow.com/questions/4494404/find-large-number-of-consecutive-values-
fulfilling-condition-in-a-numpy-array
"""
# NOQA
# Find the indices of changes in "condition"
diff = np.logical_xor(condition[1:], condition[:-1])
idx, = diff.nonzero()
# We need to start things after the change in "condition". Therefore,
# we'll shift the index by 1 to the right.
idx += 1
if condition[0]:
# If the start of condition is True prepend a 0
idx = np.r_[0, idx]
if condition[-1]:
# If the end of condition is True, append the length of the array
idx = np.r_[idx, condition.size]
# Reshape the result into two columns
idx.shape = (-1, 2)
return idx
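# Illustrative sketch (not part of the original stingray module):
# contiguous_regions returns [start, stop) index pairs for each run of True
# values in the input boolean array.
def _contiguous_regions_example():
    condition = np.array([False, True, True, False, True])
    regions = contiguous_regions(condition)
    assert (regions == np.array([[1, 3], [4, 5]])).all()
    return regions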
def is_int(obj):
return isinstance(obj, (numbers.Integral, np.integer))
def get_random_state(random_state=None):
if not random_state:
random_state = np.random.mtrand._rand
else:
if is_int(random_state):
random_state = np.random.RandomState(random_state)
elif not isinstance(random_state, np.random.RandomState):
raise ValueError("{value} can't be used to generate a numpy.random.RandomState".format(
value=random_state
))
return random_state
def baseline_als(y, lam, p, niter=10):
"""Baseline Correction with Asymmetric Least Squares Smoothing.
Modifications to the routine from Eilers & Boelens 2005
https://www.researchgate.net/publication/228961729_Technical_Report_Baseline_Correction_with_Asymmetric_Least_Squares_Smoothing
The Python translation is partly from
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
Parameters
----------
y : array of floats
the "light curve". It assumes equal spacing.
lam : float
"smoothness" parameter. Larger values make the baseline stiffer
Typically 1e2 < lam < 1e9
p : float
"asymmetry" parameter. Smaller values make the baseline more
"horizontal". Typically 0.001 < p < 0.1, but not necessary.
"""
from scipy import sparse
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w * y)
w = p * (y > z) + (1 - p) * (y < z)
return z
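# Illustrative sketch (not part of the original stingray module): with a stiff
# baseline (large lam) and strong asymmetry (small p), baseline_als should
# track a slow trend while mostly ignoring narrow positive spikes. Parameter
# values are starting points from the docstring ranges; scipy is required.
def _baseline_als_example():
    x = np.linspace(0, 10, 200)
    y = 0.5 * x
    y[50] += 10.0   # narrow spikes on top of the trend
    y[120] += 8.0
    baseline = baseline_als(y, lam=1e5, p=0.01)
    return y - baseline  # residuals dominated by the spikes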
def excess_variance(lc, normalization='fvar'):
"""Calculate the excess variance.
Vaughan+03
Parameters
----------
lc : a :class:`Lightcurve` object
normalization : str
if 'fvar', return normalized square-root excess variance. If 'none',
return the unnormalized variance
Returns
-------
var_xs : float
var_xs_err : float
"""
lc_mean_var = np.mean(lc.counts_err ** 2)
lc_actual_var = np.var(lc.counts)
var_xs = lc_actual_var - lc_mean_var
mean_lc = np.mean(lc.counts)
mean_ctvar = mean_lc ** 2
fvar = np.sqrt(var_xs / mean_ctvar)
N = len(lc.counts)
var_xs_err_A = np.sqrt(2 / N) * lc_mean_var / mean_lc ** 2
var_xs_err_B = np.sqrt(mean_lc ** 2 / N) * 2 * fvar / mean_lc
var_xs_err = np.sqrt(var_xs_err_A ** 2 + var_xs_err_B ** 2)
fvar_err = var_xs_err / (2 * fvar)
if normalization == 'fvar':
return fvar, fvar_err
elif normalization == 'none' or normalization is None:
return var_xs, var_xs_err
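# Illustrative sketch (not part of the original stingray module): the function
# only reads the `counts` and `counts_err` attributes, so a small stand-in
# object is used instead of a full Lightcurve. A sinusoidal modulation makes
# the intrinsic variance clearly exceed the measurement noise.
def _excess_variance_example():
    class _FakeLightcurve(object):
        pass
    rng_local = np.random.RandomState(0)
    t = np.arange(1000)
    signal = 100 + 20 * np.sin(2 * np.pi * t / 100.0)
    lc_like = _FakeLightcurve()
    lc_like.counts = rng_local.poisson(signal).astype(float)
    lc_like.counts_err = np.sqrt(lc_like.counts)
    fvar, fvar_err = excess_variance(lc_like, normalization='fvar')
    assert fvar > 0
    return fvar, fvar_err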
def create_window(N, window_type='uniform'):
""" A method to create window functions commonly used in signal processing.
Windows supported are:
Hamming, Hanning, uniform(rectangular window), triangular window, blackmann window among others.
Parameters
----------
N : int
Total number of data points in window. If negative, abs is taken.
window_type : {'uniform', 'parzen', 'hamming', 'hanning', 'traingular', 'welch', 'blackmann', 'flat-top'}, optional, default 'uniform'
Type of window to create.
Returns
-------
window: numpy.ndarray
Window function of length N.
"""
if not isinstance(N, int):
raise TypeError('N (window length) must be an integer')
WINDOWS = ['uniform', 'parzen', 'hamming', 'hanning', 'triangular', 'welch', 'blackmann', 'flat-top']
if not isinstance(window_type, string_types):
raise TypeError('type of window must be specified as string!')
window_type = window_type.lower()
if window_type not in WINDOWS:
raise ValueError("Wrong window type specified or window function is not available")
# Return empty array as window if N = 0
if N == 0:
return np.array([])
window = None
N = abs(N)
# Window samples index
n = np.arange(N)
# Constants
N_minus_1 = N - 1
N_by_2 = np.int((np.floor((N_minus_1) / 2)))
# Create Windows
if window_type == 'uniform':
window = np.ones(N)
if window_type == 'parzen':
N_parzen = np.int(np.ceil((N + 1) / 2))
N2_plus_1 = np.int(np.floor((N_parzen / 2))) + 1
window = np.zeros(N_parzen)
windlag0 = np.arange(0, N2_plus_1) / (N_parzen - 1)
windlag1 = 1 - np.arange(N2_plus_1, N_parzen) / (N_parzen - 1)
window[:N2_plus_1] = 1 - (1 - windlag0) * windlag0 * windlag0 * 6
window[N2_plus_1:] = windlag1 * windlag1 * windlag1 * 2
lagindex = np.arange(N_parzen - 1, 0, -1)
window = np.concatenate((window[lagindex], window))
window = window[:N]
if window_type == 'hamming':
window = 0.54 - 0.46 * np.cos((2 * np.pi * n) / N_minus_1)
if window_type == 'hanning':
window = 0.5 * (1 - np.cos(2 * np.pi * n / N_minus_1))
if window_type == 'triangular':
window = 1 - np.abs((n - (N_by_2)) / N)
if window_type == 'welch':
N_minus_1_by_2 = N_minus_1 / 2
window = 1 - np.square((n - N_minus_1_by_2) / N_minus_1_by_2)
if window_type == 'blackmann':
a0 = 0.42659
a1 = 0.49656
a2 = 0.076849
window = a0 - a1 * np.cos((2 * np.pi * n) / N_minus_1) + a2 * np.cos((4 * np.pi * n) / N_minus_1)
if window_type == 'flat-top':
a0 = 1
a1 = 1.93
a2 = 1.29
a3 = 0.388
a4 = 0.028
window = a0 - a1 * np.cos((2 * np.pi * n) / N_minus_1) + \
a2 * np.cos((4 * np.pi * n) / N_minus_1) - \
a3 * np.cos((6 * np.pi * n) / N_minus_1) + \
a4 * np.cos((8 * np.pi * n) / N_minus_1)
    return window
| pabell/stingray | stingray/utils.py | Python | mit | 14,885
from pylab import *
def func(tt):
if tt=='aa':
print " ----> hell!"
raise SystemExit
else:
return 999.
print func('uu')
print func('aa')
| jimsrc/seatos | mcs/src/tmp/te.py | Python | mit | 171 |
#!/usr/bin/python
import Tkinter
from Tkinter import *
from PIL import ImageTk, Image
import os
import time
root = Tk()
frame = Frame(root)
root.wm_title("Switch to Windows")
frame.pack()
bottomframe = Frame(root)
bottomframe.pack( side = BOTTOM )
def shutdownPress():
text.config(text = "Shutting down...")
os.system("sudo /mnt/sd/Downloads/Software/Portable/BootOpts/bootWindows.sh")
os.system("systemctl poweroff")
root.update()
def restartPress():
text.config(text = "Restarting...")
os.system("sudo /mnt/sd/Downloads/Software/Portable/BootOpts/bootWindows.sh")
os.system("systemctl reboot")
root.update()
def cancelPress():
quit()
root.update()
text = Label(frame, text="Do you want to switch to Windows?")
text.pack( side = TOP)
redbutton = Button(frame, text="Restart", command=restartPress)
redbutton.pack( side = LEFT)
greenbutton = Button(frame, text="Shutdown", command=shutdownPress)
greenbutton.pack( side = LEFT )
bluebutton = Button(frame, text="Cancel", command=cancelPress)
bluebutton.pack( side = LEFT )
root.mainloop()
| billwi/Ubuntu-setup | Configs/SwitchToWindows/windowsStartup.py | Python | mpl-2.0 | 1,090 |
import warnings
from asl_data import SinglesData
def recognize(models: dict, test_set: SinglesData):
""" Recognize test word sequences from word models set
:param models: dict of trained models
{'SOMEWORD': GaussianHMM model object, 'SOMEOTHERWORD': GaussianHMM model object, ...}
:param test_set: SinglesData object
:return: (list, list) as probabilities, guesses
both lists are ordered by the test set word_id
       probabilities is a list of dictionaries where each key is a word and each value is the Log Likelihood
[{SOMEWORD': LogLvalue, 'SOMEOTHERWORD' LogLvalue, ... },
{SOMEWORD': LogLvalue, 'SOMEOTHERWORD' LogLvalue, ... },
]
guesses is a list of the best guess words ordered by the test set word_id
['WORDGUESS0', 'WORDGUESS1', 'WORDGUESS2',...]
"""
warnings.filterwarnings("ignore", category=DeprecationWarning)
probabilities = []
guesses = []
# TODO implement the recognizer
# return probabilities, guesses
for word_id in range(0, len(test_set.get_all_Xlengths())):
current_sequence, current_length = test_set.get_item_Xlengths(word_id)
probability_dict = {}
for word, model in models.items():
try:
probability_dict[word] = model.score(current_sequence, current_length)
except:
probability_dict[word] = float('-inf')
continue
probabilities.append(probability_dict)
best_score = max(probability_dict, key = probability_dict.get)
guesses.append(best_score)
return probabilities, guesses
| loredanacirstea/ai-algos | hidden_markov_models/my_recognizer.py | Python | gpl-3.0 | 1,646 |
"""
Testing for the tree module (sklearn.tree).
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn import tree
from sklearn import datasets
from sklearn.externals.six import StringIO
from sklearn.preprocessing import balance_weights
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification_toy():
"""Check classification on a toy dataset."""
# Decision trees
clf = tree.DecisionTreeClassifier()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
clf = tree.DecisionTreeClassifier(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Extra-trees
clf = tree.ExtraTreeClassifier()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
clf = tree.ExtraTreeClassifier(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
def test_weighted_classification_toy():
"""Check classification on a weighted toy dataset."""
clf = tree.DecisionTreeClassifier()
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result)
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result)
def test_regression_toy():
"""Check regression on a toy dataset."""
# Decision trees
clf = tree.DecisionTreeRegressor()
clf.fit(X, y)
assert_almost_equal(clf.predict(T), true_result)
clf = tree.DecisionTreeRegressor(max_features=1, random_state=1)
clf.fit(X, y)
assert_almost_equal(clf.predict(T), true_result)
# Extra-trees
clf = tree.ExtraTreeRegressor()
clf.fit(X, y)
assert_almost_equal(clf.predict(T), true_result)
clf = tree.ExtraTreeRegressor(max_features=1, random_state=1)
clf.fit(X, y)
assert_almost_equal(clf.predict(T), true_result)
def test_xor():
"""Check on a XOR problem"""
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
clf = tree.DecisionTreeClassifier()
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0)
clf = tree.DecisionTreeClassifier(max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0)
clf = tree.ExtraTreeClassifier()
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0)
clf = tree.ExtraTreeClassifier(max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0)
def test_graphviz_toy():
"""Check correctness of graphviz output on a toy dataset."""
clf = tree.DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# test export code
out = StringIO()
tree.export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
tree_toy = StringIO(
"digraph Tree {\n"
"0 [label=\"X[0] <= 0.0000\\nerror = 0.5"
"\\nsamples = 6\\nvalue = [ 3. 3.]\", shape=\"box\"] ;\n"
"1 [label=\"error = 0.0000\\nsamples = 3\\n"
"value = [ 3. 0.]\", shape=\"box\"] ;\n"
"0 -> 1 ;\n"
"2 [label=\"error = 0.0000\\nsamples = 3\\n"
"value = [ 0. 3.]\", shape=\"box\"] ;\n"
"0 -> 2 ;\n"
"}")
contents2 = tree_toy.getvalue()
assert contents1 == contents2, \
"graphviz output test failed\n: %s != %s" % (contents1, contents2)
# test with feature_names
out = StringIO()
out = tree.export_graphviz(clf, out_file=out,
feature_names=["feature1", ""])
contents1 = out.getvalue()
tree_toy = StringIO(
"digraph Tree {\n"
"0 [label=\"feature1 <= 0.0000\\nerror = 0.5"
"\\nsamples = 6\\nvalue = [ 3. 3.]\", shape=\"box\"] ;\n"
"1 [label=\"error = 0.0000\\nsamples = 3\\n"
"value = [ 3. 0.]\", shape=\"box\"] ;\n"
"0 -> 1 ;\n"
"2 [label=\"error = 0.0000\\nsamples = 3\\n"
"value = [ 0. 3.]\", shape=\"box\"] ;\n"
"0 -> 2 ;\n"
"}")
contents2 = tree_toy.getvalue()
assert contents1 == contents2, \
"graphviz output test failed\n: %s != %s" % (contents1, contents2)
# test improperly formed feature_names
out = StringIO()
assert_raises(IndexError, tree.export_graphviz,
clf, out, feature_names=[])
def test_iris():
"""Check consistency on dataset iris."""
for c in ('gini',
'entropy'):
clf = tree.DecisionTreeClassifier(criterion=c).fit(iris.data,
iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with criterion " + c + \
" and score = " + str(score)
clf = tree.DecisionTreeClassifier(criterion=c,
max_features=2,
random_state=1).fit(iris.data,
iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.5, "Failed with criterion " + c + \
" and score = " + str(score)
def test_boston():
"""Check consistency on dataset boston house prices."""
for c in ('mse',):
clf = tree.DecisionTreeRegressor(criterion=c).fit(boston.data,
boston.target)
score = np.mean(np.power(clf.predict(boston.data) - boston.target, 2))
assert score < 1, "Failed with criterion " + c + \
" and score = " + str(score)
clf = tree.DecisionTreeRegressor(criterion=c,
max_features=6,
random_state=1).fit(boston.data,
boston.target)
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
score = np.mean(np.power(clf.predict(boston.data) - boston.target, 2))
assert score < 2, "Failed with criterion " + c + \
" and score = " + str(score)
def test_probability():
"""Predict probabilities using DecisionTreeClassifier."""
clf = tree.DecisionTreeClassifier(max_depth=1, max_features=1,
random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_arrayrepr():
"""Check the array representation."""
# Check resize
clf = tree.DecisionTreeRegressor(max_depth=None)
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
clf.fit(X, y)
def test_pure_set():
"""Check when y is pure."""
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
clf = tree.DecisionTreeClassifier().fit(X, y)
assert_array_equal(clf.predict(X), y)
clf = tree.DecisionTreeRegressor().fit(X, y)
assert_array_equal(clf.predict(X), y)
def test_numerical_stability():
"""Check numerical stability."""
old_settings = np.geterr()
np.seterr(all="raise")
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
dt = tree.DecisionTreeRegressor()
dt.fit(X, y)
dt.fit(X, -y)
dt.fit(-X, y)
dt.fit(-X, -y)
np.seterr(**old_settings)
def test_importances():
"""Check variable importances."""
X, y = datasets.make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
clf = tree.DecisionTreeClassifier()
clf.fit(X, y)
importances = clf.feature_importances_
n_important = sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = clf.transform(X, threshold="mean")
assert 0 < X_new.shape[1] < X.shape[1]
def test_error():
"""Test that it gives proper exception on deficient input."""
# Invalid values for parameters
assert_raises(ValueError,
tree.DecisionTreeClassifier(min_samples_leaf=-1).fit,
X, y)
assert_raises(ValueError,
tree.DecisionTreeClassifier(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError,
tree.DecisionTreeClassifier(max_depth=-1).fit,
X, y)
assert_raises(ValueError,
tree.DecisionTreeClassifier(min_density=2.0).fit,
X, y)
assert_raises(ValueError,
tree.DecisionTreeClassifier(max_features=42).fit,
X, y)
# Wrong dimensions
clf = tree.DecisionTreeClassifier()
y2 = y[:-1]
assert_raises(ValueError, clf.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
clf = tree.DecisionTreeClassifier()
clf.fit(Xf, y)
assert_array_equal(clf.predict(T), true_result)
# predict before fitting
clf = tree.DecisionTreeClassifier()
assert_raises(Exception, clf.predict, T)
# predict on vector with different dims
clf.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, clf.predict, t[:, 1:])
# use values of max_features that are invalid
clf = tree.DecisionTreeClassifier(max_features=10)
assert_raises(ValueError, clf.fit, X, y)
clf = tree.DecisionTreeClassifier(max_features=-1)
assert_raises(ValueError, clf.fit, X, y)
clf = tree.DecisionTreeClassifier(max_features="foobar")
assert_raises(ValueError, clf.fit, X, y)
tree.DecisionTreeClassifier(max_features="auto").fit(X, y)
tree.DecisionTreeClassifier(max_features="sqrt").fit(X, y)
tree.DecisionTreeClassifier(max_features="log2").fit(X, y)
tree.DecisionTreeClassifier(max_features=None).fit(X, y)
# predict before fit
clf = tree.DecisionTreeClassifier()
assert_raises(Exception, clf.predict_proba, X)
clf.fit(X, y)
X2 = [-2, -1, 1] # wrong feature shape for sample
assert_raises(ValueError, clf.predict_proba, X2)
# wrong sample shape
Xt = np.array(X).T
clf = tree.DecisionTreeClassifier()
clf.fit(np.dot(X, Xt), y)
assert_raises(ValueError, clf.predict, X)
clf = tree.DecisionTreeClassifier()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
# wrong length of sample mask
clf = tree.DecisionTreeClassifier()
sample_mask = np.array([1])
assert_raises(ValueError, clf.fit, X, y, sample_mask=sample_mask)
# wrong length of X_argsorted
clf = tree.DecisionTreeClassifier()
X_argsorted = np.array([1])
assert_raises(ValueError, clf.fit, X, y, X_argsorted=X_argsorted)
def test_min_samples_leaf():
"""Test if leaves contain more than leaf_count training examples"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
for tree_class in [tree.DecisionTreeClassifier, tree.ExtraTreeClassifier]:
clf = tree_class(min_samples_leaf=5).fit(X, y)
out = clf.tree_.apply(X)
node_counts = np.bincount(out)
leaf_count = node_counts[node_counts != 0] # drop inner nodes
assert np.min(leaf_count) >= 5
def test_pickle():
import pickle
# classification
obj = tree.DecisionTreeClassifier()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert score == score2, "Failed to generate same score " + \
" after pickling (classification) "
# regression
obj = tree.DecisionTreeRegressor()
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert score == score2, "Failed to generate same score " + \
" after pickling (regression) "
def test_multioutput():
"""Check estimators on multi-output problems."""
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
clf = tree.DecisionTreeClassifier()
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
clf = tree.DecisionTreeRegressor()
y_hat = clf.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_sample_mask():
"""Test sample_mask argument. """
# test list sample_mask
clf = tree.DecisionTreeClassifier()
sample_mask = [1] * len(X)
clf.fit(X, y, sample_mask=sample_mask)
assert_array_equal(clf.predict(T), true_result)
# test different dtype
clf = tree.DecisionTreeClassifier()
sample_mask = np.ones((len(X),), dtype=np.int32)
clf.fit(X, y, sample_mask=sample_mask)
assert_array_equal(clf.predict(T), true_result)
def test_X_argsorted():
"""Test X_argsorted argument. """
# test X_argsorted with different layout and dtype
clf = tree.DecisionTreeClassifier()
X_argsorted = np.argsort(np.array(X).T, axis=1).T
clf.fit(X, y, X_argsorted=X_argsorted)
assert_array_equal(clf.predict(T), true_result)
def test_classes_shape():
"""Test that n_classes_ and classes_ have proper shape."""
# Classification, single output
clf = tree.DecisionTreeClassifier()
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = tree.DecisionTreeClassifier()
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_equal(clf.n_classes_, [2, 2])
assert_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
"""Check class rebalancing."""
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = balance_weights(unbalanced_y)
clf = tree.DecisionTreeClassifier()
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_sample_weight():
"""Check sample weighting."""
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = tree.DecisionTreeClassifier()
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = tree.DecisionTreeClassifier(max_depth=1)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .50 # Samples of class '2' are no longer weightier
clf = tree.DecisionTreeClassifier(max_depth=1)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 1000)
clf = tree.DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
from sklearn.utils.fixes import bincount
sample_weight = bincount(duplicates, minlength=X.shape[0])
clf2 = tree.DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
# Test negative weights
X = iris.data
y = iris.target
sample_weight = -np.ones(X.shape[0])
clf = tree.DecisionTreeClassifier(random_state=1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(X.shape[0])
sample_weight[0] = -1
clf = tree.DecisionTreeClassifier(random_state=1)
clf.fit(X, y, sample_weight=sample_weight)
# Check that predict_proba returns valid probabilities in the presence of
# samples with negative weight
X = iris.data
y = iris.target
sample_weight = rng.normal(.5, 1.0, X.shape[0])
clf = tree.DecisionTreeClassifier(random_state=1)
clf.fit(X, y, sample_weight=sample_weight)
proba = clf.predict_proba(X)
assert (proba >= 0).all() and (proba <= 1).all()
if __name__ == "__main__":
import nose
nose.runmodule()
| maxlikely/scikit-learn | sklearn/tree/tests/test_tree.py | Python | bsd-3-clause | 19,420 |
import os
from botplugin import BotPlugin
gifs = []
def get_doggif():
import random
global gifs
if not len(gifs):
fname = os.path.join(os.path.dirname(__file__), 'doggif.txt')
f = open(fname, 'r')
gifs = f.read().split()
f.close()
random.shuffle(gifs)
return gifs.pop()
class DogGif(BotPlugin):
def command_doggif(self, bot, command, args, nick):
bot.speak(get_doggif())
def page_generator():
import requests
url = 'http://fuckyeahdoggifs.tumblr.com/page/%d'
page_num = 1
finished = False
while not finished:
r = requests.get(url % page_num)
if r.ok:
yield r.text
else:
finished = True
page_num += 1
def generate_gif_list():
from BeautifulSoup import BeautifulSoup
import re
doggif_urls = []
pages = page_generator()
for page in pages:
s = BeautifulSoup(page)
images = s.findAll('img')
for image in images:
url = image.get('src')
            if re.search(r'\.gif$', url):
doggif_urls.append(url)
if len(images) < 5: # beyond final page
break
return doggif_urls
if __name__ == '__main__':
doggif_urls = generate_gif_list()
f = open('botplugins/doggif.txt', 'w')
f.write('\n'.join(doggif_urls))
f.close()
| aelse/MrHappy | botplugins/cmd_doggif.py | Python | gpl-3.0 | 1,369 |
from sklearn.linear_model import Lasso
def get_lasso_prediction(train_data, train_truth, test_data, test_truth, alpha=1.0, iter_id=0):
clf = Lasso(alpha=alpha)
clf.fit(train_data, train_truth)
predicted = clf.predict(test_data)
return predicted.ravel()
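# Illustrative usage sketch (not part of the original module); the arrays below
# are synthetic stand-ins for the genomic train/test matrices this helper
# normally receives.
#
#   import numpy as np
#   X = np.random.rand(100, 20)
#   y = X.dot(np.random.rand(20))
#   preds = get_lasso_prediction(X[:80], y[:80], X[80:], y[80:], alpha=0.1)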
| rileymcdowell/genomic-neuralnet | genomic_neuralnet/methods/lasso_regression.py | Python | mit | 270 |
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Class for storing Skia Gold comparison properties.
Examples:
* git revision being tested
* Whether the test is being run locally or on a bot
* What the continuous integration system is
"""
import logging
import os
class SkiaGoldProperties(object):
def __init__(self, args):
"""Abstract class to validate and store properties related to Skia Gold.
Args:
args: The parsed arguments from an argparse.ArgumentParser.
"""
self._git_revision = None
self._issue = None
self._patchset = None
self._job_id = None
self._local_pixel_tests = None
self._no_luci_auth = None
self._bypass_skia_gold_functionality = None
self._code_review_system = None
self._continuous_integration_system = None
self._local_png_directory = None
self._InitializeProperties(args)
def IsTryjobRun(self):
return self.issue is not None
@property
def continuous_integration_system(self):
return self._continuous_integration_system or 'buildbucket'
@property
def code_review_system(self):
return self._code_review_system or 'gerrit'
@property
def git_revision(self):
return self._GetGitRevision()
@property
def issue(self):
return self._issue
@property
def job_id(self):
return self._job_id
@property
def local_pixel_tests(self):
return self._IsLocalRun()
@property
def local_png_directory(self):
return self._local_png_directory
@property
def no_luci_auth(self):
return self._no_luci_auth
@property
def patchset(self):
return self._patchset
@property
def bypass_skia_gold_functionality(self):
return self._bypass_skia_gold_functionality
@staticmethod
def _GetGitOriginMainHeadSha1():
raise NotImplementedError()
def _GetGitRevision(self):
if not self._git_revision:
      # Automated tests should always pass the revision, so assume we're on
      # a workstation and try to get the local origin/main HEAD.
if not self._IsLocalRun():
raise RuntimeError(
'--git-revision was not passed when running on a bot')
revision = self._GetGitOriginMainHeadSha1()
if not revision or len(revision) != 40:
raise RuntimeError(
'--git-revision not passed and unable to determine from git')
self._git_revision = revision
return self._git_revision
def _IsLocalRun(self):
if self._local_pixel_tests is None:
# Look for the presence of the SWARMING_SERVER environment variable as a
# heuristic to determine whether we're running on a workstation or a bot.
# This should always be set on swarming, but would be strange to be set on
# a workstation.
self._local_pixel_tests = 'SWARMING_SERVER' not in os.environ
if self._local_pixel_tests:
logging.warning(
'Automatically determined that test is running on a workstation')
else:
logging.warning(
'Automatically determined that test is running on a bot')
return self._local_pixel_tests
def _InitializeProperties(self, args):
if hasattr(args, 'local_pixel_tests'):
# If not set, will be automatically determined later if needed.
self._local_pixel_tests = args.local_pixel_tests
if hasattr(args, 'skia_gold_local_png_write_directory'):
self._local_png_directory = args.skia_gold_local_png_write_directory
if hasattr(args, 'no_luci_auth'):
self._no_luci_auth = args.no_luci_auth
if hasattr(args, 'bypass_skia_gold_functionality'):
self._bypass_skia_gold_functionality = args.bypass_skia_gold_functionality
if hasattr(args, 'code_review_system'):
self._code_review_system = args.code_review_system
if hasattr(args, 'continuous_integration_system'):
self._continuous_integration_system = args.continuous_integration_system
# Will be automatically determined later if needed.
if not hasattr(args, 'git_revision') or not args.git_revision:
return
self._git_revision = args.git_revision
# Only expected on tryjob runs.
if not hasattr(args, 'gerrit_issue') or not args.gerrit_issue:
return
self._issue = args.gerrit_issue
if not hasattr(args, 'gerrit_patchset') or not args.gerrit_patchset:
raise RuntimeError(
'--gerrit-issue passed, but --gerrit-patchset not passed.')
self._patchset = args.gerrit_patchset
if not hasattr(args, 'buildbucket_id') or not args.buildbucket_id:
raise RuntimeError(
'--gerrit-issue passed, but --buildbucket-id not passed.')
self._job_id = args.buildbucket_id
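# Illustrative sketch (not part of the original file): typical construction from
# parsed arguments. The attribute names mirror the hasattr() checks above; the
# concrete values are hypothetical.
#
#   import argparse
#   args = argparse.Namespace(git_revision='a' * 40, gerrit_issue=1234,
#                             gerrit_patchset=1, buildbucket_id=5678,
#                             local_pixel_tests=False)
#   props = SkiaGoldProperties(args)
#   assert props.IsTryjobRun() and props.git_revision == 'a' * 40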
| ric2b/Vivaldi-browser | chromium/build/skia_gold_common/skia_gold_properties.py | Python | bsd-3-clause | 4,742 |
from .fw1 import get_dot_completions, get_script_completions, get_inline_documentation
from .. import completions, inline_documentation
completions.add_completion_source('script', get_script_completions)
completions.add_completion_source('dot', get_dot_completions)
inline_documentation.add_documentation_source(get_inline_documentation)
| jcberquist/SublimeText-Lucee | src/fw1/__init__.py | Python | mit | 339 |
import zstackwoodpecker.test_state as ts_header
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template2", path_list=[
[TestAction.create_volume, "volume1", "=scsi"],
[TestAction.create_volume, "volume2", "=scsi"],
#[TestAction.create_volume, "volume3", "=scsi"],
#[TestAction.create_volume, "volume4", "=scsi"],
#[TestAction.create_volume, "volume5", "=scsi"],
#[TestAction.create_volume, "volume6", "=scsi"],
#[TestAction.create_volume, "volume7", "=scsi"],
#[TestAction.create_volume, "volume8", "=scsi"],
[TestAction.attach_volume, "vm1", "volume1"],
[TestAction.attach_volume, "vm1", "volume2"],
#[TestAction.attach_volume, "vm1", "volume3"],
#[TestAction.attach_volume, "vm1", "volume4"],
#[TestAction.attach_volume, "vm1", "volume5"],
#[TestAction.attach_volume, "vm1", "volume6"],
#[TestAction.attach_volume, "vm1", "volume7"],
#[TestAction.attach_volume, "vm1", "volume8"],
[TestAction.create_volume_snapshot, "volume2", "snapshot2-1"],
[TestAction.delete_volume_snapshot, "snapshot2-1"],
[TestAction.detach_volume, "volume1"],
[TestAction.create_volume_snapshot, "vm1-root", "snapshot1"],
[TestAction.attach_volume, "vm1", "volume1"],
[TestAction.create_volume_backup, "volume1", "backup1"],
[TestAction.detach_volume, "volume1"],
[TestAction.stop_vm, "vm1"],
[TestAction.change_vm_image, "vm1"],
[TestAction.start_vm, "vm1"],
[TestAction.attach_volume, "vm1", "volume1"],
[TestAction.resize_data_volume, "volume1", 5*1024*1024],
[TestAction.detach_volume, "volume1"],
[TestAction.attach_volume, "vm1", "volume1"],
[TestAction.stop_vm, "vm1"],
[TestAction.use_volume_backup, "backup1"],
[TestAction.start_vm, "vm1"],
[TestAction.detach_volume, "volume1"],
[TestAction.detach_volume, "volume2"],
#[TestAction.detach_volume, "volume3"],
#[TestAction.detach_volume, "volume4"],
#[TestAction.detach_volume, "volume5"],
#[TestAction.detach_volume, "volume6"],
#[TestAction.detach_volume, "volume7"],
#[TestAction.detach_volume, "volume8"],
[TestAction.stop_vm, "vm1"],
[TestAction.ps_migrate_volume, "vm1-root"],
[TestAction.start_vm, "vm1"],
[TestAction.delete_volume_snapshot, "snapshot1"],
[TestAction.reboot_vm, "vm1"]])
| zstackio/zstack-woodpecker | integrationtest/vm/multihosts/volumes/paths/path99.py | Python | apache-2.0 | 2,275 |
# Implementation of Binary Search algorithm
import random
def main():
    _list = sorted(random.sample(xrange(1, 101), 10))  # binary search requires sorted input
value = 87
print("Searching for the value: " + str(value))
    if binarySearch(_list, value):
print("The number " + str(value) + " found in the list")
else:
print("The number " + str(value) + " not found in the list")
def binarySearch(theValues, target):
# Start with the entire sequence of elements. 0:length
low = 0
    high = len(theValues) - 1
# Repeatedly subdivide the sequence in half until the target is found.
while low <= high:
# Find the midpoint of the sequence.
mid = (high + low) // 2
# Does the midpoint contain the target?
if theValues[mid] == target:
return True
# Or does the target precede the midpoint?
elif target < theValues[mid]:
high = mid - 1 # Update the upper bound
# Or does it follow the midpoint
else:
low = mid + 1 # Update the lower bound
# If the sequence cannot be subdivided further, we're done.
return False
if __name__ == '__main__':
main()
| eneskemalergin/Data_Structures_and_Algorithms | Chapter5/binarysearch.py | Python | gpl-3.0 | 1,151 |
from modularui import csv_autolog
csv_autolog() | jasongtz/RataTAT-web | csvonly.py | Python | mit | 48 |
import random
import datetime
import time
import hashlib
from django.db import models
from django.conf import settings
from django.urls import reverse
from django.contrib.auth.models import User, Group
from django.db.models.signals import post_save
from djangopress.core.models import Property
from django.utils import timezone
from PIL import Image
DEFAULT_USER_GROUP = getattr(settings, 'DEFAULT_USER_GROUP', None)
def avatar_path(instance, filename):
return ("avatars/%s/%s-%s-%s" % (time.strftime("%y/%m"), instance.user.pk, instance.user.username.lower(), filename.lower()))
class UserProfile(models.Model):
EMAIL_SETTINGS = (
('HI', 'Hide Email'),
('SW', 'Show Email'),
('HB', 'Use Web Form')
)
title = models.CharField(max_length=100, default="New member")
homepage = models.CharField(max_length=100, blank=True, null=True)
#IM contact (jabber, icq, msn, aim, yahoo, gtalk, twitter, facebook)
location = models.CharField(max_length=50, blank=True, null=True)
avatar = models.ImageField(blank=True, null=True, upload_to=avatar_path)
signature = models.TextField(blank=True, null=True)
timezone = models.CharField(max_length=50, null=True, blank=True)
language = models.CharField(max_length=50, null=True, blank=True)
registration_ip = models.GenericIPAddressField(blank=True, null=True, )
last_ip_used = models.GenericIPAddressField(blank=True, null=True)
admin_note = models.TextField(blank=True, null=True)
activate_key = models.CharField(max_length=127, blank=True, editable=False)
activate_key_expirary = models.DateTimeField(blank=True, editable=False)
banned = models.BooleanField(default=False)
#remember_between_visits = models.BooleanField(default=True)
user = models.OneToOneField(User, related_name="profile", on_delete=models.CASCADE)
email_settings = models.CharField(choices=EMAIL_SETTINGS, default='HI', max_length=2)
gender = models.CharField(max_length=1, blank=True, null=True, default=None, choices=(('', 'Private'), ('M', 'Male'), ('F', 'Female')))
date_of_birth = models.DateTimeField(blank=True, null=True)
def get_ip(self):
if self.last_ip_used:
return self.last_ip_used
return self.registration_ip
def __getattr__(self, name):
if name.startswith("social_"):
try:
return self.user.social.filter(account=name[7:])[0]
except:
raise AttributeError(name)
        raise AttributeError(name)
def get_absolute_url(self):
return reverse('accounts-profile', kwargs={"username": self.user.username})
def __init__(self, *args, **kwargs):
super(UserProfile, self).__init__(*args, **kwargs)
self._banned = self.banned
self._avatar = self.avatar
def save(self, force_insert=False, force_update=False):
if self._banned == False and self.banned == True:
# if we banned them, they can't then login
self.user.is_active = False
self.user.save()
if self._avatar != self.avatar and self.avatar:
image = Image.open(self.avatar)
size = settings.ACCOUNTS_USER_LIMITS.get('avatar', {}).get('size', 50)
            image = image.resize((size, size), Image.ANTIALIAS)  # resize() returns a new image
image.save(self.avatar.path)
super(UserProfile, self).save(force_insert, force_update)
self._banned = self.banned
self._avatar = self.avatar
def set_activate_key(self):
salt = hashlib.sha1((str(random.random()) + str(random.random())).encode('utf-8')).hexdigest()[:5]
key = "".join(str(item) for item in (self.user.username,
self.user.email, datetime.datetime.now()))
hsh = hashlib.sha1((salt + key).encode('utf-8')).hexdigest()
self.activate_key = hsh
self.activate_key_expirary = datetime.datetime.fromtimestamp(time.time() + (7 * 24 * 60 * 60))
def check_activate_key(self, hsh):
return (hsh == self.activate_key
and timezone.now() <= self.activate_key_expirary)
class UserSocial(models.Model):
ACCOUNTS = (
('twitter', 'Twitter'),
('google_plus', 'Google Plus'),
('facebook', 'Facebook'),
('linkedin', 'Linked In'),
('pinterest', 'Pinterest'),
)
account = models.CharField(max_length=20, choices=ACCOUNTS)
value = models.CharField(max_length=100)
user_profile = models.ForeignKey(User, related_name="social", on_delete=models.CASCADE)
class UserProperty(Property):
user_profile = models.ForeignKey(User, related_name="properties", on_delete=models.CASCADE)
def create_profile(sender, **kargs):
if kargs.get("created", False):
profile = UserProfile(user=kargs.get("instance"))
profile.set_activate_key()
profile.save()
post_save.connect(create_profile, User, dispatch_uid="djangopress.accounts.create_profile")
def add_to_group(sender, **kargs):
if DEFAULT_USER_GROUP and kargs.get("created", False):
user = kargs.get("instance")
user.groups.add(Group.objects.get(name=DEFAULT_USER_GROUP))
post_save.connect(add_to_group, User, dispatch_uid="djangopress.accounts.add_to_group") | codefisher/djangopress | djangopress/accounts/models.py | Python | mit | 5,282 |
########################################################################
# Copyright 2012 wst, [email protected]
#
# This file is part of wstbot.
#
# wstbot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wstbot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with wstbot. If not, see <http://www.gnu.org/licenses/>.
########################################################################
# converts media files to an sqlite db
# will _not_ be updated anymore
import sqlite3
import os
import json
from util import str_list_to_int
MEDIA_PATH = os.path.join("data", "media")
MEDIA_DB_PATH = os.path.join("data", "media.db")
def main():
filelist = os.listdir(MEDIA_PATH)
if filelist is None or (len(filelist)) <= 0:
print("no files found")
return
filelist_int = str_list_to_int(filelist)
filelist = [str(x) for x in sorted(filelist_int)]
with sqlite3.connect(MEDIA_DB_PATH) as conn:
cur = conn.cursor()
for filename in filelist:
with open(os.path.join(MEDIA_PATH, filename), "r") as mfile:
for line in mfile.readlines():
data = json.loads(line)
if type(data) != dict:
continue
if "title" not in data:
data["title"] = ""
cur.execute("insert into media (type, title, url) values (?, ?, ?)",
(data["type"], data["title"], data["url"]))
conn.commit()
if __name__ == "__main__":
main()
| salamiwst/wstbot | patches/media_to_db.py | Python | gpl-3.0 | 1,982 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "bom_data_parser"
cfg.versionfile_source = "bom_data_parser/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
# --always -long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
| amacd31/bom_data_parser | bom_data_parser/_version.py | Python | bsd-3-clause | 15,781 |
#!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script for generating new binary protobuf seeds for fuzzers.
Currently supports creating a single seed binary protobuf of the form
zucchini.fuzzer.FilePair.
"""
import argparse
import hashlib
import logging
import os
import platform
import subprocess
import sys
ABS_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__)))
ABS_TESTDATA_PATH = os.path.join(ABS_PATH, 'testdata')
def parse_args():
"""Parses arguments from command-line."""
parser = argparse.ArgumentParser()
parser.add_argument('--raw', help='Whether to use Raw Zucchini.',
action='store_true')
parser.add_argument('old_file', help='Old file to generate/apply patch.')
parser.add_argument('new_file', help='New file to generate patch from.')
parser.add_argument('patch_file', help='Patch filename to use.')
parser.add_argument('output_file', help='File to write binary protobuf to.')
return parser.parse_args()
def gen(old_file, new_file, patch_file, output_file, is_raw, is_win):
"""Generates a new patch and binary encodes a protobuf pair."""
# Create output directory if missing.
output_dir = os.path.dirname(output_file)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Handle Windows executable names.
zucchini = 'zucchini'
protoc = 'protoc'
if is_win:
zucchini += '.exe'
protoc += '.exe'
zuc_cmd = [os.path.abspath(zucchini), '-gen']
if is_raw:
zuc_cmd.append('-raw')
# Generate a new patch.
ret = subprocess.call(zuc_cmd + [old_file, new_file, patch_file],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if ret:
logging.error('Patch generation failed for ({}, {})'.format(old_file,
new_file))
return ret
# Binary encode the protobuf pair.
ret = subprocess.call([sys.executable,
os.path.join(ABS_PATH, 'create_seed_file_pair.py'),
os.path.abspath(protoc), old_file, patch_file,
output_file])
os.remove(patch_file)
return ret
def main():
args = parse_args()
return gen(os.path.join(ABS_TESTDATA_PATH, args.old_file),
os.path.join(ABS_TESTDATA_PATH, args.new_file),
os.path.abspath(args.patch_file),
os.path.abspath(args.output_file),
args.raw,
platform.system() == 'Windows')
if __name__ == '__main__':
sys.exit(main())
| chromium/chromium | components/zucchini/fuzzers/generate_fuzzer_data.py | Python | bsd-3-clause | 2,665 |
# -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
#
# Copyright (C) 2010 Jonathan Matthew
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
import rb
from config import ReplayGainConfigDialog
from player import ReplayGainPlayer
class ReplayGainPlugin(rb.Plugin):
def __init__ (self):
rb.Plugin.__init__ (self)
self.config_dialog = None
def activate (self, shell):
self.player = ReplayGainPlayer(shell)
def deactivate (self, shell):
self.config_dialog = None
self.player.deactivate()
self.player = None
def create_configure_dialog(self, dialog=None):
if self.config_dialog is None:
self.config_dialog = ReplayGainConfigDialog(self)
self.config_dialog.connect('response', self.config_dialog_response_cb)
self.config_dialog.present()
return self.config_dialog
def config_dialog_response_cb(self, dialog, response):
dialog.hide()
| dignan/control | plugins/replaygain/replaygain/__init__.py | Python | gpl-2.0 | 2,008 |
import traci
import os, sys
import environment_state
if 'SUMO_HOME' not in os.environ:
print "SUMO setup incomplete. Exiting."
exit()
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
sumoBinary = "C:/Program Files (x86)/DLR/Sumo/bin/sumo-gui"
sumoCmd = [sumoBinary, "-c", "C:/Users/sreeniva/Desktop/Reinforcement Learning/madrl_traffic_control/Sumo Stuff/hello.sumocfg"]
traci.start(sumoCmd)
# run the entire RL workflow
environment_state.test_workflow()
| kamikazekartik/madrl_traffic_control | Sumo Stuff/main.py | Python | mit | 491 |
from __future__ import absolute_import, division, unicode_literals
import re
from xml.sax.saxutils import escape, unescape
from pipenv.patched.notpip._vendor.six.moves import urllib_parse as urlparse
from . import base
from ..constants import namespaces, prefixes
__all__ = ["Filter"]
allowed_elements = frozenset((
(namespaces['html'], 'a'),
(namespaces['html'], 'abbr'),
(namespaces['html'], 'acronym'),
(namespaces['html'], 'address'),
(namespaces['html'], 'area'),
(namespaces['html'], 'article'),
(namespaces['html'], 'aside'),
(namespaces['html'], 'audio'),
(namespaces['html'], 'b'),
(namespaces['html'], 'big'),
(namespaces['html'], 'blockquote'),
(namespaces['html'], 'br'),
(namespaces['html'], 'button'),
(namespaces['html'], 'canvas'),
(namespaces['html'], 'caption'),
(namespaces['html'], 'center'),
(namespaces['html'], 'cite'),
(namespaces['html'], 'code'),
(namespaces['html'], 'col'),
(namespaces['html'], 'colgroup'),
(namespaces['html'], 'command'),
(namespaces['html'], 'datagrid'),
(namespaces['html'], 'datalist'),
(namespaces['html'], 'dd'),
(namespaces['html'], 'del'),
(namespaces['html'], 'details'),
(namespaces['html'], 'dfn'),
(namespaces['html'], 'dialog'),
(namespaces['html'], 'dir'),
(namespaces['html'], 'div'),
(namespaces['html'], 'dl'),
(namespaces['html'], 'dt'),
(namespaces['html'], 'em'),
(namespaces['html'], 'event-source'),
(namespaces['html'], 'fieldset'),
(namespaces['html'], 'figcaption'),
(namespaces['html'], 'figure'),
(namespaces['html'], 'footer'),
(namespaces['html'], 'font'),
(namespaces['html'], 'form'),
(namespaces['html'], 'header'),
(namespaces['html'], 'h1'),
(namespaces['html'], 'h2'),
(namespaces['html'], 'h3'),
(namespaces['html'], 'h4'),
(namespaces['html'], 'h5'),
(namespaces['html'], 'h6'),
(namespaces['html'], 'hr'),
(namespaces['html'], 'i'),
(namespaces['html'], 'img'),
(namespaces['html'], 'input'),
(namespaces['html'], 'ins'),
(namespaces['html'], 'keygen'),
(namespaces['html'], 'kbd'),
(namespaces['html'], 'label'),
(namespaces['html'], 'legend'),
(namespaces['html'], 'li'),
(namespaces['html'], 'm'),
(namespaces['html'], 'map'),
(namespaces['html'], 'menu'),
(namespaces['html'], 'meter'),
(namespaces['html'], 'multicol'),
(namespaces['html'], 'nav'),
(namespaces['html'], 'nextid'),
(namespaces['html'], 'ol'),
(namespaces['html'], 'output'),
(namespaces['html'], 'optgroup'),
(namespaces['html'], 'option'),
(namespaces['html'], 'p'),
(namespaces['html'], 'pre'),
(namespaces['html'], 'progress'),
(namespaces['html'], 'q'),
(namespaces['html'], 's'),
(namespaces['html'], 'samp'),
(namespaces['html'], 'section'),
(namespaces['html'], 'select'),
(namespaces['html'], 'small'),
(namespaces['html'], 'sound'),
(namespaces['html'], 'source'),
(namespaces['html'], 'spacer'),
(namespaces['html'], 'span'),
(namespaces['html'], 'strike'),
(namespaces['html'], 'strong'),
(namespaces['html'], 'sub'),
(namespaces['html'], 'sup'),
(namespaces['html'], 'table'),
(namespaces['html'], 'tbody'),
(namespaces['html'], 'td'),
(namespaces['html'], 'textarea'),
(namespaces['html'], 'time'),
(namespaces['html'], 'tfoot'),
(namespaces['html'], 'th'),
(namespaces['html'], 'thead'),
(namespaces['html'], 'tr'),
(namespaces['html'], 'tt'),
(namespaces['html'], 'u'),
(namespaces['html'], 'ul'),
(namespaces['html'], 'var'),
(namespaces['html'], 'video'),
(namespaces['mathml'], 'maction'),
(namespaces['mathml'], 'math'),
(namespaces['mathml'], 'merror'),
(namespaces['mathml'], 'mfrac'),
(namespaces['mathml'], 'mi'),
(namespaces['mathml'], 'mmultiscripts'),
(namespaces['mathml'], 'mn'),
(namespaces['mathml'], 'mo'),
(namespaces['mathml'], 'mover'),
(namespaces['mathml'], 'mpadded'),
(namespaces['mathml'], 'mphantom'),
(namespaces['mathml'], 'mprescripts'),
(namespaces['mathml'], 'mroot'),
(namespaces['mathml'], 'mrow'),
(namespaces['mathml'], 'mspace'),
(namespaces['mathml'], 'msqrt'),
(namespaces['mathml'], 'mstyle'),
(namespaces['mathml'], 'msub'),
(namespaces['mathml'], 'msubsup'),
(namespaces['mathml'], 'msup'),
(namespaces['mathml'], 'mtable'),
(namespaces['mathml'], 'mtd'),
(namespaces['mathml'], 'mtext'),
(namespaces['mathml'], 'mtr'),
(namespaces['mathml'], 'munder'),
(namespaces['mathml'], 'munderover'),
(namespaces['mathml'], 'none'),
(namespaces['svg'], 'a'),
(namespaces['svg'], 'animate'),
(namespaces['svg'], 'animateColor'),
(namespaces['svg'], 'animateMotion'),
(namespaces['svg'], 'animateTransform'),
(namespaces['svg'], 'clipPath'),
(namespaces['svg'], 'circle'),
(namespaces['svg'], 'defs'),
(namespaces['svg'], 'desc'),
(namespaces['svg'], 'ellipse'),
(namespaces['svg'], 'font-face'),
(namespaces['svg'], 'font-face-name'),
(namespaces['svg'], 'font-face-src'),
(namespaces['svg'], 'g'),
(namespaces['svg'], 'glyph'),
(namespaces['svg'], 'hkern'),
(namespaces['svg'], 'linearGradient'),
(namespaces['svg'], 'line'),
(namespaces['svg'], 'marker'),
(namespaces['svg'], 'metadata'),
(namespaces['svg'], 'missing-glyph'),
(namespaces['svg'], 'mpath'),
(namespaces['svg'], 'path'),
(namespaces['svg'], 'polygon'),
(namespaces['svg'], 'polyline'),
(namespaces['svg'], 'radialGradient'),
(namespaces['svg'], 'rect'),
(namespaces['svg'], 'set'),
(namespaces['svg'], 'stop'),
(namespaces['svg'], 'svg'),
(namespaces['svg'], 'switch'),
(namespaces['svg'], 'text'),
(namespaces['svg'], 'title'),
(namespaces['svg'], 'tspan'),
(namespaces['svg'], 'use'),
))
allowed_attributes = frozenset((
# HTML attributes
(None, 'abbr'),
(None, 'accept'),
(None, 'accept-charset'),
(None, 'accesskey'),
(None, 'action'),
(None, 'align'),
(None, 'alt'),
(None, 'autocomplete'),
(None, 'autofocus'),
(None, 'axis'),
(None, 'background'),
(None, 'balance'),
(None, 'bgcolor'),
(None, 'bgproperties'),
(None, 'border'),
(None, 'bordercolor'),
(None, 'bordercolordark'),
(None, 'bordercolorlight'),
(None, 'bottompadding'),
(None, 'cellpadding'),
(None, 'cellspacing'),
(None, 'ch'),
(None, 'challenge'),
(None, 'char'),
(None, 'charoff'),
(None, 'choff'),
(None, 'charset'),
(None, 'checked'),
(None, 'cite'),
(None, 'class'),
(None, 'clear'),
(None, 'color'),
(None, 'cols'),
(None, 'colspan'),
(None, 'compact'),
(None, 'contenteditable'),
(None, 'controls'),
(None, 'coords'),
(None, 'data'),
(None, 'datafld'),
(None, 'datapagesize'),
(None, 'datasrc'),
(None, 'datetime'),
(None, 'default'),
(None, 'delay'),
(None, 'dir'),
(None, 'disabled'),
(None, 'draggable'),
(None, 'dynsrc'),
(None, 'enctype'),
(None, 'end'),
(None, 'face'),
(None, 'for'),
(None, 'form'),
(None, 'frame'),
(None, 'galleryimg'),
(None, 'gutter'),
(None, 'headers'),
(None, 'height'),
(None, 'hidefocus'),
(None, 'hidden'),
(None, 'high'),
(None, 'href'),
(None, 'hreflang'),
(None, 'hspace'),
(None, 'icon'),
(None, 'id'),
(None, 'inputmode'),
(None, 'ismap'),
(None, 'keytype'),
(None, 'label'),
(None, 'leftspacing'),
(None, 'lang'),
(None, 'list'),
(None, 'longdesc'),
(None, 'loop'),
(None, 'loopcount'),
(None, 'loopend'),
(None, 'loopstart'),
(None, 'low'),
(None, 'lowsrc'),
(None, 'max'),
(None, 'maxlength'),
(None, 'media'),
(None, 'method'),
(None, 'min'),
(None, 'multiple'),
(None, 'name'),
(None, 'nohref'),
(None, 'noshade'),
(None, 'nowrap'),
(None, 'open'),
(None, 'optimum'),
(None, 'pattern'),
(None, 'ping'),
(None, 'point-size'),
(None, 'poster'),
(None, 'pqg'),
(None, 'preload'),
(None, 'prompt'),
(None, 'radiogroup'),
(None, 'readonly'),
(None, 'rel'),
(None, 'repeat-max'),
(None, 'repeat-min'),
(None, 'replace'),
(None, 'required'),
(None, 'rev'),
(None, 'rightspacing'),
(None, 'rows'),
(None, 'rowspan'),
(None, 'rules'),
(None, 'scope'),
(None, 'selected'),
(None, 'shape'),
(None, 'size'),
(None, 'span'),
(None, 'src'),
(None, 'start'),
(None, 'step'),
(None, 'style'),
(None, 'summary'),
(None, 'suppress'),
(None, 'tabindex'),
(None, 'target'),
(None, 'template'),
(None, 'title'),
(None, 'toppadding'),
(None, 'type'),
(None, 'unselectable'),
(None, 'usemap'),
(None, 'urn'),
(None, 'valign'),
(None, 'value'),
(None, 'variable'),
(None, 'volume'),
(None, 'vspace'),
(None, 'vrml'),
(None, 'width'),
(None, 'wrap'),
(namespaces['xml'], 'lang'),
# MathML attributes
(None, 'actiontype'),
(None, 'align'),
(None, 'columnalign'),
(None, 'columnalign'),
(None, 'columnalign'),
(None, 'columnlines'),
(None, 'columnspacing'),
(None, 'columnspan'),
(None, 'depth'),
(None, 'display'),
(None, 'displaystyle'),
(None, 'equalcolumns'),
(None, 'equalrows'),
(None, 'fence'),
(None, 'fontstyle'),
(None, 'fontweight'),
(None, 'frame'),
(None, 'height'),
(None, 'linethickness'),
(None, 'lspace'),
(None, 'mathbackground'),
(None, 'mathcolor'),
(None, 'mathvariant'),
(None, 'mathvariant'),
(None, 'maxsize'),
(None, 'minsize'),
(None, 'other'),
(None, 'rowalign'),
(None, 'rowalign'),
(None, 'rowalign'),
(None, 'rowlines'),
(None, 'rowspacing'),
(None, 'rowspan'),
(None, 'rspace'),
(None, 'scriptlevel'),
(None, 'selection'),
(None, 'separator'),
(None, 'stretchy'),
(None, 'width'),
(None, 'width'),
(namespaces['xlink'], 'href'),
(namespaces['xlink'], 'show'),
(namespaces['xlink'], 'type'),
# SVG attributes
(None, 'accent-height'),
(None, 'accumulate'),
(None, 'additive'),
(None, 'alphabetic'),
(None, 'arabic-form'),
(None, 'ascent'),
(None, 'attributeName'),
(None, 'attributeType'),
(None, 'baseProfile'),
(None, 'bbox'),
(None, 'begin'),
(None, 'by'),
(None, 'calcMode'),
(None, 'cap-height'),
(None, 'class'),
(None, 'clip-path'),
(None, 'color'),
(None, 'color-rendering'),
(None, 'content'),
(None, 'cx'),
(None, 'cy'),
(None, 'd'),
(None, 'dx'),
(None, 'dy'),
(None, 'descent'),
(None, 'display'),
(None, 'dur'),
(None, 'end'),
(None, 'fill'),
(None, 'fill-opacity'),
(None, 'fill-rule'),
(None, 'font-family'),
(None, 'font-size'),
(None, 'font-stretch'),
(None, 'font-style'),
(None, 'font-variant'),
(None, 'font-weight'),
(None, 'from'),
(None, 'fx'),
(None, 'fy'),
(None, 'g1'),
(None, 'g2'),
(None, 'glyph-name'),
(None, 'gradientUnits'),
(None, 'hanging'),
(None, 'height'),
(None, 'horiz-adv-x'),
(None, 'horiz-origin-x'),
(None, 'id'),
(None, 'ideographic'),
(None, 'k'),
(None, 'keyPoints'),
(None, 'keySplines'),
(None, 'keyTimes'),
(None, 'lang'),
(None, 'marker-end'),
(None, 'marker-mid'),
(None, 'marker-start'),
(None, 'markerHeight'),
(None, 'markerUnits'),
(None, 'markerWidth'),
(None, 'mathematical'),
(None, 'max'),
(None, 'min'),
(None, 'name'),
(None, 'offset'),
(None, 'opacity'),
(None, 'orient'),
(None, 'origin'),
(None, 'overline-position'),
(None, 'overline-thickness'),
(None, 'panose-1'),
(None, 'path'),
(None, 'pathLength'),
(None, 'points'),
(None, 'preserveAspectRatio'),
(None, 'r'),
(None, 'refX'),
(None, 'refY'),
(None, 'repeatCount'),
(None, 'repeatDur'),
(None, 'requiredExtensions'),
(None, 'requiredFeatures'),
(None, 'restart'),
(None, 'rotate'),
(None, 'rx'),
(None, 'ry'),
(None, 'slope'),
(None, 'stemh'),
(None, 'stemv'),
(None, 'stop-color'),
(None, 'stop-opacity'),
(None, 'strikethrough-position'),
(None, 'strikethrough-thickness'),
(None, 'stroke'),
(None, 'stroke-dasharray'),
(None, 'stroke-dashoffset'),
(None, 'stroke-linecap'),
(None, 'stroke-linejoin'),
(None, 'stroke-miterlimit'),
(None, 'stroke-opacity'),
(None, 'stroke-width'),
(None, 'systemLanguage'),
(None, 'target'),
(None, 'text-anchor'),
(None, 'to'),
(None, 'transform'),
(None, 'type'),
(None, 'u1'),
(None, 'u2'),
(None, 'underline-position'),
(None, 'underline-thickness'),
(None, 'unicode'),
(None, 'unicode-range'),
(None, 'units-per-em'),
(None, 'values'),
(None, 'version'),
(None, 'viewBox'),
(None, 'visibility'),
(None, 'width'),
(None, 'widths'),
(None, 'x'),
(None, 'x-height'),
(None, 'x1'),
(None, 'x2'),
(namespaces['xlink'], 'actuate'),
(namespaces['xlink'], 'arcrole'),
(namespaces['xlink'], 'href'),
(namespaces['xlink'], 'role'),
(namespaces['xlink'], 'show'),
(namespaces['xlink'], 'title'),
(namespaces['xlink'], 'type'),
(namespaces['xml'], 'base'),
(namespaces['xml'], 'lang'),
(namespaces['xml'], 'space'),
(None, 'y'),
(None, 'y1'),
(None, 'y2'),
(None, 'zoomAndPan'),
))
attr_val_is_uri = frozenset((
(None, 'href'),
(None, 'src'),
(None, 'cite'),
(None, 'action'),
(None, 'longdesc'),
(None, 'poster'),
(None, 'background'),
(None, 'datasrc'),
(None, 'dynsrc'),
(None, 'lowsrc'),
(None, 'ping'),
(namespaces['xlink'], 'href'),
(namespaces['xml'], 'base'),
))
svg_attr_val_allows_ref = frozenset((
(None, 'clip-path'),
(None, 'color-profile'),
(None, 'cursor'),
(None, 'fill'),
(None, 'filter'),
(None, 'marker'),
(None, 'marker-start'),
(None, 'marker-mid'),
(None, 'marker-end'),
(None, 'mask'),
(None, 'stroke'),
))
svg_allow_local_href = frozenset((
(None, 'altGlyph'),
(None, 'animate'),
(None, 'animateColor'),
(None, 'animateMotion'),
(None, 'animateTransform'),
(None, 'cursor'),
(None, 'feImage'),
(None, 'filter'),
(None, 'linearGradient'),
(None, 'pattern'),
(None, 'radialGradient'),
(None, 'textpath'),
(None, 'tref'),
(None, 'set'),
(None, 'use')
))
allowed_css_properties = frozenset((
'azimuth',
'background-color',
'border-bottom-color',
'border-collapse',
'border-color',
'border-left-color',
'border-right-color',
'border-top-color',
'clear',
'color',
'cursor',
'direction',
'display',
'elevation',
'float',
'font',
'font-family',
'font-size',
'font-style',
'font-variant',
'font-weight',
'height',
'letter-spacing',
'line-height',
'overflow',
'pause',
'pause-after',
'pause-before',
'pitch',
'pitch-range',
'richness',
'speak',
'speak-header',
'speak-numeral',
'speak-punctuation',
'speech-rate',
'stress',
'text-align',
'text-decoration',
'text-indent',
'unicode-bidi',
'vertical-align',
'voice-family',
'volume',
'white-space',
'width',
))
allowed_css_keywords = frozenset((
'auto',
'aqua',
'black',
'block',
'blue',
'bold',
'both',
'bottom',
'brown',
'center',
'collapse',
'dashed',
'dotted',
'fuchsia',
'gray',
'green',
'!important',
'italic',
'left',
'lime',
'maroon',
'medium',
'none',
'navy',
'normal',
'nowrap',
'olive',
'pointer',
'purple',
'red',
'right',
'solid',
'silver',
'teal',
'top',
'transparent',
'underline',
'white',
'yellow',
))
allowed_svg_properties = frozenset((
'fill',
'fill-opacity',
'fill-rule',
'stroke',
'stroke-width',
'stroke-linecap',
'stroke-linejoin',
'stroke-opacity',
))
allowed_protocols = frozenset((
'ed2k',
'ftp',
'http',
'https',
'irc',
'mailto',
'news',
'gopher',
'nntp',
'telnet',
'webcal',
'xmpp',
'callto',
'feed',
'urn',
'aim',
'rsync',
'tag',
'ssh',
'sftp',
'rtsp',
'afs',
'data',
))
allowed_content_types = frozenset((
'image/png',
'image/jpeg',
'image/gif',
'image/webp',
'image/bmp',
'text/plain',
))
data_content_type = re.compile(r'''
^
# Match a content type <application>/<type>
(?P<content_type>[-a-zA-Z0-9.]+/[-a-zA-Z0-9.]+)
# Match any character set and encoding
(?:(?:;charset=(?:[-a-zA-Z0-9]+)(?:;(?:base64))?)
|(?:;(?:base64))?(?:;charset=(?:[-a-zA-Z0-9]+))?)
# Assume the rest is data
,.*
$
''',
re.VERBOSE)
class Filter(base.Filter):
"""Sanitizes token stream of XHTML+MathML+SVG and of inline style attributes"""
def __init__(self,
source,
allowed_elements=allowed_elements,
allowed_attributes=allowed_attributes,
allowed_css_properties=allowed_css_properties,
allowed_css_keywords=allowed_css_keywords,
allowed_svg_properties=allowed_svg_properties,
allowed_protocols=allowed_protocols,
allowed_content_types=allowed_content_types,
attr_val_is_uri=attr_val_is_uri,
svg_attr_val_allows_ref=svg_attr_val_allows_ref,
svg_allow_local_href=svg_allow_local_href):
"""Creates a Filter
:arg allowed_elements: set of elements to allow--everything else will
be escaped
:arg allowed_attributes: set of attributes to allow in
elements--everything else will be stripped
:arg allowed_css_properties: set of CSS properties to allow--everything
else will be stripped
:arg allowed_css_keywords: set of CSS keywords to allow--everything
else will be stripped
:arg allowed_svg_properties: set of SVG properties to allow--everything
else will be removed
:arg allowed_protocols: set of allowed protocols for URIs
:arg allowed_content_types: set of allowed content types for ``data`` URIs.
:arg attr_val_is_uri: set of attributes that have URI values--values
that have a scheme not listed in ``allowed_protocols`` are removed
:arg svg_attr_val_allows_ref: set of SVG attributes that can have
references
:arg svg_allow_local_href: set of SVG elements that can have local
hrefs--these are removed
"""
super(Filter, self).__init__(source)
self.allowed_elements = allowed_elements
self.allowed_attributes = allowed_attributes
self.allowed_css_properties = allowed_css_properties
self.allowed_css_keywords = allowed_css_keywords
self.allowed_svg_properties = allowed_svg_properties
self.allowed_protocols = allowed_protocols
self.allowed_content_types = allowed_content_types
self.attr_val_is_uri = attr_val_is_uri
self.svg_attr_val_allows_ref = svg_attr_val_allows_ref
self.svg_allow_local_href = svg_allow_local_href
def __iter__(self):
for token in base.Filter.__iter__(self):
token = self.sanitize_token(token)
if token:
yield token
# Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
# stripping out all attributes not in ALLOWED_ATTRIBUTES. Style attributes
# are parsed, and a restricted set, specified by ALLOWED_CSS_PROPERTIES and
# ALLOWED_CSS_KEYWORDS, are allowed through. attributes in ATTR_VAL_IS_URI
# are scanned, and only URI schemes specified in ALLOWED_PROTOCOLS are
# allowed.
#
# sanitize_html('<script> do_nasty_stuff() </script>')
# => <script> do_nasty_stuff() </script>
# sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
# => <a>Click here for $100</a>
def sanitize_token(self, token):
# accommodate filters which use token_type differently
token_type = token["type"]
if token_type in ("StartTag", "EndTag", "EmptyTag"):
name = token["name"]
namespace = token["namespace"]
if ((namespace, name) in self.allowed_elements or
(namespace is None and
(namespaces["html"], name) in self.allowed_elements)):
return self.allowed_token(token)
else:
return self.disallowed_token(token)
elif token_type == "Comment":
pass
else:
return token
def allowed_token(self, token):
if "data" in token:
attrs = token["data"]
attr_names = set(attrs.keys())
# Remove forbidden attributes
for to_remove in (attr_names - self.allowed_attributes):
del token["data"][to_remove]
attr_names.remove(to_remove)
# Remove attributes with disallowed URL values
for attr in (attr_names & self.attr_val_is_uri):
assert attr in attrs
# I don't have a clue where this regexp comes from or why it matches those
# characters, nor why we call unescape. I just know it's always been here.
# Should you be worried by this comment in a sanitizer? Yes. On the other hand, all
# this will do is remove *more* than it otherwise would.
val_unescaped = re.sub("[`\x00-\x20\x7f-\xa0\\s]+", '',
unescape(attrs[attr])).lower()
# remove replacement characters from unescaped characters
val_unescaped = val_unescaped.replace("\ufffd", "")
try:
uri = urlparse.urlparse(val_unescaped)
except ValueError:
uri = None
del attrs[attr]
if uri and uri.scheme:
if uri.scheme not in self.allowed_protocols:
del attrs[attr]
if uri.scheme == 'data':
m = data_content_type.match(uri.path)
if not m:
del attrs[attr]
elif m.group('content_type') not in self.allowed_content_types:
del attrs[attr]
for attr in self.svg_attr_val_allows_ref:
if attr in attrs:
attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
' ',
unescape(attrs[attr]))
if (token["name"] in self.svg_allow_local_href and
(namespaces['xlink'], 'href') in attrs and re.search(r'^\s*[^#\s].*',
attrs[(namespaces['xlink'], 'href')])):
del attrs[(namespaces['xlink'], 'href')]
if (None, 'style') in attrs:
attrs[(None, 'style')] = self.sanitize_css(attrs[(None, 'style')])
token["data"] = attrs
return token
def disallowed_token(self, token):
token_type = token["type"]
if token_type == "EndTag":
token["data"] = "</%s>" % token["name"]
elif token["data"]:
assert token_type in ("StartTag", "EmptyTag")
attrs = []
for (ns, name), v in token["data"].items():
attrs.append(' %s="%s"' % (name if ns is None else "%s:%s" % (prefixes[ns], name), escape(v)))
token["data"] = "<%s%s>" % (token["name"], ''.join(attrs))
else:
token["data"] = "<%s>" % token["name"]
if token.get("selfClosing"):
token["data"] = token["data"][:-1] + "/>"
token["type"] = "Characters"
del token["name"]
return token
def sanitize_css(self, style):
# disallow urls
style = re.compile(r'url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
# gauntlet
if not re.match(r"""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return ''
if not re.match(r"^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
return ''
clean = []
for prop, value in re.findall(r"([-\w]+)\s*:\s*([^:;]*)", style):
if not value:
continue
if prop.lower() in self.allowed_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
'padding']:
for keyword in value.split():
if keyword not in self.allowed_css_keywords and \
not re.match(r"^(#[0-9a-fA-F]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword): # noqa
break
else:
clean.append(prop + ': ' + value + ';')
elif prop.lower() in self.allowed_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
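# A minimal usage sketch (assuming html5lib's standard parse/treewalker/serializer API):
#
#   import html5lib
#   from html5lib.serializer import HTMLSerializer
#
#   dom = html5lib.parse('<a href="javascript:alert(1)">hi</a><script>x()</script>')
#   walker = html5lib.getTreeWalker("etree")
#   clean = HTMLSerializer().render(Filter(walker(dom)))
#   # the javascript: href is dropped and the <script> element is escaped, not executed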
| kennethreitz/pipenv | pipenv/patched/notpip/_vendor/html5lib/filters/sanitizer.py | Python | mit | 26,266 |
# Copyright (C) 2010 Michael Foord
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# This software is licensed under the terms of the BSD license.
# http://www.voidspace.org.uk/python/license.shtml
from __future__ import with_statement
from _weakref import ref
__all__ = ['WeakSet']
__version__ = '1.0.0'
class _IterationGuard(object):
# This context manager registers itself in the current iterators of the
# weak container, such as to delay all removals until the context manager
# exits.
# This technique should be relatively thread-safe (since sets are).
def __init__(self, weakcontainer):
# Don't create cycles
self.weakcontainer = ref(weakcontainer)
def __enter__(self):
w = self.weakcontainer()
if w is not None:
w._iterating.add(self)
return self
def __exit__(self, e, t, b):
w = self.weakcontainer()
if w is not None:
s = w._iterating
s.remove(self)
if not s:
w._commit_removals()
class WeakSet(object):
def __init__(self, data=None):
self.data = set()
def _remove(item, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(item)
else:
self.data.discard(item)
self._remove = _remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
if data is not None:
self.update(data)
def _commit_removals(self):
l = self._pending_removals
discard = self.data.discard
while l:
discard(l.pop())
def __iter__(self):
with _IterationGuard(self):
for itemref in self.data:
item = itemref()
if item is not None:
yield item
def __len__(self):
return sum(x() is not None for x in self.data)
def __contains__(self, item):
return ref(item) in self.data
def __reduce__(self):
return (self.__class__, (list(self),),
getattr(self, '__dict__', None))
__hash__ = None
def add(self, item):
if self._pending_removals:
self._commit_removals()
self.data.add(ref(item, self._remove))
def clear(self):
if self._pending_removals:
self._commit_removals()
self.data.clear()
def copy(self):
return self.__class__(self)
def pop(self):
if self._pending_removals:
self._commit_removals()
while True:
try:
itemref = self.data.pop()
except KeyError:
raise KeyError('pop from empty WeakSet')
item = itemref()
if item is not None:
return item
def remove(self, item):
if self._pending_removals:
self._commit_removals()
self.data.remove(ref(item))
def discard(self, item):
if self._pending_removals:
self._commit_removals()
self.data.discard(ref(item))
def update(self, other):
if self._pending_removals:
self._commit_removals()
if isinstance(other, self.__class__):
self.data.update(other.data)
else:
for element in other:
self.add(element)
def __ior__(self, other):
self.update(other)
return self
# Helper functions for simple delegating methods.
def _apply(self, other, method):
if not isinstance(other, self.__class__):
other = self.__class__(other)
newdata = method(other.data)
newset = self.__class__()
newset.data = newdata
return newset
def difference(self, other):
return self._apply(other, self.data.difference)
__sub__ = difference
def difference_update(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.difference_update(ref(item) for item in other)
def __isub__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.difference_update(ref(item) for item in other)
return self
def intersection(self, other):
return self._apply(other, self.data.intersection)
__and__ = intersection
def intersection_update(self, other):
if self._pending_removals:
self._commit_removals()
self.data.intersection_update(ref(item) for item in other)
def __iand__(self, other):
if self._pending_removals:
self._commit_removals()
self.data.intersection_update(ref(item) for item in other)
return self
def issubset(self, other):
return self.data.issubset(ref(item) for item in other)
__lt__ = issubset
def __le__(self, other):
return self.data <= set(ref(item) for item in other)
def issuperset(self, other):
return self.data.issuperset(ref(item) for item in other)
__gt__ = issuperset
def __ge__(self, other):
return self.data >= set(ref(item) for item in other)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.data == set(ref(item) for item in other)
def symmetric_difference(self, other):
return self._apply(other, self.data.symmetric_difference)
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.symmetric_difference_update(ref(item) for item in other)
def __ixor__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.symmetric_difference_update(ref(item) for item in other)
return self
def union(self, other):
return self._apply(other, self.data.union)
__or__ = union
def isdisjoint(self, other):
return len(self.intersection(other)) == 0
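# Usage sketch: entries vanish once the last strong reference to an item is dropped.
#
#   class Token(object):
#       pass
#   t = Token()
#   s = WeakSet([t])
#   len(s)   # -> 1
#   del t    # on CPython the weak reference dies immediately
#   len(s)   # -> 0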
| spacetelescope/PyFITS | pyfits/_compat/_weakset_py2/__init__.py | Python | bsd-3-clause | 6,423 |
from parser.calc.base import calc
| tehmaze/parser | parser/calc/__init__.py | Python | mit | 34 |
from ..PulsePrimitives import *
from ..Compiler import compile_to_hardware
from ..PulseSequencePlotter import plot_pulse_files
from .helpers import create_cal_seqs
from itertools import product
import operator
from ..ControlFlow import *
from ..TdmInstructions import *
from functools import reduce
import numpy as np
from typing import Iterable, Union, Tuple
@qfunction
def qreset(qubits: Channels.LogicalChannel,
signVec: Tuple[bool],
measDelay: Union[int,float],
buf: Union[int,float],
reg_size: int = None,
TDM_map: Iterable[Union[int,bool]] = None) -> list:
"""
For each qubit, build the set of feedback actions to perform when
receiving a zero or one in the comparison register
Parameters
----------
qubits : Channels.LogicalChannel tuple
A hashable (immutable) tuple of qubits to reset
signVec : boolean tuple
        A hashable (immutable) tuple of binary values from the comparison
register indicating the measured state of each qubit in the register
before reset.
measDelay : int/float
Delay after measurement before performing the LOADCMP comparison with
value in the register (seconds)
buf : int/float
        Wait time after the comparison before the conditional feedback pulses (seconds)
reg_size : int, optional
Size of the register in number of qubits, including those not reset.
Default value is set to len(qubits).
    TDM_map : bit mask, optional
        Map of each qubit to a TDM digital input bit. Together with reg_size,
        this allows resetting only a subset of the qubit register (see Reset).
        Default: np.array(qN, qN-1, ..., q1) from MSB to LSB.
Returns
-------
seq : QGL.ControlFlow.Call
QGL sequence with the qreset calls
Examples
--------
>>> qreset((q1, q2), (0,1), 2e-6, 2e-6);
CALL(H:)
"""
    if not reg_size:
        reg_size = len(qubits)
    if TDM_map is None:
        TDM_map = np.arange(reg_size,0,-1)
FbGates = []
for ct, q in enumerate(qubits):
if signVec[ct] == 0:
FbGates.append([gate(q) for gate in [Id, X]])
else: # inverted logic
FbGates.append([gate(q) for gate in [X, Id]])
FbSeq = [reduce(operator.mul, x) for x in product(*FbGates)]
# load register
seq = [Id(qubits[0], measDelay), qwait(kind='CMP'), Id(qubits[0], buf)]
# create a branch for each possible comparison value
for ct in range(2**reg_size):
# duplicate branches for the irrelevant results
# if reg_size > len(TDM_map)
meas_result = [(ct & TDM_bit)>0 for TDM_bit in 2**(np.array(TDM_map)-1)]
branch_idx = sum([t*2**(len(qubits)-ind-1)
for ind,t in enumerate((meas_result))])
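        # Worked example with reg_size=2, the default TDM_map=[2, 1] and
        # signVec=(0, 0): ct=2 gives meas_result=[True, False] (qubits[0] read
        # as 1, qubits[1] as 0), so branch_idx=2 and FbSeq[2] applies X to
        # qubits[0] and Id to qubits[1].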
seq += qif(ct, [FbSeq[branch_idx]])
return seq
def Reset(qubits: Iterable[Channels.LogicalChannel],
measDelay: Union[int,float]=1e-6,
signVec: Tuple[bool] = None,
doubleRound: bool = True,
buf: Union[int,float] = 20e-9,
showPlot: bool = False,
measChans: Channels.LogicalChannel = None,
add_cals: bool = True,
calRepeats: int = 2,
reg_size: int = None,
TDM_map: Iterable[Union[int,bool]]=None) -> str:
"""
    Preparation, simultaneous reset, and measurement of an arbitrary number
of qubits
Parameters
----------
qubits : Channels.LogicalChannel tuple
A hashable (immutable) tuple of qubits to reset
measDelay : int/float, optional
        Delay after measurement before performing the LOADCMP comparison with
value in the register (seconds)
signVec : boolean tuple, optional
        Conditions for feedback. Tuple of 0 (flip if signal is above threshold)
        and 1 (flip if below) for each qubit. Default = 0 for all qubits.
doubleRound : boolean, optional
If true, do two rounds of feedback
showPlot : boolean, optional
Whether to plot
measChans : LogicalChannel tuple, optional
A hashable (immutable) tuple of qubits to measured.
add_cals : boolean, optional
Whether to append calibration pulses to the end of the sequence
calRepeats : int, optional
How many times to repeat calibration scalings (default 2)
reg_size : int, optional
Size of the register in number of qubits, including those not reset.
Default value is set to len(qubits).
    TDM_map : bit mask, optional
        Map of each qubit to a TDM digital input bit. Together with reg_size,
        this allows resetting only a subset of the qubit register.
        Default: np.array(qN, qN-1, ..., q1) from MSB to LSB.
Returns
-------
metafile : string
Path to a json metafile with details about the sequences and paths to
compiled machine files
Examples
--------
>>> Reset((q1, q2));
Compiled 12 sequences.
>>> mf
'/path/to/exp/exp-meta.json'
"""
if measChans is None:
measChans = qubits
if signVec == None:
signVec = (0, ) * len(qubits)
seqs = [prep + [qreset(qubits,
signVec,
measDelay,
buf,
reg_size=reg_size,
TDM_map=TDM_map)]
for prep in create_cal_seqs(qubits, 1)]
measBlock = reduce(operator.mul, [MEAS(q) for q in qubits])
if doubleRound:
for seq in seqs:
seq += [measBlock]
seq.append(qreset(qubits,
signVec,
measDelay,
buf,
reg_size=reg_size,
TDM_map=TDM_map))
# add final measurement
for seq in seqs:
seq += [measBlock, Id(qubits[0], measDelay), qwait(kind='CMP')]
if add_cals:
seqs += create_cal_seqs(qubits,
calRepeats,
measChans=measChans,
waitcmp=True)
metafile = compile_to_hardware(seqs, 'Reset/Reset')
if showPlot:
plot_pulse_files(metafile)
return metafile
# do not make it a subroutine for now
def BitFlip3(data_qs: Iterable[Channels.LogicalChannel],
ancilla_qs: Iterable[Channels.LogicalChannel],
theta: Union[int,float] = None,
phi: Union[int,float] = None,
nrounds: int = 1,
meas_delay: Union[int,float] = 1e-6,
add_cals: bool = False,
calRepeats: int = 2) -> str:
"""
Encoding on 3-qubit bit-flip code, followed by n rounds of syndrome
detection, and final correction using the n results.
Parameters
----------
data_qs : Channels.LogicalChannel tuple
A hashable (immutable) tuple of qubits of the 3 code qubits
ancilla_qs : Channels.LogicalChannel tuple
A hashable (immutable) tuple of qubits of the 2 syndrome qubits
theta : int/float, optional
Longitudinal rotation angle for the encoded state (radians).
Default = None.
phi : int/float, optional
Azimuthal rotation angle for the encoded state (radians).
Default = None.
nrounds: int, optional
Number of consecutive measurements
measDelay : int/float, optional
Delay between syndrome check rounds (seconds)
add_cals : boolean, optional
Whether to append calibration pulses to the end of the sequence
calRepeats : int, optional
How many times to repeat calibration scalings (default 2)
Returns
-------
metafile : string
Path to a json metafile with details about the sequences and paths to
compiled machine files
Examples
--------
>>> mf = BitFlip3((q1, q2, q3), (q4, q5));
Compiled 12 sequences.
>>> mf
'/path/to/exp/exp-meta.json'
"""
if len(data_qs) != 3 or len(ancilla_qs) != 2:
raise Exception("Wrong number of qubits")
seqs = [
DecodeSetRounds(1,0,nrounds),
Invalidate(10, 2*nrounds),
Invalidate(11, 0x1)]
# encode single-qubit state into 3 qubits
if theta and phi:
seqs+=[Utheta(data_qs[1], theta, phi),
CNOT(data_qs[1], data_qs[0]),
CNOT(data_qs[1], data_qs[2])]
# multiple rounds of syndrome measurements
for n in range(nrounds):
seqs+= [CNOT(data_qs[0],ancilla_qs[0])*CNOT(data_qs[1],ancilla_qs[1])],
seqs+= [CNOT(data_qs[1], ancilla_qs[0])*CNOT(data_qs[2],ancilla_qs[1])],
seqs+= [MEASA(ancilla_qs[0], maddr=(10, 2*n))*
MEASA(ancilla_qs[1], maddr=(10, 2*n+1)),
Id(ancilla_qs[0], meas_delay),
MEAS(data_qs[0], amp=0)*
MEAS(data_qs[1], amp=0)*
MEAS(data_qs[2], amp=0)]
# virtual msmt's just to keep the number of segments
# uniform across digitizer channels
seqs+=Decode(10, 11, 2*nrounds)
seqs+=qwait("RAM",11)
seqs+=[MEAS(data_qs[0])*
MEAS(data_qs[1])*
MEAS(data_qs[2])*
MEAS(ancilla_qs[0], amp=0)*
MEAS(ancilla_qs[1], amp=0)]
# virtual msmt's
# apply corrective pulses depending on the decoder result
FbGates = []
for q in data_qs:
FbGates.append([gate(q) for gate in [Id, X]])
FbSeq = [reduce(operator.mul, x) for x in product(*FbGates)]
for k in range(8):
seqs += qif(k, [FbSeq[k]])
if add_cals:
seqs += create_cal_seqs(qubits,
calRepeats)
metafile = compile_to_hardware(seqs, 'BitFlip/BitFlip', tdm_seq=True)
return metafile
def MajorityVoteN(qubits: Iterable[Channels.LogicalChannel],
nrounds: int,
prep: Iterable[bool] = [],
meas_delay: float = 1e-6,
add_cals: bool = False,
calRepeats: int = 2) -> str:
"""
Majority vote across multiple measurement results (same or different qubits)
Parameters
----------
qubits : Channels.LogicalChannel tuple
A hashable (immutable) tuple of qubits for majority vote
nrounds: int
Number of consecutive measurements
prep : boolean iterable, optional
Array of binary values mapping X(q) pulses to the list of qubits
        provided. Ex: (q1,q2), prep=(1,0) -> would apply a pi pulse to q1
before the majority vote measurement. Default = []
measDelay : int/float, optional
Delay between syndrome check rounds (seconds)
add_cals : boolean, optional
Whether to append calibration pulses to the end of the sequence
calRepeats : int, optional
How many times to repeat calibration scalings (default 2)
Returns
-------
metafile : string
Path to a json metafile with details about the sequences and paths to
compiled machine files
Examples
--------
>>> mf = MajorityVoteN((q1, q2, q3), 10);
Compiled 1 sequences.
o INVALIDATE(channel=None, addr=0x1, mask=0x0)
o WRITEADDR(channel=None, addr=0x1, value=0xfffff)
MAJORITYMASK(in_addr=1, out_addr=0)
o INVALIDATE(channel=None, addr=0xa, mask=0xfffff)
o INVALIDATE(channel=None, addr=0xb, mask=0x1)
MAJORITY(in_addr=a, out_addr=b)
>>> mf
'/path/to/exp/exp-meta.json'
"""
nqubits = len(qubits)
seqs = [MajorityMask(1, 0, nrounds*nqubits),
Invalidate(10, nrounds*nqubits),
Invalidate(11, 1)]
if prep:
seqs += [reduce(operator.mul,
[X(q) for n,q in enumerate(qubits) if prep[n]])]
for n in range(nrounds):
seqs += [reduce(operator.mul,
[MEASA(q, (10, nqubits*n+m)) for m,q in enumerate(qubits)]),
Id(qubits[0],meas_delay)]
seqs+=MajorityVote(10,11, nrounds*nqubits)
seqs+=qwait("RAM", 11)
seqs+=[Id(qubits[0],100e-9)]
seqs+=qif(1,[X(qubits[0])]) # placeholder for any conditional operation
seqs=[seqs]
if add_cals:
seqs += create_cal_seqs(qubits,
calRepeats)
metafile = compile_to_hardware(seqs,
'MajorityVote/MajorityVote',
tdm_seq=True)
return metafile
| BBN-Q/QGL | QGL/BasicSequences/Feedback.py | Python | apache-2.0 | 12,237 |
# coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test functions relating to roles and actions."""
from core.domain import role_services
from core.tests import test_utils
import feconf
class RoleDomainUnitTests(test_utils.GenericTestBase):
"""Tests for PARENT_ROLES and ROLE_ACTIONS."""
PARENT_ROLES = role_services.PARENT_ROLES
ACTIONS = role_services.ROLE_ACTIONS
def test_dicts_have_same_keys(self):
"""Test that PARENT_ROLES and ROLE_ACTIONS have same keys."""
self.assertEqual(
set(self.PARENT_ROLES.keys()), set(self.ACTIONS.keys()))
def test_dicts_have_list_value(self):
"""Test that PARENT_ROLES and ROLE_ACTIONS, both have list as value
to all the keys.
"""
for role_name in self.PARENT_ROLES:
self.assertTrue(isinstance(self.PARENT_ROLES[role_name], list))
for role_name in self.ACTIONS:
self.assertTrue(isinstance(self.ACTIONS[role_name], list))
def test_every_dict_entry_is_string(self):
"""Test that all keys and values(elements in lists) in PARENT_ROLES
and ROLE_ACTIONS are string.
"""
for role_name in self.PARENT_ROLES:
self.assertTrue(isinstance(role_name, str))
for role in self.PARENT_ROLES[role_name]:
self.assertTrue(isinstance(role, str))
for role_name in self.ACTIONS:
self.assertTrue(isinstance(role_name, str))
for action_name in self.ACTIONS[role_name]:
self.assertTrue(isinstance(action_name, str))
def test_valid_parents(self):
"""Test that all the roles present in value list for any key in
PARENT_ROLES are valid(i.e there exists a key with that name).
"""
valid_roles = self.PARENT_ROLES.keys()
for role_name in self.PARENT_ROLES:
for role in self.PARENT_ROLES[role_name]:
self.assertIn(role, valid_roles)
def test_that_role_graph_has_no_directed_cycles(self):
"""Visits each role and checks that there is no cycle from that
role.
"""
visited = set()
def check_cycle(source, roles):
"""Checks that source is not reachable from any of the given roles.
Args:
source: str. Role that should not be reachable via any path
from roles.
roles: list(str). List of roles that should not be able to
reach source.
"""
for role in roles:
self.assertNotEqual(role, source)
if role not in visited:
visited.add(role)
check_cycle(source, self.PARENT_ROLES[role])
for role_name in self.PARENT_ROLES:
visited = set()
check_cycle(role_name, self.PARENT_ROLES[role_name])
def test_get_all_actions(self):
"""Test that get_all_actions works as expected."""
# Case when wrong input is given.
with self.assertRaisesRegexp(
Exception, 'Role TEST_ROLE does not exist.'):
role_services.get_all_actions('TEST_ROLE')
# Case for collection editor is checked.
collection_editor_actions = list(
set(role_services.ROLE_ACTIONS[feconf.ROLE_ID_EXPLORATION_EDITOR]) |
set(role_services.ROLE_ACTIONS[feconf.ROLE_ID_BANNED_USER]) |
set(role_services.ROLE_ACTIONS[feconf.ROLE_ID_GUEST]) |
set(role_services.ROLE_ACTIONS[feconf.ROLE_ID_COLLECTION_EDITOR]))
        # Sets are compared because element order does not matter.
self.assertEqual(set(collection_editor_actions),
set(role_services.get_all_actions(
feconf.ROLE_ID_COLLECTION_EDITOR)))
| souravbadami/oppia | core/domain/role_services_test.py | Python | apache-2.0 | 4,392 |
from django.conf.urls import patterns, include, url
from django.conf import settings
# Note: importing these as 'contacts.profile' would cause a name clash, since 'contacts' is also a module
from profile import ProfileView
from contacts import ContactsView
from authen import Authenticate
strid = settings.CONTACT_URL['strid']
user = settings.CONTACT_URL['user']
contact = settings.CONTACT_URL['contact']
auth = settings.CONTACT_URL['auth']
urlpatterns = patterns('',
url(r'^api/'+auth+'$', Authenticate.as_view()),
url(r'^api/(?P<'+strid+r'>\w{16})/$', ProfileView.as_view()),
url(r'^api/(?P<'+strid+r'>\w{16})/(?P<'+contact+r'>\d+)/$', ContactsView.as_view()),
url(r'^(?P<'+user+r'>\w{5,18})/(?P<'+strid+r'>\w{16})/$', ProfileView.as_view()),
url(r'^(?P<'+user+r'>\w{5,18})/(?P<'+strid+r'>\w{16})/(?P<'+contact+r'>\d+)/$', ContactsView.as_view()),
)
| sharehub/DBRest | dbrest/contacts/urls.py | Python | mit | 858 |
# Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Any, Optional
from UM.Application import Application
from UM.Decorators import override
from UM.Settings.Interfaces import PropertyEvaluationContext
from UM.Settings.SettingInstance import InstanceState
from .CuraContainerStack import CuraContainerStack
class PerObjectContainerStack(CuraContainerStack):
def isDirty(self):
# This stack should never be auto saved, so always return that there is nothing to save.
return False
@override(CuraContainerStack)
def getProperty(self, key: str, property_name: str, context: Optional[PropertyEvaluationContext] = None) -> Any:
if context is None:
context = PropertyEvaluationContext()
context.pushContainer(self)
global_stack = Application.getInstance().getGlobalContainerStack()
if not global_stack:
return None
# Return the user defined value if present, otherwise, evaluate the value according to the default routine.
if self.getContainer(0).hasProperty(key, property_name):
if self.getContainer(0).getProperty(key, "state") == InstanceState.User:
result = super().getProperty(key, property_name, context)
context.popContainer()
return result
# Handle the "limit_to_extruder" property.
limit_to_extruder = super().getProperty(key, "limit_to_extruder", context)
if limit_to_extruder is not None:
limit_to_extruder = str(limit_to_extruder)
# if this stack has the limit_to_extruder "not overridden", use the original limit_to_extruder as the current
# limit_to_extruder, so the values retrieved will be from the perspective of the original limit_to_extruder
# stack.
if limit_to_extruder == "-1":
if "original_limit_to_extruder" in context.context:
limit_to_extruder = context.context["original_limit_to_extruder"]
if limit_to_extruder is not None and limit_to_extruder != "-1" and limit_to_extruder in global_stack.extruders:
# set the original limit_to_extruder if this is the first stack that has a non-overridden limit_to_extruder
if "original_limit_to_extruder" not in context.context:
context.context["original_limit_to_extruder"] = limit_to_extruder
if super().getProperty(key, "settable_per_extruder", context):
result = global_stack.extruders[str(limit_to_extruder)].getProperty(key, property_name, context)
if result is not None:
context.popContainer()
return result
result = super().getProperty(key, property_name, context)
context.popContainer()
return result
@override(CuraContainerStack)
def setNextStack(self, stack: CuraContainerStack) -> None:
super().setNextStack(stack)
# trigger signal to re-evaluate all default settings
for key in self.getContainer(0).getAllKeys():
# only evaluate default settings
if self.getContainer(0).getProperty(key, "state") != InstanceState.Default:
continue
self._collectPropertyChanges(key, "value")
self._emitCollectedPropertyChanges()
| thopiekar/Cura | cura/Settings/PerObjectContainerStack.py | Python | lgpl-3.0 | 3,365 |
from bottle import template
print template('form', {})
| alixedi/recline | test.py | Python | mit | 56 |
"""
Tools for formulating interpretable/implementable MDPs as mathematical optimization
problems (MILP). This solves the models as described in:
Petrik, M., & Luss, R. (2016). Interpretable Policies for Dynamic Product Recommendations. In Uncertainty in Artificial Intelligence (UAI).
Usage:
- Use get_milp_model to create "model.mod" MILP model to run with the data output.
- create_opl_data can be used to transform an interpretable MDP data to a MILP
- solve_opl solves the model
"""
import numpy as np
import subprocess
import json
import io
#%% Create OPL data file
import random
def create_opl_data(name, mdp, p0, observations, discount, filename="milp_output.dat",
action_decomposition = None):
"""
Creates an OPL representation of the interpretable MDP. OPL is a modelling
language included with CPLEX optimizer. This representation can be used
to conveniently solve LP and MILP formulations of MDPs
Parameters
----------
name : string
Name of the problem
mdp : craam.MDP
MDP specification with no robustness
observations : array-like
Observation index for each state
discount : float
Discount factor
filename : string, optional
Name of the opl data output file
If None, the outputs the string
action_decomposition : list of lists, optional
If actions can be decomposed then for each action it lists the indexes
of decomposed actions.
Returns
-------
ident : string
Identifier of the problem (to check that the solution is for the same)
string_rep : string
Only if provided with filename=None, also returns OPL string representation
"""
ident = str(random.randint(0,1e6))
if filename is not None:
f = open(filename,"w")
else:
f = io.StringIO()
try:
f.write('problemName="'+name+'";\n')
f.write('discount='+str(discount)+';\n')
f.write('upper_bound='+str(1/discount)+';\n')
f.write('generatedID="' + ident + '";\n')
f.write('initial = {')
first = True
for s in range(mdp.state_count()):
if p0[s] > 0:
if first:
f.write('<')
first = False
else:
f.write(',<')
f.write(str(s) + ',' + str(p0[s]) + '>')
f.write('};\n')
f.write('states = {')
first = True
for s in range(mdp.state_count()):
if first:
first = False
else:
f.write(',')
f.write(str(s));
f.write('};\n')
f.write('samples = {')
first = True
for s in range(mdp.state_count()):
for a in range(mdp.action_count(s)):
for ts, prob, rew in zip(mdp.get_toids(s,a), mdp.get_probabilities(s,a), mdp.get_rewards(s,a)):
if first:
f.write('<')
first = False
else:
f.write(',<')
f.write(','.join((str(s),str(a),str(ts),str(prob),str(rew))) + '>')
f.write('};\n')
f.write('observation_states = {')
first = True
for s in range(mdp.state_count()):
if first:
f.write('<')
first = False
else:
f.write(',<')
f.write(str(observations[s]) + ',' + str(s) + '>')
f.write('};\n')
if action_decomposition is not None:
assert len(action_decomposition) == mdp.action_count(1)
f.write('decomposed_actions = {')
first = True
for i,ad in enumerate(action_decomposition):
if first:
f.write('<')
first = False
else:
f.write(',<')
f.write(str(i)+',')
f.write(','.join(str(a) for a in ad))
f.write('>')
f.write('};\n')
if filename is not None:
return ident
else:
return ident, f.getvalue()
finally:
f.close()
def solve_opl(model='milp.mod', data="milp_output.dat", result="solution.json", \
ident = None, oplrun_exec="oplrun", verbose=False):
"""
Solves the OPL formulation constructed by create_opl_data.
All files must be in the same directory.
Parameters
----------
model : string, optional
Name of the model to run
data : string
Name of the data file
result : string, optional
Name of the output file
ident : string, optional
Solution identifier to make sure that the solution is for the correct
problem
oplrun_exec : string, optional
Path to the oplrun executable "CPLEX"
Note
----
One may also need to set the path to the OPL libraries (e,g., libicuuc.so),
by using:
oplrun_exec = 'export LD_LIBRARY_PATH=/opt/ibm/ILOG/CPLEX_Studio_Community1263/opl/bin/x86-64_linux/; /opt/ibm/ILOG/CPLEX_Studio_Community1263/opl/bin/x86-64_linux/oplrun'
The method requires a shell to be present
Returns
-------
obj : float
Objective value
psi : array
Interpretable policy, action index for each observation
"""
try:
command = [oplrun_exec, model, data]
if verbose:
print("Executing:", " ".join(command))
stdout = subprocess.check_output(" ".join(command), shell=True)
if verbose:
print("Output")
print(stdout.decode('utf-8'))
except subprocess.CalledProcessError as e:
print('OPL failed with:')
print(e.output.decode('utf-8'))
raise e
with open("solution.json", mode='r') as fileinput:
datainput = fileinput.read()
d = json.JSONDecoder().decode(datainput)
if ident is not None:
ident_sol = d['GeneratedId']
assert ident == ident_sol, "Solution identifier does not match problem identifier"
obj = d['Objective']
oc = d['ObservCount']
ac = d['ActionCount']
psi = d['psi']
psi = np.reshape(psi,(oc,ac))
psi = psi.argmax(1)
return obj, psi
def get_milp_model():
"""
Returns a string definition a MILP model that work with the generated OPL data.
Save the output as model.mod
"""
return \
r"""
/********************************************************************
* MILP formulation for interpretable MDPs
*********************************************************************/
execute {cplex.tilim = 180;};
// Used to identify the problem
string problemName = ...;
// Used to identify the problem run
string generatedID = ...;
float upper_bound = ...;
// The discount factor
float discount = ...;
// Sample definition
tuple Sample {
int sourceid;
int action;
int targetid;
float probability;
float reward;
};
tuple Initial{
int stateid;
float probability;
};
tuple Observation {
int observid;
int stateid;
};
{Initial} initial = ...;
{Sample} samples = ...;
{Observation} observation_states = ...;
{int} states = ...; //{s.sourceid | s in samples};
{int} actions = {s.action | s in samples};
{int} observations = {os.observid | os in observation_states};
// Occupancy frequency
dvar float+ u[states][actions];
// Interpretable policy
//dvar int psi[observations][actions] in 0..1;
dvar boolean psi[observations][actions];
dexpr float reward[s in states][a in actions] =
sum(ss in samples : ss.sourceid == s && ss.action == a) ss.reward * ss.probability;
dexpr float d[s in states] = sum(a in actions) u[s][a];
dexpr float initdist[s in states] = sum(ss in initial : ss.stateid == s) ss.probability;
dexpr float objective = sum(s in states, a in actions) u[s][a] * reward[s][a];
maximize objective;
subject to {
forall(s in states){
d[s] == initdist[s] + sum(ss in samples : ss.targetid == s)
(discount * u[ss.sourceid][ss.action] * ss.probability);
};
forall(o in observations){
forall(os in observation_states : os.observid == o){
forall(a in actions){
u[os.stateid][a] <= upper_bound * psi[o][a];
upper_bound*(psi[o][a] -1) + d[os.stateid] <= u[os.stateid][a];
};
};
};
forall(o in observations){
sum(a in actions) psi[o][a] == 1;
};
};
execute{
var result = new IloOplOutputFile("solution.json");
var counter = 0;
result.write("{");
result.write("\"ProblemName\" : \"" + problemName + "\",\n");
result.write("\"GeneratedId\" : \"" + generatedID + "\",\n");
result.write("\"Algorithm\" : ");
result.write("\"MILP\",\n");
result.write("\"Objective\" : ");
var obj = "" + objective;
//prevent a trailing dot
if(obj.indexOf(".") == obj.length-1) obj = obj + "0";
result.write(obj);
result.write(",\n");
result.write("\"StateCount\" : ");
result.write(thisOplModel.states.size);
result.write(",\n");
result.write("\"ObservCount\" : ");
result.write(thisOplModel.observations.size);
result.write(",\n");
result.write("\"ActionCount\" : ");
result.write(thisOplModel.actions.size);
result.write(",\n");
result.write("\"psi\" : [");
for(var o in thisOplModel.observations){
for(var a in thisOplModel.actions){
if(counter > 0) result.write(",");
var val = "" + thisOplModel.psi[o][a];
//prevent trailing dot
if(val.indexOf(".") == val.length - 1) val = val + "0";
result.write(val);
counter = counter + 1;
}
}
result.write("]");
result.write("}\n");
result.close();
}
"""
| marekpetrik/RAAM | raam/implementable.py | Python | mit | 11,043 |
"""
kombu.utils.eventio
===================
Evented IO support for multiple platforms.
"""
from __future__ import absolute_import
import errno
import select as __select__
import socket
from numbers import Integral
_selectf = __select__.select
_selecterr = __select__.error
epoll = getattr(__select__, 'epoll', None)
kqueue = getattr(__select__, 'kqueue', None)
kevent = getattr(__select__, 'kevent', None)
KQ_EV_ADD = getattr(__select__, 'KQ_EV_ADD', 1)
KQ_EV_DELETE = getattr(__select__, 'KQ_EV_DELETE', 2)
KQ_EV_ENABLE = getattr(__select__, 'KQ_EV_ENABLE', 4)
KQ_EV_CLEAR = getattr(__select__, 'KQ_EV_CLEAR', 32)
KQ_EV_ERROR = getattr(__select__, 'KQ_EV_ERROR', 16384)
KQ_EV_EOF = getattr(__select__, 'KQ_EV_EOF', 32768)
KQ_FILTER_READ = getattr(__select__, 'KQ_FILTER_READ', -1)
KQ_FILTER_WRITE = getattr(__select__, 'KQ_FILTER_WRITE', -2)
KQ_FILTER_AIO = getattr(__select__, 'KQ_FILTER_AIO', -3)
KQ_FILTER_VNODE = getattr(__select__, 'KQ_FILTER_VNODE', -4)
KQ_FILTER_PROC = getattr(__select__, 'KQ_FILTER_PROC', -5)
KQ_FILTER_SIGNAL = getattr(__select__, 'KQ_FILTER_SIGNAL', -6)
KQ_FILTER_TIMER = getattr(__select__, 'KQ_FILTER_TIMER', -7)
KQ_NOTE_LOWAT = getattr(__select__, 'KQ_NOTE_LOWAT', 1)
KQ_NOTE_DELETE = getattr(__select__, 'KQ_NOTE_DELETE', 1)
KQ_NOTE_WRITE = getattr(__select__, 'KQ_NOTE_WRITE', 2)
KQ_NOTE_EXTEND = getattr(__select__, 'KQ_NOTE_EXTEND', 4)
KQ_NOTE_ATTRIB = getattr(__select__, 'KQ_NOTE_ATTRIB', 8)
KQ_NOTE_LINK = getattr(__select__, 'KQ_NOTE_LINK', 16)
KQ_NOTE_RENAME = getattr(__select__, 'KQ_NOTE_RENAME', 32)
KQ_NOTE_REVOKE = getattr(__select__, 'KQ_NOTE_REVOKE', 64)
from kombu.syn import detect_environment
from . import fileno
from .compat import get_errno
__all__ = ['poll']
READ = POLL_READ = 0x001
WRITE = POLL_WRITE = 0x004
ERR = POLL_ERR = 0x008 | 0x010
try:
SELECT_BAD_FD = set((errno.EBADF, errno.WSAENOTSOCK))
except AttributeError:
SELECT_BAD_FD = set((errno.EBADF,))
class Poller(object):
def poll(self, timeout):
try:
return self._poll(timeout)
except Exception as exc:
if get_errno(exc) != errno.EINTR:
raise
class _epoll(Poller):
def __init__(self):
self._epoll = epoll()
def register(self, fd, events):
try:
self._epoll.register(fd, events)
except Exception as exc:
if get_errno(exc) != errno.EEXIST:
raise
def unregister(self, fd):
try:
self._epoll.unregister(fd)
except (socket.error, ValueError, KeyError, TypeError):
pass
except (IOError, OSError) as exc:
if get_errno(exc) not in (errno.ENOENT, errno.EPERM):
raise
def _poll(self, timeout):
return self._epoll.poll(timeout if timeout is not None else -1)
def close(self):
self._epoll.close()
class _kqueue(Poller):
w_fflags = (KQ_NOTE_WRITE | KQ_NOTE_EXTEND |
KQ_NOTE_ATTRIB | KQ_NOTE_DELETE)
def __init__(self):
self._kqueue = kqueue()
self._active = {}
self.on_file_change = None
self._kcontrol = self._kqueue.control
def register(self, fd, events):
self._control(fd, events, KQ_EV_ADD)
self._active[fd] = events
def unregister(self, fd):
events = self._active.pop(fd, None)
if events:
try:
self._control(fd, events, KQ_EV_DELETE)
except socket.error:
pass
def watch_file(self, fd):
ev = kevent(fd,
filter=KQ_FILTER_VNODE,
flags=KQ_EV_ADD | KQ_EV_ENABLE | KQ_EV_CLEAR,
fflags=self.w_fflags)
self._kcontrol([ev], 0)
def unwatch_file(self, fd):
ev = kevent(fd,
filter=KQ_FILTER_VNODE,
flags=KQ_EV_DELETE,
fflags=self.w_fflags)
self._kcontrol([ev], 0)
def _control(self, fd, events, flags):
if not events:
return
kevents = []
if events & WRITE:
kevents.append(kevent(fd,
filter=KQ_FILTER_WRITE,
flags=flags))
if not kevents or events & READ:
kevents.append(
kevent(fd, filter=KQ_FILTER_READ, flags=flags),
)
control = self._kcontrol
for e in kevents:
try:
control([e], 0)
except ValueError:
pass
def _poll(self, timeout):
kevents = self._kcontrol(None, 1000, timeout)
events, file_changes = {}, []
for k in kevents:
fd = k.ident
if k.filter == KQ_FILTER_READ:
events[fd] = events.get(fd, 0) | READ
elif k.filter == KQ_FILTER_WRITE:
if k.flags & KQ_EV_EOF:
events[fd] = ERR
else:
events[fd] = events.get(fd, 0) | WRITE
elif k.filter == KQ_EV_ERROR:
events[fd] = events.get(fd, 0) | ERR
elif k.filter == KQ_FILTER_VNODE:
if k.fflags & KQ_NOTE_DELETE:
self.unregister(fd)
file_changes.append(k)
if file_changes:
self.on_file_change(file_changes)
return list(events.items())
def close(self):
self._kqueue.close()
class _select(Poller):
def __init__(self):
self._all = (self._rfd,
self._wfd,
self._efd) = set(), set(), set()
def register(self, fd, events):
fd = fileno(fd)
if events & ERR:
self._efd.add(fd)
if events & WRITE:
self._wfd.add(fd)
if events & READ:
self._rfd.add(fd)
def _remove_bad(self):
for fd in self._rfd | self._wfd | self._efd:
try:
_selectf([fd], [], [], 0)
except (_selecterr, socket.error) as exc:
if get_errno(exc) in SELECT_BAD_FD:
self.unregister(fd)
def unregister(self, fd):
try:
fd = fileno(fd)
except socket.error as exc:
# we don't know the previous fd of this object
# but it will be removed by the next poll iteration.
if get_errno(exc) in SELECT_BAD_FD:
return
raise
self._rfd.discard(fd)
self._wfd.discard(fd)
self._efd.discard(fd)
def _poll(self, timeout):
try:
read, write, error = _selectf(
self._rfd, self._wfd, self._efd, timeout,
)
except (_selecterr, socket.error) as exc:
if get_errno(exc) == errno.EINTR:
return
elif get_errno(exc) in SELECT_BAD_FD:
return self._remove_bad()
raise
events = {}
for fd in read:
if not isinstance(fd, Integral):
fd = fd.fileno()
events[fd] = events.get(fd, 0) | READ
for fd in write:
if not isinstance(fd, Integral):
fd = fd.fileno()
events[fd] = events.get(fd, 0) | WRITE
for fd in error:
if not isinstance(fd, Integral):
fd = fd.fileno()
events[fd] = events.get(fd, 0) | ERR
return list(events.items())
def close(self):
self._rfd.clear()
self._wfd.clear()
self._efd.clear()
def _get_poller():
if detect_environment() != 'default':
# greenlet
return _select
elif epoll:
# Py2.6+ Linux
return _epoll
elif kqueue:
# Py2.6+ on BSD / Darwin
return _select # was: _kqueue
else:
return _select
def poll(*args, **kwargs):
return _get_poller()(*args, **kwargs)
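# Usage sketch: register a socket (or any object with fileno()) and wait for
# readability with whichever backend _get_poller() selected.
#
#   poller = poll()
#   poller.register(sock, READ | ERR)
#   for fd, event in (poller.poll(1.0) or []):
#       if event & READ:
#           pass  # fd is ready to be read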
| sunze/py_flask | venv/lib/python3.4/site-packages/kombu/utils/eventio.py | Python | mit | 7,868 |
#import uncertainties
#from uncertainties import ufloat
import math
import numpy
import pylab
from scipy.optimize import curve_fit
import scipy.stats
import uncertainties
from uncertainties import ufloat, unumpy
import matplotlib.pyplot as plt
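# Fit model: I(x) = I0*(exp(a*(V - x)) - 1), an exponential I-V characteristic;
# the parameter I sets the current scale and V is the voltage at which the
# model current crosses zero.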
def ff(x, a, I, V):
return I*(numpy.exp(a*(V-x))-1)
bucket = numpy.linspace(0,2700, 1000)
#ARANCIO
f, V, dV, I, dI = pylab.loadtxt("C:\\Users\\marco\\Desktop\\Laboratorio3\\Fotoelettrico\\datiArancio.txt", unpack = True)
popt = (.1, -2.0, 300.0)
pars, cov = curve_fit(ff, V, I, popt, dI, absolute_sigma = "true")
#plot
x1 = plt.subplot(321)
pylab.xlabel("V (mV)", size = "14")
pylab.ylabel("I (nA)", size = "14")
pylab.xlim(0, 2700)
pylab.grid(color = "gray")
pylab.errorbar(V, I, dI, dV, color = 'blue', marker = 'o', linestyle = '', label = "Arancio")
pylab.legend(loc = "upper-right")
pylab.plot(bucket, ff(bucket, pars[0], pars[1], pars[2]), color = "red")
#GIALLO
f, V, dV, I, dI = pylab.loadtxt("C:\\Users\\marco\\Desktop\\Laboratorio3\\Fotoelettrico\\datiGialli.txt", unpack = True)
popt = (.1, -2.0, 300.0)
pars, cov = curve_fit(ff, V, I, popt, dI, absolute_sigma = "true")
#plot
x2 = plt.subplot(322)
pylab.xlabel("V (mV)", size = "14")
pylab.ylabel("I (nA)", size = "14")
pylab.xlim(0, 2700)
pylab.grid(color = "gray")
pylab.errorbar(V, I, dI, dV, color = 'blue', marker = 'o', linestyle = '', label = "Giallo")
pylab.legend(loc = "upper-right")
pylab.plot(bucket, ff(bucket, pars[0], pars[1], pars[2]), color = "red")
#VERDE
f, V, dV, I, dI = pylab.loadtxt("C:\\Users\\marco\\Desktop\\Laboratorio3\\Fotoelettrico\\datiVerdi.txt", unpack = True)
popt = (.1, -2.0, 300.0)
pars, cov = curve_fit(ff, V, I, popt, dI, absolute_sigma = "true")
#plot
x3 = plt.subplot(323)
pylab.xlabel("V (mV)", size = "14")
pylab.ylabel("I (nA)", size = "14")
pylab.xlim(0, 2700)
pylab.grid(color = "gray")
pylab.errorbar(V, I, dI, dV, color = 'blue', marker = 'o', linestyle = '', label = "Verde")
pylab.legend(loc = "upper-right")
pylab.plot(bucket, ff(bucket, pars[0], pars[1], pars[2]), color = "red")
#VERDE-AZZURRO
f, V, dV, I, dI = pylab.loadtxt("C:\\Users\\marco\\Desktop\\Laboratorio3\\Fotoelettrico\\datiVerdeAzzurro.txt", unpack = True)
popt = (.1, -2.0, 300.0)
pars, cov = curve_fit(ff, V, I, popt, dI, absolute_sigma = "true")
#plot
x4 = plt.subplot(324)
pylab.xlabel("V (mV)", size = "14")
pylab.ylabel("I (nA)", size = "14")
pylab.xlim(0, 2700)
pylab.grid(color = "gray")
pylab.errorbar(V, I, dI, dV, color = 'blue', marker = 'o', linestyle = '', label = "Verde-Azzurro")
pylab.plot(bucket, ff(bucket, pars[0], pars[1], pars[2]), color = "red")
pylab.legend(loc = "upper-right")
#AZZURRO
f, V, dV, I, dI = pylab.loadtxt("C:\\Users\\marco\\Desktop\\Laboratorio3\\Fotoelettrico\\datiAzzurro.txt", unpack = True)
popt = (.1, -2.0, 300.0)
pars, cov = curve_fit(ff, V, I, popt, dI, absolute_sigma = "true")
#plot
x5 = plt.subplot(325)
pylab.xlabel("V (mV)", size = "14")
pylab.ylabel("I (nA)", size = "14")
pylab.xlim(0, 2700)
pylab.grid(color = "gray")
pylab.errorbar(V, I, dI, dV, color = 'blue', marker = 'o', linestyle = '', label = "Azzurro")
pylab.legend(loc = "upper-right")
pylab.plot(bucket, ff(bucket, pars[0], pars[1], pars[2]), color = "red")
#BLU
f, V, dV, I, dI = pylab.loadtxt("C:\\Users\\marco\\Desktop\\Laboratorio3\\Fotoelettrico\\datiBlu.txt", unpack = True)
popt = (.1, -2.0, 300.0)
pars, cov = curve_fit(ff, V, I, popt, dI, absolute_sigma = "true")
#plot
x6 = plt.subplot(326)
pylab.xlabel("V (mV)", size = "14")
pylab.ylabel("I (nA)", size = "14")
pylab.xlim(0, 2700)
pylab.grid(color = "gray")
pylab.errorbar(V, I, dI, dV, color = 'blue', marker = 'o', linestyle = '', label = "Blu")
pylab.legend(loc = "upper-right")
pylab.plot(bucket, ff(bucket, pars[0], pars[1], pars[2]), color = "red")
plt.suptitle('Corrente vs Potenziale di frenamento: fit esponenziale', size = 18)
pylab.show() | fedebell/Laboratorio3 | Fotoelettrico/subplotexp.py | Python | gpl-3.0 | 3,927 |
from sensor import Sensor
import grovepi
# (5 * grovepi.analogRead(0) * 100) / 1024 <--- formula for LM35 sensor
class Temperature(Sensor):
name = 'Temperature'
def __init__(self, pin, logger=None):
Sensor.__init__(self, self.name, logger)
self.pin = pin
self.connect()
def connect(self):
if(not isinstance(self.pin, int)):
self.validPin = False
else:
self.validPin = True
grovepi.pinMode(self.pin, "INPUT")
def read(self):
if(not self.validPin):
self.logError('No valid pin provided')
return 0
try:
analogValue = grovepi.analogRead(self.pin)
temperature = (5.0 * analogValue * 100.0) / 1024
return temperature
except (IOError, TypeError) as e:
self.logError('Could not read value from sensor')
return 0
if __name__ == '__main__':
t = Temperature(0)
temp = t.read()
print str(temp)
| GoMake/gomake-telemetry | telemetry/internaltemp.py | Python | mit | 841 |
from typing import List, Tuple
from ...pipeline import Lemmatizer
from ...tokens import Token
class DutchLemmatizer(Lemmatizer):
@classmethod
def get_lookups_config(cls, mode: str) -> Tuple[List[str], List[str]]:
if mode == "rule":
required = ["lemma_lookup", "lemma_rules", "lemma_exc", "lemma_index"]
return (required, [])
else:
return super().get_lookups_config(mode)
def lookup_lemmatize(self, token: Token) -> List[str]:
"""Overrides parent method so that a lowercased version of the string
is used to search the lookup table. This is necessary because our
lookup table consists entirely of lowercase keys."""
lookup_table = self.lookups.get_table("lemma_lookup", {})
string = token.text.lower()
return [lookup_table.get(string, string)]
# Note: CGN does not distinguish AUX verbs, so we treat AUX as VERB.
def rule_lemmatize(self, token: Token) -> List[str]:
# Difference 1: self.rules is assumed to be non-None, so no
# 'is None' check required.
# String lowercased from the get-go. All lemmatization results in
# lowercased strings. For most applications, this shouldn't pose
# any problems, and it keeps the exceptions indexes small. If this
# creates problems for proper nouns, we can introduce a check for
# univ_pos == "PROPN".
cache_key = (token.lower, token.pos)
if cache_key in self.cache:
return self.cache[cache_key]
string = token.text
univ_pos = token.pos_.lower()
if univ_pos in ("", "eol", "space"):
forms = [string.lower()]
self.cache[cache_key] = forms
return forms
index_table = self.lookups.get_table("lemma_index", {})
exc_table = self.lookups.get_table("lemma_exc", {})
rules_table = self.lookups.get_table("lemma_rules", {})
index = index_table.get(univ_pos, {})
exceptions = exc_table.get(univ_pos, {})
rules = rules_table.get(univ_pos, {})
string = string.lower()
if univ_pos not in (
"noun",
"verb",
"aux",
"adj",
"adv",
"pron",
"det",
"adp",
"num",
):
forms = [string]
self.cache[cache_key] = forms
return forms
lemma_index = index_table.get(univ_pos, {})
# string is already lemma
if string in lemma_index:
forms = [string]
self.cache[cache_key] = forms
return forms
exc_table = self.lookups.get_table("lemma_exc", {})
exceptions = exc_table.get(univ_pos, {})
# string is irregular token contained in exceptions index.
try:
forms = [exceptions[string][0]]
self.cache[cache_key] = forms
return forms
except KeyError:
pass
# string corresponds to key in lookup table
lookup_table = self.lookups.get_table("lemma_lookup", {})
looked_up_lemma = lookup_table.get(string)
if looked_up_lemma and looked_up_lemma in lemma_index:
forms = [looked_up_lemma]
self.cache[cache_key] = forms
return forms
rules_table = self.lookups.get_table("lemma_rules", {})
oov_forms = []
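        # Each rule is an (old_suffix, new_suffix) pair; for instance a
        # hypothetical rule ("ggen", "g") would rewrite "vlaggen" -> "vlag".
        # A rewritten form is returned at once if it occurs in the lemma index,
        # otherwise it is kept as an out-of-vocabulary candidate.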
for old, new in rules:
if string.endswith(old):
form = string[: len(string) - len(old)] + new
if not form:
pass
elif form in index:
forms = [form]
self.cache[cache_key] = forms
return forms
else:
oov_forms.append(form)
forms = list(set(oov_forms))
# Back-off through remaining return value candidates.
if forms:
for form in forms:
if form in exceptions:
forms = [form]
self.cache[cache_key] = forms
return forms
if looked_up_lemma:
forms = [looked_up_lemma]
self.cache[cache_key] = forms
return forms
else:
self.cache[cache_key] = forms
return forms
elif looked_up_lemma:
forms = [looked_up_lemma]
self.cache[cache_key] = forms
return forms
else:
forms = [string]
self.cache[cache_key] = forms
return forms
| spacy-io/spaCy | spacy/lang/nl/lemmatizer.py | Python | mit | 4,608 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('quiz', '0011_sentence'),
]
operations = [
migrations.AddField(
model_name='sentence',
name='article',
field=models.ForeignKey(to='quiz.Article', default=None),
preserve_default=True,
),
migrations.AddField(
model_name='sentence',
name='language',
field=models.TextField(default=''),
preserve_default=True,
),
migrations.AddField(
model_name='sentence',
name='order',
field=models.PositiveSmallIntegerField(default=0),
preserve_default=True,
),
migrations.AddField(
model_name='sentence',
name='sentence',
field=models.TextField(default=''),
preserve_default=True,
),
]
| leehosung/writing_sandbox | quiz/migrations/0012_auto_20150127_0316.py | Python | mit | 1,017 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from base import *
DEBUG = True
TEMPLATE_DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'map_review',
'HOST': 'localhost'
}
}
| stringfellow/map_review | map_review/settings/devel.py | Python | gpl-2.0 | 258 |
from threading import Thread
import Queue
class TQueue(Queue.Queue):
def __init__(self, workers=5, maxsize=0):
#super(TQueue, self).__init__(maxsize=maxsize)
Queue.Queue.__init__(self, maxsize=maxsize)
self._workers = workers
self._start_workers()
def add_task(self, task, *args, **kwargs):
args = args or ()
kwargs = kwargs or {}
self.put((task, args, kwargs))
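    # Usage sketch:
    #   q = TQueue(workers=2)
    #   q.add_task(some_function, arg1, key=value)
    #   q.join()  # block until every queued task has been processed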
def _start_workers(self):
for i in xrange(self._workers):
t = Thread(target=self._worker)
            t.setDaemon(True)  # background (daemon) thread: cleaned up automatically when finished
t.start()
def _worker(self):
while True:
task,args,kwargs = self.get()
task(*args, **kwargs)
self.task_done()
| hellopanpy/myscript | zabbix/alarter/thread.py | Python | gpl-3.0 | 805 |
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Policy Engine For Cue."""
from oslo.config import cfg
from cue.common.i18n import _
from cue.common.i18n import _LI
from cue.common import exception
from cue.common import utils
from cue.openstack.common import log as logging
from cue.openstack.common import policy
LOG = logging.getLogger(__name__)
_ENFORCER = None
def reset():
global _ENFORCER
if _ENFORCER:
_ENFORCER.clear()
_ENFORCER = None
def set_rules(data, default_rule=None, overwrite=True):
default_rule = default_rule or cfg.CONF.policy_default_rule
if not _ENFORCER:
LOG.debug("Enforcer not present, recreating at rules stage.")
init()
if default_rule:
_ENFORCER.default_rule = default_rule
msg = "Loading rules %s, default: %s, overwrite: %s"
LOG.debug(msg, data, default_rule, overwrite)
if isinstance(data, dict):
rules = dict((k, policy.parse_rule(v)) for k, v in data.items())
rules = policy.Rules(rules, default_rule)
else:
rules = policy.Rules.load_json(data, default_rule)
_ENFORCER.set_rules(rules, overwrite=overwrite)
def init(default_rule=None):
policy_file = cfg.CONF.find_file(cfg.CONF.policy_file)
if len(policy_file) == 0:
msg = 'Unable to determine appropriate policy json file'
raise exception.ConfigurationError(msg)
LOG.info(_LI('Using policy_file found at: %s') % policy_file)
with open(policy_file) as fh:
policy_string = fh.read()
rules = policy.Rules.load_json(policy_string, default_rule=default_rule)
global _ENFORCER
if not _ENFORCER:
LOG.debug("Enforcer is not present, recreating.")
_ENFORCER = policy.Enforcer()
_ENFORCER.set_rules(rules)
def check(rule, ctxt, target=None, do_raise=True, exc=exception.NotAuthorized):
#creds = ctxt.to_dict()
target = target or {}
try:
result = _ENFORCER.enforce(rule, target, ctxt, do_raise, exc)
except Exception:
result = False
raise
else:
return result
finally:
extra = {'policy': {'rule': rule, 'target': target}}
if result:
LOG.info(_("Policy check succeeded for rule '%(rule)s' "
"on target %(target)s") %
{'rule': rule, 'target': repr(target)}, extra=extra)
else:
LOG.info(_("Policy check failed for rule '%(rule)s' "
"on target %(target)s") %
{'rule': rule, 'target': repr(target)}, extra=extra)
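# Usage sketch (the rule name and target fields here are hypothetical and depend
# on the deployed policy.json):
#
#   init()
#   check("cluster:create", ctxt, target={"project_id": "abc123"})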
| vipulsabhaya/cue | cue/common/policy.py | Python | apache-2.0 | 3,164 |
from django.contrib.auth.models import User
from .models import Profile
from django.db.models.signals import post_save
from django.dispatch import receiver
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
try:
instance.profile.save()
except User.DoesNotExist:
Profile.objects.create(user=instance)
| NeverWalkAloner/collectiveblogs | users/signals.py | Python | gpl-3.0 | 520 |
"""
These are debug machines used for content creators, so they're kind of a cross
between dev machines and AWS machines.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
from .aws import *
DEBUG = True
TEMPLATE_DEBUG = True
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
################################ DEBUG TOOLBAR #################################
INSTALLED_APPS += ('debug_toolbar',)
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
# Enabling the profiler has a weird bug as of django-debug-toolbar==0.9.4 and
# Django=1.3.1/1.4 where requests to views get duplicated (your method gets
# hit twice). So you can uncomment when you need to diagnose performance
# problems, but you shouldn't leave it on.
# 'debug_toolbar.panels.profiling.ProfilingDebugPanel',
)
| jswope00/griffinx | lms/envs/content.py | Python | agpl-3.0 | 1,435 |
# http://www.creatis.insa-lyon.fr/~bernard/creaseg/
# http://ascratchpad.blogspot.com/2011/03/image-segmentation-using-active.html
#------------------------------------------------------------------------
# Region Based Active Contour Segmentation
#
# seg = region_seg(I,init_mask,max_its,alpha,display)
#
# Inputs: I 2D image
# init_mask Initialization (1 = foreground, 0 = bg)
# max_its Number of iterations to run segmentation for
# alpha (optional) Weight of smoothing term
# higer = smoother. default = 0.2
# display (optional) displays intermediate outputs
# default = true
#
# Outputs: seg Final segmentation mask (1=fg, 0=bg)
#
# Description: This code implements the paper: "Active Contours Without
# Edges" By Chan Vese. This is a nice way to segment images whose
# foregrounds and backgrounds are statistically different and homogeneous.
#
# Example:
# img = imread('tire.tif');
# m = zeros(size(img));
# m(33:33+117,44:44+128) = 1;
# seg = region_seg(img,m,500);
#
# Coded by: Shawn Lankton (www.shawnlankton.com)
#------------------------------------------------------------------------
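# Python equivalent of the MATLAB example above (sketch; `img` is a 2D grayscale
# array loaded elsewhere):
#
#   import numpy as np
#   m = np.zeros(img.shape)
#   m[32:150, 43:172] = 1
#   seg, phi, its = chanvese(img, m, max_its=500, display=False)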
import numpy as np
import scipy.ndimage as nd
import matplotlib.pyplot as plt
eps = np.finfo(np.float).eps
def chanvese(I,init_mask,max_its=200,alpha=0.2,thresh=0,color='r',display=False):
I = I.astype('float')
#-- Create a signed distance map (SDF) from mask
phi = mask2phi(init_mask)
if display:
plt.ion()
showCurveAndPhi(I, phi, color)
plt.savefig('levelset_start.pdf',bbox_inches='tight')
#--main loop
its = 0
stop = False
prev_mask = init_mask
c = 0
while (its < max_its and not stop):
# get the curve's narrow band
idx = np.flatnonzero( np.logical_and( phi <= 1.2, phi >= -1.2) )
if len(idx) > 0:
#-- intermediate output
if display:
if np.mod(its,50) == 0:
#set(ud.txtInfo1,'string',sprintf('iteration: %d',its),'color',[1 1 0]);
                    print('iteration: %d' % its)
showCurveAndPhi(I, phi, color)
else:
if np.mod(its,10) == 0:
                    print('iteration: %d' % its)
#set(ud.txtInfo1,'string',sprintf('iteration: %d',its),'color',[1 1 0]);
#drawnow;
#-- find interior and exterior mean
upts = np.flatnonzero(phi<=0) # interior points
vpts = np.flatnonzero(phi>0) # exterior points
u = np.sum(I.flat[upts])/(len(upts)+eps) # interior mean
v = np.sum(I.flat[vpts])/(len(vpts)+eps) # exterior mean
F = (I.flat[idx]-u)**2-(I.flat[idx]-v)**2 # force from image information
curvature = get_curvature(phi,idx) # force from curvature penalty
dphidt = F /np.max(np.abs(F)) + alpha*curvature # gradient descent to minimize energy
#-- maintain the CFL condition
dt = 0.45/(np.max(np.abs(dphidt))+eps)
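            # 0.45 < 0.5 keeps each update below half a grid cell per step,
            # which is what the CFL stability bound requires here.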
#-- evolve the curve
phi.flat[idx] += dt*dphidt
#-- Keep SDF smooth
phi = sussman(phi, 0.5)
new_mask = phi<=0
c = convergence(prev_mask,new_mask,thresh,c)
if c <= 5:
its = its + 1
prev_mask = new_mask
else: stop = True
else:
break
#-- final output
if display:
showCurveAndPhi(I, phi, color)
#plt.savefig('levelset_end.pdf',bbox_inches='tight')
time.sleep(10)
#-- make mask from SDF
seg = phi<=0 #-- Get mask from levelset
return seg,phi,its
#---------------------------------------------------------------------
#---------------------------------------------------------------------
#-- AUXILIARY FUNCTIONS ----------------------------------------------
#---------------------------------------------------------------------
#---------------------------------------------------------------------
def bwdist(a):
"""
this is an intermediary function, 'a' has only True, False vals,
so we convert them into 0, 1 values -- in reverse. True is 0,
False is 1, distance_transform_edt wants it that way.
"""
return nd.distance_transform_edt(a == 0)
#-- Displays the image with curve superimposed
def showCurveAndPhi(I, phi, color):
# subplot(numRows, numCols, plotNum)
#myplot = plt.subplot(121)
#fig, axes = plt.subplots()
#axes = myplot.axes
#axes.get_xaxis().set_visible(False)
#axes.get_yaxis().set_visible(False)
plt.clf()
plt.imshow(I, cmap='gray')
#plt.hold(True)
    # draw the zero level set of phi (an explicit level list avoids the int
    # being read as "number of levels")
    CS = plt.contour(phi, [0], colors=color)
plt.draw()
#plt.hold(False)
# myplot = plt.subplot(122)
# axes = myplot.axes
# axes.get_xaxis().set_visible(False)
# axes.get_yaxis().set_visible(False)
# plt.imshow(phi)
plt.draw()
#time.sleep(1)
def im2double(a):
a = a.astype('float')
a /= a.max()
return a
#-- converts a mask to a SDF
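# Sign convention: phi < 0 inside the initial mask, phi > 0 outside, and the
# zero level set traces the contour itself.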
def mask2phi(init_a):
phi = bwdist(init_a)-bwdist(1-init_a)+im2double(init_a) -0.5
return phi
#-- compute curvature along SDF
def get_curvature(phi,idx):
dimy, dimx = phi.shape
yx = np.array([np.unravel_index(i, phi.shape)for i in idx]) # get subscripts
y = yx[:,0]
x = yx[:,1]
#-- get subscripts of neighbors
ym1 = y-1; xm1 = x-1; yp1 = y+1; xp1 = x+1;
#-- bounds checking
ym1[ym1<0] = 0; xm1[xm1<0] = 0;
yp1[yp1>=dimy]=dimy - 1; xp1[xp1>=dimx] = dimx - 1;
#-- get indexes for 8 neighbors
idup = np.ravel_multi_index( (yp1,x),phi.shape)
iddn = np.ravel_multi_index( (ym1,x),phi.shape)
idlt = np.ravel_multi_index( (y,xm1),phi.shape)
idrt = np.ravel_multi_index( (y,xp1),phi.shape)
idul = np.ravel_multi_index( (yp1,xm1),phi.shape)
idur = np.ravel_multi_index( (yp1,xp1),phi.shape)
iddl = np.ravel_multi_index( (ym1,xm1),phi.shape)
iddr = np.ravel_multi_index( (ym1,xp1),phi.shape)
#-- get central derivatives of SDF at x,y
phi_x = -phi.flat[idlt]+phi.flat[idrt]
phi_y = -phi.flat[iddn]+phi.flat[idup]
phi_xx = phi.flat[idlt]-2*phi.flat[idx]+phi.flat[idrt]
phi_yy = phi.flat[iddn]-2*phi.flat[idx]+phi.flat[idup]
phi_xy = (-0.25*phi.flat[iddl]-0.25*phi.flat[idur]
+0.25*phi.flat[iddr]+0.25*phi.flat[idul])
phi_x2 = phi_x**2
phi_y2 = phi_y**2
#-- compute curvature (Kappa)
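    # The expression below evaluates kappa * |grad(phi)|, i.e.
    #   (phi_x^2*phi_yy - 2*phi_x*phi_y*phi_xy + phi_y^2*phi_xx) / (phi_x^2 + phi_y^2)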
curvature = ( ((phi_x2*phi_yy + phi_y2*phi_xx - 2*phi_x*phi_y*phi_xy)
/ (phi_x2 + phi_y2 +eps)**(3/2))
*(phi_x2 + phi_y2)**(1/2))
return curvature
#-- level set re-initialization by the sussman method
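# One explicit Euler step of the Sussman re-initialization PDE
#   d(phi)/d(tau) = sign(phi) * (1 - |grad(phi)|),
# which nudges phi back towards a signed distance function (|grad(phi)| ~ 1)
# while barely moving its zero level set.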
def sussman(D, dt):
# forward/backward differences
a = D - shiftR(D) # backward
b = shiftL(D) - D # forward
c = D - shiftD(D) # backward
d = shiftU(D) - D # forward
a_p = a.copy(); a_n = a.copy(); # a+ and a-
b_p = b.copy(); b_n = b.copy();
c_p = c.copy(); c_n = c.copy();
d_p = d.copy(); d_n = d.copy();
a_p[a < 0] = 0
a_n[a > 0] = 0
b_p[b < 0] = 0
b_n[b > 0] = 0
c_p[c < 0] = 0
c_n[c > 0] = 0
d_p[d < 0] = 0
d_n[d > 0] = 0
dD = np.zeros(D.shape)
D_neg_ind = np.flatnonzero(D < 0)
D_pos_ind = np.flatnonzero(D > 0)
dD.flat[D_pos_ind] = np.sqrt( np.max( np.concatenate( ([a_p.flat[D_pos_ind]**2],
[b_n.flat[D_pos_ind]**2]) ),
axis=0
)
+ np.max( np.concatenate( ([c_p.flat[D_pos_ind]**2],
[d_n.flat[D_pos_ind]**2])),
axis=0
)
) - 1
dD.flat[D_neg_ind] = np.sqrt( np.max( np.concatenate( ([a_n.flat[D_neg_ind]**2],
[b_p.flat[D_neg_ind]**2])),
axis=0
)
+ np.max( np.concatenate( ([c_n.flat[D_neg_ind]**2],
[d_p.flat[D_neg_ind]**2]) ),
axis=0
)
) - 1
D = D - dt * sussman_sign(D) * dD
return D
#-- whole matrix derivatives
def shiftD(M):
return shiftR(M.transpose()).transpose()
def shiftL(M):
#shift = np.concatenate( (M[:,1:], np.zeros((M.shape[1],1))), axis=1 )
#shift = np.concatenate( (M[:,1:], M[:,-1]), axis=1 )
    shift = M[:, list(range(1, M.shape[1])) + [M.shape[1]-1]]
return shift
def shiftR(M):
#shift = np.concatenate( (np.zeros((M.shape[1],1)), M[:,:-1]), axis=1 )
#shift = np.concatenate( (M[:,0], M[:,:-1]), axis=1 )
    shift = M[:, [0] + list(range(0, M.shape[1]-1))]
return shift
def shiftU(M):
return shiftL(M.transpose()).transpose()
def sussman_sign(D):
return D / np.sqrt(D**2 + 1)
# Convergence Test
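# Counts consecutive iterations in which fewer than `thresh` pixels changed;
# the caller treats more than five such iterations in a row as convergence.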
def convergence(p_mask,n_mask,thresh,c):
    # number of pixels that changed between the previous and the new mask
    # (cast to int so boolean masks subtract without error)
    diff = p_mask.astype(int) - n_mask.astype(int)
    n_diff = np.sum(np.abs(diff))
if n_diff < thresh:
c = c + 1
else:
c = 0
return c
if __name__ == "__main__":
import cv2
img = cv2.imread("/home/kevin/Imperial/PhD/DATASETS/Training/positive/246_cropped_c/8.png_0022_0115_0117_0132_0132_0.png",0)
#img = nd.imread('sagittal8.png')
mask = np.zeros(img.shape)
mask[55:65,55:65] = 1
chanvese(img,mask,max_its=2000,display=True,alpha=0.1)
| BioMedIA/irtk-legacy | wrapping/cython/irtk/ext/chanvese.py | Python | bsd-3-clause | 9,929 |
# MIT licensed
# Copyright (c) 2013-2020 lilydjwg <[email protected]>, et al.
import pytest
pytestmark = pytest.mark.asyncio
async def test_substitute_prefix(get_version):
assert await get_version("example", {
"source": "manual",
"manual": "v1.0",
"prefix": "v",
}) == "1.0"
async def test_substitute_prefix_missing_ok(get_version):
assert await get_version("example", {
"source": "manual",
"manual": "1.0",
"prefix": "v",
}) == "1.0"
async def test_substitute_regex(get_version):
assert await get_version("example", {
"source": "manual",
"manual": "r15c",
"from_pattern": r"r(\d+)([a-z])",
"to_pattern": r"r\1.\2",
}) == "r15.c"
async def test_substitute_regex_missing_ok(get_version):
assert await get_version("example", {
"source": "manual",
"manual": "r15",
"from_pattern": r"r(\d+)([a-z])",
"to_pattern": r"r\1.\2",
}) == "r15"
async def test_substitute_regex_empty_to_pattern(get_version):
assert await get_version("example", {
"source": "manual",
"manual": "15-debian",
"from_pattern": r"-\w+$",
"to_pattern": r"",
}) == "15"
async def test_substitute_prefix_has_higher_priority(get_version):
assert await get_version("example", {
"source": "manual",
"manual": "r15",
"prefix": "r",
"from_pattern": r"r(\d+)",
"to_pattern": r"R\1",
}) == "15"
| lilydjwg/nvchecker | tests/test_substitute.py | Python | mit | 1,490 |
import contextlib # closing
import pytest
from journal import app
from journal import connect_db
from journal import get_database_connection
from journal import init_db
# The walkthrough implied this manages browser cookies when used...
from flask import session
TEST_DSN = 'dbname=test_learning_journal user=fried'
SUBMIT_BTN = '<input type="submit" value="Share" name="Share"/>'
# Used for testing isolation. The wipe half of reinitting the database.
def clear_db():
# This ensures the connection is closed later.
# Context library is all for this kind of context stuff.
with contextlib.closing(connect_db()) as db:
# Testing is not supposed to be used with a deployed database,
# apparently. That's where TEST_DSN's specification comes in:
# This will all be done in the test_learning_journal db.
# ...
# NOTE: This database must be created manually on the CLI.
# Done with:
# createdb test_learning_journal
db.cursor().execute("DROP TABLE entries")
db.commit()
@pytest.fixture(scope='session')
def test_app():
''' Configure the app for use in testing. '''
# What test_app() will do here is access the testing database
# (which is created outside of my python, on the CLI (for now))
# Flask apps have config dictionaries in them by design.
app.config['DATABASE'] = TEST_DSN
app.config['TESTING'] = True
# "The fixture function is defined with parameters.
# The names of the parameters must match registered fixtures.
# The fixtures named as parameters will be run surrounding the new fixture."
@pytest.fixture(scope='session')
def db(test_app, request):
''' Initialize the entries table and drop it when finished. '''
# This is the "fixture function" with its "registered fixture" parameters.
# The request parameter is a fixture that pytest gives you; you use it
# to connect the cleanup() function to the db fixture.
init_db()
# Unexplained methods: cleardb addfinalizer cleanup
# "The request parameter is a fixture that pytest registers.
# You use it to connect the cleanup function to the db fixture.
# This means that cleanup will be run after tests are complete
# as a tear-down action."
def cleanup():
clear_db()
# I THINK @app.teardown_request is a finalizer? Maaaaybe... ???
request.addfinalizer(cleanup)
# This one helps tests run in isolation from each other.
# Specifically it makes a generator function fixture.
# This is critical because generators preserve internal state.
# As a result, "the entire test happens inside context manager scope"
@pytest.yield_fixture(scope='function')
def req_context(db):
''' Run tests within a test request context so that 'g' is present. '''
# Wait... flask.g would not be available if we didn't make this
# "request context" function?
with app.test_request_context('/'):
# First, yield nothing.
# Wat.
yield
con = get_database_connection()
con.rollback()
# "Flask creates g when a cycle begines, but tests
# have no request/response cycle.
# Flasks's app.test_request_context is a "context provider".
# Used in a with statement, it creates a mock request/response cycle."
# ...
# What this means is, there's no web server running to test this,
# BUT we can simulate what would happen if there was.. by calling
# appname.app.test_request_context()
# "The request only exists inside the with block, so the
# callback pattern used in the db fixture would not work."
# I think that is referring to the request.addfinalizer(cleanup)
# line?
# "Because yield preserves internal state, the entire test
# happens inside the context manager scope"
# "When control returns to the fixture, code after the yield
# statement is executed as the tear-down action."
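# The same yield-fixture pattern in miniature (generic names, not from this
# project):
#
#   @pytest.yield_fixture(scope='function')
#   def resource():
#       handle = acquire()   # set-up runs before the test body
#       yield handle         # the test executes here
#       handle.release()     # tear-down runs once the test finishes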
# Now begins the testing of the database schema.
def run_independent_query(query, params=[]):
# This function simply formalizes what I've been doing all along
# to make DB queries inside Python.
con = get_database_connection()
cur = con.cursor()
cur.execute(query, params)
return cur.fetchall()
def test_write_entry(req_context):
from journal import write_entry
expected = ("My Title", "My Text")
# Remember, star args are just how you unpack things.
# ((double star args unpack things into a dict.))
write_entry(*expected)
# "run_independent_query() is a 'helper function' you can re-use."
# Where's it come from, pytest? By way of the decorator??
rows = run_independent_query("SELECT * FROM entries")
# Huh, so this is just assertEquals... from pytest?
# Maybe not, since it's its own freestanding operation?
assert len(rows) == 1
for val in expected:
assert val in rows[0]
def test_edit_entry(req_context):
    from journal import edit_entry
    from journal import write_entry
    expected = ("My Title", "My Text")
    write_entry(*expected)
    the_row_we_added = run_independent_query("SELECT * FROM entries")
    # For now just confirm the entry landed; edit_entry's exact signature
    # isn't pinned down here, so the edit step itself still needs writing.
    assert len(the_row_we_added) == 1
def test_get_all_entries_empty(req_context):
from journal import get_all_entries
entries = get_all_entries()
assert len(entries) == 0
def test_get_all_entries(req_context):
from journal import get_all_entries, write_entry
expected = ("My Title", "My Test")
write_entry(*expected)
entries = get_all_entries()
assert len(entries) == 1
for entry in entries:
assert expected[0] == entry['title']
assert expected[1] == entry['text']
assert 'created' in entry
def test_get_entry(req_context):
from journal import get_entry, write_entry, get_all_entries
# Verify it's empty
entries = get_all_entries()
assert len(entries) == 0
expected = ("Get Entry Title", "Get Entry Test")
write_entry(*expected)
# Verify it isn't empty
entries = get_all_entries()
assert len(entries) == 1
# Derive the id and use it to get_entry():
the_only_entry = get_entry(entries[0]['id'])
# I wish I had more time to do that properly, but it works.
assert expected[0] == the_only_entry['title']
assert expected[1] == the_only_entry['text']
assert 'created' in the_only_entry
def test_empty_listing(db):
# "app.test_client() returns a mock HTTP client,
# like a web browser for development."
# "Because this test actually creates a request, we don't need to use
# the req_context fixture. Having an initialized database is enough"
# "The data attribute of the response returned by client.get()
# holds the full rendered HTML of our page."
actual = app.test_client().get('/').data
expected = 'No entries here so far'
assert expected in actual
@pytest.fixture(scope='function')
def with_entry(db, request):
from journal import write_entry
    expected = (u'Test Title', u'Test Text')
with app.test_request_context('/'):
write_entry(*expected)
get_database_connection().commit()
def cleanup():
# NOTE: "You use a test_request_context in both setup and
# teardown to ensure that flask.g exists."
with app.test_request_context('/'):
con = get_database_connection()
cur = con.cursor()
cur.execute("DELETE FROM entries")
con.commit()
# Also note that allowing the two "with" blocks to close commits the
# transactions for each test context.
request.addfinalizer(cleanup)
return expected
def test_listing(with_entry):
expected = with_entry
actual = app.test_client().get('/').data
for value in expected:
assert value in actual
def test_add_entries(db):
entry_data = {
u'title': u'Hello',
u'text': u'This is a post',
}
# "The post method of the Flask test_client sends an HTTP POST
# request to the provided URL."
actual = app.test_client().post(
'/add', data=entry_data, follow_redirects=True
).data
assert 'No entries here so far' not in actual
for expected in entry_data.values():
# "assert that the line in entry data is also in the actual data"
assert expected in actual
def test_do_login_success(req_context):
username, password = ('admin', 'admin')
# In-function imports look weird and wrong.
# Shouldn't they be for things that might be optional
# and thus could be skipped? Such as not unit tests?
from journal import do_login
assert 'logged_in' not in session
do_login(username, password)
assert 'logged_in' in session
def test_do_login_bad_password(req_context):
username = 'admin'
bad_password = 'wrongpassword'
from journal import do_login
with pytest.raises(ValueError):
do_login(username, bad_password)
def test_do_login_bad_username(req_context):
bad_username = 'wronguser'
password = 'admin'
from journal import do_login
with pytest.raises(ValueError):
do_login(bad_username, password)
def login_helper(username, password):
login_data = {
'username': username,
'password': password
}
client = app.test_client()
return client.post(
'/login', data=login_data, follow_redirects=True
)
def test_start_as_anonymous(db):
client = app.test_client()
anon_home = client.get('/').data
assert SUBMIT_BTN not in anon_home
def test_login_success(db):
# Is this unencrypted password okay because it's not deployed?
# The walkthrough DID say "never" store passwords unencrypted...
# "Anywhere".
username, password = ('admin', 'admin')
response = login_helper(username, password)
assert SUBMIT_BTN in response.data
def test_login_fails(db):
username, password = ('admin', 'wrong')
response = login_helper(username, password)
assert 'Login Failed' in response.data
def test_logout(db):
home = login_helper('admin', 'admin').data
assert SUBMIT_BTN in home
client = app.test_client()
response = client.get('/logout')
assert SUBMIT_BTN not in response.data
assert response.status_code == 302
| defzzd/learning_journal | test_journal.py | Python | mit | 10,338 |
# -----------------------------------------------------------------------------
# Karajlug.org
# Copyright (C) 2010 Karajlug community
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------------
from django.conf.urls.defaults import patterns
urlpatterns = patterns(
'',
(r'^([^\.]+)/$', 'books.views.book_view'),
(r'^$', 'books.views.books_index'),
)
| Karajlug/karajlug | books/urls.py | Python | gpl-2.0 | 1,132 |
#!/usr/bin/env python3
# Set up imports and paths
bufferpath = "../../dataAcq/buffer/python"
sigProcPath = "../signalProc"
from psychopy import visual, core, event, gui, sound, data, monitors
import numpy as np
import sys
import socket
from time import sleep, time
import os
bufhelpPath = "../../python/signalProc"
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),bufhelpPath))
import bufhelp
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),sigProcPath))
# init connection to the buffer
ftc,hdr=bufhelp.connect();
def showText(text):
text.draw()
mywin.flip()
def waitForKeypress():
allKeys = event.getKeys()
while len(allKeys)==0:
allKeys = event.getKeys()
if 'escape' in allKeys[0]:
mywin.close() # quit
core.quit()
# Setup the stimulus window
screenWidth = 600
screenHeight = 400
mywin = visual.Window(size=(screenWidth, screenHeight), fullscr=False, screen=1, allowGUI=False, allowStencil=False,
monitor='testMonitor', units="pix",color=[1,1,1], colorSpace='rgb',blendMode='avg', useFBO=True)
#define variables
stim = [' ', '+']
nr_sequences = 5
nr_trials = 10
interStimDuration= 0.8
stimDuration = 0.2
#create some stimuli
welcome_text = visual.TextStim(mywin, text='Welcome! \n\nPress a key to start...',color=(-1,-1,-1),wrapWidth = 800)
# ************** Start run sentences **************
showText(welcome_text)
# label data
bufhelp.sendEvent('experiment','start')
waitForKeypress()
# inject an ERP into the trigger channel
# bytes([1]) sends a single byte with value 1; bytes(1) would send one zero byte
socket.socket(socket.AF_INET,socket.SOCK_DGRAM,0).sendto(bytes([1]),('localhost',8300))
# pause
core.wait(stimDuration)
# refresh the screen
mywin.flip()
| jadref/buffer_bci | tutorial/lect3-helloworld/ERPviewer_stimulus_psychopy_skel.py | Python | gpl-3.0 | 1,698 |
from sage.all import RealDistribution
from yanntricks import *
def ChiSquared():
pspict,fig = SinglePicture("ChiSquared")
pspict.dilatation_X(0.5)
pspict.dilatation_Y(50)
f=phyFunction(RealDistribution("chisquared",10).distribution_function).graph(0,30)
f.linearr_plotpoints=1000
pspict.DrawGraphs(f)
pspict.axes.single_axeX.Dx=5
pspict.axes.single_axeY.Dx=0.05
pspict.DrawDefaultAxes()
fig.conclude()
fig.write_the_file()
| LaurentClaessens/mazhe | src_yanntricks/yanntricksChiSquared.py | Python | gpl-3.0 | 471 |
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
PROJECT_NAME = 'tooltool/api'
APP_NAME = 'tooltool_api'
PULSE_ROUTE_CHECK_FILE_PENDING_UPLOADS = 'check_file_pending_uploads'
SCOPE_PREFIX = f'project:releng:services/{PROJECT_NAME}'
SCOPE_MANAGE = f'{SCOPE_PREFIX}/manage'
| La0/mozilla-relengapi | src/tooltool/api/tooltool_api/config.py | Python | mpl-2.0 | 447 |
# -*- coding: utf-8 -*-
import logging
from logging import DEBUG, INFO, WARN, ERROR, CRITICAL
import sys
import warnings
#
# Exceptions
#
class MarkdownException(Exception):
""" A Markdown Exception. """
pass
class MarkdownWarning(Warning):
""" A Markdown Warning. """
pass
#
# Global functions
#
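# Call sites pass one of the imported levels plus a message, e.g.
# message(DEBUG, "text"); behaviour depends on whether the 'MARKDOWN' logger
# has handlers configured.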
def message(level, text):
""" A wrapper method for logging debug messages. """
logger = logging.getLogger('MARKDOWN')
if logger.handlers:
# The logger is configured
logger.log(level, text)
if level > WARN:
sys.exit(0)
elif level > WARN:
        raise MarkdownException(text)
else:
        warnings.warn(text, MarkdownWarning)
| DJMelonz/basic-blog | markdown/md_logging.py | Python | bsd-3-clause | 702 |