# Code-dataset excerpt: one source file per block, each annotated with its repository, path, and license.
# ==== wdavilaneto/ultron | mlcore/pipeline/bag_of_bigrams.py | license: gpl-3.0 ====
#!/usr/bin/env python3
# pylint: disable=C0103
"""Build a bag of bigrams from documents stored in OrientDB."""
import math
from document.utils import printProgressBar
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
from mlcore import TextService
import progressbar
import pyorient
orientdb = pyorient.OrientDB("localhost", 2424)
orientdb.connect("root", "root")
orientdb.db_open('datascience', "root", "root")
text_service = TextService()
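# Rank the bigrams found in `words` by chi-squared association and return the
# top n as a {bigram: True} dict, the usual NLTK bag-of-features format.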
def bag_of_bigrams_words(words, n=100, score_fn=BigramAssocMeasures.chi_sq):
bigram_finder = BigramCollocationFinder.from_words(words)
bigrams = bigram_finder.nbest(score_fn, n)
return dict([(bigram, True) for bigram in bigrams])
def save_bigrams_as_many(bigrams):
default_cluster = (orientdb.command("SELECT FROM ( SELECT expand( classes ) FROM metadata:schema ) WHERE name = 'Bigrams'")[0]).oRecordData['defaultClusterId']
#orientdb.command("delete from Bigrams")
for key in bigrams.keys():
w = dict(id=key, qtd=bigrams.get(key))
# orientdb.record_create(default_cluster, w)
print (w)
if __name__ == "__main__":
print("bigramming..")
result = orientdb.query("select * from Documento LIMIT 100")
current = 0
total = len(result)
bar = progressbar.ProgressBar(max_value=total)
all_texts = ""
for each in result:
current += 1
bar.update(current)
all_texts = all_texts + each.texto_arquivo
bar.finish()
    # save_bigrams_as_many(bigrams)
bigrams = bag_of_bigrams_words(text_service.tokenize(all_texts))
save_bigrams_as_many(bigrams)
# ==== arpan-chavda/rh_app | libs/venus/planet/idindex.py | license: gpl-3.0 ====
from glob import glob
import os, sys
if __name__ == '__main__':
rootdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, rootdir)
from planet.spider import filename
from planet import config
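# The id index is a dbhash database kept in the cache directory: it maps each
# cached entry (keyed by its id) to the id of the feed it came from.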
def open():
try:
cache = config.cache_directory()
index=os.path.join(cache,'index')
if not os.path.exists(index): return None
import dbhash
return dbhash.open(filename(index, 'id'),'w')
except Exception, e:
if e.__class__.__name__ == 'DBError': e = e.args[-1]
from planet import logger as log
log.error(str(e))
def destroy():
from planet import logger as log
cache = config.cache_directory()
index=os.path.join(cache,'index')
if not os.path.exists(index): return None
idindex = filename(index, 'id')
if os.path.exists(idindex): os.unlink(idindex)
os.removedirs(index)
log.info(idindex + " deleted")
def create():
from planet import logger as log
cache = config.cache_directory()
index=os.path.join(cache,'index')
if not os.path.exists(index): os.makedirs(index)
import dbhash
index = dbhash.open(filename(index, 'id'),'c')
try:
import libxml2
except:
libxml2 = False
from xml.dom import minidom
for file in glob(cache+"/*"):
if os.path.isdir(file):
continue
elif libxml2:
try:
doc = libxml2.parseFile(file)
ctxt = doc.xpathNewContext()
ctxt.xpathRegisterNs('atom','http://www.w3.org/2005/Atom')
entry = ctxt.xpathEval('/atom:entry/atom:id')
source = ctxt.xpathEval('/atom:entry/atom:source/atom:id')
if entry and source:
index[filename('',entry[0].content)] = source[0].content
doc.freeDoc()
except:
log.error(file)
else:
try:
doc = minidom.parse(file)
doc.normalize()
ids = doc.getElementsByTagName('id')
entry = [e for e in ids if e.parentNode.nodeName == 'entry']
source = [e for e in ids if e.parentNode.nodeName == 'source']
if entry and source:
index[filename('',entry[0].childNodes[0].nodeValue)] = \
source[0].childNodes[0].nodeValue
                doc.unlink()
except:
log.error(file)
log.info(str(len(index.keys())) + " entries indexed")
index.close()
return open()
if __name__ == '__main__':
if len(sys.argv) < 2:
        print 'Usage: %s config [-c|-d]' % sys.argv[0]
sys.exit(1)
config.load(sys.argv[1])
if len(sys.argv) > 2 and sys.argv[2] == '-c':
create()
elif len(sys.argv) > 2 and sys.argv[2] == '-d':
destroy()
else:
from planet import logger as log
index = open()
if index:
log.info(str(len(index.keys())) + " entries indexed")
index.close()
else:
log.info("no entries indexed")
# ==== ghidalgo3/gen-javadoc | JavadocCreator.py | license: mit ====
import sublime, sublime_plugin
from Javadoc import *
class JavadocCommand(sublime_plugin.TextCommand):
def determineIndentation(self,region):
(row, col) = self.view.rowcol(region.begin())
indent_region = self.view.find('^\s+', self.view.text_point(row, 0))
indent_level = len(self.view.substr(indent_region))/4
return indent_level
def alreadyCommented(self,region):
(row,col)= self.view.rowcol(region.begin())
previous_line = self.view.line(self.view.text_point(row-1,0))
if "*/" in self.view.substr(previous_line):
return True
else:
return False
def run(self, edit):
#check if it's a java file
#fileName = self.view.file_name()[-4:]
classSignature = self.view.find("""(public|private|protected) (abstract )?(class|interface|enum)""",0)
#indentation_level = self.determineIndentation(classSignature)
#maybe do this better?
javadocer = Javadoc()
if not self.alreadyCommented(classSignature):
self.view.insert(edit,classSignature.begin(), javadocer.createClassJavadoc())
startSearchPoint = 0
foundPublicsCount = self.view.find_all("public.*\\)")
#use the [region] as a counter of how many comments we're inserting
for methodSignatures in foundPublicsCount:
            # find from startSearchPoint because every time we insert comments,
#all characters move so we have to continually keep searching for
#the next method signature
methodSignature = self.view.find("public.*\\)", startSearchPoint)
methodSignatureString = self.view.substr(methodSignature)
indentation_level = self.determineIndentation(methodSignature)
javadocer = Javadoc(indentation_level, methodSignatureString)
if not self.alreadyCommented(methodSignature):
self.view.insert(edit,methodSignature.begin(),javadocer.createMethodJavadoc())
startSearchPoint = methodSignature.end()+javadocer.charactersAdded
# ==== skies-io/django-rest-api | setup.py | license: mit ====
from setuptools import setup  # find_packages
from codecs import open
from os import path
with open(path.join(path.abspath(path.dirname(__file__)), 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='django-rest-api',
version='0.1.5',
description='Django REST API',
long_description=long_description,
url='https://github.com/skies-io/django-rest-api',
author='Skies',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
# 'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.6',
# 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
keywords='django rest api',
# packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
packages=['rest_api'],
install_requires=['Django>=1.6'],
)
# ==== lmascare/utils | python/bin/watch_adobe.py | license: artistic-2.0 ====
#!/usr/local/bin/python3
"""Script to watch Adobe Security page
This script looks at https://helpx.adobe.com/security.html and parses the
html file for all the products. It then determines if the page was updated
from the last check.
Design Spec
- Create 2 tables with the following schemas
Table 1
- Product Name
- Product URL
- Published Date
Table 2
- Product Name
- Link
- Title
- Posted
- Updated
- collection_date
"""
import urllib.request
from urllib.parse import urljoin
import bs4 as bs
url = "https://helpx.adobe.com/security.html"
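# Fetch the Adobe security page, print its publish date, then parse the bulletin
# table and print each advisory's link, title and dates (the page fetch and
# exit(0) at the bottom of the loop currently stop processing after the first row).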
def main():
"""Fabled main, where it all begins."""
print (url)
sauce = urllib.request.urlopen(url).read()
soup = bs.BeautifulSoup(sauce, "html.parser")
for meta in soup.find_all('meta', attrs = {'name': 'publishDate'}, content=True):
# print (type(meta))
print ("Published Date : {}".format(meta.get('content')))
bulletin_items = soup.find(class_="noHeader")
b_href = bulletin_items.find_all("a")
# Loop through the Table records
for t_row in bulletin_items.find_all("tr")[1::]:
t_data = t_row.find_all("td")
#link = t_row.find_all("a")
link = t_row.find_all("a")
link = link[0].get("href")
href = urljoin(url,link)
print ("Link : {}\n"
"Title : {}\n"
"Posted : {}\n"
"Updated : {}\n".format(href, t_data[0].text, t_data[1].text, t_data[2].text))
f_href = urllib.request.urlopen(href).read()
print(f_href)
exit(0)
if __name__ == "__main__":
    main()
# ==== pszemus/grpc | tools/profiling/bloat/bloat_diff.py | license: apache-2.0 ====
#!/usr/bin/env python2.7
#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import multiprocessing
import os
import shutil
import subprocess
import sys
sys.path.append(
os.path.join(
os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
import check_on_pr
argp = argparse.ArgumentParser(description='Perform diff on microbenchmarks')
argp.add_argument(
'-d',
'--diff_base',
type=str,
help='Commit or branch to compare the current one to')
argp.add_argument('-j', '--jobs', type=int, default=multiprocessing.cpu_count())
args = argp.parse_args()
LIBS = [
'libgrpc.so',
'libgrpc++.so',
]
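# Build the libraries for the current checkout (and, when --diff_base is given,
# for the base revision as well) so bloaty can compare their binary sizes below.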
def build(where):
subprocess.check_call('make -j%d' % args.jobs, shell=True, cwd='.')
shutil.rmtree('bloat_diff_%s' % where, ignore_errors=True)
os.rename('libs', 'bloat_diff_%s' % where)
build('new')
if args.diff_base:
old = 'old'
where_am_i = subprocess.check_output(
['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
subprocess.check_call(['git', 'checkout', args.diff_base])
subprocess.check_call(['git', 'submodule', 'update'])
try:
try:
build('old')
except subprocess.CalledProcessError, e:
subprocess.check_call(['make', 'clean'])
build('old')
finally:
subprocess.check_call(['git', 'checkout', where_am_i])
subprocess.check_call(['git', 'submodule', 'update'])
subprocess.check_call(
'make -j%d' % args.jobs, shell=True, cwd='third_party/bloaty')
text = ''
for lib in LIBS:
text += '****************************************************************\n\n'
text += lib + '\n\n'
old_version = glob.glob('bloat_diff_old/opt/%s' % lib)
new_version = glob.glob('bloat_diff_new/opt/%s' % lib)
assert len(new_version) == 1
cmd = 'third_party/bloaty/bloaty -d compileunits,symbols'
if old_version:
assert len(old_version) == 1
text += subprocess.check_output(
'%s %s -- %s' % (cmd, new_version[0], old_version[0]), shell=True)
else:
text += subprocess.check_output(
'%s %s' % (cmd, new_version[0]), shell=True)
text += '\n\n'
print text
check_on_pr.check_on_pr('Bloat Difference', '```\n%s\n```' % text)
# ==== squarebracket/coen315 | timing.py | license: gpl-2.0 ====
import GNUCapSimulationData as gc
import numpy as np
import plotly.plotly as py
from plotly.graph_objs import *
py.sign_in('squarebracket', '6edn8gin4t')
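# For each of the three simulated circuits: load the GNUCap CSV output, print the
# extracted figures of merit (VOL, VOH, current, power, average propagation delay),
# and write the voltage and current timing diagrams to PDF.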
for x in [1, 2, 3]:
sim_data = None
sim_data = gc.GNUCapSimulationData('circuit%s.csv' % x, index_col='Time',
delim_whitespace=True)
(vol, voh, total_current, total_power, avg_prop) = sim_data.get_properties(vout_node='sum', signal_node='a', psu_name='VDD')
print('Parameters for Circuit %s' % x)
print('VOL: %f' % vol)
print('VOH: %f' % voh)
print('total current: %s' % total_current)
print('total power: %s' % total_power)
print('average tprop: %s' % avg_prop)
subplot_data = [
['V(a)', 'V(a_inv)'],
['V(b)', 'V(b_inv)'],
['V(cin)', 'V(cin_inv)'],
['V(h)', 'V(i)'],
['V(j)', 'V(k)'],
['V(x)', 'V(y)'],
['V(sum)', 'V(sum_inv)'],
]
f = sim_data.plot_timing_diagram(subplot_data, y1_label=r'$\mathrm{Voltage}$',
line_style={'alpha': 1},
sec_line_style={'linestyle': '--', 'alpha': 1})
f.savefig('vtd_%s.pdf' % x)
subplot_data = ['I(VDD)', 'I(CH)', 'I(CI)', 'I(CJ)', 'I(CK)', 'I(CX)', 'I(CY)', 'I(CSum)', 'I(CSum_inv)']
sim_data.df['I(PMOS)'] = sim_data.df['ID(M1P)'] + sim_data.df['ID(M5P)'] + \
sim_data.df['ID(M9P)'] + sim_data.df['ID(M13P)']
subplot_data = [
['I(VDD)',],
# ['I(CH)', 'ID(M1P)', 'ID(M2N)',],
# ['I(CI)', 'ID(M5P)', 'ID(M6N)',],
# ['I(CJ)', 'ID(M9P)', 'ID(M10N)',],
# ['I(CK)', 'ID(M13P)', 'ID(M14N)',],
# ['I(CX)'],
# ['I(CY)'],
# ['I(CH)', 'ID(M1P)', 'ID(M2N)', 'ID(M3N)', 'ID(M4N)'],
# ['I(CI)', 'ID(M5P)', 'ID(M6N)', 'ID(M7N)', 'ID(M8N)'],
# ['I(CJ)', 'ID(M9P)', 'ID(M10N)', 'ID(M11N)', 'ID(M12N)'],
# ['I(CK)', 'ID(M13P)', 'ID(M14N)', 'ID(M15N)', 'ID(M16N)'],
# ['ID(M1P)', 'ID(M2N)', 'ID(M3N)', 'ID(M4N)'],
# ['ID(M5P)', 'ID(M6N)', 'ID(M7N)', 'ID(M8N)'],
# ['ID(M9P)', 'ID(M10N)', 'ID(M11N)', 'ID(M12N)'],
# ['ID(M13P)', 'ID(M14N)', 'ID(M15N)', 'ID(M16N)'],
['ID(M1P)', 'ID(M5P)', 'ID(M9P)', 'ID(M13P)'],
['ID(M2N)', 'ID(M6N)', 'ID(M10N)', 'ID(M14N)'],
['I(CH)', 'I(CI)', 'I(CJ)', 'I(CK)'],
]
f = sim_data.plot_timing_diagram(subplot_data, y1_label=r'$\mathrm{Current}$',
line_style={'alpha': 0.5}, unit='A', yscale=1,
sec_line_style={'alpha': 0.5}, y1_lim=(-0.0006, 0.0006), hspace=None)
f.savefig('itd_%s.pdf' % x)
subplot_data = [
('V(h)', ['I(CH)', 'ID(M1P)', 'ID(M2N)']),
('V(i)', ['I(CI)', 'ID(M5P)', 'ID(M6N)']),
('V(j)', ['I(CJ)', 'ID(M9P)', 'ID(M10N)']),
('V(k)', ['I(CK)', 'ID(M13P)', 'ID(M14N)']),
('V(x)', ['I(CX)', 'ID(M3N)', 'ID(M4N)', 'ID(M7N)', 'ID(M12N)']),
('V(y)', ['I(CY)', 'ID(M8N)', 'ID(M11N)', 'ID(M15N)', 'ID(M16N)']),
('V(sum)', ['I(CSum)', 'ID(M4N)', 'ID(M8N)'])
]
if x == 3:
subplot_data[6][1].append('ID(M17P)')
time_slices=(
(np.float64('5E-9'), np.float64('7E-9')),
(np.float64('10E-9'), np.float64('12E-9')),
(np.float64('15E-9'), np.float64('17E-9')),
(np.float64('20E-9'), np.float64('22E-9')),
)
f = sim_data.plot3(subplot_data, right_line_style={'alpha': 0.75}, left_unit='V', right_unit='A', yscale=1.2,
left_line_style={'alpha': 0.2}, hspace=0, time_slices=time_slices)
f.savefig('itd3_%s.pdf' % x)
f = sim_data.plot2(subplot_data, right_line_style={'alpha': 0.75}, left_unit='V', right_unit='A', yscale=1.2,
left_line_style={'alpha': 0.2}, hspace=0)
f.savefig('itd2_%s.pdf' % x)
# ax = sim_data.df[['ID(M1P)', 'ID(M5P)', 'ID(M9P)', 'ID(M13P)']].plot(kind='area', figsize=(8,1.2))
# ax.figure.savefig('../report/test.pdf')
subplot_data = (
(['V(h)'], ['I(CH)', 'ID(M1P)', 'ID(M2N)', 'ID(M3N)']),
(['V(x)', 'V(h)', 'V(i)'], ['ID(M3N)', 'ID(M4N)', 'ID(M7N)', 'ID(M12N)', 'I(CH)', 'I(CI)', 'I(CX)']),
)
f = sim_data.plot2(subplot_data, line_style={'alpha': 0.7}, left_unit='V', right_unit='A')
f.savefig('current_%s.pdf' % x)
f = sim_data.plot_timing_diagram(['V(x)', 'VTH(M3N)', 'VTH(M4N)', 'VTH(M7N)', 'VTH(M12N)'])
f.savefig('a%s.pdf' % x)
f = sim_data.plot_timing_diagram([['V(sum)', 'V(sum_inv)'], ['I(VDD)']],
line_style={'alpha': 0.8},
sec_line_style={'alpha': 0.8, 'linestyle': '--'},
start_time=np.float64('4.E-9'), end_time=np.float64('9.E-9'),
sharey=False)
f.savefig('blip_%s.pdf' % x)
# ==== kunitoki/nublas | nublas/db/fields/color.py | license: mit ====
import re
from django.db import models
from django.core.validators import RegexValidator
from django.utils.encoding import smart_text
from django.utils.translation import ugettext as _
#==============================================================================
color_re = re.compile('^\#([a-fA-F0-9]{6}|[a-fA-F0-9]{3})$')
validate_color = RegexValidator(color_re, _("Enter a valid color."), 'invalid')
#==============================================================================
class RGBColorField(models.CharField):
default_validators = [validate_color]
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 7
super(RGBColorField, self).__init__(*args, **kwargs)
def clean(self, value, model_instance):
if value[0] != '#':
value = '#' + value
value = super(RGBColorField, self).clean(value, model_instance)
return smart_text(value)
def deconstruct(self):
name, path, args, kwargs = super(RGBColorField, self).deconstruct()
del kwargs["max_length"]
return name, path, args, kwargs
# ==== andreasots/lrrbot | lrrbot/twitchfollows.py | license: apache-2.0 ====
import asyncio
from common.config import config
from common import rpc
from common import storm
from common import twitch
from common import utils
import dateutil.parser
FOLLOWER_CHECK_DELAY = 60
class TwitchFollows:
def __init__(self, lrrbot, loop):
self.lrrbot = lrrbot
self.loop = loop
self.last_timestamp = None
self.last_users = set()
self.schedule_check()
def schedule_check(self):
asyncio.ensure_future(self.check_follows(), loop=self.loop).add_done_callback(utils.check_exception)
self.loop.call_later(FOLLOWER_CHECK_DELAY, self.schedule_check)
async def check_follows(self):
if self.last_timestamp is None:
async for follower in twitch.get_followers():
if self.last_timestamp is None or self.last_timestamp == follower['created_at']:
self.last_timestamp = follower['created_at']
self.last_users.add(follower['user']['_id'])
else:
break
else:
last_users = self.last_users
self.last_users = set()
events = []
async for follower in twitch.get_followers():
if follower['created_at'] >= self.last_timestamp:
if follower['user']['_id'] not in last_users:
events.append((
follower['user'].get('display_name') or follower['user']['name'],
follower['user'].get('logo'),
follower['created_at'],
))
self.last_users.add(follower['user']['_id'])
else:
break
if not events:
self.last_users = last_users
for name, avatar, timestamp in events[::-1]:
self.last_timestamp = timestamp
timestamp = dateutil.parser.parse(timestamp)
event = {
'name': name,
'avatar': avatar,
'count': storm.increment(self.lrrbot.engine, self.lrrbot.metadata, 'twitch-follow'),
}
await rpc.eventserver.event('twitch-follow', event, timestamp)
# ==== mingot/detectme_server | detectme/detectme/urls.py | license: mit ====
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$', TemplateView.as_view(template_name='index.html'), name="home"),
url(r'^detectors/', include('detectors.urls')),
url(r'^leaderboard/', include('leaderboards.urls')),
url(r'^videostream/', include('videostream.urls')),
url(r'^how_it_works/$', TemplateView.as_view(template_name='how_it_works.html'),
name="how_it_works"),
url(r'^api_documentation/$', TemplateView.as_view(template_name='api_documentation.html'),
name="api_documentation"),
url(r'^about/$', TemplateView.as_view(template_name='about.html'),
name="about"),
url(r'^contact/', include('envelope.urls'), name="contact"),
# create account over api
(r'^accounts/api/', include('accounts.urls')),
# userena app
(r'^accounts/', include('userena.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
# Include the login and logout views for the API.
urlpatterns += patterns('',
url(r'^api-token-auth/', 'rest_framework.authtoken.views.obtain_auth_token')
)
# Metrics
urlpatterns += patterns('',
url(r'^metrics/', include('redis_metrics.urls')),
url(r'^profiler/', include('profiler.urls')),
)
# Allow access to the Media folder from the browser
if settings.DEBUG:
urlpatterns += patterns(
'',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT,
}),
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.STATIC_ROOT,
}),
)
# ==== nw0/mealy | views.py | license: gpl-3.0 ====
from django.contrib.auth.decorators import login_required, user_passes_test, \
permission_required
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.template import loader
from django.core.urlresolvers import reverse, reverse_lazy
from django.views import generic
from django.utils.decorators import method_decorator
from django.core.exceptions import ValidationError
from django.db.models import Avg, Value, Sum, Count
from django.db.models.functions import Coalesce
from .forms import MealForm, DishForm, TicketForm, TicketFormSet, \
InstAmtForm, InstPriceForm, \
NewInstForm, NewInstStdForm, NewStdInstForm
import json, datetime, time
from .models import Resource_Type, Resource_Inst, Resource_Ticket, \
TicketManager, Meal, Dish, Standard_Inst
from .admin import other_checks
# Create your views here.
decs = [ login_required, user_passes_test(other_checks)]
SEMI_OPEN_STATE_THRESHOLD = 10
ENABLE_CAL_PROGRESS_BARS = True
def iso_to_gregorian(iso_year, iso_week, iso_day):
"Gregorian calendar date for the given ISO year, week and day"
fifth_jan = datetime.date(iso_year, 1, 5)
_, fifth_jan_week, fifth_jan_day = fifth_jan.isocalendar()
return fifth_jan + datetime.timedelta(days=iso_day-fifth_jan_day,
weeks=iso_week-fifth_jan_week)
@login_required
@user_passes_test(other_checks)
def index(request):
# We want to know what meals there are
meal_list = Meal.objects.filter(
meal_owner=request.user.id).order_by('-cons_time')
cal = {}
for meal in meal_list:
iy, im, iw = meal.cons_time.isocalendar()
if (iy, im) not in cal:
cal[(iy, im)] = {}
if iw not in cal[(iy, im)]:
cal[(iy, im)][iw] = []
cal[(iy, im)][iw].append(meal)
for e in cal:
# e is a week, e.g. (2016, 1)
# cal[e] is a dict of meals by day {iw: [meal]} that week
weekMeals = [[] for i in range(7)]
tot, mc, opensum = 0, 0, 0
for w in xrange(7):
weekMeals[w] = cal[e][w+1] if w+1 in cal[e] else []
for meal in weekMeals[w]:
tot += meal.get_meal_cost()
mc += 1
opensum += meal.open_cost
weekMeals[w] = [ (u"%s \xA3%.2f" % (meal.meal_type[0],
meal.get_meal_cost()/100),
reverse("mealy:meal_detail", args=(meal.id,)),
meal.open_cost, meal.get_meal_cost())
for meal in weekMeals[w]]
# weekMeals[0] = (monday) [ ("L 2.77", det_link, 0.56, 2.77), ... ]
weekMeals[w] = [iso_to_gregorian(e[0], e[1], w+1).strftime("%b %d"),
weekMeals[w]]
# weekMeals[0] = [ "Mar 14", [("L...", det_link, 0.56, 2.77), ...] ]
weekMeals[w][1].sort()
weekMeals.append(["", [(u"T \xA3%.2f (%.2f)" % (tot/100, opensum/100),
False, opensum, tot),
(u"A \xA3%.2f (%.2f)" %
(tot/100/mc, opensum/100/mc), False, opensum/mc, tot/mc)]])
cal[e] = weekMeals
cal = sorted(list(cal.items()), reverse=True)
template = loader.get_template("mealy/meals.html")
contDict = { 'meal_list': meal_list,
'mtypes': Meal.MEAL_TYPES,
'meal_form': MealForm,
'user': request.user,
'cal_meals': cal,
'semi_open': SEMI_OPEN_STATE_THRESHOLD,
'prog_bars': ENABLE_CAL_PROGRESS_BARS,
}
return HttpResponse(template.render(contDict, request))
@method_decorator(decs, name='dispatch')
class MealView(generic.DetailView):
def get_queryset(self):
return Meal.objects.filter( id=self.kwargs['pk'],
meal_owner=self.request.user)
def get_context_data(self, **kwargs):
context = super(MealView, self).get_context_data(**kwargs)
context['dish_form'] = DishForm
return context
@method_decorator(decs, name='dispatch')
class NewMeal(generic.edit.CreateView):
form_class = MealForm
success_url = reverse_lazy("mealy:index")
def form_invalid(self, form):
raise ValidationError("Invalid form value", code='invalid')
def form_valid(self, form):
form.instance.meal_owner = self.request.user
return super(NewMeal, self).form_valid(form)
@method_decorator(decs, name='dispatch')
class DeleteMeal(generic.edit.DeleteView):
models = Meal
def get_queryset(self):
return Meal.objects.filter( id=self.kwargs['pk'],
meal_owner=self.request.user,
dish__isnull=True)
def get(self, *args, **kwargs):
return HttpResponseRedirect(reverse("mealy:meal_detail",
args=[self.get_object().id]))
def get_success_url(self):
return reverse("mealy:index")
@method_decorator(decs, name='dispatch')
class NewDish(generic.edit.CreateView):
form_class = DishForm
def form_invalid(self, form):
raise ValidationError("Invalid form value", code='invalid')
def form_valid(self, form):
form.instance.par_meal = Meal.objects.get( id=self.kwargs['meal_id'],
meal_owner=self.request.user, )
return super(NewDish, self).form_valid(form)
def get_success_url(self):
return reverse("mealy:meal_detail", args=[self.kwargs['meal_id']])
@method_decorator(decs, name='dispatch')
class DeleteDish(generic.edit.DeleteView):
models = Dish
def get_queryset(self):
return Dish.objects.filter( id=self.kwargs['pk'],
par_meal__meal_owner=self.request.user,
resource_ticket__isnull=True)
def get(self, *args, **kwargs):
return HttpResponseRedirect(reverse("mealy:dish_detail",
args=[self.get_object().id]))
def get_success_url(self):
return reverse("mealy:meal_detail", args=[self.object.par_meal.id])
@method_decorator(decs, name='dispatch')
class DishView(generic.DetailView):
def get_queryset(self):
return Dish.objects.filter( id=self.kwargs['pk'],
par_meal__meal_owner=self.request.user)
def get_context_data(self, **kwargs):
context = super(DishView, self).get_context_data(**kwargs)
context['tkt_form'] = TicketForm(self.request.user)
return context
def post(self, *args, **kwargs):
form = TicketForm(self.request.user, self.request.POST)
if form.is_valid():
self.object = self.get_object()
res_inst = form.cleaned_data['resource_inst']
uu = form.cleaned_data['units_used']
exhausted = form.cleaned_data['exhausted']
nt = Resource_Ticket.objects.create_ticket(
res_inst, uu, self.object, exhausted)
else:
raise Http404("Invalid form")
return HttpResponseRedirect(
reverse("mealy:dish_detail", args=[self.object.id]))
@method_decorator(decs, name='dispatch')
class TypesOverview(generic.ListView):
queryset = Resource_Type.objects.order_by('r_parent')
@method_decorator(decs, name='dispatch')
class TypesView(generic.DetailView):
slug_field = "r_name"
queryset = Resource_Type.objects.all()
@method_decorator(decs, name='dispatch')
class StdInstListView(generic.ListView):
queryset = Standard_Inst.objects.order_by('inst_type')
def get_context_data(self, **kwargs):
context = super(StdInstListView, self).get_context_data(**kwargs)
context['nsiForm'] = NewStdInstForm
return context
@method_decorator(decs, name='dispatch')
class StdInstDetailView(generic.DetailView):
queryset = Standard_Inst.objects.all()
@method_decorator(decs + [permission_required('mealy.can_add_standard_resource_instance')], name='dispatch')
class NewStdInst(generic.edit.CreateView):
form_class = NewStdInstForm
success_url = reverse_lazy("mealy:std_insts")
def form_invalid(self, form):
raise ValidationError("Invalid form value", code='invalid')
@method_decorator(decs, name='dispatch')
class InventView(generic.ListView):
def get_queryset(self):
objs = Resource_Inst.objects.filter(inst_owner=self.request.user)
if not self.kwargs['showAll']:
objs = objs.filter(exhausted=False)
return objs.order_by('res_type', 'purchase_date')
def get_context_data(self, **kwargs):
context = super(InventView, self).get_context_data(**kwargs)
context['types'] = Resource_Type.objects.all()
context['showAll'] = self.kwargs['showAll']
context['niForm'] = NewInstForm
context['nisForm'] = NewInstStdForm(auto_id='newinststdform_%s')
return context
@method_decorator(decs, name='dispatch')
class DeleteTicket(generic.edit.DeleteView):
models = Resource_Ticket
def get_queryset(self):
return Resource_Ticket.objects.filter( id=self.kwargs['pk'], resource_inst__inst_owner=self.request.user)
def get(self, *args, **kwargs):
return HttpResponseRedirect(reverse("mealy:inv_detail",
args=[self.get_object().resource_inst.id]))
def get_success_url(self):
return reverse("mealy:inv_detail", args=[self.object.resource_inst.id])
@login_required
@user_passes_test(other_checks)
def invent_detail(request, inst_id):
inst = get_object_or_404(Resource_Inst, id=inst_id, inst_owner=request.user)
if request.method == "POST":
formType = request.POST['formtype']
if formType == "finalise":
defin = request.POST['finalisation']
if defin == "final":
inst.finalise()
elif defin == "definal":
inst.definalise()
else:
raise Http404("Finalisation invalid")
elif formType == "pricechange":
initf = inst.exhausted
if initf:
inst.definalise()
newPrice = int(request.POST['price'])
inst.change_price(newPrice)
if initf:
inst.finalise()
elif formType == "amtchange":
newAmt = float(request.POST['orig_amt'])
inst.change_amt(newAmt)
else:
raise Http404("We're not sure what form you submitted")
return HttpResponseRedirect(reverse("mealy:inv_detail", args=[inst.id]))
tickets = Resource_Ticket.objects.filter(resource_inst=inst).order_by('par_dish')
similar_insts = inst.similar_set()
similar_att = inst.similar_attrs()
template = loader.get_template("mealy/inv_detail.html")
contDict = { 'inst': inst,
'price_form': InstPriceForm,
'amt_form': InstAmtForm,
'tickets': tickets,
'sim_list': similar_insts,
'sim_att': similar_att,
}
return HttpResponse(template.render(contDict, request))
@method_decorator(decs, name='dispatch')
class NewInst(generic.edit.CreateView):
form_class = NewInstForm
success_url = reverse_lazy("mealy:inventory")
def form_invalid(self, form):
raise ValidationError("Invalid form value", code='invalid')
def form_valid(self, form):
form.instance.inst_owner = self.request.user
form.instance.unit_use_formal = False
return super(NewInst, self).form_valid(form)
@method_decorator(decs, name='dispatch')
class DeleteInst(generic.edit.DeleteView):
models = Resource_Inst
success_url = reverse_lazy("mealy:inventory")
def get_queryset(self):
return Resource_Inst.objects.filter( id=self.kwargs['pk'],
inst_owner=self.request.user,
resource_ticket__isnull=True)
def get(self, *args, **kwargs):
return HttpResponseRedirect(reverse("mealy:invent_detail",
args=[self.get_object().id]))
@method_decorator(decs, name='dispatch')
class NewInstStd(generic.edit.CreateView):
form_class = NewInstStdForm
success_url = reverse_lazy("mealy:inventory")
def form_invalid(self, form):
raise ValidationError("Invalid form value", code='invalid')
def form_valid(self, form):
form.instance.inst_owner = self.request.user
form.instance.unit_use_formal = False
form.instance.res_name = form.cleaned_data['std_inst'].inst_name
form.instance.res_type = form.cleaned_data['std_inst'].inst_type
form.instance.orig_unit = form.cleaned_data['std_inst'].orig_unit
form.instance.amt_original = form.cleaned_data['std_inst'].orig_amt
form.instance.best_before = form.cleaned_data['std_inst'].use_bestbef
if form.cleaned_data['std_inst'].is_relative:
form.instance.amt_original = form.cleaned_data['amt_dummy']
return super(NewInstStd, self).form_valid(form)
@login_required
@user_passes_test(other_checks)
def getStandardInst(request):
inst = get_object_or_404(Standard_Inst, id=request.GET['id'])
return HttpResponse(inst.show_fields())
# ==== sammyf/dungeon-bot | redis-dungeon.py | license: gpl-3.0 ====
#!/usr/bin/python2
"""
initialize the redis databases and create and store a random maze recursively
"""
import redis
import random
import sys
import math
global gMaze, sx, sy, it
gMaze = []
maxX = 6
maxY = 6
sx = 6
sy = 6
it = 0
sys.setrecursionlimit( 2000)
db_MazeLayout = 0
db_Entities = 1
random.seed()
for i in range( 0, maxX*maxY):
gMaze.append({0:'w',1:'w',2:'w',3:'w','visited':False})
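# Recursive-backtracker maze generation: every cell starts with four walls ('w');
# visiting an unvisited neighbour replaces the shared wall with a door ('d') on
# both sides.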
def recurse(x, y):
global gMaze, sy, sy, it
print "iteration: "+str(it)+" Room @ "+str(x)+" : "+str(y)
it = it + 1
gMaze[(y*sx) + x]['visited'] = True
seq = [0,1,2,3]
random.shuffle( seq)
for i in range(0,4):
neighbour = seq[i]
if neighbour == 0:
nx = x
ny = y-1
if ny < 0 or gMaze[(ny*sx) + nx]['visited']:
continue
else:
gMaze[(y*sx) + x][0] = 'd'
gMaze[(ny*sx) + nx][2] = 'd'
recurse(x, ny)
elif neighbour == 1:
nx = x+1
ny = y
if nx >= sx or gMaze[(ny*sx) + nx]['visited']:
continue
else:
gMaze[(y*sx) + x][1] = 'd'
gMaze[(ny*sx) + nx][3] = 'd'
recurse(nx, y)
elif neighbour == 2:
nx = x
ny = y+1
if ny >= sy or gMaze[(ny*sx) + x]['visited']:
continue
else:
gMaze[(y*sx) + x][2] = 'd'
gMaze[(ny*sx) + nx][0] = 'd'
recurse(x, ny)
elif neighbour == 3:
nx = x-1
ny = y
if nx < 0 or gMaze[(ny*sx) + nx]['visited']:
continue
else:
gMaze[(y*sx) + x][3] = 'd'
gMaze[(ny*sx) + nx][1] = 'd'
recurse(nx,y)
def createStoreMaze():
global gMaze, sx, sy
recurse( 0,0)
r = redis.StrictRedis( host='localhost', port=6379, db=db_Entities)
r.flushdb()
r = redis.StrictRedis( host='localhost', port=6379, db=db_MazeLayout)
r.flushdb()
for i in range( 0, sy):
for s in range( 0, sx):
items=""
if s==(sx-1) and i==(sy-1):
items = "stairs"
data = {'walls':( gMaze[(i*sx)+s][0], gMaze[(i*sx)+s][1], gMaze[(i*sx)+s][2], gMaze[(i*sx)+s][3]), 'entities':[], 'items':[items], 'grafiti':""};
k = "x"+str(s)+"y"+str(i)
r.set( k, data)
print "\ndone\n"
createStoreMaze();
# ==== aprotopopov/lifetimes | tests/test_plotting.py | license: mit ====
import pytest
import matplotlib
matplotlib.use('AGG') # use a non-interactive backend
from matplotlib import pyplot as plt
from lifetimes import plotting
from lifetimes import BetaGeoFitter, ParetoNBDFitter, ModifiedBetaGeoFitter
from lifetimes.datasets import load_cdnow_summary, load_transaction_data
from lifetimes import utils
bgf = BetaGeoFitter()
cd_data = load_cdnow_summary()
bgf.fit(cd_data['frequency'], cd_data['recency'], cd_data['T'], iterative_fitting=1)
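# Image-comparison tests: each test draws one lifetimes plot and returns the
# current figure so pytest-mpl can compare it against a stored baseline image.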
@pytest.mark.plottest
class TestPlotting():
@pytest.mark.mpl_image_compare(tolerance=30)
def test_plot_period_transactions(self):
plt.figure()
plotting.plot_period_transactions(bgf)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=30)
def test_plot_period_transactions_parento(self):
pnbd = ParetoNBDFitter()
pnbd.fit(cd_data['frequency'], cd_data['recency'], cd_data['T'], iterative_fitting=1)
plt.figure()
plotting.plot_period_transactions(pnbd)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=30)
def test_plot_period_transactions_mbgf(self):
mbgf = ModifiedBetaGeoFitter()
mbgf.fit(cd_data['frequency'], cd_data['recency'], cd_data['T'], iterative_fitting=1)
plt.figure()
plotting.plot_period_transactions(mbgf)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=30)
def test_plot_period_transactions_max_frequency(self):
plt.figure()
plotting.plot_period_transactions(bgf, max_frequency=12)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=30)
def test_plot_period_transactions_labels(self):
plt.figure()
plotting.plot_period_transactions(bgf, label=['A', 'B'])
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=30)
def test_plot_frequency_recency_matrix(self):
plt.figure()
plotting.plot_frequency_recency_matrix(bgf)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=30)
def test_plot_frequency_recency_matrix_max_recency(self):
plt.figure()
plotting.plot_frequency_recency_matrix(bgf, max_recency=100)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=30)
def test_plot_frequency_recency_matrix_max_frequency(self):
plt.figure()
plotting.plot_frequency_recency_matrix(bgf, max_frequency=100)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=30)
def test_plot_frequency_recency_matrix_max_frequency_max_recency(self):
plt.figure()
plotting.plot_frequency_recency_matrix(bgf, max_frequency=100, max_recency=100)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=30)
def test_plot_probability_alive_matrix(self):
plt.figure()
plotting.plot_probability_alive_matrix(bgf)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=30)
def test_plot_probability_alive_matrix_max_frequency(self):
plt.figure()
plotting.plot_probability_alive_matrix(bgf, max_frequency=100)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=30)
def test_plot_probability_alive_matrix_max_recency(self):
plt.figure()
plotting.plot_probability_alive_matrix(bgf, max_recency=100)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=30)
def test_plot_probability_alive_matrix_max_frequency_max_recency(self):
plt.figure()
plotting.plot_probability_alive_matrix(bgf, max_frequency=100, max_recency=100)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=30)
def test_plot_expected_repeat_purchases(self):
plt.figure()
plotting.plot_expected_repeat_purchases(bgf)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=30)
def test_plot_expected_repeat_purchases_with_label(self):
plt.figure()
plotting.plot_expected_repeat_purchases(bgf, label='test label')
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=30)
def test_plot_customer_alive_history(self):
plt.figure()
transaction_data = load_transaction_data()
# yes I know this is using the wrong data, but I'm testing plotting here.
id = 35
days_since_birth = 200
sp_trans = transaction_data.loc[transaction_data['id'] == id]
plotting.plot_history_alive(bgf, days_since_birth, sp_trans, 'date')
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=30)
def test_plot_calibration_purchases_vs_holdout_purchases(self):
transaction_data = load_transaction_data()
summary = utils.calibration_and_holdout_data(transaction_data, 'id', 'date', '2014-09-01', '2014-12-31')
bgf.fit(summary['frequency_cal'], summary['recency_cal'], summary['T_cal'])
plt.figure()
plotting.plot_calibration_purchases_vs_holdout_purchases(bgf, summary)
return plt.gcf()
@pytest.mark.mpl_image_compare(tolerance=30)
def test_plot_calibration_purchases_vs_holdout_purchases_time_since_last_purchase(self):
transaction_data = load_transaction_data()
summary = utils.calibration_and_holdout_data(transaction_data, 'id', 'date', '2014-09-01', '2014-12-31')
bgf.fit(summary['frequency_cal'], summary['recency_cal'], summary['T_cal'])
plt.figure()
plotting.plot_calibration_purchases_vs_holdout_purchases(bgf, summary, kind='time_since_last_purchase')
return plt.gcf()
# ==== hivemined/queen | src/hivemined/container.py | license: apache-2.0 ====
#!/usr/bin/python3
import docker
import docker.utils
from .core import Docker
from .image import Image
__author__ = 'Ryan Clarke - [email protected]'
class Container:
"""Base Container class for docker containers managed by Hivemined."""
label = 'hivemined.container'
def __init__(self, name, image, command='', volumes=list(), port=None, memory=None, swap=None, cpu=None, **kwargs):
self.name = str(name)
self.command = str(command)
# Type checking for image
if isinstance(image, Image):
self.image = image
else:
raise TypeError('Parameter must be an Image', image)
# Type checking for volumes
if next((False for v in volumes if not isinstance(v, Container)), True):
self.volumes = volumes
else:
raise TypeError('Parameter must be a list of Containers.', volumes)
# Set network port and resource limits
self.port = port
self.limits = {}
if memory:
self.limits['Memory'] = str(memory)
if swap:
self.limits['Swap'] = str(swap)
if cpu:
self.limits['Cpu'] = int(cpu)
        self.restart_policy = {
            'Name': 'always',  # 'always' | 'on-failure' | 'no'
            'MaximumRetryCount': 0
        }
self.container = None
self.create(**kwargs)
def list(self, show_all=False, quiet=False):
"""List all containers manages by the calling class (respects inheritance)."""
return Docker.containers(all=show_all, quiet=quiet, filters={'label': type(self).label})
def exists(self, running=False):
"""Return True if the container referenced by this object exists, or False otherwise.
If running==True, check if the container is running instead.
"""
if not self.container.get('Id'):
return False
containers = self.list(show_all=(not running))
return next((True for c in containers if c.get('Id') == self.container.get('Id')), False)
def create(self, force=False, **kwargs):
"""Create a new managed docker container.
If force==True, create new a container even if one already exists.
Propagates LookupError from self.image.get() f the image does not exist and cannot be pulled or built,
Raises Warning if container creation resulted in warnings form Docker.
"""
labels = {type(self).label: None, 'name': self.name}
if self.exists() and not force:
return
try:
self.image.get() # Ensure that the specified Image exists.
except LookupError as e:
print(e)
raise
volume_list = []
for v in self.volumes:
volume_list.append(v.container.get("Id"))
        if self.port:
            host_cfg = docker.utils.create_host_config(
                volumes_from=volume_list, restart_policy=self.restart_policy,
                port_bindings={25565: int(self.port)})
        else:
            host_cfg = docker.utils.create_host_config(
                volumes_from=volume_list, restart_policy=self.restart_policy,
                publish_all_ports=True)
self.container = Docker.create_container(
host_config=host_cfg, labels=labels, image=self.image.name, command=self.command,
mem_limit=self.limits.get('memory'), memswap_limit=self.limits.get('swap'),
cpu_shares=self.limits.get('cpu'), **kwargs)
if self.container.get('Warnings'):
raise Warning("Container creation warning.", self)
def delete(self, volumes=True):
if self.exists(running=True):
self.stop()
Docker.remove_container(self.container.get('Id'), v=volumes)
def update(self):
self.image.get(update=True)
old_container = self.container
self.create(force=True)
Docker.remove_container(old_container.get('Id'))
def start(self, tty=False):
if not self.exists():
self.create()
Docker.start(self.container.get('Id'), tty=tty)
def stop(self):
Docker.stop(self.container.get('Id'))
def restart(self):
Docker.restart(self.container.get('Id'))
def command(self, command, tty=False):
exec_str = Docker.exec_create(self.container.get('Id'), cmd=command, tty=tty)
Docker.exec_start(exec_str, tty=tty)
# ==== neothemachine/crowfood | crowfood/engine.py | license: mit ====
from __future__ import print_function, absolute_import
from six.moves import filter
from io import open # to use encoding kw in Python 2
import os
from collections import defaultdict
from crowfood.utils import is_subdir
import re
import sys
import itertools
def get_roots_and_include_paths(args):
# convention:
# input roots are the directories of the files to scan
# include roots are the directories given by -I
input_roots = set()
for path in args.path:
if os.path.isfile(path):
path = os.path.dirname(path)
input_roots.add(path)
external_roots = set(args.external_roots)
# make any include path an additional external root if it is outside any existing root
external_roots.update(
set(filter(lambda include_path: not any(is_subdir(include_path, root)
for root in input_roots.union(external_roots)),
args.include_paths)))
input_include_paths = defaultdict(list) # input root -> include paths
external_include_paths = defaultdict(list) # external root -> include paths
for include_path in args.include_paths:
input_root = [root for root in input_roots if is_subdir(include_path, root)]
if input_root:
input_include_paths[input_root[0]].append(include_path)
else:
external_root = [root for root in external_roots
if is_subdir(include_path, root) or include_path == root]
external_include_paths[external_root[0]].append(include_path)
for root in input_roots:
if root not in input_include_paths:
input_include_paths[root].append(root)
for root in external_roots:
if root not in external_include_paths:
external_include_paths[root].append(root)
return input_roots, input_include_paths, external_roots, external_include_paths
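# run() drives the scan: gather source files under the input roots, parse their
# #include directives, resolve every include against the include search paths
# (optionally with fuzzy basename matching), merge .c/.h pairs into modules if
# requested, and return the result as ((root, path), (root, path)) dependency tuples.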
def run(args):
input_roots, input_include_paths, external_roots, external_include_paths =\
get_roots_and_include_paths(args)
# for every found directory and file we need to output:
#((root, 'relative/path/to/root'), (None, None))
# We scan all requested files and directories and stop at the outer
# level of any dependencies found at the include search paths.
# Files in the include paths are not scanned for #include's.
# Get a list of all files with .c/.cc/.cpp/.cxx/.h/.hpp/.hxx extension
# from the directories to scan for, if any.
exts = ['c', 'h', 'cc', 'cpp', 'cxx', 'hpp', 'hxx'] + args.additional_exts
files = defaultdict(list) # input root -> file paths relative to root
def get_input_root(path):
return next(filter(lambda root: root in path, input_roots))
if args.fuzzy:
filemap = defaultdict(list) # filename -> (root,relpath)
for path in args.path:
if os.path.isfile(path):
root = get_input_root(path)
files[root].append(os.path.relpath(path, root))
else:
for base, _, filenames in os.walk(path):
if base in args.ignore_paths:
continue
root = get_input_root(base)
filenames = list(filter(lambda f: any(f.endswith('.' + ext) for ext in exts), filenames))
filepaths = map(lambda f: os.path.join(base, f), filenames)
filepaths = list(map(lambda p: os.path.relpath(p, root), filepaths))
files[root].extend(filepaths)
if args.fuzzy:
for filename, filepath in zip(filenames,filepaths):
filemap[filename].append((root,filepath))
# parse the #include's of all files
quotes = dict({'both': ('["|<]', '["|>]'),
'angle': ('<', '>'),
'quote': ('"', '"')
})[args.quotetypes]
include_re = re.compile(r'#include {}([a-zA-Z0-9_\-\.\/]+){}'.format(*quotes))
includes = dict() # (root,relpath) -> [include paths]
for root, filepaths in files.items():
for filepath in filepaths:
print('parsing', filepath, file=sys.stderr)
with open(os.path.join(root, filepath), encoding='utf8') as fp:
includes[(root,filepath)] = include_re.findall(fp.read())
# for each include, find the root it belongs to
includes_roots = dict() # include path -> root
includes_unique = set(itertools.chain.from_iterable(includes.values()))
def find_in_root(include, root, include_paths, cache=None):
for include_path in include_paths:
full_path = os.path.join(include_path, include)
rel = os.path.relpath(full_path, root)
if cache:
if rel in cache[root]:
return rel
elif os.path.exists(full_path):
return rel
return False
def find_in_roots(include, root_includepaths, cache=False):
for root, include_paths in root_includepaths:
rel = find_in_root(include, root, include_paths, cache)
if rel:
return root, rel
return False, False
for include in includes_unique:
# first we search within the input roots, then in the external roots
root, relpath = find_in_roots(include, input_include_paths.items(), files)
if not root:
root, relpath = find_in_roots(include, external_include_paths.items())
if root:
includes_roots[include] = root, relpath
not_found = defaultdict(list)
for (root, filepath), includepaths in list(includes.items()):
includes[(root, filepath)] = []
for include in includepaths:
root_path = False
if not args.no_include_current:
# look in current folder and prefer this over the other results
rel = find_in_root(include, root,
[os.path.join(root, os.path.dirname(filepath))], files)
if rel:
root_path = root, rel
if not root_path:
root_path = includes_roots.get(include)
if not root_path and args.fuzzy:
filename = os.path.basename(include)
if filename in filemap:
res = filemap[filename]
if len(res) > 1:
print('WARNING: ignoring fuzzy result as multiple '
'{} candidates were found (from {}): {}'.\
format(filename, filepath, [p for _,p in res]),
file=sys.stderr)
else:
root_path = res[0]
if root_path:
includes[(root, filepath)].append((root_path[0],root_path[1]))
else:
not_found[include].append(filepath)
if not_found:
print('\nWARNING: some includes could not be found:\n', file=sys.stderr)
for include in sorted(not_found.keys()):
print('{} not found'.format(include), file=sys.stderr)
if args.verbose:
for filepath in sorted(not_found[include]):
print(' from {}'.format(filepath), file=sys.stderr)
# Unify roots when a file was found over multiple roots.
# This happens when an include search path is given that is above
# an internal root.
roots = input_roots.union(external_roots)
nested_roots = list(filter(lambda r: is_subdir(*r), itertools.product(roots, roots)))
if nested_roots:
print('going to unify paths as there are nested roots', file=sys.stderr)
def move_root(subroot, root, filepath):
full = os.path.join(root, filepath)
if is_subdir(full, subroot) or os.path.dirname(full) == subroot:
rel = os.path.relpath(full, subroot)
print('moving root: {} -> {} for {}'.format(root, subroot, filepath), file=sys.stderr)
return (subroot, rel)
else:
return (root, filepath)
for subroot,root in nested_roots:
# the strategy is to move all includes from root to the subroot if they
# are actually within the subroot
for rf,includepaths in includes.items():
includes[rf] = [move_root(subroot,root,filepath) if root_ == root else (root_,filepath)
for root_,filepath in includepaths]
# merge .h/.c files if requested
if args.merge == 'module':
# The tricky part is: how do we know which files belong together?
# Obviously this is only possible if there is a 1-1 relationship
# in naming of the .c/.h files, that is the base is the same.
# Also, the .h file must be included in the matching .c file.
# We merge transitive dependencies of the same base name
# into the including .c file entry, thereby collapsing
# the dependencies of the matching files.
def find_matches(base, includepaths):
''' returns a list of (root,filepath) items '''
if not includepaths:
return []
matches = ((root,filepath) for (root,filepath) in includepaths
if os.path.splitext(os.path.basename(filepath))[0] == base)
return itertools.chain(matches,
itertools.chain.from_iterable(
find_matches(base, includes[match])
for match in matches)
)
for (root,filepath),includepaths in list(includes.items()):
if (root,filepath) not in includes:
# already merged
continue
filename = os.path.basename(filepath)
base,ext = os.path.splitext(filename)
if not ext.startswith('.c'):
continue
# Recursively get all includes with matching base name
# starting from the current c file.
# This becomes the set of files to merge into the including .c entry.
# Recursion only follows paths where the base name matches,
# that is, a.c -> a.h -> a.inc will be picked up, but not
# a.c -> b.h -> a.inc.
# Cyclic imports will lead to an error.
matches = set(find_matches(base, includepaths))
deps = itertools.chain.from_iterable(includes.get(match, []) for match in matches)
includes[(root,filepath)] = list((set(includepaths) | set(deps)) - matches)
for match in matches:
if match in includes:
del includes[match]
# move old references to the merged module
for k, includepaths in includes.items():
if match in includepaths:
includes[k] = [(root,filepath) if dep == match else dep
for dep in includepaths]
# remove file extensions as these don't make sense anymore now
newincludes = dict()
for (root1,path1),includepaths in includes.items():
newincludes[(root1,os.path.splitext(path1)[0])] =\
[(root2,os.path.splitext(path2)[0]) for (root2,path2) in includepaths]
includes = newincludes
# return dependencies as ((root,path),(root,path)) tuples
deps = []
dirs = set()
for (root,filepath),includepaths in includes.items():
deps.append(((root,filepath),(None,None)))
directory = os.path.dirname(os.path.join(root, filepath))
if directory not in dirs:
dirs.add(directory)
deps.append(((root,os.path.dirname(filepath)),(None,None)))
for root_,filepath_ in includepaths:
deps.append(((root,filepath),(root_,filepath_)))
directory = os.path.dirname(os.path.join(root_, filepath_))
if directory not in dirs:
dirs.add(directory)
deps.append(((root_,os.path.dirname(filepath_)),(None,None)))
return deps
# ==== nutszebra/multimodal_word2vec | word2vec/downloadCIFAR100.py | license: mit ====
#!/usr/bin/env python
from six.moves.urllib import request
import cPickle as pickle
import os
import cv2
import subprocess
import re
import numpy as np
picBase = "/mnt/s3pic/cifar10/"
def save_object(obj, filename):
with open(filename, 'wb') as output:
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
def checkExistance(path):
if os.path.exists(path):
return True
else:
return False
def makeDirectory(path):
if not checkExistance(path):
os.makedirs(path)
def downloadPic(url, name):
cmd = "wget " + url + " -O " + name + " -q"
subprocess.call(cmd, shell=True)
def extractExtension(name):
return re.findall(r"^.*(\..*)$", name)[0]
def moveFile(path, name):
cmd = "mv " + name + " " + path
subprocess.call(cmd, shell=True)
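# Download and unpack the CIFAR-100 python archive, unpickle train/test/meta, and
# rewrite multi-word fine-label names (e.g. 'aquarium_fish') to single-word aliases
# (e.g. 'fish'); returns the (train, test, tag) dicts.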
def cifar100Extract():
makeDirectory(picBase)
makeDirectory(picBase + "train")
makeDirectory(picBase + "test")
makeDirectory(picBase + "label")
if not os.path.exists("cifar-100-python"):
request.urlretrieve(
"http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz",
"./cifar-100-python.tar.gz"
)
cmd = "tar -xvzf cifar-100-python.tar.gz"
subprocess.call(cmd, shell=True)
cmd = "rm -r cifar-100-python.tar.gz"
subprocess.call(cmd, shell=True)
def unpickle(file):
import cPickle
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
return dict
train = unpickle("cifar-100-python/train")
test = unpickle("cifar-100-python/test")
tag = unpickle("cifar-100-python/meta")
tagWith_ = ['aquarium_fish',
'lawn_mower',
'maple_tree',
'oak_tree',
'palm_tree',
'pickup_truck',
'pine_tree',
'sweet_pepper',
'willow_tree']
tagAlter = ["fish",
"lawnmower",
"maple",
"oak",
"palm",
"truck",
"pine",
"paprika",
"willow"]
index = [tag["fine_label_names"].index(with_) for with_ in tagWith_]
count = 0
for i in index:
tag["fine_label_names"][i] = tagAlter[count]
count = count + 1
y_train = {}
y_test = {}
x_test = test['data']
x_test = x_test.reshape(len(x_test),3,32,32)
x_train = train['data']
x_train= x_train.reshape(len(x_train),3,32,32)
# for x in zip(x_test, test["filenames"], test["fine_labels"]):
# cv2.imwrite(picBase + "test/" + x[1], x[0].transpose(1,2,0)[:,:,::-1].copy())
# y_test[x[1]] = x[2]
# for x in zip(x_train, train["filenames"], train["fine_labels"]):
# cv2.imwrite(picBase + "train/" + x[1], x[0].transpose(1,2,0)[:,:,::-1].copy())
# y_train[x[1]] = x[2]
# save_object(y_test, picBase + "label/y_test.pkl")
# save_object(y_train, picBase + "label/y_train.pkl")
return (train, test, tag)
"""
In [38]: tag.keys()
Out[38]: ['fine_label_names', 'coarse_label_names']
In [40]: train.keys()
Out[40]: ['data', 'batch_label', 'fine_labels', 'coarse_labels', 'filenames'
In [41]: len(train["data"])
Out[41]: 50000
In [42]: len(train["data"][0])
Out[42]: 3072 // it means 32*32*3
"""
if __name__ == '__main__':
# x_train, y_train, x_test, y_test, tag = cifar100Extract()
train, test, tag = cifar100Extract()
# ==== EMATech/autopower | autopower.py | license: gpl-3.0 ====
#!/bin/env python2
"""
Automate powered loudspeakers power with EATON UPS from Denon DN-500AV pre-amplifier state
"""
from twisted.internet import reactor
from twisted.internet.protocol import ClientFactory
from twisted.conch.telnet import TelnetTransport, TelnetProtocol # Unavailable in Python3, yet
from PyNUT import PyNUTClient
class DenonProtocol(TelnetProtocol):
ups_name = 'nutdev1' # TODO: store in a configuration file
    ups_var = "outlet.2.switchable"  # 'on' means power down, 'off' means power up
ups_username = 'admin' # TODO: store in a configuration file
ups_userpass = 'ups' # TODO: store securely? in a configuration file
def connectionMade(self):
# Subscribe to the power state
self.transport.write("PW?\n")
def dataReceived(self, bytes):
ups = PyNUTClient(login=self.ups_username, password=self.ups_userpass)
if 'PWON' in bytes:
# Enable UPS sockets
ups.SetRWVar(ups=self.ups_name, var=self.ups_var, value='no')
if 'PWSTANDBY' in bytes:
# Disable UPS sockets
ups.SetRWVar(ups=self.ups_name, var=self.ups_var, value='yes')
class TelnetFactory(ClientFactory):
def buildProtocol(self, addr):
return TelnetTransport(DenonProtocol)
if __name__ == '__main__':
"""
The amplifier uses a Telnet interface (port 23) to send and receive serial commands
We can subscribe to the power state using PW?\r
The reply can be either PWON or PWSTANDBY
The UPS is an EATON powerstation
It exposes an interface through NUT to control 2 power sockets
We want them to follow the amp's state
"""
host = '192.168.1.10' # TODO: store in a configuration file
port = 23
reactor.connectTCP(host, port, TelnetFactory())
reactor.run()
| gpl-3.0 | 7,586,736,075,752,400,000 | 31.8 | 95 | 0.687916 | false | 3.475915 | false | false | false |
jigarkb/CTCI | CTCI/chapter_2/2.3.py | 2 | 1156 | # Delete Middle Node: Implement an algorithm to delete a node in the middle of
# singly linked list, given only access to that node
from LinkedList import Node, LinkedList
def delete_middle_node(node_to_delete):
if node_to_delete.next is None:
raise Exception("Invalid node to delete")
node_to_delete.data = node_to_delete.next.data
node_to_delete.next = node_to_delete.next.next
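# Why this works: without access to the previous node we cannot unlink the target
# directly, so we copy the successor's data into it and unlink the successor
# instead. This runs in O(1), but by design it cannot delete the tail node (hence
# the exception above).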
if __name__ == '__main__':
ll = LinkedList()
continue_ans = raw_input("Do you want to add new node? (y/n): ")
to_delete = "n"
while continue_ans == 'y':
data = raw_input("Enter data for the new node: ")
data_node = Node(data)
ll.append_node(data_node)
if to_delete == 'n':
to_delete = raw_input("Is this the one you want to remove? (y/n): ")
if to_delete == 'y':
node_to_delete = data_node
        continue_ans = raw_input("Do you want to add new node? (y/n): ")
    if to_delete != 'y':
        raise SystemExit("No node was selected for deletion")
    print "Initial linked list: {}".format(ll)
print "Middle node to delete: {}".format(node_to_delete)
delete_middle_node(node_to_delete)
print "Linked list after deletion: {}".format(ll)
| mit | -1,494,367,965,266,206,200 | 37.533333 | 80 | 0.623702 | false | 3.420118 | false | false | false |
agalitsyn/play | 6-web-stream-server/server.py | 1 | 1507 | # http://flask.pocoo.org/docs/patterns/fileuploads/
import os
from flask import Flask, request, redirect, url_for, send_from_directory
from werkzeug import secure_filename
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
    # check the extension against the whitelist (case-insensitive);
    # a plain "last three characters" check would reject .jpeg files
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
file = request.files['file']
if file and allowed_file(file.filename):
print '**found file', file.filename
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            # for a browser client, wrap this url_for(...) call in redirect()
return url_for('uploaded_file',
filename=filename)
return '''
<!doctype html>
<title>Upload new File</title>
<h1>Upload new File</h1>
<form action="" method=post enctype=multipart/form-data>
<p><input type=file name=file>
<input type=submit value=Upload>
</form>
'''
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename)
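# Example client call (sketch, assuming the default Flask dev server address):
#   curl -F "file=@photo.jpg" http://localhost:5000/
# The response body is the /uploads/<filename> URL that uploaded_file() serves.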
if __name__ == '__main__':
app.run(debug=True) | gpl-3.0 | 5,452,678,844,395,169,000 | 34.069767 | 87 | 0.625083 | false | 3.79597 | false | false | false |
fginter/dep_search | bracketed2dsearch.py | 1 | 4894 | import six
assert six.PY3, "Please run me with Python3"
import ply.lex as lex
import ply.yacc as yacc
import readline
import urllib.parse
import requests
import sys
class Node:
def __init__(self,dtype,children):
self.dtype=dtype
self.children=children
def dsearch_ex_lin(self):
#cases like [dep xxx xxx xxx xxx]
assert sum(1 for c in self.children if isinstance(c,str))==len(self.children)
exprs=[]
for root_idx,root in enumerate(self.children):
expr=['"'+root+'"']
for other_idx,other in enumerate(self.children):
if other_idx<root_idx:
expr.append('>lin@L "{}"'.format(other))
elif other_idx>root_idx:
expr.append('>lin@R "{}"'.format(other))
exprs.append("("+(" ".join(expr))+")")
return "("+(" | ".join(exprs))+")"
def dsearch_ex(self):
global macros
        # Pick one of the STRING children to act as the root of the generated query
possible_roots=[c for c in self.children if isinstance(c,str)]
if len(possible_roots)==len(self.children) and len(self.children)>1:
return self.dsearch_ex_lin()
elif len(possible_roots)>1:
raise ValueError("Unsupported")
assert len(possible_roots)==1
for r in possible_roots:
bits=["(",macros.get(r,'"'+r+'"')] #Bits of the expression
for c in self.children:
if c==r:
continue
if isinstance(c,str):
bits.extend(['>',macros.get(c,'"'+c+'"')])
elif isinstance(c,Node):
if c.dtype=="dep" or c.dtype=="_":
bits.append(' > ')
else:
bits.append(' >'+c.dtype)
bits.append(c.dsearch_ex())
else:
assert False, repr(c)
bits.append(")")
return " ".join(bits)#I guess I should then generate the others too?
### ---------- lexer -------------
# List of token names. This is always required
tokens = ('LBRAC','RBRAC','STRING')
def t_LBRAC(t):
r'\['
return t
def t_RBRAC(t):
r'\]'
return t
def t_STRING(t):
r'[^\s\[\]]+'
return t
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
# Error handling rule
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
### --------- grammar -----------
def p_expressions(p):
'''expressions : expression
| expression expressions
'''
if len(p)==2:
p[0]=[p[1]]
elif len(p)==3:
p[0]=[p[1]]+p[2]
else:
assert False
def p_expr(p):
'''expression : tree
| STRING
'''
p[0]=p[1]
def p_tree(p):
'tree : LBRAC STRING expressions RBRAC'
p[0]=Node(p[2],p[3])
def p_error(p):
print("Syntax error in input!")
parser = yacc.yacc()
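# Rough illustration of the round trip (hypothetical input; output spacing
# approximate): parsing '[nsubj VP [obj NP-Nom]]' yields one Node whose
# dsearch_ex() builds a dep_search query rooted on the VP, roughly
# '( VERB  >obj ( (NOUN&Nom) ) )' once the macros defined below are substituted.
#   trees = parser.parse('[nsubj VP [obj NP-Nom]]')
#   query = trees[0].dsearch_ex()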
def get_query_url(q):
url="http://bionlp-www.utu.fi/dep_search/query"
url+="?"+urllib.parse.urlencode({"search":q,"db":"RU160M","case_sensitive":"False","hits_per_page":"50"})
return url
def download(qry,maxnum,fname):
data={"search":qry,"db":"RU160M","case":"False","retmax":maxnum}
result=requests.get("http://epsilon-it.utu.fi/dep_search_webapi",params=data)
print(result.url)
with open(fname,"w") as f:
print(result.text,file=f)
### ---------- run this ------------
# * NP-Nom = NOUN Case=Nom
# * XP = any phrasal category = NOUN, ADJ, ADV, PRON, VERB
# * PRON-Dat = PRON Case=Dat
# * NOUN-Nom = NOUN Case=Nom
# * VP = VERB
# * AP = ADJ
# * VP-Inf = VERB VerbForm=Inf
# * Imper = Mood=Imp
# * dep = any dependency label
macros_def="""
NP-Nom : (NOUN&Nom)
NP-Dat : (NOUN&Dat)
XP : (NOUN|ADJ|ADV|PRON|VERB)
PRON-Dat : (PRON&Dat)
NOUN-Nom : (NOUN&Nom)
VP : VERB
AP : ADJ
VP-Inf : (VERB&Inf)
VP-Imper : (VERB&Mood=Imp)
V-Past : (VERB&Past)
Imper : (Mood=Imp)
Cl : (VERB >nsubj _)
_ : _
"""
macros={} #macro -> replacement
for repl in macros_def.strip().split("\n"):
src,trg=repl.split(" : ",1)
macros[src]=trg
expressions={} #filename -> list of expressions
for line in sys.stdin:
line=line.strip()
if not line:
continue
if line.startswith("["):
#an expression
expression_list.append(line)
else: #construction name
line=line.replace(" ","_")
expression_list=[]
expressions[line]=expression_list
for fname,expression_list in sorted(expressions.items()):
for expression in expression_list:
print("Parsing expression", expression, file=sys.stderr, flush=True)
node = parser.parse(expression)
qry=node[0].dsearch_ex()
print(qry)
download(qry,5,"dl/"+fname+".conllu")
| gpl-3.0 | 2,211,182,918,179,198,000 | 25.743169 | 109 | 0.547814 | false | 3.284564 | false | false | false |
google-research/falken | service/learner/assignment_processor.py | 1 | 30183 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Handles assignment processing."""
import enum
import json
import time
import traceback
import uuid
from learner import data_fetcher
from learner import file_system
from learner import model_exporter
from learner import model_manager
from learner import stats_collector
from learner import storage
from learner.brains import brain_cache as brain_cache_module
from learner.brains import continuous_imitation_brain
from learner.brains import demonstration_buffer
from log import falken_logging
# pylint: disable=g-bad-import-order
import common.generate_protos # pylint: disable=unused-import
import action_pb2
import episode_pb2
import data_store_pb2 as falken_schema_pb2
# How long to work on a single assignment in a single learner at most.
_MAX_ASSIGNMENT_WORK_TIME_SECS = 60*60
_CHUNK_STATE_TO_STEP_PHASE = {
episode_pb2.UNSPECIFIED: demonstration_buffer.StepPhase.UNSPECIFIED,
episode_pb2.IN_PROGRESS: demonstration_buffer.StepPhase.IN_PROGRESS,
episode_pb2.SUCCESS: demonstration_buffer.StepPhase.SUCCESS,
episode_pb2.FAILURE: demonstration_buffer.StepPhase.FAILURE,
episode_pb2.ABORTED: demonstration_buffer.StepPhase.ABORTED,
episode_pb2.GAVE_UP: demonstration_buffer.StepPhase.GAVE_UP,
}
_DEFAULT_LEARNER_HPARAMS = {
# Should learning continue or restart when new data is received?
'continuous': True,
# The minimum interval between model saves, measured in batches trained.
# If set to None, the model is never saved.
'save_interval_batches': 20_000,
# Minimum number of (steps * batch_size) before finishing training (or
# restarting). If None, we don't require a minimum amount of steps to train.
'min_train_examples': None,
# Maximum number of (steps * batch_size). If None, we don't require
# a maximum amount of steps to train.
'max_train_examples': None,
# Export models in the main thread.
'synchronous_export': False,
}
class Error(Exception):
"""Base class for exceptions."""
class HParamError(Exception):
"""Raised if there is an unknown hyperparameter in the assignment."""
class NoDataError(Error):
"""Learner could not find new data to train on."""
class ExceededMaxWorkTimeError(Error):
"""Learner exceeded maximum work time on a single assignment."""
class AssignmentStats:
"""Returns stats about assignment processing for testing / debugging.
In contrast to StatsCollector these are stats that are use to track
information about an entire assignment rather than information about
each trained model.
Attributes:
queries_completed: Number of Spanner fetch chunk queries completed.
frames_added: Number of frames added to the brain from fetched
demonstration data.
models_recorded: Number of models saved during this assignment.
brain_train_steps: Number of calls to brain.train() during the assignment.
num_restarts: Number of times training has been restarted from scratch
with a newly initialized model.
brain_global_step: The value of the global step variable. (Resets with
restarts.)
"""
def __init__(self):
self.queries_completed = 0
self.frames_added = 0
self.models_recorded = 0
self.brain_train_steps = 0
self.num_restarts = 0
self.brain_global_step = 0
# Lists the possible processing assignment status, e.g. those that
# ProcessAssignmentUntil can handle.
class ProcessAssignmentStatus(enum.Enum):
"""Possible stopping points for assignment processing.
Can be used in calls to ProcessAssignmentUntil.
"""
# A step has been processed in the current assignment.
PROCESSED_STEP = 1
# A step has been processed in the current assignment, and training should
# restart before processing the next step.
PROCESSED_STEP_NEEDS_RESTART = 2
# A model has been saved.
SAVED_MODEL = 3
# The assignment is about to fetch data. Useful for making sure more data
# is available when it fetches, during a test.
WILL_FETCH_DATA = 4
# The assignment finished processing.
FINISHED = 5
def _step_generator(episode_chunks):
"""Yields steps from EpisodeChunks."""
for chunk in episode_chunks:
for i, step in enumerate(chunk.data.steps):
step_phase = demonstration_buffer.StepPhase.IN_PROGRESS
if chunk.chunk_id == 0 and i == 0:
# First step of first chunk is the start of the episode.
step_phase = demonstration_buffer.StepPhase.START
elif i == len(chunk.data.steps) - 1:
# Last step of any chunk has equivalent phase as the chunk state.
step_phase = _CHUNK_STATE_TO_STEP_PHASE.get(
chunk.data.episode_state,
demonstration_buffer.StepPhase.UNSPECIFIED)
if step_phase == demonstration_buffer.StepPhase.UNSPECIFIED:
raise ValueError(
f'Unexpected chunk state: {chunk.data.episode_state}.')
yield (chunk.episode_id, chunk.chunk_id, step.observation,
step.reward.reward_value, step_phase, step.action,
chunk.created_micros)
def _get_hparams(assignment_id):
"""Parse a hyperparemeters dictionary from an assignment ID.
Args:
assignment_id: Assignment ID to parse. If this is "default" an empty
dictionary is returned.
Returns:
Dictionary containing the parsed hyperparameters.
Raises:
HParamError: If the assignment is malformed.
"""
falken_logging.info(f'GetHParams got assignment_id {assignment_id}')
if assignment_id == 'default':
return {}
try:
return json.loads(assignment_id)
except json.decoder.JSONDecodeError as error:
error_message = (f'Failed to parse assignment ID: {error}\n' +
assignment_id)
falken_logging.error(error_message)
raise HParamError(error_message)
def populate_hparams_with_defaults_and_validate(hparams):
"""Construct hyperparameters for brain creation.
Args:
hparams: Hyperparameters that override the default learner hyperparameters.
Returns:
Hyperparameters dictionary that can be used to create a brain.
Raises:
    HParamError: If the provided hyperparameters overlap with default learner
parameters or they're unknown.
"""
result_hparams = continuous_imitation_brain.BCAgent.default_hparams()
for hparam in _DEFAULT_LEARNER_HPARAMS:
if hparam in result_hparams:
raise HParamError(f'Learner HParam overlaps with brain HParam: {hparam}')
result_hparams.update(_DEFAULT_LEARNER_HPARAMS)
for hparam in hparams:
if hparam not in result_hparams:
raise HParamError(f'Unknown hparam in assignment: {hparam}')
result_hparams.update(hparams)
return result_hparams
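# Illustrative only (hypothetical assignment ID): _get_hparams('{"batch_size": 500}')
# returns {'batch_size': 500}, which populate_hparams_with_defaults_and_validate
# then layers over the brain defaults plus _DEFAULT_LEARNER_HPARAMS, raising
# HParamError for any key that is not already present in those defaults.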
class AssignmentProcessor:
"""Trains models based on incoming Assignments."""
# How often to check the DB for new data.
_DB_QUERY_INTERVAL_SECS = 10.0
# How long to wait for training data
_WAIT_FOR_DATA_BRAIN_SECS = 60
def __init__(self,
read_assignment: falken_schema_pb2.Assignment,
filesys_helper: file_system.FileSystem,
storage_helper: storage.Storage,
brain_cache: brain_cache_module.BrainCache,
get_session_state=None,
write_assignment=None,
always_block_when_fetching=False):
"""Create a new assignment processor.
Args:
read_assignment: The Assignment proto received from the queue.
filesys_helper: A filesystem.Filesystem object.
storage_helper: A storage.Storage helper.
brain_cache: BrainCache instance.
get_session_state: Callable which takes no arguments and returns
storage.SessionState for the assignment. When this is None
the session state is retrieved from the session associated with
read_assignment in the database.
write_assignment: Assignment proto used to write assignment results.
If this is None, results are written to read_assignment.
always_block_when_fetching: If True, always block during fetching. Useful
for removing racing conditions in tests.
"""
self._brain_cache = brain_cache
self._read_assignment = read_assignment
self._write_assignment = (
write_assignment if write_assignment else read_assignment)
falken_logging.info(f'Reading from {self._read_assignment}, '
f'writing to {self._write_assignment}')
self._episode_id = ''
self._episode_chunk_id = 0
self._most_recent_demo_micros = 0
self._file = filesys_helper
self._storage = storage_helper
self._brain = None
self._hparams = None
self._model_manager = None
self._always_block_when_fetching = always_block_when_fetching
self._stats = stats_collector.StatsCollector(
self._write_assignment.project_id,
self._write_assignment.brain_id,
self._write_assignment.session_id,
self._write_assignment.assignment_id)
self._assignment_stats = AssignmentStats()
if get_session_state:
self._get_session_state = get_session_state
else:
def _get_session_state():
return self._storage.get_session_state(
self._read_assignment.project_id,
self._read_assignment.brain_id,
self._read_assignment.session_id)
self._get_session_state = _get_session_state
def __enter__(self):
"""Start processing an assignment."""
return self
def __exit__(self, *unused_args):
"""Stop processing an assignment and clean up temporary storage."""
self._file.wipe_checkpoints(self._write_assignment)
@property
def stats(self):
"""Returns StatsCollector about the processing task."""
return self._stats
@property
def assignment_stats(self):
"""Returns AssignmentStats statistics about the processing task."""
return self._assignment_stats
@property
def _min_train_batches(self):
"""Min amount of batches to train (or None if unrestricted)."""
min_train_examples = self._hparams['min_train_examples']
if min_train_examples is None:
return None
return int(min_train_examples / self._hparams['batch_size'])
@property
def _max_train_batches(self):
"""Max amount of batches to train (or None if unlimited)."""
max_train_examples = self._hparams['max_train_examples']
if max_train_examples is None:
return None
return int(max_train_examples / self._hparams['batch_size'])
def _create_brain(self):
"""Creates a Brain."""
brain_spec = self._storage.get_brain_spec(
self._read_assignment.project_id, self._read_assignment.brain_id)
falken_logging.info('Creating brain.',
brain_spec=brain_spec,
project_id=self._write_assignment.project_id,
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id)
if not brain_spec:
raise ValueError(
f'Brain spec not found for project_id: '
f'{self._read_assignment.project_id} and '
f'brain_id: {self._read_assignment.brain_id}.')
checkpoint_path = self._file.create_checkpoints_path(self._write_assignment)
summary_path = self._file.create_summary_path(self._write_assignment)
return self._brain_cache.GetOrCreateBrain(
_get_hparams(self._read_assignment.assignment_id),
brain_spec, checkpoint_path, summary_path)
def _add_episode_chunks(self, chunks):
"""Insert new EpisodeData into the brain's replay buffer.
Args:
chunks: A batch of EpisodeChunks
Returns:
The number of demo frames contained in the provided chunks.
"""
falken_logging.info('Adding {} new chunks.'.format(len(chunks)),
project_id=self._write_assignment.project_id,
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id)
demo_frames = 0
for (episode_id, chunk_id, observation, reward, phase, action,
timestamp) in _step_generator(chunks):
self._episode_id = episode_id
self._episode_chunk_id = chunk_id
self.assignment_stats.frames_added += 1
self._brain.record_step(observation, reward, phase, episode_id, action,
timestamp)
if action.source == action_pb2.ActionData.HUMAN_DEMONSTRATION:
demo_frames += 1
if timestamp > self._most_recent_demo_micros:
self._most_recent_demo_micros = timestamp
falken_logging.info(
f'Finished adding {len(chunks)} new chunks with {demo_frames} '
f'demo frames',
project_id=self._write_assignment.project_id,
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id)
return demo_frames
def _chunk_generator(self):
"""Generates lists of chunks by querying the database.
Yields:
List of new chunks if available, None otherwise.
"""
earliest_timestamp_micros = 0
# Fetch data associated with ancestors AND with the current session.
session_ids = self._storage.get_ancestor_session_ids(
self._read_assignment.project_id,
self._read_assignment.brain_id,
self._read_assignment.session_id)
session_ids.add(self._read_assignment.session_id)
def generate_chunk_key(chunk):
"""Returns a unique string identifier for an episode chunk proto.
Args:
chunk: data_store_pb2.EpisodeChunk proto.
Returns:
Unique identifier for the chunk proto.
"""
return f'{chunk.session_id}_{chunk.episode_id}_{chunk.chunk_id}'
previous_chunk_keys = set()
while True: # Yield new chunks, potentially forever.
new_chunks = []
new_chunk_keys = set()
for chunk in self._storage.get_episode_chunks(
self._read_assignment.project_id,
self._read_assignment.brain_id,
session_ids, earliest_timestamp_micros):
chunk_key = generate_chunk_key(chunk)
if chunk_key not in previous_chunk_keys:
          # Update earliest_timestamp_micros to avoid refetching data.
earliest_timestamp_micros = max(earliest_timestamp_micros,
chunk.created_micros)
new_chunks.append(chunk)
new_chunk_keys.add(chunk_key)
self.assignment_stats.queries_completed += 1
if new_chunks:
previous_chunk_keys = new_chunk_keys
yield new_chunks
else:
yield None
def _session_complete(self):
"""Returns true if the session is stale or ended."""
session_state = self._get_session_state()
complete = session_state in (
storage.SessionState.STALE, storage.SessionState.ENDED)
if complete:
falken_logging.info(
'Session complete, with state: '
f'{storage.SessionState.as_string(session_state)}',
project_id=self._write_assignment.project_id,
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id)
return complete
def _training_complete(self):
"""Returns true if training on the assignment is complete."""
if self._session_complete():
falken_logging.info(
'Stopping training, reason: session has completed.',
project_id=self._write_assignment.project_id,
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id)
return True
if self._min_train_batches is not None and (
self._brain.tf_agent.train_step_counter < self._min_train_batches):
# Unless the session is closed, we train the brain for min steps.
return False
if self._model_manager and self._model_manager.should_stop():
falken_logging.info(
f'Stopping training, reason: {self._model_manager.should_stop()}',
project_id=self._write_assignment.project_id,
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id)
return True
if self._max_train_batches is not None and (
self._brain.global_step >= self._max_train_batches):
falken_logging.info(
'Stopping training, reason: Exceeded max_train_batches of '
f'{self._max_train_batches}',
project_id=self._write_assignment.project_id,
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id)
return True
return False
def _save_and_evaluate_policy(self, exporter):
"""Saves the current policy and evaluates it vs current best.
Args:
exporter: A ModelExporter.
Returns:
The ID of the model that was written.
"""
if self._session_complete():
falken_logging.info(
'Skipping model export on completed session.',
project_id=self._write_assignment.project_id,
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id)
return
falken_logging.info(
'Writing tmp model.',
project_id=self._write_assignment.project_id,
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id)
self._stats.demonstration_frames = self._brain.num_train_frames
self._stats.evaluation_frames = self._brain.num_eval_frames
model_id = str(uuid.uuid4())
with self._stats.record_event(
stats_collector.FALKEN_EXPORT_CHECKPOINT_EVENT_NAME):
tmp_checkpoint_path = self._file.create_tmp_checkpoint_path(
self._write_assignment, model_id)
self._brain.save_checkpoint(tmp_checkpoint_path)
falken_logging.info(
'Finished writing tmp model.',
project_id=self._write_assignment.project_id,
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id)
with self._stats.record_event(stats_collector.FALKEN_EVAL_EVENT_NAME):
evals = list(self._brain.compute_full_evaluation())
training_examples_completed = (
self._brain.global_step * self._hparams['batch_size'])
# The hparam can be set explicitly to None so we need to check for it.
max_training_examples = (
self._hparams['max_train_examples']
if self._hparams.get('max_train_examples', None) else 0)
exporter.export_model(tmp_checkpoint_path, evals, self._stats, model_id,
self._episode_id, self._episode_chunk_id,
training_examples_completed, max_training_examples,
self._most_recent_demo_micros)
self.assignment_stats.models_recorded += 1
# Compare new model to previous best and update accordingly.
self._model_manager.record_new_model(model_id, evals)
return model_id
def _fetch_data(self, fetcher, initial_wait_for_data):
"""Fetches training / eval data from a fetcher and adds it to the brain.
Args:
fetcher: A data_fetcher.DataFetcher.
initial_wait_for_data: Whether to wait for data on the first queue fetch.
Returns:
The number of demonstration frames that were added.
"""
falken_logging.info(
'Checking for new training data.',
project_id=self._write_assignment.project_id,
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id)
first_fetch = True
demo_frames = 0
while True:
try:
block, timeout = False, None
if initial_wait_for_data and first_fetch:
# Wait longer on the first fetch.
block, timeout = True, self._WAIT_FOR_DATA_BRAIN_SECS
# TODO(lph): Change datafetcher to auto-block on first query.
elif self._always_block_when_fetching:
# Short block for other fetches.
block, timeout = True, 5
first_fetch = False
chunks = fetcher.get(block=block, timeout=timeout)
demo_frames += self._add_episode_chunks(chunks)
except data_fetcher.Empty:
# If the underlying SQL queries did not complete, then we're not
# waiting long enough for data to arrive.
if (initial_wait_for_data and
not self.assignment_stats.queries_completed):
# We are in the first loop iteration and have not completed any
# queries after _WAIT_FOR_DATA_BRAIN_SECS.
raise NoDataError('Could not query DB for chunks.')
return demo_frames
def _process_step(self, fetcher):
"""Create and train a model.
Args:
fetcher: A data_fetcher.DataFetcher object to pull fresh data from.
Yields:
Pairs (ProcessAssignmentStatus, status_metadata). This allows for
functions like ProcessAssignmentUntil to pause and resume Process.
Raises:
ExceededMaxWorkTimeError: If assignment takes too long to process.
"""
if not self._brain:
self._brain, self._hparams = self._create_brain()
self._stats.training_steps = self._hparams['training_steps']
self._stats.batch_size = self._hparams['batch_size']
self._model_manager = model_manager.ModelManager()
else:
self._brain.reinitialize_agent()
self._model_manager.reset_counters()
with model_exporter.ModelExporter(self._write_assignment, self._storage,
self._file, self._model_manager,
self._brain.hparams) as exporter:
saved_model_id = None
loop_counter = 0
restart_requested = False # Whether to restart.
# Enter main training loop.
while not self._training_complete():
if restart_requested and (
self._min_train_batches is None or
self._brain.tf_agent.train_step_counter >=
self._min_train_batches):
if saved_model_id is None:
# Save if we didn't auto-save last loop iteration.
saved_model_id = self._save_and_evaluate_policy(exporter)
yield ProcessAssignmentStatus.SAVED_MODEL, saved_model_id
falken_logging.info(
f'Restarting training after {loop_counter} iterations.',
project_id=self._write_assignment.project_id,
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id)
yield ProcessAssignmentStatus.PROCESSED_STEP_NEEDS_RESTART, None
return
time_elapsed = time.perf_counter() - self._start_timestamp
falken_logging.info(
f'{time_elapsed}s elapsed since start of assignment.',
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id,
assignment_id=self._write_assignment.assignment_id)
if time_elapsed > _MAX_ASSIGNMENT_WORK_TIME_SECS:
raise ExceededMaxWorkTimeError(
f'Assignment took too long. Started {time_elapsed} seconds ago.'
)
# Grab all data from the fetcher.
with self._stats.record_event(
stats_collector.FALKEN_FETCH_CHUNK_EVENT_NAME):
yield ProcessAssignmentStatus.WILL_FETCH_DATA, None
demo_frames = self._fetch_data(
fetcher,
initial_wait_for_data=(loop_counter == 0))
continuous = self._hparams['continuous']
falken_logging.info(
f'Finished data fetch, training iteration {loop_counter}. '
f'Got {demo_frames} new demo frames, continuous={continuous}',
project_id=self._write_assignment.project_id,
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id)
if not continuous and loop_counter and demo_frames:
restart_requested = True
falken_logging.info('Received new data, requesting a restart.',
project_id=self._write_assignment.project_id,
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id)
if not self._brain.num_train_frames:
falken_logging.error(
'No training frames available.',
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id,
assignment_id=self._write_assignment.assignment_id)
break
# Perform training.
with self._stats.record_event(
stats_collector.FALKEN_TRAIN_BRAIN_EVENT_NAME):
try:
falken_logging.info('Training brain.',
project_id=self._write_assignment.project_id,
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id)
self._brain.train()
self.assignment_stats.brain_train_steps += 1
self.assignment_stats.brain_global_step = self._brain.global_step
except Exception as e: # pylint: disable=broad-except
falken_logging.error(
f'Exception found when running _train_step: {e}.'
f'Traceback:\n{traceback.format_exc()}',
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id,
assignment_id=self._write_assignment.assignment_id)
raise e
batch_count = (self.assignment_stats.brain_train_steps *
self._hparams['training_steps'])
if (self._hparams['save_interval_batches'] is not None and
batch_count % self._hparams['save_interval_batches'] == 0):
saved_model_id = self._save_and_evaluate_policy(exporter)
yield ProcessAssignmentStatus.SAVED_MODEL, saved_model_id
else:
saved_model_id = None
loop_counter += 1
# End of main training loop.
if saved_model_id is None and self.assignment_stats.brain_train_steps:
# If the last loop didn't save, save now.
saved_model_id = self._save_and_evaluate_policy(exporter)
yield ProcessAssignmentStatus.SAVED_MODEL, saved_model_id
# End of exporter context
# Learner completed normally, no restart indicated.
yield ProcessAssignmentStatus.PROCESSED_STEP, None
def process(self):
"""Train one or multiple brains.
Yields:
Pairs (ProcessAssignmentStatus, status_metadata). This allows for
functions like ProcessAssignmentUntil to pause and resume Process.
"""
with self._stats.record_event(stats_collector.FALKEN_PROCESS_EVENT_NAME):
self._start_timestamp = time.perf_counter()
if self._session_complete():
falken_logging.info('Returning since assignment is '
'associated with closed or stale session.',
brain_id=self._read_assignment.brain_id,
session_id=self._read_assignment.session_id,
assignment_id=self._read_assignment.assignment_id)
return
falken_logging.info('Starting work on assignment.',
project_id=self._write_assignment.project_id,
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id,
assignment_id=self._write_assignment.assignment_id)
with self._stats.record_event(
stats_collector.FALKEN_MAIN_TRAINING_LOOP_EVENT_NAME):
with data_fetcher.DataFetcher(self._chunk_generator(),
self._DB_QUERY_INTERVAL_SECS) as fetcher:
has_next_step = True
while has_next_step:
# Actually do the work.
has_next_step = False
for status, metadata in self._process_step(fetcher):
if status == ProcessAssignmentStatus.PROCESSED_STEP_NEEDS_RESTART:
has_next_step = True
yield status, metadata
if has_next_step:
falken_logging.info(
'Restarting work on assignment.',
project_id=self._write_assignment.project_id,
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id,
assignment_id=self._write_assignment.assignment_id)
self.assignment_stats.num_restarts += 1
# Delete checkpoints to ensure that restarts start from scratch.
self._file.wipe_checkpoints(self._write_assignment)
if self.assignment_stats.brain_train_steps:
falken_logging.info(
'Completed assignment. '
f'Called brain.train {self.assignment_stats.brain_train_steps} '
'times.',
project_id=self._write_assignment.project_id,
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id,
assignment_id=self._write_assignment.assignment_id)
else:
# This should only happen in rare cases: A learner failed to ACK
# after training to completion, e.g., due to preemption of the
# learner at the end of training.
falken_logging.warn(
'Completed assignment without training.',
project_id=self._write_assignment.project_id,
brain_id=self._write_assignment.brain_id,
session_id=self._write_assignment.session_id,
assignment_id=self._write_assignment.assignment_id)
# Clean-up checkpoints dir.
self._file.wipe_checkpoints(self._write_assignment)
| apache-2.0 | -273,857,898,316,733,730 | 39.030504 | 80 | 0.654541 | false | 4.001458 | false | false | false |
bittner/django-cms | cms/templatetags/cms_admin.py | 1 | 9639 | # -*- coding: utf-8 -*-
from classytags.arguments import Argument
from classytags.core import Options, Tag
from classytags.helpers import InclusionTag
from cms.constants import PUBLISHER_STATE_PENDING
from cms.toolbar.utils import get_plugin_toolbar_js
from cms.utils.admin import render_admin_rows
from sekizai.helpers import get_varname
from django import template
from django.conf import settings
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
register = template.Library()
CMS_ADMIN_ICON_BASE = "%sadmin/img/" % settings.STATIC_URL
@register.simple_tag(takes_context=True)
def show_admin_menu_for_pages(context, pages):
request = context['request']
if 'cl' in context:
filtered = context['cl'].is_filtered or context['cl'].query
else:
filtered = False
site = context['cms_current_site']
language = context['preview_language']
return render_admin_rows(request, pages=pages, site=site, filtered=filtered, language=language)
class TreePublishRow(Tag):
name = "tree_publish_row"
options = Options(
Argument('page'),
Argument('language')
)
def render_tag(self, context, page, language):
if page.is_published(language) and page.publisher_public_id and page.publisher_public.is_published(language):
if page.is_dirty(language):
cls = "cms-pagetree-node-state cms-pagetree-node-state-dirty dirty"
text = _("unpublished changes")
else:
cls = "cms-pagetree-node-state cms-pagetree-node-state-published published"
text = _("published")
else:
if language in page.languages:
public_pending = page.publisher_public_id and page.publisher_public.get_publisher_state(
language) == PUBLISHER_STATE_PENDING
if public_pending or page.get_publisher_state(
language) == PUBLISHER_STATE_PENDING:
cls = "cms-pagetree-node-state cms-pagetree-node-state-unpublished-parent unpublishedparent"
text = _("unpublished parent")
else:
cls = "cms-pagetree-node-state cms-pagetree-node-state-unpublished unpublished"
text = _("unpublished")
else:
cls = "cms-pagetree-node-state cms-pagetree-node-state-empty empty"
text = _("no content")
return mark_safe(
'<span class="cms-hover-tooltip cms-hover-tooltip-left cms-hover-tooltip-delay %s" '
'data-cms-tooltip="%s"></span>' % (cls, force_text(text)))
register.tag(TreePublishRow)
@register.filter
def is_published(page, language):
if page.is_published(language) and page.publisher_public_id and page.publisher_public.is_published(language):
return True
else:
if language in page.languages and page.publisher_public_id and page.publisher_public.get_publisher_state(
language) == PUBLISHER_STATE_PENDING:
return True
return False
@register.filter
def is_dirty(page, language):
return page.is_dirty(language)
@register.filter
def all_ancestors_are_published(page, language):
"""
Returns False if any of the ancestors of page (and language) are
unpublished, otherwise True.
"""
page = page.parent
while page:
if not page.is_published(language):
return False
page = page.parent
return True
class CleanAdminListFilter(InclusionTag):
"""
used in admin to display only these users that have actually edited a page
and not everybody
"""
name = 'clean_admin_list_filter'
template = 'admin/cms/page/tree/filter.html'
options = Options(
Argument('cl'),
Argument('spec'),
)
def get_context(self, context, cl, spec):
choices = sorted(list(spec.choices(cl)), key=lambda k: k['query_string'])
query_string = None
unique_choices = []
for choice in choices:
if choice['query_string'] != query_string:
unique_choices.append(choice)
query_string = choice['query_string']
return {'title': spec.title, 'choices': unique_choices}
register.tag(CleanAdminListFilter)
@register.filter
def boolean_icon(value):
BOOLEAN_MAPPING = {True: 'yes', False: 'no', None: 'unknown'}
return mark_safe(
u'<img src="%sicon-%s.gif" alt="%s" />' % (CMS_ADMIN_ICON_BASE, BOOLEAN_MAPPING.get(value, 'unknown'), value))
@register.filter
def preview_link(page, language):
if settings.USE_I18N:
# Which one of page.get_slug() and page.get_path() is the right
# one to use in this block? They both seem to return the same thing.
try:
# attempt to retrieve the localized path/slug and return
return page.get_absolute_url(language, fallback=False)
        except Exception:
# no localized path/slug. therefore nothing to preview. stay on the same page.
# perhaps the user should be somehow notified for this.
return ''
return page.get_absolute_url(language)
class PageSubmitRow(InclusionTag):
name = 'page_submit_row'
template = 'admin/cms/page/submit_row.html'
def get_context(self, context):
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
basic_info = context.get('basic_info', False)
advanced_settings = context.get('advanced_settings', False)
change_advanced_settings = context.get('can_change_advanced_settings', False)
language = context.get('language', '')
filled_languages = context.get('filled_languages', [])
show_buttons = language in filled_languages
if show_buttons:
show_buttons = (basic_info or advanced_settings) and change_advanced_settings
context = {
# TODO check this (old code: opts.get_ordered_objects() )
'onclick_attrib': (opts and change
and 'onclick="submitOrderForm();"' or ''),
'show_delete_link': False,
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': False,
'show_save_and_continue': not is_popup and context['has_change_permission'],
'is_popup': is_popup,
'basic_info_active': basic_info,
'advanced_settings_active': advanced_settings,
'show_buttons': show_buttons,
'show_save': True,
'language': language,
'language_is_filled': language in filled_languages,
'object_id': context.get('object_id', None)
}
return context
register.tag(PageSubmitRow)
def in_filtered(seq1, seq2):
return [x for x in seq1 if x in seq2]
in_filtered = register.filter('in_filtered', in_filtered)
@register.simple_tag
def admin_static_url():
"""
If set, returns the string contained in the setting ADMIN_MEDIA_PREFIX, otherwise returns STATIC_URL + 'admin/'.
"""
return getattr(settings, 'ADMIN_MEDIA_PREFIX', None) or ''.join([settings.STATIC_URL, 'admin/'])
class CMSAdminIconBase(Tag):
name = 'cms_admin_icon_base'
def render_tag(self, context):
return CMS_ADMIN_ICON_BASE
register.tag(CMSAdminIconBase)
@register.simple_tag(takes_context=True)
def render_plugin_toolbar_config(context, plugin):
content_renderer = context['cms_content_renderer']
instance, plugin_class = plugin.get_plugin_instance()
if not instance:
return ''
with context.push():
content = content_renderer.render_editable_plugin(
instance,
context,
plugin_class,
)
# render_editable_plugin will populate the plugin
# parents and children cache.
placeholder_cache = content_renderer.get_rendered_plugins_cache(instance.placeholder)
toolbar_js = get_plugin_toolbar_js(
instance,
request_language=content_renderer.request_language,
children=placeholder_cache['plugin_children'][instance.plugin_type],
parents=placeholder_cache['plugin_parents'][instance.plugin_type],
)
varname = get_varname()
toolbar_js = '<script>{}</script>'.format(toolbar_js)
# Add the toolbar javascript for this plugin to the
# sekizai "js" namespace.
context[varname]['js'].append(toolbar_js)
return mark_safe(content)
@register.inclusion_tag('admin/cms/page/plugin/submit_line.html', takes_context=True)
def submit_row_plugin(context):
"""
Displays the row of buttons for delete and save.
"""
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
ctx = {
'opts': opts,
'show_delete_link': context.get('has_delete_permission', False) and change and context.get('show_delete', True),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': context['has_add_permission'] and not is_popup and (not save_as or context['add']),
'show_save_and_continue': not is_popup and context['has_change_permission'],
'is_popup': is_popup,
'show_save': True,
'preserved_filters': context.get('preserved_filters'),
}
if context.get('original') is not None:
ctx['original'] = context['original']
return ctx
| bsd-3-clause | -2,619,761,319,136,201,700 | 34.178832 | 120 | 0.634713 | false | 3.924674 | false | false | false |
open-synergy/event | event_sale_registration_partner_unique/models/sale_order.py | 1 | 1091 | # -*- coding: utf-8 -*-
# © 2016 Antiun Ingeniería S.L. - Jairo Llopis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import _, api, models
from openerp.addons.event_registration_partner_unique import exceptions
class SaleOrderLine(models.Model):
_inherit = "sale.order.line"
@api.multi
def button_confirm(self):
"""Add registrations to the already existing record if possible."""
for s in self:
try:
with self.env.cr.savepoint():
super(SaleOrderLine, s).button_confirm()
# A registration already exists
except exceptions.DuplicatedPartnerError as error:
regs = error._kwargs["registrations"].with_context(
mail_create_nolog=True)
qty = int(s.product_uom_qty)
for reg in regs:
reg.nb_register += qty
regs.message_post(_("%d new registrations sold in %s.") %
(qty, s.order_id.display_name))
return True
| agpl-3.0 | 2,935,847,197,673,163,300 | 35.3 | 75 | 0.573921 | false | 4.01845 | false | false | false |
hanw/connectal | scripts/adb/adb_protocol.py | 3 | 13753 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ADB protocol implementation.
Implements the ADB protocol as seen in android's adb/adbd binaries, but only the
host side.
"""
import struct
import time
import usb_exceptions
# Maximum amount of data in an ADB packet.
MAX_ADB_DATA = 4096
# ADB protocol version.
VERSION = 0x01000000
# AUTH constants for arg0.
AUTH_TOKEN = 1
AUTH_SIGNATURE = 2
AUTH_RSAPUBLICKEY = 3
class InvalidCommandError(Exception):
"""Got an invalid command over USB."""
def __init__(self, message, response_header, response_data):
if response_header == 'FAIL':
message = 'Command failed, device said so. (%s)' % message
super(InvalidCommandError, self).__init__(
message, response_header, response_data)
class InvalidResponseError(Exception):
"""Got an invalid response to our command."""
class InvalidChecksumError(Exception):
"""Checksum of data didn't match expected checksum."""
class InterleavedDataError(Exception):
"""We only support command sent serially."""
def MakeWireIDs(ids):
id_to_wire = {
cmd_id: sum(ord(c) << (i * 8) for i, c in enumerate(cmd_id))
for cmd_id in ids
}
wire_to_id = {wire: cmd_id for cmd_id, wire in id_to_wire.items()}
return id_to_wire, wire_to_id
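# Worked example of the encoding above: 'CNXN' packs little-endian to 0x4E584E43,
# i.e. ord('C') + (ord('N') << 8) + (ord('X') << 16) + (ord('N') << 24).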
class AuthSigner(object):
"""Signer for use with authenticated ADB, introduced in 4.4.x/KitKat."""
def Sign(self, data):
"""Signs given data using a private key."""
raise NotImplementedError()
def GetPublicKey(self):
"""Returns the public key in PEM format without headers or newlines."""
raise NotImplementedError()
class _AdbConnection(object):
"""ADB Connection."""
def __init__(self, usb, local_id, remote_id, timeout_ms):
self.usb = usb
self.local_id = local_id
self.remote_id = remote_id
self.timeout_ms = timeout_ms
def _Send(self, command, arg0, arg1, data=''):
message = AdbMessage(command, arg0, arg1, data)
message.Send(self.usb, self.timeout_ms)
def Write(self, data):
"""Write a packet and expect an Ack."""
self._Send('WRTE', arg0=self.local_id, arg1=self.remote_id, data=data)
# Expect an ack in response.
cmd, okay_data = self.ReadUntil('OKAY')
if cmd != 'OKAY':
if cmd == 'FAIL':
raise usb_exceptions.AdbCommandFailureException(
'Command failed.', okay_data)
raise InvalidCommandError(
'Expected an OKAY in response to a WRITE, got %s (%s)',
cmd, okay_data)
return len(data)
def Okay(self):
self._Send('OKAY', arg0=self.local_id, arg1=self.remote_id)
def ReadUntil(self, *expected_cmds):
"""Read a packet, Ack any write packets."""
cmd, remote_id, local_id, data = AdbMessage.Read(
self.usb, expected_cmds, self.timeout_ms)
if local_id != 0 and self.local_id != local_id:
raise InterleavedDataError("We don't support multiple streams...")
if remote_id != 0 and self.remote_id != remote_id:
raise InvalidResponseError(
'Incorrect remote id, expected %s got %s' % (
self.remote_id, remote_id))
# Ack write packets.
if cmd == 'WRTE':
self.Okay()
return cmd, data
def ReadUntilClose(self):
"""Yield packets until a Close packet is received."""
while True:
cmd, data = self.ReadUntil('CLSE', 'WRTE')
if cmd == 'CLSE':
self._Send('CLSE', arg0=self.local_id, arg1=self.remote_id)
break
if cmd != 'WRTE':
if cmd == 'FAIL':
raise usb_exceptions.AdbCommandFailureException(
'Command failed.', data)
raise InvalidCommandError('Expected a WRITE or a CLOSE, got %s (%s)',
cmd, data)
yield data
def Close(self):
self._Send('CLSE', arg0=self.local_id, arg1=self.remote_id)
cmd, data = self.ReadUntil('CLSE')
if cmd != 'CLSE':
if cmd == 'FAIL':
raise usb_exceptions.AdbCommandFailureException('Command failed.', data)
raise InvalidCommandError('Expected a CLSE response, got %s (%s)',
cmd, data)
class AdbMessage(object):
"""ADB Protocol and message class.
Protocol Notes
local_id/remote_id:
Turns out the documentation is host/device ambidextrous, so local_id is the
id for 'the sender' and remote_id is for 'the recipient'. So since we're
only on the host, we'll re-document with host_id and device_id:
OPEN(host_id, 0, 'shell:XXX')
READY/OKAY(device_id, host_id, '')
WRITE(0, host_id, 'data')
CLOSE(device_id, host_id, '')
"""
ids = ['SYNC', 'CNXN', 'AUTH', 'OPEN', 'OKAY', 'CLSE', 'WRTE']
commands, constants = MakeWireIDs(ids)
# An ADB message is 6 words in little-endian.
format = '<6I'
connections = 0
def __init__(self, command=None, arg0=None, arg1=None, data=''):
self.command = self.commands[command]
self.magic = self.command ^ 0xFFFFFFFF
self.arg0 = arg0
self.arg1 = arg1
self.data = data
@property
def checksum(self):
return self.CalculateChecksum(self.data)
@staticmethod
def CalculateChecksum(data):
# The checksum is just a sum of all the bytes. I swear.
return sum(map(ord, data)) & 0xFFFFFFFF
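    # Worked example (illustrative): CalculateChecksum('abc') is 97 + 98 + 99 == 294,
    # masked to 32 bits.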
def Pack(self):
"""Returns this message in an over-the-wire format."""
return struct.pack(self.format, self.command, self.arg0, self.arg1,
len(self.data), self.checksum, self.magic)
@classmethod
def Unpack(cls, message):
try:
cmd, arg0, arg1, data_length, data_checksum, unused_magic = struct.unpack(
cls.format, message)
except struct.error as e:
raise ValueError('Unable to unpack ADB command.', cls.format, message, e)
return cmd, arg0, arg1, data_length, data_checksum
def Send(self, usb, timeout_ms=None):
"""Send this message over USB."""
usb.BulkWrite(self.Pack(), timeout_ms)
usb.BulkWrite(self.data, timeout_ms)
@classmethod
def Read(cls, usb, expected_cmds, timeout_ms=None, total_timeout_ms=None):
"""Receive a response from the device."""
total_timeout_ms = usb.Timeout(total_timeout_ms)
start = time.time()
while True:
msg = usb.BulkRead(24, timeout_ms)
cmd, arg0, arg1, data_length, data_checksum = cls.Unpack(msg)
command = cls.constants.get(cmd)
if not command:
raise InvalidCommandError(
'Unknown command: %x' % cmd, cmd, (arg0, arg1))
if command in expected_cmds:
break
      # total_timeout_ms is in milliseconds, while time.time() deltas are seconds
      if (time.time() - start) * 1000 > total_timeout_ms:
raise InvalidCommandError(
'Never got one of the expected responses (%s)' % expected_cmds,
cmd, (timeout_ms, total_timeout_ms))
if data_length > 0:
data = ''
while data_length > 0:
temp = usb.BulkRead(data_length, timeout_ms)
data += temp
data_length -= len(temp)
actual_checksum = cls.CalculateChecksum(data)
if actual_checksum != data_checksum:
raise InvalidChecksumError(
'Received checksum %s != %s', (actual_checksum, data_checksum))
else:
data = ''
return command, arg0, arg1, data
@classmethod
def Connect(cls, usb, banner='notadb', rsa_keys=None, auth_timeout_ms=100):
"""Establish a new connection to the device.
Args:
usb: A USBHandle with BulkRead and BulkWrite methods.
banner: A string to send as a host identifier.
rsa_keys: List of AuthSigner subclass instances to be used for
authentication. The device can either accept one of these via the Sign
method, or we will send the result of GetPublicKey from the first one
if the device doesn't accept any of them.
auth_timeout_ms: Timeout to wait for when sending a new public key. This
is only relevant when we send a new public key. The device shows a
dialog and this timeout is how long to wait for that dialog. If used
in automation, this should be low to catch such a case as a failure
quickly; while in interactive settings it should be high to allow
users to accept the dialog. We default to automation here, so it's low
by default.
Returns:
The device's reported banner. Always starts with the state (device,
recovery, or sideload), sometimes includes information after a : with
various product information.
Raises:
usb_exceptions.DeviceAuthError: When the device expects authentication,
but we weren't given any valid keys.
InvalidResponseError: When the device does authentication in an
unexpected way.
"""
msg = cls(
command='CNXN', arg0=VERSION, arg1=MAX_ADB_DATA,
data='host::%s\0' % banner)
msg.Send(usb)
cmd, arg0, arg1, banner = cls.Read(usb, ['CNXN', 'AUTH'])
if cmd == 'AUTH':
if not rsa_keys:
raise usb_exceptions.DeviceAuthError(
'Device authentication required, no keys available.')
# Loop through our keys, signing the last 'banner' or token.
for rsa_key in rsa_keys:
if arg0 != AUTH_TOKEN:
raise InvalidResponseError(
'Unknown AUTH response: %s %s %s' % (arg0, arg1, banner))
signed_token = rsa_key.Sign(banner)
msg = cls(
command='AUTH', arg0=AUTH_SIGNATURE, arg1=0, data=signed_token)
msg.Send(usb)
cmd, arg0, unused_arg1, banner = cls.Read(usb, ['CNXN', 'AUTH'])
if cmd == 'CNXN':
return banner
# None of the keys worked, so send a public key.
msg = cls(
command='AUTH', arg0=AUTH_RSAPUBLICKEY, arg1=0,
data=rsa_keys[0].GetPublicKey() + '\0')
msg.Send(usb)
try:
cmd, arg0, unused_arg1, banner = cls.Read(
usb, ['CNXN'], timeout_ms=auth_timeout_ms)
except usb_exceptions.BulkReadFailedError as e:
if e.usb_error.value == -7: # Timeout.
raise usb_exceptions.DeviceAuthError(
'Accept auth key on device, then retry.')
raise
# This didn't time-out, so we got a CNXN response.
return banner
return banner
@classmethod
def Open(cls, usb, destination, timeout_ms=None):
"""Opens a new connection to the device via an OPEN message.
Not the same as the posix 'open' or any other google3 Open methods.
Args:
usb: USB device handle with BulkRead and BulkWrite methods.
destination: The service:command string.
timeout_ms: Timeout in milliseconds for USB packets.
Raises:
InvalidResponseError: Wrong local_id sent to us.
InvalidCommandError: Didn't get a ready response.
Returns:
The local connection id.
"""
local_id = 1
msg = cls(
command='OPEN', arg0=local_id, arg1=0,
data=destination + '\0')
msg.Send(usb, timeout_ms)
cmd, remote_id, their_local_id, _ = cls.Read(usb, ['CLSE', 'OKAY'],
timeout_ms=timeout_ms)
if local_id != their_local_id:
raise InvalidResponseError(
'Expected the local_id to be %s, got %s' % (local_id, their_local_id))
if cmd == 'CLSE':
# Device doesn't support this service.
return None
if cmd != 'OKAY':
raise InvalidCommandError('Expected a ready response, got %s' % cmd,
cmd, (remote_id, their_local_id))
return _AdbConnection(usb, local_id, remote_id, timeout_ms)
@classmethod
def Command(cls, usb, service, command='', timeout_ms=None):
"""One complete set of USB packets for a single command.
Sends service:command in a new connection, reading the data for the
response. All the data is held in memory, large responses will be slow and
can fill up memory.
Args:
usb: USB device handle with BulkRead and BulkWrite methods.
service: The service on the device to talk to.
command: The command to send to the service.
timeout_ms: Timeout for USB packets, in milliseconds.
Raises:
InterleavedDataError: Multiple streams running over usb.
InvalidCommandError: Got an unexpected response command.
Returns:
The response from the service.
"""
return ''.join(cls.StreamingCommand(usb, service, command, timeout_ms))
@classmethod
def StreamingCommand(cls, usb, service, command='', timeout_ms=None):
"""One complete set of USB packets for a single command.
Sends service:command in a new connection, reading the data for the
response. All the data is held in memory, large responses will be slow and
can fill up memory.
Args:
usb: USB device handle with BulkRead and BulkWrite methods.
service: The service on the device to talk to.
command: The command to send to the service.
timeout_ms: Timeout for USB packets, in milliseconds.
Raises:
InterleavedDataError: Multiple streams running over usb.
InvalidCommandError: Got an unexpected response command.
Yields:
The responses from the service.
"""
connection = cls.Open(usb, destination='%s:%s' % (service, command),
timeout_ms=timeout_ms)
for data in connection.ReadUntilClose():
yield data
| mit | -4,266,882,794,569,911,000 | 33.906091 | 80 | 0.644369 | false | 3.800221 | false | false | false |
breakhearts/wallstreet | wallstreet/test/test_stock_api.py | 1 | 2758 | from __future__ import absolute_import
from wallstreet.crawler import stockapi
from wallstreet.crawler.fetcher import CurlFetcher
from datetime import datetime
from wallstreet import config
class TestYahooStockHistoryAPI:
def test_get_url_params(self):
api = stockapi.YahooHistoryDataAPI()
url, method, headers, data = api.get_url_params("BIDU", start_date="20150217", end_date="20150914")
assert url == "http://real-chart.finance.yahoo.com/table.csv" \
"?s=BIDU&g=d&ignore=.csv&a=1&b=17&c=2015&d=8&e=14&f=2015"
assert method == "GET"
assert data == {}
def test_parse_ret(self):
api = stockapi.YahooHistoryDataAPI()
url, method, headers, data = api.get_url_params("BIDU", start_date="20150218", end_date="20150220")
fetcher = CurlFetcher()
status_code, content = fetcher.fetch(url, method, headers, data)
assert status_code == 200
days = api.parse_ret("BIDU", content)
assert len(days) == 3
day_last = days[0]
assert day_last.symbol == "BIDU"
assert day_last.date == datetime(2015, 2, 20)
class TestNasdaqStockInfoAPI:
def test_all(self):
api = stockapi.NasdaqStockInfoAPI()
url, method, headers, data = api.get_url_params("NASDAQ")
fetcher = CurlFetcher()
status_code, content = fetcher.fetch(url, method, headers, data)
stock_infos = api.parse_ret("NASDAQ", content)
assert len(stock_infos) > 100
class TestEdgarAPI:
def test_year_fiscal_report(self):
api = stockapi.EdgarYearReportAPI(config.get_test("edgar", "core_key"))
url, method, headers, data = api.get_url_params(["BIDU", "AAPL"], start_year=2011, end_year=2012)
fetcher = CurlFetcher(timeout=30)
status_code, content = fetcher.fetch(url, method, headers, data)
raw_report = api.parse_ret(content)
assert len(raw_report) == 4
def test_quarter_fiscal_report(self):
api = stockapi.EdgarQuarterReportAPI(config.get_test("edgar", "core_key"))
url, method, headers, data = api.get_url_params("FB", start_year=2014, end_year=2015, start_quarter=3, end_quarter=1)
fetcher = CurlFetcher(timeout=30)
status_code, content = fetcher.fetch(url, method, headers, data)
raw_report = api.parse_ret(content)
assert len(raw_report) == 3
def test_company_report(self):
api = stockapi.EdgarCompanyAPI(config.get_test("edgar", "core_key"))
url, method, headers, data = api.get_url_params(["BIDU", "BABA"])
fetcher = CurlFetcher(timeout=30)
status_code, content = fetcher.fetch(url, method, headers, data)
t = api.parse_ret(content)
assert len(t) == 2 | apache-2.0 | 6,489,140,336,624,224,000 | 42.793651 | 125 | 0.642857 | false | 3.256198 | true | false | false |
lpsinger/astropy | examples/coordinates/plot_galactocentric-frame.py | 8 | 8058 | # -*- coding: utf-8 -*-
"""
========================================================================
Transforming positions and velocities to and from a Galactocentric frame
========================================================================
This document shows a few examples of how to use and customize the
`~astropy.coordinates.Galactocentric` frame to transform Heliocentric sky
positions, distance, proper motions, and radial velocities to a Galactocentric,
Cartesian frame, and the same in reverse.
The main configurable parameters of the `~astropy.coordinates.Galactocentric`
frame control the position and velocity of the solar system barycenter within
the Galaxy. These are specified by setting the ICRS coordinates of the
Galactic center, the distance to the Galactic center (the sun-galactic center
line is always assumed to be the x-axis of the Galactocentric frame), and the
Cartesian 3-velocity of the sun in the Galactocentric frame. We'll first
demonstrate how to customize these values, then show how to set the solar motion
instead by inputting the proper motion of Sgr A*.
Note that, for brevity, we may refer to the solar system barycenter as just "the
sun" in the examples below.
*By: Adrian Price-Whelan*
*License: BSD*
"""
##############################################################################
# Make `print` work the same in all versions of Python, set up numpy,
# matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the necessary astropy subpackages
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# Let's first define a barycentric coordinate and velocity in the ICRS frame.
# We'll use the data for the star HD 39881 from the `Simbad
# <simbad.harvard.edu/simbad/>`_ database:
c1 = coord.SkyCoord(ra=89.014303*u.degree, dec=13.924912*u.degree,
distance=(37.59*u.mas).to(u.pc, u.parallax()),
pm_ra_cosdec=372.72*u.mas/u.yr,
pm_dec=-483.69*u.mas/u.yr,
radial_velocity=0.37*u.km/u.s,
frame='icrs')
##############################################################################
# This is a high proper-motion star; suppose we'd like to transform its position
# and velocity to a Galactocentric frame to see if it has a large 3D velocity
# as well. To use the Astropy default solar position and motion parameters, we
# can simply do:
gc1 = c1.transform_to(coord.Galactocentric)
##############################################################################
# From here, we can access the components of the resulting
# `~astropy.coordinates.Galactocentric` instance to see the 3D Cartesian
# velocity components:
print(gc1.v_x, gc1.v_y, gc1.v_z)
##############################################################################
# The default parameters for the `~astropy.coordinates.Galactocentric` frame
# are detailed in the linked documentation, but we can modify the most commonly
# changes values using the keywords ``galcen_distance``, ``galcen_v_sun``, and
# ``z_sun`` which set the sun-Galactic center distance, the 3D velocity vector
# of the sun, and the height of the sun above the Galactic midplane,
# respectively. The velocity of the sun can be specified as an
# `~astropy.units.Quantity` object with velocity units and is interepreted as a
# Cartesian velocity, as in the example below. Note that, as with the positions,
# the Galactocentric frame is a right-handed system (i.e., the Sun is at negative
# x values) so ``v_x`` is opposite of the Galactocentric radial velocity:
v_sun = [11.1, 244, 7.25] * (u.km / u.s) # [vx, vy, vz]
gc_frame = coord.Galactocentric(
galcen_distance=8*u.kpc,
galcen_v_sun=v_sun,
z_sun=0*u.pc)
##############################################################################
# We can then transform to this frame instead, with our custom parameters:
gc2 = c1.transform_to(gc_frame)
print(gc2.v_x, gc2.v_y, gc2.v_z)
##############################################################################
# It's sometimes useful to specify the solar motion using the `proper motion
# of Sgr A* <https://arxiv.org/abs/astro-ph/0408107>`_ instead of Cartesian
# velocity components. With an assumed distance, we can convert proper motion
# components to Cartesian velocity components using `astropy.units`:
galcen_distance = 8*u.kpc
pm_gal_sgrA = [-6.379, -0.202] * u.mas/u.yr # from Reid & Brunthaler 2004
vy, vz = -(galcen_distance * pm_gal_sgrA).to(u.km/u.s, u.dimensionless_angles())
##############################################################################
# We still have to assume a line-of-sight velocity for the Galactic center,
# which we will again take to be 11 km/s:
vx = 11.1 * u.km/u.s
v_sun2 = u.Quantity([vx, vy, vz]) # List of Quantity -> a single Quantity
gc_frame2 = coord.Galactocentric(galcen_distance=galcen_distance,
galcen_v_sun=v_sun2,
z_sun=0*u.pc)
gc3 = c1.transform_to(gc_frame2)
print(gc3.v_x, gc3.v_y, gc3.v_z)
##############################################################################
# The transformations also work in the opposite direction. This can be useful
# for transforming simulated or theoretical data to observable quantities. As
# an example, we'll generate 4 theoretical circular orbits at different
# Galactocentric radii with the same circular velocity, and transform them to
# Heliocentric coordinates:
ring_distances = np.arange(10, 25+1, 5) * u.kpc
circ_velocity = 220 * u.km/u.s
phi_grid = np.linspace(90, 270, 512) * u.degree # grid of azimuths
ring_rep = coord.CylindricalRepresentation(
rho=ring_distances[:,np.newaxis],
phi=phi_grid[np.newaxis],
z=np.zeros_like(ring_distances)[:,np.newaxis])
angular_velocity = (-circ_velocity / ring_distances).to(u.mas/u.yr,
u.dimensionless_angles())
ring_dif = coord.CylindricalDifferential(
d_rho=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s,
d_phi=angular_velocity[:,np.newaxis],
d_z=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s
)
ring_rep = ring_rep.with_differentials(ring_dif)
gc_rings = coord.SkyCoord(ring_rep, frame=coord.Galactocentric)
##############################################################################
# First, let's visualize the geometry in Galactocentric coordinates. Here are
# the positions and velocities of the rings; note that in the velocity plot,
# the velocities of the 4 rings are identical and thus overlaid under the same
# curve:
fig,axes = plt.subplots(1, 2, figsize=(12,6))
# Positions
axes[0].plot(gc_rings.x.T, gc_rings.y.T, marker='None', linewidth=3)
axes[0].text(-8., 0, r'$\odot$', fontsize=20)
axes[0].set_xlim(-30, 30)
axes[0].set_ylim(-30, 30)
axes[0].set_xlabel('$x$ [kpc]')
axes[0].set_ylabel('$y$ [kpc]')
# Velocities
axes[1].plot(gc_rings.v_x.T, gc_rings.v_y.T, marker='None', linewidth=3)
axes[1].set_xlim(-250, 250)
axes[1].set_ylim(-250, 250)
axes[1].set_xlabel(f"$v_x$ [{(u.km / u.s).to_string('latex_inline')}]")
axes[1].set_ylabel(f"$v_y$ [{(u.km / u.s).to_string('latex_inline')}]")
fig.tight_layout()
plt.show()
##############################################################################
# Now we can transform to Galactic coordinates and visualize the rings in
# observable coordinates:
gal_rings = gc_rings.transform_to(coord.Galactic)
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
for i in range(len(ring_distances)):
ax.plot(gal_rings[i].l.degree, gal_rings[i].pm_l_cosb.value,
label=str(ring_distances[i]), marker='None', linewidth=3)
ax.set_xlim(360, 0)
ax.set_xlabel('$l$ [deg]')
ax.set_ylabel(fr'$\mu_l \, \cos b$ [{(u.mas/u.yr).to_string("latex_inline")}]')
ax.legend()
plt.show()
| bsd-3-clause | 7,444,938,070,177,072,000 | 40.323077 | 81 | 0.61169 | false | 3.385714 | false | false | false |
stxnext-kindergarten/presence-analyzer-drudkiewicz | src/presence_analyzer/utils.py | 1 | 5035 | # -*- coding: utf-8 -*-
"""
Helper functions used in views.
"""
import csv
from lxml import etree
from json import dumps
from functools import wraps
from datetime import datetime
from flask import Response
from presence_analyzer.main import app
import logging
log = logging.getLogger(__name__) # pylint: disable=C0103
def jsonify(function):
"""
Creates a response with the JSON representation of wrapped function result.
"""
@wraps(function)
def inner(*args, **kwargs):
"""
Inner function of jsonify.
"""
return Response(dumps(function(*args, **kwargs)),
mimetype='application/json')
return inner
def get_menu_data():
"""
Extracts menu data from CSV file
It creates structure like this:
data = [{
'link': 'mainpage',
'title': 'This is mainpage'
}]
"""
data = []
with open(app.config['MENU_CSV'], 'r') as csvfile:
menu_reader = csv.reader(csvfile, delimiter=',')
for row in menu_reader:
data.append({
'link': row[0],
'title': row[1]
})
return data
@app.template_global()
def get_menu(page_url):
"""
Gets links and their titles.
Adds 'selected' attribute to current page.
"""
pages = get_menu_data()
for page in pages:
if page.get('link') == page_url:
page['selected'] = True
return pages
@app.template_global()
def get_users():
"""
Gets dictionary with users data imported from xml file
"""
data = etree.parse(app.config['DATA_USERS']).getroot()
server = data.find('server')
host = '{0}://{1}:{2}'.format(
server.find('protocol').text,
server.find('host').text,
server.find('port').text,
)
data_users = data.find('users')
users = {
user.get('id'): {
'name': unicode(user.find('name').text),
'avatar': host + user.find('avatar').text
} for user in data_users
}
return users
def get_data():
"""
Extracts presence data from CSV file and groups it by user_id.
It creates structure like this:
data = {
'user_id': {
datetime.date(2013, 10, 1): {
'start': datetime.time(9, 0, 0),
'end': datetime.time(17, 30, 0),
},
datetime.date(2013, 10, 2): {
'start': datetime.time(8, 30, 0),
'end': datetime.time(16, 45, 0),
},
}
}
"""
data = {}
with open(app.config['DATA_CSV'], 'r') as csvfile:
presence_reader = csv.reader(csvfile, delimiter=',')
for i, row in enumerate(presence_reader):
if len(row) != 4:
# ignore header and footer lines
continue
try:
user_id = int(row[0])
date = datetime.strptime(row[1], '%Y-%m-%d').date()
start = datetime.strptime(row[2], '%H:%M:%S').time()
end = datetime.strptime(row[3], '%H:%M:%S').time()
data.setdefault(user_id, {})[date] = {
'start': start,
'end': end
}
except (ValueError, TypeError):
log.debug('Problem with line %d: ', i, exc_info=True)
return data
def group_by_weekday(items):
"""
Groups presence entries by weekday.
"""
result = {i: [] for i in range(7)}
for date in items:
start = items[date]['start']
end = items[date]['end']
result[date.weekday()].append(interval(start, end))
return result
def seconds_since_midnight(time):
"""
Calculates amount of seconds since midnight.
"""
return time.hour * 3600 + time.minute * 60 + time.second
def interval(start, end):
"""
Calculates inverval in seconds between two datetime.time objects.
"""
return seconds_since_midnight(end) - seconds_since_midnight(start)
def mean(items):
"""
Calculates arithmetic mean. Returns zero for empty lists.
"""
return float(sum(items)) / len(items) if len(items) > 0 else 0
def group_by_weekday_start_end(items):
"""
Groups start and end presences by weekday.
"""
weekdays = {
i: {
'start': [],
'end': []
}
for i in range(7)
}
for date in items:
start = seconds_since_midnight(items[date]['start'])
end = seconds_since_midnight(items[date]['end'])
weekdays[date.weekday()]['start'].append(start)
weekdays[date.weekday()]['end'].append(end)
return weekdays
def presence_start_end(items):
"""
Groups mean start and mean end presences by weekday.
"""
weekdays = group_by_weekday_start_end(items)
result = {
weekday: {
'start': int(mean(time['start'])),
'end': int(mean(time['end'])),
}
for weekday, time in weekdays.items()
}
return result
| mit | -7,590,320,113,156,924,000 | 23.802956 | 79 | 0.540417 | false | 3.915241 | false | false | false |
ghtmtt/DataPlotly | DataPlotly/core/plot_settings.py | 1 | 9944 | # -*- coding: utf-8 -*-
"""Encapsulates settings for a plot
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from qgis.PyQt.QtCore import (
QFile,
QIODevice
)
from qgis.PyQt.QtXml import QDomDocument, QDomElement
from qgis.core import (
QgsXmlUtils,
QgsPropertyCollection,
QgsPropertyDefinition
)
class PlotSettings: # pylint: disable=too-many-instance-attributes
"""
The PlotSettings class encapsulates all settings relating to a plot, and contains
methods for serializing and deserializing these settings.
"""
PROPERTY_FILTER = 1
PROPERTY_MARKER_SIZE = 2
PROPERTY_COLOR = 3
PROPERTY_STROKE_COLOR = 4
PROPERTY_STROKE_WIDTH = 5
PROPERTY_X_MIN = 6
PROPERTY_X_MAX = 7
PROPERTY_Y_MIN = 8
PROPERTY_Y_MAX = 9
PROPERTY_TITLE = 10
PROPERTY_LEGEND_TITLE = 11
PROPERTY_X_TITLE = 12
PROPERTY_Y_TITLE = 13
PROPERTY_Z_TITLE = 14
DYNAMIC_PROPERTIES = {
PROPERTY_FILTER: QgsPropertyDefinition('filter', 'Feature filter', QgsPropertyDefinition.Boolean),
PROPERTY_MARKER_SIZE: QgsPropertyDefinition('marker_size', 'Marker size', QgsPropertyDefinition.DoublePositive),
PROPERTY_COLOR: QgsPropertyDefinition('color', 'Color', QgsPropertyDefinition.ColorWithAlpha),
PROPERTY_STROKE_COLOR: QgsPropertyDefinition('stroke_color', 'Stroke color',
QgsPropertyDefinition.ColorWithAlpha),
PROPERTY_STROKE_WIDTH: QgsPropertyDefinition('stroke_width', 'Stroke width',
QgsPropertyDefinition.DoublePositive),
PROPERTY_TITLE: QgsPropertyDefinition('title', 'Plot title', QgsPropertyDefinition.String),
PROPERTY_LEGEND_TITLE: QgsPropertyDefinition('legend_title', 'Legend title', QgsPropertyDefinition.String),
PROPERTY_X_TITLE: QgsPropertyDefinition('x_title', 'X title', QgsPropertyDefinition.String),
PROPERTY_Y_TITLE: QgsPropertyDefinition('y_title', 'Y title', QgsPropertyDefinition.String),
PROPERTY_Z_TITLE: QgsPropertyDefinition('z_title', 'Z title', QgsPropertyDefinition.String),
PROPERTY_X_MIN: QgsPropertyDefinition('x_min', 'X axis minimum', QgsPropertyDefinition.Double),
PROPERTY_X_MAX: QgsPropertyDefinition('x_max', 'X axis maximum', QgsPropertyDefinition.Double),
PROPERTY_Y_MIN: QgsPropertyDefinition('y_min', 'Y axis minimum', QgsPropertyDefinition.Double),
PROPERTY_Y_MAX: QgsPropertyDefinition('y_max', 'Y axis maximum', QgsPropertyDefinition.Double)
}
def __init__(self, plot_type: str = 'scatter', properties: dict = None, layout: dict = None,
source_layer_id=None):
# Define default plot dictionary used as a basis for plot initialization
# prepare the default dictionary with None values
# plot properties
plot_base_properties = {
'marker': 'markers',
'custom': None,
'hover_text': None,
'additional_hover_text': None,
'hover_label_text': None,
'x_name': '',
'y_name': '',
'z_name': '',
'in_color': '#8ebad9',
'out_color': '#1f77b4',
'marker_width': 1,
'marker_size': 10,
'marker_symbol': 0,
'line_dash': 'solid',
'box_orientation': 'v',
'box_stat': None,
'box_outliers': False,
'name': '',
'normalization': None,
'cont_type': 'fill',
'color_scale': None,
'show_lines': False,
'cumulative': False,
'show_colorscale_legend': False,
'invert_color_scale': False,
'invert_hist': 'increasing',
'bins': 0,
'selected_features_only': False,
'visible_features_only': False,
'color_scale_data_defined_in_check': False,
'color_scale_data_defined_in_invert_check': False,
'marker_type_combo': 'Points',
'point_combo': '',
'line_combo': 'Solid Line',
'contour_type_combo': 'Fill',
'show_lines_check': False,
'opacity': 1,
'violin_side': None,
'violin_box': False,
'show_mean_line': False,
'layout_filter_by_map': False,
'layout_filter_by_atlas': False
}
# layout nested dictionary
plot_base_layout = {
'title': 'Plot Title',
'legend': True,
'legend_title': None,
'legend_orientation': 'h',
'x_title': '',
'y_title': '',
'z_title': '',
'xaxis': None,
'bar_mode': None,
'x_type': None,
'y_type': None,
'x_inv': None,
'y_inv': None,
'x_min': None,
'x_max': None,
'y_min': None,
'y_max': None,
'range_slider': {'borderwidth': 1, 'visible': False},
'bargaps': 0,
'polar': {'angularaxis': {'direction': 'clockwise'}},
'additional_info_expression': '',
'bins_check': False,
'gridcolor': '#bdbfc0'
}
self.plot_base_dic = {
'plot_type': None,
'layer': None,
'plot_prop': plot_base_properties,
'layout_prop': plot_base_layout
}
self.data_defined_properties = QgsPropertyCollection()
# Set class properties - we use the base dictionaries, replacing base values with
# those from the passed properties dicts
if properties is None:
self.properties = plot_base_properties
else:
self.properties = {**plot_base_properties, **properties}
if layout is None:
self.layout = plot_base_layout
else:
self.layout = {**plot_base_layout, **layout}
self.plot_type = plot_type
self.x = []
self.y = []
self.z = []
self.feature_ids = []
self.additional_hover_text = []
self.data_defined_marker_sizes = []
self.data_defined_colors = []
self.data_defined_stroke_colors = []
self.data_defined_stroke_widths = []
# layout properties
self.data_defined_title = ""
self.data_defined_legend_title = ""
self.data_defined_x_title = ""
self.data_defined_y_title = ""
self.data_defined_z_title = ""
self.data_defined_x_min = None
self.data_defined_x_max = None
self.data_defined_y_min = None
self.data_defined_y_max = None
self.source_layer_id = source_layer_id
def write_xml(self, document: QDomDocument):
"""
Writes the plot settings to an XML element
"""
element = QgsXmlUtils.writeVariant({
'plot_type': self.plot_type,
'plot_properties': self.properties,
'plot_layout': self.layout,
'source_layer_id': self.source_layer_id,
'dynamic_properties': self.data_defined_properties.toVariant(PlotSettings.DYNAMIC_PROPERTIES)
}, document)
return element
def read_xml(self, element: QDomElement) -> bool:
"""
Reads the plot settings from an XML element
"""
res = QgsXmlUtils.readVariant(element)
if not isinstance(res, dict) or \
'plot_type' not in res or \
'plot_properties' not in res or \
'plot_layout' not in res:
return False
self.plot_type = res['plot_type']
self.properties = res['plot_properties']
self.layout = res['plot_layout']
self.source_layer_id = res.get('source_layer_id', None)
self.data_defined_properties.loadVariant(res.get('dynamic_properties', None), PlotSettings.DYNAMIC_PROPERTIES)
return True
def write_to_project(self, document: QDomDocument):
"""
Writes the settings to a project (represented by the given DOM document)
"""
elem = self.write_xml(document)
parent_elem = document.createElement('DataPlotly')
parent_elem.appendChild(elem)
root_node = document.elementsByTagName("qgis").item(0)
root_node.appendChild(parent_elem)
def read_from_project(self, document: QDomDocument):
"""
Reads the settings from a project (represented by the given DOM document)
"""
root_node = document.elementsByTagName("qgis").item(0)
if root_node.isNull():
return False
node = root_node.toElement().firstChildElement('DataPlotly')
if node.isNull():
return False
elem = node.toElement()
return self.read_xml(elem.firstChildElement())
def write_to_file(self, file_name: str) -> bool:
"""
Writes the settings to an XML file
"""
document = QDomDocument("dataplotly")
elem = self.write_xml(document)
document.appendChild(elem)
try:
with open(file_name, "w") as f:
f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
f.write(document.toString())
return True
except FileNotFoundError:
return False
def read_from_file(self, file_name: str) -> bool:
"""
Reads the settings from an XML file
"""
f = QFile(file_name)
if f.open(QIODevice.ReadOnly):
document = QDomDocument()
if document.setContent(f):
if self.read_xml(document.firstChildElement()):
return True
return False
| gpl-2.0 | 3,380,574,620,063,825,000 | 36.104478 | 120 | 0.573109 | false | 4.024282 | false | false | false |
basilboli/playground | pubsub-hello/subscribe.py | 1 | 1330 | from client import *
import base64
client = create_pubsub_client()
# You can fetch multiple messages with a single API call.
batch_size = 100
subscription = 'projects/single-object-747/subscriptions/mysubscription'
# Create a POST body for the Pub/Sub request
body = {
# Setting ReturnImmediately to false instructs the API to wait
# to collect the message up to the size of MaxEvents, or until
# the timeout.
'returnImmediately': False,
'maxMessages': batch_size,
}
while True:
resp = client.projects().subscriptions().pull(
subscription=subscription, body=body).execute()
received_messages = resp.get('receivedMessages')
if received_messages is not None:
ack_ids = []
for received_message in received_messages:
pubsub_message = received_message.get('message')
if pubsub_message:
# Process messages
print base64.b64decode(str(pubsub_message.get('data')))
# Get the message's ack ID
ack_ids.append(received_message.get('ackId'))
# Create a POST body for the acknowledge request
ack_body = {'ackIds': ack_ids}
# Acknowledge the message.
client.projects().subscriptions().acknowledge(
subscription=subscription, body=ack_body).execute() | unlicense | -353,386,624,742,801,900 | 32.275 | 72 | 0.658647 | false | 4.222222 | false | false | false |
dhyeon/ingredient2vec | src/IngredientAnalysis.py | 1 | 1337 |
# import implemented python files
import Config
from utils import DataLoader, GensimModels, DataPlotter
class IngredientAnalysis:
def __init__(self, word_vectors):
print "\nIngredientAnalysis initialized"
self.word_vectors = word_vectors
def analogy(self):
list_most_similar_cosmul = self.word_vectors.most_similar(positive=['orange', 'apple_juice'], negative=['apple'])
print "\nIngredient Analogy"
for dic in list_most_similar_cosmul:
word = dic[0]
score = dic[1]
if score > 0.5 :
print word, score
else:
print "No similar words"
if __name__ == '__main__':
gensimLoader = GensimModels.GensimModels()
model = gensimLoader.load_word2vec(path=Config.path_embeddings_ingredients)
vocab = model.vocab
"""
Analyze Intredient2Vec
"""
# analgoy test
ingredientAnalyzer = IngredientAnalysis(model)
ingredientAnalyzer.analogy()
"""
Plot Ingredient2Vec
"""
# TSNE
model_TSNE = DataPlotter.load_TSNE(model)
# plot data with category
DataPlotter.plot_category(model=model, model_tsne=model_TSNE, path=Config.path_plottings_ingredients_category, withLegends=True)
# plot data with clustering
DataPlotter.plot_clustering(model=model, model_tsne=model_TSNE, path=Config.path_plottings_ingredients_clustering)
| apache-2.0 | -8,695,295,712,565,464,000 | 19.253968 | 129 | 0.697083 | false | 3.102088 | false | false | false |
mcocdawc/chemcoord | setup.py | 1 | 2131 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Setup file for the chemcoord package.
"""
from __future__ import with_statement
from __future__ import absolute_import
from setuptools import setup, find_packages
from io import open # pylint:disable=redefined-builtin
import version
MAIN_PACKAGE = 'chemcoord'
DESCRIPTION = "Python module for dealing with chemical coordinates."
LICENSE = 'LGPLv3'
AUTHOR = 'Oskar Weser'
EMAIL = '[email protected]'
URL = 'https://github.com/mcocdawc/chemcoord'
INSTALL_REQUIRES = ['numpy>=1.14', 'scipy', 'pandas>=1.0', 'numba>=0.35',
'sortedcontainers', 'sympy', 'six', 'pymatgen']
KEYWORDS = ['chemcoord', 'transformation', 'cartesian', 'internal',
'chemistry', 'zmatrix', 'xyz', 'zmat', 'coordinates',
'coordinate system']
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Physics']
def readme():
'''Return the contents of the README.md file.'''
with open('README.md') as freadme:
return freadme.read()
def setup_package():
setup(
name=MAIN_PACKAGE,
version=version.get_version(pep440=True),
url=URL,
description=DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
include_package_data=True,
keywords=KEYWORDS,
license=LICENSE,
long_description=readme(),
classifiers=CLASSIFIERS,
packages=find_packages('src'),
package_dir={'': 'src'},
install_requires=INSTALL_REQUIRES,
)
if __name__ == "__main__":
setup_package()
| lgpl-3.0 | -9,055,857,766,856,021,000 | 30.80597 | 79 | 0.635382 | false | 3.791815 | false | false | false |
mcrute/pydora | pydora/utils.py | 1 | 5542 | import os
import sys
import getpass
import subprocess
class TerminalPlatformUnsupported(Exception):
"""Platform-specific functionality is not supported
Raised by code that can not be used to interact with the terminal on this
platform.
"""
pass
class Colors:
def __wrap_with(raw_code):
@staticmethod
def inner(text, bold=False):
code = raw_code
if bold:
code = "1;{}".format(code)
return "\033[{}m{}\033[0m".format(code, text)
return inner
red = __wrap_with("31")
green = __wrap_with("32")
yellow = __wrap_with("33")
blue = __wrap_with("34")
magenta = __wrap_with("35")
cyan = __wrap_with("36")
white = __wrap_with("37")
class PosixEchoControl:
"""Posix Console Echo Control Driver
Uses termios on POSIX compliant platforms to control console echo. Is not
supported on Windows as termios is not available and will throw a
TerminalPlatformUnsupported exception if contructed on Windows.
"""
def __init__(self):
try:
import termios
self.termios = termios
except ImportError:
raise TerminalPlatformUnsupported("POSIX not supported")
def set_echo(self, enabled):
handle = sys.stdin.fileno()
if not os.isatty(handle):
return
attrs = self.termios.tcgetattr(handle)
if enabled:
attrs[3] |= self.termios.ECHO
else:
attrs[3] &= ~self.termios.ECHO
self.termios.tcsetattr(handle, self.termios.TCSANOW, attrs)
class Win32EchoControl:
"""Windows Console Echo Control Driver
This uses the console API from WinCon.h and ctypes to control console echo
on Windows clients. It is not possible to construct this class on
non-Windows systems, on those systems it will throw a
TerminalPlatformUnsupported exception.
"""
STD_INPUT_HANDLE = -10
ENABLE_ECHO_INPUT = 0x4
DISABLE_ECHO_INPUT = ~ENABLE_ECHO_INPUT
def __init__(self):
import ctypes
if not hasattr(ctypes, "windll"):
raise TerminalPlatformUnsupported("Windows not supported")
from ctypes import wintypes
self.ctypes = ctypes
self.wintypes = wintypes
self.kernel32 = ctypes.windll.kernel32
def _GetStdHandle(self, handle):
return self.kernel32.GetStdHandle(handle)
def _GetConsoleMode(self, handle):
mode = self.wintypes.DWORD()
self.kernel32.GetConsoleMode(handle, self.ctypes.byref(mode))
return mode.value
def _SetConsoleMode(self, handle, value):
self.kernel32.SetConsoleMode(handle, value)
def set_echo(self, enabled):
stdin = self._GetStdHandle(self.STD_INPUT_HANDLE)
mode = self._GetConsoleMode(stdin)
if enabled:
self._SetConsoleMode(stdin, mode | self.ENABLE_ECHO_INPUT)
else:
self._SetConsoleMode(stdin, mode & self.DISABLE_ECHO_INPUT)
class Screen:
def __init__(self):
try:
self._echo_driver = PosixEchoControl()
except TerminalPlatformUnsupported:
pass
try:
self._echo_driver = Win32EchoControl()
except TerminalPlatformUnsupported:
pass
if not self._echo_driver:
raise TerminalPlatformUnsupported("No supported terminal driver")
def set_echo(self, enabled):
self._echo_driver.set_echo(enabled)
@staticmethod
def clear():
sys.stdout.write("\x1b[2J\x1b[H")
sys.stdout.flush()
@staticmethod
def print_error(msg):
print(Colors.red(msg))
@staticmethod
def print_success(msg):
print(Colors.green(msg))
@staticmethod
def get_string(prompt):
while True:
value = input(prompt).strip()
if not value:
print(Colors.red("Value Required!"))
else:
return value
@staticmethod
def get_password(prompt="Password: "):
while True:
value = getpass.getpass(prompt)
if not value:
print(Colors.red("Value Required!"))
else:
return value
@staticmethod
def get_integer(prompt):
"""Gather user input and convert it to an integer
Will keep trying till the user enters an interger or until they ^C the
program.
"""
while True:
try:
return int(input(prompt).strip())
except ValueError:
print(Colors.red("Invalid Input!"))
def iterate_forever(func, *args, **kwargs):
"""Iterate over a finite iterator forever
When the iterator is exhausted will call the function again to generate a
new iterator and keep iterating.
"""
output = func(*args, **kwargs)
while True:
try:
playlist_item = next(output)
playlist_item.prepare_playback()
yield playlist_item
except StopIteration:
output = func(*args, **kwargs)
class SilentPopen(subprocess.Popen):
"""A Popen varient that dumps it's output and error"""
def __init__(self, *args, **kwargs):
self._dev_null = open(os.devnull, "w")
kwargs["stdin"] = subprocess.PIPE
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = self._dev_null
super().__init__(*args, **kwargs)
def __del__(self):
self._dev_null.close()
super().__del__()
| mit | 2,048,144,627,866,843,600 | 25.644231 | 78 | 0.600686 | false | 4.243492 | false | false | false |
tfiers/arenberg-online | ticketing/management/commands/prefill_given_paper_tickets.py | 1 | 2064 | from django.core.management.base import BaseCommand
from given_paper_tickets import given_paper_tickets
from core.models import User
from ticketing.models import GivenPaperTickets, Performance
from datetime import datetime, date
from django.contrib.contenttypes.models import ContentType
import django.utils.timezone as django_tz
def fill_in_existing_data():
do = Performance.objects.get(date__contains=date(2015,5,7))
vr = Performance.objects.get(date__contains=date(2015,5,8))
tz = django_tz.get_default_timezone()
for tup in given_paper_tickets['23 for do']:
GivenPaperTickets.objects.update_or_create(
given_on=datetime(2015,4,23,22,45,tzinfo=tz),
given_to=User.objects.get(first_name__iexact=tup[0], last_name__iexact=tup[1]),
for_what_type=ContentType.objects.get_for_model(Performance),
for_what_id=do.id,
count=tup[2],
)
for tup in given_paper_tickets['23 for vr']:
GivenPaperTickets.objects.update_or_create(
given_on=datetime(2015,4,23,22,45,tzinfo=tz),
given_to=User.objects.get(first_name__iexact=tup[0], last_name__iexact=tup[1]),
for_what_type=ContentType.objects.get_for_model(Performance),
for_what_id=vr.id,
count=tup[2],
)
for tup in given_paper_tickets['30 for do']:
GivenPaperTickets.objects.update_or_create(
given_on=datetime(2015,4,30,22,45,tzinfo=tz),
given_to=User.objects.get(first_name__iexact=tup[0], last_name__iexact=tup[1]),
for_what_type=ContentType.objects.get_for_model(Performance),
for_what_id=do.id,
count=tup[2],
)
for tup in given_paper_tickets['30 for vr']:
GivenPaperTickets.objects.update_or_create(
given_on=datetime(2015,4,30,22,45,tzinfo=tz),
given_to=User.objects.get(first_name__iexact=tup[0], last_name__iexact=tup[1]),
for_what_type=ContentType.objects.get_for_model(Performance),
for_what_id=vr.id,
count=tup[2],
)
class Command(BaseCommand):
args = 'none'
help = "yo do this"
def handle(self, *args, **options):
fill_in_existing_data()
self.stdout.write('Succesfully filled in existing given_paper_tickets data.') | mit | 3,304,924,674,308,608,500 | 34 | 82 | 0.731589 | false | 2.831276 | false | false | false |
rendermotion/RMMel | rig/constraintSwitch.py | 1 | 5465 | import pymel.core as pm
from RMPY.rig import rigBase
class ConstraintSwitchModel(rigBase.BaseModel):
def __init__(self):
super(ConstraintSwitchModel, self).__init__()
self.outputs = []
self.list_a = []
self.list_b = []
self.constraints = []
self.attribute_output_a = None
self.attribute_output_b = None
class ConstraintSwitch(rigBase.RigBase):
def __init__(self, *args, **kwargs):
super(ConstraintSwitch, self).__init__(*args, **kwargs)
self._model = ConstraintSwitchModel()
self.constraint_func = {'parent': pm.parentConstraint,
'point': pm.pointConstraint,
'orient': pm.orientConstraint}
@property
def attribute_output_b(self):
return self._model.attribute_output_b
@attribute_output_b.setter
def attribute_output_b(self, value):
self._model.attribute_output_b = value
@property
def attribute_output_a(self):
return self._model.attribute_output_a
@attribute_output_a.setter
def attribute_output_a(self, value):
self._model.attribute_output_a = value
@property
def controls(self):
return self._model.controls
@controls.setter
def controls(self, value):
self._model.controls = value
@property
def outputs(self):
return self._model.outputs
@outputs.setter
def outputs(self, value):
self._model.outputs = value
@property
def constraints(self):
return self._model.constraints
@constraints.setter
def constraints(self, value):
self._model.constraints = value
@property
def list_a(self):
return self._model.list_a
@list_a.setter
def list_a(self, value):
self._model.list_a = value
@property
def list_b(self):
return self._model.list_b
@list_b.setter
def list_b(self, value):
self._model.list_b = value
def build(self, list_a, list_b, **kwargs):
control = kwargs.pop('control', None)
self.create_list_base(list_a, list_b)
if control:
print 'control found {}, {}'.format(control, kwargs)
self.create_attribute_control(control, **kwargs)
self.link_attribute_to_constraints()
self.controls.append(control)
def create_list_base(self, list_a, list_b, **kwargs):
destination = kwargs.pop('destination', None)
constraint_type = kwargs.pop('constraint_type', 'parent')
output_type = kwargs.pop('output_type', 'joint')
root_group = pm.group(empty=True)
self.name_convention.rename_name_in_format(root_group, name='intermediate')
if output_type == 'group' or output_type == 'locator':
root_group.setParent(self.rig_system.kinematics)
else:
root_group.setParent(self.rig_system.joints)
if len(list_a) == len(list_b):
for index, (constraint_a, constraint_b) in enumerate(zip(list_a, list_b)):
if not destination:
if output_type == 'group':
output = self.create.group.point_base(constraint_a, name='intermediate')
output.setParent(root_group)
elif output_type == 'locator':
output = self.create.space_locator.point_base(constraint_a, name='intermediate')
output.setParent(root_group)
else:
reset, output = self.create.joint.point_base(constraint_a, name='intermediate')
reset.setParent(root_group)
else:
output = destination[index]
self.outputs.append(output)
constraint = self.constraint_func[constraint_type](constraint_a, output)
constraint.interpType.set(2)
self.constraint_func[constraint_type](constraint_b, output)
self.constraints.append(constraint)
else:
print 'list_a and list_b should be the same size'
def create_attribute_control(self, control, **kwargs):
self.controls.append(control)
attribute_name = kwargs.pop('attribute_name', 'space_switch')
if attribute_name not in pm.listAttr(self.controls[0]):
pm.addAttr(self.controls[0], ln=attribute_name, hnv=True, hxv=True, min=0, max=10, k=True)
reverse = pm.shadingNode('reverse', asUtility=True, name="reverse")
multiply = pm.createNode('unitConversion', name="multiplier")
self.name_convention.rename_name_in_format(reverse)
self.name_convention.rename_name_in_format(multiply)
pm.connectAttr('{}.{}'.format(self.controls[0], attribute_name), "{}.input".format(multiply))
pm.setAttr("{}.conversionFactor".format(multiply), 0.1)
pm.connectAttr("{}.output".format(multiply), "{}.inputX".format(reverse))
self.attribute_output_a = multiply.output
self.attribute_output_b = reverse.outputX
def link_attribute_to_constraints(self):
for each_constraint in self.constraints:
for attribute_control, weight_alias in zip([self.attribute_output_a, self.attribute_output_b],
each_constraint.getWeightAliasList()):
attribute_control >> weight_alias
if __name__ == '__main__':
pass | lgpl-3.0 | -212,268,365,905,434,180 | 35.684564 | 106 | 0.597987 | false | 4.027266 | false | false | false |
latreides/SE_Team3 | flashcards/migrations/0002_auto_20140922_1421.py | 1 | 2495 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('flashcards', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='card',
name='Card_ID',
),
migrations.RemoveField(
model_name='deck',
name='Deck_ID',
),
migrations.RemoveField(
model_name='image',
name='Image_ID',
),
migrations.AddField(
model_name='card',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, default=0, serialize=False, verbose_name='ID'),
preserve_default=False,
),
migrations.AddField(
model_name='deck',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, default=None, serialize=False, verbose_name='ID'),
preserve_default=False,
),
migrations.AddField(
model_name='image',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, default=None, serialize=False, verbose_name='ID'),
preserve_default=False,
),
migrations.AlterField(
model_name='card',
name='Back_Img_ID',
field=models.ForeignKey(related_name=b'Back_Image', blank=True, to='flashcards.Image'),
),
migrations.AlterField(
model_name='card',
name='Back_Text',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='card',
name='Difficulty',
field=models.IntegerField(blank=True),
),
migrations.AlterField(
model_name='card',
name='Front_Img_ID',
field=models.ForeignKey(related_name=b'Front_Image', blank=True, to='flashcards.Image'),
),
migrations.AlterField(
model_name='card',
name='Front_Text',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='card',
name='Last_Attempted',
field=models.DateTimeField(blank=True),
),
migrations.AlterField(
model_name='deck',
name='Accessed_Date',
field=models.DateTimeField(blank=True),
),
]
| mit | 8,965,067,324,240,967,000 | 30.582278 | 122 | 0.536273 | false | 4.400353 | false | false | false |
aosingh/lexpy | lexpy/trie.py | 1 | 1734 | from lexpy._base.node import FSANode
from lexpy._base.automata import FSA
__all__ = ['Trie']
class Trie(FSA):
__slots__ = 'root'
"""
Description:
To create a Trie instance, create an object of this class.
Attributes:
root: (_TrieNode) The Top level node which is created every time you create a Trie instance
"""
def __init__(self):
"""
Description:
This method initializes the Trie instance by creating the root node.
By default, the id of the root node is 1 and number of words in the Trie is also 1.
The label of the root node is an empty string ''.
"""
root = FSANode(0, '')
super(Trie, self).__init__(root)
def __len__(self):
"""
Description:
Returns the number of nodes in the Trie Data Structure
Returns:
:returns (int) Number of Nodes in the trie data structure
:return:
"""
return self._id
def add(self, word, count=1):
"""
Description:
Adds a word in the trie data structure.
Args:
:arg word (str) : The word that you want to insert in the trie.
Raises:
:raises: ``AssertionError`` if the word is None
"""
assert word is not None, "Input word cannot be None"
node = self.root
for i, letter in enumerate(word):
if letter not in node.children:
self._id += 1
node.add_child(letter, _id=self._id)
node = node[letter]
if i == len(word)-1:
node.eow = True
node.count += count
self._num_of_words += count | gpl-3.0 | 1,698,374,852,213,542,100 | 26.109375 | 99 | 0.535755 | false | 4.25 | false | false | false |
CorundumGames/Invasodado | game/player.py | 1 | 9349 | from functools import partial
from itertools import chain
from math import sin, cos, pi
from random import gauss, uniform
from pygame import Rect, Surface
from pygame.constants import *
import pygame.key
from core import color
from core import config
from core.particles import ParticleEmitter, ParticlePool, Particle
from game import gamedata
from game.combocounter import get_combo_counter
from game.gameobject import GameObject
from game.shipbullet import ShipBullet
from game.enemy import Enemy
### Functions ##################################################################
def _burst_appear(self):
self.acceleration[1] = GRAVITY
self.velocity = [gauss(0, 25), uniform(-10, -20)]
def _radius_appear(self):
self.progress = 0
distance_magnitude = gauss(300, 50)
angle = uniform(0, -pi)
self.position[0] = START_POS.centerx + distance_magnitude * cos(angle)
self.position[1] = START_POS.centery + distance_magnitude * sin(angle)
self.startpos = tuple(self.position)
self.rect.topleft = (self.position[0] + .5, self.position[1] + .5)
def _radius_move(self):
self.progress += 1
position = self.position
percent = self.progress / 30
if percent == 1:
#If we've reached our target location...
self.change_state(Particle.STATES.LEAVING)
else:
dx = (percent**2) * (3-2*percent)
ddx = 1 - dx
position[0] = (self.startpos[0] * ddx) + (START_POS.centerx * dx)
position[1] = (self.startpos[1] * ddx) + (START_POS.centery * dx)
self.rect.topleft = (position[0] + .5, position[1] + .5)
################################################################################
### Constants ##################################################################
PART_IMAGE = Rect(4, 170, 4, 4)
APPEAR = config.load_sound('appear.wav')
APPEAR_POOL = ParticlePool(config.get_sprite(PART_IMAGE), _radius_move, _radius_appear)
DEATH = config.load_sound('death.wav')
DEATH_POOL = ParticlePool(config.get_sprite(PART_IMAGE), appear_func=_burst_appear)
FRAMES = tuple(config.get_sprite(Rect(32 * i, 128, 32, 32)) for i in range(5))
GRAVITY = 0.5
SHIP_STATES = ('IDLE', 'SPAWNING', 'ACTIVE', 'DYING', 'DEAD', 'RESPAWN')
SPEED = 4
START_POS = Rect(config.SCREEN_WIDTH / 2, config.SCREEN_HEIGHT * .8, 32, 32)
################################################################################
### Preparation ################################################################
for i in FRAMES: i.set_colorkey(color.COLOR_KEY, config.BLIT_FLAGS)
################################################################################
class FlameTrail(GameObject):
'''
FlameTrail is the jet left by the Ship's engines. This is purely a
graphical effect.
'''
FRAMES = tuple(config.get_sprite(Rect(32*i, 0, 32, 32)) for i in range(6))
GROUP = None
def __init__(self):
super().__init__()
self.anim = 0.0
self.image = FlameTrail.FRAMES[0]
self.position = [-300.0, -300.0]
self.rect = Rect(self.position, self.image.get_size())
self.state = 1
del self.acceleration, self.velocity
for i in self.__class__.FRAMES: i.set_colorkey(color.COLOR_KEY, config.BLIT_FLAGS)
def animate(self):
self.anim += 1/3
self.image = FlameTrail.FRAMES[int(3 * sin(self.anim / 2)) + 3]
actions = {1 : 'animate'}
################################################################################
class LightColumn(GameObject):
'''
This class exists to let the player know where exactly he's aiming.
'''
SIZE = Rect(0, 0, 32, config.SCREEN_HEIGHT - 32 * 3)
def __init__(self):
super().__init__()
self.image = Surface(self.__class__.SIZE.size, config.BLIT_FLAGS)
self.position = [-300.0, -300.0]
self.rect = Rect(self.position, self.__class__.SIZE.size)
self.state = 1
self.image.fill(color.WHITE)
self.image.set_alpha(128)
del self.acceleration, self.velocity
actions = {1 : None}
################################################################################
class Ship(GameObject):
'''
The Ship is the player character. There's only going to be one instance of
it, but it has to inherit from pygame.sprite.Sprite, so we can't make it a
true Python singleton (i.e. a module).
'''
STATES = config.Enum(*SHIP_STATES)
GROUP = None
def __init__(self):
'''
@ivar anim: A counter for ship animation
@ivar image: The graphic
@ivar invincible: How many frames of invincibility the player has if any
@ivar my_bullet: The single bullet this ship may fire
'''
super().__init__()
self.anim = 0.0
self.appear_emitter = ParticleEmitter(APPEAR_POOL, START_POS.copy(), 2)
self.emitter = ParticleEmitter(DEATH_POOL, START_POS.copy(), 2)
self.flames = FlameTrail()
self.image = FRAMES[0]
self.invincible = 0
self.light_column = LightColumn()
self.my_bullet = ShipBullet()
self.position = list(START_POS.topleft)
self.rect = START_POS.copy()
self.respawn_time = 3 * 60 # In frames
self.change_state(Ship.STATES.RESPAWN)
def on_fire_bullet(self):
bul = self.my_bullet
if bul.state == ShipBullet.STATES.IDLE and self.state == Ship.STATES.ACTIVE:
#If our bullet is not already on-screen...
bul.add(Ship.GROUP)
self.anim = 1
self.image = FRAMES[self.anim]
bul.rect.center = self.rect.center
bul.position = list(self.rect.topleft)
bul.change_state(ShipBullet.STATES.FIRED)
def respawn(self):
self.appear_emitter.burst(200)
APPEAR.stop()
APPEAR.play()
for i in chain(FRAMES, FlameTrail.FRAMES, {self.light_column.image}): i.set_alpha(128)
self.invincible = 250
self.light_column.rect.midbottom = self.rect.midtop
self.position = list(START_POS.topleft)
self.rect = START_POS.copy()
self.respawn_time = 3 * 60
self.change_state(Ship.STATES.ACTIVE)
def move(self):
keys = pygame.key.get_pressed() #Shorthand for which keys are pressed
rect = self.rect
width = self.image.get_width()
if self.state not in {Ship.STATES.DYING, Ship.STATES.DEAD, Ship.STATES.IDLE}:
if (keys[K_LEFT] or keys[K_a]) and rect.left > 0:
#If we're pressing left and not at the left edge of the screen...
self.position[0] -= SPEED
elif (keys[K_RIGHT] or keys[K_d]) and rect.right < config.SCREEN_RECT.right:
#If we're pressing right and not at the right edge of the screen...
self.position[0] += SPEED
rect.left = self.position[0] + 0.5
self.flames.rect.midtop = (rect.midbottom[0], rect.midbottom[1] - 1)
#Compensate for the gap in the flames ^^^
self.light_column.position[0] = self.position[0]
self.light_column.rect.left = round(self.light_column.position[0] / width) * width
if self.invincible:
#If we're invincible...
self.invincible -= 1
elif self.image.get_alpha() == 128:
for i in chain(FRAMES, FlameTrail.FRAMES): i.set_alpha(255)
self.anim = self.anim + (0 < self.anim < len(FRAMES) - 1) / 3 if self.anim != 4 else 0.0
self.image = FRAMES[int(self.anim)]
if gamedata.combo_time == gamedata.MAX_COMBO_TIME and gamedata.combo > 1:
counter = get_combo_counter(gamedata.combo, self.rect.topleft)
counter.rect.midbottom = self.rect.midtop
counter.position = list(counter.rect.topleft)
counter.change_state(counter.__class__.STATES.APPEARING)
Ship.GROUP.add(counter)
def die(self):
DEATH.play()
for i in chain(FRAMES, FlameTrail.FRAMES, (self.light_column.image,)): i.set_alpha(0)
self.emitter.rect = self.rect
self.emitter.burst(100)
self.change_state(Ship.STATES.DEAD)
def instadie(self, other):
if gamedata.lives:
#If we have any lives...
gamedata.lives = 0
self.die()
def wait_to_respawn(self):
self.respawn_time -= 1
if not self.respawn_time:
#If we're done waiting to respawn...
self.change_state(Ship.STATES.RESPAWN)
actions = {
STATES.IDLE : None ,
STATES.SPAWNING : 'respawn' ,
STATES.ACTIVE : 'move' ,
STATES.DYING : 'die' ,
STATES.DEAD : 'wait_to_respawn',
STATES.RESPAWN : 'respawn' ,
}
collisions = {
Enemy: instadie,
} | gpl-3.0 | -2,049,556,484,475,392,000 | 38.121339 | 96 | 0.535886 | false | 3.63209 | true | false | false |
hughobrien/shimmer-nokia-fall-detection | MovementGrapher.py | 1 | 6767 | # Accelerometer Grapher and Fall Dector - Hugh O'Brien March 2009
#
#This is a script for PyS60 that opens a bluetooth serial connection
#to a pre-programmed SHIMMER sensor, The SHIMMER provides accelerometer
#data in the form "1111 1111 1111" where '1111' will be in the range
#of 0 -> 4400. The three values represent the data gathered
#from monitoring the three axis of the accelerometer.
#
#The script reduces the accuracy of these values in order to be able
#to graph them on a screen that is only 320x240px in size
#
#The script also monitors the difference between two subsequent
#readings in order to determine if a large movement has occured.
#This can be interpreted as a fall. A call is then placed to a
#pre-defined telephone number and the details of the victim are
#read out to the receiver.
import e32, appuifw, audio, telephone
#btsocket is the 'old' BT system, new version introduced in
#PyS60 1.9.1 is harder to work with.
import btsocket as socket
#a predefined BT MAC address can be set here to skip discovery process
target = ''
contact_name = "John Watson"
contact_number = "5550137"
victim_name = "Mr. Sherlock Holmes"
victim_address = "221 B. Baker Street. London"
sensitivity = 28
def fall():
global app_lock, contact_name, contact_number, victim_name,\
victim_address, data, prev
audio.say("Dialling %s now" % contact_name)
telephone.dial(contact_number)
e32.ao_sleep(7) #7 sec delay for someone to answer
for i in range(2, -1, -1):
audio.say("This is an automated message. A fall has been detected.\
Please assist %s at address %s. \
This message will repeat %d more times" \
% (victim_name, victim_address, i) )
telephone.hang_up()
data = ( 40, 40, 40 ) #reset values so as not to trigger again
prev = data
app_lock.signal() #unlock the main loop
def connect(): #this function sets up the BT socket connection
global btsocket, target
try:
#socket params passed to the OS
btsocket=socket.socket(socket.AF_BT,socket.SOCK_STREAM)
if target == '': #if no target defined, begin OS discovery routine
address,services = socket.bt_discover()
target = (address, services.values()[0])
btsocket.connect(target) #initiate connection and notify user
appuifw.note(u"Connected to " + str(address), "info")
except: #fail cleanly
appuifw.note(u"Error connecting to device")
btsocket.close()
def getData(): #this receives single characters over the bitstream
#until it encounters a newline and carraige return it then
#returns the characters it has buffered until that point
global btsocket #use the globally defined socket
buffer = "" #create an empty buffer
rxChar = btsocket.recv(1) #receive 1 char over BT and save in rxChar
#spin here until we get a 'real' char
while (rxChar == '\n') or (rxChar == '\r'):
rxChar = btsocket.recv(1)
#as long as we receive 'real' chars buffer them
while (rxChar != '\n') and (rxChar != '\r'):
buffer += rxChar
rxChar = btsocket.recv(1)
return buffer #return the buffer contents
def graph_data(input):
#this function produces the graphs on the screen. the screen is
#landscape oriented with a resolution of 240x320. The constants seen
#here are used to define where on the screen the graphs should be drawn
global count, canvas, prev, data
#take the input string formated like "1111 1111 1111" and parse it
#to acquire 3 sets of chars and then interpret them as digits saving
#them to a list in this format: ( '1111', '1111', '1111' )
#the values are then divided by 60 as they will be in the range
#0 -> x -> 4400 as the screen is only 240px high. furthermore as there
#are three graphs being drawn each is confined to (240 / 3 )px of
#height. The divisor of 60 accommodates this at the cost of accuracy.
try:
data = (\
int(input[0:4]) / 60, \
int(input[5:9]) / 60, \
int(input[10:14]) / 60\
)
#sane defaults if we receive a malformed reading
except ValueError:
data = ( 36, 36, 36 )
#redraw the screen if there are more than 280 samples displayed.
if count > 280:
reset()
#draw a line, with the X1 starting 10 points from the left and
#expanding right, Y1 being the previous value of Y2 (initially zero)
#plus a vertical offset so the graphs don't overlap each other, X2
#being one point right of X1 and Y2 one of the 3 XYZ readings plus
#the vertical offset. other options are purely aesthetic.
canvas.line(\
(count + 10, prev[0], count + 11, data[0] ), \
outline = 0xFF0000, width = 1)
canvas.line(\
(count + 10, prev[1] + 80, count + 11, data[1] + 80), \
outline = 0x00DD00, width = 1)
canvas.line(\
(count + 10, prev[2] + 160, count + 11, data[2] + 160), \
outline = 0x4444FF, width = 1)
#increment counter - data should also be pushed into prev here
#but this happens in the main loop for monitoring reasons
count = count + 1
def reset(): # this function redraws the screen when it becomes full
global count, canvas
#reset the count and redraw a blank canvas
count = 0
canvas.rectangle((0, 0, 320, 240), fill = 0x000000)
#Main
data = ( 0, 0, 0 )
prev = (40, 40, 40) #initial zero values for 'previous values' of the data
canvas = appuifw.Canvas() #create a new Canvas object
appuifw.app.body = canvas
appuifw.app.screen = "full" #go 'fullscreen'
appuifw.app.orientation = "landscape" # draw in landscape orientation
appuifw.app.title = u"Activity Monitor" #name the program
app_lock = e32.Ao_lock() #locking system
connect() #open the BT socket
e32.ao_sleep(1) # sleep for 1 second in case of graphical slowness
reset() # initially reset the screen to draw the canvas
while 1: #loop the following code infinitely
e32.reset_inactivity() #keep the screensaver away
graph_data( getData() ) # poll the BT data passing it to the grapher.
#test the movement level between the last two samples
if ( (abs(data[0] - prev[0]) > sensitivity ) \
or (abs(data[1] - prev[1]) > sensitivity ) \
or (abs(data[2] - prev[2]) > sensitivity ) ):
fall() #if too much, take action
app_lock.wait() #pause this loop until fall() finishes
e32.ao_sleep(1)
reset()
prev = data #move current data into previous data buffer | mit | -3,011,709,792,132,006,400 | 36.242938 | 75 | 0.648589 | false | 3.574749 | false | false | false |
proflayton/iPerfParser | ParserStructure/Pings.py | 1 | 4794 |
# ------------------------------------------------------------------------
# This block checks to see if the script is being run directly,
# i.e. through the command line. If it is, then it stops and exits the
# program, asking the user to use these files by running the main.py
# ------------------------------------------------------------------------
try:
from .utils import testForMain
except:
from utils import testForMain
testForMain(__name__)
# ------------------------------------------------------------------------
# PINGS.PY
#
# AUTHOR(S): Peter Walker [email protected]
# Brandon Layton [email protected]
#
# PURPOSE- Holds a single measurement of data transfer speed in a single test
# (i.e. This object represent one line of text in a speed test)
#
# VARIABLES:
# secIntervalStart Float, represents the start time of this Ping
# secIntervalEnd Float, represents the end time of this Ping (should always be start + 1)
# size Float, represents this Ping's size in Kbits sent
# speed Float, represents this Ping's speed in KBytes/sec
# size_string String, converted from size, used in __str__
# size_units String, units to be appended to string
# speed_string String, converted from speed, used in __str__
# speed_units String, units to be appended to string
#
# FUNCTIONS:
# __init__ - Used to initialize an object of this class
# INPUTS- self: reference to the object calling this method (i.e. Java's THIS)
# OUTPUTS- none
#
# __str__ - Returns a string represenation of the object
# INPUTS- self: reference to the object calling this method (i.e. Java's THIS)
# OUTPUTS- String, representing the attributes of the object (THIS)
# ------------------------------------------------------------------------
from .utils import global_str_padding as pad; pad = pad*4
class Ping(object):
# ------------------
# Initializing some class attributes
secIntervalStart = 0
secIntervalEnd = 0
size = 0
speed = 0
size_string = ""
size_units = ""
speed_string = ""
speed_units = ""
# ------------------
# DESC: Initializing class
def __init__(self, data, size_u, speed_u):
self.size_units = size_u
self.speed_units = speed_u
#This takes the given data String and parses the object information
data_start = data.split("-")[0].split("]")[1].strip()
data = data.split("-")[1]
data_end = data.split("sec", 1)[0].strip()
data = data.split("sec", 1)[1]
data_size = data.split(self.size_units)[0].strip()
data = data.split(self.size_units)[1]
data_speed = data.split(self.speed_units)[0].strip()
self.secIntervalStart = float(data_start)
self.secIntervalEnd = float(data_end)
self.size = float(data_size)
self.speed = float(data_speed)
#This section adds the zeros following the speed and size numbers, as sometimes
# the size may vary between ##.# and ###
if ("." in data_size):
if (len(data_size.split(".")[1]) == 1):
data_size += "0"
#END IF
else:
data_size += ".00"
self.size_string = data_size
if ("." in data_speed):
if (len(data_speed.split(".")[1]) == 1):
data_speed += "0"
#END IF
else:
data_speed += ".00"
self.speed_string = data_speed
#Creating the padding of spaces needed to line up all of the numbers
# The padding after the time varies because the time may be between 0 and 99.
# If the start and end are both 1 digit, two spaces are needed. If start and end are
# a 1 and 2 digit number, one space is needed
self.time_pad = ""
if self.secIntervalEnd < 10.0:
self.time_pad = " "
elif self.secIntervalStart < 10.0 and self.secIntervalEnd >= 10.0:
self.time_pad = " "
from math import log10
self.size_pad = (" " * (4 - int(log10(self.size)))) if self.size else (" " * 4)
self.speed_pad = (" " * (4 - int(log10(self.speed)))) if self.speed else (" " * 4)
#END DEF
# DESC: Creating a string representation of our object
def __str__(self):
return (pad + str(self.secIntervalStart) + "-"
+ str(self.secIntervalEnd) + self.time_pad + " " + self.size_pad
+ str(self.size_string) + " " + str(self.size_units) + " " + self.speed_pad
+ str(self.speed_string) + " " + str(self.speed_units)
)
#END DEF
#END CLASS | mit | -2,295,991,434,001,595,400 | 40.695652 | 97 | 0.539007 | false | 3.872375 | false | false | false |
HKuz/Test_Code | setup.py | 1 | 4412 | #!/Applications/anaconda/envs/Python3/bin
import sys
def main():
'''Python 3 Quick Start Code Examples'''
# Get input from user and display it
# feels = input("On a scale of 1-10, how do you feel? ")
# print("You selected: {}".format(feels))
# Python Data Types
integer = 42
floater = 3.14
stringer = 'Hello, World!'
noner = None # singleton value, check: if var is None
tupler = (1, 2, 3)
lister = [1, 2, 3]
dicter = dict(
one = 1,
two = 2,
three = 3
)
boolTrue = True
boolFalse = False
# Conditionals
print("=========== Conditionals ==========")
num1, num2 = 0, 1
if (num1 > num2):
# print("{} is greater than {}".format(num1, num2))
pass
elif (num1 < num2):
# print("{} is less than {}".format(num1, num2))
pass
else:
# print("{} is equal to {}".format(num1, num2))
pass
# Python version of ternary operator
bigger = num1 if num1 >= num2 else num2
smaller = num1 if num1 < num2 else num2
# print("Conditional statment says {} is greater than or equal to {}".format(bigger, smaller))
# Python version of a switch statement
choices = dict(
a = 'First',
b = 'Second',
c = 'Third',
d = 'Fourth',
e = 'Fifth'
)
opt1 = 'c'
opt2 = 'f'
default = 'Option not found'
# print("Python 'switch' statment using a dict: {}".format(choices))
# print("Option 1 was {} and returned: {}".format(opt1, choices.get(opt1, default)))
# print("Option 2 was {} and returned: {}".format(opt2, choices.get(opt2, default)))
print("==============================")
# Loops
print("=========== Loops ==========")
print("Fibonacci series up to 100:")
a, b = 0, 1
while b < 100:
print(b, end=" ")
a, b = b, a + b
print()
# print("For loop printing parts of {}".format(stringer))
for letter in stringer:
# Don't print the vowels
if letter in 'aeiouAEIOU':
continue
# Stop looping at punctuation
if letter in '!@#$%^&*.,?;:-_+=|':
break
# print(letter, end=" ")
# print()
print("==============================")
# Get an index using a for loop with enumerate()
# for index, letter in enumerate(stringer):
# print("Index: {} is letter: {}".format(index, letter))
# List comprehensions
print("=========== List Comprehensions ==========")
# Create a new list - [expression for variable in list]
listOne = [0, 1, 2, 3, 4, 5]
listSquares = [x*x for x in listOne]
print("List comprehension: {}".format(listSquares))
# Filter a list - [expression for variable in list if condition]
listOdd = [x for x in listSquares if x % 2 == 1]
print("Filtered list comprehension: {}".format(listOdd))
# Dictionary comprehensions
print("=========== Dict Comprehensions ==========")
dictComp = {chr(64+x): x for x in range(1, 27)}
print("Dict comprehension: {}".format(dictComp))
# Set comprehension
print("=========== Set Comprehensions ==========")
setComp = {x**5 for x in range(2,8)}
print("Set comprehension: {}".format(setComp))
print("==============================")
# Check if a type is an iterable
print("=========== Is X Type Interable? ==========")
print("Is a string an iterable? {}".format(hasattr(str, '__iter__')))
print("Is a Boolean an iterable? {}".format(hasattr(bool, '__iter__')))
print("Is a list an iterable? {}".format(hasattr(list, '__iter__')))
print("Is a set an iterable? {}".format(hasattr(set, '__iter__')))
print("Is an int an iterable? {}".format(hasattr(int, '__iter__')))
print("==============================")
# Generator Expressions
# Similar to list comprehension, less space in memory
print("=========== Generator Expressions ==========")
genExp = (x**5 for x in range(2,8))
listComp = [x**5 for x in range(2,8)]
print("Type of a generator expression: {}".format(type(genExp)))
print("Actual generator expression: {}".format(genExp))
print("Size of generator expression: {}".format(sys.getsizeof(genExp)))
print("Size of same list comprehension: {}".format(sys.getsizeof(listComp)))
print("==============================")
return 0
if __name__ == '__main__':
main()
| mit | 3,006,655,741,803,645,000 | 32.172932 | 98 | 0.541024 | false | 3.758092 | false | false | false |
TEAM-HRA/hra_suite | HRAGUI/src/hra_gui/qt/docks/tachogram_plot_statistics_dock_widget.py | 1 | 4657 | '''
Created on 04-04-2013
@author: jurek
'''
from hra_core.special import ImportErrorMessage
try:
from PyQt4.QtCore import * # @UnusedWildImport
from PyQt4.QtGui import * # @UnusedWildImport
from hra_core.misc import Params
from hra_math.model.data_vector_listener import DataVectorListener
from hra_math.statistics.tachogram_statistics import calculate_tachogram_statistics # @IgnorePep8
from hra_gui.qt.utils.dnd import CopyDragger
from hra_gui.qt.widgets.dock_widget_widget import DockWidgetWidget
from hra_gui.qt.widgets.table_view_widget import TableViewWidget
from hra_gui.qt.plots.tachogram_plot_const import STATISTIC_MIME_ID
from hra_gui.qt.plots.tachogram_plot_const import STATISTIC_CLASS_NAME_ID
except ImportError as error:
ImportErrorMessage(error, __name__)
class TachogramPlotStatisticsDockWidget(DockWidgetWidget):
"""
a dock widget for tachogram plot statistics
"""
def __init__(self, parent, **params):
self.params = Params(**params)
super(TachogramPlotStatisticsDockWidget, self).__init__(parent,
title=params.get('title', 'Tachogram plot statistics'),
**params)
self.data_accessor = self.params.data_accessor # alias
self.data_accessor.addListener(self,
__TachogramStatisticsDataVectorListener__(self))
self.__createStatisticsWidget__(QVBoxLayout())
parent.addDockWidget(Qt.RightDockWidgetArea, self)
def __createStatisticsWidget__(self, _layout):
self.statisticsWidget = TachogramStatisticsWidget(self.dockComposite,
layout=_layout)
self.fillStatisticsWidget()
def fillStatisticsWidget(self):
statistics = calculate_tachogram_statistics(
signal=self.data_accessor.signal,
annotation=self.data_accessor.annotation)
self.statisticsWidget.setTachogramStatistics(statistics)
class TachogramStatisticsWidget(TableViewWidget):
"""
a widget to display basic tachogram's statistics
"""
def __init__(self, parent, **params):
TableViewWidget.__init__(self, parent, **params)
self.__dragger__ = CopyDragger(self, STATISTIC_MIME_ID, drag_only=True)
self.setSelectionMode(QAbstractItemView.SingleSelection)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.__createModel__()
def __createModel__(self):
model = TachogramStatisticsModel(self)
labels = QStringList(["class_name", "Statistic", "Value"])
model.setHorizontalHeaderLabels(labels)
self.setModel(model)
def setTachogramStatistics(self, _statistics):
model = self.model()
model.removeRows(0, model.rowCount())
values = _statistics[0]
descriptions = _statistics[1]
self.class_names = sorted([name for name in values])
for name in sorted([name for name in self.class_names]):
model.appendRow([QStandardItem(str(name)),
QStandardItem(str(descriptions[name])),
QStandardItem(str(values[name]))])
self.setColumnHidden(0, True) # "class_name" is a hidden column
def startDrag(self, dropActions):
row = self.model().itemFromIndex(self.currentIndex()).row()
self.__dragger__.clear()
self.__dragger__.dragObject(STATISTIC_CLASS_NAME_ID,
self.class_names[row])
self.__dragger__.startDrag()
class TachogramStatisticsModel(QStandardItemModel):
def __init__(self, parent):
QStandardItemModel.__init__(self, parent=parent)
def data(self, _modelIndex, _role):
#the third column (indexing starts from 0) is a value of statistic
if _modelIndex.column() == 2 and _role == Qt.TextAlignmentRole:
return Qt.AlignRight
else:
return super(TachogramStatisticsModel, self).data(_modelIndex,
_role)
class __TachogramStatisticsDataVectorListener__(DataVectorListener):
"""
class used to recalculate tachogram statistics for tachogram
statistics widget when signal or annotation data is changing
"""
def __init__(self, _dock_widget):
self.__dock_widget__ = _dock_widget
def changeSignal(self, _signal, **params):
self.__dock_widget__.fillStatisticsWidget()
def changeAnnotation(self, _annotation, **params):
self.__dock_widget__.fillStatisticsWidget()
| lgpl-3.0 | 8,411,936,265,652,319,000 | 40.954955 | 102 | 0.643333 | false | 4.124889 | false | false | false |
zsiki/ulyxes | pyapi/bluetoothiface.py | 1 | 5203 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. module:: bluetoothiface.py
:platform: Unix, Windows
:synopsis: Ulyxes - an open source project to drive total stations and
publish observation results. GPL v2.0 license Copyright (C)
2010-2013 Zoltan Siki <[email protected]>.
sudo apt-get -y install bluetooth bluez bluez-tools rfkill
to turn on/off bluetooth interface from the command line:
rfkill unblock bluetooth or sudo /etc/init.d/bluetooth stop
rfkill block bluetooth or sudo /etc/init.d/bluetooth start
make connection from command line
hciconfig
https://computingforgeeks.com/connect-to-bluetooth-device-from-linux-terminal/
.. moduleauthor:: Zoltan Siki <[email protected]>,
Kecskeméti Máté <[email protected]>
"""
import logging
import bluetooth
import time
from iface import Iface
class BluetoothIface(Iface):
""" Interface to communicate through bluetooth interfacei as a client.
This class depends on pybluez.
:param name: name of bluetooth interface (str)
:param mac: mac address of server to connect
:param port: bluetooth port, default 3
:param eomRead: end of message char from instrument (str), default '\\r\\n'
:param eomWrite: end of message char from computer (str), default '\\r\\n'
"""
def __init__(self, name, mac, port=3, timeout=5, eomRead='\r\n',
eomWrite='\r\n'):
""" Constructor for bluetooth client
"""
super(BluetoothIface, self).__init__(name)
self.mac = mac
self.port = port
self.timeout = timeout
self.eomRead = eomRead
self.eomWrite = eomWrite
self.socket = None
self.Open()
def __del__(self):
""" Destructor for bluetooth client
"""
self.Close()
self.socket = None
def Open(self):
""" Open bluetooth communication
"""
self.socket = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
try:
self.socket.connect((self.mac, self.port))
except Exception:
logging.error(" error opening bluetooth connection")
self.state = self.IF_SOURCE
self.socket = None
def Close(self):
""" Close bluetooth communication
"""
try:
self.socket.close()
except Exception:
pass
def GetLine(self):
""" read a line from bluetooth
"""
if self.socket is None or self.state != self.IF_OK:
logging.error(" bluetooth connection not opened or in error state")
return None
# read answer till end of message marker
ans = ''
w = -1 * len(self.eomRead)
while ans[w:] != self.eomRead:
ch = ''
try:
ch = (self.socket.recv(1)).decode('ascii')
except Exception:
self.state = self.IF_READ
logging.error(" cannot read bluetooth connection")
break
if ch == '':
# timeout exit loop
self.state = self.IF_TIMEOUT
logging.error(" timeout on bluetooth")
break
ans += ch
# remove end of line
logging.debug(" message got: %s", ans)
ans = ans.strip(self.eomRead)
return ans
def PutLine(self, msg):
""" Send message through bluetooth
:param msg: message to send (str)
:returns: 0 - on OK, -1 on error or interface is in error state
"""
# do nothing if interface is in error state
if self.socket is None or self.state != self.IF_OK:
logging.error(" bluetooth connection not opened or in error state")
return -1
# add CR/LF to message end
w = -1 * len(self.eomWrite)
if msg[w:] != self.eomWrite:
msg += self.eomWrite
# remove special characters
msg = msg.encode('ascii', 'ignore')
# send message to bluetooth interface
logging.debug(" message sent: %s", msg)
try:
self.socket.settimeout(self.timeout)
self.socket.send(msg)
except Exception:
self.state = self.IF_WRITE
logging.error(" cannot write to bluetooth connection")
return -1
return 0
def Send(self, msg):
""" send message to bluetooth and wait for answer
:param msg: message to send, it can be multipart message separated by '|' (str)
:returns: answer from instrument (str)
"""
msglist = msg.split("|")
res = ''
#sending
for m in msglist:
if self.PutLine(m) == 0:
time.sleep(5)
res += self.GetLine() + '|'
if res.endswith('|'):
res = res[:-1]
return res
if __name__ == "__main__":
#a = BluetoothIface('test', '00:12:F3:04:ED:06', 1) # leica 1200
a = BluetoothIface('test', '00:07:80:57:3B:6E', 1) # topcon hiper II rover
if a.GetState() == a.IF_OK:
print(a.Send('%R1Q,2008:1,0'))
print(a.GetState())
| gpl-2.0 | -7,032,048,408,603,781,000 | 32.333333 | 91 | 0.566923 | false | 3.954373 | false | false | false |
ntt-nflex/flexer | examples/get_logs.py | 1 | 3563 | """This is an example implementation of a "get_logs" handler
for cmp-connectors. For the purpose of this example, a python generator is
generating fake logs
"""
import datetime as dt
import itertools
def get_logs(event, context):
# mock the credentials for now, they're usually inside the event
# e.g. credentials = event['credentials']
credentials = {
"username": "usr",
"password": "pass",
"region": "eu"
}
try:
username = credentials["username"]
password = credentials["password"]
region = credentials["region"] # Provider region of the resource
provider_id = event["provider_id"] # Provider ID of the resource
resource_id = event["resource_id"] # CMP ID of the resource
start_date = event["start_date"]
end_date = event["end_date"]
except KeyError as e:
raise Exception("Missing \"%s\" from the event" % e)
client = NTTTrainingClient(username=username,
password=password,
region=region)
logs = client.get_logs(provider_id=provider_id,
start_date=start_date,
end_date=end_date)
context.log("Collected %d logs from Prototype provider" % len(logs))
cmp_logs = build_cmp_logs(context, resource_id, logs)
context.log("Built %d CMP logs" % len(cmp_logs))
return post_logs(context=context, data=cmp_logs)
ADJECTIVES = [
"bad", "terrible", "awful", "sinister", "despicable",
"good", "great", "groovy", "wonderful", "marvelous",
"weird", "mysterious", "unexpected", "worrying",
]
def logs_generator(adjectives):
values = ["Something %s happened" % adj for adj in adjectives]
return itertools.cycle(values)
logs = logs_generator(adjectives=ADJECTIVES)
class NTTTrainingClient(object):
"""Fake provider client that generates logs.
It uses the logs generator above to generate fake data
"""
def __init__(self, *args, **kwargs):
"""The arguments depend on the format of the provider credentials"""
self.provider = 'ntt-training'
def get_logs(self, provider_id, start_date, end_date):
"""Query the provider for log data and return it"""
start = dt.datetime.strptime(start_date, "%Y-%m-%dT%H:%M:%S.%fZ")
end = dt.datetime.strptime(end_date, "%Y-%m-%dT%H:%M:%S.%fZ")
return self._generate_logs(logs, start, end)
def _generate_logs(self, iterator, start, end):
logs = []
t = start
while t <= end:
logs.append({
"message": next(iterator),
"level": "INFO",
"time": t.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
})
t += dt.timedelta(minutes=1)
return logs
def build_cmp_logs(context, resource_id, logs):
"""Convert the provider logs into a CMP-friendly format"""
cmp_logs = []
for log in logs:
cmp_logs.append({
"service": "nflex.cmp-adapter.ntt-training",
"resource_id": resource_id,
"severity": log["level"],
"timestamp": log["time"],
"message": log["message"],
})
return cmp_logs
def post_logs(context, data):
"""Send the logs to CMP"""
try:
response = context.api.post(path="/logs", data=data)
response.raise_for_status()
return response.json()
except Exception as err:
context.log("Error sending logs to CMP: %s" % err,
severity="ERROR")
| gpl-2.0 | -4,423,594,845,112,025,600 | 30.254386 | 76 | 0.586304 | false | 3.802561 | false | false | false |
foraliving/pilot | foraliving/forms.py | 1 | 2864 | from django import forms
from django.forms import CharField, Form, PasswordInput
from .models import *
from django.contrib.auth.models import User, Group
from django.db.models.fields import BLANK_CHOICE_DASH
class volunteerUserSignupForm(forms.ModelForm):
password = forms.CharField(widget=PasswordInput())
class Meta:
model = User
fields = ['first_name', 'last_name', 'email', 'username', 'password', ]
class volunteerSignupForm(forms.ModelForm):
hsGradChoices = (
("", 'Select range'),
(1, '1-4'),
(2, '5-10'),
(3, '11 or more'),
(4, 'Have not graduated'),)
collegeLevelChoice = (
("", "Select"),
(1, "associate"),
(2, "bachelor's"),
(3, "master's"),
(4, "doctoral"),
(5, "none"),)
canGetText = forms.TypedChoiceField(coerce=lambda x: x == 'True', choices=((True, 'Yes'), (False, 'No')),
widget=forms.RadioSelect, label="Can we text you on this number?", required=True)
isBusinessOwner = forms.BooleanField(label="I am a business owner", initial=True, required=False)
yearsInIndustry = forms.CharField(label="Number of years in this industry", required=True,
widget=forms.NumberInput(attrs={'size': '10', 'placeholder': ''}))
workTitle = forms.CharField(label="Work title", required=False)
workIndustry = forms.CharField(label="Work industry", required=False)
linkedinProfile = forms.CharField(label="Your Linkedin profile", required=False)
yearsSinceHSGraduation = forms.ChoiceField(hsGradChoices, label="Year since high school graduation", required=True)
collegeLevel = forms.ChoiceField(choices=collegeLevelChoice, label="Highest college degree",
required=True, initial="")
collegeMajor = forms.CharField(label="College major(s)", required=False)
skills = forms.CharField(label="Please enter skills related to your job, role and industry",
required=False)
interests = forms.CharField(label="Please provide some interests that lead you to your career choice",
required=False)
def __init__(self, *args, **kwargs):
super(volunteerSignupForm, self).__init__(*args, **kwargs)
self.fields['yearsInIndustry'].widget.attrs['style'] = "width:20%"
class Meta:
model = Volunteer_User_Add_Ons
# fields = '__all__'
exclude = ['user']
class TeacherAddClass(forms.Form):
class_name = forms.CharField(label="Class name")
students_csv = forms.FileField(required=True, label='Upload File')
class TeacherAddClassAssignment(forms.Form):
assignment_name = forms.CharField(label="Assignment name")
description = forms.CharField(label="Description", required=False)
| mit | -6,639,675,033,360,926,000 | 43.061538 | 121 | 0.639665 | false | 3.912568 | false | false | false |
wangjun/pyload | module/plugins/hoster/UlozTo.py | 1 | 6962 | # -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: zoidberg
"""
import re
import time
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
from module.common.json_layer import json_loads
def convertDecimalPrefix(m):
# decimal prefixes used in filesize and traffic
return ("%%.%df" % {'k': 3, 'M': 6, 'G': 9}[m.group(2)] % float(m.group(1))).replace('.', '')
class UlozTo(SimpleHoster):
__name__ = "UlozTo"
__type__ = "hoster"
__pattern__ = r"http://(\w*\.)?(uloz\.to|ulozto\.(cz|sk|net)|bagruj.cz|zachowajto.pl)/(?:live/)?(?P<id>\w+/[^/?]*)"
__version__ = "0.95"
__description__ = """uloz.to"""
__author_name__ = ("zoidberg")
FILE_NAME_PATTERN = r'<a href="#download" class="jsShowDownload">(?P<N>[^<]+)</a>'
FILE_SIZE_PATTERN = r'<span id="fileSize">.*?(?P<S>[0-9.]+\s[kMG]?B)</span>'
FILE_INFO_PATTERN = r'<p>File <strong>(?P<N>[^<]+)</strong> is password protected</p>'
FILE_OFFLINE_PATTERN = r'<title>404 - Page not found</title>|<h1 class="h1">File (has been deleted|was banned)</h1>'
FILE_SIZE_REPLACEMENTS = [('([0-9.]+)\s([kMG])B', convertDecimalPrefix)]
FILE_URL_REPLACEMENTS = [(r"(?<=http://)([^/]+)", "www.ulozto.net")]
PASSWD_PATTERN = r'<div class="passwordProtectedFile">'
VIPLINK_PATTERN = r'<a href="[^"]*\?disclaimer=1" class="linkVip">'
FREE_URL_PATTERN = r'<div class="freeDownloadForm"><form action="([^"]+)"'
PREMIUM_URL_PATTERN = r'<div class="downloadForm"><form action="([^"]+)"'
def setup(self):
self.multiDL = self.premium
self.resumeDownload = True
def process(self, pyfile):
pyfile.url = re.sub(r"(?<=http://)([^/]+)", "www.ulozto.net", pyfile.url)
self.html = self.load(pyfile.url, decode=True, cookies=True)
passwords = self.getPassword().splitlines()
while self.PASSWD_PATTERN in self.html:
if passwords:
password = passwords.pop(0)
self.logInfo("Password protected link, trying " + password)
self.html = self.load(pyfile.url, get={"do": "passwordProtectedForm-submit"},
post={"password": password, "password_send": 'Send'}, cookies=True)
else:
self.fail("No or incorrect password")
if re.search(self.VIPLINK_PATTERN, self.html):
self.html = self.load(pyfile.url, get={"disclaimer": "1"})
self.file_info = self.getFileInfo()
if self.premium and self.checkTrafficLeft():
self.handlePremium()
else:
self.handleFree()
self.doCheckDownload()
def handleFree(self):
action, inputs = self.parseHtmlForm('id="frm-downloadDialog-freeDownloadForm"')
if not action or not inputs:
self.parseError("free download form")
self.logDebug('inputs.keys() = ' + str(inputs.keys()))
# get and decrypt captcha
if inputs.has_key('captcha_value') and inputs.has_key('captcha_id') and inputs.has_key('captcha_key'):
# Old version - last seen 9.12.2013
self.logDebug('Using "old" version')
captcha_value = self.decryptCaptcha("http://img.uloz.to/captcha/%s.png" % inputs['captcha_id'])
self.logDebug('CAPTCHA ID: ' + inputs['captcha_id'] + ', CAPTCHA VALUE: ' + captcha_value)
inputs.update({'captcha_id': inputs['captcha_id'], 'captcha_key': inputs['captcha_key'], 'captcha_value': captcha_value})
elif inputs.has_key("captcha_value") and inputs.has_key("timestamp") and inputs.has_key("salt") and inputs.has_key("hash"):
# New version - better to get new parameters (like captcha reload) because of image url - since 6.12.2013
self.logDebug('Using "new" version')
xapca = self.load("http://www.ulozto.net/reloadXapca.php", get = { "rnd": str(int(time.time()))})
self.logDebug('xapca = ' + str(xapca))
data = json_loads(xapca)
captcha_value = self.decryptCaptcha(str(data['image']))
self.logDebug('CAPTCHA HASH: ' + data['hash'] + ', CAPTCHA SALT: ' + str(data['salt']) + ', CAPTCHA VALUE: ' + captcha_value)
inputs.update({'timestamp': data['timestamp'], 'salt': data['salt'], 'hash': data['hash'], 'captcha_value': captcha_value})
else:
self.parseError("CAPTCHA form changed")
self.multiDL = True
self.download("http://www.ulozto.net" + action, post=inputs, cookies=True, disposition=True)
def handlePremium(self):
self.download(self.pyfile.url + "?do=directDownload", disposition=True)
#parsed_url = self.findDownloadURL(premium=True)
#self.download(parsed_url, post={"download": "Download"})
def findDownloadURL(self, premium=False):
msg = "%s link" % ("Premium" if premium else "Free")
found = re.search(self.PREMIUM_URL_PATTERN if premium else self.FREE_URL_PATTERN, self.html)
if not found:
self.parseError(msg)
parsed_url = "http://www.ulozto.net" + found.group(1)
self.logDebug("%s: %s" % (msg, parsed_url))
return parsed_url
def doCheckDownload(self):
check = self.checkDownload({
"wrong_captcha": re.compile(r'<ul class="error">\s*<li>Error rewriting the text.</li>'),
"offline": re.compile(self.FILE_OFFLINE_PATTERN),
"passwd": self.PASSWD_PATTERN,
"server_error": 'src="http://img.ulozto.cz/error403/vykricnik.jpg"', # paralell dl, server overload etc.
"not_found": "<title>Ulož.to</title>"
})
if check == "wrong_captcha":
#self.delStorage("captcha_id")
#self.delStorage("captcha_text")
self.invalidCaptcha()
self.retry(reason="Wrong captcha code")
elif check == "offline":
self.offline()
elif check == "passwd":
self.fail("Wrong password")
elif check == "server_error":
self.logError("Server error, try downloading later")
self.multiDL = False
self.setWait(3600, True)
self.wait()
self.retry()
elif check == "not_found":
self.fail("Server error - file not downloadable")
getInfo = create_getInfo(UlozTo)
| gpl-3.0 | -4,936,276,214,774,120,000 | 44.201299 | 137 | 0.605085 | false | 3.575244 | false | false | false |
opennode/nodeconductor-saltstack | src/nodeconductor_saltstack/saltstack/apps.py | 1 | 2254 | from django.apps import AppConfig
from django.db.models import signals
from nodeconductor.structure import SupportedServices
class SaltStackConfig(AppConfig):
name = 'nodeconductor_saltstack.saltstack'
verbose_name = 'SaltStack Core'
service_name = 'SaltStack'
def ready(self):
from .backend import SaltStackBackend
from .models import SaltStackProperty
import handlers
SupportedServices.register_backend(SaltStackBackend)
from nodeconductor.structure.models import ServiceSettings
from nodeconductor.quotas.fields import QuotaField, CounterQuotaField
from ..exchange.models import ExchangeTenant
ServiceSettings.add_quota_field(
name='sharepoint_storage',
quota_field=QuotaField(
creation_condition=lambda service_settings: service_settings.type == SaltStackConfig.service_name,
),
)
ServiceSettings.add_quota_field(
name='exchange_storage',
quota_field=QuotaField(
creation_condition=lambda service_settings: service_settings.type == SaltStackConfig.service_name,
),
)
ServiceSettings.add_quota_field(
name='exchange_tenant_count',
quota_field=CounterQuotaField(
creation_condition=lambda service_settings: service_settings.type == SaltStackConfig.service_name,
target_models=[ExchangeTenant],
path_to_scope='service_project_link.service.settings',
)
)
for index, model in enumerate(SaltStackProperty.get_all_models()):
signals.post_save.connect(
handlers.log_saltstack_property_created,
sender=model,
dispatch_uid='nodeconductor_saltstack.saltstack.handlers.log_saltstack_property_created{}_{}'.format(
model.__name__, index),
)
signals.post_delete.connect(
handlers.log_saltstack_property_deleted,
sender=model,
dispatch_uid='nodeconductor_saltstack.saltstack.handlers.log_saltstack_property_deleted{}_{}'.format(
model.__name__, index),
)
| mit | -2,577,255,075,806,401,500 | 37.862069 | 117 | 0.63354 | false | 4.63786 | true | false | false |
MeGotsThis/Hanabi-AI | server.py | 1 | 20140 | import math
import random
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Type
from bot import bot
from enums import Action, Clue, Rank, Suit, Variant
from game import Game
names: List[str] = ['Alice', 'Bob', 'Cathy', 'Donald', 'Emily']
numberWords: List[str] = ['zero', 'one', 'two', 'three', 'four', 'five']
class CardStatus(Enum):
Deck = 0
Hand = 1
Play = 2
Discard = 3
class ServerGame:
def __init__(self,
variant: Variant,
players: int,
botCls: Type[bot.Bot], *,
print_messages: Any=False,
print_verbose: Any=False,
null_clues: Any=False,
seed: Any=None, **kwargs) -> None:
if variant not in Variant:
raise ValueError('variant')
if players < 2 or players > 5:
raise ValueError('players')
self.seed: Any = seed
self.rng: random.Random = random.Random(self.seed)
self.variant: Variant = variant
self.printVerbose: Optional[bool] = convert_print(print_verbose)
self.printMessages: Optional[bool]= convert_print(print_messages)
self.allowNullClues: bool = bool(null_clues)
self.gameLog: List[Tuple[str, dict]] = []
self.messages: List[str] = []
self.verbose: List[str] = []
self.deck: List[ServerCard]
self.initialize_deck()
self.hands: List[List[int]] = [[] for _ in range(players)]
self.discards: Dict[Suit, List[int]]
self.discards = {s: [] for s in self.variant.pile_suits}
self.plays: Dict[Suit, List[int]]
self.plays = {s: [] for s in self.variant.pile_suits}
self.nextDraw: int = 0
self.turnCount: int = 0
self.endTurn: Optional[int] = None
self.maxScore: int = 25
self.score: int = 0
self.clues: int = 8
self.strikes: int = 0
self.loss: bool = False
self.connections: List[ServerConnection]
self.connections = [ServerConnection(p, self)
for p in range(players)]
self.players: List[Game]
self.players = [Game(self.connections[p], self.variant,
names[:players], p, botCls, **kwargs)
for p in range(players)]
self.currentPlayer: int = self.rng.randrange(players)
self.lastAction: int = (self.currentPlayer - 1) % players
def isGameComplete(self) -> bool:
if self.strikes == 3 or self.score >= self.maxScore:
return True
if self.turnCount > (self.endTurn or math.inf):
return True
return False
def updateMaxScore(self) -> None:
maxScore: int = 0
s: Suit
for s in self.variant.pile_suits:
possible: int = 5
copies: Dict[Rank, int] = {r: 0 for r in Rank}
d: int
for d in self.discards[s]:
card: ServerCard = self.deck[d]
copies[card.rank] += 1
r: Rank
for r in reversed(Rank): # type: ignore
totalCopies: int = r.num_copies
if self.variant == Variant.OneOfEach and s == Suit.Extra:
totalCopies += 1
if copies[r] == totalCopies:
possible = r.value - 1
maxScore += possible
self.maxScore = maxScore
def print(self,
message: Optional[str]=None,
verbose: Optional[str]=None,
final: bool=False) -> None:
verbose = verbose if verbose is not None else message
if verbose is not None:
self.verbose.append(verbose)
if self.printVerbose:
print(verbose)
if message is not None:
self.messages.append(message)
if not self.printVerbose and self.printMessages:
print(message)
if final and self.printMessages is None:
print(message)
def run_game(self) -> None:
self.log('game_start', {'replay': False})
p: Game
for p in self.players:
self.log('init',
{'seat': p.botPosition,
'names': names[:len(self.players)],
'variant': self.variant.value,
'replay': False,
'spectating': False})
handSize: int = 4 if len(self.players) > 3 else 5
for p in self.players:
for _ in range(handSize):
self.draw_card(p.botPosition)
self.print('{} goes first'.format(names[self.currentPlayer]))
while not self.isGameComplete():
self.send('notify', {'type': 'status', 'clues': self.clues,
'score': self.score})
self.send('notify', {'type': 'turn', 'who': self.currentPlayer,
'num': self.turnCount})
self.send('action',
{'can_clue': self.clues > 0,
'can_discard': self.clues < 8},
player=self.currentPlayer)
self.turnCount += 1
self.currentPlayer = (self.currentPlayer + 1) % len(self.players)
self.updateMaxScore()
self.send('notify', {'type': 'game_over'})
self.loss = self.strikes == 3
if not self.loss:
self.print("Players score {} points".format(self.score),
final=True)
else:
self.print("Players lost", final=True)
self.print(verbose='')
self.print(verbose='Number of Players: {}'.format(len(self.players)))
self.print(verbose='Variant: {}'.format(self.variant.full_name))
self.print(verbose='Deck Size: {}'.format(len(self.deck)))
self.recordGameState()
def recordGameState(self) -> None:
deckSize: int = len(self.deck) - self.nextDraw
lastPlayer: int = (self.currentPlayer - 1) % len(self.players)
self.print(verbose='Deck Count: {}'.format(deckSize))
self.print(verbose='Clue Count: {}'.format(self.clues))
self.print(verbose='Score: {}'.format(self.score))
self.print(verbose='Strikes: {}'.format(self.strikes))
self.print(verbose='Max Possible Score: {}'.format(self.maxScore))
self.print(verbose='Turn Count: {}'.format(self.turnCount))
self.print(verbose='End Turn: {}'.format(self.endTurn))
self.print(verbose='Next Draw Index: {}'.format(self.nextDraw))
self.print(verbose='Last Player: {}'.format(names[lastPlayer]))
self.print(verbose='')
self.print(verbose='Player Hands (Newest To Oldest)')
p: int
hand: List[int]
for p, hand in enumerate(self.hands):
cards = []
for deckIdx in reversed(hand):
card = self.deck[deckIdx]
cards.append('{} {}'.format(card.suit.full_name(self.variant),
card.rank.value))
self.print(verbose='{}: {}'.format(names[p], ', '.join(cards)))
self.print(verbose='')
self.print(verbose='Played Cards')
s: Suit
for s in self.variant.pile_suits:
self.print(verbose='{}: {}'.format(s.full_name(self.variant),
len(self.plays[s])))
self.print(verbose='')
self.print(verbose='Discarded Cards')
for s in self.variant.pile_suits:
discards: List[int] = []
for deckIdx in self.discards[s]:
card = self.deck[deckIdx]
discards.append(card.rank.value)
discards.sort()
self.print(verbose='{}: {}'.format(
s.full_name(self.variant),
', '.join(str(d) for d in discards)))
self.print(verbose='')
def log(self, type: str, resp: dict) -> None:
self.gameLog.append((type, resp))
def send(self,
type: str,
resp: dict, *,
player: Optional[int]=None) -> None:
if player is not None:
p = self.players[player]
p.received(type, resp)
else:
for p in self.players:
p.received(type, resp)
self.log(type, resp)
def initialize_deck(self) -> None:
self.deck = []
index: int = 0
s: Suit
r: Rank
i: int
for s in self.variant.pile_suits:
for r in Rank:
if not (s == Suit.Extra and self.variant == Variant.OneOfEach):
for i in range(r.num_copies):
self.deck.append(ServerCard(index, s, r, self.variant))
index += 1
else:
self.deck.append(ServerCard(index, s, r, self.variant))
index += 1
self.rng.shuffle(self.deck)
card: ServerCard
for i, card in enumerate(self.deck):
card.position = i
card.status = CardStatus.Deck
def draw_card(self, player: int) -> None:
if self.nextDraw >= len(self.deck):
return
card: ServerCard = self.deck[self.nextDraw]
if card.status != CardStatus.Deck:
raise GameException('Bad Card Status', card.status)
card.player = player
card.status = CardStatus.Hand
p: Game
info: dict
for p in self.players:
info = {'type': 'draw',
'who': player,
'order': self.nextDraw}
if p.botPosition != player:
info['suit'] = card.suit.value
info['rank'] = card.rank.value
self.send('notify', info, player=p.botPosition)
info = {'type': 'draw',
'who': player,
'order': self.nextDraw,
'suit': card.suit.value,
'rank': card.rank.value}
self.log('notify', info)
self.hands[player].append(self.nextDraw)
self.nextDraw += 1
if self.nextDraw >= len(self.deck):
self.endTurn = self.turnCount + len(self.players)
self.send('notify', {'type': 'draw_size',
'size': len(self.deck) - self.nextDraw})
self.print(verbose="{} draws {} {}".format(
names[player], card.suit.full_name(self.variant), card.rank.value))
def clue_player(self,
player: int,
target: int,
type: int,
value: int) -> None:
if self.isGameComplete():
raise GameException('Game is complete')
if self.lastAction == player:
raise GameException('Player already made a move', player)
if self.currentPlayer != player:
raise GameException('Wrong Player Turn', player)
if player == target:
raise GameException('Cannot clue self')
if self.clues == 0:
raise GameException('Cannot Clue')
if target >= len(self.players):
raise GameException('Target does not exist', target)
rank: Rank
suit: Suit
positions: List[int]
cards: List[int]
card: ServerCard
i: int
h: int
if type == Clue.Rank.value:
rank = Rank(value)
if not rank.valid():
raise GameException('Invalid rank value', value)
positions = []
cards = []
for i, h in enumerate(self.hands[target]):
card = self.deck[h]
if card.rank == rank:
positions.insert(0, len(self.hands[target]) - i)
cards.append(h)
if not cards and not self.allowNullClues:
raise GameException('No Cards Clued')
self.send('notify',
{'type': 'clue',
'giver': player,
'target': target,
'clue': {'type': type, 'value': value},
'list': cards})
self.clues -= 1
self.lastAction = player
self.print(
"{} tells {} about {} {}'s".format(
names[player], names[target], numberWords[len(cards)],
rank.value),
"{} tells {} about {} {}'s in slots {}".format(
names[player], names[target], numberWords[len(cards)],
rank.value, ', '.join(str(p) for p in positions)))
elif type == Clue.Suit.value:
suit = Suit(value)
if not suit.valid(self.variant):
raise GameException('Invalid suit value', value)
positions = []
cards = []
for i, h in enumerate(self.hands[target]):
card = self.deck[h]
if card.suit == suit:
positions.insert(0, len(self.hands[target]) - i)
cards.append(h)
if self.variant == Variant.Rainbow and card.suit == Suit.Extra:
cards.append(h)
if not cards and not self.allowNullClues:
raise GameException('No Cards Clued')
self.send('notify',
{'type': 'clue',
'giver': player,
'target': target,
'clue': {'type': type, 'value': value},
'list': cards})
self.clues -= 1
self.lastAction = player
self.print(
"{} tells {} about {} {}'s".format(
names[player], names[target], numberWords[len(cards)],
suit.full_name(self.variant)),
"{} tells {} about {} {}'s in slots {}".format(
names[player], names[target], numberWords[len(cards)],
suit.full_name(self.variant),
', '.join(str(p) for p in positions)))
else:
raise GameException('Invalid clue type', type)
def play_card(self, player: int, deckIdx: int) -> None:
if self.isGameComplete():
raise GameException('Game is complete')
if self.lastAction == player:
raise GameException('Player already made a move', player)
if self.currentPlayer != player:
raise GameException('Wrong Player Turn', player)
card: ServerCard = self.deck[deckIdx]
if card.status != CardStatus.Hand:
raise GameException('Bad Card Status', card.status)
if card.player != player:
raise GameException('Card does not belong to player', card.player)
nextRank: int = len(self.plays[card.suit]) + 1
position: int
position = len(self.hands[player]) - self.hands[player].index(deckIdx)
if card.rank.value == nextRank:
self.plays[card.suit].append(card.position)
self.send('notify',
{'type': 'played',
'which': {'suit': card.suit, 'rank': card.rank,
'index': card.index, 'order': card.position}})
self.score += 1
card.status = CardStatus.Play
self.hands[player].remove(deckIdx)
self.print(
"{} plays {} {}".format(
names[player], card.suit.full_name(self.variant),
card.rank.value),
"{} plays {} {} from slot {}".format(
names[player], card.suit.full_name(self.variant),
card.rank.value, position))
else:
self.discards[card.suit].append(card.position)
self.strikes += 1
self.send('notify',
{'type': 'discard',
'which': {'suit': card.suit, 'rank': card.rank,
'index': card.index, 'order': card.position}})
self.send('notify',
{'type': 'strike',
'num': self.strikes})
card.status = CardStatus.Discard
self.hands[player].remove(deckIdx)
self.print(
"{} fails to play {} {}".format(
names[player], card.suit.full_name(self.variant),
card.rank.value),
"{} fails to play {} {} from slot {}".format(
names[player], card.suit.full_name(self.variant),
card.rank.value, position))
self.draw_card(player)
self.lastAction = player
def discard_card(self, player: int, deckIdx: int) -> None:
if self.isGameComplete():
raise GameException('Game is complete')
if self.lastAction == player:
raise GameException('Player already made a move', player)
if self.currentPlayer != player:
raise GameException('Wrong Player Turn', player)
if self.clues == 8:
raise GameException('Cannot Discard')
card: ServerCard = self.deck[deckIdx]
if card.status != CardStatus.Hand:
raise GameException('Bad Card Status', card.status)
if card.player != player:
raise GameException('Card does not belong to player', card.player)
self.discards[card.suit].append(card.position)
self.send('notify',
{'type': 'discard',
'which': {'suit': card.suit, 'rank': card.rank,
'index': card.index, 'order': card.position}})
card.status = CardStatus.Discard
position: int
position = len(self.hands[player]) - self.hands[player].index(deckIdx)
self.hands[player].remove(deckIdx)
self.clues += 1
self.print(
"{} discards {} {}".format(
names[player], card.suit.full_name(self.variant),
card.rank.value),
"{} discards {} {} from slot {}".format(
names[player], card.suit.full_name(self.variant),
card.rank.value, position))
self.draw_card(player)
self.lastAction = player
class ServerCard:
def __init__(self,
index: int,
suit: Suit,
rank: Rank,
variant: Variant) -> None:
self.variant:Variant = variant
self.index: int = index
self.position: int
self.suit: Suit = suit
self.rank: Rank = rank
self.status: CardStatus
self.player: int = None
def __str__(self) -> str:
return "{color} {number}".format(
color=self.suit.full_name(self.variant),
number=self.rank.value)
class ServerConnection:
def __init__(self, position: int, game: ServerGame) -> None:
self.position: int = position
self.game: ServerGame = game
def emit(self, *args) -> None:
if len(args) == 2:
type: str
data: dict
type, data = args
if type != 'message':
raise GameException('emit type')
if data['type'] != 'action':
raise GameException('data type')
if data['resp']['type'] == Action.Clue.value:
self.game.clue_player(
self.position, data['resp']['target'],
data['resp']['clue']['type'],
data['resp']['clue']['value'])
elif data['resp']['type'] == Action.Play.value:
self.game.play_card(self.position, data['resp']['target'])
elif data['resp']['type'] == Action.Discard.value:
self.game.discard_card(self.position, data['resp']['target'])
else:
raise GameException('emit action type')
class GameException(Exception):
pass
def convert_print(arg: Any) -> Optional[bool]:
if isinstance(arg, str):
if arg.lower() in ['false', '0', '']:
return False
if arg.lower() in ['none']:
return None
return bool(arg) | gpl-3.0 | 8,164,153,947,375,564,000 | 39.04175 | 79 | 0.511867 | false | 4.117767 | false | false | false |
Enteee/pdml2flow | pdml2flow/cli.py | 1 | 4583 | # vim: set fenc=utf8 ts=4 sw=4 et :
import sys
import xml.sax
import imp
from os import path
from signal import signal, SIGINT
from shutil import copytree, ignore_patterns
from pkg_resources import resource_filename
from configparser import ConfigParser
from .logging import *
from .conf import Conf
from .plugin import *
from .pdmlhandler import PdmlHandler
def _add_common_arguments(argparser):
argparser.add_argument(
'-s',
dest='EXTRACT_SHOW',
action='store_true',
help='Extract show names, every data leaf will now look like {{ raw : [] , show: [] }} [default: {}]'.format(
Conf.EXTRACT_SHOW
)
)
argparser.add_argument(
'-d',
dest='DEBUG',
action='store_true',
help='Debug mode [default: {}]'.format(
Conf.DEBUG
)
)
def pdml2flow():
def add_arguments_cb(argparser):
argparser.add_argument(
'-f',
dest='FLOW_DEF_STR',
action='append',
help='Fields which define the flow, nesting with: \'{}\' [default: {}]'.format(
Conf.FLOW_DEF_NESTCHAR, Conf.FLOW_DEF_STR
)
)
argparser.add_argument(
'-t',
type=int,
dest='FLOW_BUFFER_TIME',
help='Lenght (in seconds) to buffer a flow before writing the packets [default: {}]'.format(
Conf.FLOW_BUFFER_TIME
)
)
argparser.add_argument(
'-l',
type=int,
dest='DATA_MAXLEN',
help='Maximum lenght of data in tshark pdml-field [default: {}]'.format(
Conf.DATA_MAXLEN
)
)
argparser.add_argument(
'-c',
dest='COMPRESS_DATA',
action='store_true',
help='Removes duplicate data when merging objects, will not preserve order of leaves [default: {}]'.format(
Conf.COMPRESS_DATA
)
)
argparser.add_argument(
'-a',
dest='FRAMES_ARRAY',
action='store_true',
help='Instead of merging the frames will append them to an array [default: {}]'.format(
Conf.FRAMES_ARRAY
)
)
_add_common_arguments(argparser)
def postprocess_conf_cb(conf):
"""Split each flowdef to a path."""
if conf['FLOW_DEF_STR'] is not None:
conf['FLOW_DEF'] = Conf.get_real_paths(
conf['FLOW_DEF_STR'],
Conf.FLOW_DEF_NESTCHAR
)
Conf.load(
'Aggregates wireshark pdml to flows',
add_arguments_cb,
postprocess_conf_cb
)
start_parser()
def pdml2frame():
def add_arguments_cb(argparser):
_add_common_arguments(argparser)
def postprocess_conf_cb(conf):
conf['DATA_MAXLEN'] = sys.maxsize
conf['FLOW_BUFFER_TIME'] = 0
conf['FLOW_DEF_STR'] = [ 'frame.number' ]
conf['FLOW_DEF'] = Conf.get_real_paths(
conf['FLOW_DEF_STR'],
Conf.FLOW_DEF_NESTCHAR
)
Conf.load(
'Converts wireshark pdml to frames',
add_arguments_cb,
postprocess_conf_cb
)
start_parser()
def start_parser():
# print config
for name, value in Conf.get().items():
debug('{} : {}'.format(name, value))
handler = PdmlHandler()
def sigint_handler(sig, frame):
handler.endDocument()
sys.exit(0)
signal(SIGINT, sigint_handler)
try:
xml.sax.parse(
Conf.IN,
handler
)
except xml.sax._exceptions.SAXParseException as e:
# this might happen when a pdml file is malformed
warning('Parser returned exception: {}'.format(e))
handler.endDocument()
def pdml2flow_new_plugin():
def add_arguments_cb(argparser):
argparser.add_argument(
'DST',
type=str,
nargs='+',
help='Where to initialize the plugin, basename will become the plugin name'
)
Conf.load(
'Initializes a new plugin',
add_arguments_cb
)
for dst in Conf.DST:
plugin_name = path.basename(dst)
plugin_conf = ConfigParser({
'plugin_name': plugin_name
})
copytree(
resource_filename(__name__, 'plugin-skeleton'),
dst,
ignore=ignore_patterns('__pycache__')
)
with open(path.join(dst, Conf.PLUGIN_CONF_NAME), mode='w') as fd:
plugin_conf.write(fd)
| apache-2.0 | 7,527,734,594,599,178,000 | 26.443114 | 119 | 0.543749 | false | 3.957686 | false | false | false |
demisto/content | Packs/OpenCTI/Integrations/OpenCTI/OpenCTI.py | 1 | 24064 | import copy
from typing import List, Optional
from io import StringIO
import sys
import demistomock as demisto # noqa: E402 lgtm [py/polluting-import]
import urllib3
from CommonServerPython import * # noqa: E402 lgtm [py/polluting-import]
from pycti import OpenCTIApiClient, Identity
# Disable insecure warnings
urllib3.disable_warnings()
# Disable info logging from the api
logging.getLogger().setLevel(logging.ERROR)
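# Maps XSOAR indicator types to OpenCTI (STIX) observable entity types, used when building query filters
# and when creating observables.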
XSOAR_TYPES_TO_OPENCTI = {
'account': "User-Account",
'domain': "Domain-Name",
'email': "Email-Addr",
'file-md5': "StixFile",
'file-sha1': "StixFile",
'file-sha256': "StixFile",
'file': 'StixFile',
'host': "X-OpenCTI-Hostname",
'ip': "IPv4-Addr",
'ipv6': "IPv6-Addr",
'registry key': "Windows-Registry-Key",
'url': "Url"
}
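# Reverse mapping, used to translate OpenCTI entity types back to XSOAR indicator types in command outputs.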
OPENCTI_TYPES_TO_XSOAR = {
"User-Account": 'Account',
"Domain-Name": 'Domain',
"Email-Addr": 'Email',
"StixFile": "File",
"X-OpenCTI-Hostname": 'Host',
"IPv4-Addr": 'IP',
"IPv6-Addr": 'IPv6',
"Windows-Registry-Key": 'Registry Key',
"Url": 'URL'
}
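# Maps XSOAR field argument names to the corresponding OpenCTI attribute names used by field updates.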
KEY_TO_CTI_NAME = {
'description': 'x_opencti_description',
'score': 'x_opencti_score'
}
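# Maps XSOAR file hash types to the STIX key paths passed to pycti as simple_observable_key.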
FILE_TYPES = {
'file-md5': "file.hashes.md5",
'file-sha1': "file.hashes.sha-1",
'file-sha256': "file.hashes.sha-256"
}
def label_create(client: OpenCTIApiClient, label_name: Optional[str]):
""" Create label at opencti
Args:
client: OpenCTI Client object
label_name(str): label name to create
Returns:
        The created label object.
"""
try:
label = client.label.create(value=label_name)
except Exception as e:
demisto.error(str(e))
raise DemistoException("Can't create label.")
return label
def build_indicator_list(indicator_list: List[str]) -> List[str]:
"""Builds an indicator list for the query
Args:
indicator_list: List of XSOAR indicators types to return..
Returns:
indicators: list of OPENCTI indicators types"""
result = []
if 'ALL' in indicator_list:
# Replaces "ALL" for all types supported on XSOAR.
result = ['User-Account', 'Domain-Name', 'Email-Addr', 'StixFile', 'X-OpenCTI-Hostname', 'IPv4-Addr',
'IPv6-Addr', 'Windows-Registry-Key', 'Url']
else:
result = [XSOAR_TYPES_TO_OPENCTI.get(indicator.lower(), indicator) for indicator in indicator_list]
return result
def reset_last_run():
"""
Reset the last run from the integration context
"""
demisto.setIntegrationContext({})
return CommandResults(readable_output='Fetch history deleted successfully')
def get_indicators(client: OpenCTIApiClient, indicator_types: List[str], score: List[str] = None,
limit: Optional[int] = 500,
last_run_id: Optional[str] = None) -> dict:
""" Retrieving indicators from the API
Args:
score: Range of scores to filter by.
client: OpenCTI Client object.
        indicator_types: List of indicator types to return.
        last_run_id: The last id from the previous call, used for pagination.
        limit: The maximum number of indicators to fetch.
Returns:
indicators: dict of indicators
"""
indicator_type = build_indicator_list(indicator_types)
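    # Always filter on entity type; add a score filter only when a score range was supplied.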
filters = [{
'key': 'entity_type',
'values': indicator_type
}]
if score:
filters.append({
'key': 'x_opencti_score',
'values': score
})
indicators = client.stix_cyber_observable.list(after=last_run_id, first=limit,
withPagination=True, filters=filters)
return indicators
def get_indicators_command(client: OpenCTIApiClient, args: dict) -> CommandResults:
""" Gets indicator from opencti to readable output
Args:
client: OpenCTI Client object
args: demisto.args()
Returns:
readable_output, raw_response
"""
indicator_types = argToList(args.get("indicator_types"))
last_run_id = args.get("last_run_id")
limit = arg_to_number(args.get('limit', 50))
start = arg_to_number(args.get('score_start', 1))
end = arg_to_number(args.get('score_end', 100)) + 1 # type:ignore
score = None
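    # The OpenCTI filter matches discrete values, so expand the requested score range into a list of strings.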
if start or end:
score = [str(i) for i in range(start, end)] # type:ignore
raw_response = get_indicators(
client=client,
indicator_types=indicator_types,
limit=limit,
last_run_id=last_run_id,
score=score
)
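    # 'endCursor' is OpenCTI's pagination cursor; it is returned as lastRunID so a later call can resume from it.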
last_run = raw_response.get('pagination', {}).get('endCursor') # type: ignore
if indicators_list := copy.deepcopy(raw_response.get('entities')):
indicators = [{'type': OPENCTI_TYPES_TO_XSOAR.get(indicator['entity_type'], indicator['entity_type']),
'value': indicator.get('observable_value'),
'id': indicator.get('id'),
'createdBy': indicator.get('createdBy').get('id')
if indicator.get('createdBy') else None,
'score': indicator.get('x_opencti_score'),
'description': indicator.get('x_opencti_description'),
'labels': [label.get('value') for label in indicator.get('objectLabel')],
'marking': [mark.get('definition') for mark in indicator.get('objectMarking')],
'externalReferences': indicator.get('externalReferences')
}
for indicator in indicators_list]
readable_output = tableToMarkdown('Indicators', indicators,
headers=["type", "value", "id"],
removeNull=True)
outputs = {
'OpenCTI.Indicators(val.lastRunID)': {'lastRunID': last_run},
'OpenCTI.Indicators.IndicatorsList(val.id === obj.id)': indicators
}
return CommandResults(
outputs=outputs,
readable_output=readable_output,
raw_response=indicators_list
)
else:
return CommandResults(readable_output='No indicators')
def indicator_delete_command(client: OpenCTIApiClient, args: dict) -> CommandResults:
""" Delete indicator from opencti
Args:
client: OpenCTI Client object
args: demisto.args()
Returns:
readable_output, raw_response
"""
indicator_id = args.get("id")
try:
client.stix_cyber_observable.delete(id=indicator_id)
except Exception as e:
demisto.error(str(e))
raise DemistoException("Can't delete indicator.")
return CommandResults(readable_output='Indicator deleted.')
def indicator_field_update_command(client: OpenCTIApiClient, args: dict) -> CommandResults:
""" Update indicator field at opencti
Args:
client: OpenCTI Client object
args: demisto.args()
Returns:
readable_output, raw_response
"""
indicator_id = args.get("id")
# works only with score and description
key = KEY_TO_CTI_NAME[args.get("field")] # type: ignore
value = args.get("value")
try:
result = client.stix_cyber_observable.update_field(id=indicator_id, key=key, value=value)
except Exception as e:
demisto.error(str(e))
raise DemistoException(f"Can't update indicator with field: {key}.")
return CommandResults(
outputs_prefix='OpenCTI.Indicator',
outputs_key_field='id',
outputs={'id': result.get('id')},
readable_output=f'Indicator {indicator_id} updated successfully.',
raw_response=result
)
def indicator_create_command(client: OpenCTIApiClient, args: Dict[str, str]) -> CommandResults:
""" Create indicator at opencti
Args:
client: OpenCTI Client object
args: demisto.args()
Returns:
readable_output, raw_response
"""
indicator_type = args.get("type")
created_by = args.get("created_by")
marking_id = args.get("marking_id")
label_id = args.get("label_id")
external_references_id = args.get("external_references_id")
description = args.get("description")
score = arg_to_number(args.get("score", '50'))
value = args.get("value")
data = {'type': XSOAR_TYPES_TO_OPENCTI.get(indicator_type.lower(), indicator_type), # type:ignore
'value': value}
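    # Some observable types expect the value under a type-specific field in the observable data.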
if indicator_type == 'Registry Key':
data['key'] = value
if indicator_type == 'Account':
data['account_login'] = value
simple_observable_key = None
simple_observable_value = None
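    # File hashes are passed through pycti's simple observable fields, keyed per the FILE_TYPES mapping.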
if 'file' in indicator_type.lower(): # type: ignore
simple_observable_key = FILE_TYPES.get(indicator_type.lower(), indicator_type) # type: ignore
simple_observable_value = value
    # pycti prints to stdout while creating the observable, so temporarily redirect it to a buffer.
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        result = client.stix_cyber_observable.create(
            simple_observable_key=simple_observable_key,
            simple_observable_value=simple_observable_value,
            type=indicator_type,
            createdBy=created_by, objectMarking=marking_id,
            objectLabel=label_id, externalReferences=external_references_id,
            simple_observable_description=description,
            x_opencti_score=score, observableData=data
        )
    except KeyError as e:
        raise DemistoException(f'Missing argument at data {e}')
    finally:
        # Restore stdout even if the create call raised.
        sys.stdout = old_stdout
if id := result.get('id'):
readable_output = f'Indicator created successfully. New Indicator id: {id}'
outputs = {
'id': result.get('id'),
'value': value,
'type': indicator_type
}
else:
raise DemistoException("Can't create indicator.")
return CommandResults(
outputs_prefix='OpenCTI.Indicator',
outputs_key_field='id',
outputs=outputs,
readable_output=readable_output,
raw_response=result
)
def indicator_add_marking(client: OpenCTIApiClient, id: Optional[str], value: Optional[str]):
""" Add indicator marking to opencti
Args:
client: OpenCTI Client object
id(str): indicator id to update
        value(str): marking definition id to add
Returns:
true if added successfully, else false.
"""
try:
result = client.stix_cyber_observable.add_marking_definition(id=id, marking_definition_id=value)
except Exception as e:
demisto.error(str(e))
raise DemistoException("Can't add marking to indicator.")
return result
def indicator_add_label(client: OpenCTIApiClient, id: Optional[str], value: Optional[str]):
""" Add indicator label to opencti
Args:
client: OpenCTI Client object
id(str): indicator id to update
        value(str): label id to add
Returns:
true if added successfully, else false.
"""
try:
result = client.stix_cyber_observable.add_label(id=id, label_id=value)
except Exception as e:
demisto.error(str(e))
raise DemistoException("Can't add label to indicator.")
return result
def indicator_field_add_command(client: OpenCTIApiClient, args: Dict[str, str]) -> CommandResults:
""" Add indicator marking or label to opencti
Args:
client: OpenCTI Client object
args: demisto.args()
Returns:
readable_output
"""
indicator_id = args.get("id")
# works only with marking and label
key = args.get("field")
value = args.get("value")
result = {}
if key == 'marking':
result = indicator_add_marking(client=client, id=indicator_id, value=value)
elif key == 'label':
result = indicator_add_label(client=client, id=indicator_id, value=value)
if result:
return CommandResults(readable_output=f'Added {key} successfully.')
else:
        return CommandResults(readable_output=f"Can't add {key} to indicator.")
def indicator_remove_label(client: OpenCTIApiClient, id: Optional[str], value: Optional[str]):
""" Remove indicator label from opencti
Args:
client: OpenCTI Client object
id(str): indicator id to update
        value(str): label id to remove
Returns:
true if removed successfully, else false.
"""
try:
result = client.stix_cyber_observable.remove_label(id=id, label_id=value)
except Exception as e:
demisto.error(str(e))
raise DemistoException("Can't remove label from indicator.")
return result
def indicator_remove_marking(client: OpenCTIApiClient, id: Optional[str], value: Optional[str]):
""" Remove indicator marking from opencti
Args:
client: OpenCTI Client object
id(str): indicator id to update
        value(str): marking definition id to remove
Returns:
true if removed successfully, else false.
"""
try:
result = client.stix_cyber_observable.remove_marking_definition(id=id, marking_definition_id=value)
except Exception as e:
demisto.error(str(e))
raise DemistoException("Can't remove marking from indicator.")
return result
def indicator_field_remove_command(client: OpenCTIApiClient, args: Dict[str, str]) -> CommandResults:
""" Remove indicator marking or label from opencti
Args:
client: OpenCTI Client object
args: demisto.args()
Returns:
readable_output
"""
indicator_id = args.get("id")
# works only with marking and label
key = args.get("field")
value = args.get("value")
result = {}
if key == 'marking':
result = indicator_remove_marking(client=client, id=indicator_id, value=value)
elif key == 'label':
result = indicator_remove_label(client=client, id=indicator_id, value=value)
if result:
readable_output = f'{key}: {value} was removed successfully from indicator: {indicator_id}.'
else:
raise DemistoException(f"Can't remove {key}.")
return CommandResults(readable_output=readable_output)
def organization_list_command(client: OpenCTIApiClient, args: Dict[str, str]) -> CommandResults:
""" Get organizations list from opencti
Args:
client: OpenCTI Client object
args: demisto.args()
Returns:
readable_output, raw_response
"""
limit = arg_to_number(args.get('limit', '50'))
last_run_id = args.get('last_run_id')
organizations_list = client.identity.list(types='Organization', first=limit, after=last_run_id, withPagination=True)
if organizations_list:
new_last_run = organizations_list.get('pagination').get('endCursor')
organizations = [
{'name': organization.get('name'), 'id': organization.get('id')}
for organization in organizations_list.get('entities')]
readable_output = tableToMarkdown('Organizations', organizations, headers=['name', 'id'],
headerTransform=pascalToSpace)
outputs = {
'OpenCTI.Organizations(val.organizationsLastRun)': {'organizationsLastRun': new_last_run},
'OpenCTI.Organizations.OrganizationsList(val.id === obj.id)': organizations
}
return CommandResults(
outputs=outputs,
readable_output=readable_output,
raw_response=organizations_list
)
else:
return CommandResults(readable_output='No organizations')
def organization_create_command(client: OpenCTIApiClient, args: Dict[str, str]) -> CommandResults:
""" Create organization at opencti
Args:
client: OpenCTI Client object
args: demisto.args()
Returns:
readable_output, raw_response
"""
name = args.get("name")
description = args.get("description")
reliability = args.get('reliability')
try:
identity = Identity(client)
result = identity.create(name=name, type='Organization', x_opencti_reliability=reliability,
description=description)
except Exception as e:
demisto.error(str(e))
raise DemistoException("Can't remove label from indicator.")
if organization_id := result.get('id'):
readable_output = f'Organization {name} was created successfully with id: {organization_id}.'
return CommandResults(outputs_prefix='OpenCTI.Organization',
outputs_key_field='id',
outputs={'id': result.get('id')},
readable_output=readable_output,
raw_response=result)
else:
raise DemistoException("Can't create organization.")
def label_list_command(client: OpenCTIApiClient, args: Dict[str, str]) -> CommandResults:
""" Get label list from opencti
Args:
client: OpenCTI Client object
args: demisto.args()
Returns:
readable_output, raw_response
"""
limit = arg_to_number(args.get('limit', '50'))
last_run_id = args.get('last_run_id')
label_list = client.label.list(first=limit, after=last_run_id, withPagination=True)
if label_list:
new_last_run = label_list.get('pagination').get('endCursor')
labels = [
{'value': label.get('value'), 'id': label.get('id')}
for label in label_list.get('entities')]
readable_output = tableToMarkdown('Labels', labels, headers=['value', 'id'],
headerTransform=pascalToSpace)
outputs = {
'OpenCTI.Labels(val.labelsLastRun)': {'labelsLastRun': new_last_run},
'OpenCTI.Labels.LabelsList(val.id === obj.id)': labels
}
return CommandResults(
outputs=outputs,
readable_output=readable_output,
raw_response=label_list
)
else:
return CommandResults(readable_output='No labels')
def label_create_command(client: OpenCTIApiClient, args: Dict[str, str]) -> CommandResults:
""" Create label at opencti
Args:
client: OpenCTI Client object
args: demisto.args()
Returns:
readable_output, raw_response
"""
name = args.get("name")
result = label_create(client=client, label_name=name)
if label_id := result.get('id'):
readable_output = f'Label {name} was created successfully with id: {label_id}.'
return CommandResults(outputs_prefix='OpenCTI.Label',
outputs_key_field='id',
outputs={'id': result.get('id')},
readable_output=readable_output,
raw_response=result)
else:
raise DemistoException("Can't create label.")
def external_reference_create_command(client: OpenCTIApiClient, args: Dict[str, str]) -> CommandResults:
""" Create external reference at opencti
Args:
client: OpenCTI Client object
args: demisto.args()
Returns:
readable_output, raw_response
"""
external_references_source_name = args.get('source_name')
external_references_url = args.get('url')
result = client.external_reference.create(
source_name=external_references_source_name,
url=external_references_url
)
if external_reference_id := result.get('id'):
readable_output = f'Reference {external_references_source_name} was created successfully with id: ' \
f'{external_reference_id}.'
return CommandResults(outputs_prefix='OpenCTI.externalReference',
outputs_key_field='id',
outputs={'id': result.get('id')},
readable_output=readable_output,
raw_response=result)
else:
raise DemistoException("Can't create external reference.")
def marking_list_command(client: OpenCTIApiClient, args: Dict[str, str]) -> CommandResults:
""" Get marking list from opencti
Args:
client: OpenCTI Client object
args: demisto.args()
Returns:
readable_output, raw_response
"""
limit = arg_to_number(args.get('limit', '50'))
last_run_id = args.get('last_run_id')
marking_list = client.marking_definition.list(first=limit, after=last_run_id, withPagination=True)
if marking_list:
new_last_run = marking_list.get('pagination').get('endCursor')
markings = [
{'value': mark.get('definition'), 'id': mark.get('id')}
for mark in marking_list.get('entities')]
readable_output = tableToMarkdown('Markings', markings, headers=['value', 'id'],
headerTransform=pascalToSpace)
outputs = {
'OpenCTI.MarkingDefinitions(val.markingsLastRun)': {'markingsLastRun': new_last_run},
'OpenCTI.MarkingDefinitions.MarkingDefinitionsList(val.id === obj.id)': markings
}
return CommandResults(
outputs=outputs,
readable_output=readable_output,
raw_response=marking_list
)
else:
return CommandResults(readable_output='No markings')
def main():
params = demisto.params()
args = demisto.args()
credentials = params.get('credentials', {})
api_key = credentials.get('password')
base_url = params.get('base_url').strip('/')
try:
client = OpenCTIApiClient(base_url, api_key, ssl_verify=params.get('insecure'), log_level='error')
command = demisto.command()
demisto.info(f"Command being called is {command}")
# Switch case
if command == "test-module":
'''When setting up an OpenCTI Client it is checked that it is valid and allows requests to be sent.
and if not he immediately sends an error'''
get_indicators_command(client, args)
return_results('ok')
elif command == "opencti-get-indicators":
return_results(get_indicators_command(client, args))
elif command == "opencti-indicator-delete":
return_results(indicator_delete_command(client, args))
elif command == "opencti-indicator-field-update":
return_results(indicator_field_update_command(client, args))
elif command == "opencti-indicator-create":
return_results(indicator_create_command(client, args))
elif command == "opencti-indicator-field-add":
return_results(indicator_field_add_command(client, args))
elif command == "opencti-indicator-field-remove":
return_results(indicator_field_remove_command(client, args))
elif command == "opencti-organization-list":
return_results(organization_list_command(client, args))
elif command == "opencti-organization-create":
return_results(organization_create_command(client, args))
elif command == "opencti-label-list":
return_results(label_list_command(client, args))
elif command == "opencti-label-create":
return_results(label_create_command(client, args))
elif command == "opencti-external-reference-create":
return_results(external_reference_create_command(client, args))
elif command == "opencti-marking-definition-list":
return_results(marking_list_command(client, args))
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f"Error:\n [{e}]")
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| mit | 1,067,112,909,605,929,900 | 34.284457 | 120 | 0.607713 | false | 3.993362 | false | false | false |
daniilidis-group/bird_recording | src/find_onset.py | 1 | 4304 | #!/usr/bin/env python
#
# read and plot audio file
#
import numpy as np
import matplotlib.pyplot as plt;
import struct
import sys, os
import argparse
import scipy.signal
import ipdb
from scipy.io import wavfile
def plot_wave(t, s):
if s.ndim > 1:
for i in range(0, s.shape[1]):
plt.subplot(s.shape[1], 1, i + 1)
plt.plot(t, s[:,i])
else:
plt.plot(t, s)
plt.show()
def fft_correlate(a, b):
af = np.fft.fft(a, axis=0)
bf = np.fft.fft(b, axis=0)
ft = np.multiply(np.conj(af), bf)
cc = np.real(np.fft.ifft(ft))
return cc
def read_file(f):
ffmt = struct.unpack('i', f.read(struct.calcsize('i')))
depth = f.read(5)
sz = struct.unpack('2i', f.read(struct.calcsize('2i')))
# t_start: time at which the rosbag started
# t1: time at which first audio packet was received
# t0: time at which first sample must have been generated
# dt: delta time between samples, matched to recording
# t = t0 + idx * dt = best rostime estimate
# t - t_start = best estimate for time since start of bag recording
rate, t_start, t0, t1, dt = struct.unpack('5f', f.read(struct.calcsize('5f')))
print "bag start: %f, rate: %f, depth: %s, number of samples: %d, number of channels: %d" % (t_start, rate, depth, sz[0], sz[1])
ts = t0 + np.arange(0,sz[0]) * dt
dtype = np.int16 if depth == 'int16' else np.int32
smpl = np.fromfile(f,dtype=dtype)
samples = np.reshape(smpl, sz)
t = t0 + np.arange(0, sz[0]) * dt - t_start; # gives best estimate
return t, rate, samples
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='find audio onset.')
parser.add_argument('--start', '-s', action='store', default=0.0, type=float,
help='start time')
parser.add_argument('--end', '-e', action='store', default=1e30, type=float,
help='end time')
parser.add_argument('--song', action='store', default='song.wav', help = 'wav file with pattern to search for')
parser.add_argument('--file', action='store', required=True, help = 'audio.dat file generated from rosbag')
parser.add_argument('--songstart', action='store', default=0, type=float, help = 'start time of call within song file')
args = parser.parse_args()
f = open(args.file)
t, rate_recording, recording = read_file(f)
rate_song, song = wavfile.read(args.song)
#song = np.pad(song, (0,int(rate_recording*20)), 'constant', constant_values=(0,0)) # XXX
# average channels of recording
recording = np.sum(recording, axis=1) # add across channels to get average
# resample song to recording frequency
song = scipy.signal.resample(song, int(song.shape[0] * rate_recording / rate_song))
# pad whichever is shorter with zeros
num_pad = abs(recording.shape[0] - song.shape[0])
if (song.shape[0] < recording.shape[0]):
print "padding %d zero samples to song" % num_pad
song = np.expand_dims(np.pad(song, (0,num_pad), 'constant', constant_values=(0,0)), -1)
if (recording.shape[0] < song.shape[0]):
print "padding %d zero samples to recording" % num_pad
recording = np.pad(recording, (0,num_pad), 'constant', constant_values=(0,0))
# expand dimensions so we can stack it
#song = np.expand_dims(song, axis=-1)
print 'recording rate: %d, song rate: %d' % (rate_recording, rate_song)
recording_exp = np.expand_dims(recording, axis=-1)
#cc = fft_correlate(song[:,0], samples[:,0]) # use this for individual channels
cc = fft_correlate(song[:,0], recording)
amax = np.argmax(cc, axis=0) # find maximum correlation
song_rolled = np.roll(song, amax)
idx = np.where((t >= args.start) & (t <= args.end))
t_ss = args.songstart + amax / rate_recording
if t_ss > song.shape[0] / rate_recording:
t_ss = t_ss - song.shape[0] / rate_recording
print 'max correlation roll: %d, song start: %f' % (amax, t_ss)
# stacked = np.hstack((song_rolled[idx[0],:], song[idx[0],:], cc[idx[0],:]))
# stacked = np.hstack((song_rolled[idx[0],:], recording_exp[idx[0],:]))
stacked = np.hstack((song[idx[0],:], recording_exp[idx[0],:], np.expand_dims(cc[idx[0]],-1)))
plot_wave(t[idx[0]], stacked)
| mit | 3,815,214,340,486,254,600 | 43.833333 | 132 | 0.618727 | false | 3.125635 | false | false | false |
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractCleverneckoHomeBlog.py | 1 | 1509 | def extractCleverneckoHomeBlog(item):
'''
Parser for 'clevernecko.home.blog'
'''
badwords = [
'movie review',
'badword',
]
if any([bad in item['tags'] for bad in badwords]):
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('your husband’s leg is broken', 'your husband’s leg is broken', 'translated'),
('the case of the 27 knife stabs', 'the case of the 27 knife stabs', 'translated'),
('Fate', 'Fate, something so wonderful', 'translated'),
('kimi no shiawase wo negatteita', 'kimi no shiawase wo negatteita', 'translated'),
('warm waters', 'warm waters', 'translated'),
('after being marked by a powerful love rival', 'after being marked by a powerful love rival', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | bsd-3-clause | -7,540,003,231,956,800,000 | 39.702703 | 138 | 0.495017 | false | 3.725248 | false | false | false |
j3camero/canada-election-forecast | calculate_redistributed_2011_results.py | 1 | 1836 | import csv
party_names = [
('Conservative', 'cpc'),
('NDP', 'ndp'),
('Liberal', 'lpc'),
('Bloc', 'bq'),
('Green', 'gpc'),
('Other', 'oth'),
]
ridings = {}
def AddVotes(riding_number, party_code, additional_votes):
if riding_number not in ridings:
ridings[riding_number] = {}
riding = ridings[riding_number]
if party_code not in riding:
riding[party_code] = 0
riding[party_code] += additional_votes
def NormalizeDictVector(v):
norm = {}
divisor = sum(v.values())
for key, value in v.items():
norm[key] = float(value) / divisor
return norm
with open('TRANSPOSITION_338FED.csv', 'rb') as input_file:
# Skip the first few lines of the file, to get to the data part.
for i in range(4):
next(input_file)
reader = csv.DictReader(input_file)
for row in reader:
riding_number = row['2013 FED Number']
riding_name = row['2013 FED Name']
for column_header, value in row.items():
try:
value = int(value)
except:
continue
for party_name, party_code in party_names:
if column_header.startswith(party_name):
AddVotes(riding_number, party_code, value)
with open('redistributed_2011_results.csv', 'wb') as output_file:
writer = csv.writer(output_file)
writer.writerow(['riding', 'date', 'sample_size'] +
[p for _, p in party_names])
for riding_number, vote_counts in ridings.items():
vote_fractions = NormalizeDictVector(vote_counts)
ordered_vote_fractions = [vote_fractions[p] for _, p in party_names]
sample_size = sum(vote_counts.values())
writer.writerow([riding_number, '2011-05-02', sample_size] +
ordered_vote_fractions)
| apache-2.0 | 15,204,818,751,444,074 | 33 | 76 | 0.587691 | false | 3.497143 | false | false | false |
tomncooper/heron | heron/tools/explorer/src/python/version.py | 5 | 1401 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' version.py '''
import heron.tools.common.src.python.utils.config as config
import heron.tools.explorer.src.python.args as args
def create_parser(subparsers):
""" create parser """
parser = subparsers.add_parser(
'version',
help='Display version',
usage="%(prog)s",
add_help=False)
args.add_titles(parser)
parser.set_defaults(subcommand='version')
return parser
# pylint: disable=unused-argument
def run(command, parser, known_args, unknown_args):
""" run command """
config.print_build_info()
return True
| apache-2.0 | 2,370,272,471,565,272,600 | 33.170732 | 63 | 0.725196 | false | 3.796748 | false | false | false |
googleapis/googleapis-gen | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/resources/types/keyword_plan_campaign.py | 1 | 3725 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v7.enums.types import keyword_plan_network as gage_keyword_plan_network
__protobuf__ = proto.module(
package='google.ads.googleads.v7.resources',
marshal='google.ads.googleads.v7',
manifest={
'KeywordPlanCampaign',
'KeywordPlanGeoTarget',
},
)
class KeywordPlanCampaign(proto.Message):
r"""A Keyword Plan campaign.
Max number of keyword plan campaigns per plan allowed: 1.
Attributes:
resource_name (str):
Immutable. The resource name of the Keyword Plan campaign.
KeywordPlanCampaign resource names have the form:
``customers/{customer_id}/keywordPlanCampaigns/{kp_campaign_id}``
keyword_plan (str):
The keyword plan this campaign belongs to.
id (int):
Output only. The ID of the Keyword Plan
campaign.
name (str):
The name of the Keyword Plan campaign.
This field is required and should not be empty
when creating Keyword Plan campaigns.
language_constants (Sequence[str]):
The languages targeted for the Keyword Plan
campaign. Max allowed: 1.
keyword_plan_network (google.ads.googleads.v7.enums.types.KeywordPlanNetworkEnum.KeywordPlanNetwork):
Targeting network.
This field is required and should not be empty
when creating Keyword Plan campaigns.
cpc_bid_micros (int):
A default max cpc bid in micros, and in the
account currency, for all ad groups under the
campaign.
This field is required and should not be empty
when creating Keyword Plan campaigns.
geo_targets (Sequence[google.ads.googleads.v7.resources.types.KeywordPlanGeoTarget]):
The geo targets.
Max number allowed: 20.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
keyword_plan = proto.Field(
proto.STRING,
number=9,
optional=True,
)
id = proto.Field(
proto.INT64,
number=10,
optional=True,
)
name = proto.Field(
proto.STRING,
number=11,
optional=True,
)
language_constants = proto.RepeatedField(
proto.STRING,
number=12,
)
keyword_plan_network = proto.Field(
proto.ENUM,
number=6,
enum=gage_keyword_plan_network.KeywordPlanNetworkEnum.KeywordPlanNetwork,
)
cpc_bid_micros = proto.Field(
proto.INT64,
number=13,
optional=True,
)
geo_targets = proto.RepeatedField(
proto.MESSAGE,
number=8,
message='KeywordPlanGeoTarget',
)
class KeywordPlanGeoTarget(proto.Message):
r"""A geo target.
Attributes:
geo_target_constant (str):
Required. The resource name of the geo
target.
"""
geo_target_constant = proto.Field(
proto.STRING,
number=2,
optional=True,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -3,534,356,946,514,556,000 | 29.284553 | 109 | 0.62953 | false | 4.204289 | false | false | false |
mapix/WebHello | WebHello/response.py | 1 | 1510 | # -*- coding:utf-8 -*-
import Cookie
import datetime
__all__ = ['Response']
class Response(object):
status = "200 OK"
def __init__(self, output="", request=None):
self.output = output
self.request = request
self.cookies = Cookie.SimpleCookie()
self.response_headers = [('Content-Type', 'text/html')]
def __call__(self, environ, start_response):
output = self.output if self.status.split(' ', 1)[0] == '200' else self.html
response_headers = self.response_headers
response_headers.extend(tuple(cookie.split(':', 1)) for cookie in self.cookies.output().split('\r\n') if cookie)
response_headers.append(('Content-Length', str(len(output))))
start_response(self.status, response_headers)
return [output]
def set_cookie(self, name, value, domain=None, path='/', expires=None,
max_age=None, secure=None, httponly=None, version=None):
self.cookies[name] = value
self.cookies[name]["path"] = path
if domain:
self.cookies[name]["domain"] = domain
if expires:
expiration = datetime.datetime.now() + datetime.timedelta(days=expires)
self.cookies[name]["expires"] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S PST")
if max_age:
self.cookies[name]["max-age"] = max_age
if secure:
self.cookies[name]["secure"] = secure
if httponly:
self.cookies[name]["httponly"] = httponly
| bsd-3-clause | 305,731,184,399,917,760 | 35.829268 | 120 | 0.593377 | false | 3.852041 | false | false | false |
shawnhermans/cyborgcrm | cycomments/migrations/0001_initial.py | 1 | 1964 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
import django_extensions.db.fields
import audit_log.models.fields
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_with_session_key', audit_log.models.fields.CreatingSessionKeyField(max_length=40, null=True, editable=False)),
('modified_with_session_key', audit_log.models.fields.LastSessionKeyField(max_length=40, null=True, editable=False)),
('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),
('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),
('object_id', models.PositiveIntegerField()),
('comment', models.TextField()),
('content_type', models.ForeignKey(to='contenttypes.ContentType')),
('created_by', audit_log.models.fields.CreatingUserField(related_name='created_cycomments_comment_set', editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='created by')),
('modified_by', audit_log.models.fields.LastUserField(related_name='modified_cycomments_comment_set', editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='modified by')),
],
options={
'abstract': False,
},
),
]
| bsd-2-clause | -4,017,765,377,546,259,000 | 52.081081 | 204 | 0.660387 | false | 4.205567 | false | false | false |
kaushik94/sympy | examples/intermediate/coupled_cluster.py | 3 | 3691 | #!/usr/bin/env python
"""
Calculates the Coupled-Cluster energy- and amplitude equations
See 'An Introduction to Coupled Cluster Theory' by
T. Daniel Crawford and Henry F. Schaefer III.
Other Resource : http://vergil.chemistry.gatech.edu/notes/sahan-cc-2010.pdf
"""
from sympy.physics.secondquant import (AntiSymmetricTensor, wicks,
F, Fd, NO, evaluate_deltas, substitute_dummies, Commutator,
simplify_index_permutations, PermutationOperator)
from sympy import (
symbols, Rational, latex, Dummy
)
pretty_dummies_dict = {
'above': 'cdefgh',
'below': 'klmno',
'general': 'pqrstu'
}
def get_CC_operators():
"""
Returns a tuple (T1,T2) of unique operators.
"""
i = symbols('i', below_fermi=True, cls=Dummy)
a = symbols('a', above_fermi=True, cls=Dummy)
t_ai = AntiSymmetricTensor('t', (a,), (i,))
ai = NO(Fd(a)*F(i))
i, j = symbols('i,j', below_fermi=True, cls=Dummy)
a, b = symbols('a,b', above_fermi=True, cls=Dummy)
t_abij = AntiSymmetricTensor('t', (a, b), (i, j))
abji = NO(Fd(a)*Fd(b)*F(j)*F(i))
T1 = t_ai*ai
T2 = Rational(1, 4)*t_abij*abji
return (T1, T2)
def main():
print()
print("Calculates the Coupled-Cluster energy- and amplitude equations")
print("See 'An Introduction to Coupled Cluster Theory' by")
print("T. Daniel Crawford and Henry F. Schaefer III")
print("Reference to a Lecture Series: http://vergil.chemistry.gatech.edu/notes/sahan-cc-2010.pdf")
print()
# setup hamiltonian
p, q, r, s = symbols('p,q,r,s', cls=Dummy)
f = AntiSymmetricTensor('f', (p,), (q,))
pr = NO((Fd(p)*F(q)))
v = AntiSymmetricTensor('v', (p, q), (r, s))
pqsr = NO(Fd(p)*Fd(q)*F(s)*F(r))
H = f*pr + Rational(1, 4)*v*pqsr
print("Using the hamiltonian:", latex(H))
print("Calculating 4 nested commutators")
C = Commutator
T1, T2 = get_CC_operators()
T = T1 + T2
print("commutator 1...")
comm1 = wicks(C(H, T))
comm1 = evaluate_deltas(comm1)
comm1 = substitute_dummies(comm1)
T1, T2 = get_CC_operators()
T = T1 + T2
print("commutator 2...")
comm2 = wicks(C(comm1, T))
comm2 = evaluate_deltas(comm2)
comm2 = substitute_dummies(comm2)
T1, T2 = get_CC_operators()
T = T1 + T2
print("commutator 3...")
comm3 = wicks(C(comm2, T))
comm3 = evaluate_deltas(comm3)
comm3 = substitute_dummies(comm3)
T1, T2 = get_CC_operators()
T = T1 + T2
print("commutator 4...")
comm4 = wicks(C(comm3, T))
comm4 = evaluate_deltas(comm4)
comm4 = substitute_dummies(comm4)
print("construct Hausdorff expansion...")
eq = H + comm1 + comm2/2 + comm3/6 + comm4/24
eq = eq.expand()
eq = evaluate_deltas(eq)
eq = substitute_dummies(eq, new_indices=True,
pretty_indices=pretty_dummies_dict)
print("*********************")
print()
print("extracting CC equations from full Hbar")
i, j, k, l = symbols('i,j,k,l', below_fermi=True)
a, b, c, d = symbols('a,b,c,d', above_fermi=True)
print()
print("CC Energy:")
print(latex(wicks(eq, simplify_dummies=True,
keep_only_fully_contracted=True)))
print()
print("CC T1:")
eqT1 = wicks(NO(Fd(i)*F(a))*eq, simplify_kronecker_deltas=True, keep_only_fully_contracted=True)
eqT1 = substitute_dummies(eqT1)
print(latex(eqT1))
print()
print("CC T2:")
eqT2 = wicks(NO(Fd(i)*Fd(j)*F(b)*F(a))*eq, simplify_dummies=True, keep_only_fully_contracted=True, simplify_kronecker_deltas=True)
P = PermutationOperator
eqT2 = simplify_index_permutations(eqT2, [P(a, b), P(i, j)])
print(latex(eqT2))
if __name__ == "__main__":
main()
| bsd-3-clause | 5,840,633,361,370,606,000 | 29.504132 | 134 | 0.614739 | false | 2.787764 | false | false | false |
MelanieBittl/dolfin | demo/undocumented/petsc4py/python/demo_petsc4py.py | 3 | 2737 | """This demo program solves Poisson's equation
- div grad u(x, y) = f(x, y)
on the unit square with source f given by
f(x, y) = 10*exp(-((x - 0.5)^2 + (y - 0.5)^2) / 0.02)
and boundary conditions given by
u(x, y) = 0 for x = 0 or x = 1
du/dn(x, y) = sin(5*x) for y = 0 or y = 1
It demonstrates how to extract petsc4py objects from dolfin objects
and use them in a petsc4py Krylov solver.
Based on "demo/pde/poisson/python/demo_poisson.py"
"""
# Copyright (C) 2007-2011, 2013 Anders Logg, Lawrence Mitchell
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Begin demo
from __future__ import print_function
from dolfin import *
from six import print_
try:
from petsc4py import PETSc
except:
print_("*** You need to have petsc4py installed for this demo to run", end=' ')
print("Exiting.")
exit()
if not has_petsc4py():
print_("*** DOLFIN has not been compiled with petsc4py support", end=' ')
print("Exiting.")
exit()
parameters["linear_algebra_backend"] = "PETSc"
# Create mesh and define function space
mesh = UnitSquareMesh(32, 32)
V = FunctionSpace(mesh, "Lagrange", 1)
# Define Dirichlet boundary (x = 0 or x = 1)
def boundary(x):
return x[0] < DOLFIN_EPS or x[0] > 1.0 - DOLFIN_EPS
# Define boundary condition
u0 = Constant(0.0)
bc = DirichletBC(V, u0, boundary)
# Define variational problem
u = TrialFunction(V)
v = TestFunction(V)
f = Expression("10*exp(-(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2)) / 0.02)")
g = Expression("sin(5*x[0])")
a = inner(grad(u), grad(v))*dx
L = f*v*dx + g*v*ds
# Compute solution
u = Function(V)
A, b = assemble_system(a, L, bc)
# Fetch underlying PETSc objects
A_petsc = as_backend_type(A).mat()
b_petsc = as_backend_type(b).vec()
x_petsc = as_backend_type(u.vector()).vec()
# Create solver, apply preconditioner and solve system
ksp = PETSc.KSP().create()
ksp.setOperators(A_petsc)
pc = PETSc.PC().create()
pc.setOperators(A_petsc)
pc.setType(pc.Type.JACOBI)
ksp.setPC(pc)
ksp.solve(b_petsc, x_petsc)
# Plot solution
plot(u, interactive=True)
# Save solution to file
file = File("poisson.pvd")
file << u
| gpl-3.0 | 8,582,205,401,691,385,000 | 25.833333 | 83 | 0.686518 | false | 2.887131 | false | false | false |
davidzchen/tensorflow | tensorflow/python/keras/mixed_precision/experimental/loss_scale_benchmark.py | 3 | 6827 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for LossScaleOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training.experimental import loss_scale as loss_scale_module
def _get_strategy(num_gpus):
if num_gpus > 1:
return mirrored_strategy.MirroredStrategy(
['/GPU:%d' % i for i in range(num_gpus)])
else:
return distribution_strategy_context.get_strategy() # The default strategy
class LossScaleBenchmark(test.Benchmark):
"""Benchmark for loss scaling."""
def _benchmark(self, gradient_type, num_gpus, mode, loss_scaling):
"""Benchmarks loss scaling.
We run a simple model with several scalar variables. The loss is the sum of
all variables. The model is simple because we want to measure only the
performance of loss scaling, not the performance of the model itself.
Args:
gradient_type: "optimizer" or "gradient_tape". How gradients are computed.
"optimizer" uses Optimizer.minimize. "gradient_tape" uses
GradientTape.gradient along with LossScaleOptimizer.get_scaled_loss and
LossScaleOptimizer.get_unscaled_gradients.
num_gpus: The number of GPUs to use. Must be at least 1.
mode: "eager" or "tf_function". "tf_function" causes all computations to
be wrapped in a tf.function, while "eager" runs computations eagerly.
loss_scaling: "fixed", "dynamic", or None. The type of loss scaling to
use. None means use no loss scaling, which is useful as a baseline to
see how much slower loss scaling is in comparison.
"""
ls_str = loss_scaling or 'no_loss_scaling'
name = '%s_%d_GPU_%s_%s' % (gradient_type, num_gpus, mode, ls_str)
with context.eager_mode(), _get_strategy(num_gpus).scope() as strategy:
opt = adam.Adam()
if loss_scaling == 'fixed':
loss_scale = loss_scale_module.FixedLossScale(2.)
elif loss_scaling == 'dynamic':
# Make increment_period so high that it's effectively infinite. This
# means the loss scale will never change. Any performance overhead
# from increasing/decreasing the loss scale is typically negligible
# since it happens infrequently, so we only benchmark the common case
# of the loss scale not changing.
increment_period = 1000000
loss_scale = loss_scale_module.DynamicLossScale(
initial_loss_scale=2., increment_period=increment_period)
else:
assert loss_scaling is None
loss_scale = None
if loss_scale:
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
num_vars = 200
num_warmup_iters = 1
num_iters = 20
# By using scalar variables, we reduce overhead of the actual GPU work of
# multiplying variables, dividing gradients, and checking gradients for
# NaNs. Measuring these overheads isn't very useful as there is little we
# can do to reduce them (one such way would be to fuse dividing gradients
# and checking them for NaNs). We still have all other overheads, such as
# all-reducing the `is_finite` values and having a tf.cond or
# tf.while_loop based on whether gradients are NaNs. Currently, these
# other overheads are much more significant than the GPU work.
var_list = [
variables.Variable(i, dtype='float32') for i in range(num_vars)]
def get_loss():
return math_ops.add_n(var_list)
if gradient_type == 'gradient_tape':
if loss_scale is None:
def minimize_fn():
with backprop.GradientTape() as tape:
loss = get_loss()
grads = tape.gradient(loss, var_list)
return opt.apply_gradients(zip(grads, var_list))
else:
def minimize_fn():
with backprop.GradientTape() as tape:
loss = get_loss()
scaled_loss = opt.get_scaled_loss(loss)
scaled_grads = tape.gradient(scaled_loss, var_list)
grads = opt.get_unscaled_gradients(scaled_grads)
return opt.apply_gradients(zip(grads, var_list))
else:
assert gradient_type == 'optimizer'
def minimize_fn():
return opt.minimize(get_loss, var_list)
def run_fn():
strategy.run(minimize_fn)
if mode == 'tf_function':
run_fn = def_function.function(run_fn)
for _ in range(num_warmup_iters):
run_fn()
start = time.time()
for _ in range(num_iters):
run_fn()
end = time.time()
self.report_benchmark(iters=num_iters,
wall_time=(end - start) / num_iters, name=name)
def _gpus_to_test_with(self):
num_gpus = len(config.list_logical_devices('GPU'))
gpus_to_test_with = []
if num_gpus >= 1:
gpus_to_test_with.append(1)
if num_gpus >= 2:
gpus_to_test_with.append(2)
if num_gpus >= 8:
gpus_to_test_with.append(8)
return gpus_to_test_with
def benchmark_optimizer(self):
for num_gpus in self._gpus_to_test_with():
for mode in 'eager', 'tf_function':
for loss_scaling in None, 'fixed', 'dynamic':
self._benchmark('optimizer', num_gpus, mode, loss_scaling)
def benchmark_gradient_tape(self):
for num_gpus in self._gpus_to_test_with():
for mode in 'eager', 'tf_function':
for loss_scaling in None, 'fixed', 'dynamic':
self._benchmark('gradient_tape', num_gpus, mode, loss_scaling)
if __name__ == '__main__':
test.main()
| apache-2.0 | -5,343,875,307,564,638,000 | 40.628049 | 85 | 0.666178 | false | 3.943963 | true | false | false |
xiaohan2012/capitalization-restoration-train | tests/test_util.py | 1 | 3055 | # -*- coding: utf-8 -*-
import os
from nose.tools import assert_equal, assert_true, assert_false
from capitalization_train.util import (extract_title,
get_document_content_paf,
is_monocase,
get_title_and_content_by_paf)
CURDIR = os.path.dirname(os.path.realpath(__file__))
def test_extract_title():
actual = extract_title(CURDIR + '/data/docs_okformed/001BBB8BFFE6841FA498FCE88C43B63A')
expected = u'UPDATE - Nanobiotix gets early Positive Safety rEsults IN head and neck clinical trial'
assert_equal(actual, expected)
def test_get_document_content_paf_empty():
actual = get_document_content_paf(CURDIR + '/data/empty_doc')
expected = '\n\n'
assert_equal(actual, expected)
def test_get_document_content_paf():
actual = get_document_content_paf(CURDIR + '/data/docs_okformed/001BBB8BFFE6841FA498FCE88C43B63A')
assert_true(len(actual.strip()) > 400)
def test_get_title_and_content_by_paf():
starting_content, title, body\
= get_title_and_content_by_paf(
CURDIR + '/data/docs_okformed/001BBB8BFFE6841FA498FCE88C43B63A'
)
assert_equal(starting_content,
"20150609\n001BBB8BFFE6841FA498FCE88C43B63A\n")
assert_equal(
title,
"UPDATE - Nanobiotix gets early Positive Safety rEsults IN head and neck clinical trial"
)
assert_true(len(body.strip()) > 400)
def test_is_monocase():
assert_true(
is_monocase(
"The Inside Story of How the iPhone Crippled BlackBerry".split()
)
)
# this
assert_true(
is_monocase(
"Ayla Networks Executives Speaking at Key IoT Conferences this Spring".split()
)
)
# annoying 'du'
assert_true(
is_monocase(
"KSL Capital Partners Announces the Sale of Malmaison and Hotel du Vin".split()
)
)
assert_true(
is_monocase("Global Eagle Entertainment and SES Sign a Strategic Partnership to Deliver Global Ku-Band Satellite in-Flight Connectivity to Airlines".split())
)
assert_false(
is_monocase("Agenda Released for the 17th annual Summit on Superbugs & Superdrugs".split())
)
assert_true(
is_monocase("How Find Your Inner Martin Scorsese to Build Brand & Rule the World".split())
)
assert_true(
is_monocase("Half of YouTube 's Traffic is Now Coming From Mobile: CEO".split())
)
assert_true(
is_monocase("Crystal Bridges Announces 2015 Exhibits, Including Warhol, van Gogh, Pollock".split())
)
assert_true(
is_monocase("Why American Airlines Threw Away Paper Flight Plans in Favor of iPads".split())
)
# len(ac) == 0
assert_false(
is_monocase("Why american airlines threw away paper flight plans in favor of ipads".split())
)
assert_true(
is_monocase("Amy Pascal to Work on Sony 's Spider-Man Team".split())
)
| mit | 683,156,147,735,513,900 | 30.173469 | 165 | 0.630442 | false | 3.260406 | false | false | false |
dallingham/regenerate | setup.py | 1 | 1326 | try:
from setuputils import setup
except ImportError:
from distutils.core import setup
setup(
name='regenerate',
version='1.0.0',
license='License.txt',
author='Donald N. Allingham',
author_email='[email protected]',
description='Register editor for ASIC/FPGA designs',
long_description='Allows users to manange registers for '
'ASIC and FPGA designs. Capable of generating Verilog '
'RTL, test code, C and assembler header files, and documentation.',
packages=["regenerate", "regenerate.db", "regenerate.importers",
"regenerate.extras", "regenerate.settings", "regenerate.ui",
"regenerate.writers"],
package_data={
'regenerate': ['data/ui/*.ui', 'data/media/*.svg', 'data/help/*.rst',
'data/media/*.png', 'data/extra/*.odt', 'data/*.*',
'writers/templates/*']
},
url="https://github.com/dallingham/regenerate",
scripts=["bin/regenerate", "bin/regbuild", "bin/regupdate", "bin/regxref",
"bin/regdiff", "bin/ipxact2reg"],
classifiers=
['Operating System :: POSIX', 'Programming Language :: Python :: 2.7',
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
'Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)'], )
| gpl-2.0 | 6,191,504,294,718,468,000 | 43.2 | 82 | 0.636501 | false | 3.693593 | false | false | false |
altai/altai-api | altai_api/blueprints/invites.py | 1 | 3002 |
# vim: tabstop=8 shiftwidth=4 softtabstop=4 expandtab smarttab autoindent
# Altai API Service
# Copyright (C) 2012-2013 Grid Dynamics Consulting Services, Inc
# All Rights Reserved
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
from flask import Blueprint, abort
from openstackclient_base import exceptions as osc_exc
from altai_api.blueprints.users import (user_to_view, InvitesDAO,
update_user_data)
from altai_api import auth
from altai_api.schema import Schema
from altai_api.schema import types as st
from altai_api.utils import make_json_response, parse_request_data
from altai_api.utils.decorators import no_auth_endpoint, root_endpoint
BP = Blueprint('invites', __name__)
def _invite_and_user(code):
user_mgr = auth.admin_client_set().identity_admin.users
invite = InvitesDAO.get(code)
try:
assert not invite.complete
user = user_mgr.get(invite.user_id)
assert not user.enabled
except (osc_exc.NotFound, AssertionError):
abort(404)
return invite, user
@BP.route('/')
@root_endpoint('invites')
def list_invites():
# just a stub to mark with root_endpoint
abort(404)
@BP.route('/<code>', methods=('GET',))
@no_auth_endpoint
def get_user_by_code(code):
invite, user = _invite_and_user(code)
return make_json_response(user_to_view(user, invite))
_ACCEPT_SCHEMA = Schema((
st.String('name'),
st.String('fullname', allow_empty=True),
st.String('email'),
))
_ACCEPT_REQUIRES = Schema((
st.String('password'),
))
@BP.route('/<code>', methods=('PUT',))
@no_auth_endpoint
def accept_invite(code):
data = parse_request_data(_ACCEPT_SCHEMA, _ACCEPT_REQUIRES)
invite, user = _invite_and_user(code)
data['enabled'] = True
try:
update_user_data(user, data)
user = auth.admin_client_set().identity_admin.users.get(user.id)
except osc_exc.NotFound:
abort(404)
InvitesDAO.complete_for_user(user.id)
return make_json_response(user_to_view(user, invite), 200)
@BP.route('/<code>', methods=('DELETE',))
@no_auth_endpoint
def drop_invite(code):
"""Refuse to accept invite"""
invite, user = _invite_and_user(code)
try:
user.delete()
except osc_exc.NotFound:
abort(404)
InvitesDAO.complete_for_user(invite.user_id)
return make_json_response(None, status_code=204)
| lgpl-2.1 | -2,387,585,171,577,959,000 | 27.590476 | 73 | 0.692871 | false | 3.438717 | false | false | false |
Christoph-D/Japanese-Tools | reading/read.py | 1 | 4247 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Damien Elmes <[email protected]>
# License: GNU GPL, version 3 or later; http://www.gnu.org/copyleft/gpl.html
#
# Automatic reading generation with kakasi and mecab.
# See http://ichi2.net/anki/wiki/JapaneseSupport
#
# Adapted for stand-alone use by
# Christoph Dittmann <[email protected]>.
import sys, os, platform, re, subprocess, codecs
MAX_OUTPUT_LENGTH = 300
kakasiCmd = ["kakasi", "-iutf8", "-outf8", "-u", "-JH", "-KH"]
mecabCmd = ["mecab", '--node-format=%m[%f[5]] ', '--eos-format=\n',
'--unk-format=%m[] ']
class KakasiController(object):
def __init__(self):
self.kakasi = None
def ensureOpen(self):
if not self.kakasi:
try:
self.kakasi = subprocess.Popen(
kakasiCmd, bufsize=-1, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
except OSError:
raise Exception("Please install kakasi.")
def toHiragana(self, expr):
self.ensureOpen()
self.kakasi.stdin.write(expr.encode("utf8", "ignore")+'\n')
self.kakasi.stdin.flush()
res = unicode(self.kakasi.stdout.readline().rstrip('\r\n'), "utf8")
return res
kakasi = KakasiController()
def fixExpr(expr):
out = []
expr_split = re.split("([^\[]+\[[^\]]*\])", expr)
for node in expr_split:
if node == '':
continue
m = re.match("(.+)\[(.*)\]", node.decode("utf-8"))
if not m:
out.append(node.decode("utf-8"))
continue
(kanji, reading) = m.groups()
# hiragana, katakana, punctuation, not japanese, or lacking a reading
if kanji == reading or not reading:
out.append(kanji)
continue
# convert to hiragana
reading = kakasi.toHiragana(reading)
# ended up the same
if reading == kanji:
out.append(kanji)
continue
# don't add readings of numbers
if kanji.strip() in u"0123456789": # u"一二三四五六七八九十0123456789":
out.append(kanji)
continue
# strip matching characters and beginning and end of reading and kanji
# reading should always be at least as long as the kanji
placeL = 0
placeR = 0
for i in range(1,len(kanji)):
if kanji[-i] != reading[-i]:
break
placeR = i
for i in range(0,len(kanji)-1):
if kanji[i] != reading[i]:
break
placeL = i+1
if placeL == 0:
if placeR == 0:
out.append(" %s[%s]" % (kanji, reading))
else:
out.append(" %s[%s]%s" % (
kanji[:-placeR], reading[:-placeR], reading[-placeR:]))
else:
if placeR == 0:
out.append("%s %s[%s]" % (
reading[:placeL], kanji[placeL:], reading[placeL:]))
else:
out.append("%s %s[%s]%s" % (
reading[:placeL], kanji[placeL:-placeR],
reading[placeL:-placeR], reading[-placeR:]))
fin = ""
for c, s in enumerate(out):
if c < len(out) - 1 and re.match("^[A-Za-z0-9]+$", out[c+1]):
s += " "
fin += s
fin = fin.strip()
fin = re.sub(u"\[\]", u"", fin)
fin = re.sub(u" +", u" ", fin)
return fin
def get_readings(expr):
try:
mecab = subprocess.Popen(
mecabCmd, bufsize=-1, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
return mecab.communicate(expr)[0]
except OSError:
raise Exception("Please install mecab.")
if __name__ == "__main__":
sys.stdout = codecs.open("/dev/stdout", "w", 'utf-8')
if len(sys.argv) != 2 or len(sys.argv[1]) == 0:
print 'Please provide one argument.'
sys.exit(0)
try:
result = fixExpr(get_readings(sys.argv[1]))
result = re.sub(u"\\n", u"", result)
except Exception, (e):
print e
sys.exit(1)
if len(result) > MAX_OUTPUT_LENGTH:
print result[0:MAX_OUTPUT_LENGTH - 3] + u'...'
else:
print result
| gpl-3.0 | -5,141,048,142,559,384,000 | 32.230159 | 78 | 0.523525 | false | 3.30205 | false | false | false |
robinandeer/puzzle | puzzle/plugins/base_variant_mixin.py | 1 | 2440 | from puzzle.utils import (get_gene_info, get_cytoband_coord)
class BaseVariantMixin(object):
"""Base class for variant mixins"""
def variants(self, case_id, skip=0, count=30, filters=None):
"""Return a results tuple with variants and nr_of_variants.
"""
raise NotImplementedError
def variant(self, variant_id):
"""Return a specific variant."""
raise NotImplementedError
def _get_genes(self, variant):
"""Add the genes for a variant
Get the hgnc symbols from all transcripts and add them
to the variant
Args:
variant (dict): A variant dictionary
Returns:
genes (list): A list of Genes
"""
ensembl_ids = []
hgnc_symbols = []
for transcript in variant.transcripts:
if transcript.ensembl_id:
ensembl_ids.append(transcript.ensembl_id)
if transcript.hgnc_symbol:
hgnc_symbols.append(transcript.hgnc_symbol)
genes = get_gene_info(
ensembl_ids=ensembl_ids,
hgnc_symbols=hgnc_symbols
)
return genes
def _add_sv_coordinates(self, variant):
"""Add the neccesary sv coordinates for a variant
Args:
variant (puzzle.models.variant)
"""
variant.stop_chrom = variant.CHROM
variant.start = int(variant.POS)
# If we have a translocation:
if ':' in variant.ALT:
other_coordinates = variant.ALT.strip('ACGTN[]').split(':')
variant.stop_chrom = other_coordinates[0].lstrip('chrCHR')
other_position = other_coordinates[1]
# variant.stop = other_position
#Set 'infinity' to length if translocation
variant.sv_len = float('inf')
variant.sv_type = 'BND'
else:
variant.sv_len = variant.stop - variant.start
variant['cytoband_start'] = get_cytoband_coord(
chrom=variant.CHROM,
pos=variant.start
)
variant['cytoband_stop'] = get_cytoband_coord(
chrom=variant.stop_chrom,
pos=variant.stop
)
| mit | -7,586,782,505,556,927,000 | 31.972973 | 71 | 0.515574 | false | 4.518519 | false | false | false |
yantisj/netgrph | nglib/ngtree/upgrade.py | 1 | 1274 | 'Upgrade older ngtrees to newer version'
import logging
logger = logging.getLogger(__name__)
def upgrade_ngt_v2(ngt):
'Upgrade ngt structures to version 2 for the API'
stack = list()
# Add dictionary to stack
stack.append(ngt)
# upgrade keys on all dictionaries
for tree in stack:
# Copy NGT and traverse
nt = tree.copy()
for f in nt:
# Upgrade dictionary key
tree.pop(f)
tree[_new_name(f)] = nt[f]
# Found a nested dict, add to stack
if isinstance(nt[f], dict):
stack.append(nt[f])
# Found a nested list
elif isinstance(nt[f], list):
for en in nt[f]:
# nested dict in list, add to stack
if isinstance(en, dict):
stack.append(en)
return ngt
def _new_name(old):
'Get new name for fields (lowercase, replace spaces with _)'
nmap = {
'StandbyRouter': 'standby_router',
'SecurityLevel': 'security_level',
'mgmtgroup': 'mgmt_group'
}
if old in nmap:
return nmap[old]
old = old.replace(' ', '_')
old = old.lower()
if old == 'data':
old = 'xdata'
return old
| agpl-3.0 | -167,502,471,302,788,800 | 22.163636 | 64 | 0.526688 | false | 3.98125 | false | false | false |
DarKnight--/owtf | framework/plugin/scanner.py | 2 | 8554 | #!/usr/bin/env python
'''
The scan_network scans the network for different ports and call network plugins for different services running on target
'''
import re
import logging
from framework.dependency_management.dependency_resolver import BaseComponent
from framework.utils import FileOperations
SCANS_FOLDER = "scans" # Folder under which all scans will be saved
PING_SWEEP_FILE = "%s/00_ping_sweep" % SCANS_FOLDER
DNS_INFO_FILE= "%s/01_dns_info" % SCANS_FOLDER
FAST_SCAN_FILE = "%s/02_fast_scan" % SCANS_FOLDER
STD_SCAN_FILE = "%s/03_std_scan" % SCANS_FOLDER
FULL_SCAN_FILE = "%s/04_full_scan" % SCANS_FOLDER
class Scanner(BaseComponent):
COMPONENT_NAME = "scanner"
def __init__(self):
self.register_in_service_locator()
self.shell = self.get_component("shell")
self.config = self.get_component("config")
self.plugin_handler = self.get_component("plugin_handler")
self.shell.shell_exec("mkdir %s" % SCANS_FOLDER)
def ping_sweep(self, target, scantype):
if scantype == "full":
logging.info("Performing Intense Host discovery")
self.shell.shell_exec("nmap -n -v -sP -PE -PP -PS21,22,23,25,80,443,113,21339 -PA80,113,443,10042"
" --source_port 53 %s -oA %s" % (target, PING_SWEEP_FILE))
if scantype == "arp":
logging.info("Performing ARP host discovery")
self.shell.shell_exec("nmap -n -v -sP -PR %s -oA %s" % (target, PING_SWEEP_FILE))
self.shell.shell_exec('grep Up %s.gnmap | cut -f2 -d\" \" > %s.ips' % (PING_SWEEP_FILE, PING_SWEEP_FILE))
def dns_sweep(self, file_with_ips, file_prefix):
logging.info("Finding misconfigured DNS servers that might allow zone transfers among live ips ..")
self.shell.shell_exec("nmap -PN -n -sS -p 53 -iL %s -oA %s" % (file_with_ips, file_prefix))
# Step 2 - Extract IPs
dns_servers = "%s.dns_server.ips" % file_prefix
self.shell.shell_exec('grep \"53/open/tcp\" %s.gnmap | cut -f 2 -d \" \" > %s' % (file_prefix, dns_servers))
file = FileOperations.open(dns_servers)
domain_names = "%s.domain_names" % file_prefix
self.shell.shell_exec("rm -f %s" % domain_names)
num_dns_servers = 0
for line in file:
if line.strip('\n'):
dns_server = line.strip('\n')
self.shell.shell_exec("host %s %s | grep 'domain name' | cut -f 5 -d' ' | cut -f 2,3,4,5,6,7 -d. "
"| sed 's/\.$//' >> %s" % (dns_server, dns_server, domain_names))
num_dns_servers += 1
try:
file = FileOperations.open(domain_names, owtf_clean=False)
except IOError:
return
for line in file:
domain = line.strip('\n')
raw_axfr = "%s.%s.%s.axfr.raw" % (file_prefix, dns_server, domain)
self.shell.shell_exec("host -l %s %s | grep %s > %s" % (domain, dns_server, domain, raw_axfr))
success = self.shell.shell_exec("wc -l %s | cut -f 1 -d ' '" % raw_axfr)
if success > 3:
logging.info("Attempting zone transfer on $dns_server using domain %s.. Success!" % domain)
axfr = "%s.%s.%s.axfr" % (file_prefix, dns_server, domain)
self.shell.shell_exec("rm -f %s" % axfr)
logging.info(self.shell.shell_exec("grep 'has address' %s | cut -f 1,4 -d ' ' | sort -k 2 -t ' ' "
"| sed 's/ /#/g'" % raw_axfr))
else:
logging.info("Attempting zone transfer on $dns_server using domain %s.. Success!" % domain)
self.shell.shell_exec("rm -f %s" % raw_axfr)
if num_dns_servers == 0:
return
def scan_and_grab_banners(self, file_with_ips, file_prefix, scan_type, nmap_options):
if scan_type == "tcp":
logging.info("Performing TCP portscan, OS detection, Service detection, banner grabbing, etc")
self.shell.shell_exec("nmap -PN -n -v --min-parallelism=10 -iL %s -sS -sV -O -oA %s.tcp %s") % (
file_with_ips, file_prefix, nmap_options)
self.shell.shell_exec("amap -1 -i %s.tcp.gnmap -Abq -m -o %s.tcp.amap -t 90 -T 90 -c 64" % (file_prefix,
file_prefix))
if scan_type == "udp":
logging.info("Performing UDP portscan, Service detection, banner grabbing, etc")
self.shell.shell_exec("nmap -PN -n -v --min-parallelism=10 -iL %s -sU -sV -O -oA %s.udp %s" % (
file_with_ips, file_prefix, nmap_options))
self.shell.shell_exec("amap -1 -i %s.udp.gnmap -Abq -m -o %s.udp.amap" % (file_prefix, file_prefix))
def get_nmap_services_file(self):
return '/usr/share/nmap/nmap-services'
def get_ports_for_service(self, service, protocol):
regexp = '(.*?)\t(.*?/.*?)\t(.*?)($|\t)(#.*){0,1}'
re.compile(regexp)
list = []
f = FileOperations.open(self.get_nmap_services_file())
for line in f.readlines():
if line.lower().find(service) >= 0:
match = re.findall(regexp, line)
if match:
port = match[0][1].split('/')[0]
prot = match[0][1].split('/')[1]
if (not protocol or protocol == prot) and port not in list:
list.append(port)
f.close()
return list
def target_service(self, nmap_file, service):
ports_for_service = self.get_ports_for_service(service, "")
f = FileOperations.open(nmap_file.strip())
response = ""
for host_ports in re.findall('Host: (.*?)\tPorts: (.*?)[\t\n]', f.read()):
host = host_ports[0].split(' ')[0] # Remove junk at the end
ports = host_ports[1].split(',')
for port_info in ports:
if len(port_info) < 1:
continue
chunk = port_info.split('/')
port = chunk[0].strip()
port_state = chunk[1].strip()
# No point in wasting time probing closed/filtered ports!!
# (nmap sometimes adds these to the gnmap file for some reason ..)
if port_state in ['closed', 'filtered']:
continue
try:
prot = chunk[2].strip()
except:
continue
if port in ports_for_service:
response += "%s:%s:%s##" % (host, port, prot)
f.close()
return response
def probe_service_for_hosts(self, nmap_file, target):
services = []
# Get all available plugins from network plugin order file
net_plugins = self.config.Plugin.GetOrder("network")
for plugin in net_plugins:
services.append(plugin['Name'])
services.append("http")
total_tasks = 0
tasklist = ""
plugin_list = []
http = []
for service in services:
if plugin_list.count(service) > 0:
continue
tasks_for_service = len(self.target_service(nmap_file, service).split("##")) - 1
total_tasks += tasks_for_service
tasklist = "%s [ %s - %s tasks ]" % (tasklist, service, str(tasks_for_service))
for line in self.target_service(nmap_file, service).split("##"):
if line.strip("\n"):
ip = line.split(":")[0]
port = line.split(":")[1]
plugin_to_invoke = service
service1 = plugin_to_invoke
self.config.Set("%s_PORT_NUMBER" % service1.upper(), port)
if service != 'http':
plugin_list.append(plugin_to_invoke)
http.append(port)
logging.info("We have to probe %s:%s for service %s", str(ip), str(port), plugin_to_invoke)
return http
def scan_network(self, target):
self.ping_sweep(target.split("//")[1], "full")
self.dns_sweep("%s.ips" % PING_SWEEP_FILE, DNS_INFO_FILE)
def probe_network(self, target, protocol, port):
self.scan_and_grab_banners("%s.ips" % PING_SWEEP_FILE, FAST_SCAN_FILE, protocol, "-p %s" % port)
return self.probe_service_for_hosts("%s.%s.gnmap" % (FAST_SCAN_FILE, protocol), target.split("//")[1])
| bsd-3-clause | -6,012,392,452,692,870,000 | 47.327684 | 120 | 0.535773 | false | 3.492854 | false | false | false |
bungoume/webhook-deploy | webhook_deploy/core/models.py | 1 | 1442 | from django.db import models
from django.utils import timezone
from jsonfield import JSONField
class Repository(models.Model):
# github, gitbucket, etc...
hub = models.CharField(max_length=191, db_index=True)
# user
user = models.CharField(max_length=191)
# repository_name
name = models.CharField(max_length=191)
# username/reponame
full_name = models.CharField(max_length=191, db_index=True)
# Secret Key
secret = models.CharField(max_length=191, db_index=True)
def __str__(self):
return '{}: {}'.format(self.hub, self.full_name)
class DeploySetting(models.Model):
repository = models.ForeignKey(Repository)
branch = models.CharField(max_length=191)
command = models.TextField()
def __str__(self):
return '{}: {}'.format(self.repository, self.branch)
class HookLog(models.Model):
data = JSONField()
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
dt_text = timezone.localtime(self.created_at).strftime('%Y-%m-%d %H:%M:%S')
return '{}: {}'.format(dt_text, self.data.get('path'))
class DeployLog(models.Model):
log = models.TextField()
return_code = models.IntegerField()
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
dt_text = timezone.localtime(self.created_at).strftime('%Y-%m-%d %H:%M:%S')
return '{}: {}'.format(dt_text, self.return_code)
| mit | -1,986,875,627,614,209,300 | 29.680851 | 83 | 0.654646 | false | 3.483092 | false | false | false |
Yelp/kafka-python | test/test_admin_client_integration.py | 1 | 2702 | import os
import time
import unittest
import pytest
from kafka.admin_client import AdminClient, NewTopic, NewPartitionsInfo
from kafka.protocol.metadata import MetadataRequest
from test.fixtures import ZookeeperFixture, KafkaFixture
from test.testutil import KafkaIntegrationTestCase, env_kafka_version
KAFKA_ADMIN_TIMEOUT_SECONDS = 5
class TestKafkaAdminClientIntegration(KafkaIntegrationTestCase):
@classmethod
def setUpClass(cls):
if not os.environ.get('KAFKA_VERSION'):
return
cls.zk = ZookeeperFixture.instance()
cls.server = KafkaFixture.instance(0, cls.zk)
@classmethod
def tearDownClass(cls):
if not os.environ.get('KAFKA_VERSION'):
return
cls.server.close()
cls.zk.close()
@pytest.mark.skipif(env_kafka_version() < (0, 10, 1), reason='Unsupported Kafka Version')
def test_create_delete_topics(self):
admin = AdminClient(self.client_async)
topic = NewTopic(
name='topic',
num_partitions=1,
replication_factor=1,
)
metadata_request = MetadataRequest[1]()
response = admin.create_topics(topics=[topic], timeout=KAFKA_ADMIN_TIMEOUT_SECONDS)
# Error code 7 means that RequestTimedOut but we can safely assume
# that topic is created or will be created eventually.
# see this https://cwiki.apache.org/confluence/display/KAFKA/
# KIP-4+-+Command+line+and+centralized+administrative+operations
self.assertTrue(
response[0].topic_errors[0][1] == 0 or
response[0].topic_errors[0][1] == 7
)
time.sleep(1) # allows the topic to be created
delete_response = admin.delete_topics(['topic'], timeout=1)
self.assertTrue(
response[0].topic_errors[0][1] == 0 or
response[0].topic_errors[0][1] == 7
)
@pytest.mark.skipif(env_kafka_version() < (1, 0, 0), reason='Unsupported Kafka Version')
def test_create_partitions(self):
admin = AdminClient(self.client_async)
topic = NewTopic(
name='topic',
num_partitions=1,
replication_factor=1,
)
metadata_request = MetadataRequest[1]()
admin.create_topics(topics=[topic], timeout=KAFKA_ADMIN_TIMEOUT_SECONDS)
time.sleep(1) # allows the topic to be created
new_partitions_info = NewPartitionsInfo('topic', 2, [[0]])
response = admin.create_partitions([new_partitions_info], timeout=1, validate_only=False)
self.assertTrue(
response[0].topic_errors[0][1] == 0 or
response[0].topic_errors[0][1] == 7
)
| apache-2.0 | 4,979,695,835,716,379,000 | 35.513514 | 97 | 0.634345 | false | 3.910275 | true | false | false |
normalnorway/avtalegiro | ordernumber.py | 1 | 1492 | r"""
ORDER NUMBER: Numerical, 7 positions.
Unique numbering must be used for orders per payee's recipient agreement,
12 months + one day ahead.
An atomic incrementing counter is used, modulu 1e8. A check is done to
assert that order number is not reused in the last 12 months + 1 day.
@todo implement that check!
"""
# @todo new name. does more than generate order numbers
# Only store one row with one column
#SQL_TABLE1 = 'CREATE TABLE order_number (next BIGINT)'
# one row per order_number? use auto increment primary key?
import sqlite3
conn = sqlite3.connect ('transmissions.db')
cursor = conn.cursor()
def _init_db():
cursor.execute ('create table keyval (key TEXT, val BIGINT)')
cursor.execute ('insert into keyval values (?,?)', ('order_number', 1))
def next_order_number():
"""Note: Will wrap around after 10 million numbers are generated"""
oldlevel = conn.isolation_level
try:
conn.isolation_level = 'EXCLUSIVE'
params = ('order_number',)
cursor.execute ('select val from keyval where key=?', params)
number = cursor.fetchone()[0]
cursor.execute ('update keyval set val=val+1 where key=?', params)
conn.commit()
except sqlite3.OperationalError as ex:
conn.rollback() # needed? done by default?
if ex.message != 'no such table: keyval': raise ex
_init_db()
return next_order_number()
finally:
conn.isolation_level = oldlevel
return number % int(1e8)
| gpl-3.0 | -4,596,594,146,202,820,000 | 32.155556 | 75 | 0.676273 | false | 3.767677 | false | false | false |
easyCZ/SLIP-A-2015 | respiratory/Processed Datasets/Processed Datasets/week6.py | 1 | 2409 | class patients(object):
def __init__(self,number,scores,stats,participated,rank,actual_rank,overall):
self.number = number # integer from 1-10. patient number.
self.scores = scores # list of floats element of [0,10]. Floats are scores for a given exercise.
self.stats = stats # for n exercises this is a list containing n lists of the form [avg,min,max], where avg is the patient's avg br for a given exercise, min is the patient's min br for a given exercise... etc.
self.participated = participated # list of booleans. True if patient participated in a gievn exercise, False if a patient did not.
self.rank = rank
self.actual_rank = actual_rank
self.overall = overall # float element of [0,10]. Overall scor of patient.
# swap rows i and j of matrix A.
def row_swap(A,i,j):
row_i = A[i]
A[i] = A[j]
A[j] = row_i
return A
# In matrix A, add factor*row_i to row j.
def row_add(A,i,j,factor):
dim_col = len(A[0])
for k in range(0,dim_col):
A[j][k] = A[j][k]+ factor*A[i][k]
return A
def zeros(n,m):
output = []
for i in range(0,n):
output.append([])
for j in range(0,m):
output[i].append(0)
return output
def multiply(A,B):
row_dim = len(A)
col_dim = len(B[0])
sum_length = len(A[0])
AB = zeros(row_dim,col_dim)
for i in range(0,row_dim):
for j in range(0,col_dim):
for k in range(0,sum_length):
AB[i][j] = AB[i][j] + A[i][k]*B[k][j]
return AB
# Takes A,b from Ax = b and returns triangular matrix T along with modified b.
def Gaussian(A,b):
dim = len(A)
for i in range(0,dim):
if A[i][i] == 0:
count = 0
while A[i+count][i] == 0:
count += 1
if i+count > dim:
return "failure"
break
row_swap(A,i,i+count)
row_swap(b,i,i+count)
for j in range(i+1,dim):
row_add(b,i,j,-A[j][i]/A[i][i])
row_add(A,i,j,-A[j][i]/A[i][i])
return [A,b]
A = [[1,2,3],[2,3,5],[5,2,4]]
b = [[1],[2],[3]]
Tb = Gaussian(A,b)
T = Tb[0]
b = Tb[1]
def list_to_int(b):
for i in range(0,len(b)):
b[i] = b[i][0]
return b
# takes triangular matrix T, vector y and solves for x in Tx = y
def backsub(T,y):
y = list_to_int(y)
dim = len(T)
print T[dim-1][dim-1]
x = []
for i in range(0,dim):
x.append(0)
x[dim-1] = y[dim-1]/float(T[dim-1][dim-1])
rows = reversed(range(0,dim-1))
for i in rows:
x[i] = float(y[i])
for j in range(i+1,dim):
x[i] -= T[i][j]*x[j]
x[i] = x[i]/T[i][i]
return x
print backsub(T,b) | mit | -4,914,040,263,418,544,000 | 25.483516 | 214 | 0.611042 | false | 2.283412 | false | false | false |
aaichsmn/tacc_stats | tacc_stats/analysis/job_printer.py | 1 | 2175 | #!/usr/bin/env python
import sys, os
import cPickle as pickle
from datetime import datetime
from tacc_stats import cfg as cfg
from tacc_stats.pickler import batch_acct,job_stats
def main(**args):
acct = batch_acct.factory(cfg.batch_system,
cfg.acct_path,
cfg.host_name_ext)
reader = acct.find_jobids(args['jobid']).next()
date_dir = os.path.join(cfg.pickles_dir,
datetime.fromtimestamp(reader['end_time']).strftime('%Y-%m-%d'))
pickle_file = os.path.join(date_dir, reader['id'])
with open(pickle_file) as fd:
data = pickle.load(fd)
print "Hosts:", data.hosts.keys()
if not args['host']: pass
elif args['host'] in data.hosts:
data.hosts = { args['host'] : data.hosts[args['host']] }
else:
print args['host'],"does not exist in", args['file']
return
for host_name, host in data.hosts.iteritems():
print "Host:",host_name
print "Types:",host.stats.keys()
print host.marks
if not args['type']: pass
elif args['type'] in host.stats:
host.stats = { args['type'] : host.stats[args['type']] }
else:
print args['type'],"does not exist in", args['file']
return
for type_name, type_device in host.stats.iteritems():
print ''
print "Type:", type_name
print "Schema:", data.get_schema(type_name).keys()
for device_name, device in type_device.iteritems():
print "Device:",device_name
print device
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Print job using Job ID or pickle file path')
parser.add_argument('file', help='Pickle file to print',
nargs='?', type=str)
parser.add_argument('-jobid', help='Job ID to print',
nargs='+', type=str)
parser.add_argument('-type', help='Restrict print to this type')
parser.add_argument('-host', help='Restrict print to this host')
main(**vars(parser.parse_args()))
| lgpl-2.1 | 5,620,917,312,338,584,000 | 33.52381 | 94 | 0.567816 | false | 3.983516 | false | false | false |
prontodev/ban2stats_dynamodb | stats/tests/test_blocked_country_package.py | 1 | 2434 | from django.test import SimpleTestCase
from django.conf import settings
from stats.models import BlockedCountry
from stats.packages.blocked_country import BlockedCountryPackageBuilder
import time
class TestBlockedCountryPackageBuilder(SimpleTestCase):
def setUp(self):
self.builder = BlockedCountryPackageBuilder()
if not BlockedCountry.exists():
BlockedCountry.create_table()
time.sleep(settings.TESTING_SLEEP_TIME)
self.item1 = BlockedCountry(country_code='US', country_name='United States', count=22)
self.item1.save()
self.item2 = BlockedCountry(country_code='TH', country_name='Thailand', count=3000)
self.item2.save()
self.item3 = BlockedCountry(country_code='SG', country_name='Singapore', count=12094)
self.item3.save()
self.item4 = BlockedCountry(country_code='AL', country_name='Albania', count=3)
self.item4.save()
self.item5 = BlockedCountry(country_code='MA', country_name='Morocco', count=34123)
self.item5.save()
self.item6 = BlockedCountry(country_code='PE', country_name='Peru', count=50)
self.item6.save()
def tearDown(self):
BlockedCountry.delete_table()
time.sleep(settings.TESTING_SLEEP_TIME)
def test_get_top_5(self):
objects = self.builder.get_top_5_objects()
self.assertEqual(len(objects), 5)
self.assertEqual(objects[0].count, 34123)
self.assertEqual(objects[1].count, 12094)
self.assertEqual(objects[2].count, 3000)
self.assertEqual(objects[3].count, 50)
self.assertEqual(objects[4].count, 22)
def test_render_each_object(self):
content = self.builder.render_each_object(self.item5)
self.assertIn('{', content)
self.assertIn('"country_name": "Morocco"', content)
self.assertIn('"count": "34,123"', content)
self.assertIn('}', content)
def test_render_all_objects(self):
content = self.builder.render_all_objects()
self.assertIn('{', content)
self.assertIn('"country_name": "Morocco"', content)
self.assertIn('"count": "34,123"', content)
self.assertIn('}', content)
self.assertNotEqual(',', content[-1])
def test_render_as_javascript(self):
content = self.builder.render_as_javascript()
expected_content = u''']'''
self.assertIn(expected_content, content) | gpl-2.0 | -482,902,131,101,907,100 | 39.583333 | 94 | 0.657354 | false | 3.6823 | true | false | false |
rjw57/edpcmentoring | edpcmentoring/cuedmembers/migrations/0001_initial.py | 2 | 2205 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-26 12:51
from __future__ import unicode_literals
from django.conf import settings
from django.core.management import call_command
from django.db import migrations, models
import django.db.models.deletion
def load_dept_structure(apps, schema_editor):
call_command('loaddata', 'cuedmembers/divisions_and_research_groups.json')
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Division',
fields=[
('letter', models.CharField(max_length=1, primary_key=True, serialize=False)),
('name', models.CharField(max_length=256)),
],
),
migrations.CreateModel(
name='Member',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_names', models.CharField(blank=True, default='', max_length=100)),
('is_active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='ResearchGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('division', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='research_groups', to='cuedmembers.Division')),
],
),
migrations.AddField(
model_name='member',
name='research_group',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='members', to='cuedmembers.ResearchGroup'),
),
migrations.AddField(
model_name='member',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='cued_member', to=settings.AUTH_USER_MODEL),
),
migrations.RunPython(load_dept_structure),
]
| mit | -1,037,518,257,966,597,800 | 38.375 | 152 | 0.609524 | false | 4.176136 | false | false | false |
mlufei/depot_tools | third_party/gsutil/gslib/addlhelp/metadata.py | 51 | 8027 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gslib.help_provider import HELP_NAME
from gslib.help_provider import HELP_NAME_ALIASES
from gslib.help_provider import HELP_ONE_LINE_SUMMARY
from gslib.help_provider import HelpProvider
from gslib.help_provider import HELP_TEXT
from gslib.help_provider import HelpType
from gslib.help_provider import HELP_TYPE
_detailed_help_text = ("""
<B>OVERVIEW OF METADATA</B>
Objects can have associated metadata, which control aspects of how
GET requests are handled, including Content-Type, Cache-Control,
Content-Disposition, and Content-Encoding (discussed in more detail in
the subsections below). In addition, you can set custom metadata that
can be used by applications (e.g., tagging that particular objects possess
some property).
There are two ways to set metadata on objects:
- at upload time you can specify one or more headers to associate with
objects, using the gsutil -h option. For example, the following command
would cause gsutil to set the Content-Type and Cache-Control for each
of the files being uploaded:
gsutil -h "Content-Type:text/html" -h "Cache-Control:public, max-age=3600" cp -r images gs://bucket/images
Note that -h is an option on the gsutil command, not the cp sub-command.
- You can set or remove metadata fields from already uploaded objects using
the gsutil setmeta command. See "gsutil help setmeta".
More details about specific pieces of metadata are discussed below.
<B>CONTENT TYPE</B>
The most commonly set metadata is Content-Type (also known as MIME type),
which allows browsers to render the object properly.
gsutil sets the Content-Type
automatically at upload time, based on each filename extension. For
example, uploading files with names ending in .txt will set Content-Type
to text/plain. If you're running gsutil on Linux or MacOS and would prefer
to have content type set based on naming plus content examination, see the
use_magicfile configuration variable in the gsutil/boto configuration file
(See also "gsutil help config"). In general, using use_magicfile is more
robust and configurable, but is not available on Windows.
If you specify a -h header when uploading content (like the example gsutil
command given in the previous section), it overrides the Content-Type that
would have been set based on filename extension or content. This can be
useful if the Content-Type detection algorithm doesn't work as desired
for some of your files.
You can also completely suppress content type detection in gsutil, by
specifying an empty string on the Content-Type header:
gsutil -h 'Content-Type:' cp -r images gs://bucket/images
In this case, the Google Cloud Storage service will attempt to detect
the content type. In general this approach will work better than using
filename extension-based content detection in gsutil, because the list of
filename extensions is kept more current in the server-side content detection
system than in the Python library upon which gsutil content type detection
depends. (For example, at the time of writing this, the filename extension
".webp" was recognized by the server-side content detection system, but
not by gsutil.)
<B>CACHE-CONTROL</B>
Another commonly set piece of metadata is Cache-Control, which allows
you to control whether and for how long browser and Internet caches are
allowed to cache your objects. Cache-Control only applies to objects with
a public-read ACL. Non-public data are not cacheable.
Here's an example of uploading an object set to allow caching:
gsutil -h "Cache-Control:public,max-age=3600" cp -a public-read -r html gs://bucket/html
This command would upload all files in the html directory (and subdirectories)
and make them publicly readable and cacheable, with cache expiration of
one hour.
Note that if you allow caching, at download time you may see older versions
of objects after uploading a newer replacement object. Note also that because
objects can be cached at various places on the Internet there is no way to
force a cached object to expire globally (unlike the way you can force your
browser to refresh its cache).
<B>CONTENT-ENCODING</B>
You could specify Content-Encoding to indicate that an object is compressed,
using a command like:
gsutil -h "Content-Encoding:gzip" cp *.gz gs://bucket/compressed
Note that Google Cloud Storage does not compress or decompress objects. If
you use this header to specify a compression type or compression algorithm
(for example, deflate), Google Cloud Storage preserves the header but does
not compress or decompress the object. Instead, you need to ensure that
the files have been compressed using the specified Content-Encoding before
using gsutil to upload them.
For compressible content, using Content-Encoding:gzip saves network and
storage costs, and improves content serving performance (since most browsers
are able to decompress objects served this way).
Note also that gsutil provides an easy way to cause content to be compressed
and stored with Content-Encoding:gzip: see the -z option in "gsutil help cp".
<B>CONTENT-DISPOSITION</B>
You can set Content-Disposition on your objects, to specify presentation
information about the data being transmitted. Here's an example:
gsutil -h 'Content-Disposition:attachment; filename=filename.ext' \\
cp -r attachments gs://bucket/attachments
Setting the Content-Disposition allows you to control presentation style
of the content, for example determining whether an attachment should be
automatically displayed vs should require some form of action from the user to
open it. See http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1
for more details about the meaning of Content-Disposition.
<B>CUSTOM METADATA</B>
You can add your own custom metadata (e.g,. for use by your application)
to an object by setting a header that starts with "x-goog-meta", for example:
gsutil -h x-goog-meta-reviewer:jane cp mycode.java gs://bucket/reviews
You can add multiple differently named custom metadata fields to each object.
<B>SETTABLE FIELDS; FIELD VALUES</B>
You can't set some metadata fields, such as ETag and Content-Length. The
fields you can set are:
- Cache-Control
- Content-Disposition
- Content-Encoding
- Content-Language
- Content-MD5
- Content-Type
- Any field starting with X-GOOG-META- (i.e., custom metadata).
Header names are case-insensitive.
X-GOOG-META- fields can have data set to arbitrary Unicode values. All
other fields must have ASCII values.
<B>VIEWING CURRENTLY SET METADATA</B>
You can see what metadata is currently set on an object by using:
gsutil ls -L gs://the_bucket/the_object
""")
class CommandOptions(HelpProvider):
"""Additional help about object metadata."""
help_spec = {
# Name of command or auxiliary help info for which this help applies.
HELP_NAME : 'metadata',
# List of help name aliases.
HELP_NAME_ALIASES : ['cache-control', 'caching', 'content type',
'mime type', 'mime', 'type'],
# Type of help:
HELP_TYPE : HelpType.ADDITIONAL_HELP,
# One line summary of this help.
HELP_ONE_LINE_SUMMARY : 'Working with object metadata',
# The full help text.
HELP_TEXT : _detailed_help_text,
}
| bsd-3-clause | -694,530,456,639,687,000 | 42.155914 | 114 | 0.754952 | false | 4.297109 | false | false | false |
KRHS-GameProgramming-2014/SUPER-AWESOME-NINJA-GAME | HUD.py | 1 | 1412 | import pygame
class Text():
def __init__(self, pos, text = "", textSize = 12, textColor=(255,255,255), font = None):
self.text = text
self.textColor = textColor
self.font = pygame.font.Font(font, textSize)
self.image = self.font.render(self.text, 1, textColor)
self.rect = self.image.get_rect()
self.place(pos)
def place(self, pos):
self.rect.center = pos
def setText(self, text):
self.text = text
self.image = self.font.render(text, 1, textColor)
self.rect = self.image.get_rect(center = self.rect.center)
def update(self, width, height):
pass
class Score(Text):
def __init__(self, pos, baseText = "Score: ", textSize = 12, textColor=(255,255,255), font = None):
self.score = 0
self.baseText = baseText
self.text = self.baseText + str(self.score)
Text.__init__(self, pos, self.text, textSize, textColor, font)
self.change = False
def setText(self, text):
self.baseText = text
self.change = True
def update(self):
if self.change:
self.text = self.baseText + str(self.score)
self.image = self.font.render(self.text, 1, self.textColor)
self.rect = self.image.get_rect(center = self.rect.center)
self.change = False
def setScore(self, score):
self.score = score
self.change = True
def increaseScore(self, amount = 1):
self.score += amount
self.change = True
def resetScore(self):
self.score = 0
self.change = True
| bsd-2-clause | 5,879,221,224,374,994,000 | 25.641509 | 100 | 0.667847 | false | 2.881633 | false | false | false |
ifermon/garagePi | event.py | 1 | 1068 | class Event(object):
_event_groups = {}
@classmethod
def get_events(cls, key="Default"):
return cls._event_groups.get(key, [])
def __init__(self, name, msg=None, group_key="Default"):
self._name = name
self._msg = msg
self._my_group = group_key
groups = Event._event_groups
groups[group_key] = groups.get(group_key, []) + [self,]
return
def localize(self, msg):
return Event(self.name, msg, self._my_group)
@property
def name(self):
return self._name
@property
def msg(self):
return self._msg
def __hash__(self):
return hash(self._name)
def __ne__(self):
return not(self == other)
def __str__(self):
return self._name
def __repr__(self):
return self._name
def __eq__(self, other):
return self._name == other._name
if __name__ == "__main__":
grp = "a group"
a = Event("a", "a msg", grp)
b = Event("b", "b msg")
c = Event("c", "c msg", grp)
print(c.__dict__)
| gpl-2.0 | -2,078,049,025,914,573,300 | 20.36 | 63 | 0.519663 | false | 3.56 | false | false | false |
argaen/restmote | restmote/sync.py | 1 | 2059 | import requests
import logging
import urlparse
from django.conf import settings
root = urlparse.urljoin(settings.RESTMOTE_HOST + ":" + settings.RESTMOTE_PORT, settings.RESTMOTE_API_ROOT)
def get_data(url):
if hasattr(settings, "RESTMOTE_USER") and hasattr(settings, "RESTMOTE_PASSWORD"):
r = requests.get(url, timeout=15, auth=(settings.RESTMOTE_USER, settings.RESTMOTE_PASSWORD))
else:
r = requests.get(url, timeout=15)
if r.status_code == 200:
logging.info(url)
logging.info(r.json())
return True, r.json()
else:
logging.info("Connection failed: %s" % r.text)
return False, []
def build_objects(obj_class, obj_string, data, field_bindings, nested=[]):
for e in data:
try:
o = obj_class.objects.get(**{'id' + obj_string: e["id"]})
except obj_class.DoesNotExist:
o = obj_class()
for f in [x for x in e if x in field_bindings]:
setattr(o, field_bindings[f], e[f])
for n in nested:
for f in [x for x in e[n] if x in field_bindings]:
setattr(o, field_bindings[f], e[n][f])
setattr(o, "id" + obj_string, e["id"])
o.save()
logging.info("Added %s: %s" % (obj_string, o.pk))
def sync_objects(url, qfilter, obj_class, obj_string, field_bindings, nested=[]):
status, data = get_data(root + url + '?' + qfilter)
if status:
build_objects(obj_class, obj_string, data, field_bindings, nested)
return 1
else:
return 0
def remove_objects(url, obj_class, obj_string):
status, remote_ids = get_data(root + url)
if status:
local_ids = obj_class.objects.values_list('id' + obj_string, flat=True)
must_remove = list(set(local_ids).difference(remote_ids))
obj_class.objects.filter(**{'id' + obj_string + '__in': must_remove}).delete()
if must_remove:
logging.info("Deleted %s: %s" % (obj_string, ', '.join(str(x) for x in must_remove)))
return 1
else:
return 0
| apache-2.0 | -7,204,773,143,457,903,000 | 31.171875 | 106 | 0.595435 | false | 3.35342 | false | false | false |
ChenJunor/hue | apps/pig/src/pig/models.py | 4 | 5507 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import posixpath
from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes import generic
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _, ugettext_lazy as _t
from desktop.lib.exceptions_renderable import PopupException
from desktop.models import Document as Doc, SAMPLE_USER_ID
from hadoop.fs.hadoopfs import Hdfs
class Document(models.Model):
owner = models.ForeignKey(User, db_index=True, verbose_name=_t('Owner'), help_text=_t('User who can modify the job.'))
is_design = models.BooleanField(default=True, db_index=True, verbose_name=_t('Is a user document, not a document submission.'),
help_text=_t('If the document is not a submitted job but a real query, script, workflow.'))
def is_editable(self, user): # Deprecated
return user.is_superuser or self.owner == user
def can_edit_or_exception(self, user, exception_class=PopupException): # Deprecated
if self.is_editable(user):
return True
else:
raise exception_class(_('Only superusers and %s are allowed to modify this document.') % user)
class PigScript(Document):
_ATTRIBUTES = ['script', 'name', 'properties', 'job_id', 'parameters', 'resources', 'hadoopProperties']
data = models.TextField(default=json.dumps({
'script': '',
'name': '',
'properties': [],
'job_id': None,
'parameters': [],
'resources': [],
'hadoopProperties': []
}))
doc = generic.GenericRelation(Doc, related_name='pig_doc')
def update_from_dict(self, attrs):
data_dict = self.dict
for attr in PigScript._ATTRIBUTES:
if attrs.get(attr) is not None:
data_dict[attr] = attrs[attr]
if 'name' in attrs:
self.doc.update(name=attrs['name'])
self.data = json.dumps(data_dict)
@property
def dict(self):
return json.loads(self.data)
def get_absolute_url(self):
return reverse('pig:index') + '#edit/%s' % self.id
@property
def use_hcatalog(self):
script = self.dict['script']
return ('org.apache.hcatalog.pig.HCatStorer' in script or 'org.apache.hcatalog.pig.HCatLoader' in script) or \
('org.apache.hive.hcatalog.pig.HCatLoader' in script or 'org.apache.hive.hcatalog.pig.HCatStorer' in script) # New classes
@property
def use_hbase(self):
script = self.dict['script']
return 'org.apache.pig.backend.hadoop.hbase.HBaseStorage' in script
def create_or_update_script(id, name, script, user, parameters, resources, hadoopProperties, is_design=True):
try:
pig_script = PigScript.objects.get(id=id)
if id == str(SAMPLE_USER_ID): # Special case for the Example, just create an history
is_design = False
raise PigScript.DoesNotExist()
pig_script.doc.get().can_write_or_exception(user)
except PigScript.DoesNotExist:
pig_script = PigScript.objects.create(owner=user, is_design=is_design)
Doc.objects.link(pig_script, owner=pig_script.owner, name=name)
if not is_design:
pig_script.doc.get().add_to_history()
# A user decided eventually to save an unsaved script after execution:
if is_design and pig_script.doc.get().is_historic():
pig_script.doc.get().remove_from_history()
pig_script.update_from_dict({
'name': name,
'script': script,
'parameters': parameters,
'resources': resources,
'hadoopProperties': hadoopProperties
})
return pig_script
def get_scripts(user, is_design=None):
scripts = []
data = Doc.objects.available(PigScript, user)
if is_design is not None:
data = [job for job in data if job.is_design]
for script in data:
data = script.dict
massaged_script = {
'id': script.id,
'docId': script.doc.get().id,
'name': data['name'],
'script': data['script'],
'parameters': data['parameters'],
'resources': data['resources'],
'hadoopProperties': data.get('hadoopProperties', []),
'isDesign': script.is_design,
'can_write': script.doc.get().can_write(user)
}
scripts.append(massaged_script)
return scripts
def get_workflow_output(oozie_workflow, fs):
# TODO: guess from the Input(s):/Output(s)
output = None
if 'workflowRoot' in oozie_workflow.conf_dict:
output = oozie_workflow.conf_dict.get('workflowRoot')
if output and not fs.exists(output):
output = None
return output
def hdfs_link(url):
if url:
path = Hdfs.urlsplit(url)[2]
if path:
if path.startswith(posixpath.sep):
return "/filebrowser/view=" + path
else:
return "/filebrowser/home_relative_view=/" + path
else:
return url
else:
return url
| apache-2.0 | 6,807,907,979,281,745,000 | 31.585799 | 130 | 0.684402 | false | 3.620644 | false | false | false |
AdamMagoon/WhatCD | WhatMain.py | 1 | 3126 | from Models import query_all_requests, organize_data_model
from WhatApi import get_login, similar, GazelleAPIMod, \
get_requests_soup, parse_requests_page, match_two_sets, \
filter_torrent_alphabetically
u_name, pw = get_login()
# user = UserSession(user_name=u_name, password=pw)
def update_album_requests():
exists = False
pages = list(range(1, 1000))
for page in pages:
soup = get_requests_soup(page=page)
parsed_soup = parse_requests_page(soup)
exists = organize_data_model(parsed_soup)
if exists:
return page
def find_matches():
"""
Finds matches between existing Album Requests and existing
torrents on What.cd
Takes into account
Artist Name
Album Name
Acceptable Formats (FLAC, FLAC 24, MP3)
Acceptable Bitrates (Lossless, Lossy)
Acceptable Media (WEB, CD)
"""
matches = {}
what_object = GazelleAPIMod(username=u_name, password=pw)
# Query all of our requests from our stored database
all_requests = [(x.id, x.name) for x in query_all_requests() if
x.name.find('-') >= 0]
for req_id, full_name in all_requests:
name_part = full_name.split('-')
artist = name_part[0].strip()
album = name_part[1].strip()
request_object = what_object.request_search_by_id(req_id)
# Query API with artist name - returns all existing artist material
artist_data = what_object.get_artist_json(artist)
# torrentgroup keyword filters just torrents, removing metadata
torrent_groups = artist_data.get('torrentgroup', [])
# artist_id = artist_query['id']
filtered_groups = filter_torrent_alphabetically(torrent_groups, album)
# Iterate over torrent groups
for torrent_group in filtered_groups:
torrent_group_album = torrent_group['groupName']
if similar(album, torrent_group_album, threshold=0.8):
matches[request_object] = [torrent_group]
print(matches)
# bitrate = set(request_object.acceptable_bitrates)
_format = set(request_object.acceptable_formats)
media = set(request_object.acceptable_media)
# Iterate individual torrents
for tor in torrent_group['torrent']:
tor_format = tor['format']
tor_media = tor['media']
# tor_bitrate = tor['encoding']
tor_id = tor['id']
format_match = match_two_sets(set(tor_format), _format)
media_match = match_two_sets(media, set(tor_media))
if format_match and media_match:
package = (req_id, tor_id)
with open('matches.txt', 'a+') as f:
f.write("Request Id: {}\nTorrent Id: {}\n\n"
.format(package[0], package[1]))
return matches
if __name__ == '__main__':
# find_matches()
update_album_requests()
| mit | -2,521,717,268,370,013,000 | 36.214286 | 78 | 0.574216 | false | 4.049223 | false | false | false |
sam-m888/gprime | gprime/datehandler/_date_bg.py | 1 | 10250 | # -*- coding: utf-8 -*-
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2004-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Bulgarian-specific classes for parsing and displaying dates.
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import re
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from ..lib.date import Date
from ._dateparser import DateParser
from ._datedisplay import DateDisplay
from ._datehandler import register_datehandler
#-------------------------------------------------------------------------
#
# Bulgarian parser
#
#-------------------------------------------------------------------------
class DateParserBG(DateParser):
modifier_to_int = {
'преди' : Date.MOD_BEFORE,
'пр.' : Date.MOD_BEFORE,
'пр' : Date.MOD_BEFORE,
'след' : Date.MOD_AFTER,
'сл.' : Date.MOD_AFTER,
'сл' : Date.MOD_AFTER,
'ок' : Date.MOD_ABOUT,
'ок.' : Date.MOD_ABOUT,
'около' : Date.MOD_ABOUT,
'примерно' : Date.MOD_ABOUT,
'прим' : Date.MOD_ABOUT,
'прим.' : Date.MOD_ABOUT,
'приблизително' : Date.MOD_ABOUT,
'приб.' : Date.MOD_ABOUT,
'прибл.' : Date.MOD_ABOUT,
'приб' : Date.MOD_ABOUT,
'прибл' : Date.MOD_ABOUT,
}
calendar_to_int = {
'григориански' : Date.CAL_GREGORIAN,
'г' : Date.CAL_GREGORIAN,
'юлиански' : Date.CAL_JULIAN,
'ю' : Date.CAL_JULIAN,
'еврейски' : Date.CAL_HEBREW,
'е' : Date.CAL_HEBREW,
'ислямски' : Date.CAL_ISLAMIC,
'и' : Date.CAL_ISLAMIC,
'френски републикански' : Date.CAL_FRENCH,
'републикански' : Date.CAL_FRENCH,
'фр.реп.' : Date.CAL_FRENCH,
'р' : Date.CAL_FRENCH,
'френски' : Date.CAL_FRENCH,
'фр.' : Date.CAL_FRENCH,
'персийски' : Date.CAL_PERSIAN,
'п' : Date.CAL_PERSIAN,
}
quality_to_int = {
'приблизително' : Date.QUAL_ESTIMATED,
'прибл.' : Date.QUAL_ESTIMATED,
'изчислено' : Date.QUAL_CALCULATED,
'изчисл.' : Date.QUAL_CALCULATED,
'изч.' : Date.QUAL_CALCULATED,
}
hebrew_to_int = {
"тишрей":1,
"мархешван":2,
"кислев":3,
"тевет":4,
"шват":5,
"адар":6,
"адар бет":7,
"нисан":8,
"ияр":9,
"сиван":10,
"тамуз":11,
"ав":12,
"eлул":13,
}
islamic_to_int = {
"мухаррам":1,
"саффар":2,
"рабиу-л-ауал":3,
"рабиу-с-сани":4,
"джумадал-уля":5,
"джумада-с-сания":6,
"раджаб":7,
"шаабан":8,
"рамадан":9,
"шауал":10,
"зу-л-кида":11,
"зул-л-хиджа":12,
}
persian_to_int = {
"фарвардин":1,
"урдбихищ":2,
"хурдад":3,
"тир":4,
"мурдад":5,
"шахривар":6,
"михр":7,
"абан":8,
"азар":9,
"дай":10,
"бахман":11,
"исфаидармуз":12,
}
french_to_int = {
"вандемер":1,
"брюмер":2,
"фример":3,
"нивоз":4,
"плювиоз":5,
"вантоз":6,
"жерминал":7,
"флореал":8,
"прериал":9,
"месидор":10,
"термидор":11,
"фрюктидор":12,
"допълнителен":13,
}
bce = [
'преди Христа', 'пр. Хр.', 'пр.Хр.'
] + DateParser.bce
def init_strings(self):
DateParser.init_strings(self)
_span_1 = ['от']
_span_2 = ['до']
_range_1 = ['между']
_range_2 = ['и']
self._span = re.compile("(%s)\s+(?P<start>.+)\s+(%s)\s+(?P<stop>.+)" %
('|'.join(_span_1), '|'.join(_span_2)),
re.IGNORECASE)
self._range = re.compile("(%s)\s+(?P<start>.+)\s+(%s)\s+(?P<stop>.+)" %
('|'.join(_range_1), '|'.join(_range_2)),
re.IGNORECASE)
#-------------------------------------------------------------------------
#
# Bulgarian displayer
#
#-------------------------------------------------------------------------
class DateDisplayBG(DateDisplay):
"""
Bulgarian language date display class.
"""
long_months = ( "", "януари", "февруари", "март", "април", "май",
"юни", "юли", "август", "септември", "октомври",
"ноември", "декември" )
short_months = ( "", "яну", "февр", "март", "апр", "май", "юни",
"юли", "авг", "септ", "окт", "ное", "дек" )
calendar = (
"",
"юлиански",
"еврейски",
"републикански",
"персийски",
"ислямски",
"шведски"
)
_mod_str = ("", "преди ", "след ", "около ", "", "", "")
_qual_str = ("", "приблизително ", "изчислено ")
_bce_str = "%s пр. Хр."
formats = (
"ГГГГ-ММ-ДД (ISO)", "Числов", "Месец Ден, Година", "Мес. Ден, Година", "Ден Месец Година", "Ден Мес. Година"
)
# this must agree with DateDisplayEn's "formats" definition
# (since no locale-specific _display_gregorian exists, here)
hebrew = ( "",
"Тишрей",
"Мархешван",
"Кислев",
"Тевет",
"Шват",
"Адар",
"Адар бет",
"Нисан",
"Ияр",
"Сиван",
"Тамуз",
"Ав",
"Елул",
)
islamic = ( "",
"Мухаррам",
"Саффар",
"Рабиу-л-ауал",
"Рабиу-с-сани",
"Джумадал-уля",
"Джумада-с-сания",
"Раджаб",
"Шаабан",
"Рамадан",
"Шауал",
"Зу-л-кида",
"Зул-л-хиджа",
)
persian = ( "",
"Фарвардин",
"Урдбихищ",
"Хурдад",
"Тир",
"Мурдад",
"Шахривар",
"Михр",
"Абан",
"Азар",
"Дай",
"Бахман",
"Исфаидармуз",
)
french = ( "",
"Вандемер",
"Брюмер",
"Фример",
"Нивоз",
"Плювиоз",
"Вантоз",
"Жерминал",
"Флореал",
"Прериал",
"Мессидор",
"Термидор",
"Фрюктидор",
"Допълнителен"
)
def display(self, date):
"""
Returns a text string representing the date.
"""
mod = date.get_modifier()
cal = date.get_calendar()
qual = date.get_quality()
start = date.get_start_date()
newyear = date.get_new_year()
qual_str = self._qual_str[qual]
if mod == Date.MOD_TEXTONLY:
return date.get_text()
elif start == Date.EMPTY:
return ""
elif mod == Date.MOD_SPAN:
d1 = self.display_cal[cal](start)
d2 = self.display_cal[cal](date.get_stop_date())
scal = self.format_extras(cal, newyear)
return "%s%s %s %s %s%s" % (qual_str, 'от', d1, 'до', d2, scal)
elif mod == Date.MOD_RANGE:
d1 = self.display_cal[cal](start)
d2 = self.display_cal[cal](date.get_stop_date())
scal = self.format_extras(cal, newyear)
return "%s%s %s %s %s%s" % (qual_str, 'между', d1, 'и', d2, scal)
else:
text = self.display_cal[date.get_calendar()](start)
scal = self.format_extras(cal, newyear)
return "%s%s%s%s" % (qual_str, self._mod_str[mod], text, scal)
#-------------------------------------------------------------------------
#
# Register classes
#
#-------------------------------------------------------------------------
register_datehandler(('bg_BG', 'bg', 'bulgarian', 'Bulgarian'),
DateParserBG, DateDisplayBG)
| gpl-2.0 | 4,148,409,983,572,741,600 | 27.898089 | 116 | 0.426603 | false | 2.758893 | false | false | false |
telegraphic/allantools | tests/np_tests.py | 1 | 26560 | #!/usr/bin/env python
"""
np_tests.py
-----------
Compare output validity and speed-up of the numpy-accelerated implementations
against the reference pure-Python implementations.
TODO: Tidy this up
"""
import numpy as np
import allantools.allantools_pure_python as alt
import allantools.allantools as alp
import time
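# ---------------------------------------------------------------------------
# Hypothetical tidy-up sketch (not used by the tests below): every benchmark
# block in __main__ repeats the same compare-and-time pattern, so a helper
# like this could factor it out.  The name `compare_and_time` and its
# signature are our own invention, not part of allantools.
# ---------------------------------------------------------------------------
def compare_and_time(func_old, func_new, *args):
    """Run the pure-python and numpy implementations with identical
    arguments, assert that their outputs agree, and return the speed-up."""
    t1 = time.time()
    res_old = func_old(*args)
    t2 = time.time()
    t3 = time.time()
    res_new = func_new(*args)
    t4 = time.time()
    for a, b in zip(res_old, res_new):
        assert np.allclose(a, b)
    return (t2 - t1) / (t4 - t3)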
if __name__ == "__main__":
print "Compare results and speed of pure-python allantools against numpy allantools"
#######################
# MTIE_PHASE()
#######################
print "\ntesting mtie_phase()"
data = np.random.random(1000)
taus = [1, 3, 5, 16, 128]
rates = [1, 20, 10.7]
strides = [1, 10, 7]
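    # Correctness sweep: both implementations must agree for every sample
    # rate.  The stride loop is kept for symmetry with the calc_*_phase
    # tests further down; mtie_phase() itself takes no stride argument.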
for rate in rates:
for stride in strides:
#print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride)
o_taus, o_dev, o_err, o_n = alt.mtie_phase_purepy(data, rate, taus)
o_taus_, o_dev_, o_err_, o_n_ = alp.mtie_phase(data, rate, taus)
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
stride = 1
tau = 1280
rate = 2.1
data = np.random.random(100000)
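    # Timing pattern used throughout this script: t1..t2 brackets the
    # pure-python call, t3..t4 the numpy call, and the speed-up is the
    # ratio of the two elapsed times.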
t1 = time.time()
o_taus, o_dev, o_err, o_n = alt.mtie_phase_purepy(data, rate, taus)
t2 = time.time()
t3 = time.time()
o_taus_, o_dev_, o_err_, o_n_ = alp.mtie_phase(data, rate, taus)
t4 = time.time()
#print (o_dev, o_dev_)
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# MTIE()
#######################
print "\ntesting mtie()"
data = np.random.random(1000)
taus = [1, 3, 5, 16, 128]
rates = [1, 20, 10.7]
strides = [1, 10, 7]
for rate in rates:
for stride in strides:
#print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride)
o_taus, o_dev, o_err, o_n = alt.mtie(data, rate, taus)
o_taus_, o_dev_, o_err_, o_n_ = alp.mtie(data, rate, taus)
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
stride = 1
tau = 128000
rate = 2.1
data = np.random.random(10000)
t1 = time.time()
o_taus, o_dev, o_err, o_n = alt.mtie(data, rate, taus)
t2 = time.time()
t3 = time.time()
o_taus_, o_dev_, o_err_, o_n_ = alp.mtie(data, rate, taus)
t4 = time.time()
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# THREE_CORNERED_HAT_PHASE()
#######################
print "\ntesting three_cornered_hat_phase()"
stride = 1
taus = [2, 4, 8, 16]
rate = 2.1
pdata_ab = np.random.random(100000)
pdata_bc = np.random.random(100000)
pdata_ca = np.random.random(100000)
t1 = time.time()
function = alt.adev
tau, dev_a = alt.three_cornered_hat_phase(pdata_ab, pdata_bc, pdata_ca, rate, taus, function)
t2 = time.time()
t3 = time.time()
function = alp.adev
tau_, dev_a_ = alp.three_cornered_hat_phase(pdata_ab, pdata_bc, pdata_ca, rate, taus, function)
t4 = time.time()
assert np.allclose(tau, tau_)
assert np.allclose(dev_a, dev_a_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# TIERMS_PHASE()
#######################
print "\ntesting tierms_phase()"
data = np.random.random(1000)
taus = [1, 3, 5, 16, 128]
rates = [1, 20, 10.7]
strides = [1, 10, 7]
for rate in rates:
for stride in strides:
#print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride)
o_taus, o_dev, o_err, o_n = alt.tierms_phase(data, rate, taus)
o_taus_, o_dev_, o_err_, o_n_ = alp.tierms_phase(data, rate, taus)
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
stride = 1
tau = 16
rate = 2.1
data = np.random.random(100000)
t1 = time.time()
o_taus, o_dev, o_err, o_n = alt.tierms_phase(data, rate, taus)
t2 = time.time()
t3 = time.time()
o_taus_, o_dev_, o_err_, o_n_ = alp.tierms_phase(data, rate, taus)
t4 = time.time()
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# TIERMS()
#######################
print "\ntesting tierms()"
data = np.random.random(1000)
taus = [1, 3, 5, 16, 128]
rates = [1, 20, 10.7]
strides = [1, 10, 7]
for rate in rates:
for stride in strides:
#print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride)
o_taus, o_dev, o_err, o_n = alt.tierms(data, rate, taus)
o_taus_, o_dev_, o_err_, o_n_ = alp.tierms(data, rate, taus)
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
stride = 1
tau = 16
rate = 2.1
data = np.random.random(100000)
t1 = time.time()
o_taus, o_dev, o_err, o_n = alt.tierms(data, rate, taus)
t2 = time.time()
t3 = time.time()
o_taus_, o_dev_, o_err_, o_n_ = alp.tierms(data, rate, taus)
t4 = time.time()
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# TOTDEV_PHASE()
#######################
print "\ntesting totdev_phase()"
data = np.random.random(1000)
taus = [1, 3, 5, 16, 128]
rates = [1, 20, 10.7]
strides = [1, 10, 7]
for rate in rates:
for stride in strides:
#print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride)
o_taus, o_dev, o_err, o_n = alt.totdev_phase(data, rate, taus)
o_taus_, o_dev_, o_err_, o_n_ = alp.totdev_phase(data, rate, taus)
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
stride = 1
tau = 16
rate = 2.1
data = np.random.random(100000)
t1 = time.time()
o_taus, o_dev, o_err, o_n = alt.totdev_phase(data, rate, taus)
t2 = time.time()
t3 = time.time()
o_taus_, o_dev_, o_err_, o_n_ = alp.totdev_phase(data, rate, taus)
t4 = time.time()
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# TOTDEV()
#######################
print "\ntesting totdev()"
data = np.random.random(1000)
taus = [1, 3, 5, 16, 128]
rates = [1, 20, 10.7]
strides = [1, 10, 7]
for rate in rates:
for stride in strides:
#print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride)
o_taus, o_dev, o_err, o_n = alt.totdev(data, rate, taus)
o_taus_, o_dev_, o_err_, o_n_ = alp.totdev(data, rate, taus)
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
stride = 1
tau = 16
rate = 2.1
data = np.random.random(100000)
t1 = time.time()
o_taus, o_dev, o_err, o_n = alt.totdev(data, rate, taus)
t2 = time.time()
t3 = time.time()
o_taus_, o_dev_, o_err_, o_n_ = alp.totdev(data, rate, taus)
t4 = time.time()
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# OHDEV()
#######################
print "\ntesting ohdev()"
data = np.random.random(10000)
taus = [1, 3, 5, 16, 128]
rates = [1, 20, 10.7]
strides = [1, 10, 7]
for rate in rates:
for stride in strides:
#print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride)
o_taus, o_dev, o_err, o_n = alt.ohdev(data, rate, taus)
o_taus_, o_dev_, o_err_, o_n_ = alp.ohdev(data, rate, taus)
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
stride = 1
tau = 16
rate = 2.1
data = np.random.random(100000)
t1 = time.time()
o_taus, o_dev, o_err, o_n = alt.ohdev(data, rate, taus)
t2 = time.time()
t3 = time.time()
o_taus_, o_dev_, o_err_, o_n_ = alp.ohdev(data, rate, taus)
t4 = time.time()
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# OHDEV_PHASE()
#######################
print "\ntesting ohdev_phase()"
data = np.random.random(10000)
taus = [1, 3, 5, 16, 128]
rates = [1, 20, 10.7]
strides = [1, 10, 7]
for rate in rates:
for stride in strides:
#print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride)
o_taus, o_dev, o_err, o_n = alt.ohdev_phase(data, rate, taus)
o_taus_, o_dev_, o_err_, o_n_ = alp.ohdev_phase(data, rate, taus)
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
stride = 1
tau = 16
rate = 2.1
data = np.random.random(100000)
t1 = time.time()
o_taus, o_dev, o_err, o_n = alt.ohdev_phase(data, rate, taus)
t2 = time.time()
t3 = time.time()
o_taus_, o_dev_, o_err_, o_n_ = alp.ohdev_phase(data, rate, taus)
t4 = time.time()
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
    #######################
    # CALC_HDEV_PHASE()
    #######################
    print "\ntesting calc_hdev_phase()"
data = np.random.random(10000)
taus = [1, 3, 5, 16, 128]
rates = [1, 20, 10.7]
strides = [1, 10, 7]
for tau in taus:
for rate in rates:
for stride in strides:
#print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride)
mj = tau
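                # mj is the averaging factor in samples; calc_hdev_phase()
                # computes the Hadamard deviation for this single tau value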
dev, deverr, n = alt.calc_hdev_phase(data, rate, mj, stride)
dev_, deverr_, n_ = alp.calc_hdev_phase(data, rate, mj, stride)
assert np.isclose(dev, dev_)
assert np.isclose(n, n_)
assert np.isclose(deverr, deverr_)
stride = 1
    tau = mj = 16  # averaging factor actually passed to the timed calls below
rate = 2.0
t1 = time.time()
dev, deverr, n = alt.calc_hdev_phase(data, rate, mj, stride)
t2 = time.time()
t3 = time.time()
dev_, deverr_, n_ = alp.calc_hdev_phase(data, rate, mj, stride)
t4 = time.time()
assert np.isclose(dev, dev_)
assert np.isclose(n, n_)
assert np.isclose(deverr, deverr_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# HDEV_PHASE()
#######################
print "\ntesting hdev_phase()"
data = np.random.random(10000)
taus = [1, 3, 5, 16, 128]
rates = [1, 20, 10.7]
strides = [1, 10, 7]
for rate in rates:
for stride in strides:
#print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride)
o_taus, o_dev, o_err, o_n = alt.hdev_phase(data, rate, taus)
o_taus_, o_dev_, o_err_, o_n_ = alp.hdev_phase(data, rate, taus)
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
stride = 1
tau = 16
rate = 2.1
data = np.random.random(100000)
t1 = time.time()
o_taus, o_dev, o_err, o_n = alt.hdev_phase(data, rate, taus)
t2 = time.time()
t3 = time.time()
o_taus_, o_dev_, o_err_, o_n_ = alp.hdev_phase(data, rate, taus)
t4 = time.time()
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# HDEV()
#######################
print "\ntesting hdev()"
data = np.random.random(10000)
taus = [1, 3, 5, 16, 128]
rates = [1, 20, 10.7]
strides = [1, 10, 7]
for rate in rates:
for stride in strides:
#print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride)
o_taus, o_dev, o_err, o_n = alt.hdev(data, rate, taus)
o_taus_, o_dev_, o_err_, o_n_ = alp.hdev(data, rate, taus)
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
stride = 1
tau = 16
rate = 2.1
data = np.random.random(100000)
t1 = time.time()
o_taus, o_dev, o_err, o_n = alt.hdev(data, rate, taus)
t2 = time.time()
t3 = time.time()
o_taus_, o_dev_, o_err_, o_n_ = alp.hdev(data, rate, taus)
t4 = time.time()
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# OADEV_PHASE()
#######################
print "\ntesting oadev_phase()"
data = np.random.random(10000)
taus = [1, 3, 5, 16, 128]
rates = [1, 20, 10.7]
strides = [1, 10, 7]
for rate in rates:
for stride in strides:
#print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride)
o_taus, o_dev, o_err, o_n = alt.oadev_phase(data, rate, taus)
o_taus_, o_dev_, o_err_, o_n_ = alp.oadev_phase(data, rate, taus)
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
stride = 1
tau = 16
rate = 2.1
data = np.random.random(100000)
t1 = time.time()
o_taus, o_dev, o_err, o_n = alt.oadev_phase(data, rate, taus)
t2 = time.time()
t3 = time.time()
o_taus_, o_dev_, o_err_, o_n_ = alp.oadev_phase(data, rate, taus)
t4 = time.time()
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# OADEV()
#######################
print "\ntesting oadev()"
data = np.random.random(10000)
taus = [1, 3, 5, 16, 128]
rates = [1, 20, 10.7]
strides = [1, 10, 7]
for rate in rates:
for stride in strides:
#print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride)
o_taus, o_dev, o_err, o_n = alt.oadev(data, rate, taus)
o_taus_, o_dev_, o_err_, o_n_ = alp.oadev(data, rate, taus)
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
stride = 1
tau = 16
rate = 2.1
data = np.random.random(100000)
t1 = time.time()
o_taus, o_dev, o_err, o_n = alt.oadev(data, rate, taus)
t2 = time.time()
t3 = time.time()
o_taus_, o_dev_, o_err_, o_n_ = alp.oadev(data, rate, taus)
t4 = time.time()
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# ADEV_PHASE()
#######################
print "\ntesting adev_phase()"
data = np.random.random(10000)
taus = [1, 3, 5, 16, 128]
rates = [1, 20, 10.7]
strides = [1, 10, 7]
for rate in rates:
for stride in strides:
#print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride)
o_taus, o_dev, o_err, o_n = alt.adev_phase(data, rate, taus)
o_taus_, o_dev_, o_err_, o_n_ = alp.adev_phase(data, rate, taus)
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
stride = 1
tau = 16
rate = 2.1
data = np.random.random(100000)
t1 = time.time()
o_taus, o_dev, o_err, o_n = alt.adev_phase(data, rate, taus)
t2 = time.time()
t3 = time.time()
o_taus_, o_dev_, o_err_, o_n_ = alp.adev_phase(data, rate, taus)
t4 = time.time()
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# ADEV()
#######################
print "\ntesting adev()"
data = np.random.random(10000)
taus = [1, 3, 5, 16, 128]
rates = [1, 20, 10.7]
strides = [1, 10, 7]
for rate in rates:
for stride in strides:
#print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride)
o_taus, o_dev, o_err, o_n = alt.adev(data, rate, taus)
o_taus_, o_dev_, o_err_, o_n_ = alp.adev(data, rate, taus)
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
stride = 1
tau = 16
rate = 2.1
data = np.random.random(100000)
t1 = time.time()
o_taus, o_dev, o_err, o_n = alt.adev(data, rate, taus)
t2 = time.time()
t3 = time.time()
o_taus_, o_dev_, o_err_, o_n_ = alp.adev(data, rate, taus)
t4 = time.time()
assert np.allclose(o_taus, o_taus_)
assert np.allclose(o_dev, o_dev_)
assert np.allclose(o_err, o_err_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# CALC_ADEV_PHASE()
#######################
print "\ntesting calc_adev_phase()"
data = np.random.random(10000)
taus = [1, 3, 5, 16, 128]
rates = [1, 20, 10.7]
strides = [1, 10, 7]
for tau in taus:
for rate in rates:
for stride in strides:
#print "TAU: %i, RATE: %2.2f, STRIDE: %i" % (tau, rate, stride)
mj = tau
dev, deverr, n = alt.calc_adev_phase(data, rate, mj, stride)
dev_, deverr_, n_ = alp.adev_phase_calc(data, rate, mj, stride)
assert np.isclose(dev, dev_)
assert np.isclose(n, n_)
assert np.isclose(deverr, deverr_)
stride = 1
tau = 16
mj = tau
rate = 2.0
t1 = time.time()
dev, deverr, n = alt.calc_adev_phase(data, rate, mj, stride)
t2 = time.time()
t3 = time.time()
dev_, deverr_, n_ = alp.adev_phase_calc(data, rate, mj, stride)
t4 = time.time()
assert np.isclose(dev, dev_)
assert np.isclose(n, n_)
assert np.isclose(deverr, deverr_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# TAU_M()
#######################
print "\ntesting tau_m()"
taus = [1, 2, 4, 8, 16, -4, 10000, -3.1, 3.141]
data = np.random.random(10000)
rates = [1, 2, 7.1, 123.12]
for rate in rates:
m, taus2 = alt.tau_m(data, rate, taus)
data_, m_, taus2_ = alp.tau_m(data, rate, taus)
assert np.allclose(m, m_)
assert np.allclose(taus2, taus2_)
taus = np.random.randint(low=-100, high=10000, size=(10000,))
rate = 1.234
t1 = time.time()
m, taus2 = alt.tau_m(data, rate, taus)
t2 = time.time()
t3 = time.time()
data_, m_, taus2_ = alp.tau_m(data, rate, taus)
t4 = time.time()
assert np.allclose(m, m_)
assert np.allclose(taus2, taus2_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# FREQUENCY2PHASE()
#######################
print "\ntesting frequency2phase()"
freqdata = np.random.random(10000)
rates = [1, 2, 7.1, 123.12]
for rate in rates:
phase = alt.frequency2phase(freqdata, rate)
phase_ = alp.frequency2phase(freqdata, rate)
assert len(phase) == len(phase_)
assert np.allclose(phase, phase_)
freqdata = np.random.random(100000)
t1 = time.time()
phase = alt.frequency2phase(freqdata, rate)
t2 = time.time()
t3 = time.time()
phase_ = alp.frequency2phase(freqdata, rate)
t4 = time.time()
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# TDEV_PHASE()
#######################
print "\ntesting tdev_phase()"
rate = 1.0  # 1 Hz sample rate
obs_s = 10000  # observation length, seconds
n_samples = rate * obs_s
t = np.arange(0, n_samples)
phase = np.random.random(n_samples) + np.sin(t / n_samples)
taus = [4]
t1 = time.time()
taus2, td, tde, ns = alt.tdev_phase(phase, rate, taus)
t2 = time.time()
t3 = time.time()
taus2_, td_, tde_, ns_ = alp.tdev_phase(phase, rate, taus)
t4 = time.time()
assert np.allclose(taus2, taus2_)
assert np.allclose(td, td_)
assert np.allclose(tde, tde_)
assert np.allclose(ns, ns_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# TDEV()
#######################
print "\ntesting tdev()"
rate = 2.0  # 2 Hz sample rate
obs_s = 32768  # observation length, seconds
n_samples = rate * obs_s
t = np.arange(0, n_samples)
phase = np.random.random(n_samples) + np.sin(t / n_samples)
taus = [1, 2, 4]
t1 = time.time()
taus2, td, tde, ns = alt.tdev(phase, rate, taus)
t2 = time.time()
t3 = time.time()
taus2_, td_, tde_, ns_ = alp.tdev(phase, rate, taus)
t4 = time.time()
assert np.allclose(taus2, taus2_)
assert np.allclose(td, td_)
assert np.allclose(tde, tde_)
assert np.allclose(ns, ns_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# MDEV_PHASE()
#######################
print "\ntesting mdev_phase()"
rate = 1.0  # 1 Hz sample rate
obs_s = 10000  # observation length, seconds
n_samples = rate * obs_s
t = np.arange(0, n_samples)
phase = np.random.random(n_samples) + np.sin(t / n_samples)
taus = [4]
t1 = time.time()
taus2, td, tde, ns = alt.mdev_phase(phase, rate, taus)
t2 = time.time()
t3 = time.time()
taus2_, td_, tde_, ns_ = alp.mdev_phase(phase, rate, taus)
t4 = time.time()
assert np.allclose(taus2, taus2_)
assert np.allclose(td, td_)
assert np.allclose(tde, tde_)
assert np.allclose(ns, ns_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
#######################
# MDEV()
#######################
print "\ntesting mdev()"
rate = 2.0  # 2 Hz sample rate
obs_s = 32768  # observation length, seconds
n_samples = rate * obs_s
t = np.arange(0, n_samples)
phase = np.random.random(n_samples) + np.sin(t / n_samples)
taus = [1, 2, 4]
t1 = time.time()
taus2, td, tde, ns = alt.mdev(phase, rate, taus)
t2 = time.time()
t3 = time.time()
taus2_, td_, tde_, ns_ = alp.mdev(phase, rate, taus)
t4 = time.time()
assert np.allclose(taus2, taus2_)
assert np.allclose(td, td_)
assert np.allclose(tde, tde_)
assert np.allclose(ns, ns_)
print "Original: %2.3fs" % (t2 - t1)
print "New: %2.3fs" % (t4 - t3)
print "Speedup: %2.2fx" % ((t2 - t1) / (t4 - t3))
print "\nAll DONE!"
| gpl-3.0 | -3,758,425,467,134,489,000 | 29.955711 | 99 | 0.503539 | false | 2.659457 | true | false | false |
jwesstrom/cleverMirror | testStuff/animationTesting.py | 1 | 1907 | from kivy.app import App
from kivy.uix.widget import Widget
from kivy.clock import Clock
from kivy.graphics import Color, Ellipse
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.animation import Animation
class timerWidget(Widget):
def __init__(self, x=350, y =350, sizeWH=200, angle=0, id='counter',**kwargs):
# make sure we aren't overriding any important functionality
super(timerWidget, self).__init__(**kwargs)
self.id = id
self.angle = angle
self.sizeWH = sizeWH
self.pos = [x,y]
#self.size = sizeWH
        Clock.schedule_interval(self.update, 1.0 / 25)  # ~25 fps; 1/25 is 0 under Python 2 integer division
with self.canvas:
Color(0.0, 1.0, 0.0)
Ellipse(pos=(self.pos[0], self.pos[1]), size=(self.sizeWH, sizeWH), group='a', angle_start=0, angle_end = 10)
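            # The Ellipse above starts as a thin 10-degree wedge; update()
            # below advances angle_end one degree per tick, so the green arc
            # sweeps around until it reaches a full 360 degrees.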
def update(self, dt, x=None, y=None):
if self.angle < 360:
self.angle = self.angle + 1
else:
self.angle = 360
self.canvas.get_group('a')[1].angle_end = self.angle
class windowWidget(Widget):
def __init__(self, **kwargs):
super(windowWidget, self).__init__(**kwargs)
pos = [30,350,90]
size = 100
id = 'c'
aDiv = 360-45
for i in range(4):
div = (pos[0]*i) + (pos[2]*i)
aDiv = (aDiv) - (45*i)
self.add_widget(timerWidget(div,pos[1],size,aDiv,id+str(i)))
def update(self, dt):
for i in self.children:
if i.angle == 360:
#self.outAnim(i)
#i.x = 300
print i.x
def outAnim(self, obj):
anim = Animation(x=50, t='in_quad')
anim.start(obj)
class GfxApp(App):
def build(self):
gWindow = windowWidget()
        Clock.schedule_interval(gWindow.update, 1.0 / 30)  # ~30 fps; 1/30 is 0 under Python 2 integer division
return gWindow
if __name__ == '__main__':
GfxApp().run()
| gpl-3.0 | 749,775,957,101,477,100 | 24.77027 | 121 | 0.558469 | false | 3.381206 | false | false | false |
dimatura/pydisp | pydisp/cli.py | 1 | 1988 | # -*- coding: utf-8 -*-
import os
import base64
import time
import click
import pydisp
@click.command(context_settings={'help_option_names':['-h','--help']})
@click.argument('images', nargs=-1, type=click.Path(exists=True), required=True)
@click.option('--title', '-t', type=str, help='Window title')
@click.option('--win', type=str, help='Window ID. By default, a generated unique id. %p will use path as id. %f will use filename.')
@click.option('--width', '-w', type=int, help='Initial window width.')
@click.option('--pause', '-p', default=0.2, help='Pause between images in seconds')
@click.option('--port', type=int, help='Display server port. Default is read from .display/config.json, or 8000.')
@click.option('--hostname', type=str, help='Display server hostname. Default is read from .display/config.json, or localhost.')
def main(images, title, win, width, pause, port, hostname):
# TODO tiling option
if port is not None:
pydisp.CONFIG.PORT = port
if hostname is not None:
pydisp.CONFIG.HOSTNAME = hostname
for img_fname in images:
click.echo('loading {}'.format(img_fname))
base, ext = os.path.splitext(img_fname)
ext = ext.lower().replace('.', '').replace('jpg', 'jpeg')
if not pydisp.is_valid_image_mime_type(ext):
raise click.BadParameter('unrecognized image format: {}'.format(ext))
with open(img_fname, 'rb') as f:
encoded = pydisp.b64_encode(f.read(), ext)
        if not title:
            title = img_fname
        if win == '%p':
            win = img_fname
        elif win == '%f':
            win = os.path.basename(img_fname)
pydisp.pane('image',
win=win,
title=title,
content={'src': encoded,
'width': width,
})
        if (len(images) > 1) and (pause > 0.0):
time.sleep(pause)
if __name__ == '__main__':
main()
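# Illustrative invocation (file names below are placeholders):
#
#     python -m pydisp.cli photo1.png photo2.jpg --pause 0.5 --win %f
#
# Each image is read, base64-encoded and pushed to the display server as an
# 'image' pane; --port and --hostname override the values read from the
# pydisp configuration.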
| mit | -8,539,764,335,290,668,000 | 35.145455 | 132 | 0.579477 | false | 3.674677 | false | false | false |
derkling/trappy | tests/test_run.py | 1 | 12778 | # Copyright 2015-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import matplotlib
import os
import re
import shutil
import tempfile
import unittest
from test_thermal import BaseTestThermal
import trappy
import utils_tests
class TestRun(BaseTestThermal):
def __init__(self, *args, **kwargs):
super(TestRun, self).__init__(*args, **kwargs)
self.map_label = {"00000000,00000006": "A57", "00000000,00000039": "A53"}
def test_run_has_all_classes(self):
"""The Run() class has members for all classes"""
run = trappy.Run()
for attr in run.class_definitions.iterkeys():
self.assertTrue(hasattr(run, attr))
def test_run_has_all_classes_scope_all(self):
"""The Run() class has members for all classes with scope=all"""
run = trappy.Run(scope="all")
for attr in run.class_definitions.iterkeys():
self.assertTrue(hasattr(run, attr))
def test_run_has_all_classes_scope_thermal(self):
"""The Run() class has only members for thermal classes with scope=thermal"""
run = trappy.Run(scope="thermal")
for attr in run.thermal_classes.iterkeys():
self.assertTrue(hasattr(run, attr))
for attr in run.sched_classes.iterkeys():
self.assertFalse(hasattr(run, attr))
def test_run_has_all_classes_scope_sched(self):
"""The Run() class has only members for sched classes with scope=sched"""
run = trappy.Run(scope="sched")
for attr in run.thermal_classes.iterkeys():
self.assertFalse(hasattr(run, attr))
for attr in run.sched_classes.iterkeys():
self.assertTrue(hasattr(run, attr))
def test_run_accepts_name(self):
"""The Run() class has members for all classes"""
run = trappy.Run(name="foo")
self.assertEquals(run.name, "foo")
def test_fail_if_no_trace_dat(self):
"""Raise an IOError with the path if there's no trace.dat and trace.txt"""
os.remove("trace.txt")
self.assertRaises(IOError, trappy.Run)
cwd = os.getcwd()
        try:
            trappy.Run(cwd)
            self.fail("trappy.Run() should have raised IOError")
        except IOError as exception:
            self.assertTrue(cwd in str(exception))
def test_other_directory(self):
"""Run() can grab the trace.dat from other directories"""
other_random_dir = tempfile.mkdtemp()
os.chdir(other_random_dir)
dfr = trappy.Run(self.out_dir).thermal.data_frame
self.assertTrue(len(dfr) > 0)
self.assertEquals(os.getcwd(), other_random_dir)
def test_run_arbitrary_trace_txt(self):
"""Run() works if the trace is called something other than trace.txt"""
arbitrary_trace_name = "my_trace.txt"
shutil.move("trace.txt", arbitrary_trace_name)
dfr = trappy.Run(arbitrary_trace_name).thermal.data_frame
self.assertTrue(len(dfr) > 0)
self.assertFalse(os.path.exists("trace.txt"))
# As there is no raw trace requested. The mytrace.raw.txt
# Should not have been generated
self.assertFalse(os.path.exists("mytrace.raw.txt"))
def test_run_autonormalize_time(self):
"""Run() normalizes by default"""
run = trappy.Run()
self.assertEquals(round(run.thermal.data_frame.index[0], 7), 0)
def test_run_dont_normalize_time(self):
"""Run() doesn't normalize if asked not to"""
run = trappy.Run(normalize_time=False)
self.assertNotEquals(round(run.thermal.data_frame.index[0], 7), 0)
def test_run_basetime(self):
"""Test that basetime calculation is correct"""
run = trappy.Run(normalize_time=False)
basetime = run.thermal.data_frame.index[0]
self.assertEqual(run.get_basetime(), basetime)
def test_run_duration(self):
"""Test that duration calculation is correct"""
run = trappy.Run(normalize_time=False)
duration = run.thermal_governor.data_frame.index[-1] - run.thermal.data_frame.index[0]
self.assertEqual(run.get_duration(), duration)
def test_run_normalize_time(self):
"""Run().normalize_time() works accross all classes"""
run = trappy.Run(normalize_time=False)
prev_inpower_basetime = run.cpu_in_power.data_frame.index[0]
prev_inpower_last = run.cpu_in_power.data_frame.index[-1]
basetime = run.thermal.data_frame.index[0]
run.normalize_time(basetime)
self.assertEquals(round(run.thermal.data_frame.index[0], 7), 0)
exp_inpower_first = prev_inpower_basetime - basetime
self.assertEquals(round(run.cpu_in_power.data_frame.index[0] - exp_inpower_first, 7), 0)
exp_inpower_last = prev_inpower_last - basetime
self.assertEquals(round(run.cpu_in_power.data_frame.index[-1] - exp_inpower_last, 7), 0)
def test_get_all_freqs_data(self):
"""Test get_all_freqs_data()"""
allfreqs = trappy.Run().get_all_freqs_data(self.map_label)
self.assertEquals(allfreqs[1][1]["A53_freq_out"].iloc[3], 850)
self.assertEquals(allfreqs[1][1]["A53_freq_in"].iloc[1], 850)
self.assertEquals(allfreqs[0][1]["A57_freq_out"].iloc[2], 1100)
self.assertTrue("gpu_freq_in" in allfreqs[2][1].columns)
# Make sure there are no NaNs in the middle of the array
self.assertTrue(allfreqs[0][1]["A57_freq_in"].notnull().all())
def test_plot_freq_hists(self):
"""Test that plot_freq_hists() doesn't bomb"""
run = trappy.Run()
_, axis = matplotlib.pyplot.subplots(nrows=2)
run.plot_freq_hists(self.map_label, axis)
matplotlib.pyplot.close('all')
def test_plot_load(self):
"""Test that plot_load() doesn't explode"""
run = trappy.Run()
run.plot_load(self.map_label, title="Util")
_, ax = matplotlib.pyplot.subplots()
run.plot_load(self.map_label, ax=ax)
def test_plot_normalized_load(self):
"""Test that plot_normalized_load() doesn't explode"""
run = trappy.Run()
_, ax = matplotlib.pyplot.subplots()
run.plot_normalized_load(self.map_label, ax=ax)
def test_plot_allfreqs(self):
"""Test that plot_allfreqs() doesn't bomb"""
run = trappy.Run()
run.plot_allfreqs(self.map_label)
matplotlib.pyplot.close('all')
_, axis = matplotlib.pyplot.subplots(nrows=2)
run.plot_allfreqs(self.map_label, ax=axis)
matplotlib.pyplot.close('all')
def test_trace_metadata(self):
"""Test if metadata gets populated correctly"""
expected_metadata = {}
expected_metadata["version"] = "6"
expected_metadata["cpus"] = "6"
run = trappy.Run()
for key, value in expected_metadata.items():
self.assertTrue(hasattr(run, "_" + key))
self.assertEquals(getattr(run, "_" + key), value)
def test_missing_metadata(self):
"""Test if trappy.Run() works with a trace missing metadata info"""
lines = []
with open("trace.txt", "r") as fil:
lines += fil.readlines()
lines = lines[7:]
fil.close()
with open("trace.txt", "w") as fil:
fil.write("".join(lines))
fil.close()
run = trappy.Run()
self.assertEquals(run._cpus, None)
self.assertEquals(run._version, None)
self.assertTrue(len(run.thermal.data_frame) > 0)
@unittest.skipUnless(utils_tests.trace_cmd_installed(),
"trace-cmd not installed")
class TestRunRawDat(utils_tests.SetupDirectory):
def __init__(self, *args, **kwargs):
super(TestRunRawDat, self).__init__(
[("raw_trace.dat", "trace.dat")],
*args,
**kwargs)
def test_raw_dat(self):
"""Tests an event that relies on raw parsing"""
run = trappy.Run()
self.assertTrue(hasattr(run, "sched_switch"))
self.assertTrue(len(run.sched_switch.data_frame) > 0)
self.assertTrue("prev_comm" in run.sched_switch.data_frame.columns)
def test_raw_dat_arb_name(self):
"""Tests an event that relies on raw parsing with arbitrary .dat file name"""
arbitrary_name = "my_trace.dat"
shutil.move("trace.dat", arbitrary_name)
run = trappy.Run(arbitrary_name)
self.assertTrue(os.path.isfile("my_trace.raw.txt"))
self.assertTrue(hasattr(run, "sched_switch"))
self.assertTrue(len(run.sched_switch.data_frame) > 0)
class TestRunRawBothTxt(utils_tests.SetupDirectory):
def __init__(self, *args, **kwargs):
super(TestRunRawBothTxt, self).__init__(
[("raw_trace.txt", "trace.txt"),
("raw_trace.raw.txt", "trace.raw.txt")],
*args,
**kwargs)
def test_both_txt_files(self):
"""test raw parsing for txt files"""
self.assertFalse(os.path.isfile("trace.dat"))
run = trappy.Run()
self.assertTrue(hasattr(run, "sched_switch"))
self.assertTrue(len(run.sched_switch.data_frame) > 0)
def test_both_txt_arb_name(self):
"""Test raw parsing for txt files arbitrary name"""
arbitrary_name = "my_trace.txt"
arbitrary_name_raw = "my_trace.raw.txt"
shutil.move("trace.txt", arbitrary_name)
shutil.move("trace.raw.txt", arbitrary_name_raw)
run = trappy.Run(arbitrary_name)
self.assertTrue(hasattr(run, "sched_switch"))
self.assertTrue(len(run.sched_switch.data_frame) > 0)
class TestRunSched(utils_tests.SetupDirectory):
"""Tests using a trace with only sched info and no (or partial) thermal"""
def __init__(self, *args, **kwargs):
super(TestRunSched, self).__init__(
[("trace_empty.txt", "trace.txt")],
*args,
**kwargs)
def test_run_basetime_empty(self):
"""Test that basetime is 0 if data frame of all data objects is empty"""
run = trappy.Run(normalize_time=False)
self.assertEqual(run.get_basetime(), 0)
def test_run_normalize_some_tracepoints(self):
"""Test that normalizing time works if not all the tracepoints are in the trace"""
with open("trace.txt", "a") as fil:
fil.write(" kworker/4:1-1219 [004] 508.424826: thermal_temperature: thermal_zone=exynos-therm id=0 temp_prev=24000 temp=24000")
run = trappy.Run()
self.assertEqual(run.thermal.data_frame.index[0], 0)
@unittest.skipUnless(utils_tests.trace_cmd_installed(),
"trace-cmd not installed")
class TestTraceDat(utils_tests.SetupDirectory):
"""Test that trace.dat handling work"""
def __init__(self, *args, **kwargs):
super(TestTraceDat, self).__init__(
[("trace.dat", "trace.dat")],
*args, **kwargs)
def test_do_txt_if_not_there(self):
"""Create trace.txt if it's not there"""
self.assertFalse(os.path.isfile("trace.txt"))
trappy.Run()
found = False
with open("trace.txt") as fin:
for line in fin:
if re.search("thermal", line):
found = True
break
self.assertTrue(found)
def test_do_raw_txt_if_not_there(self):
"""Create trace.raw.txt if it's not there"""
self.assertFalse(os.path.isfile("trace.raw.txt"))
trappy.Run()
found = False
with open("trace.raw.txt") as fin:
for line in fin:
if re.search("thermal", line):
found = True
                    break
        self.assertTrue(found)
def test_run_arbitrary_trace_dat(self):
"""Run() works if asked to parse a binary trace with a filename other than trace.dat"""
arbitrary_trace_name = "my_trace.dat"
shutil.move("trace.dat", arbitrary_trace_name)
dfr = trappy.Run(arbitrary_trace_name).thermal.data_frame
self.assertTrue(os.path.exists("my_trace.txt"))
self.assertTrue(os.path.exists("my_trace.raw.txt"))
self.assertTrue(len(dfr) > 0)
self.assertFalse(os.path.exists("trace.dat"))
self.assertFalse(os.path.exists("trace.txt"))
self.assertFalse(os.path.exists("trace.raw.txt"))
| apache-2.0 | 8,696,708,809,115,110,000 | 32.362924 | 147 | 0.613789 | false | 3.628052 | true | false | false |
Wevolver/HAVE | multiple/repositories/backends/git/main.py | 1 | 6122 | import calendar
import collections
import io
import itertools
import stat
import time
import dulwich
import dulwich.objects
from multiple import repositories
from multiple import utils
class RepositoryGit(repositories.RepositoryBase):
def __init__(self, dulwich_repository):
self.backend = dulwich_repository
def commit(self, index, message=b'', author=None, committer=None,
at_time=None):
# @todo time support
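        # Note: this only writes the commit object to the object store; it
        # does not set parent commits or move any branch reference to it.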
if not committer:
committer = author
if not at_time:
at_time = time.gmtime()
commit = dulwich.objects.Commit()
commit.tree = index.root_tree
commit.author = author
commit.committer = committer
commit.commit_time = commit.author_time = calendar.timegm(at_time)
commit.commit_timezone = commit.author_timezone = at_time.tm_isdst
commit.message = message
self.backend.object_store.add_object(commit)
def open_index_at(self, reference):
root_tree = None
if reference:
commit = self.backend[reference]
if isinstance(commit, dulwich.objects.Commit):
root_tree = self.backend[commit.tree]
else:
raise ValueError(
"bad reference '%r' is not a "
"dulwich.objects.Commit" % commit
)
else:
root_tree = dulwich.objects.Tree()
return MemoryIndex(root_tree, self.backend.object_store)
def get(self, path, reference, default=None):
result = default
commit = self.backend[reference]
if isinstance(commit, dulwich.objects.Commit):
            tree = self.backend[commit.tree]
            # lookup_path() needs a lookup callable and returns (mode, sha)
            try:
                _mode, blob_sha = tree.lookup_path(
                    self.backend.object_store.__getitem__, path)
                blob_object = self.backend[blob_sha]
            except KeyError:
                blob_object = None
            if isinstance(blob_object, dulwich.objects.Blob):
                result = blob_object.data
            if isinstance(result, bytes):
                result = io.BytesIO(result)
return result
class MemoryIndex(object):
def __init__(self, root_tree, object_store):
"""
Args:
root_tree (dulwich.objects.Tree):
The root tree of the index
object_store (dulwich.object_store.BaseObjectStore):
The object store where to store the objects.
"""
self.object_store = object_store
self._objects = dict(self._get_objects(root_tree))
@property
def root_tree(self):
return self._objects[b''].copy()
@property
def objects(self):
return {
path: obj.copy()
for path, obj in self._objects.items()
}
def _get_objects(self, start_tree):
"""
Load in memory all the needed objects
Returns:
(Dict(Tuple(str, dulwich.objects.ShaFile)))
"""
contents = self.object_store.iter_tree_contents(start_tree.id, True)
for entry in contents:
yield entry.path, self.object_store[entry.sha]
def _get_or_create_tree(self, path):
try:
return self._objects[path]
except KeyError:
tree = dulwich.objects.Tree()
self._objects[path] = tree
return tree
def get(self, path, default=None):
return self._objects.get(path, default)
def add(self, contents):
# @todo a true bulk add without considering every file individually
for content, path in contents:
blob = dulwich.objects.Blob.from_string(content.read())
self._add(path, blob)
def _add(self, path, blob, file_mode=0o100644):
processed_path = ProcessedPath.from_path(path)
self.object_store.add_object(blob)
self._objects[processed_path.rootless_path] = blob
paths = list(processed_path.intermediate_paths())
# first update the leaf tree with the blob objects to add
leaf_path = paths[-1]
leaf_tree = self._get_or_create_tree(leaf_path)
leaf_tree.add(processed_path.basename, file_mode, blob.id)
self.object_store.add_object(leaf_tree)
# iterate the other trees from the nearest until the root
# and update them
indexed_paths = list(enumerate(reversed(paths)))
for idx, intermediate_path in indexed_paths:
if intermediate_path:
# if intermediate_path == '' it's the root tree
_, parent_path = indexed_paths[idx + 1]
parent_tree = self._get_or_create_tree(parent_path)
child_tree = self._get_or_create_tree(intermediate_path)
child_idx = processed_path.tokens_n - 1 - idx
child_name = processed_path.tokens[child_idx]
parent_tree.add(child_name, stat.S_IFDIR, child_tree.id)
self.object_store.add_object(child_tree)
self.object_store.add_object(parent_tree)
else:
break
_ProcessedPath = collections.namedtuple(
'_ProcessedPath',
(
'path', # intial path with a leading /
'dirname', # dirname extracted from the path
'basename', # basename extracted from the path
'tokens', # tokens of the dirname
'tokens_n' # number of tokens
)
)
class ProcessedPath(_ProcessedPath):
@classmethod
def from_path(cls, path):
if not path.startswith(b'/'):
path = b'/' + path
dirname, basename = utils.paths.path_split(path)
dirname_tokens = dirname.split(b'/')
n_dirname_tokens = len(dirname_tokens)
return cls(path, dirname, basename, dirname_tokens, n_dirname_tokens)
def intermediate_paths(self):
"""
Generate the intermediate paths with the ProcessedPath.tokens
values.
b'/data/files/data.json' -> ['', 'data', 'data/files']
Returns:
(iter)
"""
return itertools.accumulate(self.tokens, utils.paths.path_join)
@property
def rootless_path(self):
return self.path[1:]
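# Rough usage sketch of the classes above (illustrative only; the dulwich
# repository below is an assumption, not something this module constructs):
#
#     repo = RepositoryGit(dulwich.repo.Repo('/path/to/repo'))
#     index = repo.open_index_at(head_commit_sha)
#     index.add([(io.BytesIO(b'new contents'), b'docs/readme.txt')])
#     repo.commit(index, message=b'update docs',
#                 author=b'Dev <dev@example.com>')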
| gpl-3.0 | -2,013,109,953,934,375,700 | 28.574879 | 77 | 0.583633 | false | 4.081333 | false | false | false |
goodfeli/pylearn2 | pylearn2/datasets/dense_design_matrix.py | 2 | 54738 | """
The DenseDesignMatrix class and related code. Functionality for representing
data that can be described as a dense matrix (rather than a sparse matrix)
with each row containing an example and each column corresponding to a
different feature. DenseDesignMatrix also supports other "views" of the data,
for example a dataset of images can be viewed either as a matrix of flattened
images or as a stack of 2D multi-channel images. However, the images must all
be the same size, so that each image may be mapped to a matrix row by the same
transformation.
"""
__authors__ = "Ian Goodfellow and Mehdi Mirza"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import functools
import logging
import warnings
import numpy as np
from theano.compat.six.moves import xrange
from pylearn2.datasets import cache
from pylearn2.utils.iteration import (
FiniteDatasetIterator,
resolve_iterator_class
)
import copy
# Don't import tables initially, since it might not be available
# everywhere.
tables = None
from pylearn2.datasets.dataset import Dataset
from pylearn2.datasets import control
from pylearn2.space import CompositeSpace, Conv2DSpace, VectorSpace, IndexSpace
from pylearn2.utils import safe_zip
from pylearn2.utils.exc import reraise_as
from pylearn2.utils.rng import make_np_rng
from pylearn2.utils import contains_nan
from theano import config
logger = logging.getLogger(__name__)
def ensure_tables():
"""
Makes sure tables module has been imported
"""
global tables
if tables is None:
import tables
class DenseDesignMatrix(Dataset):
"""
A class for representing datasets that can be stored as a dense design
matrix (and optionally, associated targets).
Parameters
----------
X : ndarray, 2-dimensional, optional
Should be supplied if `topo_view` is not. A design \
matrix of shape (number examples, number features) \
that defines the dataset.
topo_view : ndarray, optional
Should be supplied if X is not. An array whose first \
dimension is of length number examples. The remaining \
dimensions are examples with topological significance, \
e.g. for images the remaining axes are rows, columns, \
and channels.
y : ndarray, optional
Targets for each example (e.g., class ids, values to be predicted
in a regression task).
Currently three formats are supported:
- None:
Pass `None` if there are no target values. In this case the
dataset may not be some tasks such as supervised learning
or evaluation of a supervised learning system, but it can
be used for some other tasks. For example, a supervised
learning system can make predictions on it, or an unsupervised
learning system can be trained on it.
- 1D ndarray of integers:
This format may be used when the targets are class labels.
In this format, the array should have one entry for each
example. Each entry should be an integer, in the range
[0, N) where N is the number of classes.
This is the format that the `SVM` class expects.
- 2D ndarray, data type optional:
This is the most common format and can be used for a variety
of problem types. Each row of the matrix becomes the target
for a different example. Specific models / costs can interpret
this target vector differently. For example, the `Linear`
output layer for the `MLP` class expects the target for each
example to be a vector of real-valued regression targets. (It
can be a vector of size one if you only have one regression
target). The `Softmax` output layer of the `MLP` class expects
the target to be a vector of N elements, where N is the number
of classes, and expects all but one of the elements to 0. One
element should have value 1., and the index of this element
identifies the target class.
view_converter : object, optional
An object for converting between the design matrix \
stored internally and the topological view of the data.
axes: tuple, optional
The axes ordering of the provided topo_view. Must be some permutation
of ('b', 0, 1, 'c') where 'b' indicates the axis indexing examples,
0 and 1 indicate the row/cols dimensions and 'c' indicates the axis
indexing color channels.
rng : object, optional
A random number generator used for picking random \
indices into the design matrix when choosing minibatches.
X_labels : int, optional
If X contains labels then X_labels must be passed to indicate the
total number of possible labels e.g. the size of a the vocabulary
when X contains word indices. This will make the set use
IndexSpace.
y_labels : int, optional
If y contains labels then y_labels must be passed to indicate the
total number of possible labels e.g. 10 for the MNIST dataset
where the targets are numbers. This will make the set use
IndexSpace.
See Also
--------
DenseDesignMatrixPytables : Use this class if your data is too big to fit
in memory.
Notes
-----
- What kind of data can be stored in this way?
A design matrix is a matrix where each row contains a single
example. Each column within the row is a feature of that example.
By dense, we mean that every entry in the matrix is explicitly given a
value.
Examples of datasets that can be stored this way include MNIST and
CIFAR10.
Some datasets cannot be stored as a design matrix. For example, a
collection of images, each image having a different size, can't be
stored in this way, because we can't reshape each image to the same
length of matrix row.
Some datasets can, conceptually, be represented as a design matrix, but
it may not be efficient to store them as dense matrices. For example,
a dataset of sentences with a bag of words representation, might have a
very high number of features but most of the values are zero, so it
would be better to store the data as a sparse matrix.
- What if my examples aren't best thought of as vectors?
The DenseDesignMatrix class supports two views of the data, the "design
matrix view" in which each example is just a vector, and the
"topological view" in which each example is formatted using some kind
of data structure with meaningful topology. For example, a dataset of
images can be viewed as a design matrix where each row contains a
flattened version of each image, or it can be viewed as a 4D tensor,
where each example is a 3D subtensor, with one axis corresponding to
rows of the image, one axis corresponding to columns of the image, and
one axis corresponding to the color channels. This structure can be
thought of as having meaningful topology because neighboring
coordinates on the row and column axes correspond to neighboring
pixels in the image.
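    Examples
    --------
    A minimal usage sketch; the arrays below are made-up placeholder data,
    not taken from any real dataset:
    >>> import numpy as np
    >>> topo = np.random.random((10, 8, 8, 3))   # ('b', 0, 1, 'c') batch
    >>> targets = np.random.random((10, 1))
    >>> dataset = DenseDesignMatrix(topo_view=topo, y=targets)
    >>> dataset.X.shape                          # design matrix view
    (10, 192)
    >>> dataset.get_topological_view().shape     # back to the 4D view
    (10, 8, 8, 3)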
"""
_default_seed = (17, 2, 946)
def __init__(self, X=None, topo_view=None, y=None,
view_converter=None, axes=('b', 0, 1, 'c'),
rng=_default_seed, preprocessor=None, fit_preprocessor=False,
X_labels=None, y_labels=None):
self.X = X
self.y = y
self.view_converter = view_converter
self.X_labels = X_labels
self.y_labels = y_labels
self._check_labels()
if topo_view is not None:
assert view_converter is None
self.set_topological_view(topo_view, axes)
else:
assert X is not None, ("DenseDesignMatrix needs to be provided "
"with either topo_view, or X")
if view_converter is not None:
# Get the topo_space (usually Conv2DSpace) from the
# view_converter
if not hasattr(view_converter, 'topo_space'):
raise NotImplementedError("Not able to get a topo_space "
"from this converter: %s"
% view_converter)
# self.X_topo_space stores a "default" topological space that
# will be used only when self.iterator is called without a
# data_specs, and with "topo=True", which is deprecated.
self.X_topo_space = view_converter.topo_space
else:
self.X_topo_space = None
# Update data specs, if not done in set_topological_view
X_source = 'features'
if X_labels is None:
X_space = VectorSpace(dim=X.shape[1])
else:
if X.ndim == 1:
dim = 1
else:
dim = X.shape[-1]
X_space = IndexSpace(dim=dim, max_labels=X_labels)
if y is None:
space = X_space
source = X_source
else:
if y.ndim == 1:
dim = 1
else:
dim = y.shape[-1]
if y_labels is not None:
y_space = IndexSpace(dim=dim, max_labels=y_labels)
else:
y_space = VectorSpace(dim=dim)
y_source = 'targets'
space = CompositeSpace((X_space, y_space))
source = (X_source, y_source)
self.data_specs = (space, source)
self.X_space = X_space
self.compress = False
self.design_loc = None
self.rng = make_np_rng(rng, which_method="random_integers")
# Defaults for iterators
self._iter_mode = resolve_iterator_class('sequential')
self._iter_topo = False
self._iter_targets = False
self._iter_data_specs = (self.X_space, 'features')
if preprocessor:
preprocessor.apply(self, can_fit=fit_preprocessor)
self.preprocessor = preprocessor
def _check_labels(self):
"""Sanity checks for X_labels and y_labels."""
if self.X_labels is not None:
assert self.X is not None
assert self.view_converter is None
assert self.X.ndim <= 2
assert np.all(self.X < self.X_labels)
if self.y_labels is not None:
assert self.y is not None
assert self.y.ndim <= 2
assert np.all(self.y < self.y_labels)
@functools.wraps(Dataset.iterator)
def iterator(self, mode=None, batch_size=None, num_batches=None,
rng=None, data_specs=None,
return_tuple=False):
[mode, batch_size, num_batches, rng, data_specs] = self._init_iterator(
mode, batch_size, num_batches, rng, data_specs)
# If there is a view_converter, we have to use it to convert
# the stored data for "features" into one that the iterator
# can return.
space, source = data_specs
if isinstance(space, CompositeSpace):
sub_spaces = space.components
sub_sources = source
else:
sub_spaces = (space,)
sub_sources = (source,)
convert = []
for sp, src in safe_zip(sub_spaces, sub_sources):
if src == 'features' and \
getattr(self, 'view_converter', None) is not None:
conv_fn = (
lambda batch, self=self, space=sp:
self.view_converter.get_formatted_batch(batch, space))
else:
conv_fn = None
convert.append(conv_fn)
return FiniteDatasetIterator(self,
mode(self.get_num_examples(),
batch_size,
num_batches,
rng),
data_specs=data_specs,
return_tuple=return_tuple,
convert=convert)
def get_data(self):
"""
Returns all the data, as it is internally stored.
The definition and format of these data are described in
`self.get_data_specs()`.
Returns
-------
data : numpy matrix or 2-tuple of matrices
The data
"""
if self.y is None:
return self.X
else:
return (self.X, self.y)
def use_design_loc(self, path):
"""
Calling this function changes the serialization behavior of the object
permanently.
If this function has been called, when the object is serialized, it
will save the design matrix to `path` as a .npy file rather
than pickling the design matrix along with the rest of the dataset
object. This avoids pickle's unfortunate behavior of using 2X the RAM
when unpickling.
TODO: Get rid of this logic, use custom array-aware picklers (joblib,
custom pylearn2 serialization format).
Parameters
----------
path : str
The path to save the design matrix to
"""
if not path.endswith('.npy'):
raise ValueError("path should end with '.npy'")
self.design_loc = path
def get_topo_batch_axis(self):
"""
The index of the axis of the batches
Returns
-------
axis : int
The axis of a topological view of this dataset that corresponds
to indexing over different examples.
"""
axis = self.view_converter.axes.index('b')
return axis
def enable_compression(self):
"""
If called, when pickled the dataset will be saved using only
8 bits per element.
.. todo::
Not sure this should be implemented as something a base dataset
does. Perhaps as a mixin that specific datasets (i.e. CIFAR10)
inherit from.
"""
self.compress = True
def __getstate__(self):
"""
.. todo::
WRITEME
"""
rval = copy.copy(self.__dict__)
# TODO: Not sure this should be implemented as something a base dataset
# does. Perhaps as a mixin that specific datasets (i.e. CIFAR10)
# inherit from.
if self.compress:
rval['compress_min'] = rval['X'].min(axis=0)
# important not to do -= on this line, as that will modify the
# original object
rval['X'] = rval['X'] - rval['compress_min']
rval['compress_max'] = rval['X'].max(axis=0)
rval['compress_max'][rval['compress_max'] == 0] = 1
rval['X'] *= 255. / rval['compress_max']
rval['X'] = np.cast['uint8'](rval['X'])
if self.design_loc is not None:
# TODO: Get rid of this logic, use custom array-aware picklers
# (joblib, custom pylearn2 serialization format).
np.save(self.design_loc, rval['X'])
del rval['X']
return rval
def __setstate__(self, d):
"""
.. todo::
WRITEME
"""
if d['design_loc'] is not None:
if control.get_load_data():
fname = cache.datasetCache.cache_file(d['design_loc'])
d['X'] = np.load(fname)
else:
d['X'] = None
if d['compress']:
X = d['X']
mx = d['compress_max']
mn = d['compress_min']
del d['compress_max']
del d['compress_min']
d['X'] = 0
self.__dict__.update(d)
if X is not None:
self.X = np.cast['float32'](X) * mx / 255. + mn
else:
self.X = None
else:
self.__dict__.update(d)
# To be able to unpickle older data after the addition of
# the data_specs mechanism
if not all(m in d for m in ('data_specs', 'X_space',
'_iter_data_specs', 'X_topo_space')):
X_space = VectorSpace(dim=self.X.shape[1])
X_source = 'features'
if self.y is None:
space = X_space
source = X_source
else:
y_space = VectorSpace(dim=self.y.shape[-1])
y_source = 'targets'
space = CompositeSpace((X_space, y_space))
source = (X_source, y_source)
self.data_specs = (space, source)
self.X_space = X_space
self._iter_data_specs = (X_space, X_source)
view_converter = d.get('view_converter', None)
if view_converter is not None:
# Get the topo_space from the view_converter
if not hasattr(view_converter, 'topo_space'):
raise NotImplementedError("Not able to get a topo_space "
"from this converter: %s"
% view_converter)
# self.X_topo_space stores a "default" topological space that
# will be used only when self.iterator is called without a
# data_specs, and with "topo=True", which is deprecated.
self.X_topo_space = view_converter.topo_space
def _apply_holdout(self, _mode="sequential", train_size=0, train_prop=0):
"""
This function splits the dataset according to the number of
train_size if defined by the user with respect to the mode provided
by the user. Otherwise it will use the
train_prop to divide the dataset into a training and holdout
validation set. This function returns the training and validation
dataset.
Parameters
-----------
_mode : WRITEME
train_size : int
Number of examples that will be assigned to the training dataset.
train_prop : float
Proportion of training dataset split.
Returns
-------
WRITEME
"""
"""
This function splits the dataset according to the number of
train_size if defined by the user with respect to the mode provided
by the user. Otherwise it will use the
train_prop to divide the dataset into a training and holdout
validation set. This function returns the training and validation
dataset.
Parameters
-----------
_mode : WRITEME
train_size : int
Number of examples that will be assigned to the training dataset.
train_prop : float
Proportion of training dataset split.
Returns
-------
WRITEME
"""
if train_size != 0:
size = train_size
elif train_prop != 0:
size = np.round(self.get_num_examples() * train_prop)
else:
raise ValueError("Initialize either split ratio and split size to "
"non-zero value.")
if size < self.get_num_examples() - size:
dataset_iter = self.iterator(
mode=_mode,
batch_size=(self.get_num_examples() - size))
valid = dataset_iter.next()
train = dataset_iter.next()[:(self.get_num_examples()
- valid.shape[0])]
else:
dataset_iter = self.iterator(mode=_mode,
batch_size=size)
train = dataset_iter.next()
valid = dataset_iter.next()[:(self.get_num_examples()
- train.shape[0])]
return (train, valid)
def split_dataset_nfolds(self, nfolds=0):
"""
This function splits the dataset into to the number of n folds
given by the user. Returns an array of folds.
Parameters
----------
nfolds : int, optional
            The number of folds for the validation set.
Returns
-------
WRITEME
"""
folds_iter = self.iterator(mode="sequential", num_batches=nfolds)
folds = list(folds_iter)
return folds
def split_dataset_holdout(self, train_size=0, train_prop=0):
"""
This function splits the dataset according to the number of
train_size if defined by the user.
Otherwise it will use the train_prop to divide the dataset into a
training and holdout validation set. This function returns the
training and validation dataset.
Parameters
----------
train_size : int
Number of examples that will be assigned to the training
dataset.
train_prop : float
Proportion of dataset split.
"""
return self._apply_holdout("sequential", train_size, train_prop)
def bootstrap_nfolds(self, nfolds, rng=None):
"""
This function splits the dataset using the random_slice and into the
n folds. Returns the folds.
Parameters
----------
nfolds : int
The number of folds for the dataset.
rng : WRITEME
Random number generation class to be used.
"""
folds_iter = self.iterator(mode="random_slice",
num_batches=nfolds,
rng=rng)
folds = list(folds_iter)
return folds
def bootstrap_holdout(self, train_size=0, train_prop=0, rng=None):
"""
This function splits the dataset according to the number of
train_size defined by the user.
Parameters
----------
train_size : int
Number of examples that will be assigned to the training dataset.
        train_prop : float
            Proportion of training dataset split.
rng : WRITEME
Random number generation class to be used.
"""
return self._apply_holdout("random_slice", train_size, train_prop)
def get_stream_position(self):
"""
If we view the dataset as providing a stream of random examples to
read, the object returned uniquely identifies our current position in
that stream.
"""
return copy.copy(self.rng)
def set_stream_position(self, pos):
"""
.. todo::
WRITEME properly
Return to a state specified by an object returned from
get_stream_position.
Parameters
----------
pos : object
WRITEME
"""
self.rng = copy.copy(pos)
def restart_stream(self):
"""
Return to the default initial state of the random example stream.
"""
self.reset_RNG()
def reset_RNG(self):
"""
Restore the default seed of the rng used for choosing random
examples.
"""
if 'default_rng' not in dir(self):
self.default_rng = make_np_rng(None, [17, 2, 946],
which_method="random_integers")
self.rng = copy.copy(self.default_rng)
def apply_preprocessor(self, preprocessor, can_fit=False):
"""
.. todo::
WRITEME
Parameters
----------
preprocessor : object
preprocessor object
can_fit : bool, optional
WRITEME
"""
preprocessor.apply(self, can_fit)
def get_topological_view(self, mat=None):
"""
Convert an array (or the entire dataset) to a topological view.
Parameters
----------
mat : ndarray, 2-dimensional, optional
An array containing a design matrix representation of training
examples. If unspecified, the entire dataset (`self.X`) is used
instead.
This parameter is not named X because X is generally used to
refer to the design matrix for the current problem. In this
case we want to make it clear that `mat` need not be the design
matrix defining the dataset.
"""
if self.view_converter is None:
raise Exception("Tried to call get_topological_view on a dataset "
"that has no view converter")
if mat is None:
mat = self.X
return self.view_converter.design_mat_to_topo_view(mat)
def get_formatted_view(self, mat, dspace):
"""
Convert an array (or the entire dataset) to a destination space.
Parameters
----------
mat : ndarray, 2-dimensional
An array containing a design matrix representation of
training examples.
dspace : Space
A Space we want the data in mat to be formatted in.
It can be a VectorSpace for a design matrix output,
a Conv2DSpace for a topological output for instance.
Valid values depend on the type of `self.view_converter`.
Returns
-------
WRITEME
"""
if self.view_converter is None:
raise Exception("Tried to call get_formatted_view on a dataset "
"that has no view converter")
self.X_space.np_validate(mat)
return self.view_converter.get_formatted_batch(mat, dspace)
def get_weights_view(self, mat):
"""
.. todo::
WRITEME properly
Return a view of mat in the topology preserving format. Currently
the same as get_topological_view.
Parameters
----------
mat : ndarray, 2-dimensional
WRITEME
"""
if self.view_converter is None:
raise Exception("Tried to call get_weights_view on a dataset "
"that has no view converter")
return self.view_converter.design_mat_to_weights_view(mat)
def set_topological_view(self, V, axes=('b', 0, 1, 'c')):
"""
Sets the dataset to represent V, where V is a batch
of topological views of examples.
.. todo::
Why is this parameter named 'V'?
Parameters
----------
V : ndarray
An array containing a design matrix representation of
training examples.
axes : tuple, optional
The axes ordering of the provided topo_view. Must be some
permutation of ('b', 0, 1, 'c') where 'b' indicates the axis
indexing examples, 0 and 1 indicate the row/cols dimensions and
'c' indicates the axis indexing color channels.
"""
if len(V.shape) != len(axes):
raise ValueError("The topological view must have exactly 4 "
"dimensions, corresponding to %s" % str(axes))
assert not contains_nan(V)
rows = V.shape[axes.index(0)]
cols = V.shape[axes.index(1)]
channels = V.shape[axes.index('c')]
self.view_converter = DefaultViewConverter([rows, cols, channels],
axes=axes)
self.X = self.view_converter.topo_view_to_design_mat(V)
# self.X_topo_space stores a "default" topological space that
# will be used only when self.iterator is called without a
# data_specs, and with "topo=True", which is deprecated.
self.X_topo_space = self.view_converter.topo_space
assert not contains_nan(self.X)
# Update data specs
X_space = VectorSpace(dim=self.X.shape[1])
X_source = 'features'
if self.y is None:
space = X_space
source = X_source
else:
if self.y.ndim == 1:
dim = 1
else:
dim = self.y.shape[-1]
# This is to support old pickled models
if getattr(self, 'y_labels', None) is not None:
y_space = IndexSpace(dim=dim, max_labels=self.y_labels)
elif getattr(self, 'max_labels', None) is not None:
y_space = IndexSpace(dim=dim, max_labels=self.max_labels)
else:
y_space = VectorSpace(dim=dim)
y_source = 'targets'
space = CompositeSpace((X_space, y_space))
source = (X_source, y_source)
self.data_specs = (space, source)
self.X_space = X_space
self._iter_data_specs = (X_space, X_source)
def get_design_matrix(self, topo=None):
"""
Return topo (a batch of examples in topology preserving format),
in design matrix format.
Parameters
----------
topo : ndarray, optional
An array containing a topological representation of training
examples. If unspecified, the entire dataset (`self.X`) is used
instead.
Returns
-------
WRITEME
"""
if topo is not None:
if self.view_converter is None:
raise Exception("Tried to convert from topological_view to "
"design matrix using a dataset that has no "
"view converter")
return self.view_converter.topo_view_to_design_mat(topo)
return self.X
def set_design_matrix(self, X):
"""
.. todo::
WRITEME
Parameters
----------
X : ndarray
WRITEME
"""
assert len(X.shape) == 2
assert not contains_nan(X)
self.X = X
def get_targets(self):
"""
.. todo::
WRITEME
"""
return self.y
def get_batch_design(self, batch_size, include_labels=False):
"""
.. todo::
WRITEME
Parameters
----------
batch_size : int
WRITEME
include_labels : bool
WRITEME
"""
try:
idx = self.rng.randint(self.X.shape[0] - batch_size + 1)
except ValueError:
if batch_size > self.X.shape[0]:
reraise_as(ValueError("Requested %d examples from a dataset "
"containing only %d." %
(batch_size, self.X.shape[0])))
raise
rx = self.X[idx:idx + batch_size, :]
if include_labels:
if self.y is None:
return rx, None
ry = self.y[idx:idx + batch_size]
return rx, ry
rx = np.cast[config.floatX](rx)
return rx
def get_batch_topo(self, batch_size, include_labels=False):
"""
.. todo::
WRITEME
Parameters
----------
batch_size : int
WRITEME
include_labels : bool
WRITEME
"""
if include_labels:
batch_design, labels = self.get_batch_design(batch_size, True)
else:
batch_design = self.get_batch_design(batch_size)
rval = self.view_converter.design_mat_to_topo_view(batch_design)
if include_labels:
return rval, labels
return rval
@functools.wraps(Dataset.get_num_examples)
def get_num_examples(self):
return self.X.shape[0]
def view_shape(self):
"""
.. todo::
WRITEME
"""
return self.view_converter.view_shape()
def weights_view_shape(self):
"""
.. todo::
WRITEME
"""
return self.view_converter.weights_view_shape()
def has_targets(self):
"""
.. todo::
WRITEME
"""
return self.y is not None
def restrict(self, start, stop):
"""
.. todo::
WRITEME properly
Restricts the dataset to include only the examples
in range(start, stop). Ignored if both arguments are None.
Parameters
----------
start : int
start index
stop : int
stop index
"""
assert (start is None) == (stop is None)
if start is None:
return
assert start >= 0
assert stop > start
assert stop <= self.X.shape[0]
assert self.X.shape[0] == self.y.shape[0]
self.X = self.X[start:stop, :]
if self.y is not None:
self.y = self.y[start:stop, :]
assert self.X.shape[0] == self.y.shape[0]
assert self.X.shape[0] == stop - start
def convert_to_one_hot(self, min_class=0):
"""
.. todo::
WRITEME properly
If y exists and is a vector of ints, converts it to a binary matrix
Otherwise will raise some exception
Parameters
----------
min_class : int
WRITEME
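        Examples
        --------
        With hypothetical labels `y = [0, 2, 1]` and `min_class=0`, the
        resulting `self.y` is a 3 x 3 matrix with a single one per row, at
        columns 0, 2 and 1 respectively.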
"""
if self.y is None:
raise ValueError("Called convert_to_one_hot on a "
"DenseDesignMatrix with no labels.")
if self.y.ndim != 1:
raise ValueError("Called convert_to_one_hot on a "
"DenseDesignMatrix whose labels aren't scalar.")
if 'int' not in str(self.y.dtype):
raise ValueError("Called convert_to_one_hot on a "
"DenseDesignMatrix whose labels aren't "
"integer-valued.")
self.y = self.y - min_class
if self.y.min() < 0:
raise ValueError("We do not support negative classes. You can use "
"the min_class argument to remap negative "
"classes to positive values, but we require this "
"to be done explicitly so you are aware of the "
"remapping.")
# Note: we don't check that the minimum occurring class is exactly 0,
# since this dataset could be just a small subset of a larger dataset
# and may not contain all the classes.
num_classes = self.y.max() + 1
y = np.zeros((self.y.shape[0], num_classes))
for i in xrange(self.y.shape[0]):
y[i, self.y[i]] = 1
self.y = y
# Update self.data_specs with the updated dimension of self.y
init_space, source = self.data_specs
X_space, init_y_space = init_space.components
new_y_space = VectorSpace(dim=num_classes)
new_space = CompositeSpace((X_space, new_y_space))
self.data_specs = (new_space, source)
def adjust_for_viewer(self, X):
"""
.. todo::
WRITEME
Parameters
----------
X : ndarray
The data to be adjusted
"""
return X / np.abs(X).max()
def adjust_to_be_viewed_with(self, X, ref, per_example=None):
"""
.. todo::
WRITEME
Parameters
----------
X : int
WRITEME
ref : float
WRITEME
per_example : obejct, optional
WRITEME
"""
if per_example is not None:
logger.warning("ignoring per_example")
return np.clip(X / np.abs(ref).max(), -1., 1.)
def get_data_specs(self):
"""
Returns the data_specs specifying how the data is internally stored.
This is the format the data returned by `self.get_data()` will be.
"""
return self.data_specs
def set_view_converter_axes(self, axes):
"""
.. todo::
WRITEME properly
Change the axes of the view_converter, if any.
This function is only useful if you intend to call self.iterator
without data_specs, and with "topo=True", which is deprecated.
Parameters
----------
axes : WRITEME
WRITEME
"""
assert self.view_converter is not None
self.view_converter.set_axes(axes)
# Update self.X_topo_space, which stores the "default"
# topological space, which is the topological output space
# of the view_converter
self.X_topo_space = self.view_converter.topo_space
class DenseDesignMatrixPyTables(DenseDesignMatrix):
"""
DenseDesignMatrix based on PyTables
Parameters
----------
X : ndarray, 2-dimensional, optional
Should be supplied if `topo_view` is not. A design matrix of shape
(number examples, number features) that defines the dataset.
topo_view : ndarray, optional
Should be supplied if X is not. An array whose first dimension is of
        length number examples. The remaining dimensions are examples with
topological significance, e.g. for images the remaining axes are rows,
columns, and channels.
y : ndarray, 1-dimensional(?), optional
Labels or targets for each example. The semantics here are not quite
nailed down for this yet.
view_converter : object, optional
An object for converting between design matrices and topological views.
Currently DefaultViewConverter is the only type available but later we
may want to add one that uses the retina encoding that the U of T group
uses.
axes : WRITEME
WRITEME
rng : object, optional
A random number generator used for picking random indices into the
design matrix when choosing minibatches.
"""
_default_seed = (17, 2, 946)
def __init__(self,
X=None,
topo_view=None,
y=None,
view_converter=None,
axes=('b', 0, 1, 'c'),
rng=_default_seed):
super_self = super(DenseDesignMatrixPyTables, self)
super_self.__init__(X=X,
topo_view=topo_view,
y=y,
view_converter=view_converter,
axes=axes,
rng=rng)
ensure_tables()
if not hasattr(self, 'filters'):
self.filters = tables.Filters(complib='blosc', complevel=5)
def set_design_matrix(self, X, start=0):
"""
.. todo::
WRITEME
"""
assert len(X.shape) == 2
assert not contains_nan(X)
DenseDesignMatrixPyTables.fill_hdf5(file_handle=self.h5file,
data_x=X,
start=start)
def set_topological_view(self, V, axes=('b', 0, 1, 'c'), start=0):
"""
Sets the dataset to represent V, where V is a batch
of topological views of examples.
.. todo::
Why is this parameter named 'V'?
Parameters
----------
V : ndarray
An array containing a design matrix representation of training \
examples.
axes : tuple, optional
The axes ordering of the provided topo_view. Must be some
permutation of ('b', 0, 1, 'c') where 'b' indicates the axis
indexing examples, 0 and 1 indicate the row/cols dimensions and
'c' indicates the axis indexing color channels.
start : int
The start index to write data.
"""
assert not contains_nan(V)
rows = V.shape[axes.index(0)]
cols = V.shape[axes.index(1)]
channels = V.shape[axes.index('c')]
self.view_converter = DefaultViewConverter([rows, cols, channels],
axes=axes)
X = self.view_converter.topo_view_to_design_mat(V)
assert not contains_nan(X)
DenseDesignMatrixPyTables.fill_hdf5(file_handle=self.h5file,
data_x=X,
start=start)
def init_hdf5(self, path, shapes):
"""
Initializes the hdf5 file into which the data will be stored. This must
be called before calling fill_hdf5.
Parameters
----------
path : string
The name of the hdf5 file.
shapes : tuple
The shapes of X and y.
"""
x_shape, y_shape = shapes
# make pytables
ensure_tables()
h5file = tables.openFile(path, mode="w", title="SVHN Dataset")
gcolumns = h5file.createGroup(h5file.root, "Data", "Data")
atom = (tables.Float32Atom() if config.floatX == 'float32'
else tables.Float64Atom())
h5file.createCArray(gcolumns, 'X', atom=atom, shape=x_shape,
title="Data values", filters=self.filters)
h5file.createCArray(gcolumns, 'y', atom=atom, shape=y_shape,
title="Data targets", filters=self.filters)
return h5file, gcolumns
@staticmethod
def fill_hdf5(file_handle,
data_x,
data_y=None,
node=None,
start=0,
batch_size=5000):
"""
Saves the data to the hdf5 file.
PyTables tends to crash if you write large amounts of data into them
at once. As such this function writes data in batches.
Parameters
----------
file_handle : hdf5 file handle
Handle to an hdf5 object.
data_x : nd array
X data. Must be the same shape as specified to init_hdf5.
data_y : nd array, optional
y data. Must be the same shape as specified to init_hdf5.
node : string, optional
The hdf5 node into which the data should be stored.
start : int
The start index to write data.
batch_size : int, optional
The size of the batch to be saved.
"""
if node is None:
node = file_handle.getNode('/', 'Data')
data_size = data_x.shape[0]
last = np.floor(data_size / float(batch_size)) * batch_size
for i in xrange(0, data_size, batch_size):
stop = (i + np.mod(data_size, batch_size) if i >= last
else i + batch_size)
assert len(range(start + i, start + stop)) == len(range(i, stop))
assert (start + stop) <= (node.X.shape[0])
node.X[start + i: start + stop, :] = data_x[i:stop, :]
if data_y is not None:
node.y[start + i: start + stop, :] = data_y[i:stop, :]
file_handle.flush()
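    # Rough sketch of how a subclass might use the two methods above; the
    # names below are placeholders, not taken from an actual subclass:
    #
    #     h5file, node = self.init_hdf5('data.h5',
    #                                   ((n_examples, n_features),
    #                                    (n_examples, n_targets)))
    #     DenseDesignMatrixPyTables.fill_hdf5(h5file, data_x=X, data_y=y,
    #                                         node=node)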
def resize(self, h5file, start, stop):
"""
Resizes the X and y tables. This must be called before calling
fill_hdf5.
Parameters
----------
h5file : hdf5 file handle
Handle to an hdf5 object.
start : int
The start index to write data.
stop : int
The index of the record following the last record to be written.
"""
ensure_tables()
        # TODO is there any smarter and more efficient way to do this?
data = h5file.getNode('/', "Data")
try:
gcolumns = h5file.createGroup('/', "Data_", "Data")
except tables.exceptions.NodeError:
h5file.removeNode('/', "Data_", 1)
gcolumns = h5file.createGroup('/', "Data_", "Data")
start = 0 if start is None else start
stop = gcolumns.X.nrows if stop is None else stop
atom = (tables.Float32Atom() if config.floatX == 'float32'
else tables.Float64Atom())
x = h5file.createCArray(gcolumns,
'X',
atom=atom,
shape=((stop - start, data.X.shape[1])),
title="Data values",
filters=self.filters)
y = h5file.createCArray(gcolumns,
'y',
atom=atom,
shape=((stop - start, data.y.shape[1])),
title="Data targets",
filters=self.filters)
x[:] = data.X[start:stop]
y[:] = data.y[start:stop]
h5file.removeNode('/', "Data", 1)
h5file.renameNode('/', "Data", "Data_")
h5file.flush()
return h5file, gcolumns
class DefaultViewConverter(object):
"""
.. todo::
WRITEME
Parameters
----------
shape : list
[num_rows, num_cols, channels]
axes : tuple
The axis ordering to use in topological views of the data. Must be some
permutation of ('b', 0, 1, 'c'). Default: ('b', 0, 1, 'c')
"""
def __init__(self, shape, axes=('b', 0, 1, 'c')):
self.shape = shape
self.pixels_per_channel = 1
for dim in self.shape[:-1]:
self.pixels_per_channel *= dim
self.axes = axes
self._update_topo_space()
def view_shape(self):
"""
.. todo::
WRITEME
"""
return self.shape
def weights_view_shape(self):
"""
.. todo::
WRITEME
"""
return self.shape
def design_mat_to_topo_view(self, design_matrix):
"""
Returns a topological view/copy of design matrix.
Parameters
----------
design_matrix: numpy.ndarray
A design matrix with data in rows. Data is assumed to be laid out in
memory according to the axis order ('b', 'c', 0, 1)
returns: numpy.ndarray
A matrix with axis order given by self.axes and batch shape given by
self.shape (if you reordered self.shape to match self.axes, as
self.shape is always in 'c', 0, 1 order).
This will try to return
a view into design_matrix if possible; otherwise it will allocate a
new ndarray.
"""
if len(design_matrix.shape) != 2:
raise ValueError("design_matrix must have 2 dimensions, but shape "
"was %s." % str(design_matrix.shape))
expected_row_size = np.prod(self.shape)
if design_matrix.shape[1] != expected_row_size:
raise ValueError("This DefaultViewConverter's self.shape = %s, "
"for a total size of %d, but the design_matrix's "
"row size was different (%d)." %
(str(self.shape),
expected_row_size,
design_matrix.shape[1]))
bc01_shape = tuple([design_matrix.shape[0], ] + # num. batches
# Maps the (0, 1, 'c') of self.shape to ('c', 0, 1)
[self.shape[i] for i in (2, 0, 1)])
topo_array_bc01 = design_matrix.reshape(bc01_shape)
axis_order = [('b', 'c', 0, 1).index(axis) for axis in self.axes]
return topo_array_bc01.transpose(*axis_order)
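    # Illustrative example (added comment, not in the original source): with
    # shape=[32, 32, 3] and the default axes ('b', 0, 1, 'c'), a (100, 3072)
    # design matrix comes back as a (100, 32, 32, 3) topological view:
    #
    #     converter = DefaultViewConverter([32, 32, 3])
    #     topo = converter.design_mat_to_topo_view(np.zeros((100, 3072)))
    #     assert topo.shape == (100, 32, 32, 3)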
def design_mat_to_weights_view(self, X):
"""
.. todo::
WRITEME
"""
rval = self.design_mat_to_topo_view(X)
# weights view is always for display
rval = np.transpose(rval, tuple(self.axes.index(axis)
for axis in ('b', 0, 1, 'c')))
return rval
def topo_view_to_design_mat(self, topo_array):
"""
Returns a design matrix view/copy of topological matrix.
Parameters
----------
topo_array: numpy.ndarray
An N-D array with axis order given by self.axes. Non-batch axes'
dimension sizes must agree with corresponding sizes in self.shape.
returns: numpy.ndarray
A design matrix with data in rows. Data, is laid out in memory
according to the default axis order ('b', 'c', 0, 1). This will
try to return a view into topo_array if possible; otherwise it will
allocate a new ndarray.
"""
for shape_elem, axis in safe_zip(self.shape, (0, 1, 'c')):
if topo_array.shape[self.axes.index(axis)] != shape_elem:
                raise ValueError(
                    "topo_array's %s axis has a different size "
                    "(%d) from the corresponding size (%d) in "
                    "self.shape.\n"
                    "  self.shape:       %s (uses standard axis order: 0, 1, "
                    "'c')\n"
                    "  self.axes:        %s\n"
                    "  topo_array.shape: %s (should be in self.axes' order)"
                    % (axis,
                       topo_array.shape[self.axes.index(axis)],
                       shape_elem,
                       str(self.shape),
                       str(self.axes),
                       str(topo_array.shape)))
topo_array_bc01 = topo_array.transpose([self.axes.index(ax)
for ax in ('b', 'c', 0, 1)])
return topo_array_bc01.reshape((topo_array_bc01.shape[0],
np.prod(topo_array_bc01.shape[1:])))
def get_formatted_batch(self, batch, dspace):
"""
.. todo::
WRITEME properly
Reformat batch from the internal storage format into dspace.
"""
if isinstance(dspace, VectorSpace):
# If a VectorSpace is requested, batch should already be in that
# space. We call np_format_as anyway, in case the batch needs to be
# cast to dspace.dtype. This also validates the batch shape, to
# check that it's a valid batch in dspace.
return dspace.np_format_as(batch, dspace)
elif isinstance(dspace, Conv2DSpace):
# design_mat_to_topo_view will return a batch formatted
# in a Conv2DSpace, but not necessarily the right one.
topo_batch = self.design_mat_to_topo_view(batch)
if self.topo_space.axes != self.axes:
warnings.warn("It looks like %s.axes has been changed "
"directly, please use the set_axes() method "
"instead." % self.__class__.__name__)
self._update_topo_space()
return self.topo_space.np_format_as(topo_batch, dspace)
else:
raise ValueError("%s does not know how to format a batch into "
"%s of type %s."
% (self.__class__.__name__, dspace, type(dspace)))
def __setstate__(self, d):
"""
.. todo::
WRITEME
"""
# Patch old pickle files that don't have the axes attribute.
if 'axes' not in d:
d['axes'] = ['b', 0, 1, 'c']
self.__dict__.update(d)
# Same for topo_space
if 'topo_space' not in self.__dict__:
self._update_topo_space()
def _update_topo_space(self):
"""Update self.topo_space from self.shape and self.axes"""
rows, cols, channels = self.shape
self.topo_space = Conv2DSpace(shape=(rows, cols),
num_channels=channels,
axes=self.axes)
def set_axes(self, axes):
"""
.. todo::
WRITEME
"""
self.axes = axes
self._update_topo_space()
def from_dataset(dataset, num_examples):
"""
Constructs a random subset of a DenseDesignMatrix
Parameters
----------
dataset : DenseDesignMatrix
num_examples : int
Returns
-------
sub_dataset : DenseDesignMatrix
A new dataset containing `num_examples` examples. It is a random subset
of continuous 'num_examples' examples drawn from `dataset`.
"""
if dataset.view_converter is not None:
try:
V, y = dataset.get_batch_topo(num_examples, True)
except TypeError:
# This patches a case where control.get_load_data() is false so
# dataset.X is None This logic should be removed whenever we
# implement lazy loading
if isinstance(dataset, DenseDesignMatrix) and \
dataset.X is None and \
not control.get_load_data():
warnings.warn("from_dataset wasn't able to make subset of "
"dataset, using the whole thing")
return DenseDesignMatrix(
X=None, view_converter=dataset.view_converter
)
raise
rval = DenseDesignMatrix(topo_view=V, y=y, y_labels=dataset.y_labels)
rval.adjust_for_viewer = dataset.adjust_for_viewer
else:
X, y = dataset.get_batch_design(num_examples, True)
rval = DenseDesignMatrix(X=X, y=y, y_labels=dataset.y_labels)
return rval
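# Illustrative usage of from_dataset (added comment, not in the original
# source); `full` is assumed to be an already-constructed DenseDesignMatrix:
#
#     subset = from_dataset(full, num_examples=100)
#     assert subset.X.shape[0] == 100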
def dataset_range(dataset, start, stop):
"""
Returns a new dataset formed by extracting a range of examples from an
existing dataset.
Parameters
----------
dataset : DenseDesignMatrix
The existing dataset to extract examples from.
start : int
Extract examples starting at this index.
stop : int
Stop extracting examples at this index. Do not include this index
itself (like the python `range` builtin)
Returns
-------
sub_dataset : DenseDesignMatrix
The new dataset containing examples [start, stop).
"""
if dataset.X is None:
return DenseDesignMatrix(X=None,
y=None,
view_converter=dataset.view_converter)
X = dataset.X[start:stop, :].copy()
if dataset.y is None:
y = None
else:
if dataset.y.ndim == 2:
y = dataset.y[start:stop, :].copy()
else:
y = dataset.y[start:stop].copy()
assert X.shape[0] == y.shape[0]
assert X.shape[0] == stop - start
topo = dataset.get_topological_view(X)
rval = DenseDesignMatrix(topo_view=topo, y=y)
rval.adjust_for_viewer = dataset.adjust_for_viewer
return rval
def convert_to_one_hot(dataset, min_class=0):
"""
.. todo::
WRITEME properly
Convenient way of accessing convert_to_one_hot from a yaml file
"""
dataset.convert_to_one_hot(min_class=min_class)
return dataset
def set_axes(dataset, axes):
"""
.. todo::
WRITEME
"""
dataset.set_view_converter_axes(axes)
return dataset
| bsd-3-clause | 8,283,100,961,385,428,000 | 33.644304 | 79 | 0.547389 | false | 4.456766 | false | false | false |
galaxy-team/website | newrelic/hooks/framework_cherrypy.py | 1 | 4772 | from __future__ import with_statement
import sys
import types
import newrelic.api.transaction
import newrelic.api.web_transaction
import newrelic.api.function_trace
import newrelic.api.object_wrapper
import newrelic.api.error_trace
class HandlerWrapper(object):
def __init__(self, wrapped):
self.__name = newrelic.api.object_wrapper.callable_name(wrapped)
self.__wrapped = wrapped
def __getattr__(self, name):
return getattr(self.__wrapped, name)
def __get__(self, instance, klass):
if instance is None:
return self
descriptor = self.__wrapped.__get__(instance, klass)
return self.__class__(descriptor)
def __call__(self, *args, **kwargs):
transaction = newrelic.api.transaction.current_transaction()
if transaction:
transaction.name_transaction(name=self.__name, priority=2)
with newrelic.api.error_trace.ErrorTrace(transaction):
with newrelic.api.function_trace.FunctionTrace(
transaction, name=self.__name):
try:
return self.__wrapped(*args, **kwargs)
except:
transaction.record_exception(*sys.exc_info())
raise
else:
return self.__wrapped(*args, **kwargs)
class ResourceWrapper(object):
def __init__(self, wrapped):
self.__wrapped = wrapped
def __dir__(self):
return dir(self.__wrapped)
def __getattr__(self, name):
attr = getattr(self.__wrapped, name)
if name.isupper():
return HandlerWrapper(attr)
return attr
class ResolverWrapper(object):
def __init__(self, wrapped):
if type(wrapped) == types.TupleType:
(instance, wrapped) = wrapped
else:
instance = None
self.__instance = instance
self.__wrapped = wrapped
def __getattr__(self, name):
return getattr(self.__wrapped, name)
def __get__(self, instance, klass):
if instance is None:
return self
descriptor = self.__wrapped.__get__(instance, klass)
return self.__class__((instance, descriptor))
def __call__(self, *args, **kwargs):
transaction = newrelic.api.transaction.current_transaction()
if transaction:
try:
obj, vpath = self.__wrapped(*args, **kwargs)
if obj:
klass = self.__instance.__class__
if klass.__name__ == 'MethodDispatcher':
transaction.name_transaction('405', group='Uri')
obj = ResourceWrapper(obj)
else:
obj = HandlerWrapper(obj)
else:
transaction.name_transaction('404', group='Uri')
return obj, vpath
except:
transaction.record_exception(*sys.exc_info())
raise
else:
return self.__wrapped(*args, **kwargs)
class RoutesResolverWrapper(object):
def __init__(self, wrapped):
if type(wrapped) == types.TupleType:
(instance, wrapped) = wrapped
else:
instance = None
self.__instance = instance
self.__wrapped = wrapped
def __getattr__(self, name):
return getattr(self.__wrapped, name)
def __get__(self, instance, klass):
if instance is None:
return self
descriptor = self.__wrapped.__get__(instance, klass)
return self.__class__((instance, descriptor))
def __call__(self, *args, **kwargs):
transaction = newrelic.api.transaction.current_transaction()
if transaction:
try:
handler = self.__wrapped(*args, **kwargs)
if handler:
handler = HandlerWrapper(handler)
else:
transaction.name_transaction('404', group='Uri')
return handler
except:
transaction.record_exception(*sys.exc_info())
raise
else:
return self.__wrapped(*args, **kwargs)
def instrument_cherrypy_cpdispatch(module):
newrelic.api.object_wrapper.wrap_object(module,
'Dispatcher.find_handler', ResolverWrapper)
newrelic.api.object_wrapper.wrap_object(module,
'RoutesDispatcher.find_handler', RoutesResolverWrapper)
def instrument_cherrypy_cpwsgi(module):
newrelic.api.web_transaction.wrap_wsgi_application(
module, 'CPWSGIApp.__call__')
def instrument_cherrypy_cptree(module):
newrelic.api.web_transaction.wrap_wsgi_application(
module, 'Application.__call__')
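# Illustrative note (added comment, not in the original source): the agent
# invokes these instrument_* hooks with the freshly imported CherryPy modules,
# so the effect is roughly equivalent to
#
#     import cherrypy._cpdispatch
#     instrument_cherrypy_cpdispatch(cherrypy._cpdispatch)
#
# after which Dispatcher.find_handler hands back HandlerWrapper-wrapped
# handlers that name, time and error-trace each web transaction.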
| agpl-3.0 | -5,887,581,141,992,436,000 | 31.684932 | 72 | 0.564334 | false | 4.570881 | false | false | false |
Bergiu/smarthomepi | packages/shp/client/ServerController.py | 1 | 3142 | #
from ..Controller import Controller
from .Server import Server
class ServerController ( Controller ):
table_name="tbl_server"
def __init__( self, process, database):
"""
@database: Database
@**kwargs
user_controller: UserController
program_controller: ProgramController
bwl_list_controller: BwlListController
"""
super(ServerController,self).__init__(process,database)
self.initiateAllFromDB()
def createItem(self, **kwargs):
"""
@**kwargs:
ip_adress:string
key:string
"""
missing="ServerController createItem: Missing "
params={}
if "ip_adress" in kwargs.keys():
params["ip_adress"]=str(kwargs["ip_adress"])
else:
raise ValueError(missing+"ip_adress")
if "key" in kwargs.keys():
params["key"]=str(kwargs["key"])
else:
raise ValueError(missing+"key")
duplicate=self.getIdByIp(params["ip_adress"])
if duplicate:
			raise Exception("ServerController createItem: The server %s is already in use" % params["ip_adress"])
params["id"]=int(self.database.insert(self.table_name,**params))
item=Server(**params)
validation=self.addItem(item)
if validation:
return item.getId()
else:
return False
def delItem( self, id_item ):
"""
@id_item:int
"""
item=self.getItemById(id_item)
if item:
validation=self.database.delete(self.table_name,id_item)
if not validation:
return False
self.itemList.remove(item)
self.indexIdx-=1
return True
else:
raise Exception("No Server with id=%s"%(id_item))
def initiateFromDB( self, id_item ):
"""
@id_item:int
@validation:boolean
"""
item_data=self.database.load(self.table_name,id_item)
if item_data:
params={}
params["id"]=int(item_data[0])
duplicate=self.isItem(params["id"])
if duplicate:
self.removeItem(params["id"])
params["ip_adress"]=str(item_data[1])
params["key"]=str(item_data[2])
item=Server(**params)
self.addItem(item)
return True
else:
return False
def initiateAllFromDB( self ):
"""
@validation:boolean
"""
item_datas=self.database.loadTable(self.table_name)
for item_data in item_datas:
params={}
params["id"]=int(item_data[0])
duplicate=self.isItem(params["id"])
if duplicate:
self.removeItem(params["id"])
params["ip_adress"]=str(item_data[1])
params["key"]=str(item_data[2])
item=Server(**params)
self.addItem(item)
return True
#end interface
#public:
def getId( self, idx_server):
"""
@idx_server:int
@id:int
"""
if len(self.itemList)>idx_server:
return self.itemList[idx_server].getId()
else:
return False
def getIdByIp(self, ip_adress):
for item in self.itemList:
if item.getIpAdress() == str(ip_adress):
return item.getId()
return False
def getIpAdress( self, id_server):
"""
@id_server:int
@ip_adress:string
"""
item=self.getItemById(id_server)
if item:
return item.getIpAdress()
else:
return False
def checkKey( self, id_server, key):
"""
@id_server:int
@key:text
@validation:boolean
"""
item=self.getItemById(id_server)
if item:
return item.checkKey(key)
else:
return False
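	# Illustrative usage (added comment, not in the original source); `process`
	# and `database` stand for the application objects this controller normally
	# receives:
	#
	#     controller = ServerController(process, database)
	#     id_server = controller.createItem(ip_adress="192.168.0.10", key="secret")
	#     controller.checkKey(id_server, "secret")  # True if the stored key matches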
| gpl-3.0 | 1,749,547,383,230,368,300 | 21.442857 | 82 | 0.660089 | false | 2.964151 | false | false | false |
2013Commons/HUE-SHARK | build/env/lib/python2.7/site-packages/pip-0.6.3-py2.7.egg/pip/vcs/subversion.py | 1 | 10484 | import os
import re
from pip import call_subprocess
from pip.index import Link
from pip.util import rmtree, display_path
from pip.log import logger
from pip.vcs import vcs, VersionControl
_svn_xml_url_re = re.compile('url="([^"]+)"')
_svn_rev_re = re.compile('committed-rev="(\d+)"')
_svn_url_re = re.compile(r'URL: (.+)')
_svn_revision_re = re.compile(r'Revision: (.+)')
class Subversion(VersionControl):
name = 'svn'
dirname = '.svn'
repo_name = 'checkout'
schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https')
bundle_file = 'svn-checkout.txt'
guide = ('# This was an svn checkout; to make it a checkout again run:\n'
'svn checkout --force -r %(rev)s %(url)s .\n')
def get_info(self, location):
"""Returns (url, revision), where both are strings"""
assert not location.rstrip('/').endswith(self.dirname), 'Bad directory: %s' % location
output = call_subprocess(
['svn', 'info', location], show_stdout=False, extra_environ={'LANG': 'C'})
match = _svn_url_re.search(output)
if not match:
logger.warn('Cannot determine URL of svn checkout %s' % display_path(location))
logger.info('Output that cannot be parsed: \n%s' % output)
return None, None
url = match.group(1).strip()
match = _svn_revision_re.search(output)
if not match:
logger.warn('Cannot determine revision of svn checkout %s' % display_path(location))
logger.info('Output that cannot be parsed: \n%s' % output)
return url, None
return url, match.group(1)
def parse_vcs_bundle_file(self, content):
for line in content.splitlines():
if not line.strip() or line.strip().startswith('#'):
continue
match = re.search(r'^-r\s*([^ ])?', line)
if not match:
return None, None
rev = match.group(1)
rest = line[match.end():].strip().split(None, 1)[0]
return rest, rev
return None, None
def unpack(self, location):
"""Check out the svn repository at the url to the destination location"""
url, rev = self.get_url_rev()
logger.notify('Checking out svn repository %s to %s' % (url, location))
logger.indent += 2
try:
if os.path.exists(location):
# Subversion doesn't like to check out over an existing directory
# --force fixes this, but was only added in svn 1.5
rmtree(location)
call_subprocess(
['svn', 'checkout', url, location],
filter_stdout=self._filter, show_stdout=False)
finally:
logger.indent -= 2
def export(self, location):
"""Export the svn repository at the url to the destination location"""
url, rev = self.get_url_rev()
logger.notify('Checking out svn repository %s to %s' % (url, location))
logger.indent += 2
try:
if os.path.exists(location):
# Subversion doesn't like to check out over an existing directory
# --force fixes this, but was only added in svn 1.5
rmtree(location)
call_subprocess(
['svn', 'export', url, location],
filter_stdout=self._filter, show_stdout=False)
finally:
logger.indent -= 2
def switch(self, dest, url, rev_options):
call_subprocess(
['svn', 'switch'] + rev_options + [url, dest])
def update(self, dest, rev_options):
call_subprocess(
['svn', 'update'] + rev_options + [dest])
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = ['-r', rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.notify('Checking out %s%s to %s'
% (url, rev_display, display_path(dest)))
call_subprocess(
['svn', 'checkout', '-q'] + rev_options + [url, dest])
def get_location(self, dist, dependency_links):
egg_fragment_re = re.compile(r'#egg=(.*)$')
for url in dependency_links:
egg_fragment = Link(url).egg_fragment
if not egg_fragment:
continue
if '-' in egg_fragment:
## FIXME: will this work when a package has - in the name?
key = '-'.join(egg_fragment.split('-')[:-1]).lower()
else:
key = egg_fragment
if key == dist.key:
return url.split('#', 1)[0]
return None
def get_revision(self, location):
"""
Return the maximum revision for all files under a given location
"""
# Note: taken from setuptools.command.egg_info
revision = 0
for base, dirs, files in os.walk(location):
if self.dirname not in dirs:
dirs[:] = []
continue # no sense walking uncontrolled subdirs
dirs.remove(self.dirname)
entries_fn = os.path.join(base, self.dirname, 'entries')
if not os.path.exists(entries_fn):
## FIXME: should we warn?
continue
f = open(entries_fn)
data = f.read()
f.close()
if data.startswith('8') or data.startswith('9') or data.startswith('10'):
data = map(str.splitlines,data.split('\n\x0c\n'))
del data[0][0] # get rid of the '8'
dirurl = data[0][3]
revs = [int(d[9]) for d in data if len(d)>9 and d[9]]+[0]
if revs:
localrev = max(revs)
else:
localrev = 0
elif data.startswith('<?xml'):
dirurl = _svn_xml_url_re.search(data).group(1) # get repository URL
revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)]+[0]
if revs:
localrev = max(revs)
else:
localrev = 0
else:
logger.warn("Unrecognized .svn/entries format; skipping %s", base)
dirs[:] = []
continue
if base == location:
base_url = dirurl+'/' # save the root url
elif not dirurl.startswith(base_url):
dirs[:] = []
continue # not part of the same svn tree, skip it
revision = max(revision, localrev)
return revision
def get_url(self, location):
# In cases where the source is in a subdirectory, not alongside setup.py
# we have to look up in the location until we find a real setup.py
orig_location = location
while not os.path.exists(os.path.join(location, 'setup.py')):
last_location = location
location = os.path.dirname(location)
if location == last_location:
# We've traversed up to the root of the filesystem without finding setup.py
logger.warn("Could not find setup.py for directory %s (tried all parent directories)"
% orig_location)
return None
f = open(os.path.join(location, self.dirname, 'entries'))
data = f.read()
f.close()
if data.startswith('8') or data.startswith('9') or data.startswith('10'):
data = map(str.splitlines,data.split('\n\x0c\n'))
del data[0][0] # get rid of the '8'
return data[0][3]
elif data.startswith('<?xml'):
match = _svn_xml_url_re.search(data)
if not match:
raise ValueError('Badly formatted data: %r' % data)
return match.group(1) # get repository URL
else:
logger.warn("Unrecognized .svn/entries format in %s" % location)
# Or raise exception?
return None
def get_tag_revs(self, svn_tag_url):
stdout = call_subprocess(
['svn', 'ls', '-v', svn_tag_url], show_stdout=False)
results = []
for line in stdout.splitlines():
parts = line.split()
rev = int(parts[0])
tag = parts[-1].strip('/')
results.append((tag, rev))
return results
def find_tag_match(self, rev, tag_revs):
best_match_rev = None
best_tag = None
for tag, tag_rev in tag_revs:
if (tag_rev > rev and
(best_match_rev is None or best_match_rev > tag_rev)):
# FIXME: Is best_match > tag_rev really possible?
# or is it a sign something is wacky?
best_match_rev = tag_rev
best_tag = tag
return best_tag
def get_src_requirement(self, dist, location, find_tags=False):
repo = self.get_url(location)
if repo is None:
return None
parts = repo.split('/')
## FIXME: why not project name?
egg_project_name = dist.egg_name().split('-', 1)[0]
rev = self.get_revision(location)
if parts[-2] in ('tags', 'tag'):
# It's a tag, perfect!
full_egg_name = '%s-%s' % (egg_project_name, parts[-1])
elif parts[-2] in ('branches', 'branch'):
# It's a branch :(
full_egg_name = '%s-%s-r%s' % (dist.egg_name(), parts[-1], rev)
elif parts[-1] == 'trunk':
# Trunk :-/
full_egg_name = '%s-dev_r%s' % (dist.egg_name(), rev)
if find_tags:
tag_url = '/'.join(parts[:-1]) + '/tags'
tag_revs = self.get_tag_revs(tag_url)
match = self.find_tag_match(rev, tag_revs)
if match:
logger.notify('trunk checkout %s seems to be equivalent to tag %s' % match)
repo = '%s/%s' % (tag_url, match)
full_egg_name = '%s-%s' % (egg_project_name, match)
else:
# Don't know what it is
logger.warn('svn URL does not fit normal structure (tags/branches/trunk): %s' % repo)
full_egg_name = '%s-dev_r%s' % (egg_project_name, rev)
return 'svn+%s@%s#egg=%s' % (repo, rev, full_egg_name)
vcs.register(Subversion)
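# Illustrative example (added comment, not in the original source): for a
# hypothetical checkout of http://svn.example.com/proj/trunk at revision 1234,
# get_src_requirement() yields a requirement string of the form
#
#     svn+http://svn.example.com/proj/trunk@1234#egg=<egg_name>-dev_r1234
#
# while a tags/1.0 checkout maps to ...#egg=proj-1.0.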
| apache-2.0 | 2,218,389,806,356,872,200 | 40.438735 | 101 | 0.52499 | false | 3.974223 | false | false | false |
nccgroup/featherduster | feathermodules/auxiliary/rand_time.py | 1 | 2373 | import cryptanalib as ca
import feathermodules
from time import time
import random
def rand_seeded_with_time_check(samples):
def seed_and_generate_value(seed, lowest, highest):
random.seed(seed)
return random.randint(lowest, highest)
options = feathermodules.current_options
options_tmp = dict(options)
   options_tmp = check_arguments(options_tmp)
if options_tmp == False:
return False
timestamps = range(options_tmp['base_timestamp']-86400,options_tmp['base_timestamp']+86400)
prng_outputs = map(lambda timestamp: seed_and_generate_value(timestamp, options_tmp['lowest'], options_tmp['highest']), timestamps)
converted_samples = map(lambda sample: int(sample, options_tmp['base']), samples)
matches = set(prng_outputs) & set(converted_samples)
if matches:
print '[!] %d matches were discovered! This suggests random outputs are based on Mersenne Twister output seeded with the current system time.' % len(matches)
return matches
else:
print '[+] No matches discovered.'
return False
def check_arguments(options):
try:
print '[+] Checking provided timestamp...'
options['base_timestamp'] = int(options['base_timestamp'])
print '[+] Checking provided format...'
if options['format'].lower() in ['hex', 'h']:
options['base'] = 16
elif options['format'].lower() in ['dec', 'd', 'decimal']:
options['base'] = 10
else:
         print '[*] Format option was not recognized. Please use \'hex\' or \'dec\'.'
         return False
print '[+] Checking lowest possible value...'
options['lowest'] = int(options['lowest'], options['base'])
print '[+] Checking highest possible value...'
options['highest'] = int(options['highest'], options['base'])
return options
except:
print '[*] One or more numeric arguments could not be converted to a number. Please try again.'
return False
feathermodules.module_list['rand_time'] = {
'attack_function':rand_seeded_with_time_check,
'type':'auxiliary',
'keywords':['random'],
'description':'A brute force attack attempting to match captured samples to the output of the Mersenne Twister PRNG seeded with the current system time.',
'options':{'base_timestamp': str(int(time())),
'format': 'hex',
'lowest': '00000000',
'highest': 'FFFFFFFF'
}
}
| bsd-3-clause | 35,931,700,480,913,720 | 37.274194 | 163 | 0.657817 | false | 3.968227 | false | false | false |
rpav/io_scene_consmodel | nodes.py | 1 | 6385 | __all__ = ["make_node", "CM_Node"]
import bpy
import bpy.types
import bpy_types
import bmesh
import bmesh.ops
import math
import mathutils
import pyconspack as cpk
from array import array
from pyconspack import Conspack
from mathutils import Matrix
import io_scene_consmodel.consmodel as consmodel
from io_scene_consmodel.util import (matrix_to_vec, AttrPack, defencode)
# Nodes
class CM_Node(AttrPack):
def preinit(self, ob=None, **kw):
if(not ob):
return
self.name = ob.name
self.transform = (hasattr(ob, 'matrix_local') and
matrix_to_vec(ob.matrix_local))
vals = ()
if(hasattr(ob, 'children')):
vals = ob.children
elif(hasattr(ob, 'objects')):
vals = ob.objects
if(vals):
self.children = cpk.Vector()
for val in vals:
if(not val.parent or val.parent == ob):
self.children.append(make_node(val))
def best_integer_type(i):
if (i < 2**8): return 'B'
elif(i < 2**16): return 'H'
else: return 'I'
def int_array(a):
return array(best_integer_type(len(a)), a)
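# Illustrative note (added comment, not in the original source): the typecode
# is chosen from the *length* of the list, since the stored values are vertex
# indices bounded by that length -- int_array([0, 1, 2]) packs unsigned bytes
# ('B'), a 300-element index list is promoted to 'H' (16-bit), and 65536 or
# more entries fall through to 'I' (32-bit).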
class CM_Mesh(CM_Node):
def preinit(self, ob=None, **kw):
super().preinit(ob, **kw)
self.primitive_type = cpk.keyword('triangle')
self.faces = array('I')
self.vertices = array('f')
self.normals = array('f')
self.materials = cpk.Vector()
self.face_normals = cpk.Vector()
if(ob):
if(ob.data in Cache.MESH_CACHE):
self.faces, self.normals, self.face_normals, self.vertices, self.materials = Cache.MESH_CACHE[ob.data]
else:
bm = bmesh.new()
bm.from_mesh(ob.data)
bmesh.ops.triangulate(bm, faces=bm.faces)
for v in bm.verts:
self.vertices.extend(v.co.xyz)
self.normals.extend(v.normal)
for f in bm.faces:
self.faces.extend((v.index for v in f.verts))
self.normals.extend(f.normal)
fni = math.floor(len(self.normals)/3)-1
if(f.smooth):
self.face_normals.extend((v.index for v in f.verts))
else:
self.face_normals.extend((fni, fni, fni))
self.faces = int_array(self.faces)
self.face_normals = int_array(self.face_normals)
bm.free()
for slot in ob.material_slots:
if(slot.material in Cache.MAT_CACHE):
mat = Cache.MAT_CACHE[slot.material]
else:
mat = CM_Material(ob=slot.material)
self.materials.append(mat)
Cache.MESH_CACHE[ob.data] = (self.faces, self.normals, self.face_normals, self.vertices, self.materials)
class CM_Camera(CM_Node):
def preinit(self, ob=None, **kw):
super().preinit(ob, **kw)
self.fov = ob.data.angle
self.clip_near = ob.data.clip_start
self.clip_far = ob.data.clip_end
self.aspect = ob.data.sensor_width / ob.data.sensor_height
class CM_LightPoint(CM_Node):
def preinit(self, ob=None, **kw):
super().preinit(ob, **kw)
self.position = array('f', ob.location)
self.diffuse = array('f', (0, 0, 0))
self.specular = array('f', (0, 0, 0))
if(ob.data.use_diffuse):
self.diffuse = array('f', ob.data.energy * ob.data.color)
if(ob.data.use_specular):
self.specular = array('f', ob.data.energy * ob.data.color)
self.attenuation_constant = 1.0
self.attenuation_linear = 0.0
self.attenuation_quadratic = 0.0
if(ob.data.falloff_type == 'CONSTANT'):
self.attenuation_constant = ob.data.distance
elif(ob.data.falloff_type == 'INVERSE_LINEAR'):
self.attenuation_linear = 1/ob.data.distance
elif(ob.data.falloff_type == 'INVERSE_SQUARE'):
self.attenuation_quadratic = 1/(ob.data.distance**2)
elif(ob.data.falloff_type == 'LINEAR_QUADRATIC_WEIGHTED'):
self.attenuation_linear = 1/(ob.data.linear_attenuation * ob.data.distance)
self.attenuation_quadratic = 1/((ob.data.quadratic_attenuation * ob.data.distance)**2)
class CM_Material(AttrPack):
def preinit(self, ob=None, **kw):
self.name = ""
self.values = v = dict()
m = ob
world = consmodel.Consmodel.SCENE.world
if(ob):
self.name = m.name
v['alpha'] = m.alpha
v['ambient'] = array('f', world.ambient_color * m.ambient)
v['diffuse'] = array('f', m.diffuse_color * m.diffuse_intensity)
# This was taken from the Blinn specular code in shadeoutput.c
roughness = m.specular_hardness * m.specular_intensity
if(roughness < 0.00001):
roughness = 0.0
elif(roughness < 100.0):
roughness = math.sqrt(1.0/roughness)
else:
roughness = math.sqrt(100.0/roughness)
v['roughness'] = roughness
specular = list(m.specular_color * m.specular_alpha)
v['specular'] = array('f', specular)
v['specular-ior'] = m.specular_ior
Cache.MAT_CACHE[ob] = self
# make_node
class Cache:
CACHE = dict()
MESH_CACHE = dict()
MAT_CACHE = dict()
def make_node(bval):
if(bval in Cache.CACHE):
return Cache.CACHE[bval]
if(isinstance(bval, bpy.types.Scene)):
ob = CM_Node(ob=bval)
elif(isinstance(bval, bpy_types.Object)):
if(bval.type == 'MESH'):
ob = CM_Mesh(ob=bval)
elif(bval.type == 'CAMERA'):
ob = CM_Camera(ob=bval)
elif(bval.type == 'LAMP' and bval.data.type == 'POINT'):
ob = CM_LightPoint(ob=bval)
else:
ob = CM_Node(ob=bval)
Cache.CACHE[bval] = ob
return ob
def clear_cache():
Cache.CACHE = dict()
Cache.MESH_CACHE = dict()
Cache.MAT_CACHE = dict()
# Conspack regs
defencode(CM_Node, "node")
defencode(CM_Mesh, "mesh")
defencode(CM_Camera, "camera")
defencode(CM_LightPoint, "light-point")
defencode(CM_Material, "material-simple")
| bsd-2-clause | -6,084,708,919,340,452,000 | 29.697115 | 120 | 0.552702 | false | 3.362296 | false | false | false |
steven-cutting/latinpigsay | latinpigsay/tmp/experiments/timedtest.py | 1 | 1262 | # -*- coding: utf-8 -*-
__title__ = 'latinpigsay'
__license__ = 'MIT'
__author__ = 'Steven Cutting'
__author_email__ = '[email protected]'
__created_on__ = '12/3/2014'
def func1(listofstrings):
return max([len(string) for string in listofstrings])
def func2(listofstrings):
return max(len(string) for string in listofstrings)
def func3(listofstrings):
    return len(max(listofstrings, key=len))
# handle a massive file using generator
def filereader_gen(file):
with open(file) as f:
for line in f:
yield line
def fileprocessor(file, function):
    filegen = filereader_gen(file)
    return (function(line) for line in filegen)
##
# Iterate over the lines of a string
foo = """
this is
a multi-line string.
"""
def f1(foo=foo): return iter(foo.splitlines())
def f2(foo=foo):
retval = ''
for char in foo:
retval += char if not char == '\n' else ''
if char == '\n':
yield retval
retval = ''
if retval:
yield retval
def f3(foo=foo):
prevnl = -1
while True:
nextnl = foo.find('\n', prevnl + 1)
if nextnl < 0: break
yield foo[prevnl + 1:nextnl]
prevnl = nextnl
if __name__ == '__main__':
for f in f1, f2, f3:
print list(f())
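# Illustrative timing harness (added comment, not in the original file): the
# func* variants can be compared with timeit, e.g.
#
#     import timeit
#     words = ['alpha', 'beta', 'gamma'] * 1000
#     for fn in (func1, func2, func3):
#         print fn.__name__, timeit.timeit(lambda: fn(words), number=100)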
| mit | -2,829,386,036,969,377,300 | 17.558824 | 57 | 0.588748 | false | 3.100737 | false | false | false |
Patrick-Cole/pygmi | pygmi/pfmod/datatypes.py | 1 | 12775 | # -----------------------------------------------------------------------------
# Name: datatypes.py (part of PyGMI)
#
# Author: Patrick Cole
# E-Mail: [email protected]
#
# Copyright: (c) 2013 Council for Geoscience
# Licence: GPL-3.0
#
# This file is part of PyGMI
#
# PyGMI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyGMI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""Class for data types."""
import numpy as np
from pygmi.raster.datatypes import Data
class LithModel():
"""
Lithological Model Data.
This is the main data structure for the modelling program
Attributes
----------
mlut : dictionary
color table for lithologies
numx : int
number of columns per layer in model
    numy : int
number of rows per layer in model
numz : int
number of layers in model
dxy : float
dimension of cubes in the x and y directions
d_z : float
dimension of cubes in the z direction
lith_index : numpy array
3D array of lithological indices.
curlayer : int
Current layer
xrange : list
minimum and maximum x coordinates
yrange : list
minimum and maximum y coordinates
zrange : list
minimum and maximum z coordinates
curprof : int
current profile (in x or y direction)
griddata : dictionary
dictionary of Data classes with raster data
custprofx : dictionary
custom profile x coordinates
custprofy : dictionary
custom profile y coordinates
profpics : dictionary
profile pictures
lith_list : dictionary
list of lithologies
lith_list_reverse : dictionary
reverse lookup for lith_list
mht : float
height of magnetic sensor
ght : float
height of gravity sensor
gregional : float
gravity regional correction
name : str
name of the model
"""
def __init__(self):
self.mlut = {0: [170, 125, 90], 1: [255, 255, 0]}
self.numx = None
self.numy = None
self.numz = None
self.dxy = None
self.d_z = None
self.lith_index = None
self.lith_index_grv_old = None
self.lith_index_mag_old = None
self.xrange = [None, None]
self.yrange = [None, None]
self.zrange = [None, None]
self.griddata = {}
self.custprofx = {}
self.custprofy = {}
self.profpics = {}
self.lith_list = {}
self.lith_list_reverse = {}
self.mht = None
self.ght = None
self.gregional = 0.
self.dataid = '3D Model'
self.tmpfiles = None
# Next line calls a function to update the variables above.
self.update(50, 40, 5, 0., 0., 0., 100., 100., 100., 0.)
self.olith_index = None
self.odxy = None
self.od_z = None
self.oxrng = None
self.oyrng = None
self.ozrng = None
self.onumx = None
self.onumy = None
self.onumz = None
# Obsolete
# self.curlayer = None
# self.is_ew = True
# self.curprof = None
def lithold_to_lith(self, nodtm=False, pbar=None):
"""
Transfers an old lithology to the new one, using update parameters.
Parameters
----------
nodtm : bool, optional
Flag for a DTM. The default is False.
pbar : pygmi.misc.ProgressBar, optional
Progressbar. The default is None.
Returns
-------
None.
"""
if self.olith_index is None:
return
if pbar is not None:
piter = pbar.iter
else:
piter = iter
xvals = np.arange(self.xrange[0], self.xrange[1], self.dxy)
yvals = np.arange(self.yrange[0], self.yrange[1], self.dxy)
zvals = np.arange(self.zrange[0], self.zrange[1], self.d_z)
if xvals[-1] == self.xrange[1]:
xvals = xvals[:-1]
if yvals[-1] == self.yrange[1]:
yvals = yvals[:-1]
if zvals[-1] == self.zrange[1]:
yvals = yvals[:-1]
xvals += 0.5 * self.dxy
yvals += 0.5 * self.dxy
zvals += 0.5 * self.d_z
xvals = xvals[self.oxrng[0] < xvals]
xvals = xvals[xvals < self.oxrng[1]]
yvals = yvals[self.oyrng[0] < yvals]
yvals = yvals[yvals < self.oyrng[1]]
zvals = zvals[self.ozrng[0] < zvals]
zvals = zvals[zvals < self.ozrng[1]]
for x_i in piter(xvals):
o_i = int((x_i - self.oxrng[0]) / self.odxy)
i = int((x_i - self.xrange[0]) / self.dxy)
for x_j in yvals:
o_j = int((x_j - self.oyrng[0]) / self.odxy)
j = int((x_j - self.yrange[0]) / self.dxy)
for x_k in zvals:
o_k = int((self.ozrng[1] - x_k) / self.od_z)
k = int((self.zrange[1] - x_k) / self.d_z)
if (self.lith_index[i, j, k] != -1 and
self.olith_index[o_i, o_j, o_k] != -1) or nodtm:
self.lith_index[i, j, k] = \
self.olith_index[o_i, o_j, o_k]
def dtm_to_lith(self, pbar=None):
"""
Assign the DTM to the model.
This means creating nodata values in areas above the DTM. These values
are assigned a lithology of -1.
Parameters
----------
pbar : pygmi.misc.ProgressBar, optional
Progressbar. The default is None.
Returns
-------
None.
"""
if 'DTM Dataset' not in self.griddata:
return
if pbar is not None:
piter = pbar.iter
else:
piter = iter
self.lith_index = np.zeros([self.numx, self.numy, self.numz],
dtype=int)
curgrid = self.griddata['DTM Dataset']
d_x = curgrid.xdim
d_y = curgrid.ydim
gxmin = curgrid.extent[0]
gymax = curgrid.extent[-1]
grows, gcols = curgrid.data.shape
utlz = curgrid.data.max()
self.lith_index[:, :, :] = 0
for i in piter(range(self.numx)):
xcrd = self.xrange[0] + (i + .5) * self.dxy
xcrd2 = int((xcrd - gxmin) / d_x)
for j in range(self.numy):
ycrd = self.yrange[1] - (j + .5) * self.dxy
ycrd2 = grows - int((gymax - ycrd) / d_y)
if ycrd2 == grows:
ycrd2 = grows-1
# if (ycrd2 >= 0 and xcrd2 >= 0 and ycrd2 < grows and
# xcrd2 < gcols):
if (0 <= ycrd2 < grows and 0 <= xcrd2 < gcols):
alt = curgrid.data.data[ycrd2, xcrd2]
if (curgrid.data.mask[ycrd2, xcrd2] or
np.isnan(alt) or alt == curgrid.nullvalue):
alt = curgrid.data.mean()
k_2 = int((utlz - alt) / self.d_z)
self.lith_index[i, j, :k_2] = -1
def init_grid(self, data):
"""
Initialize raster variables in the Data class.
Parameters
----------
data : numpy array
Masked array containing raster data.
Returns
-------
grid : PyGMI Data
PyGMI raster dataset.
"""
grid = Data()
grid.data = data
grid.xdim = self.dxy
grid.ydim = self.dxy
grid.extent = [self.xrange[0], self.xrange[1],
self.yrange[0], self.yrange[1]]
return grid
def init_calc_grids(self):
"""
Initialize mag and gravity from the model.
Returns
-------
None.
"""
tmp = np.ma.zeros([self.numy, self.numx])
self.griddata['Calculated Magnetics'] = self.init_grid(tmp.copy())
self.griddata['Calculated Magnetics'].dataid = 'Calculated Magnetics'
self.griddata['Calculated Magnetics'].units = 'nT'
self.griddata['Calculated Gravity'] = self.init_grid(tmp.copy())
self.griddata['Calculated Gravity'].dataid = 'Calculated Gravity'
self.griddata['Calculated Gravity'].units = 'mGal'
def is_modified(self, modified=True):
"""
Update modified flag.
Parameters
----------
modified : bool, optional
Flag for whether the lithology has been modified. The default is
True.
Returns
-------
None.
"""
for i in self.lith_list:
self.lith_list[i].modified = modified
def update(self, cols, rows, layers, utlx, utly, utlz, dxy, d_z, mht=-1,
ght=-1, usedtm=True, pbar=None):
"""
Update the local variables for the LithModel class.
Parameters
----------
cols : int
Number of columns per layer in model.
rows : int
Number of rows per layer in model.
layers : int
Number of layers in model.
utlx : float
Upper top left (NW) x coordinate.
utly : float
Upper top left (NW) y coordinate.
utlz : float
Upper top left (NW) z coordinate.
dxy : float
Dimension of cubes in the x and y directions.
d_z : float
Dimension of cubes in the z direction.
mht : float, optional
Height of magnetic sensor. The default is -1.
ght : float, optional
Height of gravity sensor. The default is -1.
usedtm : bool, optional
Flag to use a DTM. The default is True.
pbar : pygmi.misc.ProgressBar, optional
Progressbar. The default is None.
Returns
-------
None.
"""
if mht != -1:
self.mht = mht
if ght != -1:
self.ght = ght
self.olith_index = self.lith_index
self.odxy = self.dxy
self.od_z = self.d_z
self.oxrng = np.copy(self.xrange)
self.oyrng = np.copy(self.yrange)
self.ozrng = np.copy(self.zrange)
self.onumx = self.numx
self.onumy = self.numy
self.onumz = self.numz
xextent = cols * dxy
yextent = rows * dxy
zextent = layers * d_z
self.numx = cols
self.numy = rows
self.numz = layers
self.xrange = [utlx, utlx + xextent]
self.yrange = [utly - yextent, utly]
self.zrange = [utlz - zextent, utlz]
self.dxy = dxy
self.d_z = d_z
self.lith_index = np.zeros([self.numx, self.numy, self.numz],
dtype=int)
self.lith_index_mag_old = np.zeros([self.numx, self.numy, self.numz],
dtype=int)
self.lith_index_mag_old[:] = -1
self.lith_index_grv_old = np.zeros([self.numx, self.numy, self.numz],
dtype=int)
self.lith_index_grv_old[:] = -1
self.init_calc_grids()
if usedtm:
self.dtm_to_lith(pbar)
self.lithold_to_lith(not usedtm, pbar)
self.update_lithlist()
self.is_modified()
def update_lithlist(self):
"""
Update lith_list from local variables.
Returns
-------
None.
"""
for i in self.lith_list:
self.lith_list[i].set_xyz(self.numx, self.numy, self.numz,
self.dxy, self.mht, self.ght, self.d_z,
modified=False)
def update_lith_list_reverse(self):
"""
Update the lith_list reverse lookup.
It must be run at least once before using lith_list_reverse.
Returns
-------
None.
"""
keys = list(self.lith_list.keys())
values = list(self.lith_list.values())
if not keys:
return
self.lith_list_reverse = {}
for i in range(len(keys)):
self.lith_list_reverse[list(values)[i].lith_index] = list(keys)[i]
| gpl-3.0 | -730,418,106,219,455,900 | 29.200946 | 79 | 0.517417 | false | 3.601635 | false | false | false |
bmccann/examples | super_resolution/main.py | 6 | 3291 | from __future__ import print_function
import argparse
from math import log10
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from model import Net
from data import get_training_set, get_test_set
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Super Res Example')
parser.add_argument('--upscale_factor', type=int, required=True, help="super resolution upscale factor")
parser.add_argument('--batchSize', type=int, default=64, help='training batch size')
parser.add_argument('--testBatchSize', type=int, default=10, help='testing batch size')
parser.add_argument('--nEpochs', type=int, default=2, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.01, help='Learning Rate. Default=0.01')
parser.add_argument('--cuda', action='store_true', help='use cuda?')
parser.add_argument('--threads', type=int, default=4, help='number of threads for data loader to use')
parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')
opt = parser.parse_args()
print(opt)
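# Example invocation (added comment; the values are illustrative only):
#   python main.py --upscale_factor 3 --batchSize 4 --testBatchSize 100 \
#                  --nEpochs 30 --lr 0.001 --cuda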
cuda = opt.cuda
if cuda and not torch.cuda.is_available():
raise Exception("No GPU found, please run without --cuda")
torch.manual_seed(opt.seed)
if cuda:
torch.cuda.manual_seed(opt.seed)
print('===> Loading datasets')
train_set = get_training_set(opt.upscale_factor)
test_set = get_test_set(opt.upscale_factor)
training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False)
print('===> Building model')
model = Net(upscale_factor=opt.upscale_factor)
criterion = nn.MSELoss()
if cuda:
model = model.cuda()
criterion = criterion.cuda()
optimizer = optim.Adam(model.parameters(), lr=opt.lr)
def train(epoch):
epoch_loss = 0
for iteration, batch in enumerate(training_data_loader, 1):
input, target = Variable(batch[0]), Variable(batch[1])
if cuda:
input = input.cuda()
target = target.cuda()
optimizer.zero_grad()
loss = criterion(model(input), target)
epoch_loss += loss.data[0]
loss.backward()
optimizer.step()
print("===> Epoch[{}]({}/{}): Loss: {:.4f}".format(epoch, iteration, len(training_data_loader), loss.data[0]))
print("===> Epoch {} Complete: Avg. Loss: {:.4f}".format(epoch, epoch_loss / len(training_data_loader)))
def test():
avg_psnr = 0
for batch in testing_data_loader:
input, target = Variable(batch[0]), Variable(batch[1])
if cuda:
input = input.cuda()
target = target.cuda()
prediction = model(input)
mse = criterion(prediction, target)
psnr = 10 * log10(1 / mse.data[0])
avg_psnr += psnr
print("===> Avg. PSNR: {:.4f} dB".format(avg_psnr / len(testing_data_loader)))
def checkpoint(epoch):
model_out_path = "model_epoch_{}.pth".format(epoch)
torch.save(model, model_out_path)
print("Checkpoint saved to {}".format(model_out_path))
for epoch in range(1, opt.nEpochs + 1):
train(epoch)
test()
checkpoint(epoch)
| bsd-3-clause | 8,631,834,601,338,594,000 | 34.010638 | 120 | 0.679733 | false | 3.358163 | true | false | false |
SuperHouse/esp-open-rtos | utils/filteroutput.py | 16 | 3583 | #!/usr/bin/env python
#
# A thin Python wrapper around addr2line, can monitor esp-open-rtos
# output and uses gdb to convert any suitable looking hex numbers
# found in the output into function and line numbers.
#
# Works with a serial port if the --port option is supplied.
# Otherwise waits for input on stdin.
#
import serial
import argparse
import re
import os
import os.path
import subprocess
import termios
import sys
import time
# Try looking up anything in the executable address space
RE_EXECADDR = r"(0x)?40([0-9]|[a-z]){6}"
def find_elf_file():
out_files = []
for top,_,files in os.walk('.', followlinks=False):
for f in files:
if f.endswith(".out"):
out_files.append(os.path.join(top,f))
if len(out_files) == 1:
return out_files[0]
elif len(out_files) > 1:
print("Found multiple .out files: %s. Please specify one with the --elf option." % out_files)
else:
print("No .out file found under current directory. Please specify one with the --elf option.")
sys.exit(1)
def main():
parser = argparse.ArgumentParser(description='esp-open-rtos output filter tool', prog='filteroutput')
parser.add_argument(
'--elf', '-e',
help="ELF file (*.out file) to load symbols from (if not supplied, will search for one)"),
parser.add_argument(
'--port', '-p',
help='Serial port to monitor (will monitor stdin if None)',
default=None)
parser.add_argument(
'--baud', '-b',
help='Baud rate for serial port',
type=int,
default=74880)
parser.add_argument(
'--reset-on-connect', '-r',
help='Reset ESP8266 (via DTR) on serial connect. (Linux resets even if not set, except when using NodeMCU-style auto-reset circuit.)',
action='store_true')
args = parser.parse_args()
if args.elf is None:
args.elf = find_elf_file()
elif not os.path.exists(args.elf):
print("ELF file '%s' not found" % args.elf)
sys.exit(1)
if args.port is not None:
print("Opening %s at %dbps..." % (args.port, args.baud))
port = serial.Serial(args.port, baudrate=args.baud)
if args.reset_on_connect:
print("Resetting...")
port.setDTR(False)
time.sleep(0.1)
port.setDTR(True)
else:
print("Reading from stdin...")
port = sys.stdin
# disable echo
try:
old_attr = termios.tcgetattr(sys.stdin.fileno())
attr = termios.tcgetattr(sys.stdin.fileno())
attr[3] = attr[3] & ~termios.ECHO
termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, attr)
except termios.error:
pass
try:
while True:
line = port.readline()
if line == '':
break
print(line.strip())
for match in re.finditer(RE_EXECADDR, line, re.IGNORECASE):
addr = match.group(0)
if not addr.startswith("0x"):
addr = "0x"+addr
# keeping addr2line and feeding it addresses on stdin didn't seem to work smoothly
addr2line = subprocess.check_output(["xtensa-lx106-elf-addr2line","-pfia","-e","%s" % args.elf, addr], cwd=".").strip()
if not addr2line.endswith(": ?? ??:0"):
print("\n%s\n" % addr2line.strip())
finally:
if args.port is None:
# restore echo
termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, old_attr)
if __name__ == "__main__":
main()
| bsd-3-clause | -3,975,044,797,260,328,400 | 33.12381 | 142 | 0.590008 | false | 3.630193 | false | false | false |
birm/dbops | dbops/RecursiveDep.py | 1 | 3451 | import os
import subprocess
class RecursiveDep(object):
"""
Map all dependencies for a database/schema, to advise changes.
args:
host: the hostname to use to connect to
database: the database to check against
        table: the table to check against
        form: the form to return the result
"""
def __init__(self, host="localhost", database="mysql",
table="user", form="tree"):
"""create assertion object."""
self.host = host
self.database = database
self.table = table
self.form = form
        self.storage = set()  # set of (table, dependent table) tuples
def _run_mysql(self, command):
"""Run the mysql query and get the result as a list."""
        cmd = ["mysql", "-h", self.host, self.database,
               "-sss", "-e", "{command};".format(command=command)]
        return subprocess.check_output(cmd).splitlines()
def find(self):
"""Find, store, and show all dependencies."""
# get tables in db
table_query = "select TABLE_NAME from information_schema.TABLES \
where TABLE_SCHEMA='{db}'".format(db=self.database)
tables = self._run_mysql(table_query)
# call _find_deps for all and store
for table in tables:
self._store(table, self._find_deps(table))
# call the appropriate result function
    def _store(self, from_table, to_tables):
        """Store each (table, dependent table) pair in the internal set."""
        for to_table in to_tables:
            self.storage.add((from_table, to_table))
def _find_deps(self, tablename):
"""Find dependencies for a given table, given by name."""
dep_query = """select TABLE_NAME from information_schema.KEY_COLUMN_USAGE
where TABLE_SCHEMA = "{db}" and REFERENCED_TABLE_NAME = "{table}"
and referenced_column_name is not NULL;""".format(db=self.database,
table=tablename)
return self._run_mysql(dep_query)
def _connect_deps(self, tablename, maxdep=5):
"""Get the tree of dependencies for a table, up to maxdep times.
input:
tablename(str) - which table to start with.
maxdep(int) - (optional) how many layers deep to go
        output:
            a list of lists of table names, one list per dependency level.
        """
connecting = True # while condition set
# for each iteration
        working = []  # list of tables to work through at the current level
result = []
pos = 0 # position for result, and for maxdep
working.append(tablename)
        while connecting:
            midres = []
            for table in working:
                # all tables with a relevant dependency on this table
                midres.extend(x[1] for x in self.storage if x[0] == table)
            # prepare for next level deep
            working = list(set(midres))
            # save the result for this level
            result.append(working)
            pos = pos + 1
            # check if we should continue
            if (not midres) or (pos >= maxdep):
                connecting = False  # end the loop
        return result
def _graph_result(self):
"""The result display function for the graph output."""
pass
    def _text_result(self):
        """The result display function for text or command line output."""
pass
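# Illustrative usage (added comment, not in the original source): map the
# foreign-key dependencies of a hypothetical `shop` database and walk two
# levels out from its `orders` table.
#
#     deps = RecursiveDep(host="localhost", database="shop", table="orders")
#     deps.find()                                # populate deps.storage
#     levels = deps._connect_deps("orders", maxdep=2)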
| gpl-3.0 | -2,130,449,234,815,290,400 | 38.215909 | 81 | 0.558968 | false | 4.464424 | false | false | false |
smartanthill/smartanthill1_0 | smartanthill/network/service.py | 1 | 8242 | # Copyright (C) Ivan Kravets <[email protected]>
# See LICENSE for details.
# pylint: disable=W0613
from binascii import hexlify
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, maybeDeferred, returnValue
from twisted.internet.serialport import SerialPort
import smartanthill.network.protocol as sanp
from smartanthill.exception import NetworkRouterConnectFailure
from smartanthill.service import SAMultiService
from smartanthill.util import get_service_named
class ControlService(SAMultiService):
def __init__(self, name):
SAMultiService.__init__(self, name)
self._protocol = sanp.ControlProtocolWrapping(
self.climessage_protocallback)
self._litemq = None
def startService(self):
self._litemq = get_service_named("litemq")
self._protocol.makeConnection(self)
self._litemq.consume("network", "control.in", "transport->control",
self.inmessage_mqcallback)
self._litemq.consume("network", "control.out", "client->control",
self.outmessage_mqcallback)
SAMultiService.startService(self)
def stopService(self):
SAMultiService.stopService(self)
self._litemq.unconsume("network", "control.in")
self._litemq.unconsume("network", "control.out")
def write(self, message):
self._litemq.produce("network", "control->transport", message,
dict(binary=True))
def inmessage_mqcallback(self, message, properties):
self.log.debug("Received incoming raw message %s" % hexlify(message))
self._protocol.dataReceived(message)
def outmessage_mqcallback(self, message, properties):
self.log.debug("Received outgoing %s and properties=%s" %
(message, properties))
self._protocol.send_message(message)
def climessage_protocallback(self, message):
self.log.debug("Received incoming client %s" % message)
self._litemq.produce("network", "control->client", message)
class TransportService(SAMultiService):
def __init__(self, name):
SAMultiService.__init__(self, name)
self._protocol = sanp.TransportProtocolWrapping(
self.rawmessage_protocallback)
self._litemq = None
def startService(self):
self._litemq = get_service_named("litemq")
self._protocol.makeConnection(self)
self._litemq.consume("network", "transport.in", "routing->transport",
self.insegment_mqcallback)
self._litemq.consume("network", "transport.out", "control->transport",
self.outmessage_mqcallback, ack=True)
SAMultiService.startService(self)
def stopService(self):
SAMultiService.stopService(self)
self._litemq.unconsume("network", "transport.in")
self._litemq.unconsume("network", "transport.out")
def rawmessage_protocallback(self, message):
self.log.debug("Received incoming raw message %s" % hexlify(message))
self._litemq.produce("network", "transport->control", message,
dict(binary=True))
def write(self, segment):
self._litemq.produce("network", "transport->routing", segment,
dict(binary=True))
def insegment_mqcallback(self, message, properties):
self.log.debug("Received incoming segment %s" % hexlify(message))
self._protocol.dataReceived(message)
@inlineCallbacks
def outmessage_mqcallback(self, message, properties):
self.log.debug("Received outgoing message %s" % hexlify(message))
ctrlmsg = sanp.ControlProtocol.rawmessage_to_message(message)
def _on_err(failure):
self._litemq.produce("network", "transport->err", ctrlmsg)
failure.raiseException()
d = maybeDeferred(self._protocol.send_message, message)
d.addErrback(_on_err)
result = yield d
if result and ctrlmsg.ack:
self._litemq.produce("network", "transport->ack", ctrlmsg)
returnValue(result)
class RouterService(SAMultiService):
RECONNECT_DELAY = 1 # in seconds
def __init__(self, name, options):
SAMultiService.__init__(self, name, options)
self._protocol = sanp.RoutingProtocolWrapping(
self.inpacket_protocallback)
self._router_device = None
self._litemq = None
self._reconnect_nums = 0
self._reconnect_callid = None
def startService(self):
connection = self.options['connection']
try:
if connection.get_type() == "serial":
_kwargs = connection.params
_kwargs['protocol'] = self._protocol
_kwargs['reactor'] = reactor
# rename port's argument
if "port" in _kwargs:
_kwargs['deviceNameOrPortNumber'] = _kwargs['port']
del _kwargs['port']
self._router_device = SerialPort(**_kwargs)
except:
self.log.error(NetworkRouterConnectFailure(self.options))
self._reconnect_nums += 1
self._reconnect_callid = reactor.callLater(
self._reconnect_nums * self.RECONNECT_DELAY, self.startService)
return
self._litemq = get_service_named("litemq")
self._litemq.consume(
exchange="network",
queue="routing.out." + self.name,
routing_key="transport->routing",
callback=self.outsegment_mqcallback
)
SAMultiService.startService(self)
def stopService(self):
SAMultiService.stopService(self)
if self._reconnect_callid:
self._reconnect_callid.cancel()
if self._router_device:
self._router_device.loseConnection()
if self._litemq:
self._litemq.unconsume("network", "routing.out." + self.name)
def inpacket_protocallback(self, packet):
self.log.debug("Received incoming packet %s" % hexlify(packet))
self._litemq.produce("network", "routing->transport",
sanp.RoutingProtocol.packet_to_segment(packet),
dict(binary=True))
def outsegment_mqcallback(self, message, properties):
# check destination ID @TODO
if ord(message[2]) not in self.options['deviceids']:
return False
self.log.debug("Received outgoing segment %s" % hexlify(message))
self._protocol.send_segment(message)
class ConnectionInfo(object):
def __init__(self, uri):
assert ":" in uri
self.uri = uri
parts = uri.split(":")
self.type_ = parts[0]
self.params = dict()
for part in parts[1:]:
key, value = part.split("=")
self.params[key] = value
def __repr__(self):
return "ConnectionInfo: %s" % self.uri
def get_uri(self):
return self.uri
def get_type(self):
return self.type_
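# Illustrative example (added comment, not in the original source): a router
# URI such as "serial:port=/dev/ttyUSB0:baudrate=115200" parses into
#
#     info = ConnectionInfo("serial:port=/dev/ttyUSB0:baudrate=115200")
#     info.get_type()   # -> "serial"
#     info.params       # -> {"port": "/dev/ttyUSB0", "baudrate": "115200"}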
class NetworkService(SAMultiService):
def __init__(self, name, options):
SAMultiService.__init__(self, name, options)
self._litemq = None
def startService(self):
self._litemq = get_service_named("litemq")
self._litemq.declare_exchange("network")
ControlService("network.control").setServiceParent(self)
TransportService("network.transport").setServiceParent(self)
devices = get_service_named("device").get_devices()
for devid, devobj in devices.iteritems():
netopts = devobj.options.get("network", {})
rconn = netopts.get("router", None)
if not rconn:
continue
_options = {"connection": ConnectionInfo(rconn),
"deviceids": [devid]}
RouterService("network.router.%d" % devid,
_options).setServiceParent(self)
SAMultiService.startService(self)
def stopService(self):
SAMultiService.stopService(self)
self._litemq.undeclare_exchange("network")
def makeService(name, options):
return NetworkService(name, options)
| mit | 1,871,958,403,645,602,300 | 34.373391 | 79 | 0.615749 | false | 4.110723 | false | false | false |
locationtech/geowave | python/src/main/python/pygw/geotools/simple_feature_type_builder.py | 2 | 2794 | #
# Copyright (c) 2013-2020 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
# ===============================================================================================
from pygw.config import java_pkg
from pygw.base import GeoWaveObject
from .simple_feature_type import SimpleFeatureType
from .attribute_descriptor import AttributeDescriptor
class SimpleFeatureTypeBuilder(GeoWaveObject):
"""
Builds `pygw.geotools.simple_feature_type.SimpleFeatureType` instances.
"""
def __init__(self):
self.attributes = []
super().__init__(java_pkg.org.geotools.feature.simple.SimpleFeatureTypeBuilder())
def set_name(self, name):
"""
Sets the name of the feature type.
Args:
name (str): The name to use.
Returns:
This feature type builder.
"""
self._java_ref.setName(name)
return self
def set_namespace_uri(self, namespace_uri):
"""
Sets the namespace URI of the feature type.
Args:
namespace_uri (str): The namespace URI to use.
Returns:
This feature type builder.
"""
self._java_ref.setNamespaceURI(namespace_uri)
return self
def set_srs(self, srs):
"""
Sets the spatial reference system of the feature type.
Args:
srs (str): The spatial reference system to use.
Returns:
This feature type builder.
"""
self._java_ref.setSRS(srs)
return self
def add(self, attribute_descriptor):
"""
Adds an attribute to the feature type.
Args:
attribute_descriptor (pygw.geotools.attribute_descriptor.AttributeDescriptor): The attribute to add.
Returns:
This feature type builder.
"""
if isinstance(attribute_descriptor, AttributeDescriptor):
self.attributes.append(attribute_descriptor)
self._java_ref.add(attribute_descriptor._java_ref)
return self
else:
raise ValueError("attribute_descriptor should be of type AttributeDescriptor")
def build_feature_type(self):
"""
Builds the configured feature type.
Returns:
A `pygw.geotools.simple_feature_type.SimpleFeatureType` with the given configuration.
"""
return SimpleFeatureType(self._java_ref.buildFeatureType(), self.attributes)
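# A minimal usage sketch (illustrative only; the descriptor variables and field
# names below are assumptions, not part of this module):
#   builder = SimpleFeatureTypeBuilder()
#   builder.set_name("PointFeature").set_srs("EPSG:4326")
#   builder.add(geometry_descriptor).add(name_descriptor)
#   feature_type = builder.build_feature_type()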
| apache-2.0 | 3,524,314,869,859,865,000 | 31.488372 | 112 | 0.622047 | false | 4.633499 | false | false | false |
subutai/nupic | tests/integration/nupic/algorithms/knn_classifier_test/categories_test.py | 10 | 3864 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import logging
import unittest2 as unittest
import numpy
from nupic.algorithms.knn_classifier import KNNClassifier
LOGGER = logging.getLogger(__name__)
class KNNCategoriesTest(unittest.TestCase):
"""Tests how k Nearest Neighbor classifier handles categories"""
def testCategories(self):
# We need determinism!
#
# "Life is like a game of cards. The hand you are dealt is determinism; the
# way you play it is free will." Jawaharlal Nehru
#
# What the heck, let's just set the random seed
numpy.random.seed(42)
failures, _knn = simulateCategories()
self.assertEqual(len(failures), 0,
"Tests failed: \n" + failures)
def simulateCategories(numSamples=100, numDimensions=500):
"""Simulate running KNN classifier on many disjoint categories"""
failures = ""
LOGGER.info("Testing the sparse KNN Classifier on many disjoint categories")
knn = KNNClassifier(k=1, distanceNorm=1.0, useSparseMemory=True)
for i in range(0, numSamples):
# select category randomly and generate vector
c = 2*numpy.random.randint(0, 50) + 50
v = createPattern(c, numDimensions)
knn.learn(v, c)
# Go through each category and ensure we have at least one from each!
for i in range(0, 50):
c = 2*i+50
v = createPattern(c, numDimensions)
knn.learn(v, c)
errors = 0
for i in range(0, numSamples):
# select category randomly and generate vector
c = 2*numpy.random.randint(0, 50) + 50
v = createPattern(c, numDimensions)
inferCat, _kir, _kd, _kcd = knn.infer(v)
if inferCat != c:
LOGGER.info("Mistake with %s %s %s %s %s", v[v.nonzero()], \
"mapped to category", inferCat, "instead of category", c)
LOGGER.info(" %s", v.nonzero())
errors += 1
if errors != 0:
failures += "Failure in handling non-consecutive category indices\n"
# Test closest methods
errors = 0
for i in range(0, 10):
# select category randomly and generate vector
c = 2*numpy.random.randint(0, 50) + 50
v = createPattern(c, numDimensions)
p = knn.closestTrainingPattern(v, c)
if not (c in p.nonzero()[0]):
LOGGER.info("Mistake %s %s", p.nonzero(), v.nonzero())
LOGGER.info("%s %s", p[p.nonzero()], v[v.nonzero()])
errors += 1
if errors != 0:
failures += "Failure in closestTrainingPattern method\n"
return failures, knn
def createPattern(c, numDimensions):
"""
Create a sparse pattern from category c with the given number of dimensions.
The pattern is created by setting element c to be a high random number.
  Elements c-1 and c+1 are set to low random numbers. numDimensions must be > c+1.
"""
v = numpy.zeros(numDimensions)
v[c] = 5*numpy.random.random() + 10
v[c+1] = numpy.random.random()
if c > 0:
v[c-1] = numpy.random.random()
return v
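# For example (an illustrative call, not part of the original tests): with
# numDimensions=500 and c=52, createPattern returns a 500-element vector that is
# zero everywhere except v[52] (roughly in [10, 15)) and small random values at
# v[51] and v[53].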
if __name__ == "__main__":
unittest.main()
| agpl-3.0 | 3,642,878,663,968,790,000 | 29.666667 | 79 | 0.658644 | false | 3.726133 | true | false | false |
happeninghq/happening | src/happening/templatetags/forms.py | 2 | 1471 | """Form helpers."""
from django import template
from django.template.loader import render_to_string
from collections import OrderedDict
register = template.Library()
@register.filter
def render_as_blocks(form):
"""Render a form using blocks to contain sections."""
o_label_suffix = form.label_suffix
form.label_suffix = ""
categories = {}
for bf in form:
field = bf.field
if not hasattr(field, "category"):
# This should deal with EnabledDisabledFields
if hasattr(field, "field"):
field = field.field
category = "General"
if hasattr(field, "category"):
category = field.category
if category not in categories:
categories[category] = []
categories[category].append(bf)
# Sort categories alphabetically
categories = OrderedDict(sorted(categories.items()))
rendered = render_to_string("forms/_blocks_form.html",
{"categories": categories})
form.label_suffix = o_label_suffix
return rendered
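# Convention inferred from the code above (the example field is hypothetical):
# a form field opts into a named block by carrying a `category` attribute, e.g.
#   name = forms.CharField()
#   name.category = "Profile"
# Fields without a category fall back to the "General" block, and blocks are
# rendered alphabetically by forms/_blocks_form.html.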
@register.filter
def render(form):
"""Render a form."""
o_label_suffix = form.label_suffix
form.label_suffix = ""
rendered = render_to_string("forms/_form.html", {"form": form})
form.label_suffix = o_label_suffix
return rendered
@register.simple_tag
def render_field(field, name, value):
"""Render a field."""
return field.widget.render(name, value, {})
| mit | -6,167,044,077,358,500,000 | 26.240741 | 67 | 0.634262 | false | 4.263768 | false | false | false |