repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) |
---|---|---|---|---|---|---|---|---|---|---|
NejcZupec/ggrc-core | test/selenium/src/lib/page/dashboard.py | 1 | 4647 | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from selenium.common import exceptions
from lib import base
from lib.constants import url
from lib.constants import locator
from lib import environment
from lib import decorator
from lib.page import widget_bar
from lib.page import lhn
class UserList(base.Component):
locators = locator.PageHeader
def __init__(self, driver):
super(UserList, self).__init__(driver)
self.button_my_work = base.Button(self._driver,
self.locators.BUTTON_MY_WORK)
self.button_admin_dashboard = base.Button(
self._driver, self.locators.BUTTON_ADMIN_DASHBOARD)
self.button_data_import = base.Button(
self._driver, self.locators.BUTTON_DATA_IMPORT)
self.button_data_export = base.Button(
self._driver, self.locators.BUTTON_DATA_EXPORT)
self.button_logout = base.Button(
self._driver, self.locators.BUTTON_LOGOUT)
self.notifications = base.Label(
self._driver, self.locators.NOTIFICATIONS)
self.checkbox_daily_digest = base.Checkbox(
self._driver, self.locators.CHECKBOX_DAILY_DIGEST)
def select_my_work(self):
"""
Returns:
widget.DataAssets
"""
self.button_my_work.click()
return Dashboard(self._driver)
def select_admin_dashboard(self):
"""
Returns:
admin_dashboard.AdminDashboard
"""
self.button_admin_dashboard.click()
return AdminDashboard(self._driver)
def select_import_data(self):
raise NotImplementedError
def select_export_data(self):
raise NotImplementedError
@decorator.wait_for_redirect
def select_logout(self):
raise NotImplementedError
def check_daily_email_digest(self):
"""Checks the daily checkbox"""
self.checkbox_daily_digest.check()
def uncheck_daily_email_digest(self):
"""Unchecks the daily checkbox"""
self.checkbox_daily_digest.uncheck()
class Header(base.Component):
"""Header of the page"""
locators = locator.PageHeader
def __init__(self, driver):
super(Header, self).__init__(driver)
self.toggle_lhn_menu = None
self.button_dashboard = None
self.button_search = None
self.button_my_tasks = None
self.button_all_objects = None
self.toggle_user_dropdown = None
self._refresh_elements()
def _refresh_elements(self):
self.button_dashboard = base.Button(self._driver,
self.locators.BUTTON_DASHBOARD)
self.button_search = base.Button(self._driver,
self.locators.BUTTON_SEARCH)
self.button_my_tasks = base.Button(self._driver,
self.locators.BUTTON_MY_TASKS)
self.button_all_objects = base.Button(
self._driver, self.locators.BUTTON_ALL_OBJECTS)
self.toggle_user_dropdown = base.Toggle(
self._driver, self.locators.TOGGLE_USER_DROPDOWN)
self.toggle_lhn_menu = base.Toggle(self._driver,
self.locators.TOGGLE_LHN)
def open_lhn_menu(self):
"""Opens LHN on the Dashboard.
For some reason, after creating 2 objects via LHN (and clicking 2x on the
LHN button), the third time the toggle_lhn_menu signals it's not a part of
the DOM anymore. For this reason we're refreshing the elements.
Returns:
LhnMenu
"""
try:
self.toggle_lhn_menu.toggle()
return lhn.Menu(self._driver)
except exceptions.StaleElementReferenceException:
self._refresh_elements()
return self.open_lhn_menu()
def close_lhn_menu(self):
"""Closes LHN on the Dashboard
Returns:
LhnMenu
"""
try:
self.toggle_lhn_menu.toggle(on=False)
except exceptions.WebDriverException:
self._refresh_elements()
self.close_lhn_menu()
def click_on_logo(self):
"""
Returns:
widget.DataAssets
"""
raise NotImplementedError
def open_user_list(self):
"""
Returns:
UserList
"""
self.toggle_user_dropdown.click()
return UserList(self._driver)
class Dashboard(widget_bar.Dashboard, Header):
"""The main dashboard page"""
URL = environment.APP_URL + url.DASHBOARD
def __init__(self, driver):
super(Dashboard, self).__init__(driver)
self.button_help = base.Button(self._driver, self.locators.BUTTON_HELP)
class AdminDashboard(widget_bar.AdminDashboard,
Header):
"""Admin dashboard page model"""
URL = environment.APP_URL + url.ADMIN_DASHBOARD
def __init__(self, driver):
super(AdminDashboard, self).__init__(driver)
| apache-2.0 | -4,973,490,085,282,803,000 | 28.598726 | 78 | 0.655907 | false |
docusign/docusign-python-client | docusign_esign/models/group_brands.py | 1 | 5695 | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GroupBrands(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'recipient_brand_id_default': 'str',
'sender_brand_id_default': 'str',
'brand_options': 'list[Brand]'
}
attribute_map = {
'recipient_brand_id_default': 'recipientBrandIdDefault',
'sender_brand_id_default': 'senderBrandIdDefault',
'brand_options': 'brandOptions'
}
def __init__(self, recipient_brand_id_default=None, sender_brand_id_default=None, brand_options=None): # noqa: E501
"""GroupBrands - a model defined in Swagger""" # noqa: E501
self._recipient_brand_id_default = None
self._sender_brand_id_default = None
self._brand_options = None
self.discriminator = None
if recipient_brand_id_default is not None:
self.recipient_brand_id_default = recipient_brand_id_default
if sender_brand_id_default is not None:
self.sender_brand_id_default = sender_brand_id_default
if brand_options is not None:
self.brand_options = brand_options
@property
def recipient_brand_id_default(self):
"""Gets the recipient_brand_id_default of this GroupBrands. # noqa: E501
The brand seen by envelope recipients when a brand is not explicitly set. # noqa: E501
:return: The recipient_brand_id_default of this GroupBrands. # noqa: E501
:rtype: str
"""
return self._recipient_brand_id_default
@recipient_brand_id_default.setter
def recipient_brand_id_default(self, recipient_brand_id_default):
"""Sets the recipient_brand_id_default of this GroupBrands.
The brand seen by envelope recipients when a brand is not explicitly set. # noqa: E501
:param recipient_brand_id_default: The recipient_brand_id_default of this GroupBrands. # noqa: E501
:type: str
"""
self._recipient_brand_id_default = recipient_brand_id_default
@property
def sender_brand_id_default(self):
"""Gets the sender_brand_id_default of this GroupBrands. # noqa: E501
The brand seen by envelope senders when a brand is not explicitly set. # noqa: E501
:return: The sender_brand_id_default of this GroupBrands. # noqa: E501
:rtype: str
"""
return self._sender_brand_id_default
@sender_brand_id_default.setter
def sender_brand_id_default(self, sender_brand_id_default):
"""Sets the sender_brand_id_default of this GroupBrands.
The brand seen by envelope senders when a brand is not explicitly set. # noqa: E501
:param sender_brand_id_default: The sender_brand_id_default of this GroupBrands. # noqa: E501
:type: str
"""
self._sender_brand_id_default = sender_brand_id_default
@property
def brand_options(self):
"""Gets the brand_options of this GroupBrands. # noqa: E501
The list of brands. # noqa: E501
:return: The brand_options of this GroupBrands. # noqa: E501
:rtype: list[Brand]
"""
return self._brand_options
@brand_options.setter
def brand_options(self, brand_options):
"""Sets the brand_options of this GroupBrands.
The list of brands. # noqa: E501
:param brand_options: The brand_options of this GroupBrands. # noqa: E501
:type: list[Brand]
"""
self._brand_options = brand_options
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(GroupBrands, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GroupBrands):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| mit | -3,798,903,295,641,539,000 | 31.919075 | 140 | 0.599473 | false |
scheunemann/robotActionController | robotActionController/ActionRunner/groupRunner.py | 1 | 2761 | import logging
from base import ActionRunner, ActionManager
from collections import namedtuple
from gevent.pool import Group
from gevent import sleep
from robotActionController.Data.storage import StorageFactory
from robotActionController.Data.Model import Action
class GroupRunner(ActionRunner):
supportedClass = 'GroupAction'
Runable = namedtuple('GroupAction', ActionRunner.Runable._fields + ('actions',))
def __init__(self, group, robot, *args, **kwargs):
super(GroupRunner, self).__init__(group)
self._robot = robot
self._handle = None
def _runInternal(self, action):
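# Kick off every child action asynchronously via the shared ActionManager,
# wait for the whole group to finish, collect each child's output, and
# report success only if every child action succeeded.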
manager = ActionManager.getManager(self._robot)
handles = [manager.executeActionAsync(a) for a in action.actions]
self._handle = Group([h for h in handles if h])
self.waitForComplete()
self._output.extend([o for h in handles if h for o in h.output])
return all([h.value for h in handles if h])
@staticmethod
def getRunable(action):
if type(action) == dict and action.get('type', None) == GroupRunner.supportedClass:
actionCopy = dict(action)
actions = actionCopy['actions']
actionCopy['actions'] = []
for groupAction in actions:
action = None
if 'action' not in groupAction:
id_ = groupAction.get('action_id', None) or groupAction.get('id', None)
if id_:
session = StorageFactory.getNewSession()
action = ActionRunner.getRunable(session.query(Action).get(id_))
session.close()
else:
action = ActionRunner.getRunable(groupAction['action'])
actionCopy['actions'].append(action)
return GroupRunner.Runable(actionCopy['name'],
actionCopy.get('id', None),
actionCopy['type'],
actionCopy['actions'])
elif action.type == GroupRunner.supportedClass:
actions = [ActionRunner.getRunable(a) for a in action.actions]
return GroupRunner.Runable(action.name, action.id, action.type, actions)
else:
logger = logging.getLogger(GroupRunner.__name__)
logger.error("Action: %s has an unknown action type: %s" % (action.name, action.type))
return None
def isValid(self, group):
valid = True
for action in group.actions:
valid = valid & ActionRunner(self._robot).isValid(action)
if not valid:
break
return valid
def waitForComplete(self):
if self._handle:
self._handle.join()
| mit | -2,707,494,878,154,074,600 | 40.208955 | 98 | 0.586382 | false |
ahirner/TabulaRazr-OS | deprecated/GetMuniBondData.py | 1 | 2921 |
import ConfigParser
import gc
import glob
import io
import os
import cStringIO
import re
import subprocess
import string
import sys
import numpy as np
import pandas as pd
def GetConfigParm(section):
"""Return every option in the given config section as a dict (None on error)."""
dict1 = {}
options = config.options(section)
for option in options:
try:
dict1[option] = config.get(section, option)
if dict1[option] == -1:
DebugPrint("skip: %s" % option)
except:
print("exception on %s!" % option)
dict1[option] = None
return dict1
# Main Process
# Read Configuration Parameters
config = ConfigParser.RawConfigParser()
config.read('GetMuniBondData.cfg')
OutputFileName = config.get("FileLocations","OutputFileName")
OutputColumnSeparator = config.get("FileLocations","OutputColumnSeparator")
InputPath = config.get("FileLocations","InputPath")
# Initialize Data Frame
df = pd.DataFrame(np.zeros(0 , dtype=[('file', 'a99'),('caption', 'a99'),('value', 'a99')]))
for file in glob.glob(InputPath):
printline = 0
linesleft = 0
blanklines = 0
intxtfilename = file + ".txt"
out, err = subprocess.Popen(["pdftotext", "-layout", file, file + ".txt" ]).communicate()
try:
intxtfile = io.open(intxtfilename, mode='rb')
except:
print "Unable to extract text from " + file
continue
lines = intxtfile.readlines()
topfound = 0
headerline = 0
for line in lines:
strippedline = line.upper().strip()
if topfound == 0 and string.find(line," $") > 0:
headerline = 1
topfound = 1
if 1 <= headerline <= 3:
caption = "HEADER " + str(headerline)
value = strippedline
df = df.append({'file':file, 'caption':caption, 'value':value},ignore_index=True)
headerline = headerline + 1
continue
if strippedline == "SOURCES AND USES OF FUNDS" \
or strippedline == "SOURCES AND USES OF FUNDS*" \
or strippedline == "ESTIMATED SOURCES AND USES OF FUNDS" \
or strippedline == "ESTIMATED SOURCES AND USES OF FUNDS*" \
or strippedline == "SOURCES AND USES OF FUNDS(1)" \
or strippedline == "ESTIMATED SOURCES AND USES OF FUNDS(1)" \
or strippedline == "PLAN OF FINANCE AND ESTIMATED SOURCES AND USES OF FUNDS":
printline = 1
linesleft = 25
if printline == 1:
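# Matches a trailing dollar amount such as "$ 1,234.56" or "980.1":
# optional "$", optional spaces, digits/commas, mandatory 1-2 decimal places.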
dollar_amount_regex = re.compile("[\$]{0,1}[\s]{0,6}[0-9,]{0,15}(\.[0-9]{1,2})$")
dollar_amount_match = re.search(dollar_amount_regex,strippedline)
if dollar_amount_match:
caption = strippedline[:dollar_amount_match.start(0)].strip()
value = strippedline[dollar_amount_match.start(0):].strip()
df = df.append({'file':file, 'caption':caption, 'value':value},ignore_index=True)
if len(line.strip()) < 5 and linesleft < 10:
blanklines = blanklines + 1
linesleft = linesleft - 1
if linesleft == 0:
printline = 0
del lines
gc.collect()
df.to_csv(OutputFileName,OutputColumnSeparator,index=False)
| agpl-3.0 | 1,245,297,047,318,944,500 | 26.637255 | 92 | 0.65457 | false |
ncbray/pystream | bin/translator/dataflowtransform/glsltranslator/poolimplementation.py | 1 | 2229 | # Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from language.glsl import ast as glsl
from . slotstruct import SlotStruct
import re
invalidNameChar = re.compile('[^\w\d_]')
# HACK does not ensure the first character is not a digit.
def ensureValidName(name):
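# e.g. "pool main:color" -> "pool_main_color"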
return re.sub(invalidNameChar, '_', name)
# A pool contains O objects and F fields.
class PoolImplementation(object):
def __init__(self, poolinfo, basename):
self.poolinfo = poolinfo
# HACK
poolinfo.impl = self
self.basename = basename
self.stores = {}
self.types = {}
self.struct = SlotStruct(poolinfo)
#self.struct.dump()
#print
def _getFieldRef(self, field, slotinfo):
key = field, slotinfo
if not key in self.stores:
fieldimpl = slotinfo.getPoolInfo().impl
t = fieldimpl.struct.ast
name = "%s_%s_%s" % (self.basename, field.type, field.name.pyobj)
name = ensureValidName(name)
lcl = glsl.Local(t, name)
self.stores[key] = lcl
else:
lcl = self.stores[key]
return lcl
def _deref(self, ref, index):
if self.poolinfo.isSingleUnique():
return ref
else:
return glsl.GetSubscript(ref, index)
def getType(self, index):
assert self.poolinfo.typeTaken
#assert False, "hack"
return glsl.Load(index, 'type')
def getField(self, index, field, slotinfo):
assert slotinfo.isSlotInfo(), slotinfo
ref = self._getFieldRef(field, slotinfo)
return self._deref(ref, index)
def getValue(self, index, type):
assert self.struct.inlined
return index
def allocate(self, translator, slot, g):
if self.poolinfo.isSingleUnique():
return []
else:
src = translator.slotRef(translator.makeConstant(0), slot)
return translator.assignmentTransfer(src, g)
| apache-2.0 | -7,869,629,408,877,145,000 | 25.855422 | 74 | 0.716016 | false |
Tooskich/python_core | rankings/views.py | 1 | 2284 | #-*- coding: utf-8 -*-
import ujson
from django.http import HttpResponse, Http404
from django.core.cache import cache
from django.core.management import call_command
from django.db import connection
RACES_PER_VIEW = 25
def last_races(request):
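# Return the RACES_PER_VIEW most recent races as a JSON array.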
cursor = connection.cursor()
query = "SELECT id, info, category, genre, link, location, discipline, raceId, date FROM rankings_races ORDER BY date DESC LIMIT " + \
str(RACES_PER_VIEW) + ";"
cursor.execute(query)
races = dictfetchall(cursor)
races = ujson.dumps(races, encode_html_chars=False, ensure_ascii=False)
res = HttpResponse(
races,
content_type="application/json"
)
return res
def race(request, pk):
pk = str(int(pk))
cursor = connection.cursor()
query = "SELECT id, info, category, genre, link, location, discipline, `table`, raceId, date FROM rankings_races WHERE id='" + \
pk + "';"
cursor.execute(query)
races = dictfetchall(cursor)[0]
races = ujson.dumps(races, encode_html_chars=False, ensure_ascii=False)
res = HttpResponse(
races,
content_type="application/json"
)
return res
def race_category(request, category):
if category not in ['WC', 'EC', 'FIS']:
raise Http404
page = request.GET.get('page')
page = 0 if page is None else (int(page) - 1)
nb_races = RACES_PER_VIEW * 2 if 'FIS' in category else RACES_PER_VIEW
offset = nb_races * page
cursor = connection.cursor()
query = "SELECT id, info, category, genre, link, location, discipline, raceId, date FROM rankings_races WHERE category='" + \
category + "' ORDER BY date DESC LIMIT " + \
str(offset) + ", " + str(nb_races) + ";"
cursor.execute(query)
races = dictfetchall(cursor)
races = ujson.dumps(races, encode_html_chars=False, ensure_ascii=False)
res = HttpResponse(
races,
content_type="application/json"
)
return res
def update(request):
call_command('updateraces', verbosity=3, interactive=False)
return HttpResponse('1')
def dictfetchall(cursor):
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
| apache-2.0 | -1,774,803,645,420,551,400 | 30.287671 | 138 | 0.648424 | false |
JohnDickerson/EnvyFree | driver.py | 1 | 10238 | #!/usr/bin/env python
from model import Model
from model import DupValues
from model import DistTypes
from model import ObjType
import allocator
import bounds
from allocator import DoesNotExistException
import argparse
import random
import time
import csv
import sys
current_ms_time = lambda: int(round(time.time() * 1000))
def run(num_agents, num_items, prefs, dup_values):
# Randomly generate some data for N agents and M items
if prefs.dist_type == DistTypes.urand_int:
m = Model.generate_urand_int(num_agents, num_items, dup_values)
elif prefs.dist_type == DistTypes.urand_real:
m = Model.generate_urand_real(num_agents, num_items, dup_values)
elif prefs.dist_type == DistTypes.zipf_real:
m = Model.generate_zipf_real(num_agents, num_items, 2., dup_values)
elif prefs.dist_type == DistTypes.polya_urn_real:
m = Model.generate_polya_urn_real(num_agents, num_items, 2, 1)
elif prefs.dist_type == DistTypes.correlated_real:
m = Model.generate_correlated_real(num_agents, num_items)
else:
raise Exception("Distribution type {0} is not recognized.".format(prefs.dist_type))
# Do our bounding at the root to check for naive infeasibility
#is_possibly_feasible, bounding_s = bounds.max_contested_feasible(m)
#if not is_possibly_feasible:
# print "Bounded infeasible!"
# sys.exit(-1)
# Compute an envy-free allocation (if it exists)
stats = allocator.allocate(m, prefs)
return stats
def main():
parser = argparse.ArgumentParser(description='Find envy-free allocations.')
parser.add_argument("-f", "--filename", dest="filename", required=True,
metavar="FILE", help="write comma-delimited csv output to FILE")
parser.add_argument("-r", "--num_repeats", type=int, dest="num_repeats", default=10,
metavar="R", help="num repeat runs per parameter setting")
parser.add_argument("-n", type=int, nargs=3, dest="N", default=(5,6,1),
metavar=("N-min","N-max","stepsize"), help="range(a,b,c) iterating over num agents")
parser.add_argument("-m", type=int, nargs=3, dest="M", default=(5,10,1),
metavar=("M-min","M-max","stepsize"), help="range(a,b,c) iterating over num items")
parser.add_argument("--obj-feas", action="store_const", const=ObjType.feasibility, dest="obj_type", default=ObjType.feasibility,
help="Objective function: feasibility")
parser.add_argument("--obj-social", action="store_const", const=ObjType.social_welfare_max, dest="obj_type", default=ObjType.feasibility,
help="Objective function: max social welfare")
parser.add_argument("--dist-urand-int", action="store_const", const=DistTypes.urand_int, dest="dist_type", default=DistTypes.urand_real,
help="Utility distribution integral in {0,...,10*#Items}")
parser.add_argument("--dist-urand-real", action="store_const", const=DistTypes.urand_real, dest="dist_type", default=DistTypes.urand_real,
help="Utility distribution u.a.r. real in U[0,1]")
parser.add_argument("--dist-zipf-real", action="store_const", const=DistTypes.zipf_real, dest="dist_type", default=DistTypes.urand_real,
help="Utility distribution drawn from Zipf with alpha=2.")
parser.add_argument("--dist-polya-urn-real", action="store_const", const=DistTypes.polya_urn_real, dest="dist_type", default=DistTypes.urand_real,
help="Utility distribution drawn from augmented Polya-Eggenberger urn model.")
parser.add_argument("--dist-correlated-real", action="store_const", const=DistTypes.correlated_real, dest="dist_type", default=DistTypes.urand_real,
help="Utility distribution correlated intrinsic item value.")
parser.add_argument("-s", "--seed", type=long, dest="seed", default=0,
help="Sets the random seed in Python")
parser.add_argument("--fathom-too-much-envy", action="store_true", dest="branch_fathom_too_much_envy", default=False,
help="Fathoms a path if #unallocated items is less than #envious agents at node")
parser.add_argument("--branch-avg-value", action="store_true", dest="branch_avg_value", default=False,
help="Branching based on average item value and max agent value")
parser.add_argument("--branch-sos1-envy", action="store_true", dest="branch_sos1_envy", default=False,
help="SOS1 branch to most envious agent [NOT IMPLEMENTED]")
parser.add_argument("--prioritize-avg-value", action="store_true", dest="prioritize_avg_value", default=False,
help="Sets CPLEX branching priority based on average item value.")
parser.add_argument("--alternate-IP-model", action="store_true", dest="alternate_IP_model", default=False,
help="Solves an alternate IP model.")
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=False,
help="Prints a bunch of stats to stdout as we solve models.")
parser.add_argument("-t", "--num-threads", type=int, default=1, dest="num_threads",
help="Sets the number of threads used by CPLEX.")
args = parser.parse_args()
if args.alternate_IP_model \
and (args.obj_type != ObjType.feasibility \
or args.branch_fathom_too_much_envy \
or args.branch_avg_value \
or args.branch_sos1_envy):
print "Argument error: running the alternate IP model (--alternate-IP-model) disallows" \
" any objective other than feasibility (--obj-feas); furthermore, we haven't" \
" implemented any branching rules for the alternate IP model yet (--fathom-too-much-envy," \
" --branch-avg-value, --branch-sos1-envy)"
sys.exit(-1)
# If a random seed was explicitly passed in, set it
if hasattr(args, "seed"):
random.seed(args.seed)
else:
random.seed()
# How to handle duplicate valuations for different items by the same agent?
dup_values = DupValues.allowed
# Write one row per run, or one row per N runs (aggregate)?
write_all = True
with open(args.filename, 'wb') as csvfile:
# Write overall stats to out.csv
writer = csv.writer(csvfile, delimiter=',')
for num_agents in range(args.N[0], args.N[1], args.N[2]):
# Phase transition plots runtime, %feas vs. #items
for num_items in range(args.M[0], args.M[1], args.M[2]):
# Never feasible if fewer items than agents
if num_items < num_agents:
continue
build_s_accum = solve_s_accum = 0.0
build_s_min = solve_s_min = 10000.0
build_s_max = solve_s_max = -1.0
sol_exists_accum = 0
for _ in xrange(args.num_repeats):
# Generate an instance and solve it; returns runtime of IP write+solve
stats = run(num_agents, num_items, args, dup_values)
sol_exists, build_s, solve_s = stats['ModelFeasible'], stats['ModelBuildTime'], stats['ModelSolveTime']
# Maintain stats on the runs
sol_exists_accum += 1 if sol_exists else 0
build_s_accum += build_s
solve_s_accum += solve_s
if build_s < build_s_min:
build_s_min = build_s
if solve_s < solve_s_min:
solve_s_min = solve_s
if build_s > build_s_max:
build_s_max = build_s
if solve_s > solve_s_max:
solve_s_max = solve_s
# If we're recording ALL data, write details for this one run
if write_all:
writer.writerow([args.seed, args.num_threads,
num_agents, num_items, args.alternate_IP_model,
args.dist_type, args.num_repeats, args.obj_type,
args.branch_fathom_too_much_envy, stats['MyTooMuchEnvyBranch'],
args.branch_avg_value, stats['MyBranchOnAvgItemValue'],
args.branch_sos1_envy, stats['MyBranchSOS1Envy'],
args.prioritize_avg_value,
sol_exists, stats['MIPNodeCount'], build_s, solve_s, stats['MIPObjVal']
])
csvfile.flush()
# Report stats over all N runs, both to stdout and to out.csv
build_s_avg = build_s_accum / args.num_repeats
solve_s_avg = solve_s_accum / args.num_repeats
if args.verbose == True:
print "Build Avg: {0:3f}, Min: {1:3f}, Max: {2:3f}".format(build_s_avg, build_s_min, build_s_max)
print "Solve Avg: {0:3f}, Min: {1:3f}, Max: {2:3f}".format(solve_s_avg, solve_s_min, solve_s_max)
print "N={0}, M={1}, fraction feasible: {2} / {3}".format(num_agents, num_items, sol_exists_accum, args.num_repeats)
# If we're only writing aggregate data, write that now
if not write_all:
writer.writerow([args.seed, num_agents, num_items, args.alternate_IP_model,
args.dist_type, args.num_repeats, args.obj_type,
args.branch_fathom_too_much_envy,
args.branch_avg_value,
sol_exists_accum,
build_s_avg, build_s_min, build_s_max,
solve_s_avg, solve_s_min, solve_s_max,
])
csvfile.flush()
if __name__ == '__main__':
main()
| gpl-2.0 | -7,730,505,672,266,138,000 | 53.169312 | 152 | 0.574526 | false |
bhenne/MoSP | mosp_examples/random_wiggler.py | 1 | 1499 | #!/bin/env python
""" Beginners' example: random movement
- random movement
- output to visual player, which is executed as child process
- you may try the other commented monitor examples - you can choose a single or multiple monitors
"""
import sys
sys.path.append("..")
import time
import random
from mosp.core import Simulation, Person
from mosp.geo import osm
from mosp.impl import movement
from mosp.monitors import *
__author__ = "P. Tute"
__maintainer__ = "B. Henne"
__contact__ = "[email protected]"
__copyright__ = "(c) 2010-2011, DCSec, Leibniz Universitaet Hannover, Germany"
__license__ = "GPLv3"
class RandomWiggler(Person):
"""Implements a simple person doing only random movement on the map.
@author: P. Tute"""
next_target = movement.person_next_target_random
def main():
"""Defines the simulation, map, monitors, persons."""
t = time.time()
s = Simulation(geo=osm.OSMModel('../data/hannover2.osm'), rel_speed=40)
print time.time() - t
#m = s.add_monitor(EmptyMonitor, 2)
#m = s.add_monitor(PipePlayerMonitor, 2)
#m = s.add_monitor(RecordFilePlayerMonitor, 2)
#m = s.add_monitor(RecordFilePlayerMonitor, 2, filename='exampleoutput_RecordFilePlayerMonitor')
#m = s.add_monitor(ChildprocessPlayerChamplainMonitor, 2)
m = s.add_monitor(SocketPlayerMonitor, 2)
s.add_persons(RandomWiggler, 1000, monitor=m)
s.run(until=1000, real_time=True, monitor=True)
if __name__ == '__main__':
main()
| gpl-3.0 | 4,466,338,805,413,161,000 | 29.591837 | 101 | 0.693129 | false |
thinnect/serdepa | serdepa/serdepa.py | 1 | 17372 | """
serdepa.py: Binary packet serialization and deserialization library.
"""
from __future__ import unicode_literals
from functools import reduce
import struct
import collections
import warnings
import copy
import math
from codecs import encode
from six import add_metaclass, BytesIO
from .exceptions import PacketDefinitionError, DeserializeError, SerializeError
__author__ = "Raido Pahtma, Kaarel Ratas"
__license__ = "MIT"
def add_property(cls, attr, attr_type):
if hasattr(cls, attr):
raise PacketDefinitionError(
"Attribute {} already exists on {}.".format(attr, cls.__name__)
)
else:
if isinstance(attr_type, BaseIterable) or isinstance(attr_type, ByteString):
setter = None
def getter(self):
return getattr(self, '_%s' % attr)
elif isinstance(attr_type, Length):
setter = None
def getter(self):
return len(getattr(
self,
'_field_registry'
)[getattr(self, '_depends')[attr]])
elif isinstance(attr_type, SuperSerdepaPacket):
def setter(self, v):
if isinstance(v, self._fields[attr][0]):
setattr(self, '_%s' % attr, v)
self._field_registry[attr] = v
else:
raise ValueError(
"Cannot assign a value of type {} "
"to field {} of type {}".format(
v.__class__.__name__,
attr,
getattr(self, '_%s' % attr).__class__.__name__
)
)
def getter(self):
return getattr(self, '_%s' % attr)
else:
def setter(self, v):
setattr(getattr(self, '_%s' % attr), "value", v)
def getter(self):
return getattr(self, '_%s' % attr).value
setattr(cls, attr, property(getter, setter))
class SuperSerdepaPacket(type):
"""
Metaclass of the SerdepaPacket object. Essentially does the following:
Reads the _fields_ attribute of the class and for each 2- or
3-tuple entry sets up the properties of the class to the right
names. Also checks that each (non-last) List instance has a
Length field associated with it.
"""
def __init__(cls, what, bases=None, attrs=None):
setattr(cls, "_fields", collections.OrderedDict())
setattr(cls, "_depends", dict())
if '_fields_' in attrs:
for field in attrs['_fields_']:
if len(field) == 2 or len(field) == 3:
if len(field) == 2:
default = None
elif isinstance(field[1], Length):
raise PacketDefinitionError(
"A Length field can't have a default value: {}".format(
field
)
)
else:
default = field[2]
name, value = field[0], field[1]
add_property(cls, name, value)
if name in getattr(cls, "_fields"):
raise PacketDefinitionError(
"The field {} appears more than once in {}.".format(
name, cls.__name__
)
)
getattr(cls, "_fields")[name] = [value, default]
if not (
isinstance(value, (SerdepaPacket, BaseField)) or
issubclass(value, (SerdepaPacket, BaseField))
):
raise PacketDefinitionError(
"Invalid type {} of field {} in {}".format(
value.__name__, name, cls.__name__
)
)
elif isinstance(value, Length):
getattr(cls, "_depends")[name] = value._field
elif isinstance(value, (List, ByteString)):
if not (name in getattr(cls, "_depends").values() or field == attrs['_fields_'][-1]):
raise PacketDefinitionError(
"Only the last field can have an undefined length ({} of type {})".format(
name,
type(value)
)
)
else:
raise PacketDefinitionError("A field needs both a name and a type: {}".format(field))
super(SuperSerdepaPacket, cls).__init__(what, bases, attrs)
@add_metaclass(SuperSerdepaPacket)
class SerdepaPacket(object):
"""
The superclass for any packets. Defining a subclass works as such:
class Packet(SerdepaPacket):
_fields_ = [
("name", type[, default]),
("name", type[, default]),
...
]
Has the following public methods:
.serialize() -> bytearray
.deserialize(bytearray) raises ValueError on bad input
and the class method
.minimal_size() -> int
"""
def __init__(self, **kwargs):
self._field_registry = collections.OrderedDict()
for name, (type_, default) in self._fields.items():
if name in kwargs:
if issubclass(type_, SerdepaPacket):
self._field_registry[name] = copy.copy(kwargs[name])
else:
self._field_registry[name] = type_(initial=copy.copy(kwargs[name]))
elif default:
self._field_registry[name] = type_(initial=copy.copy(default))
else:
self._field_registry[name] = type_()
setattr(self, '_%s' % name, self._field_registry[name])
def serialize(self):
serialized = BytesIO()
for name, field in self._field_registry.items():
if name in self._depends:
serialized.write(
field.serialize(self._field_registry[self._depends[name]].length)
)
else:
serialized.write(field.serialize())
ret = serialized.getvalue()
serialized.close()
return ret
def deserialize(self, data, pos=0, final=True):
for i, (name, field) in enumerate(self._field_registry.items()):
if pos >= len(data):
if i == len(self._field_registry) - 1 and isinstance(field, (List, ByteString)):
break
else:
raise DeserializeError("Invalid length of data to deserialize.")
try:
pos = field.deserialize(data, pos, False)
except AttributeError:
for key, value in self._depends.items():
if name == value:
pos = field.deserialize(data, pos, False, self._field_registry[key]._type.value)
break
else:
pos = field.deserialize(data, pos, False, -1)
if pos > len(data):
raise DeserializeError("Invalid length of data to deserialize. {}, {}".format(pos, len(data)))
if final and pos != len(data):
raise DeserializeError(
"After deserialization, {} bytes were left.".format(len(data)-pos+1)
)
return pos
def serialized_size(self):
size = 0
for name, field in self._field_registry.items():
size += field.serialized_size()
return size
@classmethod
def minimal_size(cls):
size = 0
for name, (_type, default) in cls._fields.items():
size += _type.minimal_size()
return size
def __str__(self):
return encode(self.serialize(), "hex").decode().upper()
def __eq__(self, other):
return str(self) == str(other)
class BaseField(object):
def __call__(self, **kwargs):
ret = copy.copy(self)
if "initial" in kwargs:
ret._set_to(kwargs["initial"])
return ret
# def __call__(self):
# return NotImplemented
#
def serialize(self):
return bytearray([])
def deserialize(self, value, pos, final=True):
raise NotImplementedError()
@classmethod
def minimal_size(cls):
raise NotImplementedError()
class BaseIterable(BaseField, list):
def __init__(self, initial=[]):
super(BaseIterable, self).__init__()
for value in initial:
self.append(self._type(initial=copy.copy(value)))
def _set_to(self, values):
while len(self) > 0:
self.pop()
for value in values:
self.append(value)
def append(self, value):
if isinstance(value, self._type):
new_value = value
else:
new_value = self._type(initial=value)
super(BaseIterable, self).append(new_value)
def serialize(self):
ret = bytearray()
for i in range(self.length):
ret += self[i].serialize()
return ret
def deserialize(self, value, pos, final=True):
for i in range(self.length):
self[i] = self._type()
pos = self[i].deserialize(value, pos, final=final)
return pos
def __iter__(self):
for i in range(len(self)):
try:
yield self[i].value
except AttributeError:
yield self[i]
class BaseInt(BaseField):
"""
Base class for all integer types. Has _signed (bool) and _format (struct format string).
"""
_length = None
_signed = None
_format = ""
def __init__(self, initial=0):
self._value = initial
def _set_to(self, value):
self._value = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = int(value)
def serialize(self):
return struct.pack(self._format, self._value)
def deserialize(self, value, pos, final=True):
try:
self._value = struct.unpack(self._format, value[pos:pos+self.serialized_size()])[0]
except struct.error as e:
raise DeserializeError("Invalid length of data!", e)
return pos + self.serialized_size()
@classmethod
def serialized_size(cls):
"""
Returns the length of this field in bytes. If the length in bits is not directly
divisible by 8, an extra byte is added.
"""
return int(math.ceil(cls._length/8.0))
def __getattribute__(self, attr):
if attr in ["__lt__", "__le__", "__eq__", "__ne__", "__gt__", "__ge__",
"__add__", "__sub__", "__mul__", "__floordiv__", "__mod__",
"__divmod__", "__pow__", "__lshift__", "__rshift__", "__and__",
"__xor__", "__or__", "__div__", "__truediv__", "__str__"]:
return self._value.__getattribute__(attr)
else:
return super(BaseInt, self).__getattribute__(attr)
def __int__(self):
return self._value
def __repr__(self):
return "{} with value {}".format(self.__class__, self._value)
@classmethod
def minimal_size(cls):
return int(math.ceil(cls._length/8.0))
class Length(BaseField):
"""
A value that defines another field's length.
"""
def __init__(self, object_type, field_name):
self._type = object_type()
self._field = field_name
def serialized_size(self):
return self._type.serialized_size()
def serialize(self, length): # TODO PyCharm does not like this approach, method signatures don't match
self._type.value = length
return self._type.serialize()
def deserialize(self, value, pos, final=True):
return self._type.deserialize(value, pos, final=final)
def minimal_size(self):
return self.serialized_size()
class List(BaseIterable):
"""
An array with its length defined elsewhere.
"""
def __init__(self, object_type, **kwargs):
self._type = object_type
super(List, self).__init__(**kwargs)
@property
def length(self):
return len(self)
def serialized_size(self):
return self._type.serialized_size() * self.length
def deserialize(self, value, pos, final=True, length=None):
if length is None:
raise AttributeError("Unknown length.")
elif length == -1:
del self[:] # clear the internal list - deserialization will overwrite anyway.
for i in range(len(self), (len(value)-pos)//self._type().serialized_size()):
self.append(0)
return super(List, self).deserialize(value, pos, final=final)
else:
del self[:] # clear the internal list - deserialization will overwrite anyway.
for i in range(len(self), length):
self.append(0)
return super(List, self).deserialize(value, pos, final=final)
def minimal_size(cls):
return 0
class Array(BaseIterable):
"""
A fixed-length array of values.
"""
def __init__(self, object_type, length, **kwargs):
self._type = object_type
self._length = length
super(Array, self).__init__(**kwargs)
@property
def length(self):
return self._length
def serialized_size(self):
return self._type.serialized_size() * self.length
def serialize(self):
dl = self.length - len(self)
if dl < 0:
warnings.warn(RuntimeWarning("The number of items in the Array exceeds the length of the array."))
elif dl > 0:
self += [self._type() for _ in range(dl)] # TODO is this correct???
ret = super(Array, self).serialize()
for i in range(dl):
self.pop(-1)
return ret
def deserialize(self, value, pos, final=True):
for i in range(len(self), self.length):
self.append(self._type())
return super(Array, self).deserialize(value, pos, final=final)
def minimal_size(self):
return self.serialized_size()
class ByteString(BaseField):
"""
A variable or fixed-length string of bytes.
"""
def __init__(self, length=None, **kwargs):
if length is not None:
self._data_container = Array(nx_uint8, length)
else:
self._data_container = List(nx_uint8)
super(ByteString, self).__init__(**kwargs)
def __getattr__(self, attr):
if attr not in ['_data_container']:
return getattr(self._data_container, attr)
else:
return super(ByteString, self).__getattribute__(attr)
def __setattr__(self, attr, value):
if attr not in ['_data_container', '_value']:
setattr(self._data_container, attr, value)
else:
super(ByteString, self).__setattr__(attr, value)
@property
def _value(self):
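# Fold the stored byte list into a single integer, treating the first
# byte as most significant (big-endian).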
return reduce(
lambda x, v: x + (v[1] << (8*v[0])),
enumerate(
reversed(list(self._data_container))
),
0
)
def deserialize(self, *args, **kwargs):
return self._data_container.deserialize(*args, **kwargs)
def serialize(self, *args, **kwargs):
return self._data_container.serialize(*args, **kwargs)
def __eq__(self, other):
return self._value == other
def __repr__(self):
return "{} with value {}".format(self.__class__, self._value)
def __str__(self):
return "{value:0{size}X}".format(
value=self._value,
size=self._data_container.serialized_size()*2,
)
def __len__(self):
return len(self._data_container)
class nx_uint8(BaseInt):
_signed = False
_length = 8
_format = ">B"
class nx_int8(BaseInt):
_signed = True
_length = 8
_format = ">b"
class uint8(BaseInt):
_signed = False
_length = 8
_format = "<B"
class int8(BaseInt):
_signed = True
_length = 8
_format = "<b"
class nx_uint16(BaseInt):
_signed = False
_length = 16
_format = ">H"
class nx_int16(BaseInt):
_signed = True
_length = 16
_format = ">h"
class uint16(BaseInt):
_signed = False
_length = 16
_format = "<H"
class int16(BaseInt):
_signed = True
_length = 16
_format = "<h"
class nx_uint32(BaseInt):
_signed = False
_length = 32
_format = ">I"
class nx_int32(BaseInt):
_signed = True
_length = 32
_format = ">i"
class uint32(BaseInt):
_signed = False
_length = 32
_format = "<I"
class int32(BaseInt):
_signed = True
_length = 32
_format = "<i"
class nx_uint64(BaseInt):
_signed = False
_length = 64
_format = ">Q"
class nx_int64(BaseInt):
_signed = True
_length = 64
_format = ">q"
class uint64(BaseInt):
_signed = False
_length = 64
_format = "<Q"
class int64(BaseInt):
_signed = True
_length = 64
_format = "<q"
| mit | -1,633,917,069,778,689,300 | 28.394247 | 110 | 0.523141 | false |
ytisf/PyExfil | pyexfil/Comm/GQUIC/__init__.py | 1 | 1560 | import zlib
import socket
import struct
from pyexfil.includes.prepare import PrepFile, DEFAULT_KEY
GQUIC_PORT = 443
def _send_one(data, pckt_counter, dest_ip, dest_port=GQUIC_PORT, ccc=None):
"""
Send out one packet of data
:param data: Data to send [str]
:param pckt_counter: Counter of file/comm [int]
:param dest_ip: IP [str]
:param dest_port: Port [int]
:param ccc: CRC of file [binary]
:return: [True, Always!]
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
actual_data = "\x0d" # Flags - None
if ccc:
actual_data += ccc # CID
else:
actual_data += "\x43\x53\x50\x45\x54\x48\x53\x59"
actual_data += "\x51\x30\x34\x33" # Version Q304
actual_data += struct.pack('B', pckt_counter) # Packet number increment
actual_data += data # Payload
s.sendto(actual_data, (dest_ip, dest_port))
return True
def Send(file_name, CNC_ip, CNC_port=GQUIC_PORT, key=DEFAULT_KEY):
"""
Send a file out (can be used for any communication
:param file_name: String, path to file
:param CNC_ip: String, IP
:param CNC_port: Int, Port
:param key: String of key
:return: TRUE FOREVER! VIVA LA REVOLUSION!
"""
this_prepObj = PrepFile(
file_path = file_name,
kind = 'binary',
max_size = 128,
enc_key = key
)
crc = hex(zlib.crc32(open(file_name, 'rb').read()))
_send_one(data = this_prepObj['Packets'][0], pckt_counter = 0, dest_ip = CNC_ip, dest_port = CNC_port, ccc = crc)
i = 1
for pkt in this_prepObj['Packets'][1:]:
_send_one(data=pkt, pckt_counter=i, dest_ip=CNC_ip, dest_port=CNC_port)
i += 1
return True
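# Usage sketch (hypothetical host and file, not part of the library):
#
#   from pyexfil.Comm.GQUIC import Send
#   Send("/tmp/secret.bin", CNC_ip="10.13.37.1", CNC_port=443)
#
# PrepFile splits the file into chunks of at most 128 bytes (using enc_key);
# each chunk becomes the payload of a forged GQUIC packet sent over UDP/443
# with an incrementing packet number, and the first packet carries the
# file's CRC32 in the CID field.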
| mit | 7,913,941,341,791,180,000 | 25.896552 | 114 | 0.670513 | false |
noironetworks/heat | heat/engine/resources/openstack/nova/server.py | 1 | 78105 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine.clients import progress
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.neutron import port as neutron_port
from heat.engine.resources.openstack.neutron import subnet
from heat.engine.resources.openstack.nova import server_network_mixin
from heat.engine.resources import scheduler_hints as sh
from heat.engine.resources import server_base
from heat.engine import support
from heat.engine import translation
from heat.rpc import api as rpc_api
cfg.CONF.import_opt('default_software_config_transport', 'heat.common.config')
cfg.CONF.import_opt('default_user_data_format', 'heat.common.config')
LOG = logging.getLogger(__name__)
NOVA_MICROVERSIONS = (MICROVERSION_TAGS, MICROVERSION_STR_NETWORK,
MICROVERSION_NIC_TAGS) = ('2.26', '2.37', '2.42')
class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
server_network_mixin.ServerNetworkMixin):
"""A resource for managing Nova instances.
A Server resource manages the running virtual machine instance within an
OpenStack cloud.
"""
PROPERTIES = (
NAME, IMAGE, BLOCK_DEVICE_MAPPING, BLOCK_DEVICE_MAPPING_V2,
FLAVOR, FLAVOR_UPDATE_POLICY, IMAGE_UPDATE_POLICY, KEY_NAME,
ADMIN_USER, AVAILABILITY_ZONE, SECURITY_GROUPS, NETWORKS,
SCHEDULER_HINTS, METADATA, USER_DATA_FORMAT, USER_DATA,
RESERVATION_ID, CONFIG_DRIVE, DISK_CONFIG, PERSONALITY,
ADMIN_PASS, SOFTWARE_CONFIG_TRANSPORT, USER_DATA_UPDATE_POLICY,
TAGS, DEPLOYMENT_SWIFT_DATA
) = (
'name', 'image', 'block_device_mapping', 'block_device_mapping_v2',
'flavor', 'flavor_update_policy', 'image_update_policy', 'key_name',
'admin_user', 'availability_zone', 'security_groups', 'networks',
'scheduler_hints', 'metadata', 'user_data_format', 'user_data',
'reservation_id', 'config_drive', 'diskConfig', 'personality',
'admin_pass', 'software_config_transport', 'user_data_update_policy',
'tags', 'deployment_swift_data'
)
_BLOCK_DEVICE_MAPPING_KEYS = (
BLOCK_DEVICE_MAPPING_DEVICE_NAME, BLOCK_DEVICE_MAPPING_VOLUME_ID,
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID,
BLOCK_DEVICE_MAPPING_VOLUME_SIZE,
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM,
) = (
'device_name', 'volume_id',
'snapshot_id',
'volume_size',
'delete_on_termination',
)
_BLOCK_DEVICE_MAPPING_V2_KEYS = (
BLOCK_DEVICE_MAPPING_DEVICE_NAME,
BLOCK_DEVICE_MAPPING_VOLUME_ID,
BLOCK_DEVICE_MAPPING_IMAGE_ID,
BLOCK_DEVICE_MAPPING_IMAGE,
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID,
BLOCK_DEVICE_MAPPING_SWAP_SIZE,
BLOCK_DEVICE_MAPPING_DEVICE_TYPE,
BLOCK_DEVICE_MAPPING_DISK_BUS,
BLOCK_DEVICE_MAPPING_BOOT_INDEX,
BLOCK_DEVICE_MAPPING_VOLUME_SIZE,
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM,
BLOCK_DEVICE_MAPPING_EPHEMERAL_SIZE,
BLOCK_DEVICE_MAPPING_EPHEMERAL_FORMAT,
) = (
'device_name',
'volume_id',
'image_id',
'image',
'snapshot_id',
'swap_size',
'device_type',
'disk_bus',
'boot_index',
'volume_size',
'delete_on_termination',
'ephemeral_size',
'ephemeral_format'
)
_NETWORK_KEYS = (
NETWORK_UUID, NETWORK_ID, NETWORK_FIXED_IP, NETWORK_PORT,
NETWORK_SUBNET, NETWORK_PORT_EXTRA, NETWORK_FLOATING_IP,
ALLOCATE_NETWORK, NIC_TAG,
) = (
'uuid', 'network', 'fixed_ip', 'port',
'subnet', 'port_extra_properties', 'floating_ip',
'allocate_network', 'tag',
)
_IFACE_MANAGED_KEYS = (NETWORK_PORT, NETWORK_ID,
NETWORK_FIXED_IP, NETWORK_SUBNET)
_SOFTWARE_CONFIG_FORMATS = (
HEAT_CFNTOOLS, RAW, SOFTWARE_CONFIG
) = (
'HEAT_CFNTOOLS', 'RAW', 'SOFTWARE_CONFIG'
)
_SOFTWARE_CONFIG_TRANSPORTS = (
POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE
) = (
'POLL_SERVER_CFN', 'POLL_SERVER_HEAT', 'POLL_TEMP_URL', 'ZAQAR_MESSAGE'
)
_ALLOCATE_TYPES = (
NETWORK_NONE, NETWORK_AUTO,
) = (
'none', 'auto',
)
_DEPLOYMENT_SWIFT_DATA_KEYS = (
CONTAINER, OBJECT
) = (
'container', 'object',
)
ATTRIBUTES = (
NAME_ATTR, ADDRESSES, NETWORKS_ATTR, FIRST_ADDRESS,
INSTANCE_NAME, ACCESSIPV4, ACCESSIPV6, CONSOLE_URLS, TAGS_ATTR,
OS_COLLECT_CONFIG
) = (
'name', 'addresses', 'networks', 'first_address',
'instance_name', 'accessIPv4', 'accessIPv6', 'console_urls', 'tags',
'os_collect_config'
)
# Image Statuses
IMAGE_STATUSES = (IMAGE_ACTIVE, IMAGE_ERROR,
IMAGE_DELETED) = ('active', 'error', 'deleted')
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Server name.'),
update_allowed=True
),
IMAGE: properties.Schema(
properties.Schema.STRING,
_('The ID or name of the image to boot with.'),
constraints=[
constraints.CustomConstraint('glance.image')
],
update_allowed=True
),
BLOCK_DEVICE_MAPPING: properties.Schema(
properties.Schema.LIST,
_('Block device mappings for this server.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
BLOCK_DEVICE_MAPPING_DEVICE_NAME: properties.Schema(
properties.Schema.STRING,
_('A device name where the volume will be '
'attached in the system at /dev/device_name. '
'This value is typically vda.'),
required=True
),
BLOCK_DEVICE_MAPPING_VOLUME_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the volume to boot from. Only one '
'of volume_id or snapshot_id should be '
'provided.'),
constraints=[
constraints.CustomConstraint('cinder.volume')
]
),
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the snapshot to create a volume '
'from.'),
constraints=[
constraints.CustomConstraint('cinder.snapshot')
]
),
BLOCK_DEVICE_MAPPING_VOLUME_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('The size of the volume, in GB. It is safe to '
'leave this blank and have the Compute service '
'infer the size.')
),
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM: properties.Schema(
properties.Schema.BOOLEAN,
_('Indicate whether the volume should be deleted '
'when the server is terminated.')
),
},
)
),
BLOCK_DEVICE_MAPPING_V2: properties.Schema(
properties.Schema.LIST,
_('Block device mappings v2 for this server.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
BLOCK_DEVICE_MAPPING_DEVICE_NAME: properties.Schema(
properties.Schema.STRING,
_('A device name where the volume will be '
'attached in the system at /dev/device_name. '
'This value is typically vda.'),
),
BLOCK_DEVICE_MAPPING_VOLUME_ID: properties.Schema(
properties.Schema.STRING,
_('The volume_id can be boot or non-boot device '
'to the server.'),
constraints=[
constraints.CustomConstraint('cinder.volume')
]
),
BLOCK_DEVICE_MAPPING_IMAGE_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the image to create a volume from.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='9.0.0',
message=_('Use property %s.') %
BLOCK_DEVICE_MAPPING_IMAGE,
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='7.0.0',
previous_status=support.SupportStatus(
version='5.0.0')
)
),
constraints=[
constraints.CustomConstraint('glance.image')
],
),
BLOCK_DEVICE_MAPPING_IMAGE: properties.Schema(
properties.Schema.STRING,
_('The ID or name of the image '
'to create a volume from.'),
support_status=support.SupportStatus(version='7.0.0'),
constraints=[
constraints.CustomConstraint('glance.image')
],
),
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the snapshot to create a volume '
'from.'),
constraints=[
constraints.CustomConstraint('cinder.snapshot')
]
),
BLOCK_DEVICE_MAPPING_SWAP_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('The size of the swap, in MB.')
),
BLOCK_DEVICE_MAPPING_EPHEMERAL_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('The size of the local ephemeral block device, '
'in GB.'),
support_status=support.SupportStatus(version='8.0.0'),
constraints=[constraints.Range(min=1)]
),
BLOCK_DEVICE_MAPPING_EPHEMERAL_FORMAT: properties.Schema(
properties.Schema.STRING,
_('The format of the local ephemeral block device. '
'If no format is specified, uses default value, '
'defined in nova configuration file.'),
constraints=[
constraints.AllowedValues(['ext2', 'ext3', 'ext4',
'xfs', 'ntfs'])
],
support_status=support.SupportStatus(version='8.0.0')
),
BLOCK_DEVICE_MAPPING_DEVICE_TYPE: properties.Schema(
properties.Schema.STRING,
_('Device type: at the moment we can make distinction '
'only between disk and cdrom.'),
constraints=[
constraints.AllowedValues(['cdrom', 'disk']),
],
),
BLOCK_DEVICE_MAPPING_DISK_BUS: properties.Schema(
properties.Schema.STRING,
_('Bus of the device: hypervisor driver chooses a '
'suitable default if omitted.'),
constraints=[
constraints.AllowedValues(['ide', 'lame_bus',
'scsi', 'usb',
'virtio']),
],
),
BLOCK_DEVICE_MAPPING_BOOT_INDEX: properties.Schema(
properties.Schema.INTEGER,
_('Integer used for ordering the boot disks. If '
'it is not specified, value "0" will be set '
'for bootable sources (volume, snapshot, image); '
'value "-1" will be set for non-bootable sources.'),
),
BLOCK_DEVICE_MAPPING_VOLUME_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('Size of the block device in GB. If it is omitted, '
'hypervisor driver calculates size.'),
),
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM: properties.Schema(
properties.Schema.BOOLEAN,
_('Indicate whether the volume should be deleted '
'when the server is terminated.')
),
},
),
support_status=support.SupportStatus(version='2015.1')
),
FLAVOR: properties.Schema(
properties.Schema.STRING,
_('The ID or name of the flavor to boot onto.'),
required=True,
update_allowed=True,
constraints=[
constraints.CustomConstraint('nova.flavor')
]
),
FLAVOR_UPDATE_POLICY: properties.Schema(
properties.Schema.STRING,
_('Policy on how to apply a flavor update; either by requesting '
'a server resize or by replacing the entire server.'),
default='RESIZE',
constraints=[
constraints.AllowedValues(['RESIZE', 'REPLACE']),
],
update_allowed=True
),
IMAGE_UPDATE_POLICY: properties.Schema(
properties.Schema.STRING,
_('Policy on how to apply an image-id update; either by '
'requesting a server rebuild or by replacing '
'the entire server.'),
default='REBUILD',
constraints=[
constraints.AllowedValues(['REBUILD', 'REPLACE',
'REBUILD_PRESERVE_EPHEMERAL']),
],
update_allowed=True
),
KEY_NAME: properties.Schema(
properties.Schema.STRING,
_('Name of keypair to inject into the server.'),
constraints=[
constraints.CustomConstraint('nova.keypair')
]
),
ADMIN_USER: properties.Schema(
properties.Schema.STRING,
_('Name of the administrative user to use on the server.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
message=_('The default cloud-init user set up for each image '
'(e.g. "ubuntu" for Ubuntu 12.04+, "fedora" for '
'Fedora 19+ and "cloud-user" for CentOS/RHEL 6.5).'),
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='2014.1',
previous_status=support.SupportStatus(version='2013.2')
)
)
),
AVAILABILITY_ZONE: properties.Schema(
properties.Schema.STRING,
_('Name of the availability zone for server placement.')
),
SECURITY_GROUPS: properties.Schema(
properties.Schema.LIST,
_('List of security group names or IDs. Cannot be used if '
'neutron ports are associated with this server; assign '
'security groups to the ports instead.'),
default=[]
),
NETWORKS: properties.Schema(
properties.Schema.LIST,
_('An ordered list of nics to be added to this server, with '
'information about connected networks, fixed ips, port etc.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
NETWORK_UUID: properties.Schema(
properties.Schema.STRING,
_('ID of network to create a port on.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
previous_status=support.SupportStatus(
status=support.DEPRECATED,
message=_('Use property %s.') % NETWORK_ID,
version='2014.1'
)
),
constraints=[
constraints.CustomConstraint('neutron.network')
]
),
NETWORK_ID: properties.Schema(
properties.Schema.STRING,
_('Name or ID of network to create a port on.'),
constraints=[
constraints.CustomConstraint('neutron.network')
]
),
ALLOCATE_NETWORK: properties.Schema(
properties.Schema.STRING,
_('The special string values of network, '
'auto: means either a network that is already '
'available to the project will be used, or if one '
'does not exist, will be automatically created for '
'the project; none: means no networking will be '
'allocated for the created server. Supported by '
'Nova API since version "2.37". This property can '
'not be used with other network keys.'),
support_status=support.SupportStatus(version='9.0.0'),
constraints=[
constraints.AllowedValues(
[NETWORK_NONE, NETWORK_AUTO])
],
update_allowed=True,
),
NETWORK_FIXED_IP: properties.Schema(
properties.Schema.STRING,
_('Fixed IP address to specify for the port '
'created on the requested network.'),
constraints=[
constraints.CustomConstraint('ip_addr')
]
),
NETWORK_PORT: properties.Schema(
properties.Schema.STRING,
_('ID of an existing port to associate with this '
'server.'),
constraints=[
constraints.CustomConstraint('neutron.port')
]
),
NETWORK_PORT_EXTRA: properties.Schema(
properties.Schema.MAP,
_('Dict, which has expand properties for port. '
'Used only if port property is not specified '
'for creating port.'),
schema=neutron_port.Port.extra_properties_schema,
support_status=support.SupportStatus(version='6.0.0')
),
NETWORK_SUBNET: properties.Schema(
properties.Schema.STRING,
_('Subnet in which to allocate the IP address for '
'port. Used for creating port, based on derived '
'properties. If subnet is specified, network '
'property becomes optional.'),
support_status=support.SupportStatus(version='5.0.0')
),
NETWORK_FLOATING_IP: properties.Schema(
properties.Schema.STRING,
_('ID of the floating IP to associate.'),
support_status=support.SupportStatus(version='6.0.0')
),
NIC_TAG: properties.Schema(
properties.Schema.STRING,
_('Port tag. Heat ignores any update on this property '
'as nova does not support it.'),
support_status=support.SupportStatus(version='9.0.0')
)
},
),
update_allowed=True
),
SCHEDULER_HINTS: properties.Schema(
properties.Schema.MAP,
_('Arbitrary key-value pairs specified by the client to help '
'boot a server.')
),
METADATA: properties.Schema(
properties.Schema.MAP,
_('Arbitrary key/value metadata to store for this server. Both '
'keys and values must be 255 characters or less. Non-string '
'values will be serialized to JSON (and the serialized '
'string must be 255 characters or less).'),
update_allowed=True,
default={}
),
USER_DATA_FORMAT: properties.Schema(
properties.Schema.STRING,
_('How the user_data should be formatted for the server. For '
'HEAT_CFNTOOLS, the user_data is bundled as part of the '
'heat-cfntools cloud-init boot configuration data. For RAW '
'the user_data is passed to Nova unmodified. '
'For SOFTWARE_CONFIG user_data is bundled as part of the '
'software config data, and metadata is derived from any '
'associated SoftwareDeployment resources.'),
default=cfg.CONF.default_user_data_format,
constraints=[
constraints.AllowedValues(_SOFTWARE_CONFIG_FORMATS),
]
),
SOFTWARE_CONFIG_TRANSPORT: properties.Schema(
properties.Schema.STRING,
_('How the server should receive the metadata required for '
'software configuration. POLL_SERVER_CFN will allow calls to '
'the cfn API action DescribeStackResource authenticated with '
'the provided keypair. POLL_SERVER_HEAT will allow calls to '
'the Heat API resource-show using the provided keystone '
'credentials. POLL_TEMP_URL will create and populate a '
'Swift TempURL with metadata for polling. ZAQAR_MESSAGE will '
'create a dedicated zaqar queue and post the metadata '
'for polling.'),
default=cfg.CONF.default_software_config_transport,
update_allowed=True,
constraints=[
constraints.AllowedValues(_SOFTWARE_CONFIG_TRANSPORTS),
]
),
USER_DATA_UPDATE_POLICY: properties.Schema(
properties.Schema.STRING,
_('Policy on how to apply a user_data update; either by '
'ignoring it or by replacing the entire server.'),
default='REPLACE',
constraints=[
constraints.AllowedValues(['REPLACE', 'IGNORE']),
],
support_status=support.SupportStatus(version='6.0.0'),
update_allowed=True
),
USER_DATA: properties.Schema(
properties.Schema.STRING,
_('User data script to be executed by cloud-init. Changes cause '
'replacement of the resource by default, but can be ignored '
'altogether by setting the `user_data_update_policy` property.'),
default='',
update_allowed=True
),
RESERVATION_ID: properties.Schema(
properties.Schema.STRING,
_('A UUID for the set of servers being requested.')
),
CONFIG_DRIVE: properties.Schema(
properties.Schema.BOOLEAN,
_('If True, enable config drive on the server.')
),
DISK_CONFIG: properties.Schema(
properties.Schema.STRING,
_('Control how the disk is partitioned when the server is '
'created.'),
constraints=[
constraints.AllowedValues(['AUTO', 'MANUAL']),
]
),
PERSONALITY: properties.Schema(
properties.Schema.MAP,
_('A map of files to create/overwrite on the server upon boot. '
'Keys are file names and values are the file contents.'),
default={}
),
ADMIN_PASS: properties.Schema(
properties.Schema.STRING,
_('The administrator password for the server.'),
update_allowed=True
),
TAGS: properties.Schema(
properties.Schema.LIST,
_('Server tags. Supported since client version 2.26.'),
support_status=support.SupportStatus(version='8.0.0'),
schema=properties.Schema(properties.Schema.STRING),
update_allowed=True
),
DEPLOYMENT_SWIFT_DATA: properties.Schema(
properties.Schema.MAP,
_('Swift container and object to use for storing deployment data '
'for the server resource. The parameter is a map value '
'with the keys "container" and "object", and the values '
'are the corresponding container and object names. The '
'software_config_transport parameter must be set to '
'POLL_TEMP_URL for swift to be used. If not specified, '
'and software_config_transport is set to POLL_TEMP_URL, a '
'container will be automatically created from the resource '
'name, and the object name will be a generated uuid.'),
support_status=support.SupportStatus(version='9.0.0'),
default={},
update_allowed=True,
schema={
CONTAINER: properties.Schema(
properties.Schema.STRING,
_('Name of the container.'),
constraints=[
constraints.Length(min=1)
]
),
OBJECT: properties.Schema(
properties.Schema.STRING,
_('Name of the object.'),
constraints=[
constraints.Length(min=1)
]
)
}
)
}
attributes_schema = {
NAME_ATTR: attributes.Schema(
_('Name of the server.'),
type=attributes.Schema.STRING
),
ADDRESSES: attributes.Schema(
_('A dict of all network addresses with corresponding port_id and '
'subnets. Each network will have two keys in dict, they are '
'network name and network id. The port ID may be obtained '
'through the following expression: ``{get_attr: [<server>, '
'addresses, <network name_or_id>, 0, port]}``. The subnets may '
              'be obtained through the following expression: ``{get_attr: '
'[<server>, addresses, <network name_or_id>, 0, subnets]}``. '
'The network may be obtained through the following expression: '
'``{get_attr: [<server>, addresses, <network name_or_id>, 0, '
'network]}``.'),
type=attributes.Schema.MAP,
support_status=support.SupportStatus(
version='11.0.0',
status=support.SUPPORTED,
message=_('The attribute was extended to include subnets and '
'network with version 11.0.0.'),
previous_status=support.SupportStatus(
status=support.SUPPORTED
)
)
),
NETWORKS_ATTR: attributes.Schema(
_('A dict of assigned network addresses of the form: '
'{"public": [ip1, ip2...], "private": [ip3, ip4], '
'"public_uuid": [ip1, ip2...], "private_uuid": [ip3, ip4]}. '
'Each network will have two keys in dict, they are network '
'name and network id.'),
type=attributes.Schema.MAP
),
FIRST_ADDRESS: attributes.Schema(
_('Convenience attribute to fetch the first assigned network '
'address, or an empty string if nothing has been assigned at '
'this time. Result may not be predictable if the server has '
'addresses from more than one network.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
message=_('Use the networks attribute instead of '
'first_address. For example: "{get_attr: '
'[<server name>, networks, <network name>, 0]}"'),
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='2014.2',
previous_status=support.SupportStatus(version='2013.2')
)
)
),
INSTANCE_NAME: attributes.Schema(
_('AWS compatible instance name.'),
type=attributes.Schema.STRING
),
ACCESSIPV4: attributes.Schema(
_('The manually assigned alternative public IPv4 address '
'of the server.'),
type=attributes.Schema.STRING
),
ACCESSIPV6: attributes.Schema(
_('The manually assigned alternative public IPv6 address '
'of the server.'),
type=attributes.Schema.STRING
),
CONSOLE_URLS: attributes.Schema(
_("URLs of server's consoles. "
"To get a specific console type, the requested type "
"can be specified as parameter to the get_attr function, "
"e.g. get_attr: [ <server>, console_urls, novnc ]. "
"Currently supported types are "
"novnc, xvpvnc, spice-html5, rdp-html5, serial and webmks."),
support_status=support.SupportStatus(version='2015.1'),
type=attributes.Schema.MAP
),
TAGS_ATTR: attributes.Schema(
_('Tags from the server. Supported since client version 2.26.'),
support_status=support.SupportStatus(version='8.0.0'),
type=attributes.Schema.LIST
),
OS_COLLECT_CONFIG: attributes.Schema(
_('The os-collect-config configuration for the server\'s local '
'agent to be configured to connect to Heat to retrieve '
'deployment data.'),
support_status=support.SupportStatus(version='9.0.0'),
type=attributes.Schema.MAP,
cache_mode=attributes.Schema.CACHE_NONE
),
}
default_client_name = 'nova'
def translation_rules(self, props):
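        # The translation rules below resolve user-supplied names (flavor,
        # image, network, subnet, port) to IDs and map deprecated keys such
        # as the old network "uuid" key onto their current replacements
        # before the properties are consumed.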
neutron_client_plugin = self.client_plugin('neutron')
glance_client_plugin = self.client_plugin('glance')
rules = [
translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
translation_path=[self.NETWORKS, self.NETWORK_ID],
value_name=self.NETWORK_UUID),
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
translation_path=[self.FLAVOR],
client_plugin=self.client_plugin('nova'),
finder='find_flavor_by_name_or_id'),
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
translation_path=[self.IMAGE],
client_plugin=glance_client_plugin,
finder='find_image_by_name_or_id'),
translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
translation_path=[self.BLOCK_DEVICE_MAPPING_V2,
self.BLOCK_DEVICE_MAPPING_IMAGE],
value_name=self.BLOCK_DEVICE_MAPPING_IMAGE_ID),
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
translation_path=[self.BLOCK_DEVICE_MAPPING_V2,
self.BLOCK_DEVICE_MAPPING_IMAGE],
client_plugin=glance_client_plugin,
finder='find_image_by_name_or_id'),
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
translation_path=[self.NETWORKS, self.NETWORK_ID],
client_plugin=neutron_client_plugin,
finder='find_resourceid_by_name_or_id',
entity=neutron_client_plugin.RES_TYPE_NETWORK),
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
translation_path=[self.NETWORKS, self.NETWORK_SUBNET],
client_plugin=neutron_client_plugin,
finder='find_resourceid_by_name_or_id',
entity=neutron_client_plugin.RES_TYPE_SUBNET),
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
translation_path=[self.NETWORKS, self.NETWORK_PORT],
client_plugin=neutron_client_plugin,
finder='find_resourceid_by_name_or_id',
entity=neutron_client_plugin.RES_TYPE_PORT)
]
return rules
def __init__(self, name, json_snippet, stack):
super(Server, self).__init__(name, json_snippet, stack)
if self.user_data_software_config():
self._register_access_key()
self.default_collectors = ['ec2']
def _config_drive(self):
# This method is overridden by the derived CloudServer resource
return self.properties[self.CONFIG_DRIVE]
def user_data_raw(self):
return self.properties[self.USER_DATA_FORMAT] == self.RAW
def user_data_software_config(self):
return self.properties[
self.USER_DATA_FORMAT] == self.SOFTWARE_CONFIG
def get_software_config(self, ud_content):
with self.rpc_client().ignore_error_by_name('NotFound'):
sc = self.rpc_client().show_software_config(
self.context, ud_content)
return sc[rpc_api.SOFTWARE_CONFIG_CONFIG]
return ud_content
def handle_create(self):
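        # Gather all creation arguments (user data, metadata, scheduler
        # hints, nics, block device mappings, ...) from the validated
        # properties and pass them to novaclient in a single
        # servers.create() call.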
security_groups = self.properties[self.SECURITY_GROUPS]
user_data_format = self.properties[self.USER_DATA_FORMAT]
ud_content = self.properties[self.USER_DATA]
if self.user_data_software_config() or self.user_data_raw():
if uuidutils.is_uuid_like(ud_content):
# attempt to load the userdata from software config
ud_content = self.get_software_config(ud_content)
metadata = self.metadata_get(True) or {}
if self.user_data_software_config():
self._create_transport_credentials(self.properties)
self._populate_deployments_metadata(metadata, self.properties)
userdata = self.client_plugin().build_userdata(
metadata,
ud_content,
instance_user=None,
user_data_format=user_data_format)
availability_zone = self.properties[self.AVAILABILITY_ZONE]
instance_meta = self.properties[self.METADATA]
if instance_meta:
instance_meta = self.client_plugin().meta_serialize(
instance_meta)
scheduler_hints = self._scheduler_hints(
self.properties[self.SCHEDULER_HINTS])
nics = self._build_nics(self.properties[self.NETWORKS],
security_groups=security_groups)
block_device_mapping = self._build_block_device_mapping(
self.properties[self.BLOCK_DEVICE_MAPPING])
block_device_mapping_v2 = self._build_block_device_mapping_v2(
self.properties[self.BLOCK_DEVICE_MAPPING_V2])
reservation_id = self.properties[self.RESERVATION_ID]
disk_config = self.properties[self.DISK_CONFIG]
admin_pass = self.properties[self.ADMIN_PASS] or None
personality_files = self.properties[self.PERSONALITY]
key_name = self.properties[self.KEY_NAME]
flavor = self.properties[self.FLAVOR]
image = self.properties[self.IMAGE]
server = None
try:
server = self.client().servers.create(
name=self._server_name(),
image=image,
flavor=flavor,
key_name=key_name,
security_groups=security_groups,
userdata=userdata,
meta=instance_meta,
scheduler_hints=scheduler_hints,
nics=nics,
availability_zone=availability_zone,
block_device_mapping=block_device_mapping,
block_device_mapping_v2=block_device_mapping_v2,
reservation_id=reservation_id,
config_drive=self._config_drive(),
disk_config=disk_config,
files=personality_files,
admin_pass=admin_pass)
finally:
# Avoid a race condition where the thread could be canceled
# before the ID is stored
if server is not None:
self.resource_id_set(server.id)
return server.id
def check_create_complete(self, server_id):
check = self.client_plugin()._check_active(server_id)
if check:
if self.properties[self.TAGS]:
self._update_server_tags(self.properties[self.TAGS])
self.store_external_ports()
return check
def _update_server_tags(self, tags):
server = self.client().servers.get(self.resource_id)
self.client().servers.set_tags(server, tags)
def handle_check(self):
server = self.client().servers.get(self.resource_id)
status = self.client_plugin().get_status(server)
checks = [{'attr': 'status', 'expected': 'ACTIVE', 'current': status}]
self._verify_check_conditions(checks)
def get_live_resource_data(self):
try:
server = self.client().servers.get(self.resource_id)
server_data = server.to_dict()
active = self.client_plugin()._check_active(server)
if not active:
                # It doesn't matter which error is raised here, because the
                # update method of the resource just logs it as a warning.
raise exception.Error(_('Server %s is not '
'in ACTIVE state') % self.name)
except Exception as ex:
if self.client_plugin().is_not_found(ex):
raise exception.EntityNotFound(entity='Resource',
name=self.name)
raise
if self.client_plugin().is_version_supported(MICROVERSION_TAGS):
tag_server = self.client().servers.get(self.resource_id)
server_data['tags'] = tag_server.tag_list()
return server, server_data
def parse_live_resource_data(self, resource_properties, resource_data):
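        """Map live server data back onto this resource's properties.
        `resource_data` is the (server, server_data) tuple returned by
        get_live_resource_data(); only properties that can be observed on
        the running server (flavor, image, name, metadata, networks, tags)
        are included in the result.
        """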
server, server_data = resource_data
result = {
# there's a risk that flavor id will be int type, so cast to str
self.FLAVOR: six.text_type(server_data.get(self.FLAVOR)['id']),
self.IMAGE: six.text_type(server_data.get(self.IMAGE)['id']),
self.NAME: server_data.get(self.NAME),
self.METADATA: server_data.get(self.METADATA),
self.NETWORKS: self._get_live_networks(server, resource_properties)
}
if 'tags' in server_data:
result.update({self.TAGS: server_data['tags']})
return result
def _get_live_networks(self, server, props):
reality_nets = self._add_attrs_for_address(server,
extend_networks=False)
reality_net_ids = {}
client_plugin = self.client_plugin('neutron')
for net_key in reality_nets:
try:
net_id = client_plugin.find_resourceid_by_name_or_id(
client_plugin.RES_TYPE_NETWORK,
net_key)
except Exception as ex:
if (client_plugin.is_not_found(ex) or
client_plugin.is_no_unique(ex)):
net_id = None
else:
raise
if net_id:
reality_net_ids[net_id] = reality_nets.get(net_key)
resource_nets = props.get(self.NETWORKS)
result_nets = []
for net in resource_nets or []:
net_id = self._get_network_id(net)
if reality_net_ids.get(net_id):
for idx, address in enumerate(reality_net_ids.get(net_id)):
if address['addr'] == net[self.NETWORK_FIXED_IP]:
result_nets.append(net)
reality_net_ids.get(net_id).pop(idx)
break
for key, value in six.iteritems(reality_nets):
for address in reality_nets[key]:
new_net = {self.NETWORK_ID: key,
self.NETWORK_FIXED_IP: address['addr']}
if address['port'] not in [port['id']
for port in self._data_get_ports()]:
new_net.update({self.NETWORK_PORT: address['port']})
result_nets.append(new_net)
return result_nets
@classmethod
def _build_block_device_mapping(cls, bdm):
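        """Build the legacy (v1) block_device_mapping argument for Nova.
        Maps each device name to a string of the form
        '<id>:<snap or blank>:<size or blank>[:<delete_on_termination>]',
        where the id is the snapshot uuid (marked 'snap') or the volume uuid.
        """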
if not bdm:
return None
bdm_dict = {}
for mapping in bdm:
mapping_parts = []
snapshot_id = mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
if snapshot_id:
mapping_parts.append(snapshot_id)
mapping_parts.append('snap')
else:
volume_id = mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID)
mapping_parts.append(volume_id)
mapping_parts.append('')
volume_size = mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_SIZE)
delete = mapping.get(cls.BLOCK_DEVICE_MAPPING_DELETE_ON_TERM)
if volume_size:
mapping_parts.append(str(volume_size))
else:
mapping_parts.append('')
if delete:
mapping_parts.append(str(delete))
device_name = mapping.get(cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME)
bdm_dict[device_name] = ':'.join(mapping_parts)
return bdm_dict
@classmethod
def _build_block_device_mapping_v2(cls, bdm_v2):
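        """Build the list of block_device_mapping_v2 dicts for Nova.
        Each property mapping is converted into a dict with source_type,
        destination_type, boot_index and delete_on_termination set according
        to whether a volume, snapshot, image, swap or ephemeral disk was
        requested.
        """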
if not bdm_v2:
return None
bdm_v2_list = []
for mapping in bdm_v2:
bmd_dict = None
if mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID):
bmd_dict = {
'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID),
'source_type': 'volume',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': False,
}
elif mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID):
bmd_dict = {
'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID),
'source_type': 'snapshot',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': False,
}
elif mapping.get(cls.BLOCK_DEVICE_MAPPING_IMAGE):
bmd_dict = {
'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_IMAGE),
'source_type': 'image',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': False,
}
elif mapping.get(cls.BLOCK_DEVICE_MAPPING_SWAP_SIZE):
bmd_dict = {
'source_type': 'blank',
'destination_type': 'local',
'boot_index': -1,
'delete_on_termination': True,
'guest_format': 'swap',
'volume_size': mapping.get(
cls.BLOCK_DEVICE_MAPPING_SWAP_SIZE),
}
elif (mapping.get(cls.BLOCK_DEVICE_MAPPING_EPHEMERAL_SIZE) or
mapping.get(cls.BLOCK_DEVICE_MAPPING_EPHEMERAL_FORMAT)):
bmd_dict = {
'source_type': 'blank',
'destination_type': 'local',
'boot_index': -1,
'delete_on_termination': True
}
ephemeral_size = mapping.get(
cls.BLOCK_DEVICE_MAPPING_EPHEMERAL_SIZE)
if ephemeral_size:
bmd_dict.update({'volume_size': ephemeral_size})
ephemeral_format = mapping.get(
cls.BLOCK_DEVICE_MAPPING_EPHEMERAL_FORMAT)
if ephemeral_format:
bmd_dict.update({'guest_format': ephemeral_format})
            # NOTE(prazumovsky): the server does not accept an empty device
            # name, so only set it if one was actually provided.
device_name = mapping.get(cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME)
if device_name:
bmd_dict[cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME] = device_name
update_props = (cls.BLOCK_DEVICE_MAPPING_DEVICE_TYPE,
cls.BLOCK_DEVICE_MAPPING_DISK_BUS,
cls.BLOCK_DEVICE_MAPPING_BOOT_INDEX,
cls.BLOCK_DEVICE_MAPPING_VOLUME_SIZE,
cls.BLOCK_DEVICE_MAPPING_DELETE_ON_TERM)
for update_prop in update_props:
if mapping.get(update_prop) is not None:
bmd_dict[update_prop] = mapping.get(update_prop)
if bmd_dict:
bdm_v2_list.append(bmd_dict)
return bdm_v2_list
def _get_subnets_attr(self, fixed_ips):
subnets = []
try:
for fixed_ip in fixed_ips:
if fixed_ip.get('subnet_id'):
subnets.append(self.client('neutron').show_subnet(
fixed_ip['subnet_id'])['subnet'])
except Exception as ex:
LOG.warning("Failed to fetch resource attributes: %s", ex)
return
return subnets
def _get_network_attr(self, network_id):
try:
return self.client('neutron').show_network(network_id)['network']
except Exception as ex:
LOG.warning("Failed to fetch resource attributes: %s", ex)
return
def _add_attrs_for_address(self, server, extend_networks=True):
"""Adds port id, subnets and network attributes to addresses list.
This method is used only for resolving attributes.
:param server: The server resource
:param extend_networks: When False the network is not extended, i.e
the net is returned without replacing name on
id.
"""
nets = copy.deepcopy(server.addresses) or {}
ifaces = server.interface_list()
ip_mac_mapping_on_port_id = dict(((iface.fixed_ips[0]['ip_address'],
iface.mac_addr), iface.port_id)
for iface in ifaces)
for net_name in nets:
for addr in nets[net_name]:
addr['port'] = ip_mac_mapping_on_port_id.get(
(addr['addr'], addr['OS-EXT-IPS-MAC:mac_addr']))
# _get_live_networks() uses this method to get reality_nets.
# We don't need to get subnets and network in that case. Only
                # do the external calls if extend_networks is true, i.e. called
# from _resolve_attribute()
if not extend_networks:
continue
try:
port = self.client('neutron').show_port(
addr['port'])['port']
except Exception as ex:
addr['subnets'], addr['network'] = None, None
LOG.warning("Failed to fetch resource attributes: %s", ex)
continue
addr['subnets'] = self._get_subnets_attr(port['fixed_ips'])
addr['network'] = self._get_network_attr(port['network_id'])
if extend_networks:
return self._extend_networks(nets)
else:
return nets
def _extend_networks(self, networks):
"""Method adds same networks with replaced name on network id.
This method is used only for resolving attributes.
"""
nets = copy.deepcopy(networks)
client_plugin = self.client_plugin('neutron')
for key in list(nets.keys()):
try:
net_id = client_plugin.find_resourceid_by_name_or_id(
client_plugin.RES_TYPE_NETWORK,
key)
except Exception as ex:
if (client_plugin.is_not_found(ex) or
client_plugin.is_no_unique(ex)):
net_id = None
else:
raise
if net_id:
nets[net_id] = nets[key]
return nets
def _resolve_attribute(self, name):
if self.resource_id is None:
return
if name == self.FIRST_ADDRESS:
return self.client_plugin().server_to_ipaddress(
self.resource_id) or ''
if name == self.OS_COLLECT_CONFIG:
return self.metadata_get().get('os-collect-config', {})
if name == self.NAME_ATTR:
return self._server_name()
try:
server = self.client().servers.get(self.resource_id)
except Exception as e:
self.client_plugin().ignore_not_found(e)
return ''
if name == self.ADDRESSES:
return self._add_attrs_for_address(server)
if name == self.NETWORKS_ATTR:
return self._extend_networks(server.networks)
if name == self.INSTANCE_NAME:
return getattr(server, 'OS-EXT-SRV-ATTR:instance_name', None)
if name == self.ACCESSIPV4:
return server.accessIPv4
if name == self.ACCESSIPV6:
return server.accessIPv6
if name == self.CONSOLE_URLS:
return self.client_plugin('nova').get_console_urls(server)
if name == self.TAGS_ATTR:
if self.client_plugin().is_version_supported(MICROVERSION_TAGS):
return self.client().servers.tag_list(server)
return None
def add_dependencies(self, deps):
super(Server, self).add_dependencies(deps)
# Depend on any Subnet in this template with the same
# network_id as the networks attached to this server.
# It is not known which subnet a server might be assigned
# to so all subnets in a network should be created before
# the servers in that network.
try:
nets = self.properties[self.NETWORKS]
except (ValueError, TypeError):
# Properties errors will be caught later in validation,
# where we can report them in their proper context.
return
if not nets:
return
for res in six.itervalues(self.stack):
if res.has_interface('OS::Neutron::Subnet'):
try:
subnet_net = res.properties.get(subnet.Subnet.NETWORK)
except (ValueError, TypeError):
# Properties errors will be caught later in validation,
# where we can report them in their proper context.
continue
# Be wary of the case where we do not know a subnet's
# network. If that's the case, be safe and add it as a
# dependency.
if not subnet_net:
deps += (self, res)
continue
for net in nets:
# worry about network_id because that could be the match
# assigned to the subnet as well and could have been
# created by this stack. Regardless, the server should
# still wait on the subnet.
net_id = net.get(self.NETWORK_ID)
if net_id and net_id == subnet_net:
deps += (self, res)
break
                    # If we can't resolve a given net_id right now, it's
                    # plausible that the server depends on this subnet, so
                    # be safe and add the dependency.
if not net_id:
deps += (self, res)
break
def _update_flavor(self, after_props):
flavor = after_props[self.FLAVOR]
handler_args = checker_args = {'args': (flavor,)}
prg_resize = progress.ServerUpdateProgress(self.resource_id,
'resize',
handler_extra=handler_args,
checker_extra=checker_args)
prg_verify = progress.ServerUpdateProgress(self.resource_id,
'verify_resize')
return prg_resize, prg_verify
def _update_image(self, after_props):
image_update_policy = after_props[self.IMAGE_UPDATE_POLICY]
instance_meta = after_props[self.METADATA]
if instance_meta is not None:
instance_meta = self.client_plugin().meta_serialize(
instance_meta)
personality_files = after_props[self.PERSONALITY]
image = after_props[self.IMAGE]
preserve_ephemeral = (
image_update_policy == 'REBUILD_PRESERVE_EPHEMERAL')
password = after_props[self.ADMIN_PASS]
kwargs = {'password': password,
'preserve_ephemeral': preserve_ephemeral,
'meta': instance_meta,
'files': personality_files}
prg = progress.ServerUpdateProgress(self.resource_id,
'rebuild',
handler_extra={'args': (image,),
'kwargs': kwargs})
return prg
def _update_networks(self, server, after_props):
updaters = []
new_networks = after_props[self.NETWORKS]
old_networks = self.properties[self.NETWORKS]
security_groups = after_props[self.SECURITY_GROUPS]
if not server:
server = self.client().servers.get(self.resource_id)
interfaces = server.interface_list()
remove_ports, add_nets = self.calculate_networks(
old_networks, new_networks, interfaces, security_groups)
for port in remove_ports:
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_detach',
handler_extra={'args': (port,)},
checker_extra={'args': (port,)})
)
for args in add_nets:
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_attach',
handler_extra={'kwargs': args},
checker_extra={'args': (args['port_id'],)})
)
return updaters
def needs_replace_with_prop_diff(self, changed_properties_set,
after_props, before_props):
"""Needs replace based on prop_diff."""
if self.FLAVOR in changed_properties_set:
flavor_update_policy = (
after_props.get(self.FLAVOR_UPDATE_POLICY) or
before_props.get(self.FLAVOR_UPDATE_POLICY))
if flavor_update_policy == 'REPLACE':
return True
if self.IMAGE in changed_properties_set:
image_update_policy = (
after_props.get(self.IMAGE_UPDATE_POLICY) or
before_props.get(self.IMAGE_UPDATE_POLICY))
if image_update_policy == 'REPLACE':
return True
if self.USER_DATA in changed_properties_set:
ud_update_policy = (
after_props.get(self.USER_DATA_UPDATE_POLICY) or
before_props.get(self.USER_DATA_UPDATE_POLICY))
return ud_update_policy == 'REPLACE'
def needs_replace_failed(self):
if not self.resource_id:
return True
with self.client_plugin().ignore_not_found:
server = self.client().servers.get(self.resource_id)
return server.status in ('ERROR', 'DELETED', 'SOFT_DELETED')
return True
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
updaters = super(Server, self).handle_update(
json_snippet,
tmpl_diff,
prop_diff)
server = None
after_props = json_snippet.properties(self.properties_schema,
self.context)
if self.METADATA in prop_diff:
server = self.client_plugin().get_server(self.resource_id)
self.client_plugin().meta_update(server,
after_props[self.METADATA])
if self.TAGS in prop_diff:
self._update_server_tags(after_props[self.TAGS] or [])
if self.NAME in prop_diff:
if not server:
server = self.client_plugin().get_server(self.resource_id)
self.client_plugin().rename(server, after_props[self.NAME])
if self.NETWORKS in prop_diff:
updaters.extend(self._update_networks(server, after_props))
if self.FLAVOR in prop_diff:
updaters.extend(self._update_flavor(after_props))
if self.IMAGE in prop_diff:
updaters.append(self._update_image(after_props))
elif self.ADMIN_PASS in prop_diff:
if not server:
server = self.client_plugin().get_server(self.resource_id)
server.change_password(after_props[self.ADMIN_PASS])
# NOTE(pas-ha) optimization is possible (starting first task
# right away), but we'd rather not, as this method already might
# have called several APIs
return updaters
def check_update_complete(self, updaters):
"""Push all updaters to completion in list order."""
for prg in updaters:
if not prg.called:
handler = getattr(self.client_plugin(), prg.handler)
prg.called = handler(*prg.handler_args,
**prg.handler_kwargs)
return False
if not prg.complete:
check_complete = getattr(self.client_plugin(), prg.checker)
prg.complete = check_complete(*prg.checker_args,
**prg.checker_kwargs)
break
status = all(prg.complete for prg in updaters)
if status:
self.store_external_ports()
return status
def _validate_block_device_mapping(self):
# either volume_id or snapshot_id needs to be specified, but not both
# for block device mapping.
bdm = self.properties[self.BLOCK_DEVICE_MAPPING] or []
bdm_v2 = self.properties[self.BLOCK_DEVICE_MAPPING_V2] or []
image = self.properties[self.IMAGE]
if bdm and bdm_v2:
raise exception.ResourcePropertyConflict(
self.BLOCK_DEVICE_MAPPING, self.BLOCK_DEVICE_MAPPING_V2)
bootable = image is not None
for mapping in bdm:
device_name = mapping[self.BLOCK_DEVICE_MAPPING_DEVICE_NAME]
if device_name == 'vda':
bootable = True
volume_id = mapping.get(self.BLOCK_DEVICE_MAPPING_VOLUME_ID)
snapshot_id = mapping.get(self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
if volume_id is not None and snapshot_id is not None:
raise exception.ResourcePropertyConflict(
self.BLOCK_DEVICE_MAPPING_VOLUME_ID,
self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
if volume_id is None and snapshot_id is None:
msg = _('Either volume_id or snapshot_id must be specified for'
' device mapping %s') % device_name
raise exception.StackValidationFailed(message=msg)
bootable_devs = [image]
for mapping in bdm_v2:
volume_id = mapping.get(self.BLOCK_DEVICE_MAPPING_VOLUME_ID)
snapshot_id = mapping.get(self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
image_id = mapping.get(self.BLOCK_DEVICE_MAPPING_IMAGE)
boot_index = mapping.get(self.BLOCK_DEVICE_MAPPING_BOOT_INDEX)
swap_size = mapping.get(self.BLOCK_DEVICE_MAPPING_SWAP_SIZE)
ephemeral = (mapping.get(
self.BLOCK_DEVICE_MAPPING_EPHEMERAL_SIZE) or mapping.get(
self.BLOCK_DEVICE_MAPPING_EPHEMERAL_FORMAT))
property_tuple = (volume_id, snapshot_id, image_id, swap_size,
ephemeral)
if property_tuple.count(None) < 4:
raise exception.ResourcePropertyConflict(
self.BLOCK_DEVICE_MAPPING_VOLUME_ID,
self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID,
self.BLOCK_DEVICE_MAPPING_IMAGE,
self.BLOCK_DEVICE_MAPPING_SWAP_SIZE,
self.BLOCK_DEVICE_MAPPING_EPHEMERAL_SIZE,
self.BLOCK_DEVICE_MAPPING_EPHEMERAL_FORMAT
)
if property_tuple.count(None) == 5:
msg = _('Either volume_id, snapshot_id, image_id, swap_size, '
'ephemeral_size or ephemeral_format must be '
'specified.')
raise exception.StackValidationFailed(message=msg)
if any((volume_id is not None, snapshot_id is not None,
image_id is not None)):
                # boot_index defaults to 0 in _build_block_device_mapping_v2
                # for volume, snapshot and image sources, so treat an
                # unspecified boot_index the same as 0 here.
if boot_index is None or boot_index == 0:
bootable = True
bootable_devs.append(volume_id)
bootable_devs.append(snapshot_id)
bootable_devs.append(image_id)
if not bootable:
msg = _('Neither image nor bootable volume is specified for '
'instance %s') % self.name
raise exception.StackValidationFailed(message=msg)
if bdm_v2 and len(list(
dev for dev in bootable_devs if dev is not None)) != 1:
msg = _('Multiple bootable sources for instance %s.') % self.name
raise exception.StackValidationFailed(message=msg)
def _validate_image_flavor(self, image, flavor):
try:
image_obj = self.client_plugin('glance').get_image(image)
flavor_obj = self.client_plugin().get_flavor(flavor)
except Exception as ex:
# Flavor or image may not have been created in the backend
# yet when they are part of the same stack/template.
if (self.client_plugin().is_not_found(ex) or
self.client_plugin('glance').is_not_found(ex)):
return
raise
else:
if image_obj.status.lower() != self.IMAGE_ACTIVE:
msg = _('Image status is required to be %(cstatus)s not '
'%(wstatus)s.') % {
'cstatus': self.IMAGE_ACTIVE,
'wstatus': image_obj.status}
raise exception.StackValidationFailed(message=msg)
# validate image/flavor combination
if flavor_obj.ram < image_obj.min_ram:
msg = _('Image %(image)s requires %(imram)s minimum ram. '
'Flavor %(flavor)s has only %(flram)s.') % {
'image': image, 'imram': image_obj.min_ram,
'flavor': flavor, 'flram': flavor_obj.ram}
raise exception.StackValidationFailed(message=msg)
# validate image/flavor disk compatibility
if flavor_obj.disk < image_obj.min_disk:
msg = _('Image %(image)s requires %(imsz)s GB minimum '
'disk space. Flavor %(flavor)s has only '
'%(flsz)s GB.') % {
'image': image, 'imsz': image_obj.min_disk,
'flavor': flavor, 'flsz': flavor_obj.disk}
raise exception.StackValidationFailed(message=msg)
def validate(self):
"""Validate any of the provided params."""
super(Server, self).validate()
if self.user_data_software_config():
if 'deployments' in self.t.metadata():
msg = _('deployments key not allowed in resource metadata '
'with user_data_format of SOFTWARE_CONFIG')
raise exception.StackValidationFailed(message=msg)
self._validate_block_device_mapping()
# make sure the image exists if specified.
image = self.properties[self.IMAGE]
flavor = self.properties[self.FLAVOR]
if image:
self._validate_image_flavor(image, flavor)
networks = self.properties[self.NETWORKS] or []
for network in networks:
self._validate_network(network)
has_str_net = self._str_network(networks) is not None
if has_str_net:
if len(networks) != 1:
msg = _('Property "%s" can not be specified if '
'multiple network interfaces set for '
'server.') % self.ALLOCATE_NETWORK
raise exception.StackValidationFailed(message=msg)
            # Check whether the string network value may be used
if not self.client_plugin().is_version_supported(
MICROVERSION_STR_NETWORK):
msg = (_('Cannot use "%s" property - compute service '
'does not support the required api '
'microversion.') % self.ALLOCATE_NETWORK)
raise exception.StackValidationFailed(message=msg)
# record if any networks include explicit ports
has_port = any(n[self.NETWORK_PORT] is not None for n in networks)
# if 'security_groups' present for the server and explicit 'port'
# in one or more entries in 'networks', raise validation error
if has_port and self.properties[self.SECURITY_GROUPS]:
raise exception.ResourcePropertyConflict(
self.SECURITY_GROUPS,
"/".join([self.NETWORKS, self.NETWORK_PORT]))
        # Check whether nic tags may be used
if self._is_nic_tagged(networks=networks):
if not self.client_plugin().is_version_supported(
MICROVERSION_NIC_TAGS):
msg = (_('Cannot use "%s" property in networks - '
'nova does not support required '
                         'api microversion.') % self.NIC_TAG)
raise exception.StackValidationFailed(message=msg)
        # Check whether server tags may be used
if self.properties[self.TAGS]:
if not self.client_plugin().is_version_supported(
MICROVERSION_TAGS):
msg = (_('Cannot use "%s" property - nova does not support '
'required api microversion.') % self.TAGS)
raise exception.StackValidationFailed(message=msg)
# retrieve provider's absolute limits if it will be needed
metadata = self.properties[self.METADATA]
personality = self.properties[self.PERSONALITY]
if metadata or personality:
limits = self.client_plugin().absolute_limits()
# verify that the number of metadata entries is not greater
# than the maximum number allowed in the provider's absolute
# limits
if metadata:
msg = _('Instance metadata must not contain greater than %s '
'entries. This is the maximum number allowed by your '
'service provider') % limits['maxServerMeta']
self._check_maximum(len(metadata),
limits['maxServerMeta'], msg)
# verify the number of personality files and the size of each
# personality file against the provider's absolute limits
if personality:
msg = _("The personality property may not contain "
"greater than %s entries.") % limits['maxPersonality']
self._check_maximum(len(personality),
limits['maxPersonality'], msg)
for path, contents in personality.items():
msg = (_("The contents of personality file \"%(path)s\" "
"is larger than the maximum allowed personality "
"file size (%(max_size)s bytes).") %
{'path': path,
'max_size': limits['maxPersonalitySize']})
self._check_maximum(len(bytes(contents.encode('utf-8'))
) if contents is not None else 0,
limits['maxPersonalitySize'], msg)
def _delete(self):
if self.user_data_software_config():
self._delete_queue()
self._delete_user()
self._delete_temp_url()
# remove internal and external ports
self._delete_internal_ports()
self.data_delete('external_ports')
if self.resource_id is None:
return
try:
self.client().servers.delete(self.resource_id)
except Exception as e:
self.client_plugin().ignore_not_found(e)
return
return progress.ServerDeleteProgress(self.resource_id)
def handle_snapshot_delete(self, state):
if state[1] != self.FAILED and self.resource_id:
image_id = self.client().servers.create_image(
self.resource_id, self.physical_resource_name())
return progress.ServerDeleteProgress(
self.resource_id, image_id, False)
return self._delete()
def handle_delete(self):
return self._delete()
def check_delete_complete(self, prg):
if not prg:
return True
if not prg.image_complete:
image = self.client_plugin('glance').get_image(prg.image_id)
if image.status.lower() in (self.IMAGE_ERROR,
self.IMAGE_DELETED):
raise exception.Error(image.status)
elif image.status.lower() == self.IMAGE_ACTIVE:
prg.image_complete = True
if not self._delete():
return True
return False
return self.client_plugin().check_delete_server_complete(
prg.server_id)
def handle_suspend(self):
"""Suspend a server.
Note we do not wait for the SUSPENDED state, this is polled for by
check_suspend_complete in a similar way to the create logic so we can
take advantage of coroutines.
"""
if self.resource_id is None:
raise exception.Error(_('Cannot suspend %s, resource_id not set') %
self.name)
try:
server = self.client().servers.get(self.resource_id)
except Exception as e:
if self.client_plugin().is_not_found(e):
raise exception.NotFound(_('Failed to find server %s') %
self.resource_id)
else:
raise
else:
            # if the server has already been suspended successfully,
            # there is no need to suspend it again
if self.client_plugin().get_status(server) != 'SUSPENDED':
LOG.debug('suspending server %s', self.resource_id)
server.suspend()
return server.id
def check_suspend_complete(self, server_id):
cp = self.client_plugin()
server = cp.fetch_server(server_id)
if not server:
return False
status = cp.get_status(server)
LOG.debug('%(name)s check_suspend_complete status = %(status)s',
{'name': self.name, 'status': status})
if status in list(cp.deferred_server_statuses + ['ACTIVE']):
return status == 'SUSPENDED'
else:
exc = exception.ResourceUnknownStatus(
result=_('Suspend of server %s failed') % server.name,
resource_status=status)
raise exc
def handle_resume(self):
"""Resume a server.
Note we do not wait for the ACTIVE state, this is polled for by
check_resume_complete in a similar way to the create logic so we can
take advantage of coroutines.
"""
if self.resource_id is None:
raise exception.Error(_('Cannot resume %s, resource_id not set') %
self.name)
try:
server = self.client().servers.get(self.resource_id)
except Exception as e:
if self.client_plugin().is_not_found(e):
raise exception.NotFound(_('Failed to find server %s') %
self.resource_id)
else:
raise
else:
            # if the server has already been resumed successfully,
            # there is no need to resume it again
if self.client_plugin().get_status(server) != 'ACTIVE':
LOG.debug('resuming server %s', self.resource_id)
server.resume()
return server.id
def check_resume_complete(self, server_id):
return self.client_plugin()._check_active(server_id)
def handle_snapshot(self):
image_id = self.client().servers.create_image(
self.resource_id, self.physical_resource_name())
self.data_set('snapshot_image_id', image_id)
return image_id
def check_snapshot_complete(self, image_id):
image = self.client_plugin('glance').get_image(image_id)
if image.status.lower() == self.IMAGE_ACTIVE:
return True
elif image.status.lower() in (self.IMAGE_ERROR, self.IMAGE_DELETED):
raise exception.Error(image.status)
return False
def handle_delete_snapshot(self, snapshot):
image_id = snapshot['resource_data'].get('snapshot_image_id')
with self.client_plugin('glance').ignore_not_found:
self.client('glance').images.delete(image_id)
def handle_restore(self, defn, restore_data):
image_id = restore_data['resource_data']['snapshot_image_id']
props = dict((k, v) for k, v in self.properties.data.items()
if v is not None)
for key in [self.BLOCK_DEVICE_MAPPING, self.BLOCK_DEVICE_MAPPING_V2,
self.NETWORKS]:
if props.get(key) is not None:
props[key] = list(dict((k, v) for k, v in prop.items()
if v is not None)
for prop in props[key])
props[self.IMAGE] = image_id
return defn.freeze(properties=props)
def prepare_for_replace(self):
# if the server has not been created yet, do nothing
if self.resource_id is None:
return
self.prepare_ports_for_replace()
def restore_prev_rsrc(self, convergence=False):
self.restore_ports_after_rollback(convergence=convergence)
def resource_mapping():
return {
'OS::Nova::Server': Server,
}
| apache-2.0 | -5,055,696,018,264,513,000 | 43.102202 | 79 | 0.531682 | false |
icse18-FAST/FAST | tools/clean-preprocessed-input.py | 1 | 1377 | '''
This file is part of an ICSE'18 submission that is currently under review.
For more information visit: https://github.com/icse18-FAST/FAST.
This is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this source. If not, see <http://www.gnu.org/licenses/>.
'''
import os
if __name__ == "__main__":
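    # Walk the preprocessed input/ directory and delete every file except the
    # fault-matrix pickles and the per-granularity coverage files
    # (-bbox, -function, -line, -branch) kept for later use.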
for root, folders, files in os.walk("input/"):
for file in files:
if file[0] == ".":
pass
elif (file == "fault_matrix_key_tc.pickle" or
file == "fault_matrix.pickle"):
pass
elif ("-bbox.txt" in file or
"-function.txt" in file or
"-line.txt" in file or
"-branch.txt" in file):
pass
else:
print "Deleting {}/{}".format(root, file)
os.remove("{}/{}".format(root, file))
| gpl-3.0 | -6,610,629,189,178,085,000 | 36.216216 | 75 | 0.615105 | false |
atkvo/masters-bot | src/autobot/src/capture_image.py | 1 | 2117 | #!/usr/bin/env python
from std_msgs.msg import String
#import roslib
import sys
import datetime
import time
import numpy as np
from cv_bridge import CvBridge, CvBridgeError
import cv2
import rospy
from autobot.msg import drive_param
from sensor_msgs.msg import Image
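# ROS node that captures training data for steering: it caches the latest
# rectified left camera frame and, whenever drive parameters arrive, writes
# the frame to dataset2/ with the steering angle encoded in the file name
# (only while velocity is above 7.0).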
bridge = CvBridge()
i = 0
currentImage = 0
currentAngle = 0
currentVel = 0
# Timestamp recorded once at node start-up; used to tag the file names
# written during this run.
start_time = time.time()
def callback(temp):
#print("current image updated")
global currentImage
try:
currentImage = bridge.imgmsg_to_cv2(temp, desired_encoding="passthrough")
except CvBridgeError as e:
print(e)
    global currentAngle
    global currentVel
    global i
    # Encode steering direction and angle into the file name so the dataset
    # can later be split into left/right/straight examples.
    if currentAngle > 0:
        filepath = "dataset2/right" + str(currentAngle) + "_" + str(i) + str(start_time) + ".png"
    elif currentAngle < 0:
        filepath = "dataset2/left" + str(currentAngle) + "_" + str(i) + str(start_time) + ".png"
    else:
        filepath = "dataset2/zero" + str(currentAngle) + "_" + str(i) + str(start_time) + ".png"
    i += 1
    # Only keep frames recorded while the car is actually driving.
    if currentVel > 7.0:
        print("picture taken")
        cv2.imwrite(filepath, currentImage)
def takePicture(data):
#define file path
global currentAngle
global currentVel
currentAngle = data.angle
currentVel = data.velocity
global i
    # Same labelling scheme as callback(): encode steering direction, angle,
    # frame counter and run timestamp into the file name.
    if currentAngle > 0:
        filepath = "dataset2/right" + str(currentAngle) + "_" + str(i) + str(start_time) + ".png"
    elif currentAngle < 0:
        filepath = "dataset2/left" + str(currentAngle) + "_" + str(i) + str(start_time) + ".png"
    else:
        filepath = "dataset2/zero" + str(currentAngle) + "_" + str(i) + str(start_time) + ".png"
    i += 1
    if currentVel > 7.0:
        print("picture taken")
        cv2.imwrite(filepath, currentImage)
def listen():
rospy.init_node('capture_image', anonymous=True)
rospy.Subscriber("left/image_rect_color", Image, callback)
rospy.Subscriber("drive_parameters", drive_param, takePicture)
rospy.spin()
if __name__ == '__main__':
print("image capture initialized")
    print(start_time)
listen()
| mit | 5,722,113,862,861,524,000 | 25.4625 | 91 | 0.632971 | false |
GoodYk/formula-editor- | FormulaShow/asciimathmll.py | 1 | 23678 | # Copyright (c) 2010-2011, Gabriele Favalessa
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
from xml.etree.ElementTree import Element, tostring
__all__ = ['parse']
Element_ = Element
AtomicString_ = lambda s: s
def El(tag, text=None, *children, **attrib):
element = Element_(tag, **attrib)
if not text is None:
if isinstance(text, basestring):
element.text = AtomicString_(text)
else:
children = (text, ) + children
for child in children:
element.append(child)
return element
number_re = re.compile('-?(\d+\.(\d+)?|\.?\d+)')
def strip_parens(n):
if n.tag == 'mrow':
if n[0].get('_opening', False):
del n[0]
if n[-1].get('_closing', False):
del n[-1]
return n
def is_enclosed_in_parens(n):
return n.tag == 'mrow' and n[0].get('_opening', False) and n[-1].get('_closing', False)
def binary(operator, operand_1, operand_2, swap=False):
operand_1 = strip_parens(operand_1)
operand_2 = strip_parens(operand_2)
if not swap:
operator.append(operand_1)
operator.append(operand_2)
else:
operator.append(operand_2)
operator.append(operand_1)
return operator
def unary(operator, operand, swap=False):
operand = strip_parens(operand)
if swap:
operator.insert(0, operand)
else:
operator.append(operand)
return operator
def frac(num, den):
return El('mfrac', strip_parens(num), strip_parens(den))
def sub(base, subscript):
subscript = strip_parens(subscript)
if base.tag in ('msup', 'mover'):
children = base.getchildren()
n = El('msubsup' if base.tag == 'msup' else 'munderover', children[0], subscript, children[1])
else:
n = El('munder' if base.get('_underover', False) else 'msub', base, subscript)
return n
def sup(base, superscript):
superscript = strip_parens(superscript)
if base.tag in ('msub', 'munder'):
children = base.getchildren()
n = El('msubsup' if base.tag == 'msub' else 'munderover', children[0], children[1], superscript)
else:
n = El('mover' if base.get('_underover', False) else 'msup', base, superscript)
return n
def parse(s, element=Element, atomicstring=lambda s: s):
"""
Translates from ASCIIMathML (an easy to type and highly readable way to
represent math formulas) into MathML (a w3c standard directly displayable by
some web browsers).
The function `parse()` generates a tree of elements:
>>> import asciimathml
>>> asciimathml.parse('sqrt 2')
<Element math at b76fb28c>
The tree can then be manipulated using the standard python library. For
example we can generate its string representation:
El('mtr', El('mtd', *nodes))
El('mtr', El('mtd', El('msup', *nodes)))
>>> from xml.etree.ElementTree import tostring
>>> tostring(asciimathml.parse('sqrt 2'))
'<math><mstyle><msqrt><mn>2</mn></msqrt></mstyle></math>'
"""
global Element_, AtomicString_
Element_ = element
AtomicString_ = atomicstring
s, nodes = parse_exprs(s)
remove_invisible(nodes)
nodes = map(remove_private, nodes)
return tostring(El('mtr', El('mtd', *nodes)))
delimiters = {'{': '}', '(': ')', '[': ']'}
def parse_string(s):
opening = s[0]
if opening in delimiters:
closing = delimiters[opening]
end = s.find(closing)
text = s[1:end]
s = s[end+1:]
else:
s, text = parse_m(s)
return s, El('mrow', El('mtext', text))
tracing_level = 0
def trace_parser(p):
"""
Decorator for tracing the parser.
Use it to decorate functions with signature:
string -> (string, nodes)
and a trace of the progress made by the parser will be printed to stderr.
Currently parse_exprs(), parse_expr() and parse_m() have the right signature.
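    For example (illustrative only), a parser can also be wrapped by hand:
    parse_m = trace_parser(parse_m)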
"""
def nodes_to_string(n):
if isinstance(n, list):
result = '[ '
for m in map(nodes_to_string, n):
result += m
result += ' '
result += ']'
return result
else:
try:
return tostring(remove_private(copy(n)))
except Exception as e:
return n
def print_trace(*args):
import sys
sys.stderr.write(" " * tracing_level)
for arg in args:
sys.stderr.write(str(arg))
sys.stderr.write(' ')
sys.stderr.write('\n')
sys.stderr.flush()
def wrapped(s, *args, **kwargs):
global tracing_level
print_trace(p.__name__, repr(s))
tracing_level += 1
s, n = p(s, *args, **kwargs)
tracing_level -= 1
print_trace("-> ", repr(s), nodes_to_string(n))
return s, n
return wrapped
def parse_expr(s, siblings, required=False):
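    # Parses a single expression: a symbol, number or string, a parenthesized
    # group (recursing via parse_exprs), or a unary/binary operator whose
    # operands are parsed recursively and spliced into the MathML node.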
s, n = parse_m(s, required=required)
if not n is None:
# Being both an _opening and a _closing element is a trait of
# symmetrical delimiters (e.g. ||).
# In that case, act as an opening delimiter only if there is not
# already one of the same kind among the preceding siblings.
if n.get('_opening', False) \
and (not n.get('_closing', False) \
or find_node_backwards(siblings, n.text) == -1):
s, children = parse_exprs(s, [n], inside_parens=True)
n = El('mrow', *children)
if n.tag == 'mtext':
s, n = parse_string(s)
elif n.get('_arity', 0) == 1:
s, m = parse_expr(s, [], True)
n = unary(n, m, n.get('_swap', False))
elif n.get('_arity', 0) == 2:
s, m1 = parse_expr(s, [], True)
s, m2 = parse_expr(s, [], True)
n = binary(n, m1, m2, n.get('_swap', False))
return s, n
def find_node(ns, text):
for i, n in enumerate(ns):
if n.text == text:
return i
return -1
def find_node_backwards(ns, text):
for i, n in enumerate(reversed(ns)):
if n.text == text:
return len(ns) - i
return -1
def nodes_to_row(row):
mrow = El('mtr')
nodes = row.getchildren()
while True:
i = find_node(nodes, ',')
if i > 0:
mrow.append(El('mtd', *nodes[:i]))
nodes = nodes[i+1:]
else:
mrow.append(El('mtd', *nodes))
break
return mrow
def nodes_to_matrix(nodes):
mtable = El('mtable')
for row in nodes[1:-1]:
if row.text == ',':
continue
mtable.append(nodes_to_row(strip_parens(row)))
return El('mrow', nodes[0], mtable, nodes[-1])
def parse_exprs(s, nodes=None, inside_parens=False):
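    # Main parsing loop: accumulates sibling nodes, closes parenthesized
    # groups, rewrites special binary operators (/, ^, _) as they appear and
    # turns comma-separated parenthesized rows into an mtable when a matrix
    # is detected.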
if nodes is None:
nodes = []
inside_matrix = False
while True:
s, n = parse_expr(s, nodes)
if not n is None:
nodes.append(n)
if n.get('_closing', False):
if not inside_matrix:
return s, nodes
else:
return s, nodes_to_matrix(nodes)
if inside_parens and n.text == ',' and is_enclosed_in_parens(nodes[-2]):
inside_matrix = True
if len(nodes) >= 3 and nodes[-2].get('_special_binary'):
transform = nodes[-2].get('_special_binary')
nodes[-3:] = [transform(nodes[-3], nodes[-1])]
if s == '':
return '', nodes
def remove_private(n):
_ks = [k for k in n.keys() if k.startswith('_') or k == 'attrib']
for _k in _ks:
del n.attrib[_k]
for c in n.getchildren():
remove_private(c)
return n
def remove_invisible(ns):
for i in range(len(ns)-1, 0, -1):
if ns[i].get('_invisible', False):
del ns[i]
else:
remove_invisible(ns[i].getchildren())
def copy(n):
m = El(n.tag, n.text, **dict(n.items()))
for c in n.getchildren():
m.append(copy(c))
return m
def parse_m(s, required=False):
s = s.lstrip()
if s == '':
return '', El('mi', u'\u25a1') if required else None
m = number_re.match(s)
if m:
number = m.group(0)
if number[0] == '-':
return s[m.end():], El('mrow', El('mo', '-'), El('mn', number[1:]))
else:
return s[m.end():], El('mn', number)
for y in symbol_names:
if (s.startswith(y)):
n = copy(symbols[y])
if n.get('_space', False):
n = El('mrow',
El('mspace', width='0.5ex'),
n,
El('mspace', width='0.5ex'))
return s[len(y):], n
return s[1:], El('mi' if s[0].isalpha() else 'mo', s[0])
symbols = {}
def Symbol(input, el):
symbols[input] = el
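# Built-in symbol table. Each entry maps an ASCIIMathML token to a prebuilt
# MathML element; extra symbols can be registered the same way, e.g. (a
# hypothetical addition) Symbol(input="grad", el=El("mo", u"\u2207")).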
Symbol(input="alpha", el=El("mi", u"\u03B1"))
Symbol(input="beta", el=El("mi", u"\u03B2"))
Symbol(input="chi", el=El("mi", u"\u03C7"))
Symbol(input="delta", el=El("mi", u"\u03B4"))
Symbol(input="Delta", el=El("mo", u"\u0394"))
Symbol(input="epsi", el=El("mi", u"\u03B5"))
Symbol(input="varepsilon", el=El("mi", u"\u025B"))
Symbol(input="eta", el=El("mi", u"\u03B7"))
Symbol(input="gamma", el=El("mi", u"\u03B3"))
Symbol(input="Gamma", el=El("mo", u"\u0393"))
Symbol(input="iota", el=El("mi", u"\u03B9"))
Symbol(input="kappa", el=El("mi", u"\u03BA"))
Symbol(input="lambda", el=El("mi", u"\u03BB"))
Symbol(input="Lambda", el=El("mo", u"\u039B"))
Symbol(input="mu", el=El("mi", u"\u03BC"))
Symbol(input="nu", el=El("mi", u"\u03BD"))
Symbol(input="omega", el=El("mi", u"\u03C9"))
Symbol(input="Omega", el=El("mo", u"\u03A9"))
Symbol(input="phi", el=El("mi", u"\u03C6"))
Symbol(input="varphi", el=El("mi", u"\u03D5"))
Symbol(input="Phi", el=El("mo", u"\u03A6"))
Symbol(input="pi", el=El("mi", u"\u03C0"))
Symbol(input="Pi", el=El("mo", u"\u03A0"))
Symbol(input="psi", el=El("mi", u"\u03C8"))
Symbol(input="Psi", el=El("mi", u"\u03A8"))
Symbol(input="rho", el=El("mi", u"\u03C1"))
Symbol(input="sigma", el=El("mi", u"\u03C3"))
Symbol(input="Sigma", el=El("mo", u"\u03A3"))
Symbol(input="tau", el=El("mi", u"\u03C4"))
Symbol(input="theta", el=El("mi", u"\u03B8"))
Symbol(input="vartheta", el=El("mi", u"\u03D1"))
Symbol(input="Theta", el=El("mo", u"\u0398"))
Symbol(input="upsilon", el=El("mi", u"\u03C5"))
Symbol(input="xi", el=El("mi", u"\u03BE"))
Symbol(input="Xi", el=El("mo", u"\u039E"))
Symbol(input="zeta", el=El("mi", u"\u03B6"))
Symbol(input="*", el=El("mo", u"\u22C5", mathsize='big'))
Symbol(input="**", el=El("mo", u"\u22C6"))
Symbol(input=" ", el=El('mspace', width='0.5ex'))
Symbol(input="'", el=El("mo", u"'", mathsize='4ex'))
Symbol(input="/", el=El("mo", u"/", _special_binary=frac))
Symbol(input="^", el=El("mo", u"^", _special_binary=sup))
Symbol(input="_", el=El("mo", u"_", _special_binary=sub))
Symbol(input="//", el=El("mo", u"/"))
Symbol(input="\\\\", el=El("mo", u"\\"))
Symbol(input="setminus", el=El("mo", u"\\"))
Symbol(input="xx", el=El("mo", u"\u00D7"))
Symbol(input="-:", el=El("mo", u"\u00F7"))
Symbol(input="@", el=El("mo", u"\u2218"))
Symbol(input="o+", el=El("mo", u"\u2295"))
Symbol(input="ox", el=El("mo", u"\u2297"))
Symbol(input="o.", el=El("mo", u"\u2299"))
Symbol(input="sum", el=El("mo", u"\u2211", _underover=True, mathsize='3ex'))
Symbol(input="prod", el=El("mo", u"\u220F", _underover=True))
Symbol(input="^^", el=El("mo", u"\u2227"))
Symbol(input="^^^", el=El("mo", u"\u22C0", _underover=True))
Symbol(input="vv", el=El("mo", u"\u2228"))
Symbol(input="vvv", el=El("mo", u"\u22C1", _underover=True))
Symbol(input="nn", el=El("mo", u"\u2229"))
Symbol(input="nnn", el=El("mo", u"\u22C2", _underover=True))
Symbol(input="uu", el=El("mo", u"\u222A"))
Symbol(input="uuu", el=El("mo", u"\u22C3", _underover=True))
Symbol(input="!=", el=El("mo", u"\u2260"))
Symbol(input=":=", el=El("mo", u":="))
Symbol(input="lt", el=El("mo", u"<"))
Symbol(input="<=", el=El("mo", u"\u2264"))
Symbol(input="lt=", el=El("mo", u"\u2264"))
Symbol(input=">=", el=El("mo", u"\u2265"))
Symbol(input="geq", el=El("mo", u"\u2265"))
Symbol(input="-<", el=El("mo", u"\u227A"))
Symbol(input="-lt", el=El("mo", u"\u227A"))
Symbol(input=">-", el=El("mo", u"\u227B"))
Symbol(input="-<=", el=El("mo", u"\u2AAF"))
Symbol(input=">-=", el=El("mo", u"\u2AB0"))
Symbol(input="in", el=El("mo", u"\u2208"))
Symbol(input="!in", el=El("mo", u"\u2209"))
Symbol(input="sub", el=El("mo", u"\u2282"))
Symbol(input="sup", el=El("mo", u"\u2283"))
Symbol(input="sube", el=El("mo", u"\u2286"))
Symbol(input="supe", el=El("mo", u"\u2287"))
Symbol(input="-=", el=El("mo", u"\u2261"))
Symbol(input="~=", el=El("mo", u"\u2245"))
Symbol(input="~~", el=El("mo", u"\u2248"))
Symbol(input="prop", el=El("mo", u"\u221D"))
Symbol(input="algorithm", el=El("mtext", u"algorithm", _space=True))
Symbol(input="and", el=El("mtext", u"and", _space=True))
Symbol(input="annotation", el=El("mtext", u"annotation", _space=True))
Symbol(input="assert", el=El("mtext", u"assert", _space=True))
Symbol(input="block", el=El("mtext", u"block", _space=True))
Symbol(input="break", el=El("mtext", u"break", _space=True))
Symbol(input="class", el=El("mtext", u"class", _space=True))
Symbol(input="connect", el=El("mtext", u"connect", _space=True))
Symbol(input="connector", el=El("mtext", u"connector", _space=True))
Symbol(input="constant", el=El("mtext", u"constant", _space=True))
Symbol(input="constrainedby", el=El("mtext", u"constrainedby", _space=True))
Symbol(input="der", el=El("mtext", u"der", _space=True))
Symbol(input="discrete", el=El("mtext", u"discrete", _space=True))
Symbol(input="each", el=El("mtext", u"each", _space=True))
Symbol(input="else", el=El("mtext", u"else", _space=True))
Symbol(input="elseif", el=El("mtext", u"elseif", _space=True))
Symbol(input="elsewhen", el=El("mtext", u"elsewhen", _space=True))
Symbol(input="encapsulated", el=El("mtext", u"encapsulated", _space=True))
Symbol(input="end", el=El("mtext", u"end", _space=True))
Symbol(input="enumeration", el=El("mtext", u"enumeration", _space=True))
Symbol(input="equation", el=El("mtext", u"equation", _space=True))
Symbol(input="expandable", el=El("mtext", u"expandable", _space=True))
Symbol(input="extends", el=El("mtext", u"extends", _space=True))
Symbol(input="external", el=El("mtext", u"external", _space=True))
Symbol(input="false", el=El("mtext", u"false", _space=True))
Symbol(input="final", el=El("mtext", u"final", _space=True))
Symbol(input="flow", el=El("mtext", u"flow", _space=True))
Symbol(input="for", el=El("mtext", u"for", _space=True))
Symbol(input="function", el=El("mtext", u"function", _space=True))
Symbol(input="if", el=El("mtext", u"if", _space=True))
Symbol(input="import", el=El("mtext", u"import", _space=True))
Symbol(input="impure", el=El("mtext", u"impure", _space=True))
Symbol(input="in", el=El("mtext", u"in", _space=True))
Symbol(input="initial", el=El("mtext", u"initial", _space=True))
Symbol(input="inner", el=El("mtext", u"inner", _space=True))
Symbol(input="input", el=El("mtext", u"input", _space=True))
Symbol(input="loop", el=El("mtext", u"loop", _space=True))
Symbol(input="model", el=El("mtext", u"model", _space=True))
Symbol(input="operator", el=El("mtext", u"operator", _space=True))
Symbol(input="or", el=El("mtext", u"or", _space=True))
Symbol(input="outer", el=El("mtext", u"outer", _space=True))
Symbol(input="output", el=El("mtext", u"output", _space=True))
Symbol(input="package", el=El("mtext", u"package", _space=True))
Symbol(input="parameter", el=El("mtext", u"parameter", _space=True))
Symbol(input="partial", el=El("mtext", u"partial", _space=True))
Symbol(input="protected", el=El("mtext", u"protected", _space=True))
Symbol(input="public", el=El("mtext", u"public", _space=True))
Symbol(input="pure", el=El("mtext", u"pure", _space=True))
Symbol(input="record", el=El("mtext", u"record", _space=True))
Symbol(input="redeclare", el=El("mtext", u"redeclare", _space=True))
Symbol(input="replaceable", el=El("mtext", u"replaceable", _space=True))
Symbol(input="return", el=El("mtext", u"return", _space=True))
Symbol(input="stream", el=El("mtext", u"stream", _space=True))
Symbol(input="then", el=El("mtext", u"then", _space=True))
Symbol(input="true", el=El("mtext", u"true", _space=True))
Symbol(input="type", el=El("mtext", u"type", _space=True))
Symbol(input="when", el=El("mtext", u"when", _space=True))
Symbol(input="while", el=El("mtext", u"while", _space=True))
Symbol(input="within", el=El("mtext", u"within", _space=True))
# Symbol(input="and", el=El("mtext", u"and", _space=True))
# Symbol(input="or", el=El("mtext", u"or", _space=True))
Symbol(input="not", el=El("mo", u"\u00AC"))
Symbol(input="=>", el=El("mo", u"\u21D2"))
# Symbol(input="if", el=El("mo", u"if", _space=True))
Symbol(input="<=>", el=El("mo", u"\u21D4"))
Symbol(input="AA", el=El("mo", u"\u2200"))
Symbol(input="EE", el=El("mo", u"\u2203"))
Symbol(input="_|_", el=El("mo", u"\u22A5"))
Symbol(input="TT", el=El("mo", u"\u22A4"))
Symbol(input="|--", el=El("mo", u"\u22A2"))
Symbol(input="|==", el=El("mo", u"\u22A8"))
Symbol(input="(", el=El("mo", "(", _opening=True))
Symbol(input=")", el=El("mo", ")", _closing=True))
Symbol(input="[", el=El("mo", "[", _opening=True))
Symbol(input="]", el=El("mo", "]", _closing=True))
Symbol(input="{", el=El("mo", "{", _opening=True))
Symbol(input="}", el=El("mo", "}", _closing=True))
Symbol(input="|", el=El("mo", u"|", _opening=True, _closing=True))
Symbol(input="||", el=El("mo", u"\u2016", _opening=True, _closing=True)) # double vertical line
Symbol(input="(:", el=El("mo", u"\u2329", _opening=True))
Symbol(input=":)", el=El("mo", u"\u232A", _closing=True))
Symbol(input="<<", el=El("mo", u"\u2329", _opening=True))
Symbol(input=">>", el=El("mo", u"\u232A", _closing=True))
Symbol(input="{:", el=El("mo", u"{:", _opening=True, _invisible=True))
Symbol(input=":}", el=El("mo", u":}", _closing=True, _invisible=True))
Symbol(input="int", el=El("mo", u"\u222B", mathsize='3ex'))
# Symbol(input="dx", el=El("mi", u"{:d x:}", _definition=True))
# Symbol(input="dy", el=El("mi", u"{:d y:}", _definition=True))
# Symbol(input="dz", el=El("mi", u"{:d z:}", _definition=True))
# Symbol(input="dt", el=El("mi", u"{:d t:}", _definition=True))
Symbol(input="oint", el=El("mo", u"\u222E", mathsize='4ex'))
Symbol(input="del", el=El("mo", u"\u2202"))
Symbol(input="grad", el=El("mo", u"\u2207"))
Symbol(input="+-", el=El("mo", u"\u00B1"))
Symbol(input="O/", el=El("mo", u"\u2205"))
Symbol(input="oo", el=El("mo", u"\u221E"))
Symbol(input="aleph", el=El("mo", u"\u2135"))
Symbol(input="...", el=El("mo", u"..."))
Symbol(input=":.", el=El("mo", u"\u2234"))
Symbol(input="/_", el=El("mo", u"\u2220"))
Symbol(input="\\ ", el=El("mo", u"\u00A0"))
Symbol(input="quad", el=El("mo", u"\u00A0\u00A0"))
Symbol(input="qquad", el=El("mo", u"\u00A0\u00A0\u00A0\u00A0"))
Symbol(input="cdots", el=El("mo", u"\u22EF"))
Symbol(input="vdots", el=El("mo", u"\u22EE"))
Symbol(input="ddots", el=El("mo", u"\u22F1"))
Symbol(input="diamond", el=El("mo", u"\u22C4"))
Symbol(input="square", el=El("mo", u"\u25A1"))
Symbol(input="|__", el=El("mo", u"\u230A"))
Symbol(input="__|", el=El("mo", u"\u230B"))
Symbol(input="|~", el=El("mo", u"\u2308"))
Symbol(input="~|", el=El("mo", u"\u2309"))
Symbol(input="CC", el=El("mo", u"\u2102"))
Symbol(input="NN", el=El("mo", u"\u2115"))
Symbol(input="QQ", el=El("mo", u"\u211A"))
Symbol(input="RR", el=El("mo", u"\u211D"))
Symbol(input="ZZ", el=El("mo", u"\u2124"))
# Symbol(input="f", el=El("mi", u"f", _func=True)) # sample
# Symbol(input="g", el=El("mi", u"g", _func=True))
Symbol(input="lim", el=El("mo", u"lim", _underover=True))
Symbol(input="Lim", el=El("mo", u"Lim", _underover=True))
Symbol(input="sin", el=El("mrow", El("mo", "sin"), _arity=1))
Symbol(input="sin", el=El("mrow", El("mo", "sin"), _arity=1))
Symbol(input="cos", el=El("mrow", El("mo", "cos"), _arity=1))
Symbol(input="tan", el=El("mrow", El("mo", "tan"), _arity=1))
Symbol(input="sinh", el=El("mrow", El("mo", "sinh"), _arity=1))
Symbol(input="cosh", el=El("mrow", El("mo", "cosh"), _arity=1))
Symbol(input="tanh", el=El("mrow", El("mo", "tanh"), _arity=1))
Symbol(input="cot", el=El("mrow", El("mo", "cot"), _arity=1))
Symbol(input="sec", el=El("mrow", El("mo", "sec"), _arity=1))
Symbol(input="csc", el=El("mrow", El("mo", "csc"), _arity=1))
Symbol(input="log", el=El("mrow", El("mo", "log"), _arity=1))
Symbol(input="ln", el=El("mrow", El("mo", "ln"), _arity=1))
Symbol(input="det", el=El("mrow", El("mo", "det"), _arity=1))
Symbol(input="gcd", el=El("mrow", El("mo", "gcd"), _arity=1))
Symbol(input="lcm", el=El("mrow", El("mo", "lcm"), _arity=1))
Symbol(input="dim", el=El("mo", u"dim"))
Symbol(input="mod", el=El("mo", u"mod"))
Symbol(input="lub", el=El("mo", u"lub"))
Symbol(input="glb", el=El("mo", u"glb"))
Symbol(input="min", el=El("mo", u"min", _underover=True))
Symbol(input="max", el=El("mo", u"max", _underover=True))
Symbol(input="uarr", el=El("mo", u"\u2191"))
Symbol(input="darr", el=El("mo", u"\u2193"))
Symbol(input="rarr", el=El("mo", u"\u2192"))
Symbol(input="->", el=El("mo", u"\u2192"))
Symbol(input="|->", el=El("mo", u"\u21A6"))
Symbol(input="larr", el=El("mo", u"\u2190"))
Symbol(input="harr", el=El("mo", u"\u2194"))
Symbol(input="rArr", el=El("mo", u"\u21D2"))
Symbol(input="lArr", el=El("mo", u"\u21D0"))
Symbol(input="hArr", el=El("mo", u"\u21D4"))
Symbol(input="hat", el=El("mover", El("mo", u"\u005E"), _arity=1, _swap=1))
Symbol(input="bar", el=El("mover", El("mo", u"\u00AF"), _arity=1, _swap=1))
Symbol(input="vec", el=El("mover", El("mo", u"\u2192"), _arity=1, _swap=1))
Symbol(input="dot", el=El("mover", El("mo", u"."), _arity=1, _swap=1))
Symbol(input="ddot",el=El("mover", El("mo", u".."), _arity=1, _swap=1))
Symbol(input="ul", el=El("munder", El("mo", u"\u0332"), _arity=1, _swap=1))
Symbol(input="sqrt", el=El("msqrt", _arity=1))
Symbol(input="root", el=El("mroot", _arity=2, _swap=True))
Symbol(input="frac", el=El("mfrac", _arity=2))
Symbol(input="stackrel", el=El("mover", _arity=2))
Symbol(input="text", el=El("mtext", _arity=1))
# {input:"mbox", tag:"mtext", output:"mbox", tex:null, ttype:TEXT},
# {input:"\"", tag:"mtext", output:"mbox", tex:null, ttype:TEXT};
symbol_names = sorted(symbols.keys(), key=lambda s: len(s), reverse=True)
if __name__ == '__main__':
import sys
args = sys.argv[1:]
if args[0] == '-m':
import markdown
args.pop(0)
element = markdown.etree.Element
elif args[0] == '-c':
from xml.etree.cElementTree import Element
args.pop(0)
element = Element
else:
element = Element
print """\
<?xml version="1.0"?>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="application/xhtml+xml" />
<title>ASCIIMathML preview</title>
</head>
<body>
"""
print tostring(parse(' '.join(args), element))
print """\
</body>
</html>
"""
| gpl-3.0 | -8,934,841,912,234,516,000 | 34.875758 | 104 | 0.579948 | false |
omnidan/python-latex | test/test_parser.py | 1 | 8543 | from latex import LatexParser
class TestLatexParser:
def test_parser(self):
self.parser = LatexParser(self.document)
assert self.parser.getResult().getDocument() == r"""\documentclass[11pt,a4paper,oneside]{report}
\usepackage{pslatex,palatino,avant,graphicx,color}
\usepackage[margin=2cm]{geometry}
% testtesttesttest
\begin{document}
\title{\color{red}Practical Typesetting}
\author{\color{blue}Name}
\date{\color{green}December 2005}
\maketitle
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam dapibus consectetur tellus. Duis vehicula, tortorgravida sollicitudin eleifend, erat eros feugiat nisl, eget ultricies risus magna ac leo. Ut est diam, faucibustincidunt ultrices sit amet, congue sed tellus. Donec vel tellus vitae sem mattis congue. Suspendisse faucibussemper faucibus. Curabitur congue est arcu, nec sollicitudin odio blandit at. Nullam tempus vulputate aliquam.Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Duis tempus ligula eu nullapharetra eleifend. Pellentesque eget nisi gravida, faucibus justo ac, volutpat elit. Praesent egestas posuere elit,et imperdiet magna rhoncus eget. Donec porttitor enim lectus, quis egestas quam dignissim in. Donec dignissim sapienodio, nec molestie enim imperdiet ac. Praesent venenatis quis mi nec pretium.
\section*{Displayed Text}
\end{document}"""
def test_parser_keep_empty_lines(self):
self.parser = LatexParser(self.document, keep_empty_lines=True)
assert self.parser.getResult().getDocument() == r"""
\documentclass[11pt,a4paper,oneside]{report}
\usepackage{pslatex,palatino,avant,graphicx,color}
\usepackage[margin=2cm]{geometry}
% testtesttesttest
\begin{document}
\title{\color{red}Practical Typesetting}
\author{\color{blue}Name}
\date{\color{green}December 2005}
\maketitle
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam dapibus consectetur tellus. Duis vehicula, tortorgravida sollicitudin eleifend, erat eros feugiat nisl, eget ultricies risus magna ac leo. Ut est diam, faucibustincidunt ultrices sit amet, congue sed tellus. Donec vel tellus vitae sem mattis congue. Suspendisse faucibussemper faucibus. Curabitur congue est arcu, nec sollicitudin odio blandit at. Nullam tempus vulputate aliquam.Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Duis tempus ligula eu nullapharetra eleifend. Pellentesque eget nisi gravida, faucibus justo ac, volutpat elit. Praesent egestas posuere elit,et imperdiet magna rhoncus eget. Donec porttitor enim lectus, quis egestas quam dignissim in. Donec dignissim sapienodio, nec molestie enim imperdiet ac. Praesent venenatis quis mi nec pretium.
\section*{Displayed Text}
\end{document}"""
def test_parser_do_not_concat_text(self):
self.parser = LatexParser(self.document, do_not_concat_text=True)
assert self.parser.getResult().getDocument() == r"""\documentclass[11pt,a4paper,oneside]{report}
\usepackage{pslatex,palatino,avant,graphicx,color}
\usepackage[margin=2cm]{geometry}
% test
% test
% test
% test
\begin{document}
\title{\color{red}Practical Typesetting}
\author{\color{blue}Name}
\date{\color{green}December 2005}
\maketitle
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam dapibus consectetur tellus. Duis vehicula, tortor
gravida sollicitudin eleifend, erat eros feugiat nisl, eget ultricies risus magna ac leo. Ut est diam, faucibus
tincidunt ultrices sit amet, congue sed tellus. Donec vel tellus vitae sem mattis congue. Suspendisse faucibus
semper faucibus. Curabitur congue est arcu, nec sollicitudin odio blandit at. Nullam tempus vulputate aliquam.
Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Duis tempus ligula eu nulla
pharetra eleifend. Pellentesque eget nisi gravida, faucibus justo ac, volutpat elit. Praesent egestas posuere elit,
et imperdiet magna rhoncus eget. Donec porttitor enim lectus, quis egestas quam dignissim in. Donec dignissim sapien
odio, nec molestie enim imperdiet ac. Praesent venenatis quis mi nec pretium.
\section*{Displayed Text}
\end{document}"""
def test_parser_do_not_concat_and_empty_lines(self):
self.parser = LatexParser(self.document, keep_empty_lines=True, do_not_concat_text=True)
assert self.parser.getResult().getDocument() == r"""
\documentclass[11pt,a4paper,oneside]{report}
\usepackage{pslatex,palatino,avant,graphicx,color}
\usepackage[margin=2cm]{geometry}
% test
% test
% test
% test
\begin{document}
\title{\color{red}Practical Typesetting}
\author{\color{blue}Name}
\date{\color{green}December 2005}
\maketitle
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam dapibus consectetur tellus. Duis vehicula, tortor
gravida sollicitudin eleifend, erat eros feugiat nisl, eget ultricies risus magna ac leo. Ut est diam, faucibus
tincidunt ultrices sit amet, congue sed tellus. Donec vel tellus vitae sem mattis congue. Suspendisse faucibus
semper faucibus. Curabitur congue est arcu, nec sollicitudin odio blandit at. Nullam tempus vulputate aliquam.
Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Duis tempus ligula eu nulla
pharetra eleifend. Pellentesque eget nisi gravida, faucibus justo ac, volutpat elit. Praesent egestas posuere elit,
et imperdiet magna rhoncus eget. Donec porttitor enim lectus, quis egestas quam dignissim in. Donec dignissim sapien
odio, nec molestie enim imperdiet ac. Praesent venenatis quis mi nec pretium.
\section*{Displayed Text}
\end{document}"""
def test_parser_with_prefixes(self):
self.parser = LatexParser(self.document, do_not_concat_text=True)
assert self.parser.getResult().getDocument(no_prefix=False) == r"""\documentclass[11pt,a4paper,oneside]{report}
\usepackage{pslatex,palatino,avant,graphicx,color}
\usepackage[margin=2cm]{geometry}
% test
% test
% test
% test
\begin{document}
\title{\color{red}Practical Typesetting}
\author{\color{blue}Name}
\date{\color{green}December 2005}
\maketitle
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam dapibus consectetur tellus. Duis vehicula, tortor
gravida sollicitudin eleifend, erat eros feugiat nisl, eget ultricies risus magna ac leo. Ut est diam, faucibus
tincidunt ultrices sit amet, congue sed tellus. Donec vel tellus vitae sem mattis congue. Suspendisse faucibus
semper faucibus. Curabitur congue est arcu, nec sollicitudin odio blandit at. Nullam tempus vulputate aliquam.
Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Duis tempus ligula eu nulla
pharetra eleifend. Pellentesque eget nisi gravida, faucibus justo ac, volutpat elit. Praesent egestas posuere elit,
et imperdiet magna rhoncus eget. Donec porttitor enim lectus, quis egestas quam dignissim in. Donec dignissim sapien
odio, nec molestie enim imperdiet ac. Praesent venenatis quis mi nec pretium.
\section*{Displayed Text}
\end{document}"""
def test_parser_do_not_beautify(self):
self.parser = LatexParser(self.document, keep_empty_lines=True, do_not_concat_text=True)
assert self.parser.getResult().getDocument(no_prefix=False) == self.document
def __init__(self):
self.document = r"""
\documentclass[11pt,a4paper,oneside]{report}
\usepackage{pslatex,palatino,avant,graphicx,color}
\usepackage[margin=2cm]{geometry}
% test
% test
% test
% test
\begin{document}
\title{\color{red}Practical Typesetting}
\author{\color{blue}Name}
\date{\color{green}December 2005}
\maketitle
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam dapibus consectetur tellus. Duis vehicula, tortor
gravida sollicitudin eleifend, erat eros feugiat nisl, eget ultricies risus magna ac leo. Ut est diam, faucibus
tincidunt ultrices sit amet, congue sed tellus. Donec vel tellus vitae sem mattis congue. Suspendisse faucibus
semper faucibus. Curabitur congue est arcu, nec sollicitudin odio blandit at. Nullam tempus vulputate aliquam.
Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Duis tempus ligula eu nulla
pharetra eleifend. Pellentesque eget nisi gravida, faucibus justo ac, volutpat elit. Praesent egestas posuere elit,
et imperdiet magna rhoncus eget. Donec porttitor enim lectus, quis egestas quam dignissim in. Donec dignissim sapien
odio, nec molestie enim imperdiet ac. Praesent venenatis quis mi nec pretium.
\section*{Displayed Text}
\end{document}""" | bsd-3-clause | 7,697,924,839,489,832,000 | 51.740741 | 867 | 0.784151 | false |
ExCiteS/geokey-checklist | geokey_checklist/models.py | 1 | 3037 | from django.conf import settings
from django.db import models
from .base import TYPE
from .base import ITEM_TYPE
from .base import EXPIRY_FACTOR
from .base import PER_TYPE
from .base import FREQUENCY_EXPIRED_REMINDER
from .base import REMINDER_BEFORE_EXPIRATION
class Checklist(models.Model):
"""
Stores a single checklist.
"""
name = models.CharField(max_length=100)
project = models.ForeignKey('projects.Project')
category = models.ForeignKey('categories.Category')
description = models.TextField(null=True, blank=True)
creator = models.ForeignKey(settings.AUTH_USER_MODEL)
checklisttype = models.CharField(choices=TYPE, default=TYPE.Blank, max_length=100)
numberofpeople = models.IntegerField()
numberofchildren = models.IntegerField()
numberoftoddlers = models.IntegerField()
numberofinfants = models.IntegerField()
numberofpets = models.IntegerField()
latitude = models.FloatField()
longitude = models.FloatField()
def update(self, newdata):
for key in newdata:
val = newdata[key]
setattr(self,key,val)
self.save()
class ChecklistSettings(models.Model):
"""
Stores settings for the checklist extension for the user.
"""
project = models.ForeignKey('projects.Project')
reminderson = models.BooleanField(default=True)
frequencyonexpiration = models.CharField(choices=FREQUENCY_EXPIRED_REMINDER, default=FREQUENCY_EXPIRED_REMINDER.one_month, max_length=100)
#frequencybeforeexpiration = models.CharField(choices=REMINDER_BEFORE_EXPIRATION, default=REMINDER_BEFORE_EXPIRATION.six_months, max_length=100)
lastremindercheck = models.DateTimeField(null=True)
class ChecklistItem(models.Model):
"""
Stores a single checklist item.
"""
name = models.CharField(max_length=100)
project = models.ForeignKey('projects.Project')
category = models.ForeignKey('categories.Category')
field = models.ForeignKey('categories.Field')
creator = models.ForeignKey(settings.AUTH_USER_MODEL)
checklistitemdescription = models.CharField(max_length=100, null=True)
checklistitemurl = models.CharField(max_length=255, null=True)
checklistitemtype = models.CharField(choices=ITEM_TYPE, default=ITEM_TYPE.Custom, max_length=100)
quantityfactor = models.IntegerField()
pertype = models.CharField(choices=PER_TYPE, default=PER_TYPE.individual, max_length=100)
quantity = models.IntegerField()
quantityunit = models.CharField(max_length=100, null=True)
expiryfactor = models.IntegerField(null=True)
expiry = models.DateTimeField(null=True)
haveit = models.BooleanField(default=False)
def update(self, newdata):
for key in newdata:
val = newdata[key]
if key == "haveit":
                if not isinstance(val, bool):
if val == "True":
val = True
else:
val = False
setattr(self,key,val)
self.save()
| mit | -7,941,475,644,263,964,000 | 37.935897 | 148 | 0.691801 | false |
Mercy-Nekesa/sokoapp | sokoapp/request/middleware.py | 1 | 1024 | from django.core.urlresolvers import get_callable
from request.models import Request
from request import settings
from request.router import patterns
class RequestMiddleware(object):
def process_response(self, request, response):
if request.method.lower() not in settings.REQUEST_VALID_METHOD_NAMES:
return response
if response.status_code < 400 and settings.REQUEST_ONLY_ERRORS:
return response
ignore = patterns(False, *settings.REQUEST_IGNORE_PATHS)
if ignore.resolve(request.path[1:]):
return response
if request.is_ajax() and settings.REQUEST_IGNORE_AJAX:
return response
if request.META.get('REMOTE_ADDR') in settings.REQUEST_IGNORE_IP:
return response
if getattr(request, 'user', False):
if request.user.username in settings.REQUEST_IGNORE_USERNAME:
return response
r = Request()
r.from_http_request(request, response)
return response
| mit | -7,902,570,841,348,118,000 | 30.030303 | 77 | 0.665039 | false |
SimBil91/pyTenvis | modules/TenvisAudio.py | 1 | 1178 | import subprocess
import pyaudio
import os
class TenvisAudio():
def __init__(self,ip):
self.command=['avconv','-i','rtsp://'+ip+'/11','-ar', '16000','-vn','-f','wav', 'pipe:1']
self.FNULL = open(os.devnull, 'w')
self.start_stream()
def start_stream(self):
self.pipe = subprocess.Popen(self.command, stdout = subprocess.PIPE, stderr=self.FNULL)
def stop_stream(self):
self.pipe.terminate()
def play_audio(self):
# read data
p = pyaudio.PyAudio()
self.stream = p.open(
format = pyaudio.paInt16,
channels = 1,
rate = 16000,
output = True,
frames_per_buffer = 1000)
data = self.pipe.stdout.read(100)
# play stream
while data != '':
self.stream.write(data)
data = self.pipe.stdout.read(100)
#self.pipe.stdout.flush()
if __name__ == "__main__":
import sys
if len(sys.argv) != 2:
print "Usage: %s <ip_address>" % sys.argv[0]
exit()
else:
domain=sys.argv[1]
taudio=TenvisAudio(domain)
taudio.play_audio() | mit | -1,959,038,631,366,071,000 | 29.230769 | 97 | 0.523769 | false |
magenta-aps/mox | oio_rest/tests/test_db_patching.py | 1 | 5022 | # Copyright (C) 2015-2019 Magenta ApS, https://magenta.dk.
# Contact: [email protected].
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import tempfile
import flask_testing
from oio_rest import settings
from oio_rest import app
from oio_rest.utils import test_support
class Tests(flask_testing.TestCase):
def create_app(self):
return app.app
def get_fields(self):
return settings.REAL_DB_STRUCTURE['organisationfunktion']
def test_patching_with_dict(self):
orig = {
"egenskaber": [
"brugervendtnoegle",
"funktionsnavn",
"integrationsdata"
]
}
self.assertEqual(
self.get_fields()['attributter'],
orig,
)
with test_support.extend_db_struct({
"organisationfunktion": {
"attributter": {
"fætre": [
"hest",
"høg",
],
},
},
}):
self.assertEqual(
self.get_fields()['attributter'],
{
**orig,
'fætre': ['hest', 'høg'],
})
self.assertEqual(
self.get_fields()['attributter'],
orig,
)
def test_patching_with_file(self):
orig = {
"egenskaber": [
"brugervendtnoegle",
"funktionsnavn",
"integrationsdata"
]
}
self.assertEqual(
self.get_fields()['attributter'],
orig,
)
with tempfile.NamedTemporaryFile('w+t') as fp:
json.dump({
"organisationfunktion": {
"attributter": {
"fætre": [
"hest",
"høg",
],
},
},
}, fp)
fp.flush()
with test_support.extend_db_struct(fp.name):
self.assertEqual(
self.get_fields()['attributter'],
{
**orig,
'fætre': ['hest', 'høg'],
},
)
self.assertEqual(
self.get_fields()['attributter'],
orig,
)
def test_patching_order(self):
with test_support.extend_db_struct({
"organisationfunktion": {
"attributter": {
"fætre": [
"hest",
"høg",
],
},
"tilstande": {
"høj": [
"Nej",
"Ja",
],
},
},
}):
self.assertEqual(
list(self.get_fields()['attributter']),
['egenskaber', 'fætre'],
)
self.assertEqual(
list(self.get_fields()['tilstande']),
['gyldighed', 'høj'],
)
with test_support.extend_db_struct({
"organisationfunktion": {
"attributter": {
"xyzzy": [
"dood",
"daad",
],
},
"tilstande": {
"zzz": [
"baab",
"beeb",
],
},
},
}):
self.assertEqual(
list(self.get_fields()['attributter']),
['egenskaber', 'xyzzy'],
)
self.assertEqual(
list(self.get_fields()['tilstande']),
['gyldighed', 'zzz'],
)
with test_support.extend_db_struct({
"organisationfunktion": {
"attributter": {
"aardvark": [
"laal",
"lool",
],
},
"tilstande": {
"aabenraa": [
"aach",
"heen",
],
},
},
}):
self.assertEqual(
list(self.get_fields()['attributter']),
['egenskaber', 'aardvark'],
)
self.assertEqual(
list(self.get_fields()['tilstande']),
['gyldighed', 'aabenraa'],
)
| mpl-2.0 | 5,041,461,417,427,765,000 | 26.827778 | 69 | 0.350769 | false |
ma2rten/kaggle-evergreen | src/data.py | 1 | 1582 | import csv, json, re
from collections import defaultdict, Counter
from unidecode import unidecode
import numpy as np
from util import *
def extract_domain(url):
# extract domains
domain = url.lower().split('/')[2]
domain_parts = domain.split('.')
# e.g. co.uk
if domain_parts[-2] not in ['com', 'co']:
return '.'.join(domain_parts[-2:])
else:
return '.'.join(domain_parts[-3:])
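# Illustration of the expected behaviour, assuming the special-cased
# second-level labels ('com', 'co') handled above; URLs are made up:
#   extract_domain('http://blog.example.com/2013/post') -> 'example.com'
#   extract_domain('http://www.example.co.uk/post')     -> 'example.co.uk'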
def load_data(filename):
csv_file_object = csv.reader(file(filename, 'rb'), delimiter='\t')
header = csv_file_object.next()
data = []
for row in csv_file_object:
# make dictionary
item = {}
for i in range(len(header)):
item[header[i]] = row[i]
# url
item['real_url'] = item['url'].lower()
item['domain'] = extract_domain(item['url'])
item['tld'] = item['domain'].split('.')[-1]
# parse boilerplate
boilerplate = json.loads(item['boilerplate'])
for f in ['title', 'url', 'body']:
item[f] = boilerplate[f] if (f in boilerplate) else u''
item[f] = unidecode(item[f]) if item[f] else ''
del item['boilerplate']
# label
if 'label' in item:
item['label'] = item['label'] == '1'
else:
item['label'] = '?'
data.append(item)
return data
def get_train():
return load('train', lambda: load_data('data/train.tsv'))
def get_test():
return load('test', lambda: load_data('data/test.tsv'))
def get_labels():
return np.array([item['label'] for item in get_train()])
| mit | -5,455,935,004,014,464,000 | 22.969697 | 70 | 0.553097 | false |
drewcsillag/skunkweb | pylibs/pargen/Common.py | 1 | 9798 | #
# Copyright (C) 2001 Andrew T. Csillag <[email protected]>
#
# You may distribute under the terms of either the GNU General
# Public License or the SkunkWeb License, as specified in the
# README file.
#
from CONSTANTS import *
import RuleItems
Set = RuleItems.Set
startItem = RuleItems.Item("S'", ['S'], 0, -1, None, Set('$'))
#def FIRST(symbol, ruleSet, terminals, recurSymbols = []):
# """compute the set of terminals that begin strings derived from <symbol>
# internal version that works with dicts"""
# # if it's a terminal, duh, just return a set of self
# if symbol in terminals:
# return [symbol]
#
# # did we recursively hit ourselves, if so, just return empty
# if symbol in recurSymbols:
# return []
#
# symbols = Set()
#
# productions = getProductions(ruleSet, symbol)
# for rule in productions:
# if not len(rule.rhs): #rhs is Epsilon
# symbols.add(None)
# else:
# #print 'symbol is:', symbol
# #print 'recursymbols is:', recurSymbols
# #print 'new symbol is:', rule.rhs[0]
# f = FIRST(rule.rhs[0], ruleSet, terminals, [symbol] + recurSymbols)
# symbols.addList(f)
# #if symbols.contains(Set(None)):
# # symbols = symbols - None
# # symbols.add('$')
# return symbols.items()
#def FIRSTBase(symbol, ruleSet, terminals, recurSymbols = []):
# """compute the set of terminals that begin strings derived from <symbol>
# internal version that works with dicts"""
# # if it's a terminal, duh, just return a set of self
# if symbol in terminals:
# return [symbol]
#
# # did we recursively hit ourselves, if so, just return empty
# if symbol in recurSymbols:
# return []
#
# symbols = Set()
#
# productions = getProductions(ruleSet, symbol)
# for rule in productions:
# if not len(rule.rhs): #rhs is Epsilon
# symbols.add(None)
# else:
# #print 'symbol is:', symbol
# #print 'recursymbols is:', recurSymbols
# #print 'new symbol is:', rule.rhs[0]
# f = FIRSTBase(rule.rhs[0], ruleSet, terminals, [symbol] + recurSymbols)
# symbols.addList(f)
# #if symbols.contains(Set(None)):
# # symbols = symbols - None
# # symbols.add('$')
# return symbols.items()
#
#def FIRST(X, ruleSet, terminals):
# symbols = [X]
# firsts = Set(None)
#
# while 1:
# #print 'symbols =', symbols, 'firsts=', firsts
# oldFirsts = firsts
#
# #remove None from firsts
# firsts = firsts - None
#
# #add FIRSTBase to firsts
# for symbol in symbols:
# firsts.addList(FIRSTBase(symbol, ruleSet, terminals))
#
# if firsts == oldFirsts:
# break
#
# if not firsts.contains(Set(None)):
# break
#
# #find symbols Y where A -> alpha X Y Beta
# #symbols = []
# for rule in ruleSet:
# if X in rule.rhs:
# for ind in range(len(rule.rhs)):
# #print 'found rule with %s in it' % X, rule
# #if there is something after X
# if rule.rhs[ind] == X and (ind + 1) < len(rule.rhs):
# newSymbol = rule.rhs[ind + 1]
# if newSymbol not in symbols:
# #print 'adding', rule.rhs[ind+1]
# symbols.append(rule.rhs[ind+1])
#
# #if firsts.contains(Set(None)):
# # firsts = firsts - None
# # firsts.add('$')
# return firsts.items()
#
#
#def FIRSTS(symbols, symbolSet, ruleSet, terminals):
# if symbols:
# f = FIRST(symbols[0], ruleSet, terminals)
# return f
# else:
# return symbolSet
#def FIRSTS(symbols, symbolSet, ruleSet, terminals):
# firsts = Set()
# ind = 0
# while ind < len(symbols):
# X = symbols[ind]
# ind = ind + 1
# if ind == len(symbols):
# break
# firsts.addList(FIRST(X, ruleSet, terminals))
# if not firsts.contains(Set(None)):
# break
# firsts = firsts - None
# if firsts.contains(Set(None)) or not firsts.items(): #index blew out first
# #firsts = firsts - None
# firsts.addList(symbolSet)
#
# return firsts.items()
def FIRST(symbols, ruleSet, terminals, recurSymbols = None):
    """Compute the set of terminals that can begin strings derived from
    <symbols> (a single symbol or a sequence); None stands for Epsilon."""
if recurSymbols is None:
recurSymbols = []
if type(symbols) not in (type(()), type([])):
symbols = [symbols]
first = Set()
addNone = 0
#print 'symbols is', symbols
#from pg 189
for X in symbols:
#print 'X is', X
#if we're already doing X, just continue
if X in recurSymbols:
#print 'already in me'
continue
#if X is terminal, then FIRST(X) is {X}
if X in terminals:
#print 'X (%s) is terminal' % X
first.add(X)
break
prods = getProductions(ruleSet, X)
for rule in prods:
#if X -> Epsilon then add Epsilon (None for us) to FIRST(X)
if len(rule.rhs) == 0:
#print 'rule "%s" .rhs is NULL' % rule
addNone = 1
first.add(None)
else: #if X -> Y1Y2... then add FIRST(Y1Y2Y3) to FIRST(X)
#print 'rule %s, doing FIRST(%s)' % (rule, rule.rhs)
first.addList(FIRST(rule.rhs, ruleSet, terminals,
recurSymbols + [X]))
if not first.contains(Set(None)):
#print 'firsts is', first
break
if addNone:
first.add(None)
return first.items()
def FIRSTS(symbols, symbolSet, ruleSet, terminals):
myExtraRules = []
for X in symbolSet:
myExtraRules.append(
RuleItems.Rule('$BOGO$', [X], -2))
r = FIRST(symbols, ruleSet + myExtraRules, terminals)
if None in r or len(r) == 0:
#if len(r) == 0:
r.extend(symbolSet)
if None in r:
r.remove(None)
return r
def FOLLOW(lookie, ruleSet, terminals, nonTerminals):
    """Compute the FOLLOW set of <lookie>: the terminals that can appear
    immediately after it in a derivation ('$' = end of input). Assumes the
    start symbol is 'S'."""
symbols = terminals + nonTerminals
fset = {}
for i in symbols:
fset[i] = {}
fset['S']['$'] = 1
for X in symbols:
firsts = []
for rule in ruleSet:
if X in rule.rhs:
for j in range(len(rule.rhs)):
if j + 1 < len(rule.rhs) and rule.rhs[j] == X:
firsts.extend(FIRST(rule.rhs[j+1], ruleSet, terminals))
for i in firsts:
if i != None: fset[X][i] = 1
added = 1
while added:
added = 0
for rule in ruleSet:
if len(rule.rhs):
B = rule.rhs[-1]
A = rule.lhs
id = fset[B].copy()
fset[B].update(fset[A])
if fset[B] != id:
added = 1
if len(rule.rhs) >= 2:
for i in range(-1, -len(rule.rhs), -1):
if None not in FIRST(rule.rhs[i], ruleSet, terminals):
B = rule.rhs[i]
A = rule.lhs
id = fset[B].copy()
fset[B].update(fset[A])
if fset[B] != id:
added = 1
#for k in fset.keys():
# print '%s: %s' % (k, fset[k].keys())
return fset[lookie].keys()
def siftTokens(ruleSet):
"""Sifts through <ruleSet> and returns three things:
1) the terminals
2) the non-terminals
"""
terminals = {'$':1}
nonTerminals = {}
for rule in ruleSet:
nonTerminals[rule.lhs] = 1 #lhs is obviously a non-terminal
for token in rule.rhs:
terminals[token] = 1 #for now, we'll kill the nt's below
#remove the known non-terminals from the terminals dict
for token in nonTerminals.keys():
if terminals.has_key(token):
del terminals[token]
return terminals.keys(), nonTerminals.keys()
def getProductions(ruleSet, nonTerminal):
"""return all rules in <ruleSet> with <nonTerminal> as the lhs"""
return filter(lambda x, nt = nonTerminal: x.lhs == nt, ruleSet)
def printItems(legend, items):
print legend
for i in range(len(items)):
print 'state %d' % i
for j in items[i]:
print ' ', j
def printStateTable(newRules, t, nt, sd, gd):
#t = t + ['$']
print ' ', 'action',(' '*((len(t)-1)*6))+' goto'
print ' ',
for s in t:
print '%5s' % repr(s),
print ' ',
for s in nt:
print '%3s' % s,
print
for i in range(len(newRules)):
print '%3s' % i,
for s in t:
if sd[i].has_key(s):
if sd[i][s][0] == SHIFT:
print '%5s' % ('s%d' % sd[i][s][1]),
elif sd[i][s][0] == REDUCE:
print '%5s' % ('r%d' % sd[i][s][1]),
else:
print '%5s' % 'acc',
else:
print '%5s' % ' ',
print ' ',
for s in nt:
if gd[i].has_key(s):
print '%3s' % gd[i][s],
else:
print '%3s' % ' ',
print
def digestRules(ruleSet):
"""reduces the rules in ruleset to a form that
1) is marshallable
2) is literable
and
3) is only what the parser actually needs (well mostly)
"""
nr = []
for rule in ruleSet:
nr.append({
'funcName': rule.funcName,
'lhs': rule.lhs,
'lenrhs': len(rule.rhs),
'ruleString': str(rule)
})
return nr
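# A minimal usage sketch on a toy grammar, exercising siftTokens, FIRST and
# FOLLOW.  It assumes the RuleItems.Rule(lhs, rhs, number) constructor seen in
# the '$BOGO$' helper rules above, and the start symbol must be named 'S'
# because FOLLOW seeds fset['S'] with '$'.
if __name__ == '__main__':
    toyRules = [
        RuleItems.Rule('S', ['A', 'B'], 0),
        RuleItems.Rule('A', ['a'], 1),
        RuleItems.Rule('A', [], 2),        # A -> Epsilon
        RuleItems.Rule('B', ['b'], 3),
    ]
    toyTerminals, toyNonTerminals = siftTokens(toyRules)
    print('terminals: %s  nonterminals: %s' % (toyTerminals, toyNonTerminals))
    print('FIRST(A):  %s' % FIRST('A', toyRules, toyTerminals))
    print('FOLLOW(A): %s' % FOLLOW('A', toyRules, toyTerminals, toyNonTerminals))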
| gpl-2.0 | 599,409,482,687,697,200 | 29.714734 | 84 | 0.508675 | false |
patrickmelix/Python4ChemistryTools | geometryAnalyzer.py | 1 | 7994 | import numpy as np
from ase import io, Atoms
def convertNames(inFile, outFile):
xyz = open(outFile,'w')
with open(inFile) as f:
for line in f:
split = line.strip().split()
if len(split) < 4:
xyz.write(line)
continue
name = split[0]
split[1:] = [float(x) for x in split[1:]]
if name[0].lower() == 'c':
split[0] = 'C'
elif name[0].lower() == 'o':
split[0] = 'O'
elif name[0].lower() == 'h':
split[0] = 'H'
elif name[0:2].lower() == 'ni':
split[0] = 'Ni'
elif name[0].lower() == 'n':
split[0] = 'N'
xyz.write(("{:10} "+"{:20.6f} "*3+"\n").format(*split))
xyz.close()
def getBonds(A,B,inMol,bondList):
#get all bonds A-B
if not isinstance(inMol, list):
mols = [ inMol ]
else:
mols = inMol
allBonds = []
for molecule in mols:
nAtoms = len(molecule)
bonds = []
allIdx = []
for i in range(0,nAtoms):
if molecule[i].symbol == A:
allIdx.append(i)
for iIdx in allIdx:
try:
ibonds = bondList[str(iIdx)]
except:
continue
for bonded in ibonds:
if not molecule[bonded].symbol == B:
continue
bonds.append([iIdx, bonded])
#delete duplicates if A=B
if A == B:
for bond in bonds:
del bonds[bonds.index(list(reversed(bond)))]
allBonds.extend(bonds)
return allBonds
def getAngles(A,B,C,inMol,bondList):
#get all angles B-A-C
if not isinstance(inMol, list):
mols = [ inMol ]
else:
mols = inMol
allAngles = []
for molecule in mols:
nAtoms = len(molecule)
angles = []
allIdx = []
for i in range(0,nAtoms):
if molecule[i].symbol == A:
allIdx.append(i)
for iIdx in allIdx:
try:
bonds = bondList[str(iIdx)]
except:
continue
for bonded in bonds:
if not molecule[bonded].symbol == B:
continue
for j in bonds:
if j == bonded:
continue
if molecule[j].symbol == C:
angles.append([bonded, iIdx, j])
#delete duplicates if B=C
if B == C:
for angle in angles:
del angles[angles.index(list(reversed(angle)))]
allAngles.extend(angles)
return allAngles
def getDihedrals(A,B,C,D,molecule,bondList):
"""Make a list of all Dihedrals"""
dihedralList = []
allIdx = []
nAtoms = len(molecule)
for i in range(0,nAtoms):
if molecule[i].symbol == A:
allIdx.append(i)
for idx in allIdx:#A
try:
ibonds = bondList[str(idx)]
except:
continue
for j in bondList[str(idx)]:
if not molecule[j].symbol == B:
continue
for k in bondList[str(j)]:
if not molecule[k].symbol == C:
continue
if idx == k:
continue
for l in bondList[str(k)]:
if not molecule[l].symbol == D:
continue
if (not l == k) and (not l == idx) and (not l == j):
dihedralList.append([idx,j,k,l])
return dihedralList
def bond(v1,v2):
""" Returns the length of the vector. """
return np.linalg.norm(np.subtract(v1,v2))
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def angle(v1, v2):
""" Angle in Degree"""
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))/np.pi*180
def dihedral(vec1,vec2,vec3):
""" Dihedral in Degree"""
v1_u = unit_vector(vec1)
v2_u = unit_vector(vec2)
v3_u = unit_vector(vec3)
#get the two normal vectors standing on the planes
v1v2 = np.cross(v1_u,v2_u)
v2v3 = np.cross(v2_u,v3_u)
#angle between them is the dihedral
return angle(v1v2,v2v3)
def getPBCVector(staticVec, vec, box, cut=5.0):
    #find a new pbcVec using PBC so that pbcVec-staticVec is less than cut (default 5 A) away
    #test the 6 most probable directions first
pbcVec = np.subtract(vec,staticVec)
for i in range(0,3):
for j in [-1,1]:
newVec = np.add(vec,box[i]*j)
newVec = np.subtract(newVec,staticVec)
if np.linalg.norm(newVec) < cut:
return newVec
#if not yet exited, perhaps it is one of the boxes on the edges
#there are eight of them
for dim in range(0,3):
dims = list(range(0,3))
dims.remove(dim)
for i in [-1,1]:
for j in [-1,1]:
translate = np.add(box[dims[0]]*i,box[dims[1]]*j)
newVec = np.add(vec,translate)
newVec = np.subtract(newVec,staticVec)
if np.linalg.norm(newVec) < cut:
return newVec
#check the corner-connected boxes
for i in [-1,1]:
for j in [-1,1]:
for k in [-1,1]:
translate = np.add(box[0]*i,box[1]*j)
translate = np.add(translate,box[2]*k)
newVec = np.add(vec,translate)
newVec = np.subtract(newVec,staticVec)
if np.linalg.norm(newVec) < cut:
return newVec
#if there is no result yet something is wrong
raise ValueError("No matching PBC point found!")
def getBondValues(inMol,bondLists):
if not isinstance(inMol, list):
mols = [ inMol ]
else:
mols = inMol
bonds = {}
for name in bondLists:
bonds[name] = []
for molecule in mols:
for name, bondList in bondLists.items():
for item in bondList:
bonds[name].append(molecule.get_distance(item[0],item[1],mic=True))
return bonds
def getAngleValues(inMol,angleLists):
if not isinstance(inMol, list):
mols = [ inMol ]
else:
mols = inMol
angles = {}
for name in angleLists:
angles[name] = []
for molecule in mols:
for name, angleList in angleLists.items():
for item in angleList:
angles[name].append(molecule.get_angle(item[0],item[1],item[2],mic=True))
return angles
def getDihedralValues(inMol, dihedralLists):
if not isinstance(inMol, list):
mols = [ inMol ]
else:
mols = inMol
dihedrals = {}
for name in dihedralLists:
dihedrals[name] = []
for molecule in mols:
for name, dihedralList in dihedralLists.items():
for item in dihedralList:
dihedrals[name].append(molecule.get_dihedral(item[0],item[1],item[2],item[3],mic=True))
return dihedrals
def get_distance2plane(inMol,idxP1,idxP2,idxP3,idxDist):
if not isinstance(inMol, list):
mols = [ inMol ]
else:
mols = inMol
dists = []
for mol in mols:
molecule = mol.copy()
toCenter = -1.0 * molecule[idxP1].position
molecule.translate(toCenter)
print(molecule[idxP1])
print(molecule[idxP2])
print(molecule[idxP3])
toXAxis = molecule[idxP2].position
molecule.rotate(toXAxis,'x',rotate_cell=True)
print(molecule[idxP1])
print(molecule[idxP2])
print(molecule[idxP3])
toXYPlane = molecule[idxP3].position[:]
toXYPlane[0] = 0
molecule.rotate(toXYPlane,'y')
print(molecule[idxP1])
print(molecule[idxP2])
print(molecule[idxP3])
print(molecule[idxDist])
dists.append(abs(molecule[idxDist].position[-1]))
return dists
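# A minimal usage sketch showing the bondList format expected above -- a dict
# mapping each atom index (as a string) to the list of atom indices bonded to
# it -- on a hypothetical water molecule built with ASE.  Coordinates and the
# 10 A box are made up for illustration only.
if __name__ == "__main__":
    water = Atoms('OH2',
                  positions=[(0.00, 0.00, 0.00),    # O (index 0)
                             (0.96, 0.00, 0.00),    # H (index 1)
                             (-0.24, 0.93, 0.00)],  # H (index 2)
                  cell=[10.0, 10.0, 10.0], pbc=True)
    bondList = {'0': [1, 2], '1': [0], '2': [0]}
    ohBonds = getBonds('O', 'H', water, bondList)           # [[0, 1], [0, 2]]
    hohAngles = getAngles('O', 'H', 'H', water, bondList)   # [[1, 0, 2]]
    print(getBondValues(water, {'O-H': ohBonds}))
    print(getAngleValues(water, {'H-O-H': hohAngles}))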
| mit | 7,997,418,269,323,740,000 | 30.848606 | 103 | 0.523893 | false |
cwm-kylin/kylin_ops | permission/models.py | 1 | 2259 | import datetime
from django.db import models
from asset.models import Asset, AssetGroup
from usermanagers.models import User, UserGroup
class PermLog(models.Model):
datetime = models.DateTimeField(auto_now_add=True)
action = models.CharField(max_length=100, null=True, blank=True, default='')
results = models.CharField(max_length=1000, null=True, blank=True, default='')
is_success = models.BooleanField(default=False)
is_finish = models.BooleanField(default=False)
class PermSudo(models.Model):
name = models.CharField(max_length=100, unique=True)
date_added = models.DateTimeField(auto_now=True)
commands = models.TextField()
comment = models.CharField(max_length=100, null=True, blank=True, default='')
def __unicode__(self):
return self.name
class PermRole(models.Model):
name = models.CharField(max_length=100, unique=True)
comment = models.CharField(max_length=100, null=True, blank=True, default='')
password = models.CharField(max_length=512)
key_path = models.CharField(max_length=100)
date_added = models.DateTimeField(auto_now=True)
sudo = models.ManyToManyField(PermSudo, related_name='perm_role')
def __unicode__(self):
return self.name
class PermRule(models.Model):
date_added = models.DateTimeField(auto_now=True)
name = models.CharField(max_length=100, unique=True)
comment = models.CharField(max_length=100)
asset = models.ManyToManyField(Asset, related_name='perm_rule')
asset_group = models.ManyToManyField(AssetGroup, related_name='perm_rule')
user = models.ManyToManyField(User, related_name='perm_rule')
user_group = models.ManyToManyField(UserGroup, related_name='perm_rule')
role = models.ManyToManyField(PermRole, related_name='perm_rule')
def __unicode__(self):
return self.name
class PermPush(models.Model):
asset = models.ForeignKey(Asset, related_name='perm_push')
role = models.ForeignKey(PermRole, related_name='perm_push')
is_public_key = models.BooleanField(default=False)
is_password = models.BooleanField(default=False)
success = models.BooleanField(default=False)
result = models.TextField(default='')
date_added = models.DateTimeField(auto_now=True)
| lgpl-3.0 | 8,404,812,628,688,477,000 | 36.65 | 82 | 0.722001 | false |
Scienziatopazzo/machine-learning | simple_perceptron.py | 1 | 2836 | import math
import random
import operator
import numpy as np
class Perceptron:
'''
Implements the Perceptron Learning Algorithm
fields:
int dim Dimensionality of the data
List weights Array (dim+1 x 1) of the weights
List data Array (N x 1) of tuples (x, y) composed of vectors x and results y=f(x)
int iterations Number of iterations of PLA undergone
'''
def __init__(self, dim, data = []):
self.dim = dim
self.reset(data)
def reset(self, data, weights = [0]):
'''
Reset weights and iterations and feed a data sample
'''
if np.all(weights == [0]):
self.weights = [0.0] * (self.dim+1)
elif len(weights)!=(self.dim+1):
raise ValueError('Wrong initial weights dimensionality')
else:
self.weights = weights
for t in data:
if len(t[0])!=self.dim:
raise ValueError('Wrong data dimensionality')
elif t[1]!=1 and t[1]!=-1:
raise ValueError('Function output is not binary')
self.data = data
self.iterations = 0
def hypothesis(self, x):
'''
Takes d-dimensional data vector x and computes h(x)
using the current weights
'''
x_adj = [1.0] + x #adjusted to include 1 at the start
weighted_sum = sum(map(operator.mul, self.weights, x_adj)) #dot product of w and x
if weighted_sum==0.0:
return 0.0
else:
return math.copysign(1.0, weighted_sum) #sign function
def classify(self, point):
'''
Takes as "point" a tuple (x, y) with x a vector and y=f(x)
and classifies it, returning True if h(x)=f(x) and False if not
'''
h = self.hypothesis(point[0])
return h == point[1]
def train(self):
'''
Trains the perceptron with the data using the PLA
'''
misclass = True
#iterate until there is no more misclassification
while(misclass):
#obtain a set of misclassified points
misclass_points = [] #array of indexes of misclassified points in data
for point in self.data:
if not self.classify(point):
misclass_points.append(self.data.index(point))
if len(misclass_points)!=0:
#choose the misclassified point at random
p = self.data[random.choice(misclass_points)]
x_adj = [1.0] + p[0]
# w <- w + yx where (x,y) is a misclassified point
x_sign = [p[1]*xi for xi in x_adj]
self.weights = [self.weights[i] + x_sign[i] for i in range(len(x_sign))]
#increment number of iterations
self.iterations += 1
else:
misclass=False
def f_disagreement(self, new_data):
'''
When given a sufficiently big new dataset new_data with the same format of self.data,
returns the disagreement fraction between the trained function g and the original f
P[f(x) != g(x)]
'''
g_misclass_points = 0 #counter of newdata points misclassified by g
for point in new_data:
if not self.classify(point):
g_misclass_points += 1
#return the fraction of P
return g_misclass_points / len(new_data)
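# A minimal usage sketch: trains the perceptron on a tiny, linearly separable
# toy sample.  Each data point is a tuple (x, y) with x a list of length dim
# and y in {+1, -1}, as required by reset(); the values below are made up.
if __name__ == '__main__':
    toy_data = [([2.0, 1.0], 1), ([1.0, 2.0], 1),
                ([-1.0, -2.0], -1), ([-2.0, -1.0], -1)]
    p = Perceptron(2, toy_data)
    p.train()
    print('converged after %d iterations, weights = %s' % (p.iterations, p.weights))
    print('h([1.5, 1.5]) = %s' % p.hypothesis([1.5, 1.5]))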
| mit | 191,354,775,047,642,500 | 29.494624 | 87 | 0.674542 | false |
allanlei/django-saas | saas/multidb/middleware.py | 1 | 1173 | from django.utils.functional import curry
from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed
from saas.multidb.signals import db_route_read, db_route_write
from saas.multidb.models import Database
class ModelRoutingMiddleware(object):
@classmethod
def request_router_info(cls, sender, request=None, **kwargs):
if request:
return request.REQUEST.get('domain', 'default')
def get_signal_function(self, **kwargs):
return curry(self.request_router_info, **kwargs)
def process_request(self, request):
db_route_read.connect(self.get_signal_function(request=request), weak=False, dispatch_uid=request)
db_route_write.connect(self.get_signal_function(request=request), weak=False, dispatch_uid=request)
return None
def process_response(self, request, response):
db_route_read.disconnect(weak=False, dispatch_uid=request)
db_route_write.disconnect(weak=False, dispatch_uid=request)
return response
class AutoLoadMiddleware(object):
def __init__(self):
Database.objects.all().load()
raise MiddlewareNotUsed
| bsd-3-clause | -4,862,116,266,822,954,000 | 34.545455 | 107 | 0.70844 | false |
seclab-ucr/INTANG | src/test/test_dns.py | 1 | 2747 | #!/usr/bin/env python
import redis
import socket
import struct
import subprocess
import time
TEST_ROUNDS = 50
JAIL_TIME = 95
TEST_SUCCESS = 1
TEST_FAILED = 2
TEST_SVR_NO_RESP = 3
TEST_OTHER = 4
resolvers = [
'216.146.35.35',
'216.146.36.36',
'208.67.222.222',
'208.67.220.220',
]
jail_time = {}
# connect to redis
redis_conn = redis.StrictRedis(host='localhost', port=6389, db=0)
results = {
}
for resolver_ip in resolvers:
results[resolver_ip] = []
def ip2int(addr):
return struct.unpack("I", socket.inet_aton(addr))[0]
def is_all_done():
for resolver_ip in resolvers:
if len(results[resolver_ip]) < TEST_ROUNDS:
return False
return True
def is_in_jail(resolver_ip):
if resolver_ip in jail_time:
if jail_time[resolver_ip] + JAIL_TIME < time.time():
del jail_time[resolver_ip]
return False
else:
return True
return False
def update_statfile():
f = open('status.log', 'w')
for resolver_ip in resolvers:
f.write("%30s : " % (resolver_ip))
for res in results[resolver_ip]:
if res == TEST_SUCCESS:
# success
f.write('+')
elif res == TEST_SVR_NO_RESP:
# svr no resp
f.write('*')
elif res == TEST_FAILED:
# reset (may differentiate type-1 and type-2 later)
f.write('-')
else:
# unknown
f.write('?')
f.write("\n")
f.close()
while not is_all_done():
for resolver_ip in resolvers:
if is_in_jail(resolver_ip):
time.sleep(0.1)
continue
ret = subprocess.check_output("dig +tcp @%s www.dropbox.com" % resolver_ip, shell=True)
#print(ret)
# sleep 2s to wait for late GFW rst
time.sleep(2)
#print("rst:attack1:*_%d" % ip2int(ip))
type1rst = redis_conn.keys("rst:attack1:*_%d" % ip2int(resolver_ip))
print(type1rst)
#print("rst:attack2:*_%d" % ip2int(ip))
type2rst = redis_conn.keys("rst:attack2:*_%d" % ip2int(resolver_ip))
print(type2rst)
if type1rst or type2rst:
results[resolver_ip].append(TEST_FAILED)
jail_time[resolver_ip] = time.time()
elif "connection reset" in ret:
pass
else:
results[resolver_ip].append(TEST_SUCCESS)
update_statfile()
time.sleep(0.1)
for resolver_ip in resolvers:
print("%s, %d, %d" % (resolver_ip, results[resolver_ip].count(TEST_SUCCESS), results[resolver_ip].count(TEST_FAILED)))
| gpl-3.0 | 6,996,596,000,312,077,000 | 24.915094 | 122 | 0.539134 | false |
kernelci/kernelci-backend | app/models/tests/test_bisect_model.py | 1 | 9506 | # Copyright (C) Collabora Limited 2018,2019
# Author: Guillaume Tucker <[email protected]>
#
# Copyright (C) Linaro Limited 2014,2015,2017
# Author: Milo Casagrande <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import unittest
import models.base as modb
import models.bisect as modbs
class TestBisectModel(unittest.TestCase):
def test_bisect_base_document(self):
bisect_doc = modbs.BisectDocument()
self.assertIsInstance(bisect_doc, modb.BaseDocument)
def test_case_bisect_document(self):
bisect_doc = modbs.TestCaseBisectDocument()
self.assertIsInstance(bisect_doc, modbs.BisectDocument)
self.assertIsInstance(bisect_doc, modb.BaseDocument)
def test_bisect_base_document_collection(self):
bisect_doc = modbs.BisectDocument()
self.assertEqual(bisect_doc.collection, "bisect")
def test_bisect_case_document_collection(self):
bisect_doc = modbs.TestCaseBisectDocument()
self.assertEqual(bisect_doc.collection, "bisect")
def test_bisect_base_from_json(self):
bisect_doc = modbs.BisectDocument()
self.assertIsNone(bisect_doc.from_json({}))
self.assertIsNone(bisect_doc.from_json([]))
self.assertIsNone(bisect_doc.from_json(()))
self.assertIsNone(bisect_doc.from_json(""))
def test_bisect_base_to_dict(self):
bisect_doc = modbs.BisectDocument()
expected = {
"created_on": None,
"job": None,
"compare_to": None,
"good_commit": None,
"good_commit_date": None,
"good_commit_url": None,
"good_summary": None,
"bad_commit": None,
"bad_commit_date": None,
"bad_commit_url": None,
"bad_summary": None,
"version": None,
"job_id": None,
"type": None,
"found_summary": None,
"kernel": None,
"log": None,
"checks": {},
"arch": None,
"build_id": None,
"defconfig": None,
"defconfig_full": None,
"compiler": None,
"compiler_version": None,
"build_environment": None,
"git_branch": None,
"git_url": None,
}
self.assertDictEqual(expected, bisect_doc.to_dict())
def test_bisect_base_to_dict_with_id(self):
bisect_doc = modbs.BisectDocument()
bisect_doc.id = "bar"
expected = {
"_id": "bar",
"created_on": None,
"job": None,
"compare_to": None,
"good_commit": None,
"good_commit_date": None,
"good_commit_url": None,
"good_summary": None,
"bad_commit": None,
"bad_commit_date": None,
"bad_commit_url": None,
"bad_summary": None,
"version": None,
"job_id": None,
"type": None,
"found_summary": None,
"kernel": None,
"log": None,
"checks": {},
"arch": None,
"build_id": None,
"defconfig": None,
"defconfig_full": None,
"compiler": None,
"compiler_version": None,
"build_environment": None,
"git_branch": None,
"git_url": None,
}
self.assertDictEqual(expected, bisect_doc.to_dict())
def test_bisect_case_to_dict(self):
bisect_doc = modbs.TestCaseBisectDocument()
bisect_doc.id = "bar"
bisect_doc.board = "baz"
bisect_doc.version = "1.0"
bisect_doc.build_id = "build-id"
bisect_doc.job_id = "job-id"
bisect_doc.git_url = "https://somewhere.com/blah.git"
bisect_doc.git_branch = "master"
bisect_doc.kernel = "v123.456"
bisect_doc.log = "https://storage.org/log.txt"
bisect_doc.test_case_path = "test.case.path"
bisect_doc.regression_id = "regr-id"
bisect_doc.device_type = "qemu"
bisect_doc.lab_name = "secret-lab"
bisect_doc.plan_variant = "cunning"
expected = {
"_id": "bar",
"created_on": None,
"job": None,
"compare_to": None,
"good_commit": None,
"good_commit_date": None,
"good_commit_url": None,
"good_summary": None,
"bad_commit": None,
"bad_commit_date": None,
"bad_commit_url": None,
"bad_summary": None,
"version": "1.0",
"build_id": "build-id",
"job_id": "job-id",
"type": "test",
"compiler": None,
"compiler_version": None,
"build_environment": None,
"arch": None,
"defconfig": None,
"defconfig_full": None,
"git_url": "https://somewhere.com/blah.git",
"git_branch": "master",
"kernel": "v123.456",
"log": "https://storage.org/log.txt",
"found_summary": None,
"checks": {},
"test_case_path": "test.case.path",
"regression_id": "regr-id",
"device_type": "qemu",
"lab_name": "secret-lab",
"plan_variant": "cunning",
}
self.assertDictEqual(expected, bisect_doc.to_dict())
def test_bisect_base_properties(self):
bisect_doc = modbs.TestCaseBisectDocument()
bisect_doc.id = "bar"
bisect_doc.created_on = "now"
bisect_doc.job = "fooz"
bisect_doc.good_commit = "1"
bisect_doc.good_commit_date = "now"
bisect_doc.good_commit_url = "url"
bisect_doc.bad_commit = "2"
bisect_doc.bad_commit_date = "now"
bisect_doc.bad_commit_url = "url"
bisect_doc.found_summary = "1234abcd foo: bar"
bisect_doc.verified = "pass"
bisect_doc.kernel = "v456.789"
bisect_doc.log = "https://storage.org/log.txt"
self.assertEqual(bisect_doc.id, "bar")
self.assertEqual(bisect_doc.created_on, "now")
self.assertEqual(bisect_doc.job, "fooz")
self.assertEqual(bisect_doc.good_commit, "1")
self.assertEqual(bisect_doc.good_commit_date, "now")
self.assertEqual(bisect_doc.good_commit_url, "url")
self.assertEqual(bisect_doc.bad_commit, "2")
self.assertEqual(bisect_doc.bad_commit_date, "now")
self.assertEqual(bisect_doc.bad_commit_url, "url")
self.assertEqual(bisect_doc.found_summary, "1234abcd foo: bar")
self.assertEqual(bisect_doc.verified, "pass")
self.assertEqual(bisect_doc.kernel, "v456.789")
self.assertEqual(bisect_doc.log, "https://storage.org/log.txt")
def test_bisect_case_properties(self):
bisect_doc = modbs.TestCaseBisectDocument()
bisect_doc.device_type = "devboard"
self.assertEqual(bisect_doc.device_type, "devboard")
def test_bisect_defconfig_to_dict(self):
bisect_doc = modbs.DefconfigBisectDocument()
bisect_doc.id = "bar"
bisect_doc.build_id = "build-id"
bisect_doc.defconfig = "defconfig-name"
bisect_doc.version = "1.0"
bisect_doc.job = "job"
bisect_doc.job_id = "job-id"
bisect_doc.defconfig_full = "defconfig-full"
bisect_doc.arch = "arm"
bisect_doc.found_summary = "7890cdef foo: change bar into baz"
bisect_doc.kernel = "v4.56"
bisect_doc.git_url = "https://somewhere.com/blah.git"
bisect_doc.compiler = "randomcc"
bisect_doc.compiler_version = "123.456"
bisect_doc.build_environment = "build-env"
expected = {
"_id": "bar",
"created_on": None,
"job": "job",
"compare_to": None,
"good_commit": None,
"good_commit_date": None,
"good_commit_url": None,
"good_summary": None,
"bad_commit": None,
"bad_commit_date": None,
"bad_commit_url": None,
"bad_summary": None,
"version": "1.0",
"build_id": "build-id",
"defconfig": "defconfig-name",
"job_id": "job-id",
"defconfig_full": "defconfig-full",
"compiler": "randomcc",
"compiler_version": "123.456",
"build_environment": "build-env",
"arch": "arm",
"type": "build",
"git_branch": None,
"found_summary": "7890cdef foo: change bar into baz",
"git_url": "https://somewhere.com/blah.git",
"kernel": "v4.56",
"log": None,
"checks": {},
}
self.assertDictEqual(expected, bisect_doc.to_dict())
| lgpl-2.1 | 3,625,843,801,229,218,300 | 35.144487 | 79 | 0.554387 | false |
kl456123/machine_learning | workspace/fully_connected_feed.py | 1 | 9326 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains and Evaluates the MNIST network using a feed dictionary."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=missing-docstring
import argparse
import os.path
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist
# Basic model parameters as external flags.
FLAGS = None
def placeholder_inputs(batch_size):
"""Generate placeholder variables to represent the input tensors.
These placeholders are used as inputs by the rest of the model building
code and will be fed from the downloaded data in the .run() loop, below.
Args:
batch_size: The batch size will be baked into both placeholders.
Returns:
images_placeholder: Images placeholder.
labels_placeholder: Labels placeholder.
"""
# Note that the shapes of the placeholders match the shapes of the full
# image and label tensors, except the first dimension is now batch_size
# rather than the full size of the train or test data sets.
images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
mnist.IMAGE_PIXELS))
labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))
return images_placeholder, labels_placeholder
def fill_feed_dict(data_set, images_pl, labels_pl):
"""Fills the feed_dict for training the given step.
A feed_dict takes the form of:
feed_dict = {
<placeholder>: <tensor of values to be passed for placeholder>,
....
}
Args:
data_set: The set of images and labels, from input_data.read_data_sets()
images_pl: The images placeholder, from placeholder_inputs().
labels_pl: The labels placeholder, from placeholder_inputs().
Returns:
feed_dict: The feed dictionary mapping from placeholders to values.
"""
# Create the feed_dict for the placeholders filled with the next
# `batch size` examples.
images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size,
FLAGS.fake_data)
feed_dict = {
images_pl: images_feed,
labels_pl: labels_feed,
}
return feed_dict
def do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_set):
"""Runs one evaluation against the full epoch of data.
Args:
sess: The session in which the model has been trained.
eval_correct: The Tensor that returns the number of correct predictions.
images_placeholder: The images placeholder.
labels_placeholder: The labels placeholder.
data_set: The set of images and labels to evaluate, from
input_data.read_data_sets().
"""
# And run one epoch of eval.
true_count = 0 # Counts the number of correct predictions.
steps_per_epoch = data_set.num_examples // FLAGS.batch_size
num_examples = steps_per_epoch * FLAGS.batch_size
for step in xrange(steps_per_epoch):
feed_dict = fill_feed_dict(data_set,
images_placeholder,
labels_placeholder)
true_count += sess.run(eval_correct, feed_dict=feed_dict)
precision = true_count / num_examples
print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %
(num_examples, true_count, precision))
def run_training():
"""Train MNIST for a number of steps."""
# Get the sets of images and labels for training, validation, and
# test on MNIST.
data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)
# Tell TensorFlow that the model will be built into the default Graph.
with tf.Graph().as_default():
# Generate placeholders for the images and labels.
images_placeholder, labels_placeholder = placeholder_inputs(
FLAGS.batch_size)
# Build a Graph that computes predictions from the inference model.
# logits = mnist.inference(images_placeholder,
# FLAGS.hidden1,
# FLAGS.hidden2)
    logits = mnist.inference(images_placeholder,
                             [32, 32, 32, 32, 32, 128])
# Add to the Graph the Ops for loss calculation.
loss = mnist.loss(logits, labels_placeholder)
# Add to the Graph the Ops that calculate and apply gradients.
train_op = mnist.training(loss, FLAGS.learning_rate)
# Add the Op to compare the logits to the labels during evaluation.
eval_correct = mnist.evaluation(logits, labels_placeholder)
# Build the summary Tensor based on the TF collection of Summaries.
summary = tf.merge_all_summaries()
# Add the variable initializer Op.
init = tf.initialize_all_variables()
# Create a saver for writing training checkpoints.
saver = tf.train.Saver()
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Instantiate a SummaryWriter to output summaries and the Graph.
summary_writer = tf.train.SummaryWriter(FLAGS.summary_dir, sess.graph)
# And then after everything is built:
# Run the Op to initialize the variables.
sess.run(init)
# Start the training loop.
for step in xrange(FLAGS.max_steps):
start_time = time.time()
# Fill a feed dictionary with the actual set of images and labels
# for this particular training step.
feed_dict = fill_feed_dict(data_sets.train,
images_placeholder,
labels_placeholder)
# Run one step of the model. The return values are the activations
# from the `train_op` (which is discarded) and the `loss` Op. To
# inspect the values of your Ops or variables, you may include them
# in the list passed to sess.run() and the value tensors will be
# returned in the tuple from the call.
_, loss_value = sess.run([train_op, loss],
feed_dict=feed_dict)
duration = time.time() - start_time
# Write the summaries and print an overview fairly often.
if step % 100 == 0:
# Print status to stdout.
print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
# Update the events file.
summary_str = sess.run(summary, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, step)
summary_writer.flush()
# Save a checkpoint and evaluate the model periodically.
if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_file = os.path.join(FLAGS.train_dir, 'checkpoint')
saver.save(sess, checkpoint_file, global_step=step)
# Evaluate against the training set.
print('Training Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.train)
# Evaluate against the validation set.
print('Validation Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.validation)
# Evaluate against the test set.
print('Test Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.test)
def main(_):
run_training()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='Initial learning rate.'
)
parser.add_argument(
'--max_steps',
type=int,
default=20000,
help='Number of steps to run trainer.'
)
parser.add_argument(
'--hidden1',
type=int,
default=128,
help='Number of units in hidden layer 1.'
)
parser.add_argument(
'--hidden2',
type=int,
default=32,
help='Number of units in hidden layer 2.'
)
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='Batch size. Must divide evenly into the dataset sizes.'
)
parser.add_argument(
'--train_dir',
type=str,
default='dataset/mnist',
help='Directory to put the training data.'
)
parser.add_argument(
'--fake_data',
default=False,
help='If true, uses fake data for unit testing.',
action='store_true'
)
parser.add_argument(
'--summary_dir',
type=str,
default='dataset/mnist/summary',
help='Directory to put the summary data.'
)
FLAGS = parser.parse_args()
tf.app.run() | mit | 5,444,828,281,552,272,000 | 32.916364 | 80 | 0.64036 | false |
alabarga/SocialLearning | SocialLearning/apps/learningobjects/utils/search.py | 1 | 8616 | from learningobjects.utils.google import search as search_google
import sha
import xml.etree.ElementTree as ET
import unfurl
import urllib2, urllib
import json
import time
from urlunshort import resolve
import wikipedia
import re
import requests
class SearchEngine(object):
def __init__(self, engine):
self.engine = engine
    def clean(self, url):
        """Follow shortened-URL redirects (up to 5 hops) and return the final URL."""
        furl = url
        i = 0
        while resolve(url) is not None and i < 5:
            url = resolve(url)
            furl = url
            i += 1
        return furl
"""
from GoogleScraper import scrape_with_config, GoogleSearchError
from GoogleScraper.database import ScraperSearch, SERP, Link
class GoogleScrape(SearchEngine):
def __init__(self):
super(GoogleScrape, self).__init__("GoogleScrape")
def search(self, query):
# See in the config.cfg file for possible values
config = {
'SCRAPING': {
'use_own_ip': 'True',
'keyword': query,
'search_engine': 'duckduckgo'
},
'SELENIUM': {
'sel_browser': 'chrome',
},
'GLOBAL': {
'do_caching': 'True'
}
}
try:
sqlalchemy_session = scrape_with_config(config)
except GoogleSearchError as e:
print(e)
# let's inspect what we got
links = []
for search in sqlalchemy_session.query(ScraperSearch).all():
for serp in search.serps:
print(serp)
for link in serp.links:
links.append(link)
print(link)
return links
"""
class Delicious(SearchEngine):
def __init__(self):
super(Delicious, self).__init__("delicious")
#Busca en google con los parametros que se pasen
def search(self,query):
url="http://feeds.delicious.com/v2/json/tag/"+query+"?count=100"
response=urllib2.urlopen(url)
resp=json.loads(response.read())
links=[]
for res in resp:
links.insert(0,res["u"])
return links
class Google(SearchEngine):
def __init__(self):
super(Google, self).__init__("google")
#Busca en google con los parametros que se pasen
def search(self,query):
links=[]
for result in search_google(query):
links.append(result)
return links
class XGoogle(SearchEngine):
def __init__(self):
super(XGoogle, self).__init__("xgoogle")
#Busca en google con los parametros que se pasen
def search(self,query):
from xgoogle.search import GoogleSearch, SearchError
links = []
try:
gs = GoogleSearch(query)
gs.results_per_page = 50
results = gs.get_results()
for res in results:
links.append(res.url.encode('utf8'))
print res.title.encode('utf8')
print res.desc.encode('utf8')
print res.url.encode('utf8')
except SearchError:
print "Search failed!"
return links
"""
class Google(SearchEngine):
def __init__(self):
super(Google, self).__init__("google")
#Busca en google con los parametros que se pasen
def search(self,query):
links=[]
SEARCH_ENGINE_ID = '009363772985848074726:jxffracj2_8' #os.environ['SEARCH_ENGINE_ID']
API_KEY = 'AIzaSyCE9D6fjIW86IN2uekwJbaS3TDfNbim-lE' #os.environ['GOOGLE_CLOUD_API_KEY']
googleApi = GoogleCustomSearch(SEARCH_ENGINE_ID, API_KEY)
for result in googleApi.search(query):
link=result['link']
if link not in links:
links.insert(0,link)
return links
"""
class Wikipedia(SearchEngine):
def __init__(self):
super(Wikipedia, self).__init__("wikipedia")
def search(self,query):
links = []
wikipedia.set_lang("es")
ids = wikipedia.search(query)
for id in ids:
wiki = wikipedia.page(id)
refs = wiki.references
links.extend(refs)
return links
class DuckDuckGo(SearchEngine):
def __init__(self):
super(DuckDuckGo, self).__init__("duckduckgo")
def search(self,query):
links = []
for i in [0]:
time.sleep(2)
url = "https://duckduckgo.com/d.js?q=%s&l=es-es&p=1&s=%d" % (urllib.quote_plus(query), i)
user_agent = {'User-agent': 'Mozilla/5.0'}
res = requests.get(url, headers=user_agent)
#res = urllib2.urlopen(url).read()
h = re.findall('{.*?}', res.text)
n = len(h) - 1
enlaces = json.loads('['+ (','.join(h[1:n])) + ']')
for item in enlaces:
                links.append(item['c'])
        return links
def related(self,url):
return self.search('related:'+url)
class DuckDuckGoAPI(SearchEngine):
def __init__(self):
        super(DuckDuckGoAPI, self).__init__("duckduckgo")
def search(self,query):
        # json, urllib and urllib2 are already imported at module level
        url = "http://api.duckduckgo.com/?q=%s&format=json&pretty=1" % urllib.quote(query)
        response = urllib2.urlopen(url)
        ddg_read = json.loads(response.read())
        print(json.dumps(ddg_read, sort_keys=True, indent=2))
        links = []
        for item in ddg_read['RelatedTopics']:
            # nested topic groups lack a 'FirstURL' key, so guard the lookup
            if 'FirstURL' in item:
                print(item['FirstURL'])
                links.append(item['FirstURL'])
        return links
class DuckDuckGoIO(SearchEngine):
def __init__(self):
super(DuckDuckGoIO, self).__init__("duckduckgo")
#Busca en duckduck con los parametros que se pasen
def search(self,query):
links=[]
IMPORT_IO_USER = "7d0326db-696a-436d-8aba-f6c2e1c9e921"
IMPORTIO_API_KEY = "89Gl8Ce2tiqX949GcKQTE9hCg6NW%2FkN36WpGKEA4knjhoTTRT72%2BitSWPicKFsZ4RmTwvyMbC%2BOrPtxAvy1EGw%3D%3D"
url="https://api.import.io/store/data/97e350d1-d55c-4c66-bcc4-5c2bd2eb8765/_query?input/query="+urllib.quote(query)+"&_user="+IMPORT_IO_USER+"&_apikey="+IMPORTIO_API_KEY
response=urllib2.urlopen(url)
res=response.read()
res=json.loads(res)
res=res['results']
for li in res:
try:
link=li['url']
if link not in links:
links.insert(0,link)
except:
continue
return links
def related(self,url):
return self.search('related:'+url)
# https://api.import.io/store/data/2297660e-b775-433d-a408-8fb6d7a808e7/_query?input/webpage/url=http%3A%2F%2Fwefollow.com%2Finterest%2F3dprinting%2F62-100&_user=7d0326db-696a-436d-8aba-f6c2e1c9e921&_apikey=89Gl8Ce2tiqX949GcKQTE9hCg6NW%2FkN36WpGKEA4knjhoTTRT72%2BitSWPicKFsZ4RmTwvyMbC%2BOrPtxAvy1EGw%3D%3D
class Slideshare(SearchEngine):
def __init__(self):
super(Slideshare, self).__init__("slideshare")
    def search(self, query, tag=""):
####Slideshare API keys####
ssapi_key = 'lKp4aIF5' # Your api key
sssecret_key = 'x7fmnUa8' # Your secret key
links=[]
ts = int(time.time())
time_hash=sha.new(sssecret_key + str(ts)).hexdigest()
if query!="":
url="https://www.slideshare.net/api/2/search_slideshows?q="+query+"&api_key="+ssapi_key+"&hash="+time_hash+"&ts="+str(ts)
elif tag!="":
url="https://www.slideshare.net/api/2/get_slideshows_by_tag?tag="+tag+"&limit=10&api_key="+ssapi_key+"&hash="+time_hash+"&ts="+str(ts)
else:
print "error"
response=urllib2.urlopen(url)
res=response.read()
#print res
root = ET.fromstring(res)
for child in root:
try:
link=child[5].text
if link not in links:
links.insert(0,link)
except:
pass
return links
class Yahoo(SearchEngine):
def __init__(self):
super(Yahoo, self).__init__("yahoo")
def search(self,query):
url='http://pipes.yahoo.com/pipes/pipe.run?_id=nHNB8TJm3BGumlGA9YS63A&_render=json&searchInput='+query
res=urllib2.urlopen(url)
res=res.read()
res=json.loads(res)
response=res["value"].values()[3]#los resultados
links=[]
for r in response:
if r["link"] not in links:
links.append(r["link"])
return links
| gpl-3.0 | 6,216,194,071,072,516,000 | 29.125874 | 305 | 0.565692 | false |
hammerlab/immuno_research | eluted_peptide_prediction.py | 1 | 1459 | #!/usr/bin/python
import pandas as pd
from sklearn import ensemble
from sklearn import linear_model
from sklearn.cross_validation import cross_val_score
import data
import reduced_alphabet
"""
Build models off Dana-Farber Repository for Machine Learning in Immunology
For details of the repository, please refer to the papers below:
Zhang GL, Lin HH, Keskin DB, Reinherz EL, Brusic V. (2011) Dana-Farber repository for machine learning in immunology. J Immunol Methods. 2011; 374(1-2):18-25.
2nd Machine Learning Competition in Immunology 2012
"""
ELUTED = "eluted"
BINDING = "binding"
NON_BINDING = "nonbinding"
def get_url(base_url, group, allele):
return base_url + group + "_" + allele + ".htm"
def get_data(base_url='http://bio.dfci.harvard.edu/DFRMLI/datasets/', allele='HLA-A0201'):
eluted = pd.read_html(get_url(base_url, ELUTED, allele), infer_types=False, header=0)[0]
binding = pd.read_html(get_url(base_url, BINDING, allele), infer_types=False, header=0)[0]
    nonbinding = pd.read_html(get_url(base_url, NON_BINDING, allele), infer_types=False, header=0)[0]
return eluted, binding, nonbinding
if __name__ == '__main__':
E, B, N = get_data()
model = ensemble.RandomForestClassifier(n_estimators = 50)
EB = pd.concat([E, B])
print EB.count()
print N.count()
X, Y = data.make_ngram_dataset(EB.Peptide, N.Peptide, max_ngram=2, normalize_row=True, rebalance=True)
print cross_val_score(model, X, Y, scoring='roc_auc')
| gpl-2.0 | -2,094,679,284,893,819,600 | 31.422222 | 159 | 0.718986 | false |
esoma/wicked | doc/source/conf.py | 1 | 8341 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# wicked documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 10 16:10:24 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import wkd
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'wicked'
copyright = '2015, Erik Soma'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = wkd.__version__
# The full version, including alpha/beta/rc tags.
release = wkd.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'wickeddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'wicked.tex', 'wicked Documentation',
'Erik Soma', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'wicked', 'wicked Documentation',
['Erik Soma'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'wicked', 'wicked Documentation',
'Erik Soma', 'wicked', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
autoclass_content = 'both'
autodoc_member_order = 'bysource' | gpl-3.0 | -4,493,148,297,499,592,000 | 30.126866 | 79 | 0.707109 | false |
manuelli/director | src/python/director/cameracontrol.py | 1 | 15095 | import vtk
import time
import numpy as np
from director import transformUtils
from director.timercallback import TimerCallback
from director import propertyset
from collections import OrderedDict
class OrbitController(TimerCallback):
def __init__(self, view):
TimerCallback.__init__(self)
self.view = view
self.orbitTime = 20.0
def tick(self):
speed = 360.0 / self.orbitTime
degrees = self.elapsed * speed
self.view.camera().Azimuth(degrees)
self.view.render()
class CameraInterpolator(object):
def __init__(self, view):
self.view = view
self.reset()
def getViewCameraCopy(self):
camera = vtk.vtkCamera()
camera.DeepCopy(self.view.camera())
return camera
def reset(self):
self.interp = vtk.vtkCameraInterpolator()
def addCameraAtTime(self, camera, t):
self.interp.AddCamera(t, camera)
def addViewCameraAtTime(self, t):
self.addCameraAtTime(self.getViewCameraCopy(), t)
def setViewCameraAtTime(self, t):
self.interp.InterpolateCamera(t, self.view.camera())
self.view.render()
class Flyer(TimerCallback):
def __init__(self, view):
TimerCallback.__init__(self)
self.view = view
self.flyTime = 0.5
self.startTime = 0.0
self.maintainViewDirection = False
self.positionZoom = 0.7
def getCameraCopy(self):
camera = vtk.vtkCamera()
camera.DeepCopy(self.view.camera())
return camera
def zoomTo(self, newFocalPoint, newPosition=None):
self.interp = vtk.vtkCameraInterpolator()
self.interp.AddCamera(0.0, self.getCameraCopy())
c = self.getCameraCopy()
newFocalPoint = np.array(newFocalPoint)
oldFocalPoint = np.array(c.GetFocalPoint())
oldPosition = np.array(c.GetPosition())
if newPosition is None:
if self.maintainViewDirection:
newPosition = oldPosition + (newFocalPoint - oldFocalPoint)
else:
newPosition = oldPosition
newPosition += self.positionZoom*(newFocalPoint - newPosition)
#newPosition = newFocalPoint - self.positionZoom*(newFocalPoint - newPosition)
c.SetFocalPoint(newFocalPoint)
c.SetPosition(newPosition)
c.SetViewUp([0.0, 0.0, 1.0])
self.interp.AddCamera(1.0, c)
self.startTime = time.time()
self.start()
def tick(self):
elapsed = time.time() - self.startTime
t = (elapsed / float(self.flyTime)) if self.flyTime > 0 else 1.0
self.interp.InterpolateCamera(t, self.view.camera())
self.view.render()
if t >= 1.0:
return False
class CameraTracker(object):
def __init__(self, view, targetFrame):
self.view = view
self.targetFrame = targetFrame
self.camera = view.camera()
self.actions = []
self.properties = propertyset.PropertySet()
self.properties.connectPropertyChanged(self.onPropertyChanged)
self.setup()
def getTargetPose(self):
return transformUtils.poseFromTransform(self.targetFrame.transform)
def getTargetQuaternion(self):
return self.getTargetPose()[1]
def getTargetPosition(self):
return np.array(self.targetFrame.transform.GetPosition())
def getCameraTransform(self):
c = self.camera
return transformUtils.getLookAtTransform(c.GetFocalPoint(), c.GetPosition(), c.GetViewUp())
def getCameraToTargetTransform(self, targetFrame):
targetToWorld = transformUtils.copyFrame(targetFrame)
cameraToWorld = self.getCameraTransform()
cameraToTarget = transformUtils.concatenateTransforms([cameraToWorld, targetToWorld.GetLinearInverse()])
focalDistance = np.linalg.norm(np.array(self.camera.GetFocalPoint()) - np.array(self.camera.GetPosition()))
return cameraToTarget, focalDistance
def setCameraFocalPointToTarget(self):
self.camera.SetFocalPoint(self.getTargetPosition())
self.view.render()
    def getProperties(self):
return self.properties
def setup(self):
pass
def reset(self):
pass
def update(self):
pass
def onAction(self, actionName):
pass
def getMinimumUpdateRate(self):
return 0
def onPropertyChanged(self, propertySet, propertyName):
pass
class PositionTracker(CameraTracker):
def setup(self):
self.actions = ['Re-center']
def onAction(self, actionName):
if actionName == 'Re-center':
self.setCameraFocalPointToTarget()
def reset(self):
self.lastTargetPosition = self.getTargetPosition()
self.lastTargetQuaternion = self.getTargetQuaternion()
def update(self):
newTargetPosition = self.getTargetPosition()
delta = newTargetPosition - self.lastTargetPosition
followAxes = [True, True, True]
for i in xrange(3):
if not followAxes[i]:
delta[i] = 0.0
self.lastTargetPosition = newTargetPosition
c = self.camera
oldFocalPoint = np.array(c.GetFocalPoint())
oldPosition = np.array(c.GetPosition())
c.SetFocalPoint(oldFocalPoint + delta)
c.SetPosition(oldPosition + delta)
self.view.render()
class LookAtTracker(CameraTracker):
def update(self):
self.setCameraFocalPointToTarget()
def reset(self):
pass
class OrbitTracker(PositionTracker):
def setup(self):
super(OrbitTracker, self).setup()
self.properties.addProperty('Orbit Time (s)', 20, attributes=propertyset.PropertyAttributes(minimum=1, maximum=100, singleStep=1))
def update(self):
super(OrbitTracker, self).update()
orbitTime = self.properties.getProperty('Orbit Time (s)')
speed = 360.0 / orbitTime
degrees = self.dt * speed
self.view.camera().Azimuth(degrees)
self.view.render()
def getMinimumUpdateRate(self):
return 60
class PositionOrientationTracker(CameraTracker):
def storeTargetPose(self):
self.lastTargetPosition = self.getTargetPosition()
self.lastTargetQuaternion = self.getTargetQuaternion()
def reset(self):
self.storeTargetPose()
targetToWorld = transformUtils.copyFrame(self.targetFrame.transform)
cameraToWorld = self.getCameraTransform()
cameraToTarget = transformUtils.concatenateTransforms([cameraToWorld, targetToWorld.GetLinearInverse()])
self.boomTransform = cameraToTarget
self.focalDistance = np.linalg.norm(np.array(self.camera.GetFocalPoint()) - np.array(self.camera.GetPosition()))
def update(self):
previousTargetFrame = transformUtils.transformFromPose(self.lastTargetPosition, self.lastTargetQuaternion)
self.storeTargetPose()
cameraToTarget, focalDistance = self.getCameraToTargetTransform(previousTargetFrame)
targetToWorld = self.targetFrame.transform
#cameraToTarget = self.boomTransform
cameraToWorld = transformUtils.concatenateTransforms([cameraToTarget, targetToWorld])
c = self.camera
focalPoint = cameraToWorld.TransformPoint([self.focalDistance, 0, 0])
focalPoint = targetToWorld.GetPosition()
#print 'focal distance:', self.focalDistance
#print 'cameraToTarget pos:', cameraToTarget.GetPosition()
#print 'cameraToWorld pos:', cameraToWorld.GetPosition()
#print 'targetToWorld pos:', targetToWorld.GetPosition()
#print 'focal pos:', focalPoint
c.SetPosition(cameraToWorld.GetPosition())
c.SetFocalPoint(focalPoint)
self.view.render()
class SmoothFollowTracker(CameraTracker):
def getMinimumUpdateRate(self):
return 30
def setup(self):
self.properties.addProperty('Smooth Time (s)', 0.5, attributes=propertyset.PropertyAttributes(decimals=1, minimum=0.1, maximum=5, singleStep=0.1))
self.properties.addProperty('Distance (m)', 15, attributes=propertyset.PropertyAttributes(decimals=1, minimum=0.5, maximum=1000.0, singleStep=1))
self.properties.addProperty('Elevation (deg)', 10, attributes=propertyset.PropertyAttributes(minimum=-90, maximum=90, singleStep=2))
self.properties.addProperty('Azimuth (deg)', 0, attributes=propertyset.PropertyAttributes(minimum=-180, maximum=180, singleStep=10))
def reset(self):
self.currentVelocity = np.array([0.0, 0.0, 0.0])
def update(self):
if not self.targetFrame:
return
r = self.properties.getProperty('Distance (m)')
theta = np.radians(90 - self.properties.getProperty('Elevation (deg)'))
phi = np.radians(180 - self.properties.getProperty('Azimuth (deg)'))
x = r * np.cos(phi) * np.sin(theta)
y = r * np.sin(phi) * np.sin(theta)
z = r * np.cos(theta)
c = self.camera
targetToWorld = self.targetFrame.transform
currentPosition = np.array(c.GetPosition())
desiredPosition = np.array(targetToWorld.TransformPoint([x, y, z]))
smoothTime = self.properties.getProperty('Smooth Time (s)')
newPosition, self.currentVelocity = smoothDamp(currentPosition, desiredPosition, self.currentVelocity, smoothTime, maxSpeed=100, deltaTime=self.dt)
trackerToWorld = transformUtils.getLookAtTransform(targetToWorld.GetPosition(), newPosition)
c.SetFocalPoint(targetToWorld.GetPosition())
c.SetPosition(trackerToWorld.GetPosition())
self.view.render()
class TargetFrameConverter(object):
def __init__(self):
self.targetFrame = None
def getTargetFrame(self):
return self.targetFrame
@classmethod
def canConvert(cls, obj):
return False
class CameraTrackerManager(object):
def __init__(self):
self.target = None
self.targetFrame = None
self.trackerClass = None
self.camera = None
self.view = None
self.timer = TimerCallback()
self.timer.callback = self.updateTimer
self.addTrackers()
self.initTracker()
def updateTimer(self):
tNow = time.time()
dt = tNow - self.tLast
if dt < self.timer.elapsed/2.0:
return
self.update()
def setView(self, view):
self.view = view
self.camera = view.camera()
def setTarget(self, target):
'''
target should be an instance of TargetFrameConverter or
any object that provides a method getTargetFrame().
'''
if target == self.target:
return
self.disableActiveTracker()
if not target:
return
self.target = target
self.targetFrame = target.getTargetFrame()
self.callbackId = self.targetFrame.connectFrameModified(self.onTargetFrameModified)
self.initTracker()
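    # Illustrative sketch (not part of the original API surface): the object
    # handed to setTarget() only needs to provide getTargetFrame(); a minimal
    # converter wrapping an existing frame object could look like this, where
    # FrameWrapper and someFrame are hypothetical names:
    #
    #   class FrameWrapper(TargetFrameConverter):
    #       def __init__(self, frame):
    #           TargetFrameConverter.__init__(self)
    #           self.targetFrame = frame
    #
    #   trackerManager.setTarget(FrameWrapper(someFrame))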
def disableActiveTracker(self):
if self.targetFrame:
self.targetFrame.disconnectFrameModified(self.callbackId)
self.target = None
self.targetFrame = None
self.initTracker()
def update(self):
tNow = time.time()
dt = tNow - self.tLast
self.tLast = tNow
if self.activeTracker:
self.activeTracker.dt = dt
self.activeTracker.update()
def reset(self):
self.tLast = time.time()
if self.activeTracker:
self.activeTracker.reset()
def getModeActions(self):
if self.activeTracker:
return self.activeTracker.actions
return []
def onModeAction(self, actionName):
if self.activeTracker:
self.activeTracker.onAction(actionName)
def getModeProperties(self):
if self.activeTracker:
return self.activeTracker.properties
return None
def onTargetFrameModified(self, frame):
self.update()
def initTracker(self):
self.timer.stop()
self.activeTracker = self.trackerClass(self.view, self.targetFrame) if (self.trackerClass and self.targetFrame) else None
self.reset()
self.update()
if self.activeTracker:
minimumUpdateRate = self.activeTracker.getMinimumUpdateRate()
if minimumUpdateRate > 0:
self.timer.targetFps = minimumUpdateRate
self.timer.start()
def addTrackers(self):
self.trackers = OrderedDict([
['Off', None],
['Position', PositionTracker],
['Position & Orientation', PositionOrientationTracker],
['Smooth Follow', SmoothFollowTracker],
['Look At', LookAtTracker],
['Orbit', OrbitTracker],
])
def setTrackerMode(self, modeName):
assert modeName in self.trackers
self.trackerClass = self.trackers[modeName]
self.initTracker()
def smoothDamp(current, target, currentVelocity, smoothTime, maxSpeed, deltaTime):
'''
Based on Unity3D SmoothDamp
See: http://answers.unity3d.com/answers/310645/view.html
'''
smoothTime = max(0.0001, smoothTime)
    num = 2.0 / smoothTime
    num2 = num * deltaTime
num3 = 1.0 / (1.0 + num2 + 0.48 * num2 * num2 + 0.235 * num2 * num2 * num2)
num4 = current - target
num5 = target
num6 = maxSpeed * smoothTime
num4 = np.clip(num4, -num6, num6)
target = current - num4
num7 = (currentVelocity + num * num4) * deltaTime
currentVelocity = (currentVelocity - num * num7) * num3
num8 = target + (num4 + num7) * num3
for i in xrange(len(current)):
        if (num5[i] - current[i] > 0.0) == (num8[i] > num5[i]):
num8[i] = num5[i]
currentVelocity[i] = (num8[i] - num5[i]) / deltaTime
return num8, currentVelocity
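# Illustrative usage sketch (not from the original source): calling smoothDamp
# repeatedly eases a point toward a target while carrying the velocity between
# calls, which is how SmoothFollowTracker.update() above drives the camera.
# The target point and the 30 Hz tick below are assumptions for the example only.
#
#   pos = np.array([0.0, 0.0, 0.0])
#   vel = np.array([0.0, 0.0, 0.0])
#   target = np.array([1.0, 2.0, 3.0])
#   for _ in range(100):
#       pos, vel = smoothDamp(pos, target, vel, smoothTime=0.5,
#                             maxSpeed=100, deltaTime=1.0 / 30.0)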
class RobotModelFollower(object):
def __init__(self, view, robotModel, jointController):
self.view = view
self.robotModel = robotModel
self.jointController = jointController
self.followAxes = [True, True, True]
self.callbackId = None
def start(self):
self.callbackId = self.robotModel.connectModelChanged(self.onModelChanged)
self.lastTrackPosition = np.array(self.jointController.q[:3])
def stop(self):
self.robotModel.disconnectModelChanged(self.callbackId)
def getCameraCopy(self):
camera = vtk.vtkCamera()
camera.DeepCopy(self.view.camera())
return camera
def onModelChanged(self, model):
newTrackPosition = np.array(self.jointController.q[:3])
delta = newTrackPosition - self.lastTrackPosition
for i in xrange(3):
if not self.followAxes[i]:
delta[i] = 0.0
self.lastTrackPosition = newTrackPosition
c = self.view.camera()
oldFocalPoint = np.array(c.GetFocalPoint())
oldPosition = np.array(c.GetPosition())
c.SetFocalPoint(oldFocalPoint + delta)
c.SetPosition(oldPosition + delta)
self.view.render()
| bsd-3-clause | -7,207,181,618,333,525,000 | 28.656189 | 155 | 0.645711 | false |
GreenVars/diary | diary/diary.py | 1 | 7909 | from __future__ import absolute_import
from __future__ import print_function
import atexit
import os.path
import codecs
import sys
from diary import logdb
from diary import levels
from diary import formats
from diary import events
_PY2 = sys.version_info[0] == 2
class Diary(object):
"""Diary is a low-dependency and easy to use logger"""
def __init__(self, path, file_name="diary.txt", db_name="diary.db",
event=events.Event, log_format=formats.standard,
db=logdb.DiaryDB, async=True, debug_enabled=True,
encoding="utf-8", also_print=True):
"""
Initialization takes a file path meant to make startup simple
:param path: str of a path pointing to:
* An empty directory where Diary will create a db and log
* A text file where Diary will append
* A database file where Diary will read and write
* A directory with a database and txt file
- looks for file_name and db_name arguments
* A nonexistent path for where a db or a log will be made
:param file_name: a specified name for a log text file
:param db_name: a specified name for a log database file
:param event: Event object to construct log info
:param log_format: function to format logging info (see formats.py)
:param db: database class for reading/writing database
:param async: boolean if logging should occur in own thread
:param debug_enabled: boolean if logger supports debugging
:param encoding: str type of encoding to use for writing to log file
:param also_print: boolean if a logged statement will also be printed to the console
"""
self.path = path
self.encoding = encoding
self.log_file = None
self.db_file = None
self.also_print = also_print
if os.path.exists(path):
if os.path.isdir(path):
self.log_file = codecs.open(os.path.join(path, file_name), mode='a+', buffering=1, encoding=self.encoding)
self.db_file = open(os.path.join(path, db_name), 'a')
elif os.path.isfile(path):
head, tail = os.path.split(path)
_, ext = os.path.splitext(tail)
if ext == '':
self.log_file = codecs.open(path, mode='a+', buffering=1, encoding=self.encoding)
elif tail == db_name or ext[1:] in ('db', 'sql', 'sqlite',
'sqlite3'):
self.db_file = open(path, 'a')
elif tail == file_name or ext[1:] in ('txt', 'text', 'log'):
self.log_file = codecs.open(path, mode='a+', buffering=1, encoding=self.encoding)
else:
raise ValueError("Could not resolve to database or text file: {}".format(
path))
else:
raise ValueError("Could not handle path: {} | did not find a directory or file".format(
path))
else:
try:
_, ext = os.path.splitext(path)
if len(ext) > 1:
if ext[1:] in ('db', 'sql', 'sqlite', 'sqlite3'):
self.db_file = open(path, 'a')
else:
self.log_file = codecs.open(path, mode='a+', buffering=1, encoding=self.encoding)
else:
self.log_file = codecs.open(path, mode='a+', buffering=1, encoding=self.encoding)
except Exception as e:
raise e
@atexit.register
def cleanup():
"""Called on system exit to ensure logs are saved."""
if self.async:
self.thread.join()
if self.db_file:
self.db_file.close()
self.logdb.close()
if self.log_file:
self.log_file.close()
self.timer = None
self.close = cleanup
self.event = event
self.format = log_format
self.db = db
self.async = async
self.debug_enabled = debug_enabled
self.logdb = None
self.last_logged_event = None
sets_db = self.db_file is not None
if async:
from diary.logthread import DiaryThread
self.thread = DiaryThread(self, sets_db=sets_db)
elif sets_db:
self.set_db()
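    # Minimal usage sketch (illustrative only; the path below is hypothetical):
    #
    #   logger = Diary('/tmp/app_log.txt')   # nonexistent .txt path -> text log
    #   logger.info('application started')
    #   logger.warn('disk space low')
    #   logger.close()                       # joins the async thread, closes files
    #
    # Passing an existing directory instead creates both a text log (file_name)
    # and a database (db_name) inside it, as handled in __init__ above.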
def set_db(self):
"""
In order to keep databases thread safe set_db
is called by self.thread if async is enabled.
"""
if self.db_file is None:
raise ValueError("Cannot set a database without a database file")
self.logdb = self.db(self.db_file.name)
def set_timer(self, interval, func, *args, **kwargs):
"""Set a timer to log an event at every interval
:param interval: time in milliseconds to repeat func
:param func: func to execute
:param args: args to pass into func
:param kwargs: kwargs to pass into func
"""
if self.async is False:
raise RuntimeError("In order to set a timer async must be enabled")
from diary.RepeatedTimer import RepeatedTimer
self.timer = RepeatedTimer(interval, func, args=args, kwargs=kwargs)
self.timer.start()
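    # Illustrative sketch (hypothetical logger and interval value), repeating a
    # log call on the interval documented in set_timer() above:
    #
    #   d = Diary('/tmp/heartbeat.log')
    #   d.set_timer(some_interval, d.info, 'heartbeat')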
def _write(self, event):
"""Write an event object to the proper channel
:param event: event object to log
"""
if self.db_file:
self.logdb.log(event)
if self.log_file:
if event.formatter is None:
to_write = self.format(event) + '\n'
else:
to_write = event.formatted() + '\n'
if _PY2:
to_write = to_write.decode(self.encoding)
self.log_file.write(to_write)
if self.also_print:
print(to_write)
self.last_logged_event = event
def log(self, info, level=levels.info, **kwargs):
"""Log info to its relevant level (see levels.py)
:param info: info for logging
:param level: @level decorated function handle relevant behavior
"""
if isinstance(info, events.Event):
event_to_log = info
else:
event_to_log = self.event(info, level)
if _PY2 and isinstance(event_to_log.info, unicode): # short-circuiting at its best
event_to_log.info = event_to_log.info.encode(self.encoding)
if self.async:
level(event_to_log, self.thread.add, **kwargs)
else:
level(event_to_log, self._write, **kwargs)
def info(self, info, **kwargs):
"""Log general info
:param info: info relevant to application processes
"""
if isinstance(info, events.Event):
info.set_level(levels.info)
self.log(info, level=levels.info, **kwargs)
def warn(self, info, **kwargs):
"""Log info that requires a warning
:param info: info relevant to a warning
"""
if isinstance(info, events.Event):
info.set_level(levels.warn)
self.log(info, level=levels.warn, **kwargs)
def error(self, info, **kwargs):
"""Log info that may cause an error
:param info: info relevant to an error
"""
if isinstance(info, events.Event):
info.set_level(levels.error)
self.log(info, level=levels.error, **kwargs)
def debug(self, info, **kwargs):
"""Log info that may only be helpful to the developer
Will only log if debugging is enabled
:param info: info for the devs
"""
if isinstance(info, events.Event):
info.set_level(levels.debug)
if self.debug_enabled:
self.log(info, level=levels.debug, **kwargs)
| mit | -2,702,960,208,736,069,000 | 35.114155 | 122 | 0.561259 | false |
iDigBio/idigbio-python-client | examples/fetch_media/fetch_media.py | 1 | 9969 | from __future__ import print_function
try:
from idigbio.json_client import iDbApiJson
import requests
import shutil
import os
import sys
import time
import argparse
import json
except ImportError as e:
print ("IMPORT ERROR (This exception is likely caused by a missing module): '{0}'".format(e))
raise SystemExit
help_blob = """
This script will download media that are associated with the specimens
returned by an iDigBio specimen record search query.
The iDigBio Query Format is documented at
https://github.com/idigbio/idigbio-search-api/wiki/Query-Format
Notes on the --output-dir / -o parameter:
If the specified output directory does not exist, it will be created.
Omitting this parameter will cause a new directory to be created
under the current directory, named in a timestamp-like style.
### Sample ###
$ python fetch_media.py -o /tmp/idigbio_media_downloads -m 5 -q '{"genus": "acer"}'
<snip>
DOWNLOADING FINISHED with 5 successes and 0 failures
Media downloads are in output directory: '/tmp/idigbio_media_downloads'
$ ls -l /tmp/idigbio_media_downloads
total 604
-rw-rw-r-- 1 dstoner dstoner 93767 Jun 6 09:19 0c9b4669-edaa-467d-b240-f3311c764c04_webview.jpg
-rw-rw-r-- 1 dstoner dstoner 114132 Jun 6 09:19 1f2dbb2b-75ba-48cb-b34c-1ca003b4a38d_webview.jpg
-rw-rw-r-- 1 dstoner dstoner 147900 Jun 6 09:19 56f84bfe-5095-4fbb-b9e0-08cef3fdb448_webview.jpg
-rw-rw-r-- 1 dstoner dstoner 117882 Jun 6 09:19 6a0d0c92-d2be-4ae5-9fef-60453778b0f0_webview.jpg
-rw-rw-r-- 1 dstoner dstoner 136202 Jun 6 09:19 b98b9704-5ac5-4b53-b74d-d2d4d7d46ddd_webview.jpg
###
The media record for the first download above would be viewable in the iDigBio portal at
https://www.idigbio.org/portal/mediarecords/0c9b4669-edaa-467d-b240-f3311c764c04
"""
# MAX_MAX_COUNT is a safety limit to keep an erroneous query from downloading all of iDigBio's media.
# Change this value if you are legitimately trying to download more than 100k media.
# Also, please consider letting us know that you are doing this because we are interested
# in these kinds of use cases. [email protected]
MAX_MAX_COUNT = 100000
DEFAULT_MAX_COUNT = 100
SIZES = ["thumbnail", "webview", "fullsize"]
DEFAULT_SIZE = "webview"
DEFAULT_OUTPUT_DIR = None
argparser = argparse.ArgumentParser(description=help_blob, formatter_class=argparse.RawDescriptionHelpFormatter)
argparser.add_argument("-m", "--max", type=int, default=DEFAULT_MAX_COUNT,
help="Maximum number of records to be returned from search query. Default: {0}, Maximum allowed value: {1}".format(DEFAULT_MAX_COUNT,MAX_MAX_COUNT))
argparser.add_argument("-s", "--size", choices=SIZES, default=DEFAULT_SIZE,
help="Size of derivative to download. Default: '{0}'".format(DEFAULT_SIZE))
argparser.add_argument("-o", "--output-dir", default=DEFAULT_OUTPUT_DIR,
help="Directory path for downloaded media files. Default: a new directory will be created under current directory")
argparser.add_argument("-d", "--debug", default=False, action='store_true',
help="enable debugging output")
arg_group = argparser.add_mutually_exclusive_group(required=True)
arg_group.add_argument("-q", "--query",
help="query in iDigBio Query Format.")
arg_group.add_argument("--query-file",
help="file path containing query string in iDigBio Query Format")
arg_group.add_argument("--records-uuids-file",
help="file path containing list of iDigBio record uuids, one per line")
arg_group.add_argument("--mediarecords-uuids-file",
help="file path containing list of iDigBio mediarecord uuids, one per line")
args = argparser.parse_args()
MAX_RESULTS = max(0,(min(args.max, MAX_MAX_COUNT)))
SIZE = args.size
output_directory = args.output_dir
QUERY_TYPE = 'rq'
debug_flag = args.debug
if debug_flag:
print ()
print ("** DEBUGGING ENABLED **")
print ()
print ()
modulenames = set(sys.modules)&set(globals())
allmodules = [sys.modules[name] for name in modulenames]
print ("Loaded modules...")
for each_mod in allmodules:
print (each_mod)
print ()
def read_query_file(query_filename):
if os.path.isfile(query_filename):
with open(query_filename, 'r') as queryfile:
q = queryfile.read()
return q
else:
print ("*** Error: query file could not be read or does not exist.")
raise SystemExit
def get_query_from_uuids_list_file(uuids_file):
uuids_from_file = []
with open(uuids_file) as uf:
for line in uf:
uuids_from_file.append(line.strip())
q = '{"uuid":'
q += json.dumps(uuids_from_file)
q += '}'
return q
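# For illustration: with a uuids file containing two lines, the string built
# above is semantically the same JSON as json.dumps({"uuid": uuids_from_file})
# and looks like {"uuid": ["<first-uuid>", "<second-uuid>"]}; it is then used
# as the rq/mq query for the search call further below.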
query = None
if args.query:
# use the query as supplied on the command line
query = args.query
if args.query_file:
# use the query as supplied in a file
query = read_query_file(args.query_file)
if args.records_uuids_file:
# generate a query from a list of record uuids
query = get_query_from_uuids_list_file(args.records_uuids_file)
if args.mediarecords_uuids_file:
# generate a query from a list of mediarecord uuids
query = get_query_from_uuids_list_file(args.mediarecords_uuids_file)
QUERY_TYPE = 'mq'
# Verify that the provided query string is valid JSON
if query is None:
print ("*** ERROR! Query source is empty or unusable.")
else:
try:
query_json = json.loads(query)
except Exception as e:
print ('*** FATAL ERROR parsing query string:')
print (e)
print ('*** Supplied query string:')
print (query)
raise SystemExit
# The following should work whether one has specified an existing directory name, created a new directory by name,
# or left the output_directory unspecified.
if output_directory is None:
now_ms = str(time.time())
output_directory = time.strftime("%Y%m%d%H%M%S") + "." + str(time.time()).rsplit('.')[ len(now_ms.rsplit('.')) - 1]
try:
os.makedirs(output_directory)
except:
print ("*** ERROR! Could not create directroy for output: '{0}'".format(os.path.abspath(output_directory)))
raise SystemExit
else:
if not os.path.exists(output_directory):
try:
os.makedirs(output_directory)
except:
print ("*** ERROR! Could not create directroy for output: '{0}'".format(os.path.abspath(output_directory)))
raise SystemExit
def get_media_with_naming (output_dir, media_url, uuid, size):
"""
Download a media file to a directory and name it based on the input parameters.
'output_dir' controls where the download is placed.
'media_url' is the url / link to the media that will be downloaded.
'uuid' is used to uniquely identify the output filename.
'SIZE' is the class of image derivative, useful in the output filename.
"""
try:
response = requests.get(media_url, stream=True)
response.raise_for_status()
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
print('*** HTTP ERROR: {0}'.format(e))
return False
### iDigBio returns 200 OK and displays an SVG status image when a derivative
### is not present. Check for "Content-Type: image/svg+xml" header to notice this condition.
if response.headers['Content-Type'] == 'image/svg+xml':
print("*** WARNING - No media at '{0}'".format(media_url))
return False
# Output filenames will be of the form: {mediarecord_uuid}_{SIZE}.jpg
local_filepath = os.path.join(output_dir, uuid + '_' + SIZE + '.jpg')
try:
with open(local_filepath, 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
return True
except:
return False
if __name__ == '__main__':
api = iDbApiJson()
print ()
print ("Using query:")
print ()
print (query)
print ()
print ("OPERATING PARAMETERS...")
print ()
print ("Maximum number of media to fetch: {:d}".format(MAX_RESULTS))
print ("Media derivative size: {0}".format(SIZE))
print ("Output directory: {0}".format(os.path.abspath(output_directory)))
print ("Query Type: {0}".format(QUERY_TYPE))
print ()
print ("EXECUTING SEARCH QUERY...")
print ()
if QUERY_TYPE == 'mq':
results = api.search_media(mq=query, limit=MAX_RESULTS)
else:
results = api.search_media(rq=query, limit=MAX_RESULTS)
print ()
if debug_flag:
print ("Results JSON:")
print (json.dumps(results))
print ()
print ("Search query produced {:d} results.".format(results['itemCount']))
print ()
if results['itemCount'] == 0 or MAX_RESULTS == 0:
print ("Nothing to download. Exiting.")
raise SystemExit
if results['itemCount'] > MAX_RESULTS:
print ("*** WARNING: search query produced more results than the designated maximum number of media to fetch.")
print ("*** Use the -m or --max parameter to increase the maximum number of media to fetch.")
print ()
print("BEGINNING DOWNLOADS NOW...")
print ()
successes = 0
failures = 0
for each in results['items']:
media_record_uuid = each['indexTerms']['uuid']
media_url = 'https://api.idigbio.org/v2/media/' + media_record_uuid + '?size=' + SIZE
print ("Downloading: '{0}'".format(media_url))
if get_media_with_naming(output_directory, media_url, media_record_uuid, SIZE):
successes += 1
else:
failures += 1
print ()
print ("DOWNLOADING FINISHED with {0:d} successes and {1:d} failures".format(successes, failures))
print ()
print ("Media downloads are in output directory: '{0}'".format(os.path.abspath(output_directory)))
| mit | -8,380,299,812,791,761,000 | 36.618868 | 171 | 0.657438 | false |
yousrabk/mne-python | mne/io/constants.py | 1 | 32419 | # Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
#
# License: BSD (3-clause)
class Bunch(dict):
""" Container object for datasets: dictionnary-like object that
exposes its keys as attributes.
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
self.__dict__ = self
class BunchConst(Bunch):
"""Class to prevent us from re-defining constants (DRY)"""
def __setattr__(self, attr, val):
if attr != '__dict__' and hasattr(self, attr):
raise AttributeError('Attribute "%s" already set' % attr)
super(BunchConst, self).__setattr__(attr, val)
FIFF = BunchConst()
#
# Blocks
#
FIFF.FIFFB_ROOT = 999
FIFF.FIFFB_MEAS = 100
FIFF.FIFFB_MEAS_INFO = 101
FIFF.FIFFB_RAW_DATA = 102
FIFF.FIFFB_PROCESSED_DATA = 103
FIFF.FIFFB_EVOKED = 104
FIFF.FIFFB_ASPECT = 105
FIFF.FIFFB_SUBJECT = 106
FIFF.FIFFB_ISOTRAK = 107
FIFF.FIFFB_HPI_MEAS = 108
FIFF.FIFFB_HPI_RESULT = 109
FIFF.FIFFB_HPI_COIL = 110
FIFF.FIFFB_PROJECT = 111
FIFF.FIFFB_CONTINUOUS_DATA = 112
FIFF.FIFFB_VOID = 114
FIFF.FIFFB_EVENTS = 115
FIFF.FIFFB_INDEX = 116
FIFF.FIFFB_DACQ_PARS = 117
FIFF.FIFFB_REF = 118
FIFF.FIFFB_SMSH_RAW_DATA = 119
FIFF.FIFFB_SMSH_ASPECT = 120
FIFF.FIFFB_HPI_SUBSYSTEM = 121
FIFF.FIFFB_EPOCHS = 122
FIFF.FIFFB_ICA = 123
FIFF.FIFFB_SPHERE = 300 # Concentric sphere model related
FIFF.FIFFB_BEM = 310 # Boundary-element method
FIFF.FIFFB_BEM_SURF = 311 # Boundary-element method surfaces
FIFF.FIFFB_CONDUCTOR_MODEL = 312 # One conductor model definition
FIFF.FIFFB_PROJ = 313
FIFF.FIFFB_PROJ_ITEM = 314
FIFF.FIFFB_MRI = 200
FIFF.FIFFB_MRI_SET = 201
FIFF.FIFFB_MRI_SLICE = 202
FIFF.FIFFB_MRI_SCENERY = 203 # These are for writing unrelated 'slices'
FIFF.FIFFB_MRI_SCENE = 204 # Which are actually 3D scenes...
FIFF.FIFFB_MRI_SEG = 205 # MRI segmentation data
FIFF.FIFFB_MRI_SEG_REGION = 206 # One MRI segmentation region
FIFF.FIFFB_PROCESSING_HISTORY = 900
FIFF.FIFFB_PROCESSING_RECORD = 901
FIFF.FIFFB_DATA_CORRECTION = 500
FIFF.FIFFB_CHANNEL_DECOUPLER = 501
FIFF.FIFFB_SSS_INFO = 502
FIFF.FIFFB_SSS_CAL = 503
FIFF.FIFFB_SSS_ST_INFO = 504
FIFF.FIFFB_SSS_BASES = 505
FIFF.FIFFB_SMARTSHIELD = 510
#
# Of general interest
#
FIFF.FIFF_FILE_ID = 100
FIFF.FIFF_DIR_POINTER = 101
FIFF.FIFF_BLOCK_ID = 103
FIFF.FIFF_BLOCK_START = 104
FIFF.FIFF_BLOCK_END = 105
FIFF.FIFF_FREE_LIST = 106
FIFF.FIFF_FREE_BLOCK = 107
FIFF.FIFF_NOP = 108
FIFF.FIFF_PARENT_FILE_ID = 109
FIFF.FIFF_PARENT_BLOCK_ID = 110
FIFF.FIFF_BLOCK_NAME = 111
FIFF.FIFF_BLOCK_VERSION = 112
FIFF.FIFF_CREATOR = 113 # Program that created the file (string)
FIFF.FIFF_MODIFIER = 114 # Program that modified the file (string)
FIFF.FIFF_REF_ROLE = 115
FIFF.FIFF_REF_FILE_ID = 116
FIFF.FIFF_REF_FILE_NUM = 117
FIFF.FIFF_REF_FILE_NAME = 118
#
# Megacq saves the parameters in these tags
#
FIFF.FIFF_DACQ_PARS = 150
FIFF.FIFF_DACQ_STIM = 151
FIFF.FIFF_NCHAN = 200
FIFF.FIFF_SFREQ = 201
FIFF.FIFF_DATA_PACK = 202
FIFF.FIFF_CH_INFO = 203
FIFF.FIFF_MEAS_DATE = 204
FIFF.FIFF_SUBJECT = 205
FIFF.FIFF_COMMENT = 206
FIFF.FIFF_NAVE = 207
FIFF.FIFF_FIRST_SAMPLE = 208 # The first sample of an epoch
FIFF.FIFF_LAST_SAMPLE = 209 # The last sample of an epoch
FIFF.FIFF_ASPECT_KIND = 210
FIFF.FIFF_REF_EVENT = 211
FIFF.FIFF_EXPERIMENTER = 212
FIFF.FIFF_DIG_POINT = 213
FIFF.FIFF_CH_POS = 214
FIFF.FIFF_HPI_SLOPES = 215
FIFF.FIFF_HPI_NCOIL = 216
FIFF.FIFF_REQ_EVENT = 217
FIFF.FIFF_REQ_LIMIT = 218
FIFF.FIFF_LOWPASS = 219
FIFF.FIFF_BAD_CHS = 220
FIFF.FIFF_ARTEF_REMOVAL = 221
FIFF.FIFF_COORD_TRANS = 222
FIFF.FIFF_HIGHPASS = 223
FIFF.FIFF_CH_CALS = 22 # This will not occur in new files
FIFF.FIFF_HPI_BAD_CHS = 225 # List of channels considered to be bad in hpi
FIFF.FIFF_HPI_CORR_COEFF = 226 # Hpi curve fit correlations
FIFF.FIFF_EVENT_COMMENT = 227 # Comment about the events used in averaging
FIFF.FIFF_NO_SAMPLES = 228 # Number of samples in an epoch
FIFF.FIFF_FIRST_TIME = 229 # Time scale minimum
FIFF.FIFF_SUBAVE_SIZE = 230 # Size of a subaverage
FIFF.FIFF_SUBAVE_FIRST = 231 # The first epoch # contained in the subaverage
FIFF.FIFF_NAME = 233 # Intended to be a short name.
FIFF.FIFF_DESCRIPTION = FIFF.FIFF_COMMENT # (Textual) Description of an object
FIFF.FIFF_DIG_STRING = 234 # String of digitized points
FIFF.FIFF_LINE_FREQ = 235 # Line frequency
FIFF.FIFF_CUSTOM_REF = 236 # Whether a custom reference was applied to the data (NB: overlaps with HPI const #)
#
# HPI fitting program tags
#
FIFF.FIFF_HPI_COIL_FREQ = 236 # HPI coil excitation frequency
FIFF.FIFF_HPI_COIL_MOMENTS = 240 # Estimated moment vectors for the HPI coil magnetic dipoles
FIFF.FIFF_HPI_FIT_GOODNESS = 241 # Three floats indicating the goodness of fit
FIFF.FIFF_HPI_FIT_ACCEPT = 242 # Bitmask indicating acceptance (see below)
FIFF.FIFF_HPI_FIT_GOOD_LIMIT = 243 # Limit for the goodness-of-fit
FIFF.FIFF_HPI_FIT_DIST_LIMIT = 244 # Limit for the coil distance difference
FIFF.FIFF_HPI_COIL_NO = 245 # Coil number listed by HPI measurement
FIFF.FIFF_HPI_COILS_USED = 246 # List of coils finally used when the transformation was computed
FIFF.FIFF_HPI_DIGITIZATION_ORDER = 247 # Which Isotrak digitization point corresponds to each of the coils energized
#
# Pointers
#
FIFF.FIFFV_NEXT_SEQ = 0
FIFF.FIFFV_NEXT_NONE = -1
#
# Channel types
#
FIFF.FIFFV_MEG_CH = 1
FIFF.FIFFV_REF_MEG_CH = 301
FIFF.FIFFV_EEG_CH = 2
FIFF.FIFFV_MCG_CH = 201
FIFF.FIFFV_STIM_CH = 3
FIFF.FIFFV_EOG_CH = 202
FIFF.FIFFV_EMG_CH = 302
FIFF.FIFFV_ECG_CH = 402
FIFF.FIFFV_MISC_CH = 502
FIFF.FIFFV_RESP_CH = 602 # Respiration monitoring
FIFF.FIFFV_SEEG_CH = 702 # stereotactic EEG
FIFF.FIFFV_SYST_CH = 900 # some system status information (on Triux systems only)
FIFF.FIFFV_IAS_CH = 910 # Internal Active Shielding data (maybe on Triux only)
FIFF.FIFFV_EXCI_CH = 920 # flux excitation channel used to be a stimulus channel
#
# Quaternion channels for head position monitoring
#
FIFF.FIFFV_QUAT_0 = 700 # Quaternion param q0 obsolete for unit quaternion
FIFF.FIFFV_QUAT_1 = 701 # Quaternion param q1 rotation
FIFF.FIFFV_QUAT_2 = 702 # Quaternion param q2 rotation
FIFF.FIFFV_QUAT_3 = 703 # Quaternion param q3 rotation
FIFF.FIFFV_QUAT_4 = 704 # Quaternion param q4 translation
FIFF.FIFFV_QUAT_5 = 705 # Quaternion param q5 translation
FIFF.FIFFV_QUAT_6 = 706 # Quaternion param q6 translation
FIFF.FIFFV_HPI_G = 707 # Goodness-of-fit in continuous hpi
FIFF.FIFFV_HPI_ERR = 708 # Estimation error in continuous hpi
FIFF.FIFFV_HPI_MOV = 709 # Estimated head movement speed in continuous hpi
#
# Coordinate frames
#
FIFF.FIFFV_COORD_UNKNOWN = 0
FIFF.FIFFV_COORD_DEVICE = 1
FIFF.FIFFV_COORD_ISOTRAK = 2
FIFF.FIFFV_COORD_HPI = 3
FIFF.FIFFV_COORD_HEAD = 4
FIFF.FIFFV_COORD_MRI = 5
FIFF.FIFFV_COORD_MRI_SLICE = 6
FIFF.FIFFV_COORD_MRI_DISPLAY = 7
FIFF.FIFFV_COORD_DICOM_DEVICE = 8
FIFF.FIFFV_COORD_IMAGING_DEVICE = 9
#
# Needed for raw and evoked-response data
#
FIFF.FIFF_DATA_BUFFER = 300 # Buffer containing measurement data
FIFF.FIFF_DATA_SKIP = 301 # Data skip in buffers
FIFF.FIFF_EPOCH = 302 # Buffer containing one epoch and channel
FIFF.FIFF_DATA_SKIP_SAMP = 303 # Data skip in samples
FIFF.FIFF_MNE_BASELINE_MIN = 304 # Time of baseline beginning
FIFF.FIFF_MNE_BASELINE_MAX = 305 # Time of baseline end
#
# Info on subject
#
FIFF.FIFF_SUBJ_ID = 400 # Subject ID
FIFF.FIFF_SUBJ_FIRST_NAME = 401 # First name of the subject
FIFF.FIFF_SUBJ_MIDDLE_NAME = 402 # Middle name of the subject
FIFF.FIFF_SUBJ_LAST_NAME = 403 # Last name of the subject
FIFF.FIFF_SUBJ_BIRTH_DAY = 404 # Birthday of the subject
FIFF.FIFF_SUBJ_SEX = 405 # Sex of the subject
FIFF.FIFF_SUBJ_HAND = 406 # Handedness of the subject
FIFF.FIFF_SUBJ_WEIGHT = 407 # Weight of the subject
FIFF.FIFF_SUBJ_HEIGHT = 408 # Height of the subject
FIFF.FIFF_SUBJ_COMMENT = 409 # Comment about the subject
FIFF.FIFF_SUBJ_HIS_ID = 410 # ID used in the Hospital Information System
FIFF.FIFF_PROJ_ID = 500
FIFF.FIFF_PROJ_NAME = 501
FIFF.FIFF_PROJ_AIM = 502
FIFF.FIFF_PROJ_PERSONS = 503
FIFF.FIFF_PROJ_COMMENT = 504
FIFF.FIFF_EVENT_CHANNELS = 600 # Event channel numbers
FIFF.FIFF_EVENT_LIST = 601 # List of events (integers: <sample before after>
FIFF.FIFF_EVENT_CHANNEL = 602 # Event channel
FIFF.FIFF_EVENT_BITS = 603 # Event bits array
#
# Tags used in saving SQUID characteristics etc.
#
FIFF.FIFF_SQUID_BIAS = 701
FIFF.FIFF_SQUID_OFFSET = 702
FIFF.FIFF_SQUID_GATE = 703
#
# Aspect values used to save characteristic curves of SQUIDs. (mjk)
#
FIFF.FIFFV_ASPECT_IFII_LOW = 1100
FIFF.FIFFV_ASPECT_IFII_HIGH = 1101
FIFF.FIFFV_ASPECT_GATE = 1102
#
# Values for file references
#
FIFF.FIFFV_ROLE_PREV_FILE = 1
FIFF.FIFFV_ROLE_NEXT_FILE = 2
#
# References
#
FIFF.FIFF_REF_PATH = 1101
#
# Different aspects of data
#
FIFF.FIFFV_ASPECT_AVERAGE = 100 # Normal average of epochs
FIFF.FIFFV_ASPECT_STD_ERR = 101 # Std. error of mean
FIFF.FIFFV_ASPECT_SINGLE = 102 # Single epoch cut out from the continuous data
FIFF.FIFFV_ASPECT_SUBAVERAGE = 103
FIFF.FIFFV_ASPECT_ALTAVERAGE = 104 # Alternating subaverage
FIFF.FIFFV_ASPECT_SAMPLE = 105 # A sample cut out by graph
FIFF.FIFFV_ASPECT_POWER_DENSITY = 106 # Power density spectrum
FIFF.FIFFV_ASPECT_DIPOLE_WAVE = 200 # Dipole amplitude curve
#
# BEM surface IDs
#
FIFF.FIFFV_BEM_SURF_ID_UNKNOWN = -1
FIFF.FIFFV_BEM_SURF_ID_BRAIN = 1
FIFF.FIFFV_BEM_SURF_ID_SKULL = 3
FIFF.FIFFV_BEM_SURF_ID_HEAD = 4
FIFF.FIFF_BEM_SURF_ID = 3101 # int surface number
FIFF.FIFF_BEM_SURF_NAME = 3102 # string surface name
FIFF.FIFF_BEM_SURF_NNODE = 3103 # int number of nodes on a surface
FIFF.FIFF_BEM_SURF_NTRI = 3104 # int number of triangles on a surface
FIFF.FIFF_BEM_SURF_NODES = 3105 # float surface nodes (nnode,3)
FIFF.FIFF_BEM_SURF_TRIANGLES = 3106 # int surface triangles (ntri,3)
FIFF.FIFF_BEM_SURF_NORMALS = 3107 # float surface node normal unit vectors
FIFF.FIFF_BEM_POT_SOLUTION = 3110 # float ** The solution matrix
FIFF.FIFF_BEM_APPROX = 3111 # int approximation method, see below
FIFF.FIFF_BEM_COORD_FRAME = 3112 # The coordinate frame of the model
FIFF.FIFF_BEM_SIGMA = 3113 # Conductivity of a compartment
FIFF.FIFFV_BEM_APPROX_CONST = 1 # The constant potential approach
FIFF.FIFFV_BEM_APPROX_LINEAR = 2 # The linear potential approach
#
# More of those defined in MNE
#
FIFF.FIFFV_MNE_SURF_UNKNOWN = -1
FIFF.FIFFV_MNE_SURF_LEFT_HEMI = 101
FIFF.FIFFV_MNE_SURF_RIGHT_HEMI = 102
FIFF.FIFFV_MNE_SURF_MEG_HELMET = 201 # Use this irrespective of the system
#
# These relate to the Isotrak data
#
FIFF.FIFFV_POINT_CARDINAL = 1
FIFF.FIFFV_POINT_HPI = 2
FIFF.FIFFV_POINT_EEG = 3
FIFF.FIFFV_POINT_ECG = FIFF.FIFFV_POINT_EEG
FIFF.FIFFV_POINT_EXTRA = 4
FIFF.FIFFV_POINT_LPA = 1
FIFF.FIFFV_POINT_NASION = 2
FIFF.FIFFV_POINT_RPA = 3
#
# SSP
#
FIFF.FIFF_PROJ_ITEM_KIND = 3411
FIFF.FIFF_PROJ_ITEM_TIME = 3412
FIFF.FIFF_PROJ_ITEM_NVEC = 3414
FIFF.FIFF_PROJ_ITEM_VECTORS = 3415
FIFF.FIFF_PROJ_ITEM_DEFINITION = 3416
FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST = 3417
#
# MRIs
#
FIFF.FIFF_MRI_SOURCE_PATH = FIFF.FIFF_REF_PATH
FIFF.FIFF_MRI_SOURCE_FORMAT = 2002
FIFF.FIFF_MRI_PIXEL_ENCODING = 2003
FIFF.FIFF_MRI_PIXEL_DATA_OFFSET = 2004
FIFF.FIFF_MRI_PIXEL_SCALE = 2005
FIFF.FIFF_MRI_PIXEL_DATA = 2006
FIFF.FIFF_MRI_PIXEL_OVERLAY_ENCODING = 2007
FIFF.FIFF_MRI_PIXEL_OVERLAY_DATA = 2008
FIFF.FIFF_MRI_BOUNDING_BOX = 2009
FIFF.FIFF_MRI_WIDTH = 2010
FIFF.FIFF_MRI_WIDTH_M = 2011
FIFF.FIFF_MRI_HEIGHT = 2012
FIFF.FIFF_MRI_HEIGHT_M = 2013
FIFF.FIFF_MRI_DEPTH = 2014
FIFF.FIFF_MRI_DEPTH_M = 2015
FIFF.FIFF_MRI_THICKNESS = 2016
FIFF.FIFF_MRI_SCENE_AIM = 2017
FIFF.FIFF_MRI_ORIG_SOURCE_PATH = 2020
FIFF.FIFF_MRI_ORIG_SOURCE_FORMAT = 2021
FIFF.FIFF_MRI_ORIG_PIXEL_ENCODING = 2022
FIFF.FIFF_MRI_ORIG_PIXEL_DATA_OFFSET = 2023
FIFF.FIFF_MRI_VOXEL_DATA = 2030
FIFF.FIFF_MRI_VOXEL_ENCODING = 2031
FIFF.FIFF_MRI_MRILAB_SETUP = 2100
FIFF.FIFF_MRI_SEG_REGION_ID = 2200
#
FIFF.FIFFV_MRI_PIXEL_UNKNOWN = 0
FIFF.FIFFV_MRI_PIXEL_BYTE = 1
FIFF.FIFFV_MRI_PIXEL_WORD = 2
FIFF.FIFFV_MRI_PIXEL_SWAP_WORD = 3
FIFF.FIFFV_MRI_PIXEL_FLOAT = 4
FIFF.FIFFV_MRI_PIXEL_BYTE_INDEXED_COLOR = 5
FIFF.FIFFV_MRI_PIXEL_BYTE_RGB_COLOR = 6
FIFF.FIFFV_MRI_PIXEL_BYTE_RLE_RGB_COLOR = 7
FIFF.FIFFV_MRI_PIXEL_BIT_RLE = 8
#
# These are the MNE fiff definitions
#
FIFF.FIFFB_MNE = 350
FIFF.FIFFB_MNE_SOURCE_SPACE = 351
FIFF.FIFFB_MNE_FORWARD_SOLUTION = 352
FIFF.FIFFB_MNE_PARENT_MRI_FILE = 353
FIFF.FIFFB_MNE_PARENT_MEAS_FILE = 354
FIFF.FIFFB_MNE_COV = 355
FIFF.FIFFB_MNE_INVERSE_SOLUTION = 356
FIFF.FIFFB_MNE_NAMED_MATRIX = 357
FIFF.FIFFB_MNE_ENV = 358
FIFF.FIFFB_MNE_BAD_CHANNELS = 359
FIFF.FIFFB_MNE_VERTEX_MAP = 360
FIFF.FIFFB_MNE_EVENTS = 361
FIFF.FIFFB_MNE_MORPH_MAP = 362
FIFF.FIFFB_MNE_SURFACE_MAP = 363
FIFF.FIFFB_MNE_SURFACE_MAP_GROUP = 364
#
# CTF compensation data
#
FIFF.FIFFB_MNE_CTF_COMP = 370
FIFF.FIFFB_MNE_CTF_COMP_DATA = 371
FIFF.FIFFB_MNE_DERIVATIONS = 372
#
# Fiff tags associated with MNE computations (3500...)
#
#
# 3500... Bookkeeping
#
FIFF.FIFF_MNE_ROW_NAMES = 3502
FIFF.FIFF_MNE_COL_NAMES = 3503
FIFF.FIFF_MNE_NROW = 3504
FIFF.FIFF_MNE_NCOL = 3505
FIFF.FIFF_MNE_COORD_FRAME = 3506 # Coordinate frame employed. Defaults:
# FIFFB_MNE_SOURCE_SPACE FIFFV_COORD_MRI
# FIFFB_MNE_FORWARD_SOLUTION FIFFV_COORD_HEAD
# FIFFB_MNE_INVERSE_SOLUTION FIFFV_COORD_HEAD
FIFF.FIFF_MNE_CH_NAME_LIST = 3507
FIFF.FIFF_MNE_FILE_NAME = 3508 # This removes the collision with fiff_file.h (used to be 3501)
#
# 3510... 3590... Source space or surface
#
FIFF.FIFF_MNE_SOURCE_SPACE_POINTS = 3510 # The vertices
FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS = 3511 # The vertex normals
FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS = 3512 # How many vertices
FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION = 3513 # Which are selected to the source space
FIFF.FIFF_MNE_SOURCE_SPACE_NUSE = 3514 # How many are in use
FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST = 3515 # Nearest source space vertex for all vertices
FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST = 3516 # Distance to the Nearest source space vertex for all vertices
FIFF.FIFF_MNE_SOURCE_SPACE_ID = 3517 # Identifier
FIFF.FIFF_MNE_SOURCE_SPACE_TYPE = 3518 # Surface or volume
FIFF.FIFF_MNE_SOURCE_SPACE_VERTICES = 3519 # List of vertices (zero based)
FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS = 3596 # Voxel space dimensions in a volume source space
FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR = 3597 # Matrix to interpolate a volume source space into a mri volume
FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE = 3598 # MRI file used in the interpolation
FIFF.FIFF_MNE_SOURCE_SPACE_NTRI = 3590 # Number of triangles
FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES = 3591 # The triangulation
FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI = 3592 # Number of triangles corresponding to the number of vertices in use
FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES = 3593 # The triangulation of the used vertices in the source space
FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS = 3594 # Number of neighbors for each source space point (used for volume source spaces)
FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS = 3595 # Neighbors for each source space point (used for volume source spaces)
FIFF.FIFF_MNE_SOURCE_SPACE_DIST = 3599 # Distances between vertices in use (along the surface)
FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT = 3600 # If distance is above this limit (in the volume) it has not been calculated
FIFF.FIFF_MNE_SURFACE_MAP_DATA = 3610 # Surface map data
FIFF.FIFF_MNE_SURFACE_MAP_KIND = 3611 # Type of map
#
# 3520... Forward solution
#
FIFF.FIFF_MNE_FORWARD_SOLUTION = 3520
FIFF.FIFF_MNE_SOURCE_ORIENTATION = 3521 # Fixed or free
FIFF.FIFF_MNE_INCLUDED_METHODS = 3522
FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD = 3523
#
# 3530... Covariance matrix
#
FIFF.FIFF_MNE_COV_KIND = 3530 # What kind of a covariance matrix
FIFF.FIFF_MNE_COV_DIM = 3531 # Matrix dimension
FIFF.FIFF_MNE_COV = 3532 # Full matrix in packed representation (lower triangle)
FIFF.FIFF_MNE_COV_DIAG = 3533 # Diagonal matrix
FIFF.FIFF_MNE_COV_EIGENVALUES = 3534 # Eigenvalues and eigenvectors of the above
FIFF.FIFF_MNE_COV_EIGENVECTORS = 3535
FIFF.FIFF_MNE_COV_NFREE = 3536 # Number of degrees of freedom
FIFF.FIFF_MNE_COV_METHOD = 3537 # The estimator used
FIFF.FIFF_MNE_COV_SCORE = 3538 # Negative log-likelihood
#
# 3540... Inverse operator
#
# We store the inverse operator as the eigenleads, eigenfields,
# and weights
#
FIFF.FIFF_MNE_INVERSE_LEADS = 3540 # The eigenleads
FIFF.FIFF_MNE_INVERSE_LEADS_WEIGHTED = 3546 # The eigenleads (already weighted with R^0.5)
FIFF.FIFF_MNE_INVERSE_FIELDS = 3541 # The eigenfields
FIFF.FIFF_MNE_INVERSE_SING = 3542 # The singular values
FIFF.FIFF_MNE_PRIORS_USED = 3543 # Which kind of priors have been used for the source covariance matrix
FIFF.FIFF_MNE_INVERSE_FULL = 3544 # Inverse operator as one matrix
# This matrix includes the whitening operator as well
# The regularization is applied
FIFF.FIFF_MNE_INVERSE_SOURCE_ORIENTATIONS = 3545 # Contains the orientation of one source per row
# The source orientations must be expressed in the coordinate system
# given by FIFF_MNE_COORD_FRAME
FIFF.FIFF_MNE_INVERSE_SOURCE_UNIT = 3547 # Are the sources given in Am or Am/m^2 ?
#
# 3550... Saved environment info
#
FIFF.FIFF_MNE_ENV_WORKING_DIR = 3550 # Working directory where the file was created
FIFF.FIFF_MNE_ENV_COMMAND_LINE = 3551 # The command used to create the file
FIFF.FIFF_MNE_EXTERNAL_BIG_ENDIAN = 3552 # Reference to an external binary file (big-endian)
FIFF.FIFF_MNE_EXTERNAL_LITTLE_ENDIAN = 3553 # Reference to an external binary file (little-endian)
#
# 3560... Miscellaneous
#
FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE = 3560 # Is this projection item active?
FIFF.FIFF_MNE_EVENT_LIST = 3561 # An event list (for STI 014)
FIFF.FIFF_MNE_HEMI = 3562 # Hemisphere association for general purposes
FIFF.FIFF_MNE_DATA_SKIP_NOP = 3563 # A data skip turned off in the raw data
FIFF.FIFF_MNE_ORIG_CH_INFO = 3564 # Channel information before any changes
FIFF.FIFF_MNE_EVENT_TRIGGER_MASK = 3565 # Mask applied to the trigger channel values
FIFF.FIFF_MNE_EVENT_COMMENTS = 3566 # Event comments merged into one long string
#
# 3570... Morphing maps
#
FIFF.FIFF_MNE_MORPH_MAP = 3570 # Mapping of closest vertices on the sphere
FIFF.FIFF_MNE_MORPH_MAP_FROM = 3571 # Which subject is this map from
FIFF.FIFF_MNE_MORPH_MAP_TO = 3572 # Which subject is this map to
#
# 3580... CTF compensation data
#
FIFF.FIFF_MNE_CTF_COMP_KIND = 3580 # What kind of compensation
FIFF.FIFF_MNE_CTF_COMP_DATA = 3581 # The compensation data itself
FIFF.FIFF_MNE_CTF_COMP_CALIBRATED = 3582 # Are the coefficients calibrated?
FIFF.FIFF_MNE_DERIVATION_DATA = 3585 # Used to store information about EEG and other derivations
#
# 3601... values associated with ICA decomposition
#
FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS = 3601 # ICA interface parameters
FIFF.FIFF_MNE_ICA_CHANNEL_NAMES = 3602 # ICA channel names
FIFF.FIFF_MNE_ICA_WHITENER = 3603 # ICA whitener
FIFF.FIFF_MNE_ICA_PCA_COMPONENTS = 3604 # PCA components
FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR = 3605 # PCA explained variance
FIFF.FIFF_MNE_ICA_PCA_MEAN = 3606 # PCA mean
FIFF.FIFF_MNE_ICA_MATRIX = 3607 # ICA unmixing matrix
FIFF.FIFF_MNE_ICA_BADS = 3608 # ICA bad sources
FIFF.FIFF_MNE_ICA_MISC_PARAMS = 3609 # ICA misc params
#
# Maxfilter tags
#
FIFF.FIFF_SSS_FRAME = 263
FIFF.FIFF_SSS_JOB = 264
FIFF.FIFF_SSS_ORIGIN = 265
FIFF.FIFF_SSS_ORD_IN = 266
FIFF.FIFF_SSS_ORD_OUT = 267
FIFF.FIFF_SSS_NMAG = 268
FIFF.FIFF_SSS_COMPONENTS = 269
FIFF.FIFF_SSS_CAL_CHANS = 270
FIFF.FIFF_SSS_CAL_CORRS = 271
FIFF.FIFF_SSS_ST_CORR = 272
FIFF.FIFF_SSS_NFREE = 278
FIFF.FIFF_SSS_ST_LENGTH = 279
FIFF.FIFF_DECOUPLER_MATRIX = 800
#
# Fiff values associated with MNE computations
#
FIFF.FIFFV_MNE_UNKNOWN_ORI = 0
FIFF.FIFFV_MNE_FIXED_ORI = 1
FIFF.FIFFV_MNE_FREE_ORI = 2
FIFF.FIFFV_MNE_MEG = 1
FIFF.FIFFV_MNE_EEG = 2
FIFF.FIFFV_MNE_MEG_EEG = 3
FIFF.FIFFV_MNE_PRIORS_NONE = 0
FIFF.FIFFV_MNE_PRIORS_DEPTH = 1
FIFF.FIFFV_MNE_PRIORS_LORETA = 2
FIFF.FIFFV_MNE_PRIORS_SULCI = 3
FIFF.FIFFV_MNE_UNKNOWN_COV = 0
FIFF.FIFFV_MNE_SENSOR_COV = 1
FIFF.FIFFV_MNE_NOISE_COV = 1 # This is what it should have been called
FIFF.FIFFV_MNE_SOURCE_COV = 2
FIFF.FIFFV_MNE_FMRI_PRIOR_COV = 3
FIFF.FIFFV_MNE_SIGNAL_COV = 4 # This will be potentially employed in beamformers
FIFF.FIFFV_MNE_DEPTH_PRIOR_COV = 5 # The depth weighting prior
FIFF.FIFFV_MNE_ORIENT_PRIOR_COV = 6 # The orientation prior
FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF = 10 # Linear projection related to EEG average reference
#
# Output map types
#
FIFF.FIFFV_MNE_MAP_UNKNOWN = -1 # Unspecified
FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT = 1 # Scalar current value
FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT_SIZE = 2 # Absolute value of the above
FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT = 3 # Current vector components
FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT_SIZE = 4 # Vector current size
FIFF.FIFFV_MNE_MAP_T_STAT = 5 # Student's t statistic
FIFF.FIFFV_MNE_MAP_F_STAT = 6 # F statistic
FIFF.FIFFV_MNE_MAP_F_STAT_SQRT = 7 # Square root of the F statistic
FIFF.FIFFV_MNE_MAP_CHI2_STAT = 8 # (Approximate) chi^2 statistic
FIFF.FIFFV_MNE_MAP_CHI2_STAT_SQRT = 9 # Square root of the (approximate) chi^2 statistic
FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT_NOISE = 10 # Current noise approximation (scalar)
FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT_NOISE = 11 # Current noise approximation (vector)
#
# Source space types (values of FIFF_MNE_SOURCE_SPACE_TYPE)
#
FIFF.FIFFV_MNE_SPACE_UNKNOWN = -1
FIFF.FIFFV_MNE_SPACE_SURFACE = 1
FIFF.FIFFV_MNE_SPACE_VOLUME = 2
FIFF.FIFFV_MNE_SPACE_DISCRETE = 3
#
# Covariance matrix channel classification
#
FIFF.FIFFV_MNE_COV_CH_UNKNOWN = -1 # No idea
FIFF.FIFFV_MNE_COV_CH_MEG_MAG = 0 # Axial gradiometer or magnetometer [T]
FIFF.FIFFV_MNE_COV_CH_MEG_GRAD = 1 # Planar gradiometer [T/m]
FIFF.FIFFV_MNE_COV_CH_EEG = 2 # EEG [V]
#
# Projection item kinds
#
FIFF.FIFFV_PROJ_ITEM_NONE = 0
FIFF.FIFFV_PROJ_ITEM_FIELD = 1
FIFF.FIFFV_PROJ_ITEM_DIP_FIX = 2
FIFF.FIFFV_PROJ_ITEM_DIP_ROT = 3
FIFF.FIFFV_PROJ_ITEM_HOMOG_GRAD = 4
FIFF.FIFFV_PROJ_ITEM_HOMOG_FIELD = 5
#
# Additional coordinate frames
#
FIFF.FIFFV_MNE_COORD_TUFTS_EEG = 300 # For Tufts EEG data
FIFF.FIFFV_MNE_COORD_CTF_DEVICE = 1001 # CTF device coordinates
FIFF.FIFFV_MNE_COORD_CTF_HEAD = 1004 # CTF head coordinates
FIFF.FIFFV_MNE_COORD_DIGITIZER = FIFF.FIFFV_COORD_ISOTRAK # Original (Polhemus) digitizer coordinates
FIFF.FIFFV_MNE_COORD_SURFACE_RAS = FIFF.FIFFV_COORD_MRI # The surface RAS coordinates
FIFF.FIFFV_MNE_COORD_MRI_VOXEL = 2001 # The MRI voxel coordinates
FIFF.FIFFV_MNE_COORD_RAS = 2002 # Surface RAS coordinates with non-zero origin
FIFF.FIFFV_MNE_COORD_MNI_TAL = 2003 # MNI Talairach coordinates
FIFF.FIFFV_MNE_COORD_FS_TAL_GTZ = 2004 # FreeSurfer Talairach coordinates (MNI z > 0)
FIFF.FIFFV_MNE_COORD_FS_TAL_LTZ = 2005 # FreeSurfer Talairach coordinates (MNI z < 0)
FIFF.FIFFV_MNE_COORD_FS_TAL = 2006 # FreeSurfer Talairach coordinates
#
# 4D and KIT use the same head coordinate system definition as CTF
#
FIFF.FIFFV_MNE_COORD_4D_HEAD = FIFF.FIFFV_MNE_COORD_CTF_HEAD
FIFF.FIFFV_MNE_COORD_KIT_HEAD = FIFF.FIFFV_MNE_COORD_CTF_HEAD
#
# KIT system coil types
#
FIFF.FIFFV_COIL_KIT_GRAD = 6001
FIFF.FIFFV_COIL_KIT_REF_MAG = 6002
#
# CTF coil and channel types
#
FIFF.FIFFV_COIL_CTF_GRAD = 5001
FIFF.FIFFV_COIL_CTF_REF_MAG = 5002
FIFF.FIFFV_COIL_CTF_REF_GRAD = 5003
FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD = 5004
#
# Magnes reference sensors
#
FIFF.FIFFV_COIL_MAGNES_REF_MAG = 4003
FIFF.FIFFV_COIL_MAGNES_REF_GRAD = 4004
FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD = 4005
#
# BabySQUID sensors
#
FIFF.FIFFV_COIL_BABY_GRAD = 7001
FIFF.FIFFV_COIL_BABY_MAG = 7002
FIFF.FIFFV_COIL_BABY_REF_MAG = 7003
#
# FWD Types
#
FIFF.FWD_COIL_UNKNOWN = 0
FIFF.FWD_COILC_UNKNOWN = 0
FIFF.FWD_COILC_EEG = 1000
FIFF.FWD_COILC_MAG = 1
FIFF.FWD_COILC_AXIAL_GRAD = 2
FIFF.FWD_COILC_PLANAR_GRAD = 3
FIFF.FWD_COILC_AXIAL_GRAD2 = 4
FIFF.FWD_COIL_ACCURACY_POINT = 0
FIFF.FWD_COIL_ACCURACY_NORMAL = 1
FIFF.FWD_COIL_ACCURACY_ACCURATE = 2
FIFF.FWD_BEM_UNKNOWN = -1
FIFF.FWD_BEM_CONSTANT_COLL = 1
FIFF.FWD_BEM_LINEAR_COLL = 2
FIFF.FWD_BEM_IP_APPROACH_LIMIT = 0.1
FIFF.FWD_BEM_LIN_FIELD_SIMPLE = 1
FIFF.FWD_BEM_LIN_FIELD_FERGUSON = 2
FIFF.FWD_BEM_LIN_FIELD_URANKAR = 3
#
# Data types
#
FIFF.FIFFT_VOID = 0
FIFF.FIFFT_BYTE = 1
FIFF.FIFFT_SHORT = 2
FIFF.FIFFT_INT = 3
FIFF.FIFFT_FLOAT = 4
FIFF.FIFFT_DOUBLE = 5
FIFF.FIFFT_JULIAN = 6
FIFF.FIFFT_USHORT = 7
FIFF.FIFFT_UINT = 8
FIFF.FIFFT_ULONG = 9
FIFF.FIFFT_STRING = 10
FIFF.FIFFT_LONG = 11
FIFF.FIFFT_DAU_PACK13 = 13
FIFF.FIFFT_DAU_PACK14 = 14
FIFF.FIFFT_DAU_PACK16 = 16
FIFF.FIFFT_COMPLEX_FLOAT = 20
FIFF.FIFFT_COMPLEX_DOUBLE = 21
FIFF.FIFFT_OLD_PACK = 23
FIFF.FIFFT_CH_INFO_STRUCT = 30
FIFF.FIFFT_ID_STRUCT = 31
FIFF.FIFFT_DIR_ENTRY_STRUCT = 32
FIFF.FIFFT_DIG_POINT_STRUCT = 33
FIFF.FIFFT_CH_POS_STRUCT = 34
FIFF.FIFFT_COORD_TRANS_STRUCT = 35
FIFF.FIFFT_DIG_STRING_STRUCT = 36
FIFF.FIFFT_STREAM_SEGMENT_STRUCT = 37
#
# Units of measurement
#
FIFF.FIFF_UNIT_NONE = -1
#
# SI base units
#
FIFF.FIFF_UNIT_M = 1
FIFF.FIFF_UNIT_KG = 2
FIFF.FIFF_UNIT_SEC = 3
FIFF.FIFF_UNIT_A = 4
FIFF.FIFF_UNIT_K = 5
FIFF.FIFF_UNIT_MOL = 6
#
# SI Supplementary units
#
FIFF.FIFF_UNIT_RAD = 7
FIFF.FIFF_UNIT_SR = 8
#
# SI base candela
#
FIFF.FIFF_UNIT_CD = 9
#
# SI derived units
#
FIFF.FIFF_UNIT_HZ = 101
FIFF.FIFF_UNIT_N = 102
FIFF.FIFF_UNIT_PA = 103
FIFF.FIFF_UNIT_J = 104
FIFF.FIFF_UNIT_W = 105
FIFF.FIFF_UNIT_C = 106
FIFF.FIFF_UNIT_V = 107
FIFF.FIFF_UNIT_F = 108
FIFF.FIFF_UNIT_OHM = 109
FIFF.FIFF_UNIT_MHO = 110
FIFF.FIFF_UNIT_WB = 111
FIFF.FIFF_UNIT_T = 112
FIFF.FIFF_UNIT_H = 113
FIFF.FIFF_UNIT_CEL = 114
FIFF.FIFF_UNIT_LM = 115
FIFF.FIFF_UNIT_LX = 116
#
# Others we need
#
FIFF.FIFF_UNIT_T_M = 201 # T/m
FIFF.FIFF_UNIT_AM = 202 # Am
FIFF.FIFF_UNIT_AM_M2 = 203 # Am/m^2
FIFF.FIFF_UNIT_AM_M3 = 204 # Am/m^3
#
# Multipliers
#
FIFF.FIFF_UNITM_E = 18
FIFF.FIFF_UNITM_PET = 15
FIFF.FIFF_UNITM_T = 12
FIFF.FIFF_UNITM_MEG = 6
FIFF.FIFF_UNITM_K = 3
FIFF.FIFF_UNITM_H = 2
FIFF.FIFF_UNITM_DA = 1
FIFF.FIFF_UNITM_NONE = 0
FIFF.FIFF_UNITM_D = -1
FIFF.FIFF_UNITM_C = -2
FIFF.FIFF_UNITM_M = -3
FIFF.FIFF_UNITM_MU = -6
FIFF.FIFF_UNITM_N = -9
FIFF.FIFF_UNITM_P = -12
FIFF.FIFF_UNITM_F = -15
FIFF.FIFF_UNITM_A = -18
#
# Coil types
#
FIFF.FIFFV_COIL_NONE = 0 # The location info contains no data
FIFF.FIFFV_COIL_EEG = 1 # EEG electrode position in r0
FIFF.FIFFV_COIL_NM_122 = 2 # Neuromag 122 coils
FIFF.FIFFV_COIL_NM_24 = 3 # Old 24 channel system in HUT
FIFF.FIFFV_COIL_NM_MCG_AXIAL = 4 # The axial devices in the HUCS MCG system
FIFF.FIFFV_COIL_EEG_BIPOLAR = 5 # Bipolar EEG lead
FIFF.FIFFV_COIL_DIPOLE = 200 # Time-varying dipole definition
# The coil info contains dipole location (r0) and
# direction (ex)
FIFF.FIFFV_COIL_MCG_42 = 1000 # For testing the MCG software
FIFF.FIFFV_COIL_POINT_MAGNETOMETER = 2000 # Simple point magnetometer
FIFF.FIFFV_COIL_AXIAL_GRAD_5CM = 2001 # Generic axial gradiometer
FIFF.FIFFV_COIL_POINT_MAGNETOMETER_X = 2002 # Simple point magnetometer, x-direction
FIFF.FIFFV_COIL_POINT_MAGNETOMETER_Y = 2003 # Simple point magnetometer, y-direction
FIFF.FIFFV_COIL_VV_PLANAR_W = 3011 # VV prototype wirewound planar sensor
FIFF.FIFFV_COIL_VV_PLANAR_T1 = 3012 # Vectorview SQ20483N planar gradiometer
FIFF.FIFFV_COIL_VV_PLANAR_T2 = 3013 # Vectorview SQ20483N-A planar gradiometer
FIFF.FIFFV_COIL_VV_PLANAR_T3 = 3014 # Vectorview SQ20950N planar gradiometer
FIFF.FIFFV_COIL_VV_MAG_W = 3021 # VV prototype wirewound magnetometer
FIFF.FIFFV_COIL_VV_MAG_T1 = 3022 # Vectorview SQ20483N magnetometer
FIFF.FIFFV_COIL_VV_MAG_T2 = 3023 # Vectorview SQ20483-A magnetometer
FIFF.FIFFV_COIL_VV_MAG_T3 = 3024 # Vectorview SQ20950N magnetometer
FIFF.FIFFV_COIL_MAGNES_MAG = 4001 # Magnes WH magnetometer
FIFF.FIFFV_COIL_MAGNES_GRAD = 4002 # Magnes WH gradiometer
FIFF.FIFFV_COIL_MAGNES_R_MAG = 4003 # Magnes WH reference magnetometer
FIFF.FIFFV_COIL_MAGNES_R_GRAD_DIA = 4004 # Magnes WH reference diagonal gradiometer
FIFF.FIFFV_COIL_MAGNES_R_GRAD_OFF = 4005 # Magnes WH reference off-diagonal gradiometer
# MNE RealTime
FIFF.FIFF_MNE_RT_COMMAND = 3700 # realtime command
FIFF.FIFF_MNE_RT_CLIENT_ID = 3701 # realtime client
# MNE epochs bookkeeping
FIFF.FIFFB_MNE_EPOCHS_SELECTION = 3800 # the epochs selection
FIFF.FIFFB_MNE_EPOCHS_DROP_LOG = 3801 # the drop log
| bsd-3-clause | -6,057,507,269,390,150,000 | 39.52375 | 130 | 0.643573 | false |
anotherjesse/nova | nova/api/direct.py | 1 | 7794 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Public HTTP interface that allows services to self-register.
The general flow of a request is:
- Request is parsed into WSGI bits.
- Some middleware checks authentication.
- Routing takes place based on the URL to find a controller.
(/controller/method)
- Parameters are parsed from the request and passed to a method on the
controller as keyword arguments.
- Optionally 'json' is decoded to provide all the parameters.
- Actual work is done and a result is returned.
- That result is turned into json and returned.
"""
import inspect
import urllib
import routes
import webob
from nova import context
from nova import flags
from nova import utils
from nova import wsgi
ROUTES = {}
def register_service(path, handle):
ROUTES[path] = handle
class Router(wsgi.Router):
def __init__(self, mapper=None):
if mapper is None:
mapper = routes.Mapper()
self._load_registered_routes(mapper)
super(Router, self).__init__(mapper=mapper)
def _load_registered_routes(self, mapper):
for route in ROUTES:
mapper.connect('/%s/{action}' % route,
controller=ServiceWrapper(ROUTES[route]))
class DelegatedAuthMiddleware(wsgi.Middleware):
def process_request(self, request):
os_user = request.headers['X-OpenStack-User']
os_project = request.headers['X-OpenStack-Project']
context_ref = context.RequestContext(user=os_user, project=os_project)
request.environ['openstack.context'] = context_ref
class JsonParamsMiddleware(wsgi.Middleware):
def process_request(self, request):
if 'json' not in request.params:
return
params_json = request.params['json']
params_parsed = utils.loads(params_json)
params = {}
for k, v in params_parsed.iteritems():
if k in ('self', 'context'):
continue
if k.startswith('_'):
continue
params[k] = v
request.environ['openstack.params'] = params
class PostParamsMiddleware(wsgi.Middleware):
def process_request(self, request):
params_parsed = request.params
params = {}
for k, v in params_parsed.iteritems():
if k in ('self', 'context'):
continue
if k.startswith('_'):
continue
params[k] = v
request.environ['openstack.params'] = params
class Reflection(object):
"""Reflection methods to list available methods."""
def __init__(self):
self._methods = {}
self._controllers = {}
def _gather_methods(self):
methods = {}
controllers = {}
for route, handler in ROUTES.iteritems():
controllers[route] = handler.__doc__.split('\n')[0]
for k in dir(handler):
if k.startswith('_'):
continue
f = getattr(handler, k)
if not callable(f):
continue
# bunch of ugly formatting stuff
argspec = inspect.getargspec(f)
args = [x for x in argspec[0]
if x != 'self' and x != 'context']
defaults = argspec[3] and argspec[3] or []
args_r = list(reversed(args))
defaults_r = list(reversed(defaults))
args_out = []
while args_r:
if defaults_r:
args_out.append((args_r.pop(0),
repr(defaults_r.pop(0))))
else:
args_out.append((str(args_r.pop(0)),))
# if the method accepts keywords
if argspec[2]:
args_out.insert(0, ('**%s' % argspec[2],))
if f.__doc__:
short_doc = f.__doc__.split('\n')[0]
doc = f.__doc__
else:
short_doc = doc = _('not available')
methods['/%s/%s' % (route, k)] = {
'short_doc': short_doc,
'doc': doc,
'name': k,
'args': list(reversed(args_out))}
self._methods = methods
self._controllers = controllers
def get_controllers(self, context):
"""List available controllers."""
if not self._controllers:
self._gather_methods()
return self._controllers
def get_methods(self, context):
"""List available methods."""
if not self._methods:
self._gather_methods()
method_list = self._methods.keys()
method_list.sort()
methods = {}
for k in method_list:
methods[k] = self._methods[k]['short_doc']
return methods
def get_method_info(self, context, method):
"""Get detailed information about a method."""
if not self._methods:
self._gather_methods()
return self._methods[method]
class ServiceWrapper(wsgi.Controller):
def __init__(self, service_handle):
self.service_handle = service_handle
@webob.dec.wsgify
def __call__(self, req):
arg_dict = req.environ['wsgiorg.routing_args'][1]
action = arg_dict['action']
del arg_dict['action']
context = req.environ['openstack.context']
# allow middleware up the stack to override the params
params = {}
if 'openstack.params' in req.environ:
params = req.environ['openstack.params']
# TODO(termie): do some basic normalization on methods
method = getattr(self.service_handle, action)
# NOTE(vish): make sure we have no unicode keys for py2.6.
params = dict([(str(k), v) for (k, v) in params.iteritems()])
result = method(context, **params)
if type(result) is dict or type(result) is list:
return self._serialize(result, req)
else:
return result
class Proxy(object):
"""Pretend a Direct API endpoint is an object."""
def __init__(self, app, prefix=None):
self.app = app
self.prefix = prefix
def __do_request(self, path, context, **kwargs):
req = webob.Request.blank(path)
req.method = 'POST'
req.body = urllib.urlencode({'json': utils.dumps(kwargs)})
req.environ['openstack.context'] = context
resp = req.get_response(self.app)
try:
return utils.loads(resp.body)
except Exception:
return resp.body
def __getattr__(self, key):
if self.prefix is None:
return self.__class__(self.app, prefix=key)
def _wrapper(context, **kwargs):
return self.__do_request('/%s/%s' % (self.prefix, key),
context,
**kwargs)
_wrapper.func_name = key
return _wrapper
| apache-2.0 | 1,636,530,801,207,467,800 | 31.475 | 78 | 0.564665 | false |
zestyr/lbry | build/release.py | 1 | 6531 | """Bump version and create Github release
This script should be run locally, not on a build server.
"""
import argparse
import contextlib
import os
import re
import subprocess
import sys
import git
import github
import changelog
ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def main():
bumpversion_parts = get_bumpversion_parts()
parser = argparse.ArgumentParser()
parser.add_argument("part", choices=bumpversion_parts, help="part of version to bump")
parser.add_argument("--skip-sanity-checks", action="store_true")
parser.add_argument("--skip-push", action="store_true")
parser.add_argument("--dry-run", action="store_true")
parser.add_argument("--confirm", action="store_true")
args = parser.parse_args()
if args.dry_run:
print "DRY RUN. Nothing will be committed/pushed."
repo = Repo('lbry', args.part, ROOT)
branch = 'master'
print 'Current version: {}'.format(repo.current_version)
print 'New version: {}'.format(repo.new_version)
if not args.confirm and not confirm():
print "Aborting"
return 1
if not args.skip_sanity_checks:
run_sanity_checks(repo, branch)
repo.assert_new_tag_is_absent()
    is_rc = re.search(r'\drc\d+$', repo.new_version) is not None
# only have a release message for real releases, not for RCs
release_msg = None if is_rc else repo.get_unreleased_changelog()
if release_msg is None:
release_msg = ''
if args.dry_run:
print "rc: " + ("yes" if is_rc else "no")
print "release message: \n" + (release_msg if not is_rc else " NO MESSAGE FOR RCs")
return
gh_token = get_gh_token()
auth = github.Github(gh_token)
github_repo = auth.get_repo('lbryio/lbry')
if not is_rc:
repo.bump_changelog()
repo.bumpversion()
new_tag = repo.get_new_tag()
github_repo.create_git_release(new_tag, new_tag, release_msg, draft=True, prerelease=is_rc)
if args.skip_push:
print (
'Skipping push; you will have to reset and delete tags if '
'you want to run this script again.'
)
else:
repo.git_repo.git.push(follow_tags=True, recurse_submodules='check')
class Repo(object):
def __init__(self, name, part, directory):
self.name = name
self.part = part
if not self.part:
raise Exception('Part required')
self.directory = directory
self.git_repo = git.Repo(self.directory)
self._bumped = False
self.current_version = self._get_current_version()
self.new_version = self._get_new_version()
self._changelog = changelog.Changelog(os.path.join(self.directory, 'CHANGELOG.md'))
def get_new_tag(self):
return 'v' + self.new_version
def get_unreleased_changelog(self):
return self._changelog.get_unreleased()
def bump_changelog(self):
self._changelog.bump(self.new_version)
with pushd(self.directory):
self.git_repo.git.add(os.path.basename(self._changelog.path))
def _get_current_version(self):
with pushd(self.directory):
output = subprocess.check_output(
['bumpversion', '--dry-run', '--list', '--allow-dirty', self.part])
return re.search('^current_version=(.*)$', output, re.M).group(1)
def _get_new_version(self):
with pushd(self.directory):
output = subprocess.check_output(
['bumpversion', '--dry-run', '--list', '--allow-dirty', self.part])
return re.search('^new_version=(.*)$', output, re.M).group(1)
def bumpversion(self):
if self._bumped:
raise Exception('Cowardly refusing to bump a repo twice')
with pushd(self.directory):
subprocess.check_call(['bumpversion', '--allow-dirty', self.part])
self._bumped = True
def assert_new_tag_is_absent(self):
new_tag = self.get_new_tag()
tags = self.git_repo.git.tag()
if new_tag in tags.split('\n'):
raise Exception('Tag {} is already present in repo {}.'.format(new_tag, self.name))
def is_behind(self, branch):
self.git_repo.remotes.origin.fetch()
rev_list = '{branch}...origin/{branch}'.format(branch=branch)
commits_behind = self.git_repo.git.rev_list(rev_list, right_only=True, count=True)
commits_behind = int(commits_behind)
return commits_behind > 0
def get_bumpversion_parts():
with pushd(ROOT):
output = subprocess.check_output([
'bumpversion', '--dry-run', '--list', '--allow-dirty', 'fake-part',
])
parse_line = re.search('^parse=(.*)$', output, re.M).group(1)
return tuple(re.findall('<([^>]+)>', parse_line))
def get_gh_token():
if 'GH_TOKEN' in os.environ:
return os.environ['GH_TOKEN']
else:
print """
Please enter your personal access token. If you don't have one,
see https://github.com/lbryio/lbry-app/wiki/Release-Script#generate-a-personal-access-token
for instructions on how to generate one.
You can also set the GH_TOKEN environment variable to avoid seeing this message
in the future"""
return raw_input('token: ').strip()
def confirm():
try:
return raw_input('Is this what you want? [y/N] ').strip().lower() == 'y'
except KeyboardInterrupt:
return False
def run_sanity_checks(repo, branch):
if repo.git_repo.is_dirty():
print 'Cowardly refusing to release a dirty repo'
sys.exit(1)
if repo.git_repo.active_branch.name != branch:
print 'Cowardly refusing to release when not on the {} branch'.format(branch)
sys.exit(1)
if repo.is_behind(branch):
print 'Cowardly refusing to release when behind origin'
sys.exit(1)
if not is_custom_bumpversion_version():
print (
'Install LBRY\'s fork of bumpversion: '
'pip install -U git+https://github.com/lbryio/bumpversion.git'
)
sys.exit(1)
def is_custom_bumpversion_version():
try:
output = subprocess.check_output(['bumpversion', '-v'], stderr=subprocess.STDOUT).strip()
if output == 'bumpversion 0.5.4-lbry':
return True
except (subprocess.CalledProcessError, OSError):
pass
return False
@contextlib.contextmanager
def pushd(new_dir):
previous_dir = os.getcwd()
os.chdir(new_dir)
yield
os.chdir(previous_dir)
if __name__ == '__main__':
sys.exit(main())
| mit | -1,668,162,865,751,404,800 | 31.014706 | 97 | 0.623947 | false |
QualiSystems/Ansible-Shell | package/setup.py | 1 | 1194 | from setuptools import setup, find_packages
import os
with open(os.path.join('version.txt')) as version_file:
version_from_file = version_file.read().strip()
with open('requirements.txt') as f_required:
required = f_required.read().splitlines()
with open('test_requirements.txt') as f_tests:
required_for_tests = f_tests.read().splitlines()
setup(
name="cloudshell-cm-ansible",
author="Quali",
author_email="[email protected]",
description=("A repository for projects providing out of the box capabilities within CloudShell remotely "
"configure vm's"),
packages=find_packages(),
test_suite='nose.collector',
test_requires=required_for_tests,
package_data={'': ['*.txt']},
install_requires=required,
version=version_from_file,
include_package_data=True,
keywords="ansible cloudshell configuration configuration-manager",
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Software Development :: Libraries",
"License :: OSI Approved :: Apache Software License",
],
#requires=required,
) | apache-2.0 | -1,948,971,499,357,489,200 | 35.212121 | 114 | 0.636516 | false |
yaras/BCalc | setup.py | 1 | 2644 | from setuptools import setup, find_packages
setup(
name='BCalc',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.0',
    description='BCalc (Business Calculator) is a script for calculating salary and taxes for wages in Poland.',
# The project's main homepage.
url='https://github.com/yaras/bcalc',
# Author details
author='yaras',
author_email='[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='bcalc salary wage',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'nose==1.3.7',
'coverage==4.1'
],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[
('', ['config.json'])
],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'pensja=bcalc.pensja:main',
'chorobowe=bcalc.chorobowe:main'
],
},
)
| mit | 6,277,856,458,397,335,000 | 33.72973 | 111 | 0.636914 | false |
akrherz/iem | scripts/GIS/wwa2shp.py | 1 | 5518 | """Something to dump current warnings to a shapefile."""
import zipfile
import os
import shutil
import subprocess
from osgeo import ogr
from pyiem.util import utc
def main():
"""Go Main Go"""
utcnow = utc()
os.chdir("/tmp")
fp = "current_ww"
for suffix in ["shp", "shx", "dbf"]:
if os.path.isfile("%s.%s" % (fp, suffix)):
os.remove("%s.%s" % (fp, suffix))
source = ogr.Open(
"PG:host=iemdb-postgis.local dbname=postgis user=nobody "
"gssencmode=disable"
)
out_driver = ogr.GetDriverByName("ESRI Shapefile")
out_ds = out_driver.CreateDataSource("%s.shp" % (fp,))
out_layer = out_ds.CreateLayer("polygon", None, ogr.wkbPolygon)
fd = ogr.FieldDefn("ISSUED", ogr.OFTString)
fd.SetWidth(12)
out_layer.CreateField(fd)
fd = ogr.FieldDefn("EXPIRED", ogr.OFTString)
fd.SetWidth(12)
out_layer.CreateField(fd)
fd = ogr.FieldDefn("UPDATED", ogr.OFTString)
fd.SetWidth(12)
out_layer.CreateField(fd)
fd = ogr.FieldDefn("INIT_ISS", ogr.OFTString)
fd.SetWidth(12)
out_layer.CreateField(fd)
fd = ogr.FieldDefn("INIT_EXP", ogr.OFTString)
fd.SetWidth(12)
out_layer.CreateField(fd)
fd = ogr.FieldDefn("TYPE", ogr.OFTString)
fd.SetWidth(2)
out_layer.CreateField(fd)
fd = ogr.FieldDefn("PHENOM", ogr.OFTString)
fd.SetWidth(2)
out_layer.CreateField(fd)
fd = ogr.FieldDefn("GTYPE", ogr.OFTString)
fd.SetWidth(1)
out_layer.CreateField(fd)
fd = ogr.FieldDefn("SIG", ogr.OFTString)
fd.SetWidth(1)
out_layer.CreateField(fd)
fd = ogr.FieldDefn("WFO", ogr.OFTString)
fd.SetWidth(3)
out_layer.CreateField(fd)
fd = ogr.FieldDefn("ETN", ogr.OFTInteger)
out_layer.CreateField(fd)
fd = ogr.FieldDefn("STATUS", ogr.OFTString)
fd.SetWidth(3)
out_layer.CreateField(fd)
fd = ogr.FieldDefn("NWS_UGC", ogr.OFTString)
fd.SetWidth(6)
out_layer.CreateField(fd)
fd = ogr.FieldDefn("FL_NWSLI", ogr.OFTString)
fd.SetWidth(5)
out_layer.CreateField(fd)
sql = """
SELECT geom, 'P' as gtype, significance, wfo, status, eventid,
null as ugc, phenomena,
to_char(expire at time zone 'UTC', 'YYYYMMDDHH24MI') as utcexpire,
to_char(issue at time zone 'UTC', 'YYYYMMDDHH24MI') as utcissue,
to_char(polygon_begin at time zone 'UTC', 'YYYYMMDDHH24MI') as utcupdated,
to_char(issue at time zone 'UTC', 'YYYYMMDDHH24MI') as utc_prodissue,
to_char(init_expire at time zone 'UTC', 'YYYYMMDDHH24MI')
as utc_init_expire,
hvtec_nwsli
from sbw_%s WHERE polygon_begin <= '%s' and polygon_end > '%s'
UNION
SELECT u.simple_geom as geom, 'C' as gtype, significance, w.wfo, status,
eventid, u.ugc, phenomena,
to_char(expire at time zone 'UTC', 'YYYYMMDDHH24MI') as utcexpire,
to_char(issue at time zone 'UTC', 'YYYYMMDDHH24MI') as utcissue,
to_char(updated at time zone 'UTC', 'YYYYMMDDHH24MI') as utcupdated,
to_char(product_issue at time zone 'UTC', 'YYYYMMDDHH24MI')
as utc_prodissue,
to_char(init_expire at time zone 'UTC', 'YYYYMMDDHH24MI')
as utc_init_expire,
hvtec_nwsli
from warnings_%s w JOIN ugcs u on (u.gid = w.gid) WHERE
expire > '%s' and w.gid is not null
""" % (
utcnow.year,
utcnow,
utcnow,
utcnow.year,
utcnow,
)
data = source.ExecuteSQL(sql)
while True:
feat = data.GetNextFeature()
if not feat:
break
geom = feat.GetGeometryRef()
if geom is None:
continue
# at 0.001 we had marine zones disappear!
geom = geom.Simplify(0.0001)
featDef = ogr.Feature(out_layer.GetLayerDefn())
featDef.SetGeometry(geom)
featDef.SetField("GTYPE", feat.GetField("gtype"))
featDef.SetField("TYPE", feat.GetField("phenomena"))
featDef.SetField("PHENOM", feat.GetField("phenomena"))
featDef.SetField("ISSUED", feat.GetField("utcissue"))
featDef.SetField("EXPIRED", feat.GetField("utcexpire"))
featDef.SetField("UPDATED", feat.GetField("utcupdated"))
featDef.SetField("INIT_ISS", feat.GetField("utc_prodissue"))
featDef.SetField("INIT_EXP", feat.GetField("utc_init_expire"))
featDef.SetField("SIG", feat.GetField("significance"))
featDef.SetField("WFO", feat.GetField("wfo"))
featDef.SetField("STATUS", feat.GetField("status"))
featDef.SetField("ETN", feat.GetField("eventid"))
featDef.SetField("NWS_UGC", feat.GetField("ugc"))
featDef.SetField("FL_NWSLI", feat.GetField("hvtec_nwsli"))
out_layer.CreateFeature(featDef)
feat.Destroy()
source.Destroy()
out_ds.Destroy()
z = zipfile.ZipFile("current_ww.zip", "w", zipfile.ZIP_DEFLATED)
z.write("current_ww.shp")
shutil.copy(
"/opt/iem/scripts/GIS/current_ww.shp.xml", "current_ww.shp.xml"
)
z.write("current_ww.shp.xml")
z.write("current_ww.shx")
z.write("current_ww.dbf")
shutil.copy("/opt/iem/data/gis/meta/4326.prj", "current_ww.prj")
z.write("current_ww.prj")
z.close()
cmd = (
'pqinsert -p "zip c %s '
'gis/shape/4326/us/current_ww.zip bogus zip" current_ww.zip'
) % (utcnow.strftime("%Y%m%d%H%M"),)
subprocess.call(cmd, shell=True)
for suffix in ["shp", "shp.xml", "shx", "dbf", "prj", "zip"]:
os.remove("current_ww.%s" % (suffix,))
if __name__ == "__main__":
main()
| mit | -8,800,740,664,368,985,000 | 30.175141 | 79 | 0.623233 | false |
bwillis32/inclusionRE | appreciation/models.py | 1 | 1075 | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Property(models.Model):
id = models.AutoField(primary_key = True)
streetAddress = models.CharField(max_length=200)
borough = models.CharField(max_length=65)
state = models.CharField(max_length= 65)
zip = models.PositiveSmallIntegerField()
def __str__(self):
return '{}'.format(self.streetAddress)
class Sales(models.Model):
id = models.IntegerField(default=0, primary_key=True)
neighborhood = models.CharField(max_length=255)
year = models.CharField(max_length=30, blank=True)
type_of_home = models.PositiveSmallIntegerField(null = True)
num_of_sales = models.PositiveSmallIntegerField(null = True)
lowest_sale_price =models.FloatField(null = True)
avg_sale_price = models.FloatField(null = True)
med_sale_price = models.FloatField(null = True)
high_sale_price = models.FloatField(null = True)
link = models.ForeignKey('appreciation.Property', related_name='Sales')
def __str__(self):
return '{}'.format(self.neighborhood)
| apache-2.0 | -3,489,742,085,480,704,500 | 32.59375 | 73 | 0.733023 | false |
andreyto/YAP | bits/logging_helper.py | 1 | 1032 | ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the MGTAXA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Some support for logging"""
import logging
def assert_log(cond,msg,logger):
"""If condition is False, log error and raise AssertionError"""
if not cond:
logger.error(msg)
raise AssertionError(msg)
def logging_config(detail="high",level=logging.DEBUG):
"""Some common logging configuration scenarious"""
if detail == "high":
logging.basicConfig(level=level, datefmt="%y-%m-%d %H:%M:%S",
filemode="a",
format=("%(asctime)s [%(levelname)5.5s] pid:%(process)-5s\t"
+ "thread:%(threadName)10.10s\t"
+ "%(module)20.20s.:%(funcName)-12.12s:%(lineno)-5s:\t"
+ "%(message)s"))
else:
raise ValueError("Unknown value detail={}".format(detail))
| mit | 1,184,815,785,125,451,000 | 33.4 | 78 | 0.511628 | false |
BryceSchroeder/delvmod | redelv/redelvlib/script_editor.py | 1 | 11007 | #!/usr/bin/env python
# Copyright 2015-16 Bryce Schroeder, www.bryce.pw, [email protected]
# Wiki: http://www.ferazelhosting.net/wiki/delv
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# "Cythera" and "Delver" are trademarks of either Glenn Andreas or
# Ambrosia Software, Inc.
import csv
import gtk, pango
import editors
import delv.store
import sys, traceback
import images
import delv.rdasm
import urllib2
from cStringIO import StringIO
class ScriptEditor(editors.Editor):
name = "Scripting System Editor"
default_size = 680,512
icon = images.script_path
def gui_setup(self):
pbox = gtk.VBox(False,0)
self.set_default_size(*self.default_size)
menu_items = (
("/File/Import Text", "<control>I", self.file_import, 0, None),
("/File/Export Text", "<control>E", self.file_export, 0, None),
("/File/Revert to Saved Source", None, self.load_source, 0, None),
("/File/Revert to Disassembly", None, self.load_disassemble, 0,
None),
("/File/Load from TAUCS", None, self.load_taucs, 0, None),
("/File/Assemble and Save", "<control>S", self.file_save, 0, None),
("/File/Save Source Code", "<shift><control>S",
self.do_save_source,
0, None),
("/Edit/Edit in External Text Editor", "<control>G",
self.edit_external, 0, None),
("/Edit/Purge Source", None,
self.purge, 0, None),
#("/Edit/Check Syntax", "<control>T",
# self.edit_syntax, 0, None),
#("/Edit/Copy", "<control>C", self.edit_copy, 0, None),
#("/Edit/Paste","<control>V", self.edit_paste, 0, None),
)
accel = gtk.AccelGroup()
ifc = gtk.ItemFactory(gtk.MenuBar, "<main>", accel)
self.add_accel_group(accel)
ifc.create_items(menu_items)
self.menu_bar = ifc.get_widget("<main>")
pbox.pack_start(self.menu_bar, False, True, 0)
self.text_buf = gtk.TextBuffer()
self.text_buf.set_text(" Nothing Loaded ".center(78,';'))
self.text_view = gtk.TextView()
self.text_view.set_buffer(self.text_buf)
self.text_view.set_wrap_mode(gtk.WRAP_CHAR)
fontdesc = pango.FontDescription("monospace 10")
self.text_view.modify_font(fontdesc)
sw = gtk.ScrolledWindow()
sw.set_policy(gtk.POLICY_AUTOMATIC,gtk.POLICY_AUTOMATIC)
sw.add(self.text_view)
pbox.pack_start(sw, True, True, 0)
hbox = gtk.HBox(False,0)
hbox.pack_start(gtk.Label("Status:"), False, True, 0)
self.asm_status = gtk.Entry()
self.asm_status.set_editable(False)
self.asm_status.set_text("Disassembling binary... ")
hbox.pack_start(self.asm_status, True, True, 0)
self.auto_assemble = gtk.ToggleButton(
"Auto-Assemble")
self.auto_assemble.set_active(True)
hbox.pack_start(self.auto_assemble, False, True, 0)
self.save_source = gtk.ToggleButton(
"Auto-save Source")
self.save_source.set_active(True)
hbox.pack_start(self.save_source, False, True, 0)
pbox.pack_start(hbox, False, True, 0)
self.add(pbox)
self.assembler = None
self.cycle_check = False
def get_assembler(self):
if not self.assembler:
self.errorstream = StringIO()
self.asm_status.set_text("Preparing assembler...")
while gtk.events_pending(): gtk.main_iteration()
self.assembler = delv.rdasm.Assembler(
message_stream=self.errorstream,
filename="<res 0x%04X>"%self.res.resid,
)
# check to make sure that we can assemble this file correctly
self.asm_status.set_text("Checking cycle validity... ")
while gtk.events_pending(): gtk.main_iteration()
if not self.cycle_check:
obin = self.assembler.assemble(self.original_disassembly)
if self.canonical_object.data != obin:
ofs1 = ((self.canonical_object.data[0]<<8)
|self.canonical_object.data[1])
ofs2 = ((ord(obin[0])<<8)
|ord(obin[1]))
if (ofs1 == ofs2
and obin[0:ofs1] == self.canonical_object.data[0:ofs1]):
self.asm_status.set_text(
"Pass with table mismatch; ready.")
else:
self.asm_status.set_text(
"FAILED CHECK: This resource can't be reassembled.")
return None
else:
self.asm_status.set_text("Passed validation check; ready.")
self.cycle_check = True
while gtk.events_pending(): gtk.main_iteration()
return self.assembler
def editor_setup(self):
self.load()
self.text_buf.connect("changed", self.textbuf_changed, None)
def load_taucs(self, *argv):
try:
data = urllib2.urlopen(
self.redelv.preferences['source_archive']%self.res.resid
).read()
except urllib2.HTTPError:
self.asm_status.set_text(
"ERROR: Could not find canonical sources for this resource.")
#message = gtk.MessageDialog(type=gtk.MESSAGE_ERROR,
# buttons=gtk.BUTTONS_OK)
#message.set_markup(
# "Could not find canonical sources for this resource.")
#message.run()
return
self.cycle_check = True
self.asm_status.set_text(
"Source successfully downloaded from the archive.")
data = data[data.find('{{{')+3:data.find('}}}')
].strip().replace('\r','')
self.text_buf.set_text(data)
self.set_unsaved()
def _loadsource(self):
code = self.redelv.get_library().get_code_store()
src = code.get(self.res.resid)
self.cycle_check = True
return src
def purge(self, *argv):
code = self.redelv.get_library().get_code_store()
code.purge(self.res.resid)
self.asm_status.set_text(
"Saved source code deleted.")
def do_save_source(self, *argv):
code = self.redelv.get_library().get_code_store()
code.save_source(self.res.resid, self.text_buf.get_text(
*self.text_buf.get_bounds()))
def load_source(self, *argv):
src = self._loadsource()
if not src:
self.asm_status.set_text(
"ERROR: You have no saved source for this resource.")
return False
else:
self.cycle_check = True
self.text_buf.set_text(src)
return True
def load(self, *argv):
src = self._loadsource()
if src:
self.text_buf.set_text(src)
self.asm_status.set_text(
"Loaded your saved source for this resource.")
else:
self.load_disassemble(*argv)
def load_disassemble(self, *argv):
self.set_title("Script Editor [%04X]"%self.res.resid)
self.text_buf.set_text("; DISASSEMBLING")
#self.canonical_object.load_from_library(
# self.redelv.get_library())
asmc = self.canonical_object.disassemble()
self.original_disassembly = asmc
self.text_buf.set_text(asmc)
self.asm_status.set_text("Disassembly complete.")
#self.canonical_object.printout(sys.stdout,0)
def edit_external(self, *argv):
self.text_view.set_editable(False)
self.open_external_editor(
self.redelv.preferences['assembly_editor_cmd'],
self.external_writeout, self.external_readin,
file_extension = '.rdasm')
def external_writeout(self, target, cbdata):
target.write(str(self.text_buf.get_text(
*self.text_buf.get_bounds())))
target.flush()
def external_readin(self, path, cbdata):
self.asm_status.set_text("Changed in external editor.")
self.text_buf.set_text(open(path,'rb').read())
self.set_unsaved()
if self.auto_assemble.get_active(): self.file_save()
def file_import(self,*args):
#if self.unsaved and self.warn_unsaved_changes(): return
path = "<undefined>"
try:
path = self.ask_open_path()
if not path: return
data = open(path,'rb').read()
except Exception,e:
self.error_message("Couldn't open '%s': %s"%(path,
repr(e)))
return
self.set_unsaved()
self.asm_status.set_text("Imported a file (not assembled yet.)")
        # Load the imported text into the editor buffer
        self.text_buf.set_text(data)
def file_export(self, *args):
path = self.ask_save_path(default = "Script%04X.rdasm"%self.res.resid)
if not path: return
if not path.endswith(".rdasm"): path += ".rdasm"
try:
open(path,'rb').write("EXPORT TEST")
except Exception,e:
self.error_message("Couldn't open '%s': %s"%(path,
repr(e)))
return
def edit_syntax(self, *args):
#try:
# av = self.assemble()
#except delv.script.AssemblerError, e:
# self.error_message(str(e))
# except syntax errors blah:
self.asm_status.set_text("Not implemented yet. Just try it and see.")
def textbuf_changed(self, *args):
if self.save_source.get_active():
self.do_save_source()
self.set_unsaved()
def file_save(self, *args):
# Assemble of course.
# try:
asm = self.get_assembler()
if not asm:
return
src = str(self.text_buf.get_text(*self.text_buf.get_bounds()))
self.asm_status.set_text("Assembling source of %d bytes..."%len(src))
while gtk.events_pending(): gtk.main_iteration()
av = self.get_assembler().assemble(src)
self.asm_status.set_text("Assembled successfully; %d bytes."%len(av))
self.res.set_data(av)
self.redelv.set_unsaved()
self.set_saved()
self.assembler = None
| gpl-3.0 | -2,756,007,720,885,981,000 | 40.535849 | 79 | 0.568093 | false |
alexei-matveev/ccp1gui | interfaces/smeagolio.py | 1 | 9939 | #
# This file is part of the CCP1 Graphical User Interface (ccp1gui)
#
# (C) 2002-2005 CCLRC Daresbury Laboratory
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
#
# Read a data file output by Smeagol
import os,sys
if __name__ == "__main__":
# Need to add the gui directory to the python path so
# that all the modules can be imported
gui_path = os.path.split(os.path.dirname( os.path.realpath( __file__ ) ))[0]
sys.path.append(gui_path)
else:
from viewer.paths import gui_path
# import python modules
import math
import unittest
# import local modules
import objects.field
import objects.vector
# import external modules
import vtk # We create the VTK objects directly
from fileio import FileIO
class SmeagolIO(FileIO):
"""
Load objects from the files generated by Smeagol
"""
def __init__(self, filepath=None,**kw):
""" Set up the structures we need
"""
# Initialise base class
FileIO.__init__(self,filepath=filepath,**kw)
# List which types of object we can read/write
self.canRead = True
self.canWrite = []
def _ReadFile(self,**kw):
""" Read in the data from an RHO file.
Format of the file is expected to be:
1: Z lattice vector - float(x) float(y) float(z)
2: Y lattice vector - float(x) float(y) float(z)
3: X lattice vector - float(x) float(y) float(z)
4: zdim ydim xdim nspin - number of mesh points in each direction
plus the number of spins - if nspin=2 then
we are creating 2 datasets
5:data... - list of data points as floats
"""
f = open( self.filepath, 'r')
# Read in the first 4 lines containing the grid definition
# and the dimensions & spin
zline = f.readline().strip()
yline = f.readline().strip()
xline = f.readline().strip()
dimline = f.readline().strip()
# Set up the Z lattice vector
zLatVec = self.__get_latvec( zline )
if not zLatVec:
print "No Z Lattice Vector!"
return 1
# Set up the Y lattice vector
yLatVec = self.__get_latvec( yline )
if not yLatVec:
print "No Z Lattice Vector!"
return 1
# Set up the X lattice vector
xLatVec = self.__get_latvec( xline )
if not xLatVec:
print "No X Lattice Vector!"
return 1
# Get the dimensions & the number of spins
fields = dimline.split()
if ( len( fields ) != 4 ):
raise Exception,"Problem with dimension line in smeagolreader read_grid_dimensions!"
xDim,yDim,zDim,nspin = fields[0:4]
xDim,yDim,zDim,nspin = int(xDim), int(yDim), int(zDim), int(nspin)
#Work out how many data points we've got
npoints = xDim * yDim * zDim
# Now loop over the spins & read in the data points.
# We assume that the origin is at the centre of the grid and that the data has been written
# out starting at the origin, going to the edge and then being translated back
# by the unit cell vector and writing out the remaining points back to the origin
# Need to allocate memory for the data
#data = []
#for i in range(npoints):
# data.append(0)
scalars = vtk.vtkFloatArray()
scalars.SetNumberOfValues( npoints )
for spin in range(nspin):
#print "Reading data points from file..."
for z in range(zDim):
if ( z < (zDim/2) ):
zt = z + (zDim/2)
else:
zt = z - (zDim/2)
for y in range(yDim):
if ( y < (yDim/2) ):
yt = y + (yDim/2)
else:
yt = y - (yDim/2)
for x in range(xDim):
if ( x < (xDim/2) ):
xt = x + (xDim/2)
else:
xt = x - (xDim/2)
#if not count % 10000:
# print '...',
line = f.readline()
if not line:
print "ERROR reading Data in smeagolreader!"
return None
try:
dat = float(line)
except:
print "Bad Data in smeagol reader!: %s " % line
dat = float(-999999)
offset = (zt * xDim * yDim) + (yt * xDim) + xt
#data[offset] = dat
scalars.SetValue( offset, dat )
#End of loop over z,x,y
if ( nspin == 2):
title = self.name + str(spin)
else:
title = self.name
# Create the field object
smgfield = self.create_vtkfield( title, scalars, zLatVec, yLatVec, xLatVec, \
zDim, yDim, xDim )
# Add the field to the list of objects
if self.debug:
print "smeagolreader appending field:"
smgfield.list()
self.fields.append( smgfield )
def __get_latvec( self, line ):
""" Take a line read in from the RHO file and return the
relevant lattice vector as a list of floats
REM: we need to convert from Bohrs -> Angstroms so we do
this here.
"""
bohr_2_angs = 0.529177
x,y,z = line.split()
x,y,z = float(x), float(y), float(z)
x,y,z = x* bohr_2_angs, y* bohr_2_angs, z* bohr_2_angs
return [ x, y, z ]
# def create_vtkfield( self, title, data, zLatVec, yLatVec, xLatVec, zDim, yDim, xDim ):
def create_vtkfield( self, title, scalars, zLatVec, yLatVec, xLatVec, zDim, yDim, xDim ):
""" Create a field object that holds the data in a vtkImageData object
"""
vtkdata = vtk.vtkImageData()
vtkdata.SetDimensions( xDim, yDim, zDim )
# work out the spacing
# assume the grid origin is always at 0.0, 0.0, 0.0
origin = [ 0.0, 0.0, 0.0 ]
xlen = math.sqrt( math.pow( (origin[0] - xLatVec[0]), 2) + \
math.pow( (origin[1] - xLatVec[1]), 2) + \
math.pow( (origin[2] - xLatVec[2]), 2) )
xspacing = float( xlen/xDim )
ylen = math.sqrt( math.pow( (origin[0] - yLatVec[0]), 2) + \
math.pow( (origin[1] - yLatVec[1]), 2) + \
math.pow( (origin[2] - yLatVec[2]), 2) )
yspacing = float( ylen/yDim )
zlen = math.sqrt( math.pow( (origin[0] - zLatVec[0]), 2) + \
math.pow( (origin[1] - zLatVec[1]), 2) + \
math.pow( (origin[2] - zLatVec[2]), 2) )
zspacing = float( zlen/zDim )
vtkdata.SetSpacing( xspacing, yspacing, zspacing )
#scalars = vtk.vtkFloatArray()
#npoints = zDim * yDim * xDim
#scalars.SetNumberOfValues( npoints )
#for i in range( npoints ):
# What on earth is the vtkIdType??? (1st arg)
#scalars.SetValue( i, data[i] )
# scalars.InsertNextValue( data[i] )
vtkdata.GetPointData().SetScalars(scalars)
vtkdata.SetScalarTypeToFloat()
# Work out the origin (assume it's at the centre of the grid)
origin = [ -xlen / 2, -ylen / 2 , -zlen / 2 ]
vtkdata.SetOrigin( origin )
# Instantiate the field object
field = objects.field.Field()
field.title = title
field.vtkdata = vtkdata
# Need to add axis, dimensions & origin as these are required by the CutSlice visualiser
# NB: May need to use Vector as currently these are only lists?
field.dim = [ xDim, yDim, zDim ]
field.x = objects.vector.Vector( xLatVec )
#field.axis.append( field.x )
field.axis[0] = field.x
field.y = objects.vector.Vector( yLatVec )
#field.axis.append( field.y )
field.axis[1] = field.y
field.z = objects.vector.Vector( zLatVec )
#field.axis.append( field.z )
field.axis[2] = field.z
#field.origin =objects.vector.Vector( origin )
#jmht HACK - need to think about this
field.origin =objects.vector.Vector( [0.,0.,0.] )
return field
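# Usage sketch (added for illustration; the file path is hypothetical):
# SmeagolIO is normally driven through the GetObjects interface inherited
# from FileIO, as the unit test below does.
#
# reader = SmeagolIO()
# fields = reader.GetObjects(filepath='/path/to/Benz.rho', otype='fields')
# print fields[0].title, fields[0].dim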
##########################################################
#
#
# Unittesting stuff goes here
#
#
##########################################################
class testSmeagol_IO(unittest.TestCase):
"""Test whether we deal with Smeagol data"""
reader = SmeagolIO()
def testRead(self):
""" read in scalar data
"""
fields = self.reader.GetObjects(
filepath='/c/qcg/jmht/share/codes/ccp1gui/smeagol/Benz.short.rho',
otype = 'fields'
)
self.assertEqual( fields[0].dim[0] , 90)
def testMe():
"""Return a unittest test suite with all the testcases that should be run by the main
gui testing framework."""
return unittest.TestLoader().loadTestsFromTestCase(testSmeagol_IO)
if ( __name__ == "__main__" ):
unittest.main()
| gpl-2.0 | 6,608,747,880,550,937,000 | 33.037671 | 98 | 0.542912 | false |
bansallab/roundup | _164_scrape.py | 1 | 8322 | import csv
from urllib.request import Request, urlopen
import re
from datetime import date
import dateutil.parser
from sys import argv
from bs4 import BeautifulSoup
from dateutil.relativedelta import relativedelta
from os import system
import scrape_util
default_sale, base_url, prefix = scrape_util.get_market(argv)
temp_raw = scrape_util.ReportRaw(argv, prefix)
report_path = 'index.php?option=com_content&view=article&id=251&Itemid=575'
strip_char = ';,. \n\t'
def get_sale_date(date_string):
"""Return the date of the sale."""
sale_date = dateutil.parser.parse(date_string, fuzzy = True)
return sale_date
def get_sale_day(date_string, year):
"""Return the date of the sale."""
date_string = date_string.replace(str(year), '')
match = re.search(r'([0-9]+)', date_string)
if match:
sale_day = int(match.group(1))
return sale_day
def is_heading(word):
"""Determine whether a given line is a section header
that describes subsequent lines of a report.
"""
cattle_clue = r'(bulls?|steers?|strs?|cows?|heifers?|hfrs?|calf|calves|pairs?|cattle|weighups?|yrlgs?)'
is_not_succinct = len(word) > 1
has_cattle = False
has_number = False
for this_word in word:
if re.search(cattle_clue, this_word, re.IGNORECASE):
has_cattle = True
break
for this_word in word:
if re.search(r'[0-9]', this_word):
has_number = True
break
return bool(is_not_succinct and has_cattle and not has_number)
def is_sale(word):
"""Determine whether a given line describes a sale of cattle."""
is_not_succinct = len(word) > 2
has_price = False
for this_word in word:
if re.search(r'[0-9,]+\.[0-9]{2}', this_word):
has_price = True
break
return bool(has_price and is_not_succinct)
def is_number(string):
"""Test whether a string is number-ish. Ignoring units like 'cwt' and 'hd'."""
if string:
string = re.sub(r'\$|[,-/]|cwt|he?a?d?|per', '', string, flags = re.IGNORECASE)
try:
float(string)
result = True
except ValueError:
result = False
else:
result = False
return result
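# Illustrative inputs for is_number (hypothetical strings); unit suffixes such
# as 'cwt' and 'hd' are stripped before the numeric test:
# is_number('1,250 cwt') -> True
# is_number('5 hd') -> True
# is_number('black steers') -> False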
def get_sale_location(word):
"""Convert address strings into a list of address components."""
sale_location = ' '.join(word)
sale_location = re.sub(r'\(.*?\)$', '', sale_location)
match = re.search(r'(.*?),?(' + scrape_util.state + r')$', sale_location, re.IGNORECASE)
if match:
sale_location = [match.group(1), match.group(2)]
else:
sale_location = [sale_location]
return sale_location
def get_sale(word, consignor_info, price_key):
"""Convert the input into a dictionary, with keys matching
the CSV column headers in the scrape_util module.
"""
if len(word)==2:
match = re.search(r'\b([0-9]+)$', word[0])
if match:
word[0:1] = [word[0].replace(match.group(1), ''), match.group(1)]
number_word = [idx for idx, val in enumerate(word) if is_number(val)]
name_location = consignor_info.split(',')
consignor_name = name_location.pop(0)
if name_location:
if re.search(r'&',name_location[0]):
consignor_name = consignor_name.strip() + ',' + name_location.pop(0)
sale = {
'consignor_name': consignor_name.strip(strip_char).title(),
}
if name_location:
sale_location = get_sale_location([','.join(name_location)])
sale['consignor_city'] = sale_location.pop(0).strip(strip_char).title()
if sale_location:
sale['consignor_state'] = sale_location.pop().strip(strip_char)
cattle_string = word[number_word[0]-1]
head_match = re.match(r'([0-9,]+)', cattle_string)
if head_match:
head_string = head_match.group(1).replace(',','')
try:
int(head_string)
sale['cattle_head'] = head_string
except ValueError:
pass
cattle_string = cattle_string.replace(head_match.group(),'')
sale['cattle_cattle'] = cattle_string.strip(strip_char)
weight_string = word[number_word[0]].strip(strip_char).replace(',', '')
try:
float(weight_string)
sale['cattle_avg_weight'] = weight_string
except ValueError:
pass
price_string = word[number_word[1]]
match = re.search(r'([0-9,.]+)', price_string, re.IGNORECASE)
if match:
sale[price_key] = match.group(1).replace(',', '').strip(strip_char)
sale = {k:v.strip() for k,v in sale.items() if v.strip()}
return sale
def write_sale(line, this_default_sale, writer):
"""Extract sales from a list of report lines and write them to a CSV file."""
consignor_info = ''
price_key = 'cattle_price_cwt'
exist_sale = []
for this_line in line:
word = re.split('\s{2,}', this_line)
if is_heading(word):
cattle_clue = word[0]
price_clue = word[-1]
if re.search(r'cwt', price_clue, re.IGNORECASE):
price_key = 'cattle_price_cwt'
elif re.search(r'pr|hd', price_clue, re.IGNORECASE):
price_key = 'cattle_price'
else:
if re.search(r'bred|pair', cattle_clue, re.IGNORECASE):
price_key = 'cattle_price'
else:
price_key = 'cattle_price_cwt'
elif is_sale(word):
if word[0]=='':
if re.match(r'[0-9]+', word[1]):
word.pop(0)
exist_sale.append(word)
else:
word.pop(0)
if re.match(r',', word[0]) or re.search(r',$', consignor_info):
consignor_info = consignor_info + word[0]
else:
consignor_info = consignor_info + ',' + word[0]
exist_sale.append(word)
else:
for this_sale in exist_sale:
sale = this_default_sale.copy()
sale.update(get_sale(this_sale, consignor_info, price_key))
writer.writerow(sale)
exist_sale.clear()
consignor_info = word.pop(0)
exist_sale.append(word)
for this_sale in exist_sale:
sale = this_default_sale.copy()
sale.update(get_sale(this_sale, consignor_info, price_key))
writer.writerow(sale)
def main():
request = Request(
base_url + report_path,
headers = scrape_util.url_header,
)
with urlopen(request) as io:
soup = BeautifulSoup(io.read(), 'lxml')
report = soup.find('div', attrs={'class': 'module'}).find_all('a')
# Locate existing CSV files
archive = scrape_util.ArchiveFolder(argv, prefix)
for this_report in report:
# create temporary text file from downloaded pdf
request = Request(
this_report['href'].replace(' ', '%20'),
headers = scrape_util.url_header,
)
with urlopen(request) as io:
response = io.read()
with temp_raw.open('wb') as io:
io.write(response)
exit_value = system(scrape_util.pdftotext.format(str(temp_raw)))
if exit_value != 0:
print('Failure convert PDF in {}.'.format(prefix))
continue
# read sale text into line list
temp_txt = temp_raw.with_suffix('.txt')
with temp_txt.open('r', errors = 'ignore') as io:
line = list(this_line.strip('\n') for this_line in io if this_line.strip())
temp_raw.clean()
sale_date = get_sale_date(line[0])
io_name = archive.new_csv(sale_date)
if not io_name:
break
this_default_sale = default_sale.copy()
this_default_sale.update({
'sale_year': sale_date.year,
'sale_month': sale_date.month,
'sale_day': sale_date.day,
})
with io_name.open('w', encoding='utf-8') as io:
writer = csv.DictWriter(io, scrape_util.header, lineterminator='\n')
writer.writeheader()
write_sale(line, this_default_sale, writer)
if __name__ == '__main__':
main()
| mit | -6,300,331,700,896,759,000 | 30.052239 | 107 | 0.56597 | false |
anhaidgroup/py_entitymatching | py_entitymatching/blocker/attr_equiv_blocker.py | 1 | 25900 | import logging
import pandas as pd
import numpy as np
import pyprind
import six
from joblib import Parallel, delayed
import py_entitymatching.catalog.catalog_manager as cm
from py_entitymatching.blocker.blocker import Blocker
from py_entitymatching.utils.catalog_helper import log_info, get_name_for_key, add_key_column
from py_entitymatching.utils.generic_helper import rem_nan
from py_entitymatching.utils.validation_helper import validate_object_type
logger = logging.getLogger(__name__)
class AttrEquivalenceBlocker(Blocker):
"""
Blocks based on the equivalence of attribute values.
"""
def block_tables(self, ltable, rtable, l_block_attr, r_block_attr,
l_output_attrs=None, r_output_attrs=None,
l_output_prefix='ltable_', r_output_prefix='rtable_',
allow_missing=False, verbose=False, n_jobs=1):
"""Blocks two tables based on attribute equivalence.
Conceptually, this will check `l_block_attr=r_block_attr` for each tuple
pair from the Cartesian product of tables `ltable` and `rtable`. It outputs a
Pandas dataframe object with tuple pairs that satisfy the equality condition.
The dataframe will include the attribute '_id', the key attribute from
ltable, the key attribute from rtable, followed by the attributes listed in
`l_output_attrs` and `r_output_attrs` if they are specified. Each of these output and key attributes will be
prefixed with given `l_output_prefix` and `r_output_prefix`. If `allow_missing` is set
to `True` then all tuple pairs with missing value in at least one of the tuples will be
included in the output dataframe.
Further, this will update the following metadata in the catalog for the output table:
(1) key, (2) ltable, (3) rtable, (4) fk_ltable, and (5) fk_rtable.
Args:
ltable (DataFrame): The left input table.
rtable (DataFrame): The right input table.
l_block_attr (string): The blocking attribute in left table.
r_block_attr (string): The blocking attribute in right table.
l_output_attrs (list): A list of attribute names from the left
table to be included in the
output candidate set (defaults to None).
r_output_attrs (list): A list of attribute names from the right
table to be included in the
output candidate set (defaults to None).
l_output_prefix (string): The prefix to be used for the attribute names
coming from the left table in the output
candidate set (defaults to 'ltable\_').
r_output_prefix (string): The prefix to be used for the attribute names
coming from the right table in the output
candidate set (defaults to 'rtable\_').
allow_missing (boolean): A flag to indicate whether tuple pairs
with missing value in at least one of the
blocking attributes should be included in
the output candidate set (defaults to
False). If this flag is set to True, a
tuple in ltable with missing value in the
blocking attribute will be matched with
every tuple in rtable and vice versa.
verbose (boolean): A flag to indicate whether the debug information
should be logged (defaults to False).
n_jobs (int): The number of parallel jobs to be used for computation
(defaults to 1). If -1 all CPUs are used. If 0 or 1,
no parallel computation is used at all, which is useful for
debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used (where n_cpus is the total number of CPUs in the
machine). Thus, for n_jobs = -2, all CPUs but one are used.
If (n_cpus + 1 + n_jobs) is less than 1, then no parallel
computation is used (i.e., equivalent to the default).
Returns:
A candidate set of tuple pairs that survived blocking (DataFrame).
Raises:
AssertionError: If `ltable` is not of type pandas
DataFrame.
AssertionError: If `rtable` is not of type pandas
DataFrame.
AssertionError: If `l_block_attr` is not of type string.
AssertionError: If `r_block_attr` is not of type string.
AssertionError: If `l_output_attrs` is not of type of
list.
AssertionError: If `r_output_attrs` is not of type of
list.
AssertionError: If the values in `l_output_attrs` is not of type
string.
AssertionError: If the values in `r_output_attrs` is not of type
string.
AssertionError: If `l_output_prefix` is not of type
string.
AssertionError: If `r_output_prefix` is not of type
string.
AssertionError: If `verbose` is not of type
boolean.
AssertionError: If `allow_missing` is not of type boolean.
AssertionError: If `n_jobs` is not of type
int.
AssertionError: If `l_block_attr` is not in the ltable columns.
AssertionError: If `r_block_attr` is not in the rtable columns.
AssertionError: If `l_out_attrs` are not in the ltable.
AssertionError: If `r_out_attrs` are not in the rtable.
Examples:
>>> import py_entitymatching as em
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='ID')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='ID')
>>> ab = em.AttrEquivalenceBlocker()
>>> C1 = ab.block_tables(A, B, 'zipcode', 'zipcode', l_output_attrs=['name'], r_output_attrs=['name'])
# Include all possible tuple pairs with missing values
>>> C2 = ab.block_tables(A, B, 'zipcode', 'zipcode', l_output_attrs=['name'], r_output_attrs=['name'], allow_missing=True)
"""
# validate data types of input parameters
self.validate_types_params_tables(ltable, rtable,
l_output_attrs, r_output_attrs,
l_output_prefix,
r_output_prefix, verbose, n_jobs)
# validate data types of input blocking attributes
self.validate_types_block_attrs(l_block_attr, r_block_attr)
# validate data type of allow_missing
self.validate_allow_missing(allow_missing)
# validate input parameters
self.validate_block_attrs(ltable, rtable, l_block_attr, r_block_attr)
self.validate_output_attrs(ltable, rtable, l_output_attrs,
r_output_attrs)
# get and validate required metadata
log_info(logger, 'Required metadata: ltable key, rtable key', verbose)
# # get metadata
l_key, r_key = cm.get_keys_for_ltable_rtable(ltable, rtable, logger,
verbose)
# # validate metadata
cm._validate_metadata_for_table(ltable, l_key, 'ltable', logger,
verbose)
cm._validate_metadata_for_table(rtable, r_key, 'rtable', logger,
verbose)
# do blocking
# # do projection of required attributes from the tables
l_proj_attrs = self.get_attrs_to_project(l_key, l_block_attr,
l_output_attrs)
ltable_proj = ltable[l_proj_attrs]
r_proj_attrs = self.get_attrs_to_project(r_key, r_block_attr,
r_output_attrs)
rtable_proj = rtable[r_proj_attrs]
# # remove records with nans in the blocking attribute
l_df = rem_nan(ltable_proj, l_block_attr)
r_df = rem_nan(rtable_proj, r_block_attr)
# # determine number of processes to launch parallely
n_procs = self.get_num_procs(n_jobs, len(l_df) * len(r_df))
if n_procs <= 1:
# single process
candset = _block_tables_split(l_df, r_df, l_key, r_key,
l_block_attr, r_block_attr,
l_output_attrs, r_output_attrs,
l_output_prefix, r_output_prefix,
allow_missing)
else:
# multiprocessing
m, n = self.get_split_params(n_procs, len(l_df), len(r_df))
l_splits = np.array_split(l_df, m)
r_splits = np.array_split(r_df, n)
c_splits = Parallel(n_jobs=m * n)(
delayed(_block_tables_split)(l, r, l_key, r_key,
l_block_attr, r_block_attr,
l_output_attrs, r_output_attrs,
l_output_prefix, r_output_prefix,
allow_missing)
for l in l_splits for r in r_splits)
candset = pd.concat(c_splits, ignore_index=True)
# if allow_missing flag is True, then compute
# all pairs with missing value in left table, and
# all pairs with missing value in right table
if allow_missing:
missing_pairs = self.get_pairs_with_missing_value(ltable_proj,
rtable_proj,
l_key, r_key,
l_block_attr,
r_block_attr,
l_output_attrs,
r_output_attrs,
l_output_prefix,
r_output_prefix)
candset = pd.concat([candset, missing_pairs], ignore_index=True)
# update catalog
key = get_name_for_key(candset.columns)
candset = add_key_column(candset, key)
cm.set_candset_properties(candset, key, l_output_prefix + l_key,
r_output_prefix + r_key, ltable, rtable)
# return candidate set
return candset
def block_candset(self, candset, l_block_attr, r_block_attr,
allow_missing=False, verbose=False, show_progress=True,
n_jobs=1):
"""Blocks an input candidate set of tuple pairs based on attribute equivalence.
Finds tuple pairs from an input candidate set of tuple pairs
such that the value of attribute l_block_attr of the left tuple in a
tuple pair exactly matches the value of attribute r_block_attr of the
right tuple in the tuple pair.
Args:
candset (DataFrame): The input candidate set of tuple pairs.
l_block_attr (string): The blocking attribute in left table.
r_block_attr (string): The blocking attribute in right table.
allow_missing (boolean): A flag to indicate whether tuple pairs
with missing value in at least one of the
blocking attributes should be included in
the output candidate set (defaults to
False). If this flag is set to True, a
tuple pair with missing value in either
blocking attribute will be retained in the
output candidate set.
verbose (boolean): A flag to indicate whether the debug
information should be logged (defaults to False).
show_progress (boolean): A flag to indicate whether progress should
be displayed to the user (defaults to True).
n_jobs (int): The number of parallel jobs to be used for computation
(defaults to 1). If -1 all CPUs are used. If 0 or 1,
no parallel computation is used at all, which is useful for
debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used (where n_cpus is the total number of CPUs in the
machine). Thus, for n_jobs = -2, all CPUs but one are used.
If (n_cpus + 1 + n_jobs) is less than 1, then no parallel
computation is used (i.e., equivalent to the default).
Returns:
A candidate set of tuple pairs that survived blocking (DataFrame).
Raises:
AssertionError: If `candset` is not of type pandas
DataFrame.
AssertionError: If `l_block_attr` is not of type string.
AssertionError: If `r_block_attr` is not of type string.
AssertionError: If `verbose` is not of type
boolean.
AssertionError: If `n_jobs` is not of type
int.
AssertionError: If `l_block_attr` is not in the ltable columns.
AssertionError: If `r_block_attr` is not in the rtable columns.
Examples:
>>> import py_entitymatching as em
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='ID')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='ID')
>>> ab = em.AttrEquivalenceBlocker()
>>> C = ab.block_tables(A, B, 'zipcode', 'zipcode', l_output_attrs=['name'], r_output_attrs=['name'])
>>> D1 = ab.block_candset(C, 'age', 'age')
# Include all possible tuple pairs with missing values
>>> D2 = ab.block_candset(C, 'age', 'age', allow_missing=True)
# Execute blocking using multiple cores
>>> D3 = ab.block_candset(C, 'age', 'age', n_jobs=-1)
"""
# validate data types of input parameters
self.validate_types_params_candset(candset, verbose, show_progress,
n_jobs)
# validate data types of input blocking attributes
self.validate_types_block_attrs(l_block_attr, r_block_attr)
# get and validate metadata
log_info(logger, 'Required metadata: cand.set key, fk ltable, '
'fk rtable, ltable, rtable, ltable key, rtable key',
verbose)
# # get metadata
key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key = cm.get_metadata_for_candset(
candset, logger, verbose)
# # validate metadata
cm._validate_metadata_for_candset(candset, key, fk_ltable, fk_rtable,
ltable, rtable, l_key, r_key,
logger, verbose)
# validate input parameters
self.validate_block_attrs(ltable, rtable, l_block_attr, r_block_attr)
# do blocking
# # do projection before merge
l_df = ltable[[l_key, l_block_attr]]
r_df = rtable[[r_key, r_block_attr]]
# # set index for convenience
l_df = l_df.set_index(l_key, drop=False)
r_df = r_df.set_index(r_key, drop=False)
# # determine number of processes to launch parallely
n_procs = self.get_num_procs(n_jobs, len(candset))
valid = []
if n_procs <= 1:
# single process
valid = _block_candset_split(candset, l_df, r_df, l_key, r_key,
l_block_attr, r_block_attr, fk_ltable,
fk_rtable, allow_missing, show_progress)
else:
c_splits = np.array_split(candset, n_procs)
valid_splits = Parallel(n_jobs=n_procs)(
delayed(_block_candset_split)(c_splits[i],
l_df, r_df,
l_key, r_key,
l_block_attr, r_block_attr,
fk_ltable, fk_rtable, allow_missing,
show_progress and i == len(
c_splits) - 1)
for i in range(len(c_splits)))
valid = sum(valid_splits, [])
# construct output table
if len(candset) > 0:
out_table = candset[valid]
else:
out_table = pd.DataFrame(columns=candset.columns)
# update the catalog
cm.set_candset_properties(out_table, key, fk_ltable, fk_rtable,
ltable, rtable)
# return the output table
return out_table
def block_tuples(self, ltuple, rtuple, l_block_attr, r_block_attr,
allow_missing=False):
"""Blocks a tuple pair based on attribute equivalence.
Args:
ltuple (Series): The input left tuple.
rtuple (Series): The input right tuple.
l_block_attr (string): The blocking attribute in left tuple.
r_block_attr (string): The blocking attribute in right tuple.
allow_missing (boolean): A flag to indicate whether a tuple pair
with missing value in at least one of the
blocking attributes should be blocked
(defaults to False). If this flag is set
to True, the pair will be kept if either
ltuple has missing value in l_block_attr
or rtuple has missing value in r_block_attr
or both.
Returns:
A status indicating if the tuple pair is blocked, i.e., the values
of l_block_attr in ltuple and r_block_attr in rtuple are different
(boolean).
Examples:
>>> import py_entitymatching as em
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='ID')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='ID')
>>> ab = em.AttrEquivalenceBlocker()
>>> status = ab.block_tuples(A.loc[0], B.loc[0], 'zipcode', 'zipcode')
"""
l_val, r_val = ltuple[l_block_attr], rtuple[r_block_attr]
if allow_missing:
if pd.isnull(l_val) or pd.isnull(r_val) or l_val == r_val:
return False
else:
return True
else:
if pd.notnull(l_val) and pd.notnull(r_val) and l_val == r_val:
return False
else:
return True
# ------------------------------------------------------------
# utility functions specific to attribute equivalence blocking
# validate the data types of the blocking attributes
def validate_types_block_attrs(self, l_block_attr, r_block_attr):
validate_object_type(l_block_attr, six.string_types, error_prefix='Blocking attribute name of left table')
validate_object_type(r_block_attr, six.string_types, error_prefix='Blocking attribute name of right table')
# validate the blocking attributes
def validate_block_attrs(self, ltable, rtable, l_block_attr, r_block_attr):
if l_block_attr not in ltable.columns:
raise AssertionError(
'Left block attribute is not in the left table')
if r_block_attr not in rtable.columns:
raise AssertionError(
'Right block attribute is not in the right table')
def get_pairs_with_missing_value(self, l_df, r_df, l_key, r_key,
l_block_attr, r_block_attr,
l_output_attrs, r_output_attrs,
l_output_prefix, r_output_prefix):
l_df.is_copy, r_df.is_copy = False, False # to avoid SettingWithCopyWarning
l_df['ones'] = np.ones(len(l_df))
r_df['ones'] = np.ones(len(r_df))
# find ltable records with missing value in l_block_attr
l_df_missing = l_df[pd.isnull(l_df[l_block_attr])]
# find ltable records with no missing value in l_block_attr
l_df_no_missing = l_df[pd.notnull(l_df[l_block_attr])]
# find rtable records with missing value in r_block_attr
r_df_missing = r_df[pd.isnull(r_df[r_block_attr])]
missing_pairs_1 = pd.merge(l_df_missing, r_df, left_on='ones',
right_on='ones',
suffixes=('_ltable', '_rtable'))
missing_pairs_2 = pd.merge(l_df_no_missing, r_df_missing,
left_on='ones',
right_on='ones',
suffixes=('_ltable', '_rtable'))
missing_pairs = pd.concat([missing_pairs_1, missing_pairs_2],
ignore_index=True)
retain_cols, final_cols = _output_columns(l_key, r_key,
list(missing_pairs.columns),
l_output_attrs,
r_output_attrs,
l_output_prefix,
r_output_prefix)
missing_pairs = missing_pairs[retain_cols]
missing_pairs.columns = final_cols
return missing_pairs
def _block_tables_split(l_df, r_df, l_key, r_key, l_block_attr, r_block_attr,
l_output_attrs, r_output_attrs, l_output_prefix,
r_output_prefix, allow_missing):
# perform an inner join of the two data frames with no missing values
candset = pd.merge(l_df, r_df, left_on=l_block_attr,
right_on=r_block_attr, suffixes=('_ltable', '_rtable'))
retain_cols, final_cols = _output_columns(l_key, r_key,
list(candset.columns),
l_output_attrs, r_output_attrs,
l_output_prefix, r_output_prefix)
candset = candset[retain_cols]
candset.columns = final_cols
return candset
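# Minimal sketch (hypothetical data, not part of the original module) of the
# equi-join that _block_tables_split performs with pandas.merge:
#
# import pandas as pd
# left = pd.DataFrame({'lid': [1, 2], 'zipcode': ['53703', '53706']})
# right = pd.DataFrame({'rid': [7, 8], 'zipcode': ['53706', '53711']})
# pairs = pd.merge(left, right, on='zipcode') # keeps only the lid=2 / rid=7 pair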
def _block_candset_split(c_df, l_df, r_df, l_key, r_key,
l_block_attr, r_block_attr, fk_ltable, fk_rtable,
allow_missing, show_progress):
# initialize progress bar
if show_progress:
prog_bar = pyprind.ProgBar(len(c_df))
# initialize list to keep track of valid ids
valid = []
# get the indexes for the key attributes in the candset
col_names = list(c_df.columns)
lkey_idx = col_names.index(fk_ltable)
rkey_idx = col_names.index(fk_rtable)
# create a look up table for the blocking attribute values
l_dict = {}
r_dict = {}
# iterate the rows in candset
for row in c_df.itertuples(index=False):
# # update the progress bar
if show_progress:
prog_bar.update()
# # get the value of block attributes
row_lkey = row[lkey_idx]
if row_lkey not in l_dict:
l_dict[row_lkey] = l_df.loc[row_lkey, l_block_attr]
l_val = l_dict[row_lkey]
row_rkey = row[rkey_idx]
if row_rkey not in r_dict:
r_dict[row_rkey] = r_df.loc[row_rkey, r_block_attr]
r_val = r_dict[row_rkey]
if allow_missing:
if pd.isnull(l_val) or pd.isnull(r_val) or l_val == r_val:
valid.append(True)
else:
valid.append(False)
else:
if pd.notnull(l_val) and pd.notnull(r_val) and l_val == r_val:
valid.append(True)
else:
valid.append(False)
return valid
def _output_columns(l_key, r_key, col_names, l_output_attrs, r_output_attrs,
l_output_prefix, r_output_prefix):
# retain id columns from merge
ret_cols = [_retain_names(l_key, col_names, '_ltable')]
ret_cols.append(_retain_names(r_key, col_names, '_rtable'))
# final columns in the output
fin_cols = [_final_names(l_key, l_output_prefix)]
fin_cols.append(_final_names(r_key, r_output_prefix))
# retain output attrs from merge
if l_output_attrs:
for at in l_output_attrs:
if at != l_key:
ret_cols.append(_retain_names(at, col_names, '_ltable'))
fin_cols.append(_final_names(at, l_output_prefix))
if r_output_attrs:
for at in r_output_attrs:
if at != r_key:
ret_cols.append(_retain_names(at, col_names, '_rtable'))
fin_cols.append(_final_names(at, r_output_prefix))
return ret_cols, fin_cols
def _retain_names(x, col_names, suffix):
if x in col_names:
return x
else:
return str(x) + suffix
def _final_names(col, prefix):
return prefix + str(col)
| bsd-3-clause | 11,388,610,606,417,046 | 44.359019 | 134 | 0.526641 | false |
USGSDenverPychron/pychron | pychron/core/helpers/archiver.py | 1 | 5290 | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import Range, Bool, Str, HasTraits
# ============= standard library imports ========================
import os
import shutil
from datetime import datetime, timedelta
# ============= local library imports ==========================
# from logger_setup import simple_logger
from pychron.core.helpers.logger_setup import simple_logger
MONTH_NAMES = ['JAN', 'FEB', 'MAR', 'APR', 'MAY',
'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
# logger = logging.getLogger('Archiver')
# logger.setLevel(logging.DEBUG)
# h = logging.StreamHandler()
# h.setFormatter(logging.Formatter('%(name)-40s: %(asctime)s %(levelname)-7s (%(threadName)-10s) %(message)s'))
# logger.addHandler(h)
logger = simple_logger('Archiver')
class Archiver(HasTraits):
archive_hours = Range(0, 23, 0)
archive_days = Range(0, 31, 0)
archive_months = Range(0, 12, 1)
clean_archives = Bool(True)
root = Str
def info(self, msg, *args, **kw):
logger.info(msg)
def clean(self):
self._clean()
def _clean(self):
"""
1. find all files older than archive_days+archive_hours
- move to archive
2. remove archive directories older than archive_months
"""
root = self.root
if not root:
return
archive_date = datetime.today() - timedelta(days=self.archive_days,
hours=self.archive_hours)
self.info('Files older than {} will be archived'.format(archive_date))
cnt = 0
for p in self._get_files(root):
rp = os.path.join(root, p)
result = os.stat(rp)
mt = result.st_mtime
creation_date = datetime.fromtimestamp(mt)
if creation_date < archive_date:
self._archive(root, p)
cnt += 1
if cnt > 0:
self.info('Archived {} files'.format(cnt))
if self.clean_archives:
self._clean_archive(root)
self.info('Archive cleaning complete')
def _get_files(self, root):
return (p for p in os.listdir(root)
if not p.startswith('.') and os.path.isfile(os.path.join(root, p)))
def _clean_archive(self, root):
self.info('Archives older than {} months will be deleted'.format(self.archive_months))
arch = os.path.join(root, 'archive')
rdate = datetime.today() - timedelta(days=self.archive_months * 30)
if os.path.isdir(arch):
for year_dir in self._get_files(arch):
yarch = os.path.join(arch, year_dir)
for month_dir in self._get_files(yarch):
adate = datetime(year=int(year_dir),
month=MONTH_NAMES.index(month_dir) + 1,
day=1)
if rdate > adate:
self.info('Deleting archive {}/{}'.format(year_dir,
month_dir))
shutil.rmtree(os.path.join(arch, year_dir, month_dir))
# remove empty year archives
if not os.listdir(yarch):
self.info('Deleting empty year archive {}'.format(year_dir))
os.rmdir(yarch)
def _archive(self, root, p):
# create an archive directory
today = datetime.today()
month_idx = today.month
month = MONTH_NAMES[month_idx - 1]
year = today.year
arch = os.path.join(root, 'archive')
if not os.path.isdir(arch):
os.mkdir(arch)
yarch = os.path.join(arch, str(year))
if not os.path.isdir(yarch):
os.mkdir(yarch)
mname = '{:02d}-{}'.format(month_idx, month)
march = os.path.join(yarch, mname)
if not os.path.isdir(march):
os.mkdir(march)
src = os.path.join(root, p)
dst = os.path.join(march, p)
self.info('Archiving {:15s} to ./archive/{}/{}'.format(p, year, mname))
try:
shutil.move(src, dst)
except Exception as e:
logger.warning('Archiving failed')
logger.warning(e)
# if __name__ == '__main__':
# # from pychron.core.helpers.logger_setup import logging_setup, simple_logger, new_logger
#
# logging_setup('video_main')
# c = Archiver(archive_days=1
# )
# c.root = '/Users/ross/Sandbox/video_test'
# c.clean()
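# Illustration of the assumed resulting layout: a file archived in March 2014
# ends up under <root>/archive/2014/03-MAR/<filename>, and archives older than
# archive_months are deleted by clean().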
| apache-2.0 | 1,956,422,334,840,348,400 | 34.743243 | 111 | 0.540832 | false |
PFacheris/flake8-function-definition | test/test_checks.py | 1 | 1677 | import ast
import os
import pytest
try:
from flake8.engine import pep8
except ImportError:
import pycodestyle as pep8
from flake8_function_definition.checker import FunctionDefinitionChecker
from test.utils import extract_expected_errors
def load_test_cases():
base_path = os.path.dirname(__file__)
test_case_path = os.path.join(base_path, 'test_cases')
test_case_files = os.listdir(test_case_path)
test_cases = []
for fname in test_case_files:
if not fname.endswith('.py'):
continue
fullpath = os.path.join(test_case_path, fname)
data = open(fullpath).read()
tree = ast.parse(data, fullpath)
codes, messages = extract_expected_errors(data)
test_cases.append((tree, fullpath, codes, messages))
return test_cases
@pytest.mark.parametrize(
'tree, filename, expected_codes, expected_messages',
load_test_cases()
)
def test_expected_error(tree, filename, expected_codes, expected_messages):
argv = []
for style in ['google']:
if style in filename:
argv.append('--function-definition-style=' + style)
break
parser = pep8.get_parser('', '')
FunctionDefinitionChecker.add_options(parser)
options, args = parser.parse_args(argv)
FunctionDefinitionChecker.parse_options(options)
checker = FunctionDefinitionChecker(tree, filename)
codes = []
messages = []
for lineno, col_offset, msg, instance in checker.run():
code, message = msg.split(' ', 1)
codes.append(code)
messages.append(message)
assert codes == expected_codes
assert set(messages) >= set(expected_messages)
| mit | 7,102,056,583,878,051,000 | 26.95 | 75 | 0.664281 | false |
fandemonium/code | seq_util/separate_seqs_into_different_files.py | 1 | 1224 | import sys
from Bio import SeqIO
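# Added descriptive note (inferred from the code below; file names are
# hypothetical): the script reads an organized BLAST table (sys.argv[1]) and a
# FASTA file (sys.argv[2]), keeps hits with identity >= 75 and percent
# alignment >= 80, and writes the matching '<seq_id>:<range>' sequences into
# one '<type>.txt' file per hit type.
# Example invocation:
# python separate_seqs_into_different_files.py blast_table.tsv sequences.fasta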
def seq_range(s_start, s_end):
return str(s_start) + "-" + str(s_end) if s_end - s_start > 0 else str(s_end) + "-" + str(s_start)
def parse_seqid(fasta):
seq_d = {}
for records in fasta:
header = records.id.strip().split(" ")
ID = header[0]
seq_d[ID] = records.seq
return seq_d
d = {}
## read in organized blast table with percent alignment
for n, lines in enumerate(open(sys.argv[1], 'rU')):
if n == 0:
continue
else:
line = lines.strip().split("\t")
seq_id = line[1]
identity = float(line[2])
perc_aln = float(line[-1])
type = line[-5]
start = int(line[-9])
end = int(line[-8])
if identity >= 75 and perc_aln >= 80:
if type not in d:
d[type] = [seq_id + ":" + seq_range(start, end)]
else:
d[type].append(seq_id + ":" + seq_range(start, end))
seqs = SeqIO.parse(open(sys.argv[2], 'rU'), "fasta")
fa = parse_seqid(seqs)
#for keys in fa.keys():
# print keys
for args, values in d.iteritems():
# print args
# for item in values and fa.keys():
# print ">" + item + "\n" + fa[item]
fileout = "%s.txt" % args
fp = open(fileout, 'w')
for item in values:
if item in fa.keys():
fp.write(">%s\n" % (item))
fp.write("%s\n" % (fa[item]))
| mit | -321,727,026,953,268,030 | 24.5 | 99 | 0.593954 | false |
ProgramFan/bentoo | bentoo/tools/svgconvert.py | 1 | 2977 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
'''
svgconvert - Simple tool to convert svg to pdf and png
This tool converts svg to pdf and png using phantomjs. It handles mising fonts
correctly, so svgs with unicode text objects are handled gracefuly.
'''
import argparse
import os
import subprocess
import xml.etree.ElementTree
rasterize_js = '''
"use strict";
var page = require('webpage').create(),
system = require('system'),
address, output, size, pageWidth, pageHeight;
address = system.args[1];
output = system.args[2];
page.viewportSize = { width: 600, height: 600 };
if (system.args.length > 3 && system.args[2].substr(-4) === ".pdf") {
size = system.args[3].split('*');
page.paperSize = size.length === 2 ? { width: size[0], height: size[1],
margin: '0px' }
: { format: system.args[3],
orientation: 'portrait',
margin: '1cm' };
} else if (system.args.length > 3 && system.args[3].substr(-2) === "px") {
size = system.args[3].split('*');
if (size.length === 2) {
pageWidth = parseInt(size[0], 10);
pageHeight = parseInt(size[1], 10);
page.viewportSize = { width: pageWidth, height: pageHeight };
page.clipRect = { top: 0, left: 0,
width: pageWidth, height: pageHeight };
} else {
pageWidth = parseInt(system.args[3], 10);
pageHeight = parseInt(pageWidth * 3/4, 10);
page.viewportSize = { width: pageWidth, height: pageHeight };
}
}
if (system.args.length > 4) {
page.zoomFactor = system.args[4];
}
page.open(address, function (status) {
if (status !== 'success') {
console.log('Unable to load the address!');
phantom.exit(1);
} else {
window.setTimeout(function () {
page.render(output);
phantom.exit();
}, 200);
}
});
'''
def svgconvert(svgfile, outfile):
svg = xml.etree.ElementTree.parse(svgfile)
root = svg.getroot()
assert root.tag == "{http://www.w3.org/2000/svg}svg"
width = root.attrib["width"]
height = root.attrib["height"]
rasterize_fn = os.path.join("/tmp", "svgconvert-%d.js" % os.getpid())
file(rasterize_fn, "w").write(rasterize_js)
try:
cmd = ["phantomjs", rasterize_fn, svgfile, outfile,
"%s*%s" % (width, height)]
subprocess.check_call(cmd, shell=False)
finally:
os.remove(rasterize_fn)
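# Usage sketch (hypothetical file names; assumes the phantomjs binary is on PATH):
# svgconvert('figure.svg', 'figure.pdf')
# svgconvert('figure.svg', 'figure.png')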
def main():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("svgfile",
help="SVG file to convert")
parser.add_argument("outfile",
help="Output file")
args = parser.parse_args()
svgconvert(args.svgfile, args.outfile)
if __name__ == "__main__":
main()
| mit | -1,256,241,127,294,511,400 | 31.358696 | 78 | 0.567014 | false |
tvalacarta/tvalacarta | python/main-classic/servers/canal10.py | 1 | 1649 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para canal10
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
from core import jsontools
def get_video_url( page_url , premium = False , user="" , password="", video_password="", page_data="" ):
logger.info("tvalacarta.servers.canal10 get_video_url(page_url='%s')" % page_url)
video_urls = []
data = scrapertools.cache_page(page_url)
iframe_url = scrapertools.find_single_match(data,'<iframe src="([^"]+)"')
logger.info("iframe_url="+repr(iframe_url))
headers = []
headers.append(['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'])
headers.append(['Referer',page_url])
data = scrapertools.cache_page(iframe_url, headers=headers)
# Extract the media sections for the programmes
patron = "src\:\s+'([^']+)',\s+type\:\s+'([^']+)'"
matches = re.compile(patron,re.DOTALL).findall(data)
for media_url,content_type in matches:
video_urls.append( [ "("+scrapertools.get_filename_from_url(media_url)[-4:]+") [canal10]" , "http:"+media_url ] )
for video_url in video_urls:
logger.info("tvalacarta.servers.canal10 %s - %s" % (video_url[0],video_url[1]))
return video_urls
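# Usage sketch (hypothetical programme URL): each returned entry is a
# ["(ext) [canal10]", media_url] pair.
# video_urls = get_video_url("http://www.canal10.es/some-programme")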
# Find videos from this server in the given text
def find_videos(data):
encontrados = set()
devuelve = []
return devuelve
| gpl-3.0 | -6,697,105,664,156,559,000 | 34.826087 | 127 | 0.614078 | false |
natko1412/script.module.dudehere.routines | lib/dudehere/routines/scrapers/alluc_api.py | 1 | 2538 | import sys
import os
import re
import urllib
from dudehere.routines import *
from dudehere.routines.scrapers import CommonScraper, ScraperResult
class alluc_apiScraper(CommonScraper):
def __init__(self):
self._settings = {}
self.service='alluc_api'
self.name = 'alluc.com'
self.referrer = 'https://www.alluc.com'
self.base_url = 'https://www.alluc.com/api/search'
self.username = ADDON.get_setting(self.service + '_username')
self.password = ADDON.get_setting(self.service + '_password')
self.apikey = ADDON.get_setting(self.service + '_apikey')
self.max_results = 10
def search_tvshow(self, args):
self.domains = args['domains']
results = []
uri = self.prepair_query('tvshow', args['showname'], args['season'], args['episode'], apikey=True)
data = self.request(uri, return_json=True)
results = self.process_results(data)
return results
def search_movie(self, args):
self.domains = args['domains']
results = []
uri = self.prepair_query('movie', args['title'], args['year'], apikey=True)
print uri
data = self.request(uri, return_json=True)
results = self.process_results(data)
return results
def process_results(self, data):
results = []
for result in data['result']:
title = self.normalize(result['title'])
sourcetitle = self.normalize(result['sourcetitle'])
hoster = result['hosterurls']
extension = result['extension']
size = result['sizeinternal']
extension = result['extension']
host_name = result['hostername']
hosts = result['hosterurls']
for host in hosts:
if host_name in self.domains:
url = "%s://%s" % (self.service, host['url'])
quality = self.test_quality(title+sourcetitle+self.normalize(url))
result = ScraperResult(self.service, host_name, url, title)
result.quality = quality
result.size = size
result.extension = extension
results.append(result)
return results
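# Illustrative URI built by prepair_query below (credentials and title are
# placeholders; the parameter order depends on urllib.urlencode):
# /stream/?user=USER&password=PASS&query=Some+Movie+2015&from=0&count=10&getmeta=0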
def prepair_query(self, media, *args, **kwards):
uri = "/stream/?%s"
params = {"from": 0, "count": self.max_results, "getmeta":0}
#if 'apikey' in kwards.keys():
#params['apikey'] = self.apikey
#else:
params['user'] = self.username
params['password'] = self.password
if media == 'tvshow':
params['query'] = "%s S%sE%s" % args
else:
params['query'] = "%s %s" % args
if 'host' in kwards.keys():
params['query'] = params['query'] + ' host:' + kwards['host']
if 'lang' in kwards.keys():
params['query'] = params['query'] + ' lang:' + kwards['lang']
return uri % urllib.urlencode(params) | gpl-3.0 | -4,392,441,383,119,036,000 | 32.407895 | 100 | 0.663515 | false |
opendaylight/spectrometer | server/tests/test_gitapi.py | 1 | 3107 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# @License EPL-1.0 <http://spdx.org/licenses/EPL-1.0>
##############################################################################
# Copyright (c) 2016 The Linux Foundation and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
##############################################################################
import json
from flask import url_for
def test_branches(client):
"""Test the git.branches api"""
resp = client.get(url_for('git.branches', project='spectrometer', no_cache='true'))
data = json.loads(resp.get_data(as_text=True))
assert data['branches']
# Validate that the master branch was returned
branches = data['branches']
assert ('master' in branches)
def test_commits(client):
"""Test the git.commits api"""
resp = client.get(url_for('git.commits', project='spectrometer', no_cache='true'))
data = json.loads(resp.get_data(as_text=True))
assert data['commits']
# Validate the first commit has the expected spectrometer commit hash
commits = data['commits']
assert (commits[-1].get('hash') == '8b053408ae61ce7cb67372146edd7ed5a0fd6838')
# Test commit data. Using the 2nd commit pushed into spectrometer repo
# as the test data.
commits = data['commits']
assert (commits[-2].get('author') == 'Thanh Ha')
assert (commits[-2].get('author_email') == '[email protected]')
assert (commits[-2].get('author_tz_offset') == 18000)
assert (commits[-2].get('authored_date') == 1448849259)
assert (commits[-2].get('committer') == 'Thanh Ha')
assert (commits[-2].get('committer_email') == '[email protected]')
assert (commits[-2].get('committer_tz_offset') == 18000)
assert (commits[-2].get('committed_date') == 1448849300)
assert (commits[-2].get('hash') == 'f3d7296885386ca68b074c0fe21b42c8d799f818')
lines = commits[-2].get('lines')
assert (lines.get('deletions') == 0)
assert (lines.get('files') == 1)
assert (lines.get('insertions') == 5)
assert (lines.get('lines') == 5)
assert (commits[-2].get('message').startswith('Add .gitreview'))
def test_commits_since_ref(client):
"""Test the git.commits api"""
resp = client.get(url_for(
'git.commits_since_ref',
project='spectrometer',
ref1='09e539aa4542df7839b2602e0ebe8ff1ba43c6d8',
ref2='364d571daa352de261dce8d9feb599419b08c913',
no_cache='true'))
data = json.loads(resp.get_data(as_text=True))
assert data['commits']
# Validate the first commit is ref1
commits = data['commits']
assert (commits[0].get('hash') == '09e539aa4542df7839b2602e0ebe8ff1ba43c6d8')
# Validate the last commit does not include ref2 hash and instead contains the
# commit after that.
commits = data['commits']
assert (commits[-1].get('hash') == 'c1a8458e66347ffd6e334fc26db9c52ab68afe85')
| epl-1.0 | 2,675,317,610,734,996,000 | 38.833333 | 87 | 0.641455 | false |
axxiao/tuobi | axtools/connector-i2c.py | 1 | 2524 | """
The connector for i2c
__author__ = "Alex Xiao <http://www.alexxiao.me/>"
__date__ = "2017-04-22"
__version__ = "0.5"
"""
import smbus
class connector_i2c():
'''
The standard I2c connector implementation
'''
def __init__(self,bus_num):
'''
bus_num: i2c bus number, start from 0
'''
self.bus_num=bus_num
self.bus=smbus.SMBus(bus_num)
def send(self,addr,instr,reg=0x00,mode='string'):
'''
send string command to address
addr: the address
instr: the string to be sent
'''
if mode=='string':
for ec in instr.encode(encoding='UTF-8'):
self.bus.write_byte_data(addr,reg,ec)
else:
self.bus.write_byte_data(addr,reg,instr)
def read_bytes(self,addr,length,offset=0):
return self.bus.read_i2c_block_data(addr,offset, length)
def receive(self,addr):
'''
Receive string from address
* if no new incoming message, it will repeat the last received!
'''
recv=self.bus.read_i2c_block_data(addr, 0);
rtn=''
for ec in recv:
if ec<255:
rtn+=chr(ec)
return rtn
class tuobi_i2c():
'''
The I2c connector implementation for tuobi
'''
def __init__(self,i2c_bus_num):
self.i2c_bus_num=i2c_bus_num
self.i2c=connector_i2c(i2c_bus_num)
self.last_ts=dict()
def send(self,addr,instr,reg=0x00):
'''
send string command to address
addr: the address
instr: the string to be sent
'''
if instr[-1]!=';': instr=instr+';'
self.i2c.send(addr,instr,reg)
def get(self,addr):
'''
Receive the string from address
return: cmd, data
Note: different from standard i2c interface, it will NOT repeat the last one
'''
if addr not in self.last_ts: self.last_ts[addr]=-1
recv=self.i2c.receive(addr)
rtn=None,None
if recv[-1]==';':
cmd,data,ts=recv[:-1].split(':')
if self.last_ts[addr]<int(ts):
#if newly received
rtn=cmd,data
self.last_ts[addr]=int(ts)
return rtn
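# Wire-format illustration (hypothetical reply): the device answers with
# 'cmd:data:timestamp;', e.g. 'FO:123:42;' parses to cmd='FO', data='123',
# and the timestamp 42 is compared with last_ts to drop repeated messages.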
#t=tuobi_body(0)
##t.send(0x05,'FO;')
#t.get(0x05)
##i2c=connector_i2c(0)
##address=0x05
#i2c.send(address,'SC;')
#i2c.receive(address)
# | mit | 5,864,206,459,583,922,000 | 26.445652 | 88 | 0.516244 | false |
jreinhardt/manual-labour | src/manuallabour/exporters/html.py | 1 | 3684 | # Manual labour - a library for step-by-step instructions
# Copyright (C) 2014 Johannes Reinhardt <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
"""
This module defines exporters for export of schedules to HTML and other
classes related to this task.
"""
from manuallabour.exporters.common import ScheduleExporterBase, MarkupBase
from jinja2 import Environment, FileSystemLoader
from os.path import join, exists
from shutil import rmtree,copytree
from os import remove
# pylint: disable=W0622
from codecs import open
class HTMLMarkup(MarkupBase):
"""
Markup for HTML export
"""
def __init__(self,store):
MarkupBase.__init__(self)
self.store = store
def _handle_obj(self,obj,text):
if obj["images"]:
url = self.store.get_blob_url(obj["images"][0]["blob_id"])
return "<a href='%s'>%s</a>" % (url,text or obj["name"])
else:
return text or obj["name"]
def part(self,obj,text):
return self._handle_obj(obj,text)
def tool(self,obj,text):
return self._handle_obj(obj,text)
def result(self,obj,text):
return self._handle_obj(obj,text)
def image(self,res,text):
return "<img src='%s' alt='%s'>" % (res["url"],res["alt"])
def file(self,res,text):
return "<a href='%s'>%s</a>" % (res["url"],text or res["filename"])
class SinglePageHTMLExporter(ScheduleExporterBase):
"""
Exporter to export schedules into a single HTML page.
"""
def __init__(self,layout_path):
ScheduleExporterBase.__init__(self)
self.layout_path = layout_path
self.env = Environment(loader=FileSystemLoader(layout_path))
def export(self,schedule,store,path,**kwargs):
ScheduleExporterBase.export(self,schedule,store,path,**kwargs)
#clean up output dir
if exists(join(path)):
rmtree(join(path))
#copy over stuff
copytree(self.layout_path,path)
remove(join(path,'template.html'))
with open(join(path,'out.html'),'w','utf8') as fid:
fid.write(self.render(schedule,store,**kwargs))
def render(self,schedule,store,**kwargs):
ScheduleExporterBase.render(self,schedule,store,**kwargs)
#prepare stuff for rendering
markup = HTMLMarkup(store)
bom = schedule.collect_bom(store)
sourcefiles = schedule.collect_sourcefiles(store)
parts = [ref.dereference(store) for ref in bom["parts"].values()]
tools = [ref.dereference(store) for ref in bom["tools"].values()]
steps = []
for step in schedule.steps:
steps.append(step.markup(store,markup))
template = self.env.get_template('template.html')
#pylint: disable=E1103
return template.render(
doc = kwargs,
schedule = schedule,
sourcefiles = sourcefiles,
parts = parts,
tools = tools,
steps = steps
)
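# Usage sketch (layout directory and output path are hypothetical): the layout
# directory must contain a Jinja2 'template.html', which is rendered to
# '<path>/out.html' next to a copy of the remaining layout files.
#
# exporter = SinglePageHTMLExporter('layouts/single_page')
# exporter.export(schedule, store, 'build/html', title='My instructions')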
| lgpl-2.1 | 7,247,617,497,565,202,000 | 35.117647 | 76 | 0.649566 | false |
kozak127/gherkins | app/oauth.py | 1 | 2315 | from rauth import OAuth2Service
from flask import current_app, url_for, request, redirect
class OAuthSignIn(object):
providers = None
def __init__(self, provider_name):
self.provider_name = provider_name
credentials = current_app.config['OAUTH_CREDENTIALS'][provider_name]
self.consumer_id = credentials['id']
self.consumer_secret = credentials['secret']
def authorize(self):
pass
def callback(self):
pass
def get_callback_url(self):
return url_for('oauth_callback', provider=self.provider_name,
_external=True)
@classmethod
def get_provider(self, provider_name):
if self.providers is None:
self.providers = {}
for provider_class in self.__subclasses__():
provider = provider_class()
self.providers[provider.provider_name] = provider
return self.providers[provider_name]
class FacebookSignIn(OAuthSignIn):
def __init__(self):
super(FacebookSignIn, self).__init__('facebook')
self.service = OAuth2Service(
name='facebook',
client_id=self.consumer_id,
client_secret=self.consumer_secret,
authorize_url='https://graph.facebook.com/oauth/authorize',
access_token_url='https://graph.facebook.com/oauth/access_token',
base_url='https://graph.facebook.com/'
)
def authorize(self):
return redirect(self.service.get_authorize_url(
scope='email',
response_type='code',
redirect_uri=self.get_callback_url())
)
def callback(self):
if 'code' not in request.args:
return None, None, None
oauth_session = self.service.get_auth_session(
data={'code': request.args['code'],
'grant_type': 'authorization_code',
'redirect_uri': self.get_callback_url()}
)
me = oauth_session.get('me').json()
return (
'facebook$' + me['id'],
me.get('email').split('@')[0], # Facebook does not provide
# username, so the email's user
# is used instead
me.get('email')
)
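# Configuration sketch (values are placeholders): OAuthSignIn expects the Flask
# app config to provide per-provider credentials, e.g.
# app.config['OAUTH_CREDENTIALS'] = {
#     'facebook': {'id': 'app-id', 'secret': 'app-secret'},
# }
# provider = OAuthSignIn.get_provider('facebook')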
| mit | -1,819,415,229,331,384,800 | 33.044118 | 77 | 0.558963 | false |
juanchopanza/pyhistuples | pyhistuples/pyhistoplots.py | 1 | 3940 | '''Helper functions for plotting histograms, sequences, and ntuple columns
Copyright (c) 2010 Juan Palacios [email protected]
Subject to the Lesser GNU Public License - see < http://www.gnu.org/licenses/lgpl.html>
'''
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__ = 'Juan Palacios <[email protected]>'
__version__ = '1.3'
__all__ = ('histo_plot','sequence_histo_plot','ntuple_plot', 'ntuple_column_histo', 'default_stats')
from matplotlib import pyplot
from pyhistuples.pyhistogram.histostats import poissonSigma
default_stats = ('entries','mean', 'sigma', 'integral')
def histo_plot(histogram, errorfunction = poissonSigma, stats = default_stats, show = True, **kwargs) :
'''
Plot the contents of a histogram.Histogram
'''
axis = histogram.axis
bins = histogram.filledBins()
x = [bin.centre for bin in bins]
w = [bin.height for bin in bins]
e = None
if errorfunction :
e = [errorfunction(bin) for bin in bins]
else :
e = [0 for bin in bins]
stat_text = None
if stats :
stat_text = 'Statistics'
for stat in stats :
stat_text += '\n%(st)s %(#)g' % { 'st' : stat, '#' : histogram.__getattribute__(stat)() }
return sequence_histo_plot(x,
weights=w,
errors = e,
bins=axis.nbins,
range = axis.range,
xlabel=axis.label,
stats = stat_text,
show = show,
**kwargs)
def sequence_histo_plot(x, weights=None, errors = None, bins=100, range = None, xlabel='', stats = None, show = True, histtype = 'step', **kwargs) :
'''
Plot the contents of a sequence
'''
fig = pyplot.figure()
ax = fig.add_subplot(1,1,1)
if errors == None :
ax.hist(x, weights = weights, bins=bins, range = range, histtype=histtype, **kwargs)
else :
ax.errorbar(x, weights, errors, fmt = 'o', **kwargs)
ax.set_xlim(range[0], range[1])
ax.set_ylabel('Entries')
ax.set_xlabel(xlabel)
if stats :
ax.text(0.975, 0.975, stats,
horizontalalignment='right',
verticalalignment='top',
multialignment = 'left',
transform = ax.transAxes,
bbox=dict(edgecolor = 'black', facecolor='none', alpha=1.))
if show :
fig.show()
return fig
def ntuple_plot(ntuple, tag, cut=None, **kwargs) :
'''
Plot the contents of an ntuple.Ntuple column
'''
column_histo = ntuple_column_histo(ntuple, tag, cut)
return histo_plot(column_histo, **kwargs)
def ntuple_column_histo(ntuple, tag, cut=None, bins=100) :
'''
Create and return a histogram.Histogram object constructed from the
contents of an ntuple.NTuple column.
'''
from pyhistuples.pyhistogram.histogram import Histogram, Axis
col = ntuple.column(tag,cut)
min_range = min(col)
max_range = max(col)
width = (max_range-min_range)/bins
max_range += width/2.
    min_range -= width/2.
histo = Histogram(axis=Axis(bins, min_range, max_range, label=tag))
for x in col :
histo.fill(x)
return histo
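# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): plot a small
# hand-made sequence with sequence_histo_plot. The bin centres and weights
# below are arbitrary example numbers. Note that `range` must be supplied,
# since the function calls ax.set_xlim(range[0], range[1]) unconditionally.
if __name__ == '__main__' :
    example_centres = [0.5, 1.5, 2.5, 3.5]   # bin centre positions
    example_heights = [4, 9, 2, 7]           # weight (height) of each bin
    sequence_histo_plot(example_centres,
                        weights=example_heights,
                        bins=4,
                        range=(0., 4.),
                        xlabel='x',
                        show=False)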
| gpl-3.0 | -8,844,344,140,079,734,000 | 34.178571 | 148 | 0.601523 | false |
googleapis/python-compute | google/cloud/compute_v1/services/disks/transports/base.py | 1 | 10910 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
from requests import __version__ as requests_version
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.cloud.compute_v1.types import compute
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
grpc_version=None,
rest_version=requests_version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class DisksTransport(abc.ABC):
"""Abstract transport class for Disks."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
)
DEFAULT_HOST: str = "compute.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes or self.AUTH_SCOPES
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.add_resource_policies: gapic_v1.method.wrap_method(
self.add_resource_policies,
default_timeout=None,
client_info=client_info,
),
self.aggregated_list: gapic_v1.method.wrap_method(
self.aggregated_list, default_timeout=None, client_info=client_info,
),
self.create_snapshot: gapic_v1.method.wrap_method(
self.create_snapshot, default_timeout=None, client_info=client_info,
),
self.delete: gapic_v1.method.wrap_method(
self.delete, default_timeout=None, client_info=client_info,
),
self.get: gapic_v1.method.wrap_method(
self.get, default_timeout=None, client_info=client_info,
),
self.get_iam_policy: gapic_v1.method.wrap_method(
self.get_iam_policy, default_timeout=None, client_info=client_info,
),
self.insert: gapic_v1.method.wrap_method(
self.insert, default_timeout=None, client_info=client_info,
),
self.list: gapic_v1.method.wrap_method(
self.list, default_timeout=None, client_info=client_info,
),
self.remove_resource_policies: gapic_v1.method.wrap_method(
self.remove_resource_policies,
default_timeout=None,
client_info=client_info,
),
self.resize: gapic_v1.method.wrap_method(
self.resize, default_timeout=None, client_info=client_info,
),
self.set_iam_policy: gapic_v1.method.wrap_method(
self.set_iam_policy, default_timeout=None, client_info=client_info,
),
self.set_labels: gapic_v1.method.wrap_method(
self.set_labels, default_timeout=None, client_info=client_info,
),
self.test_iam_permissions: gapic_v1.method.wrap_method(
self.test_iam_permissions,
default_timeout=None,
client_info=client_info,
),
}
@property
def add_resource_policies(
self,
) -> Callable[
[compute.AddResourcePoliciesDiskRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def aggregated_list(
self,
) -> Callable[
[compute.AggregatedListDisksRequest],
Union[compute.DiskAggregatedList, Awaitable[compute.DiskAggregatedList]],
]:
raise NotImplementedError()
@property
def create_snapshot(
self,
) -> Callable[
[compute.CreateSnapshotDiskRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def delete(
self,
) -> Callable[
[compute.DeleteDiskRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def get(
self,
) -> Callable[
[compute.GetDiskRequest], Union[compute.Disk, Awaitable[compute.Disk]]
]:
raise NotImplementedError()
@property
def get_iam_policy(
self,
) -> Callable[
[compute.GetIamPolicyDiskRequest],
Union[compute.Policy, Awaitable[compute.Policy]],
]:
raise NotImplementedError()
@property
def insert(
self,
) -> Callable[
[compute.InsertDiskRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def list(
self,
) -> Callable[
[compute.ListDisksRequest], Union[compute.DiskList, Awaitable[compute.DiskList]]
]:
raise NotImplementedError()
@property
def remove_resource_policies(
self,
) -> Callable[
[compute.RemoveResourcePoliciesDiskRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def resize(
self,
) -> Callable[
[compute.ResizeDiskRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def set_iam_policy(
self,
) -> Callable[
[compute.SetIamPolicyDiskRequest],
Union[compute.Policy, Awaitable[compute.Policy]],
]:
raise NotImplementedError()
@property
def set_labels(
self,
) -> Callable[
[compute.SetLabelsDiskRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def test_iam_permissions(
self,
) -> Callable[
[compute.TestIamPermissionsDiskRequest],
Union[
compute.TestPermissionsResponse, Awaitable[compute.TestPermissionsResponse]
],
]:
raise NotImplementedError()
__all__ = ("DisksTransport",)
| apache-2.0 | 2,032,316,623,487,839,000 | 33.745223 | 103 | 0.620623 | false |
radiocosmology/alpenhorn | tests/test_db.py | 1 | 1655 | """
test_db
-------
Tests for `alpenhorn.db` module.
"""
import peewee as pw
import pytest
import alpenhorn.db as db
import test_import as ti
# try:
# from unittest.mock import patch, call
# except ImportError:
# from mock import patch, call
class FailingSqliteDatabase(pw.SqliteDatabase):
def execute_sql(self, sql, *args, **kwargs):
self.fail ^= True
if self.fail:
self.fail_count += 1
raise pw.OperationalError("Fail every other time")
else:
return super(FailingSqliteDatabase, self).execute_sql(sql, *args, **kwargs)
def close(self):
if not self.fail:
return super(FailingSqliteDatabase, self).close()
@pytest.fixture
def fixtures(tmpdir):
db._connect()
# the database connection will fail to execute a statement every other time
db.database_proxy.obj.__class__ = type(
"FailingRetryableDatabase",
(db.RetryOperationalError, FailingSqliteDatabase),
{},
)
db.database_proxy.obj.fail_count = 0
db.database_proxy.obj.fail = False
yield ti.load_fixtures(tmpdir)
assert db.database_proxy.obj.fail_count > 0
db.database_proxy.close()
def test_schema(fixtures):
setup_fail_count = db.database_proxy.obj.fail_count
ti.test_schema(fixtures)
# we have had more failures during test_import
assert db.database_proxy.obj.fail_count > setup_fail_count
def test_model(fixtures):
setup_fail_count = db.database_proxy.obj.fail_count
ti.test_import(fixtures)
# we have had more failures during test_import
assert db.database_proxy.obj.fail_count > setup_fail_count
| mit | -1,769,590,961,203,917,000 | 24.859375 | 87 | 0.672508 | false |
pgierz/semic | netcdf_interface/echam6_calculate_air_density.py | 1 | 1919 | #!/usr/bin/env python
"""
Calculates the density of air baed upon the Ideal Gas Law and the Magnus Equation
"""
import argparse
import numpy as np
from scipy.io import netcdf
import math
import os
def calculate_density(pressure, temperature, relative_humidity):
Rl = 287.058 # R for luft (dry air), [J/kg/K]
Rd = 461.523 # R for dampf (water vapour), [J/kg/K]
def Magnus(temperature):
# See http://dx.doi.org/10.1175/1520-0450(1996)035%3C0601:IMFAOS%3E2.0.CO;2
temperature_celsius = temperature - 273.15
# if not (-40.0 < temperature_celsius) and not (temperature_celsius < 50.0):
# print("WARNING: The Magnus Equation may deliver incorrect results for the temperature used: %s" % str(temperature_celsius))
return 6.1094 * math.e**((17.625 * temperature_celsius)/(temperature_celsius + 243.04))
saturation_pressure = Magnus(temperature)
Rf = (Rl) / (1.0 - relative_humidity * (saturation_pressure/pressure) * (1 - (Rl/Rd)))
rho = pressure / (Rf * temperature)
return rho
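# Illustrative example call (not part of the original script). The numbers are
# arbitrary; for the saturation-pressure ratio to be meaningful, the pressure
# argument is assumed to be in the same unit as the Magnus result (hPa),
# temperature in Kelvin and relative humidity as a 0-1 fraction.
#
#   rho_example = calculate_density(pressure=1013.25,
#                                   temperature=288.15,
#                                   relative_humidity=0.5)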
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("ifile")
parser.add_argument("ofile")
return parser.parse_args()
if __name__ == "__main__":
args = parse_arguments()
fin = netcdf.netcdf_file(args.ifile, "r")
os.system("cp "+args.ifile+" "+args.ofile)
fout = netcdf.netcdf_file(args.ofile, "a")
rho_file = fout.createVariable("rhoa", "f", fin.variables["temp2"].dimensions)
pressure = fin.variables["aps"].data.squeeze()
temperature = fin.variables["temp2"].data.squeeze()
relative_humidity = fin.variables["rhumidity"].data[:, -1, :, :].squeeze()
# PG: Get the relative humidity of the BOTTOM layer (last). This might need to change!!
rho = calculate_density(pressure, temperature, relative_humidity)
rho_file[:] = rho
fout.sync()
fout.close()
| mit | 2,043,884,311,162,926,800 | 40.717391 | 137 | 0.655029 | false |
linvictor88/vse-lbaas-driver | quantum/manager.py | 1 | 7441 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
from oslo.config import cfg
from quantum.common.exceptions import ClassNotFound
from quantum.openstack.common import importutils
from quantum.openstack.common import lockutils
from quantum.openstack.common import log as logging
from quantum.openstack.common import periodic_task
from quantum.plugins.common import constants
LOG = logging.getLogger(__name__)
class Manager(periodic_task.PeriodicTasks):
# Set RPC API version to 1.0 by default.
RPC_API_VERSION = '1.0'
def __init__(self, host=None):
if not host:
host = cfg.CONF.host
self.host = host
super(Manager, self).__init__()
def periodic_tasks(self, context, raise_on_error=False):
self.run_periodic_tasks(context, raise_on_error=raise_on_error)
def init_host(self):
"""Handle initialization if this is a standalone service.
Child classes should override this method.
"""
pass
def after_start(self):
"""Handler post initialization stuff.
Child classes can override this method.
"""
pass
def validate_post_plugin_load():
"""Checks if the configuration variables are valid.
If the configuration is invalid then the method will return an error
message. If all is OK then it will return None.
"""
if ('dhcp_agents_per_network' in cfg.CONF and
cfg.CONF.dhcp_agents_per_network <= 0):
msg = _("dhcp_agents_per_network must be >= 1. '%s' "
"is invalid.") % cfg.CONF.dhcp_agents_per_network
return msg
def validate_pre_plugin_load():
"""Checks if the configuration variables are valid.
If the configuration is invalid then the method will return an error
message. If all is OK then it will return None.
"""
if cfg.CONF.core_plugin is None:
msg = _('Quantum core_plugin not configured!')
return msg
class QuantumManager(object):
"""Quantum's Manager class.
Quantum's Manager class is responsible for parsing a config file and
    instantiating the correct plugin that concretely implements the
quantum_plugin_base class.
The caller should make sure that QuantumManager is a singleton.
"""
_instance = None
def __init__(self, options=None, config_file=None):
# If no options have been provided, create an empty dict
if not options:
options = {}
msg = validate_pre_plugin_load()
if msg:
LOG.critical(msg)
raise Exception(msg)
# NOTE(jkoelker) Testing for the subclass with the __subclasshook__
# breaks tach monitoring. It has been removed
        #                intentionally to allow v2 plugins to be monitored
# for performance metrics.
plugin_provider = cfg.CONF.core_plugin
LOG.debug(_("Plugin location: %s"), plugin_provider)
# If the plugin can't be found let them know gracefully
try:
LOG.info(_("Loading Plugin: %s"), plugin_provider)
plugin_klass = importutils.import_class(plugin_provider)
except ClassNotFound:
LOG.exception(_("Error loading plugin"))
raise Exception(_("Plugin not found. You can install a "
"plugin with: pip install <plugin-name>\n"
"Example: pip install quantum-sample-plugin"))
self.plugin = plugin_klass()
msg = validate_post_plugin_load()
if msg:
LOG.critical(msg)
raise Exception(msg)
# core plugin as a part of plugin collection simplifies
# checking extensions
# TODO(enikanorov): make core plugin the same as
# the rest of service plugins
self.service_plugins = {constants.CORE: self.plugin}
self._load_service_plugins()
def _load_services_from_core_plugin(self):
"""Puts core plugin in service_plugins for supported services."""
LOG.debug(_("Loading services supported by the core plugin"))
# supported service types are derived from supported extensions
if not hasattr(self.plugin, "supported_extension_aliases"):
return
for ext_alias in self.plugin.supported_extension_aliases:
if ext_alias in constants.EXT_TO_SERVICE_MAPPING:
service_type = constants.EXT_TO_SERVICE_MAPPING[ext_alias]
self.service_plugins[service_type] = self.plugin
LOG.info(_("Service %s is supported by the core plugin"),
service_type)
def _load_service_plugins(self):
"""Loads service plugins.
Starts from the core plugin and checks if it supports
advanced services then loads classes provided in configuration.
"""
# load services from the core plugin first
self._load_services_from_core_plugin()
plugin_providers = cfg.CONF.service_plugins
LOG.debug(_("Loading service plugins: %s"), plugin_providers)
for provider in plugin_providers:
if provider == '':
continue
try:
LOG.info(_("Loading Plugin: %s"), provider)
plugin_class = importutils.import_class(provider)
except ClassNotFound:
LOG.exception(_("Error loading plugin"))
raise Exception(_("Plugin not found."))
plugin_inst = plugin_class()
# only one implementation of svc_type allowed
# specifying more than one plugin
# for the same type is a fatal exception
if plugin_inst.get_plugin_type() in self.service_plugins:
raise Exception(_("Multiple plugins for service "
"%s were configured"),
plugin_inst.get_plugin_type())
self.service_plugins[plugin_inst.get_plugin_type()] = plugin_inst
LOG.debug(_("Successfully loaded %(type)s plugin. "
"Description: %(desc)s"),
{"type": plugin_inst.get_plugin_type(),
"desc": plugin_inst.get_plugin_description()})
@classmethod
@lockutils.synchronized("qmlock", "qml-")
def _create_instance(cls):
if cls._instance is None:
cls._instance = cls()
@classmethod
def get_instance(cls):
# double checked locking
if cls._instance is None:
cls._create_instance()
return cls._instance
@classmethod
def get_plugin(cls):
return cls.get_instance().plugin
@classmethod
def get_service_plugins(cls):
return cls.get_instance().service_plugins
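# Illustrative sketch (not part of the original module): the usual access
# pattern once cfg.CONF.core_plugin points at a loadable plugin class is to go
# through the singleton accessors above, e.g.
#
#   from quantum.manager import QuantumManager
#   core_plugin = QuantumManager.get_plugin()
#   service_plugins = QuantumManager.get_service_plugins()
#
# Both calls lazily create the QuantumManager instance on first use.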
| apache-2.0 | 4,690,132,415,027,411,000 | 35.297561 | 78 | 0.622228 | false |
TD22057/T-Home | python/tHome/broker/connect.py | 1 | 1264 | #===========================================================================
#
# Broker connection
#
#===========================================================================
from . import config
import paho.mqtt.client as mqtt
#===========================================================================
class Client( mqtt.Client ):
"""Logging client
"""
def __init__( self, log=None ):
mqtt.Client.__init__( self )
self._logger = log
# Restore callbacks overwritten by stupid mqtt library
self.on_log = Client.on_log
def on_log( self, userData, level, buf ):
if self._logger:
self._logger.log( level, buf )
#===========================================================================
def connect( configDir, log, client=None ):
cfg = config.parse( configDir )
if client is None:
client = Client( log )
if cfg.user:
client.username_pw_set( cfg.user, cfg.password )
if cfg.ca_certs:
client.tls_set( cfg.ca_certs, cfg.certFile, cfg.keyFile )
log.info( "Connecting to broker at %s:%d" % ( cfg.host, cfg.port ) )
client.connect( cfg.host, cfg.port, cfg.keepAlive )
return client
#===========================================================================
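#===========================================================================
# Illustrative usage sketch (not part of the original module). The config
# directory path and logger name are placeholders; config.parse() reads the
# broker settings from that directory, and loop_forever() is the standard
# paho-mqtt network loop on the returned client.
#
# import logging
# from tHome.broker import connect
#
# log = logging.getLogger( "broker" )
# client = connect.connect( "/path/to/configDir", log )
# client.loop_forever()
#===========================================================================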
| bsd-2-clause | -1,453,303,299,164,701,700 | 27.727273 | 76 | 0.432753 | false |
notconfusing/ddc_graph | ddc_graph.py | 1 | 8813 |
# In[165]:
import xml.etree.ElementTree as ET
from collections import defaultdict
import networkx as nx
import matplotlib.pyplot as plt
import pyddc
import operator
import math
get_ipython().magic(u'pylab inline')
# Out[165]:
# Populating the interactive namespace from numpy and matplotlib
#
# We will be looking to populate a networkX DiGraph.
# In[2]:
G = nx.DiGraph()
# In[3]:
ET.register_namespace('marc', 'http://www.loc.gov/MARC21/slim')
tree = ET.parse('CLASS_23eng_marc_webdewey_20131020.xml')
root = tree.getroot()
# We're going to need these helper functions.
# In[6]:
def valid_record(record):
#is there a control field 008 whose byte 8 should be a
valid = False
for field in record:
if field.tag == '{http://www.loc.gov/MARC21/slim}controlfield' and field.attrib['tag'] == '008':
if field.text[8] == 'a':
valid = True
return valid
def get_subfields(field, letters):
ret_subfields = {letter: None for letter in letters}
for subfield in field:
if subfield.attrib['code'] in letters:
ddc = pyddc.classification(subfield.text)
if ddc.is_sane() and ddc.len() >= 3:
ret_subfields[subfield.attrib['code']] = ddc
return ret_subfields
def make_nodes(start_letter, end_letter):
k_nodes = list()
#do we have a range?
if start_letter and end_letter:
try:
k = pyddc.classification_range(start_letter, end_letter)
k_nodes = k.ids()
except AssertionError:
#if the range is too large (more than ten) just take the first one
k_nodes = [start_letter.id]
except ValueError:
#not sure what to do with letters.
raise ValueError
#we might just have one
elif start_letter and not end_letter:
k_nodes = [start_letter.id]
return k_nodes
def make_relationship(a_ddcs, b_ddcs, reltype):
try:
m_nodes = make_nodes(a_ddcs['a'], a_ddcs['c'])
#maybe there's a better way to detect if we should be checking for e and f
if b_ddcs.keys()[0] == 'e':
n_nodes = make_nodes(b_ddcs['e'], b_ddcs['f'])
else:
n_nodes = make_nodes(b_ddcs['a'], b_ddcs['c'])
except KeyError:
print 'addc', a_ddcs,
print 'bddc', b_ddcs
#put all the connections in our graph
for m_node in m_nodes:
for n_node in n_nodes:
G.add_node(m_node)
G.add_node(n_node)
G.add_edge(m_node, n_node, rel=reltype)
# In[7]:
valid_records = filter(lambda record: valid_record(record), root)
# In[10]:
get_ipython().magic(u'pinfo valid_records')
# Interesting, so every one of these records has a 153 field. I think that means it is its canonical ddc.
# We have possibilites.
#
# 1. Does 153 have 'e' or 'f'?
# 1. make_edge(reltionship = hiearchy, 153'ac', 153'ef')
# 2. Is there a 253_0?
# 1. make_edge(relationship = hiearchy, 253'ac', 153'ac')
# 3. Is there a 253_2?
#
# _there may be multiple $a fields_
# 1. make_edges(relationship = related, [253'a'], 153'ac')
# In[11]:
cases = defaultdict(int)
for record in valid_records:
#our is out internal important bits of the record
r = {num : None for num in ['153_base','153', '253_0', '253_2']}
for field in record:
if field.tag == '{http://www.loc.gov/MARC21/slim}datafield':
num = field.attrib['tag']
ind1 = field.attrib['ind1']
if num =='153':
r[num+'_base'] = get_subfields(field, ['a','c'])
r[num] = get_subfields(field, ['e','f'])
if num == '253':
if ind1 == '0':
r[num +'_'+ ind1] = get_subfields(field, ['a','c'])
if ind1 == '2':
r[num +'_'+ ind1] = get_subfields(field, ['a','c'])
#we are expecting a gaurantee at this point that we have a 153 with a and maybe c
if r['153']['e']:
cases[1] += 1
make_relationship(r['153_base'], r['153'], reltype = 'hier')
if r['253_0']:
cases[2] += 1
make_relationship(r['153_base'], r['253_0'], reltype = 'hier')
if r['253_2']:
cases[3] += 1
make_relationship(r['153_base'], r['253_2'], reltype = 'related')
print cases
# Out[11]:
# defaultdict(<type 'int'>, {1: 45480, 2: 5959, 3: 5918})
#
# Ok so now we have __r__ as our main data
# make_nodes takes a start and end like 'a' and 'c' or 'e' and 'f'; it returns the list of ids in the range between them, or, if the end is None, just a one-element list with the start id
# In[12]:
filter(lambda node: len(node) == 3 and node[1:3] == '00', G.nodes())
# Out[12]:
# ['600', '800', '200', '400', '700', '900', '300', '500']
# Oddly I can't see why there should be no elements that have '000' or '100' as their parent
# In[14]:
nx.shortest_path(G,'075','005')
# Out[14]:
# ['075', '616.8', '002', '368.6', '006', '005']
# In[31]:
G['075']
# Out[31]:
# {'028': {'rel': 'hier'},
# '070': {'rel': 'related'},
# '071': {'rel': 'hier'},
# '072': {'rel': 'hier'},
# '073': {'rel': 'hier'},
# '074': {'rel': 'hier'},
# '075': {'rel': 'hier'},
# '076': {'rel': 'hier'},
# '077': {'rel': 'hier'},
# '078': {'rel': 'hier'},
# '079': {'rel': 'hier'},
# '093': {'rel': 'hier'},
# '094': {'rel': 'hier'},
# '095': {'rel': 'hier'},
# '096': {'rel': 'hier'},
# '097': {'rel': 'hier'},
# '098': {'rel': 'hier'},
# '099': {'rel': 'hier'},
# '280': {'rel': 'hier'},
# '324.24': {'rel': 'hier'},
# '324.25': {'rel': 'hier'},
# '324.26': {'rel': 'hier'},
# '324.27': {'rel': 'hier'},
# '324.28': {'rel': 'hier'},
# '324.29': {'rel': 'hier'},
# '328.4': {'rel': 'hier'},
# '328.5': {'rel': 'hier'},
# '328.6': {'rel': 'hier'},
# '328.7': {'rel': 'hier'},
# '328.8': {'rel': 'hier'},
# '328.9': {'rel': 'hier'},
# '616.1': {'rel': 'hier'},
# '616.2': {'rel': 'hier'},
# '616.3': {'rel': 'hier'},
# '616.4': {'rel': 'hier'},
# '616.5': {'rel': 'hier'},
# '616.6': {'rel': 'hier'},
# '616.7': {'rel': 'hier'},
# '616.8': {'rel': 'hier'},
# '616.9': {'rel': 'hier'},
# '617': {'rel': 'hier'},
# '618.1': {'rel': 'hier'},
# '618.2': {'rel': 'hier'},
# '618.3': {'rel': 'hier'},
# '618.4': {'rel': 'hier'},
# '618.5': {'rel': 'hier'},
# '618.6': {'rel': 'hier'},
# '618.7': {'rel': 'hier'},
# '618.8': {'rel': 'hier'},
# '790.132': {'rel': 'related'}}
# In[90]:
def neighbors_n(G, root, n):
E = nx.DiGraph()
def n_tree(tree, n_remain):
neighbors_dict = G[tree]
for neighbor, relations in neighbors_dict.iteritems():
E.add_edge(tree, neighbor, rel=relations['rel'])
#you can use this map if you want to retain functional purity
#map(lambda neigh_rel: E.add_edge(tree, neigh_rel[0], rel=neigh_rel[1]['rel']), neighbors_dict.iteritems() )
neighbors = list(neighbors_dict.iterkeys())
n_forest(neighbors, n_remain= (n_remain - 1))
def n_forest(forest, n_remain):
if n_remain <= 0:
return
else:
map(lambda tree: n_tree(tree, n_remain=n_remain), forest)
n_forest( [root] , n)
return E
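# (Illustrative, not from the original notebook) neighbors_n can also be used on
# its own to pull an n-hop ego subgraph out of G before plotting, e.g.
#
# one_hop = neighbors_n(G, '075', 1)
# print len(one_hop.nodes()), len(one_hop.edges())
#
# '075' is just the same example class used in the cells below.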
# In[107]:
nx.degree(E, '075')
# Out[107]:
# 51
# In[182]:
def draw_n_hops(G, root, n):
E = neighbors_n(G, root, n)
edge_rels = map(lambda edge_tup: edge_tup[2]['rel'], E.edges(data=True) )
edge_colors = map(lambda edge: 'grey' if edge == 'hier' else 'green', edge_rels)
max_node_size = max( map(lambda node: nx.degree(E, node), E.nodes()))
node_lognorms = map(lambda node: (math.log(nx.degree(E, node)) / math.log(max_node_size) ) , E.nodes() )
node_sizes = map(lambda norm: norm * 1500, node_lognorms)
node_colors= node_lognorms
pos=nx.graphviz_layout(E,prog="twopi",root=root)
plt.figure(1,figsize=(20,20))
nx.draw(E, pos=pos, node_size=node_sizes, node_color=node_colors, edge_color=edge_colors)
# In[185]:
draw_n_hops(G, '075', 2)
# Out[185]:
# image file:
# In[13]:
subgraphs = nx.connected_component_subgraphs(UG)
UG_big = subgraphs[0]
# In[21]:
mains = filter(lambda node: len(node) == 3 and node[1:3] == '00', UG_big.nodes())
# In[18]:
for main in mains:
UG_big.add_edge('start',main, rel='hier')
# In[7]:
any(map(lambda n: n==4, map(lambda d: len(d), deplist)))
# Out[7]:
# False
| mit | 6,523,096,059,266,003,000 | 25.229167 | 167 | 0.52275 | false |
zz1217/exchange_rate_scraper | exchange_rate_scraper/pipelines.py | 1 | 7332 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
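#
# For example (illustrative only -- the priority numbers are arbitrary), in this
# project's settings.py:
#
# ITEM_PIPELINES = {
#     'exchange_rate_scraper.pipelines.MySQLStoreReutersPipeline': 300,
#     'exchange_rate_scraper.pipelines.MySQLStoreOandaPipeline': 301,
#     'exchange_rate_scraper.pipelines.MySQLStoreXePipeline': 302,
# }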
from datetime import datetime
from exchange_rate_scraper import settings
from scrapy.exceptions import DropItem
from twisted.enterprise import adbapi
from twisted.python import log
import MySQLdb
import MySQLdb.cursors
class ExchangeRateScraperPipeline(object):
def process_item(self, item, spider):
return item
class MySQLStoreReutersPipeline(object):
def __init__(self, dbpool):
self.dbpool = dbpool
@classmethod
def from_settings(cls, settings):
dbargs = dict(
host=settings['MYSQL']['HOST'],
port=settings['MYSQL']['PORT'],
db=settings['MYSQL']['DBNAME'],
user=settings['MYSQL']['USER'],
passwd=settings['MYSQL']['PWD'],
charset='utf8',
cursorclass=MySQLdb.cursors.DictCursor,
use_unicode=True
)
dbpool = adbapi.ConnectionPool('MySQLdb', **dbargs)
return cls(dbpool)
def process_item(self, item, spider):
        # Ignore data earlier than the target start date
date_fmt = '%Y-%m-%d'
if datetime.strptime(item['date'], date_fmt) < datetime.strptime(spider.meta[0]['from_date'], date_fmt):
raise DropItem("Date too old %s" % item)
d = self.dbpool.runInteraction(self._do_upinsert, item, spider)
d.addErrback(self._handle_error, item, spider)
d.addBoth(lambda _: item)
return d
def _do_upinsert(self, conn, item, spider):
#print '\n=============\n'
#print """
# UPDATE reuters SET high=%s, low=%s, open=%s, close=%s WHERE from_currency = '%s' AND to_currency = '%s' AND date = '%s'
# """% (item['high_rate'], item['low_rate'], item['open_rate'], item['close_rate'], item['from_currency'], item['to_currency'], item['date'])
#print '\n=============\n'
        # Strange, it just doesn't work when using a comma (parameterized args)...
conn.execute(
#"""
#UPDATE reuters SET high=%s, low=%s, open=%s, close=%s WHERE from_currency = '%s' AND to_currency = '%s' AND date = '%s'
#""", (item['high_rate'], item['low_rate'], item['open_rate'], item['close_rate'], item['from_currency'], item['to_currency'], item['date'])
"""
UPDATE reuters SET high=%s, low=%s, open=%s, close=%s WHERE from_currency = '%s' AND to_currency = '%s' AND date = '%s'
"""% (item['high_rate'], item['low_rate'], item['open_rate'], item['close_rate'], item['from_currency'], item['to_currency'], item['date'])
)
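        # Note (added for clarity): the commented-out parameterized variant above
        # most likely failed because its placeholders are quoted ('%s'); with
        # MySQLdb the driver quotes values itself, so the usual form would be
        # unquoted %s markers plus a parameter tuple, roughly:
        #
        # conn.execute(
        #     "UPDATE reuters SET high=%s, low=%s, open=%s, close=%s "
        #     "WHERE from_currency=%s AND to_currency=%s AND date=%s",
        #     (item['high_rate'], item['low_rate'], item['open_rate'],
        #      item['close_rate'], item['from_currency'], item['to_currency'],
        #      item['date']))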
    def _handle_error(self, failure, item, spider):
        log.err(failure)
class MySQLStoreOandaPipeline(object):
def __init__(self, dbpool):
self.dbpool = dbpool
@classmethod
def from_settings(cls, settings):
dbargs = dict(
host=settings['MYSQL']['HOST'],
port=settings['MYSQL']['PORT'],
db=settings['MYSQL']['DBNAME'],
user=settings['MYSQL']['USER'],
passwd=settings['MYSQL']['PWD'],
charset='utf8',
cursorclass=MySQLdb.cursors.DictCursor,
use_unicode=True
)
dbpool = adbapi.ConnectionPool('MySQLdb', **dbargs)
return cls(dbpool)
def process_item(self, item, spider):
        # Ignore data earlier than the target start date
# date_fmt = '%Y-%m-%d'
# if datetime.strptime(item['date'], date_fmt) < datetime.strptime(spider.meta[0]['from_date'], date_fmt):
# raise DropItem("Date too old %s" % item)
d = self.dbpool.runInteraction(self._do_upinsert, item, spider)
d.addErrback(self._handle_error, item, spider)
d.addBoth(lambda _: item)
return d
def _do_upinsert(self, conn, item, spider):
#print '\n=============\n'
#print """
# UPDATE reuters SET high=%s, low=%s, open=%s, close=%s WHERE from_currency = '%s' AND to_currency = '%s' AND date = '%s'
# """% (item['high_rate'], item['low_rate'], item['open_rate'], item['close_rate'], item['from_currency'], item['to_currency'], item['date'])
#print '\n=============\n'
print '\n=============111111111\n'
        # Strange, it just doesn't work when using a comma (parameterized args)...
conn.execute(
#"""
#UPDATE reuters SET high=%s, low=%s, open=%s, close=%s WHERE from_currency = '%s' AND to_currency = '%s' AND date = '%s'
#""", (item['high_rate'], item['low_rate'], item['open_rate'], item['close_rate'], item['from_currency'], item['to_currency'], item['date'])
"""
UPDATE oanda SET value=%s WHERE from_currency = '%s' AND to_currency = '%s' AND date = '%s'
"""% (item['value'], item['from_currency'], item['to_currency'], item['date'])
)
    def _handle_error(self, failure, item, spider):
        log.err(failure)
class MySQLStoreXePipeline(object):
def __init__(self, dbpool):
self.dbpool = dbpool
@classmethod
def from_settings(cls, settings):
dbargs = dict(
host=settings['MYSQL']['HOST'],
port=settings['MYSQL']['PORT'],
db=settings['MYSQL']['DBNAME'],
user=settings['MYSQL']['USER'],
passwd=settings['MYSQL']['PWD'],
charset='utf8',
cursorclass=MySQLdb.cursors.DictCursor,
use_unicode=True
)
dbpool = adbapi.ConnectionPool('MySQLdb', **dbargs)
return cls(dbpool)
def process_item(self, item, spider):
        # Ignore data earlier than the target start date
# date_fmt = '%Y-%m-%d'
# if datetime.strptime(item['date'], date_fmt) < datetime.strptime(spider.meta[0]['date'], date_fmt):
# raise DropItem("Date too old %s" % item)
d = self.dbpool.runInteraction(self._do_upinsert, item, spider)
d.addErrback(self._handle_error, item, spider)
d.addBoth(lambda _: item)
return d
def _do_upinsert(self, conn, item, spider):
#print '\n=============\n'
#print """
# UPDATE reuters SET high=%s, low=%s, open=%s, close=%s WHERE from_currency = '%s' AND to_currency = '%s' AND date = '%s'
# """% (item['high_rate'], item['low_rate'], item['open_rate'], item['close_rate'], item['from_currency'], item['to_currency'], item['date'])
#print '\n=============\n'
print '\n=============111111111\n'
        # Strange, it just doesn't work when using a comma (parameterized args)...
conn.execute(
#"""
#UPDATE reuters SET high=%s, low=%s, open=%s, close=%s WHERE from_currency = '%s' AND to_currency = '%s' AND date = '%s'
#""", (item['high_rate'], item['low_rate'], item['open_rate'], item['close_rate'], item['from_currency'], item['to_currency'], item['date'])
"""
UPDATE xe SET from_to_value=%s, to_from_value=%s WHERE from_currency = '%s' AND to_currency = '%s' AND date = '%s'
"""% (item['from_to_value'], item['to_from_value'], item['from_currency'], item['to_currency'], item['date'])
)
    def _handle_error(self, failure, item, spider):
        log.err(failure)
| mit | 8,735,536,536,062,566,000 | 38.9 | 152 | 0.546784 | false |
shareactorIO/pipeline | cli/pio/pio.py | 1 | 73821 | #-*- coding: utf-8 -*-
__version__ = "0.38"
'''
Requirements
python3, kops, ssh-keygen, awscli, packaging, appdirs, gcloud, azure-cli, helm, kubectl, kubernetes.tar.gz
# References: https://github.com/kubernetes-incubator/client-python/blob/master/kubernetes/README.md
# https://github.com/kubernetes/kops/blob/master/docs/aws.md
'''
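# Illustrative CLI flow (assembled from the help/print messages further down in
# this file; exact flag spelling may differ in your environment since the
# commands are exposed through python-fire):
#
#   pio init-cluster --kube-cluster-context <context> --kube-namespace default
#   pio init-model --model-server-url http://<host> --model-type tensorflow \
#       --model-namespace default --model-name mymodel
#   pio deploy --model-version 1 --model-path ./model
#   pio predict --model-version 1 --model-input-filename ./input.json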
import warnings
import requests
import fire
import tarfile
import os
import sys
import kubernetes.client as kubeclient
from kubernetes.client.rest import ApiException
import kubernetes.config as kubeconfig
import pick
import yaml
import json
import dill as pickle
from git import Repo
from pprint import pprint
import subprocess
class PioCli(object):
_kube_deploy_registry = {'jupyter': (['jupyterhub.ml/jupyterhub-deploy.yaml'], []),
'spark': (['apachespark.ml/master-deploy.yaml'], ['spark-worker', 'metastore']),
'spark-worker': (['apachespark.ml/worker-deploy.yaml'], []),
'metastore': (['metastore.ml/metastore-deploy.yaml'], ['mysql']),
'hdfs': (['hdfs.ml/namenode-deploy.yaml'], []),
'redis': (['keyvalue.ml/redis-master-deploy.yaml'], []),
'presto': (['presto.ml/master-deploy.yaml',
'presto.ml/worker-deploy.yaml'], ['metastore']),
'presto-ui': (['presto.ml/ui-deploy.yaml'], ['presto']),
'airflow': (['scheduler.ml/airflow-deploy.yaml'], ['mysql', 'redis']),
'mysql': (['sql.ml/mysql-master-deploy.yaml'], []),
'www': (['web.ml/home-deploy.yaml'], []),
'zeppelin': (['zeppelin.ml/zeppelin-deploy.yaml'], []),
'zookeeper': (['zookeeper.ml/zookeeper-deploy.yaml'], []),
'elasticsearch': (['elasticsearch.ml/elasticsearch-2-3-0-deploy.yaml'], []),
'kibana': (['kibana.ml/kibana-4-5-0-deploy.yaml'], ['elasticsearch'], []),
'kafka': (['stream.ml/kafka-0.10-deploy.yaml'], ['zookeeper']),
'cassandra': (['cassandra.ml/cassandra-deploy.yaml'], []),
'prediction-jvm': (['prediction.ml/jvm-deploy.yaml'], []),
'prediction-python3': (['prediction.ml/python3-deploy.yaml'], []),
'prediction-tensorflow': (['prediction.ml/tensorflow-deploy.yaml'], []),
'turbine': (['dashboard.ml/turbine-deploy.yaml'], []),
'hystrix': (['dashboard.ml/hystrix-deploy.yaml'], []),
'weave-scope-app': (['dashboard.ml/weavescope/scope-1.3.0.yaml'], []),
'kubernetes-dashboard': (['dashboard.ml/kubernetes-dashboard/v1.6.0.yaml'], []),
'heapster': (['metrics.ml/monitoring-standalone/v1.3.0.yaml'], []),
'route53-mapper': (['dashboard.ml/route53-mapper/v1.3.0.yml'], []),
'kubernetes-logging': (['dashboard.ml/logging-elasticsearch/v1.5.0.yaml'], []),
}
_kube_svc_registry = {'jupyter': (['jupyterhub.ml/jupyterhub-svc.yaml'], []),
'spark': (['apachespark.ml/master-svc.yaml'], ['spark-worker', 'metastore']),
'spark-worker': (['apachespark.ml/worker-svc.yaml'], []),
'metastore': (['metastore.ml/metastore-svc.yaml'], ['mysql']),
'hdfs': (['hdfs.ml/namenode-svc.yaml'], []),
'redis': (['keyvalue.ml/redis-master-svc.yaml'], []),
'presto': (['presto.ml/presto-master-svc.yaml',
'presto.ml/presto-worker-svc.yaml'], ['metastore']),
'presto-ui': (['presto.ml/presto-ui-svc.yaml'], ['presto']),
'airflow': (['scheduler.ml/airflow-svc.yaml'], ['mysql', 'redis']),
'mysql': (['sql.ml/mysql-master-svc.yaml'], []),
'www': (['web.ml/home-svc.yaml'], []),
'zeppelin': (['zeppelin.ml/zeppelin-svc.yaml'], []),
'zookeeper': (['zookeeper.ml/zookeeper-svc.yaml'], []),
'kafka': (['stream.ml/kafka-0.10-svc.yaml'], ['zookeeper']),
'cassandra': (['cassandra.ml/cassandra-svc.yaml'], []),
'prediction-jvm': (['prediction.ml/jvm-svc.yaml'], []),
'prediction-python3': (['prediction.ml/python3-svc.yaml'], []),
'prediction-tensorflow': (['prediction.ml/tensorflow-svc.yaml'], []),
'turbine': (['dashboard.ml/turbine-svc.yaml'], []),
'hystrix': (['dashboard.ml/hystrix-svc.yaml'], []),
}
def _get_default_pio_api_version(self):
return 'v1'
def _get_default_pio_git_home(self):
return 'https://github.com/fluxcapacitor/pipeline/'
def _get_default_pio_git_version(self):
return 'v1.2.0'
def get_config_value(self,
config_key):
print("")
pprint(self._get_full_config())
print("")
return self._get_full_config()[config_key]
def set_config_value(self,
config_key,
config_value):
print("config_key: '%s'" % config_key)
self._merge_config_dict({config_key: config_value})
print("config_value: '%s'" % self._get_full_config()[config_key])
self._merge_config_dict({config_key: config_value})
print("")
pprint(self._get_full_config())
print("")
def _merge_config_dict(self,
config_dict):
pio_api_version = self._get_full_config()['pio_api_version']
config_file_base_path = os.path.expanduser("~/.pio/")
expanded_config_file_base_path = os.path.expandvars(config_file_base_path)
expanded_config_file_base_path = os.path.expanduser(expanded_config_file_base_path)
expanded_config_file_base_path = os.path.abspath(expanded_config_file_base_path)
expanded_config_file_path = os.path.join(expanded_config_file_base_path, 'config')
print("")
pprint("Merging dict '%s' with existing config '%s'." % (config_dict, expanded_config_file_path))
existing_config_dict = self._get_full_config()
# >= Python3.5
# {**existing_config_dict, **config_dict}
existing_config_dict.update(config_dict)
new_config_yaml = yaml.dump(existing_config_dict, default_flow_style=False, explicit_start=True)
with open(expanded_config_file_path, 'w') as fh:
fh.write(new_config_yaml)
print(new_config_yaml)
print("")
def _get_full_config(self):
config_file_base_path = os.path.expanduser("~/.pio/")
config_file_base_path = os.path.expandvars(config_file_base_path)
config_file_base_path = os.path.expanduser(config_file_base_path)
config_file_base_path = os.path.abspath(config_file_base_path)
config_file_filename = os.path.join(config_file_base_path, 'config')
if not os.path.exists(config_file_filename):
if not os.path.exists(config_file_base_path):
os.makedirs(config_file_base_path)
initial_config_dict = {'pio_api_version': self._get_default_pio_api_version(),
'pio_git_home': self._get_default_pio_git_home(),
'pio_git_version': self._get_default_pio_git_version()}
initial_config_yaml = yaml.dump(initial_config_dict, default_flow_style=False, explicit_start=True)
print("")
print("Default config created at '%s'. Override with 'pio init-pio'" % config_file_filename)
print("")
with open(config_file_filename, 'w') as fh:
fh.write(initial_config_yaml)
pprint(initial_config_dict)
# Update the YAML
with open(config_file_filename, 'r') as fh:
existing_config_dict = yaml.load(fh)
return existing_config_dict
print("\n")
def config(self):
pprint(self._get_full_config())
print("")
def proxy(self,
app_name,
local_port=None,
app_port=None):
self.tunnel(app_name, local_port, app_port)
def tunnel(self,
app_name,
local_port=None,
app_port=None):
pio_api_version = self._get_full_config()['pio_api_version']
try:
kube_cluster_context = self._get_full_config()['kube_cluster_context']
kube_namespace = self._get_full_config()['kube_namespace']
except:
print("")
print("Cluster needs to be configured with 'pio init-cluster'.")
print("")
return
pod = self._get_pod_by_app_name(app_name)
if not pod:
print("")
print("App '%s' is not running." % app_name)
print("")
return
if not app_port:
svc = self._get_svc_by_app_name(app_name)
if not svc:
print("")
print("App '%s' proxy port cannot be found." % app_name)
print("")
return
app_port = svc.spec.ports[0].target_port
if not local_port:
print("")
print("Proxying local port '<randomly-chosen>' to app '%s' port '%s' using pod '%s'." % (app_port, app_name, pod.metadata.name))
print("")
print("Use 'http://127.0.0.1:<randomly-chosen>' to access app '%s' on port '%s'." % (app_name, app_port))
print("")
print("If you break out of this terminal, your proxy session will end.")
print("")
subprocess.call('kubectl port-forward %s :%s' % (pod.metadata.name, app_port), shell=True)
print("")
else:
print("")
print("Proxying local port '%s' to app '%s' port '%s' using pod '%s'." % (local_port, app_port, app_name, pod.metadata.name))
print("")
print("Use 'http://127.0.0.1:%s' to access app '%s' on port '%s'." % (local_port, app_name, app_port))
print("")
print("If you break out of this terminal, your proxy session will end.")
print("")
subprocess.call('kubectl port-forward %s %s:%s' % (pod.metadata.name, local_port, app_port), shell=True)
print("")
# TODO: Start an airflow job
def flow(self,
flow_name):
print("")
print("Submit airflow coming soon!")
# TODO: Submit a spark job
def submit(self,
replicas):
print("Submit spark job coming soon!")
def top(self,
app_name=None):
self.system(app_name)
def system(self,
app_name=None):
pio_api_version = self._get_full_config()['pio_api_version']
try:
kube_cluster_context = self._get_full_config()['kube_cluster_context']
kube_namespace = self._get_full_config()['kube_namespace']
except:
print("")
print("Cluster needs to be configured 'pio init-cluster'.")
print("")
return
if (app_name):
print("")
print("Retrieving system resources used by app '%s'." % app_name)
print("")
self._get_app_resources(app_name)
print("")
print("Retrieving system resources for cluster.")
print("")
self._get_cluster_resources()
else:
print("")
print("Retrieving only system resources for cluster. Use '--app-name' for app-level, as well.")
print("")
self._get_cluster_resources()
print("")
def _get_cluster_resources(self):
subprocess.call("kubectl top node", shell=True)
print("")
print("If you see an error above, you need to start Heapster with 'pio start heapster'.")
print("")
def _get_app_resources(self,
app_name):
pio_api_version = self._get_full_config()['pio_api_version']
try:
kube_cluster_context = self._get_full_config()['kube_cluster_context']
kube_namespace = self._get_full_config()['kube_namespace']
except:
print("")
print("Cluster needs to be configured 'pio init-cluster'.")
print("")
return
kubeconfig.load_kube_config()
kubeclient_v1 = kubeclient.CoreV1Api()
kubeclient_v1_beta1 = kubeclient.ExtensionsV1beta1Api()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
response = kubeclient_v1.list_namespaced_pod(namespace=kube_namespace, watch=False, pretty=True)
for pod in response.items:
if (app_name in pod.metadata.name):
subprocess.call("kubectl top pod %s" % pod.metadata.name, shell=True)
print("")
def join(self,
federation):
print("")
print("Federation joining coming soon!")
print("")
def up(self,
provider='aws',
ssh_public_key='~/.ssh/id_rsa.pub',
initial_worker_count='1',
# min_worker_count='1',
# max_worker_count='1',
worker_zones='us-west-2a,us-west-2b',
worker_type='r2.2xlarge',
master_zones='us-west-2c',
master_type='t2.medium',
# dns_zone='',
# vpc='',
kubernetes_version='1.6.2',
kubernetes_image='kope.io/k8s-1.5-debian-jessie-amd64-hvm-ebs-2017-01-09'):
try:
kops_cluster_name = self._get_full_config()['kops_cluster_name']
kops_state_store = self._get_full_config()['kops_state_store']
            if not ssh_public_key:
subprocess.call("ssh-keygen -t rsa", shell=True)
ssh_public_key = '~/.ssh/id_rsa.pub'
subprocess.call("aws configure", shell=True)
subprocess.call("aws s3 mb %s" % kops_state_store, shell=True)
subprocess.call("kops create cluster --ssh-public-key %s --node-count %s --zones %s --master-zones %s --node-size %s --master-size %s --kubernetes-version %s --image %s --state %s --name %s" % (ssh_public_key, initial_worker_count, worker_zones, master_zones, worker_type, master_type, kubernetes_version, kubernetes_image, kops_state_store, kops_cluster_name), shell=True)
subprocess.call("kops update --state %s cluster %s --yes" % (kops_state_store, kops_cluster_name), shell=True)
subprocess.call("kubectl config set-cluster %s --insecure-skip-tls-verify=true" % kops_cluster_name)
print("")
print("Cluster is being created. This may take a few mins.")
print("")
print("Once the cluster is up, run 'kubectl cluster-info' for the Kubernetes-dashboard url.")
print("Username is 'admin'.")
print("Password can be retrieved with 'kops get secrets kube --type secret -oplaintext --state %s'" % kops_state_store)
except:
print("")
print("Kops needs to be configured with 'pio init-kops'.")
print("")
return
def init_kops(self,
kops_cluster_name,
kops_state_store):
config_dict = {'kops_cluster_name': kops_cluster_name,
'kops_state_store': kops_state_store}
self._merge_config_dict(config_dict)
print("")
pprint(self._get_full_config())
print("")
def instancegroups(self):
try:
kops_cluster_name = self._get_full_config()['kops_cluster_name']
kops_state_store = self._get_full_config()['kops_state_store']
subprocess.call("kops --state %s --name %s get instancegroups" % (kops_state_store, kops_cluster_name), shell=True)
print("")
except:
print("")
print("Kops needs to be configured with 'pio init-kops'.")
print("")
return
def clusters(self):
try:
kops_cluster_name = self._get_full_config()['kops_cluster_name']
kops_state_store = self._get_full_config()['kops_state_store']
subprocess.call("kops --state %s --name %s get clusters" % (kops_state_store, kops_cluster_name), shell=True)
print("")
except:
print("")
print("Kops needs to be configured with 'pio init-kops'.")
print("")
return
def federations(self):
try:
kops_cluster_name = self._get_full_config()['kops_cluster_name']
kops_state_store = self._get_full_config()['kops_state_store']
subprocess.call("kops --state %s --name %s get federations" % (kops_state_store, kops_cluster_name), shell=True)
print("")
except:
print("")
print("Kops needs to be configured with 'pio init-kops'.")
print("")
return
def secrets(self):
try:
kops_cluster_name = self._get_full_config()['kops_cluster_name']
kops_state_store = self._get_full_config()['kops_state_store']
subprocess.call("kops --state %s --name %s get secrets" % (kops_state_store, kops_cluster_name), shell=True)
print("")
except:
print("")
print("Kops needs to be configured with 'pio init-kops'.")
print("")
return
def init_pio(self,
pio_api_version,
pio_git_home,
pio_git_version):
config_dict = {'pio_api_version': pio_api_version,
'pio_git_home': pio_git_home,
'pio_git_version': pio_git_version}
self._merge_config_dict(config_dict)
print("")
pprint(self._get_full_config())
print("")
def init_cluster(self,
kube_cluster_context,
kube_namespace):
pio_api_version = self._get_full_config()['pio_api_version']
config_dict = {'kube_cluster_context': kube_cluster_context,
'kube_namespace': kube_namespace}
self._merge_config_dict(config_dict)
print("")
pprint(self._get_full_config())
print("")
def init_model(self,
model_server_url,
model_type,
model_namespace,
model_name,
model_input_mime_type='application/json',
model_output_mime_type='application/json'):
pio_api_version = self._get_full_config()['pio_api_version']
config_dict = {"model_server_url": model_server_url,
"model_type": model_type,
"model_namespace": model_namespace,
"model_name": model_name,
"model_input_mime_type": model_input_mime_type,
"model_output_mime_type": model_output_mime_type,
}
self._merge_config_dict(config_dict)
print("")
pprint(self._get_full_config())
print("")
# TODO: from_git=True/False (include git_commit_hash, use python git API? Perform immutable deployment??)
# from_docker=True/False (include image name, version)
# canary=True/False
def deploy(self,
model_version,
model_path,
request_timeout=600):
pio_api_version = self._get_full_config()['pio_api_version']
try:
model_server_url = self._get_full_config()['model_server_url']
model_type = self._get_full_config()['model_type']
model_namespace = self._get_full_config()['model_namespace']
model_name = self._get_full_config()['model_name']
except:
print("")
print("Model needs to be configured with 'pio init-model'.")
print("")
return
model_path = os.path.expandvars(model_path)
model_path = os.path.expanduser(model_path)
model_path = os.path.abspath(model_path)
print('model_version: %s' % model_version)
print('model_path: %s' % model_path)
print('request_timeout: %s' % request_timeout)
if (os.path.isdir(model_path)):
compressed_model_bundle_filename = 'bundle-%s-%s-%s-%s.tar.gz' % (model_type, model_namespace, model_name, model_version)
print("")
print("Compressing model bundle '%s' to '%s'." % (model_path, compressed_model_bundle_filename))
print("")
with tarfile.open(compressed_model_bundle_filename, 'w:gz') as tar:
tar.add(model_path, arcname='.')
model_file = compressed_model_bundle_filename
upload_key = 'file'
upload_value = compressed_model_bundle_filename
else:
model_file = model_path
upload_key = 'file'
            upload_value = os.path.split(model_path)[1]  # os.path.split() returns (head, tail); only the filename is needed here
full_model_url = "%s/%s/model/deploy/%s/%s/%s/%s" % (model_server_url, pio_api_version, model_type, model_namespace, model_name, model_version)
with open(model_file, 'rb') as fh:
files = [(upload_key, (upload_value, fh))]
print("")
print("Deploying model '%s' to '%s'." % (model_file, full_model_url))
print("")
headers = {'Accept': 'application/json'}
try:
response = requests.post(url=full_model_url,
headers=headers,
files=files,
timeout=request_timeout)
if response.text:
pprint(response.text)
else:
print("")
print("Success!")
print("")
print("Predict with 'pio predict' or POST to '%s'" % full_model_url.replace('/deploy/','/predict/'))
print("")
except IOError as e:
print("Error while deploying model. Timeout errors are usually OK.\nError: '%s'" % str(e))
print("")
print("Wait a bit, then try to predict with 'pio predict' or POST to '%s'" % full_model_url)
print("")
if (os.path.isdir(model_path)):
print("")
print("Cleaning up compressed model bundle '%s'..." % model_file)
print("")
os.remove(model_file)
def predict(self,
model_version,
model_input_filename,
request_timeout=30):
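        # POSTs the contents of model_input_filename to the model server's /model/predict/ endpoint using the configured MIME types.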
pio_api_version = self._get_full_config()['pio_api_version']
try:
model_server_url = self._get_full_config()['model_server_url']
model_type = self._get_full_config()['model_type']
model_namespace = self._get_full_config()['model_namespace']
model_name = self._get_full_config()['model_name']
model_input_mime_type = self._get_full_config()['model_input_mime_type']
model_output_mime_type = self._get_full_config()['model_output_mime_type']
except:
print("")
print("Model needs to be configured with 'pio model-init'.")
print("")
return
print('model_version: %s' % model_version)
print('model_input_filename: %s' % model_input_filename)
print('request_timeout: %s' % request_timeout)
full_model_url = "%s/%s/model/predict/%s/%s/%s/%s" % (model_server_url, pio_api_version, model_type, model_namespace, model_name, model_version)
print("")
print("Predicting file '%s' with model '%s/%s/%s/%s' at '%s'..." % (model_input_filename, model_type, model_namespace, model_name, model_version, full_model_url))
print("")
with open(model_input_filename, 'rb') as fh:
model_input_binary = fh.read()
headers = {'Content-type': model_input_mime_type, 'Accept': model_output_mime_type}
response = requests.post(url=full_model_url,
headers=headers,
data=model_input_binary,
timeout=request_timeout)
pprint(response.text)
print("")
def cluster(self):
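        # Prints the available and started apps, services with their public ingress, running pods, nodes and the current config.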
pio_api_version = self._get_full_config()['pio_api_version']
try:
kube_cluster_context = self._get_full_config()['kube_cluster_context']
kube_namespace = self._get_full_config()['kube_namespace']
except:
print("")
print("Cluster needs to be configured with 'pio init-cluster'.")
print("")
return
kubeconfig.load_kube_config()
kubeclient_v1 = kubeclient.CoreV1Api()
kubeclient_v1_beta1 = kubeclient.ExtensionsV1beta1Api()
self.apps()
print("DNS Internal (Public)")
print("**************************")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
response = kubeclient_v1.list_namespaced_service(namespace=kube_namespace, watch=False, pretty=True)
for svc in response.items:
ingress = 'Not public'
if svc.status.load_balancer.ingress and len(svc.status.load_balancer.ingress) > 0:
if (svc.status.load_balancer.ingress[0].hostname):
ingress = svc.status.load_balancer.ingress[0].hostname
if (svc.status.load_balancer.ingress[0].ip):
ingress = svc.status.load_balancer.ingress[0].ip
print("%s (%s)" % (svc.metadata.name, ingress))
print("")
print("Running Pods")
print("************")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
response = kubeclient_v1.list_namespaced_pod(namespace=kube_namespace, watch=False, pretty=True)
for pod in response.items:
print("%s (%s)" % (pod.metadata.name, pod.status.phase))
print("")
print("Nodes")
print("*****")
self._get_all_nodes()
print("")
print("Config")
print("******")
pprint(self._get_full_config())
print("")
def _get_pod_by_app_name(self,
app_name):
pio_api_version = self._get_full_config()['pio_api_version']
try:
kube_cluster_context = self._get_full_config()['kube_cluster_context']
kube_namespace = self._get_full_config()['kube_namespace']
except:
print("")
print("Cluster needs to be configured with 'pio init-cluster'.")
print("")
return
kubeconfig.load_kube_config()
kubeclient_v1 = kubeclient.CoreV1Api()
kubeclient_v1_beta1 = kubeclient.ExtensionsV1beta1Api()
found = False
with warnings.catch_warnings():
warnings.simplefilter("ignore")
response = kubeclient_v1.list_namespaced_pod(namespace=kube_namespace, watch=False, pretty=True)
for pod in response.items:
if app_name in pod.metadata.name:
found = True
break
if found:
return pod
else:
return None
def _get_svc_by_app_name(self,
app_name):
pio_api_version = self._get_full_config()['pio_api_version']
try:
kube_cluster_context = self._get_full_config()['kube_cluster_context']
kube_namespace = self._get_full_config()['kube_namespace']
except:
print("")
print("Cluster needs to be configured with 'pio init-cluster'.")
print("")
return
kubeconfig.load_kube_config()
kubeclient_v1 = kubeclient.CoreV1Api()
kubeclient_v1_beta1 = kubeclient.ExtensionsV1beta1Api()
found = False
with warnings.catch_warnings():
warnings.simplefilter("ignore")
response = kubeclient_v1.list_namespaced_service(namespace=kube_namespace, watch=False, pretty=True)
for svc in response.items:
if app_name in svc.metadata.name:
found = True
break
if found:
return svc
else:
return None
def _get_all_available_apps(self):
pio_api_version = self._get_full_config()['pio_api_version']
available_apps = PioCli._kube_deploy_registry.keys()
for app in available_apps:
print(app)
def nodes(self):
pio_api_version = self._get_full_config()['pio_api_version']
try:
kube_cluster_context = self._get_full_config()['kube_cluster_context']
kube_namespace = self._get_full_config()['kube_namespace']
except:
print("")
print("Cluster needs to be configured with 'pio init-cluster'.")
print("")
return
print("")
print("Nodes")
print("*****")
self._get_all_nodes()
print("")
def _get_all_nodes(self):
pio_api_version = self._get_full_config()['pio_api_version']
try:
kube_cluster_context = self._get_full_config()['kube_cluster_context']
kube_namespace = self._get_full_config()['kube_namespace']
except:
print("")
print("Cluster needs to be configured with 'pio init-cluster'.")
print("")
return
kubeconfig.load_kube_config()
kubeclient_v1 = kubeclient.CoreV1Api()
kubeclient_v1_beta1 = kubeclient.ExtensionsV1beta1Api()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
response = kubeclient_v1.list_node(watch=False, pretty=True)
for node in response.items:
print("%s (%s)" % (node.metadata.labels['kubernetes.io/hostname'], node.metadata.labels['kubernetes.io/role']))
def apps(self):
pio_api_version = self._get_full_config()['pio_api_version']
try:
kube_cluster_context = self._get_full_config()['kube_cluster_context']
kube_namespace = self._get_full_config()['kube_namespace']
except:
print("")
print("Cluster needs to be configured with 'pio init-cluster'.")
print("")
return
print("")
print("All Available Apps")
print("******************")
self._get_all_available_apps()
kubeconfig.load_kube_config()
kubeclient_v1 = kubeclient.CoreV1Api()
kubeclient_v1_beta1 = kubeclient.ExtensionsV1beta1Api()
print("")
print("Started Apps")
print("************")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
response = kubeclient_v1_beta1.list_namespaced_deployment(namespace=kube_namespace,
watch=False,
pretty=True)
for deploy in response.items:
print("%s (%s of %s replicas are running)" % (deploy.metadata.name, deploy.status.ready_replicas, deploy.status.replicas))
print("")
def shell(self,
app_name):
self.connect(app_name)
def connect(self,
app_name):
pio_api_version = self._get_full_config()['pio_api_version']
try:
kube_cluster_context = self._get_full_config()['kube_cluster_context']
kube_namespace = self._get_full_config()['kube_namespace']
except:
print("")
print("Cluster needs to be configured with 'pio init-cluster'.")
print("")
return
kubeconfig.load_kube_config()
kubeclient_v1 = kubeclient.CoreV1Api()
kubeclient_v1_beta1 = kubeclient.ExtensionsV1beta1Api()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
response = kubeclient_v1.list_namespaced_pod(namespace=kube_namespace, watch=False, pretty=True)
for pod in response.items:
if app_name in pod.metadata.name:
break
print("")
print("Connecting to '%s'" % pod.metadata.name)
print("")
subprocess.call("kubectl exec -it %s bash" % pod.metadata.name, shell=True)
print("")
def logs(self,
app_name):
pio_api_version = self._get_full_config()['pio_api_version']
try:
kube_cluster_context = self._get_full_config()['kube_cluster_context']
kube_namespace = self._get_full_config()['kube_namespace']
except:
print("")
print("Cluster needs to be configured with 'pio init-cluster'.")
print("")
return
kubeconfig.load_kube_config()
kubeclient_v1 = kubeclient.CoreV1Api()
kubeclient_v1_beta1 = kubeclient.ExtensionsV1beta1Api()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
response = kubeclient_v1.list_namespaced_pod(namespace=kube_namespace, watch=False, pretty=True)
found = False
for pod in response.items:
if app_name in pod.metadata.name:
found = True
break
if found:
print("")
print("Tailing logs on '%s'." % pod.metadata.name)
print("")
subprocess.call("kubectl logs -f %s" % pod.metadata.name, shell=True)
print("")
else:
print("")
print("App '%s' is not running." % app_name)
print("")
def scale(self,
app_name,
replicas):
pio_api_version = self._get_full_config()['pio_api_version']
try:
kube_cluster_context = self._get_full_config()['kube_cluster_context']
kube_namespace = self._get_full_config()['kube_namespace']
except:
print("")
print("Cluster needs to be configured with 'pio init-cluster'.")
print("")
return
kubeconfig.load_kube_config()
kubeclient_v1 = kubeclient.CoreV1Api()
kubeclient_v1_beta1 = kubeclient.ExtensionsV1beta1Api()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
response = kubeclient_v1_beta1.list_namespaced_deployment(namespace=kube_namespace, watch=False, pretty=True)
found = False
for deploy in response.items:
if app_name in deploy.metadata.name:
found = True
break
if found:
print("")
print("Scaling app '%s' to '%s' replicas." % (deploy.metadata.name, replicas))
print("")
subprocess.call("kubectl scale deploy %s --replicas=%s" % (deploy.metadata.name, replicas), shell=True)
print("")
print("Check status with 'pio cluster'.")
print("")
else:
print("")
print("App '%s' is not running." % app_name)
print("")
def volumes(self):
pio_api_version = self._get_full_config()['pio_api_version']
try:
kube_cluster_context = self._get_full_config()['kube_cluster_context']
kube_namespace = self._get_full_config()['kube_namespace']
except:
print("")
print("Cluster needs to be configured with 'pio init-cluster'.")
print("")
return
print("")
print("Volumes")
print("*******")
self._get_all_volumes()
print("")
print("Volume Claims")
print("*************")
self._get_all_volume_claims()
print("")
def _get_all_volumes(self):
pio_api_version = self._get_full_config()['pio_api_version']
try:
kube_cluster_context = self._get_full_config()['kube_cluster_context']
kube_namespace = self._get_full_config()['kube_namespace']
except:
print("")
print("Cluster needs to be configured with 'pio init-cluster'.")
print("")
return
kubeconfig.load_kube_config()
kubeclient_v1 = kubeclient.CoreV1Api()
kubeclient_v1_beta1 = kubeclient.ExtensionsV1beta1Api()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
response = kubeclient_v1.list_persistent_volume(watch=False,
pretty=True)
for claim in response.items:
print("%s" % (claim.metadata.name))
print("")
def _get_all_volume_claims(self):
pio_api_version = self._get_full_config()['pio_api_version']
try:
kube_cluster_context = self._get_full_config()['kube_cluster_context']
kube_namespace = self._get_full_config()['kube_namespace']
except:
print("")
print("Cluster needs to be configured with 'pio init-cluster'.")
print("")
return
kubeconfig.load_kube_config()
kubeclient_v1 = kubeclient.CoreV1Api()
kubeclient_v1_beta1 = kubeclient.ExtensionsV1beta1Api()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
response = kubeclient_v1.list_persistent_volume_claim_for_all_namespaces(watch=False,
pretty=True)
for claim in response.items:
print("%s" % (claim.metadata.name))
print("")
def _get_config_yamls(self,
app_name):
return []
def _get_secret_yamls(self,
app_name):
return []
def _get_deploy_yamls(self,
app_name):
try:
(deploy_yamls, dependencies) = PioCli._kube_deploy_registry[app_name]
except:
dependencies = []
deploy_yamls = []
if len(dependencies) > 0:
for dependency in dependencies:
deploy_yamls = deploy_yamls + self._get_deploy_yamls(dependency)
return deploy_yamls
def _get_svc_yamls(self,
app_name):
try:
(svc_yamls, dependencies) = PioCli._kube_svc_registry[app_name]
except:
dependencies = []
svc_yamls = []
if len(dependencies) > 0:
for dependency in dependencies:
svc_yamls = svc_yamls + self._get_svc_yamls(dependency)
return svc_yamls
def start(self,
app_name):
pio_api_version = self._get_full_config()['pio_api_version']
try:
kube_cluster_context = self._get_full_config()['kube_cluster_context']
kube_namespace = self._get_full_config()['kube_namespace']
pio_git_home = self._get_full_config()['pio_git_home']
if 'http:' in pio_git_home or 'https:' in pio_git_home:
pass
else:
pio_git_home = os.path.expandvars(pio_git_home)
pio_git_home = os.path.expanduser(pio_git_home)
pio_git_home = os.path.abspath(pio_git_home)
pio_git_version = self._get_full_config()['pio_git_version']
except:
print("")
print("Cluster needs to be configured with 'pio init-cluster'.")
print("")
return
config_yaml_filenames = []
secret_yaml_filenames = []
deploy_yaml_filenames = []
svc_yaml_filenames = []
config_yaml_filenames = config_yaml_filenames + self._get_config_yamls(app_name)
secret_yaml_filenames = secret_yaml_filenames + self._get_secret_yamls(app_name)
deploy_yaml_filenames = deploy_yaml_filenames + self._get_deploy_yamls(app_name)
svc_yaml_filenames = svc_yaml_filenames + self._get_svc_yamls(app_name)
kubeconfig.load_kube_config()
kubeclient_v1 = kubeclient.CoreV1Api()
kubeclient_v1_beta1 = kubeclient.ExtensionsV1beta1Api()
#for config_yaml_filename in config_yaml_filenames:
# TODO
# return
#for secret_yaml_filename in secret_yaml_filenames:
# TODO
# return
print("")
print("Starting app '%s'." % app_name)
print("")
print("Ignore any 'Already Exists' errors. These are OK.")
print("")
for deploy_yaml_filename in deploy_yaml_filenames:
try:
if 'http:' in deploy_yaml_filename or 'https:' in deploy_yaml_filename:
deploy_yaml_filename = deploy_yaml_filename.replace('github.com', 'raw.githubusercontent.com')
subprocess.call("kubectl create -f %s" % deploy_yaml_filename, shell=True)
else:
if 'http:' in pio_git_home or 'https:' in pio_git_home:
pio_git_home = pio_git_home.replace('github.com', 'raw.githubusercontent.com')
subprocess.call("kubectl create -f %s/%s/%s" % (pio_git_home.rstrip('/'), pio_git_version, deploy_yaml_filename), shell=True)
else:
with open(os.path.join(pio_git_home, deploy_yaml_filename)) as fh:
deploy_yaml = yaml.load(fh)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
response = kubeclient_v1_beta1.create_namespaced_deployment(body=deploy_yaml,
namespace=kube_namespace,
pretty=True)
pprint(response)
except ApiException as e:
print("")
print("App '%s' did not start properly.\n%s" % (deploy_yaml_filename, str(e)))
print("")
for svc_yaml_filename in svc_yaml_filenames:
try:
if 'http:' in svc_yaml_filename or 'https:' in svc_yaml_filename:
svc_yaml_filename = svc_yaml_filename.replace('github.com', 'raw.githubusercontent.com')
subprocess.call("kubectl create -f %s" % svc_yaml_filename, shell=True)
else:
if 'http:' in pio_git_home or 'https:' in pio_git_home:
pio_git_home = pio_git_home.replace('github.com', 'raw.githubusercontent.com')
subprocess.call("kubectl create -f %s/%s/%s" % (pio_git_home.rstrip('/'), pio_git_version, svc_yaml_filename), shell=True)
else:
with open(os.path.join(pio_git_home, svc_yaml_filename)) as fh:
svc_yaml = yaml.load(fh)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
response = kubeclient_v1.create_namespaced_service(body=svc_yaml,
namespace=kube_namespace,
pretty=True)
pprint(response)
except ApiException as e:
print("")
print("App '%s' did not start properly.\n%s" % (svc_yaml_filename, str(e)))
print("")
print("")
print("Check status with 'pio cluster'.")
print("")
def stop(self,
app_name):
pio_api_version = self._get_full_config()['pio_api_version']
try:
kube_cluster_context = self._get_full_config()['kube_cluster_context']
kube_namespace = self._get_full_config()['kube_namespace']
except:
print("")
print("Cluster needs to be configured with 'pio init-cluster'.")
print("")
return
kubeconfig.load_kube_config()
kubeclient_v1 = kubeclient.CoreV1Api()
kubeclient_v1_beta1 = kubeclient.ExtensionsV1beta1Api()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
response = kubeclient_v1_beta1.list_namespaced_deployment(namespace=kube_namespace, watch=False, pretty=True)
found = False
for deploy in response.items:
if app_name in deploy.metadata.name:
found = True
break
if found:
print("")
print("Stopping app '%s'." % deploy.metadata.name)
subprocess.call("kubectl delete deploy %s" % deploy.metadata.name, shell=True)
print("")
print("Check status with 'pio cluster'.")
print("")
else:
print("")
print("App '%s' is not running." % app_name)
print("")
# def git_init(self,
# git_repo_base_path,
# git_revision='HEAD'):
# expanded_git_repo_base_path = os.path.expandvars(git_repo_base_path)
# expanded_git_repo_base_path = os.path.expanduser(expanded_git_repo_base_path)
# expanded_git_repo_base_path = os.path.abspath(expanded_git_repo_base_path)
# pio_api_version = self._get_full_config()['pio_api_version']
# print("git_repo_base_path: '%s'" % git_repo_base_path)
# print("expanded_git_repo_base_path: '%s'" % expanded_git_repo_base_path)
# print("git_revision: '%s'" % git_revision)
# git_repo = Repo(expanded_git_repo_base_path, search_parent_directories=True)
# config_dict = {'git_repo_base_path': git_repo.working_tree_dir , 'git_revision': git_revision}
# self._merge_config_dict(config_dict)
# pprint(self._get_full_config())
# print("")
# def git(self):
# pio_api_version = self._get_full_config()['pio_api_version']
# try:
# git_repo_base_path = self._get_full_config()['git_repo_base_path']
# expanded_git_repo_base_path = os.path.expandvars(git_repo_base_path)
# expanded_git_repo_base_path = os.path.expanduser(expanded_git_repo_base_path)
# expanded_git_repo_base_path = os.path.abspath(expanded_git_repo_base_path)
# git_revision = self._get_full_config()['git_revision']
# except:
# print("Git needs to be configured with 'pio git-init'.")
# return
# pprint(self._get_full_config())
# git_repo = Repo(expanded_git_repo_base_path, search_parent_directories=False)
# ch = git_repo.commit(git_revision)
# print("Git repo base path: '%s'" % git_repo_base_path)
# print("Git revision: '%s'" % git_revision)
# print("Git commit message: '%s'" % ch.message)
# print("Git commit hash: '%s'" % ch.hexsha)
# print("")
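# Example CLI usage (illustrative sketch only; assumes the console script is
# installed as `pio`, matching the hints printed by the commands above, and
# that Python Fire maps each public method to a sub-command):
#
#   pio init-pio <pio_api_version> <pio_git_home> <pio_git_version>
#   pio init-cluster <kube_cluster_context> <kube_namespace>
#   pio init-model <model_server_url> <model_type> <model_namespace> <model_name>
#   pio deploy <model_version> <model_path>
#   pio predict <model_version> <model_input_filename>
#   pio cluster
#   pio start <app_name> ; pio logs <app_name> ; pio scale <app_name> <replicas>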
def main():
fire.Fire(PioCli)
if __name__ == '__main__':
main()
| apache-2.0 | 5,127,850,523,423,164,000 | 38.903243 | 385 | 0.52565 | false |
ktmrmshk/Dresp | dresp/imgen.py | 1 | 1692 | from PIL import Image, ImageDraw, ImageFont
import os
def imgen(size, fmt='jpg', outdir='./'):
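    # Creates a solid gray placeholder image of the given (width, height), labels it with its own file name
    # and saves it to outdir, e.g. '640x480.jpg'.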
outfile='{}x{}.{}'.format(size[0], size[1], fmt)
img = Image.new('RGB', size, (210,210,210))
d=ImageDraw.Draw(img)
d.text((0,0), outfile, (0,0,0))
img.save(os.path.join(outdir,outfile))
def imgen_echo(txt, fmt='jpg', outdir='./', fileprefix='tmp123'):
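    # Renders multi-line text onto a large gray canvas using the bundled Courier New font and saves it as '<fileprefix>.<fmt>' in outdir.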
outfile='{}.{}'.format(fileprefix, fmt)
#load font
fontpath='{}/static/CourierNew.ttf'.format(os.path.dirname(os.path.abspath(__file__)))
font = ImageFont.truetype(fontpath, 18)
img = Image.new('RGB', (3200,1600), (210,210,210))
d=ImageDraw.Draw(img)
d.multiline_text((0,0), txt, (0,0,0), font=font)
img.save(os.path.join(outdir,outfile))
import json
if __name__ == '__main__':
imgen((20, 10), 'png')
raw='''{"Host": "localhost:5000", "Connection": "keep-alive", "Pragma": "akamai-x-cache-on, akamai-x-cache-remote-on, akamai-x-check-cacheable, akamai-x-get-cache-key, akamai-x-get-ssl-client-session-id, akamai-x-get-true-cache-key, akamai-x-get-request-id", "Cache-Control": "no-cache", "Upgrade-Insecure-Requests": "1", "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8", "Accept-Encoding": "gzip, deflate, br", "Accept-Language": "en-US,en;q=0.9,ja;q=0.8", "X-Im-Piez": "on", "X-Akamai-Ro-Piez": "on", "X-Akamai-A2-Disable": "on", "Show-Akamai-Debug-4Fbgbadszg": "true", "X-Akamai-Device-Characteristics": "abc123"}'''
imgen_echo(json.dumps(json.loads(raw),indent=2), 'png')
| apache-2.0 | -2,803,733,037,499,778,000 | 53.580645 | 810 | 0.648345 | false |
Yobretaw/ChromeCacheExtractor | src/manager.py | 1 | 2438 | import sys
import os
import urllib
import uuid
import hashlib
from Util import *
from addr import *
from index import *
from block import *
from entrystore import *
from dump_cache import *
class CacheManager(object):
def __init__(self, fromDir=None, toDir=None):
self.fromDir = fromDir
self.toDir = toDir
self.indexFile = None;
self.blockFiles = [None] * 4; # data_0, data_1, data_2, data_3
self.separateFiles = {}
self.entries = []
if fromDir:
self.indexFile = Index(pathToIndex=os.path.join(fromDir, "index"))
self.blockFiles[0] = Block(pathToBlock=os.path.join(fromDir, "data_0"))
self.blockFiles[1] = Block(pathToBlock=os.path.join(fromDir, "data_1"))
self.blockFiles[2] = Block(pathToBlock=os.path.join(fromDir, "data_2"))
self.blockFiles[3] = Block(pathToBlock=os.path.join(fromDir, "data_3"))
separate_files = [name for name in os.listdir(fromDir) if
os.path.isfile(os.path.join(fromDir, name)) and name[0] == 'f']
for fname in separate_files:
with open(os.path.join(fromDir, fname), 'rb') as tmp:
self.separateFiles[fname] = tmp.read()
def processEntries(self):
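        # Parses every cache entry referenced by the index table, following next_addr chains by appending them to the table being iterated.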
assert (self.indexFile.table)
for addr in self.indexFile.table:
entry = EntryStore(self.fetchBytesForEntry(addr), self)
self.entries.append(entry)
if entry.next_addr:
self.indexFile.table.append(CacheAddr(entry.next_addr))
def outputToFiles(self):
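        # Writes each parsed entry's response header (and body, when present) to the output directory via CacheDumper.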
dumper = CacheDumper(self.toDir)
dumper.init()
for entry in self.entries:
if len(entry.response_header) <= 1:
continue
url = entry.key.encode('utf-8')
ext = getExt(entry.key, entry.headerMap)
dumper.insert(url, '\n'.join(entry.response_header), isHeader=True)
if len(entry.data) > 1:
dumper.insert(url, entry.data[1], ext=ext)
def fetchBytesForEntry(self, addr):
block_file = addr.block_file
block_number = addr.block_number
num_blocks = addr.contiguous_blocks + 1
entries = self.blockFiles[block_file].getEntry(block_number, num_blocks)
return b"".join(entries)
def insertAddrToIndex(self, addr):
self.indexFile.table.append(CacheAddr(addr))
| mit | -5,627,600,396,760,570,000 | 32.39726 | 93 | 0.604184 | false |
li-xirong/jingwei | instance_based/getknn.py | 1 | 5471 |
import os, sys, time
from basic.constant import ROOT_PATH
from basic.common import checkToSkip, printStatus, writeRankingResults
from basic.util import readImageSet
from util.simpleknn.bigfile import BigFile
from util.simpleknn import simpleknn as imagesearch
DEFAULT_K=1500
DEFAULT_DISTANCE = 'l2'
DEFAULT_UU = 1
DEFAULT_BLOCK_SIZE = 1000
INFO = __file__
def unique_user_constraint(knn, im2user, k):
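    # Keeps at most one neighbour per user, returning (number_removed, filtered_list_of_up_to_k_items).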
res = []
userSet = set()
removed = 0
for name,score in knn:
userid = im2user[name]
if userid in userSet:
removed += 1
continue
userSet.add(userid)
res.append((name,score))
if len(res) == k:
break
return removed, res
def process(options, trainCollection, testCollection, feature):
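    # For every test image, retrieves the k visually nearest training images (optionally at most one per user)
    # and writes the ranked neighbours to disk.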
rootpath = options.rootpath
k = options.k
distance = options.distance
blocksize = options.blocksize
uniqueUser = options.uu
numjobs = options.numjobs
job = options.job
overwrite = options.overwrite
testset = options.testset
if not testset:
testset = testCollection
searchMethod = distance + 'knn'
if uniqueUser:
searchMethod += ",uu"
tagfile = os.path.join(rootpath, trainCollection, 'TextData', 'id.userid.lemmtags.txt')
im2user = {}
for line in open(tagfile):
im,userid,tags = line.split('\t')
im2user[im] = userid
resultdir = os.path.join(rootpath, testCollection, "SimilarityIndex", testset, trainCollection, "%s,%s,%d" % (feature,searchMethod,k))
feat_dir = os.path.join(rootpath, trainCollection, 'FeatureData', feature)
id_file = os.path.join(feat_dir, 'id.txt')
shape_file = os.path.join(feat_dir, 'shape.txt')
nr_of_images, feat_dim = map(int, open(shape_file).readline().split())
nr_of_images = len(open(id_file).readline().strip().split())
searcher = imagesearch.load_model(os.path.join(feat_dir, 'feature.bin'), feat_dim, nr_of_images, id_file)
searcher.set_distance(distance)
workingSet = readImageSet(testCollection, testset, rootpath=rootpath)
workingSet = [workingSet[i] for i in range(len(workingSet)) if (i%numjobs+1) == job]
printStatus(INFO, "working on %d-%d, %d test images -> %s" % (numjobs,job,len(workingSet),resultdir))
test_feat_dir = os.path.join(rootpath, testCollection, 'FeatureData', feature)
test_feat_file = BigFile(test_feat_dir)
read_time = 0
knn_time = 0
start = 0
done = 0
filtered = 0
while start < len(workingSet):
end = min(len(workingSet), start + blocksize)
printStatus(INFO, 'processing images from %d to %d' % (start, end-1))
s_time = time.time()
renamed,vectors = test_feat_file.read(workingSet[start:end])
read_time += time.time() - s_time
nr_images = len(renamed)
s_time = time.time()
for i in range(nr_images):
resultfile = os.path.join(resultdir, renamed[i][-2:], '%s.txt' % renamed[i])
if checkToSkip(resultfile, overwrite):
continue
knn = searcher.search_knn(vectors[i], max_hits=max(3000,k*3))
if uniqueUser:
removed, newknn = unique_user_constraint(knn, im2user, k)
filtered += removed
knn = newknn
else:
knn = knn[:k]
assert(len(knn) >= k)
writeRankingResults(knn, resultfile)
done += 1
printStatus(INFO, 'job %d-%d: %d done, filtered neighbors %d' % (numjobs, job, done, filtered))
start = end
printStatus(INFO, 'job %d-%d: %d done, filtered neighbors %d' % (numjobs, job, done, filtered))
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
from optparse import OptionParser
parser = OptionParser(usage="""usage: %prog [options] trainCollection testCollection feature""")
parser.add_option("--overwrite", default=0, type="int", help="overwrite existing file (default=0)")
parser.add_option("--uu", default=DEFAULT_UU, type="int", help="unique user constraint (default=%d)" % DEFAULT_UU)
parser.add_option("--testset", default=None, type="string", help="process a specified subset of $testCollection")
parser.add_option("--k", default=DEFAULT_K, type="int", help="number of neighbors (%d)" % DEFAULT_K)
parser.add_option("--distance", default=DEFAULT_DISTANCE, type="string", help="visual distance, can be l1 or l2 (default: %s)" % DEFAULT_DISTANCE)
parser.add_option("--rootpath", default=ROOT_PATH, type="string", help="(default: %s)" % ROOT_PATH)
parser.add_option("--numjobs", default=1, type="int", help="number of jobs (default: 1)")
parser.add_option("--job", default=1, type="int", help="current job (default: 1)")
parser.add_option("--blocksize", default=DEFAULT_BLOCK_SIZE, type="int", help="nr of feature vectors loaded per time (default: %d)" % DEFAULT_BLOCK_SIZE)
(options, args) = parser.parse_args(argv)
if len(args) < 3:
parser.print_help()
return 1
assert(options.job>=1 and options.numjobs >= options.job)
return process(options, args[0], args[1], args[2])
if __name__ == "__main__":
sys.exit(main())
| mit | -5,852,272,726,734,158,000 | 36.801418 | 157 | 0.606288 | false |
katajakasa/utuputki2-client | utuclient/player.py | 1 | 3016 | # -*- coding: utf-8 -*-
from gi.repository import GObject as gobject, Gst as gst, Gtk as gtk, GdkX11, GstVideo
import platform
import logging
log = logging.getLogger(__name__)
class Player(object):
VIDEO = 0
IMAGE = 1
def __init__(self, window, url, cb_finish, cb_error, mode=VIDEO):
self.window = window
self.mode = mode
self.url = url
self.cb_finish = cb_finish
self.cb_error = cb_error
# The play binary pipeline
self.pipeline = gst.ElementFactory.make("playbin", "player")
self.pipeline.set_property("uri", url)
# Sink
if platform.system() == "Windows":
self.sink = gst.ElementFactory.make('d3dvideosink', 'sink')
else:
self.sink = gst.ElementFactory.make('ximagesink', 'sink')
self.sink.set_property('force-aspect-ratio', True)
self.pipeline.set_property('video-sink', self.sink)
# Handle image stuff
if mode == Player.IMAGE:
self.freeze = gst.ElementFactory.make("imagefreeze", "freeze")
self.pipeline.add(self.freeze)
else:
self.freeze = None
# Add signal handler
self.bus = self.pipeline.get_bus()
self.bus.connect('message::eos', self.handle_eos)
self.bus.connect('message::error', self.handle_error)
self.bus.add_signal_watch()
# Find the correct window handle and set it as base drawing area for the video sink
if platform.system() == "Windows":
self.sink.set_window_handle(self.window.get_hwnd())
else:
self.sink.set_window_handle(self.window.get_xid())
# Start
self.pipeline.set_state(gst.State.PLAYING)
def handle_error(self, bus, msg):
error = msg.parse_error()[1]
log.warn(u"Caught error %s", error)
self.cb_error(error)
def handle_eos(self, bus, msg):
log.info(u"End of stream")
self.cb_finish()
def play(self):
self.pipeline.set_state(gst.State.PLAYING)
def stop(self):
self.pipeline.set_state(gst.State.NULL)
def pause(self):
self.pipeline.set_state(gst.State.PAUSED)
def is_stopped(self):
return self.pipeline.get_state(gst.CLOCK_TIME_NONE)[1] == gst.State.NULL
def is_playing(self):
return self.pipeline.get_state(gst.CLOCK_TIME_NONE)[1] == gst.State.PLAYING
def is_paused(self):
return self.pipeline.get_state(gst.CLOCK_TIME_NONE)[1] == gst.State.PAUSED
def close(self):
self.stop()
self.bus.remove_signal_watch()
def status(self):
if self.is_stopped():
return 0
if self.is_playing():
return 1
if self.is_paused():
return 2
return None
def seek(self, seconds):
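        # Seeks to an absolute position given in seconds, flushing the pipeline to the nearest key unit.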
self.pipeline.seek_simple(gst.Format.TIME,
gst.SeekFlags.FLUSH | gst.SeekFlags.KEY_UNIT,
seconds * gst.SECOND)
| mit | -3,054,547,162,323,551,000 | 29.77551 | 91 | 0.587865 | false |
bartsidee/bartsidee-boxee | repo/hulu/hulu.py | 1 | 6855 | from default import *
from library import *
import tools
sys.path.append(os.path.join(CWD, 'external'))
from BeautifulSoup import BeautifulSoup
from urllib import quote_plus
try: import simplejson as json
except: import json
import math
class Module(BARTSIDEE_MODULE):
def __init__(self, app):
self.app = app
BARTSIDEE_MODULE.__init__(self, app)
self.url_base = 'http://www.hulu.com'
self.name = "Hulu" #Name of the channel
self.type = ['search', 'genre'] #Choose between 'search', 'list', 'genre'
self.episode = True #True if the list has episodes
self.genre = ['Comedy', 'Drama', 'Reality and Game Shows','Animation and Cartoons', 'Anime', 'International', 'Kids', 'Family', 'Action and Adventure', 'Food', 'Science Fiction', 'News and Information', 'Classics', 'Latino', 'Horror and Suspense', 'Documentaries', 'Korean Drama', 'Health and Wellness', 'Lifestyle', 'Sports', 'Music', 'Arts and Culture', 'Videogames', 'Gay and Lesbian']
self.filter = [] #Array to add a genres to the genre section [type genre must be enabled]
self.content_type = 'video/x-flv' #Mime type of the content to be played
self.country = 'US' #2 character country id code
self.free = "1"
self.pageSize = 16
self.access_token = re.compile('w.API_DONUT = \'(.*?)\';', re.DOTALL + re.IGNORECASE).search(str(tools.urlopen(self.app, self.url_base))).group(1)
def Search(self, search):
url = self.url_base + '/browse/search?alphabet=All&family_friendly=0&closed_captioned=0&has_free=1&has_huluplus=0&has_hd=0&channel=All&subchannel=&network=All&display=Shows%20with%20full%20episodes%20only&decade=All&type=tv&view_as_thumbnail=false&block_num=0&keyword=' + quote_plus(search)
data = tools.urlopen(self.app, url)
data = re.compile('"show_list", "(.*?)"\)', re.DOTALL + re.IGNORECASE).search(str(data)).group(1)
data = data.replace('\\u003c','<').replace('\\u003e','>').replace('\\','').replace('\\n','').replace('\\t','')
soup = BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES, smartQuotesTo="xml")
streamlist = list()
for info in soup.findAll('a', {'onclick':True}):
stream = CreateList()
stream.name = info.contents[0]
stream.id = info['href']
streamlist.append(stream)
return streamlist
def Episode(self, stream_name, stream_id, page, totalpage):
data = tools.urlopen(self.app, stream_id, {'cache':3600})
if not data:
return []
soup = BeautifulSoup(data, convertEntities="xml", smartQuotesTo="xml")
show_id = re.compile('show\/(.*?)\?region\=', re.DOTALL + re.IGNORECASE).search(str(data)).group(1)
url = self.url_base + "/mozart/v1.h2o/shows/episodes?free_only="+self.free+"&include_seasons=true&order=asc&shorter_cache=true&show_id="+show_id+"&sort=premiere_date&video_type%5B%5D=episode&video_type%5B%5D=game&items_per_page=" + str(self.pageSize) + "&position=" + str(self.pageSize * (page - 1)) + "&_user_pgid=1&_content_pgid=67&_device_id=1&access_token=" + self.access_token
data = tools.urlopen(self.app, url)
json_data = json.loads(data)
if totalpage == "":
if int(json_data['total_count']) > self.pageSize:
totalpage = int(math.ceil(int(json_data['total_count']) / self.pageSize))
else:
totalpage = 1
episodelist = list()
for item in json_data['data']:
episode = CreateEpisode()
episode.name = stream_name
episode.id = self.url_base + '/watch/'+str(item['video']['id'])
episode.description = 'Episode: ' + str(item['video']['episode_number']) + ' - ' + str(item['video']['title'])
episode.thumbnails = 'http://ib1.huluim.com/video/'+str(item['video']['content_id'])+'?size=220x124'
episode.date = 'Season: ' + str(item['video']['season_number'])
episode.page = page
episode.totalpage = totalpage
episodelist.append(episode)
return episodelist
def Genre(self, genre, filter, page, totalpage):
url = self.url_base + '/mozart/v1.h2o/shows?asset_scope=tv&genre='+genre.replace(" ", "+")+'&order=desc&sort=view_count_week&video_type=tv&items_per_page=' + str(self.pageSize) + '&position='+ str(self.pageSize * (page - 1)) + '&_user_pgid=1&_content_pgid=67&_device_id=1&free_only='+self.free + '&access_token=' + self.access_token
data = tools.urlopen(self.app, url, {'cache':3600})
if data == "":
mc.ShowDialogNotification("No genre found for " + str(genre))
return []
json_data = json.loads(data)
if totalpage == "":
if int(json_data['total_count']) > self.pageSize:
totalpage = int(math.ceil(int(json_data['total_count']) / self.pageSize))
else:
totalpage = 1
genrelist = list()
for item in json_data['data']:
genreitem = CreateEpisode()
genreitem.episode = "True"
genreitem.name = '[UPPERCASE]'+ item['show']['name'] +'[/UPPERCASE] ' + item['show']['description']
genreitem.id = self.url_base + '/' +str(item['show']['canonical_name'])
genreitem.page = page
genreitem.totalpage = totalpage
genrelist.append(genreitem)
return genrelist
def Play(self, stream_name, stream_id, subtitle):
path = self.tinyurl(stream_id)
play = CreatePlay()
play.path = quote_plus(path)
play.domain = 'bartsidee.nl'
play.jsactions = quote_plus('http://boxee.bartsidee.nl/js/hulu.js')
return play
def tinyurl(self, params):
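        # Shortens the given URL via the tinyurl.com API and returns the short URL.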
url = "http://tinyurl.com/api-create.php?url=" + str(params)
return tools.urlopen(self.app, url)
def getGenres(self):
url = self.url_base + "/mozart/v1.h2o/shows/genres?sort=view_count_week&type=tv&items_per_page=32&position=0&_user_pgid=1&_content_pgid=67&_device_id=1&access_token=" + self.access_token
data = tools.urlopen(self.app, url, {'cache':36000})
json_data = json.loads(data)
genres = []
for item in json_data['data']:
genres.append(item["genre"]["name"])
return genres
| gpl-3.0 | 6,666,448,503,530,564,000 | 49.036496 | 405 | 0.566302 | false |
praekeltfoundation/docker-ci-deploy | docker_ci_deploy/tests/test_main.py | 1 | 31671 | # -*- coding: utf-8 -*-
import re
import sys
from subprocess import CalledProcessError
from testtools import ExpectedException
from testtools.assertions import assert_that
from testtools.matchers import Equals, MatchesRegex, MatchesStructure
from docker_ci_deploy.__main__ import (
cmd, DockerCiDeployRunner, join_image_tag, main, RegistryTagger,
generate_tags, generate_semver_versions, VersionTagger, split_image_tag)
class TestSplitImageTagFunc(object):
def test_split(self):
"""
Given an image tag with registry, name and tag components,
split_image_tag should return the registry and name as the image and
the tag part as the tag.
"""
image_and_tag = split_image_tag(
'registry.example.com:5000/user/name:tag')
assert_that(image_and_tag, Equals(
('registry.example.com:5000/user/name', 'tag')))
def test_no_tag(self):
"""
Given an image tag with only registry and name components,
split_image_tag should return the image name unchanged and None for the
tag.
"""
image_and_tag = split_image_tag('registry.example.com:5000/user/name')
assert_that(image_and_tag, Equals(
('registry.example.com:5000/user/name', None)))
def test_no_registry(self):
"""
Given an image tag with only name and tag components, split_image_tag
should return the user and name part for the name and the tag part for
the tag.
"""
image_and_tag = split_image_tag('user/name:tag')
assert_that(image_and_tag, Equals(('user/name', 'tag')))
def test_no_registry_or_tag(self):
"""
Given an image tag with only name components, split_image_tag should
return the image name unchanged and None for the tag.
"""
image_and_tag = split_image_tag('user/name')
assert_that(image_and_tag, Equals(('user/name', None)))
def test_tag_unparsable(self):
"""
Given a malformed image tag, split_image_tag should throw an error.
"""
image_tag = 'this:is:invalid/user:test/name:tag/'
with ExpectedException(
ValueError, r"Unable to parse image tag '%s'" % (image_tag,)):
split_image_tag(image_tag)
class TestJoinImageTagFunc(object):
def test_image_and_tag(self):
"""
When an image and tag are provided, the two should be joined using a
':' character.
"""
image_tag = join_image_tag('bar', 'foo')
assert_that(image_tag, Equals('bar:foo'))
def test_tag_is_none(self):
""" When the provided tag is None, the image should be returned. """
image_tag = join_image_tag('bar', None)
assert_that(image_tag, Equals('bar'))
def test_tag_is_empty(self):
""" When the provided tag is empty, the image should be returned. """
image_tag = join_image_tag('bar', '')
assert_that(image_tag, Equals('bar'))
class TestRegistryTagger(object):
def test_image_without_registry(self):
"""
When an image without a registry is provided, the registry should be
prepended to the image with a '/' character.
"""
image = RegistryTagger('registry:5000').generate_tag('bar')
assert_that(image, Equals('registry:5000/bar'))
def test_image_with_registry(self):
"""
When an image is provided that already specifies a registry, that
registry should be replaced with the given registry.
"""
image = RegistryTagger('registry2:5000').generate_tag(
'registry:5000/bar')
assert_that(image, Equals('registry2:5000/bar'))
def test_image_might_have_registry(self):
"""
When an image is provided that looks like it *may* already specify a
registry, the registry should just be prepended to the image name and
returned, provided that the resulting image name is valid.
"""
image = RegistryTagger('registry:5000').generate_tag(
'praekeltorg/alpine-python')
assert_that(image, Equals('registry:5000/praekeltorg/alpine-python'))
def test_image_unparsable(self):
"""
Given a malformed image name, replace_image_registry should throw an
error.
"""
image = 'foo:5000:port/name'
with ExpectedException(
ValueError, r"Unable to parse image name '%s'" % (image,)):
RegistryTagger('registry:5000').generate_tag(image)
class TestGenerateSemverVersionsFunc(object):
def test_standard_version(self):
"""
When a standard 3-part semantic version is passed, 3 version strings
should be returned with decreasing levels of precision.
"""
versions = generate_semver_versions('5.4.1')
assert_that(versions, Equals(['5.4.1', '5.4', '5']))
def test_extended_version(self):
"""
When a version is passed with extra information separated by '-',
version strings should be returned with decreasing levels of precision.
"""
versions = generate_semver_versions('5.5.0-alpha')
assert_that(versions, Equals(['5.5.0-alpha', '5.5.0', '5.5', '5']))
def test_one_version_part(self):
"""
When a version with a single part is passed, that version should be
returned in a list.
"""
versions = generate_semver_versions('foo')
assert_that(versions, Equals(['foo']))
def test_precision_less_than_version(self):
"""
When precision is less than the precision of the version, semantic
versions should be generated up to the specified precision.
"""
versions = generate_semver_versions('3.5.3', precision=2)
assert_that(versions, Equals(['3.5.3', '3.5']))
def test_precision_equal_to_version(self):
"""
When precision is equal to the precision of the version, the generated
versions should be just the version itself.
"""
versions = generate_semver_versions('3.5.3', precision=3)
assert_that(versions, Equals(['3.5.3']))
def test_precision_greater_than_version(self):
"""
When precision is greater than the precision of the version, an error
should be raised.
"""
with ExpectedException(
ValueError,
r'The minimum precision \(4\) is greater than the precision '
r"of version '3\.5\.3' \(3\)"):
generate_semver_versions('3.5.3', precision=4)
def test_does_not_generate_zero(self):
"""
When a version is passed with a major version of 0, the version '0'
should not be returned in the list of versions.
"""
versions = generate_semver_versions('0.6.11')
assert_that(versions, Equals(['0.6.11', '0.6']))
def test_zero_true(self):
"""
When a version is passed with a major version of 0, and the zero
parameter is True, the version '0' should be returned in the list of
versions.
"""
versions = generate_semver_versions('0.6.11', zero=True)
assert_that(versions, Equals(['0.6.11', '0.6', '0']))
def test_does_generate_zero_if_only_zero(self):
"""
When the version '0' is passed, that version should be returned in a
list.
"""
versions = generate_semver_versions('0')
assert_that(versions, Equals(['0']))
class TestVersionTagger(object):
def test_tag_without_version(self):
"""
When a tag does not start with the version, the version should be
prepended to the tag with a '-' character.
"""
tagger = VersionTagger(['1.2.3'])
tags = tagger.generate_tags('foo')
assert_that(tags, Equals(['1.2.3-foo']))
def test_tag_with_version(self):
"""
When a tag starts with one of the versions, then the version and '-'
separator should be removed from the tag and the remaining tag
processed.
"""
tagger = VersionTagger(['1.2.3', '1.2', '1'])
tags = tagger.generate_tags('1.2-foo')
assert_that(tags, Equals(['1.2.3-foo', '1.2-foo', '1-foo']))
def test_tag_is_version(self):
"""
When a tag is equal to one of the versions, the versions should be
returned.
"""
tagger = VersionTagger(['1.2.3', '1.2', '1'])
tags = tagger.generate_tags('1')
assert_that(tags, Equals(['1.2.3', '1.2', '1']))
def test_tag_is_none(self):
""" When a tag is None, the versions should be returned. """
tagger = VersionTagger(['1.2.3'])
tags = tagger.generate_tags(None)
assert_that(tags, Equals(['1.2.3']))
def test_tag_is_latest(self):
""" When the tag is 'latest', the versions should be returned. """
tagger = VersionTagger(['1.2.3'])
tags = tagger.generate_tags('latest')
assert_that(tags, Equals(['1.2.3']))
def test_latest(self):
"""
When latest is True and a tag is provided, the versioned and
unversioned tags should be returned.
"""
tagger = VersionTagger(['1.2.3'], latest=True)
tags = tagger.generate_tags('foo')
assert_that(tags, Equals(['1.2.3-foo', 'foo']))
def test_latest_tag_with_version(self):
"""
When latest is True and the tag already has the version prefixed, the
tag and 'latest' tag should be returned.
"""
tagger = VersionTagger(['1.2.3'], latest=True)
tags = tagger.generate_tags('1.2.3-foo')
assert_that(tags, Equals(['1.2.3-foo', 'foo']))
def test_latest_tag_is_version(self):
"""
When latest is True and the tag is the version, the version and
'latest' tag should be returned.
"""
tagger = VersionTagger(['1.2.3'], latest=True)
tags = tagger.generate_tags('1.2.3')
assert_that(tags, Equals(['1.2.3', 'latest']))
def test_latest_tag_is_none(self):
"""
When latest is True and the tag is None, the version and 'latest' tag
should be returned.
"""
tagger = VersionTagger(['1.2.3'], latest=True)
tags = tagger.generate_tags(None)
assert_that(tags, Equals(['1.2.3', 'latest']))
def test_latest_tag_is_latest(self):
"""
When latest is True and the tag is 'latest', the version and 'latest'
tag should be returned.
"""
tagger = VersionTagger(['1.2.3'], latest=True)
tags = tagger.generate_tags('latest')
assert_that(tags, Equals(['1.2.3', 'latest']))
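# Illustrative sketch only (an assumption about the VersionTagger behaviour exercised
# above, not the real implementation): strip any leading version prefix from the tag,
# then re-prefix with every known version, appending the bare tag or 'latest' when
# latest=True.
def _sketch_version_tagger_generate_tags(versions, tag, latest=False):
    bare = tag
    if bare is not None:
        for version in versions:
            if bare == version or bare.startswith(version + '-'):
                bare = bare[len(version):].lstrip('-') or None
                break
    if bare is None or bare == 'latest':
        return list(versions) + (['latest'] if latest else [])
    return ['{0}-{1}'.format(v, bare) for v in versions] + ([bare] if latest else [])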
def assert_output_lines(capfd, stdout_lines, stderr_lines=[]):
out, err = capfd.readouterr()
if sys.version_info < (3,):
# FIXME: I'm not entirely sure how to determine the correct encoding
# here and not sure whether the right answer comes from Python itself
# or pytest. For now, UTF-8 seems like a safe bet.
out = out.encode('utf-8')
err = err.encode('utf-8')
out_lines = out.split('\n')
assert_that(out_lines.pop(), Equals(''))
assert_that(out_lines, Equals(stdout_lines))
err_lines = err.split('\n')
assert_that(err_lines.pop(), Equals(''))
assert_that(err_lines, Equals(stderr_lines))
class TestCmdFunc(object):
def test_stdout(self, capfd):
"""
When a command writes to stdout, that output should be captured and
written to Python's stdout.
"""
cmd(['echo', 'Hello, World!'])
assert_output_lines(
capfd, stdout_lines=['Hello, World!'], stderr_lines=[])
def test_stderr(self, capfd):
"""
When a command writes to stderr, that output should be captured and
written to Python's stderr.
"""
# Have to do something a bit more complicated to echo to stderr
cmd(['awk', 'BEGIN { print "Hello, World!" > "/dev/stderr" }'])
assert_output_lines(
capfd, stdout_lines=[], stderr_lines=['Hello, World!'])
def test_stdout_unicode(self, capfd):
"""
When a command writes Unicode to a standard stream, that output should
be captured and encoded correctly.
"""
cmd(['echo', 'á, é, í, ó, ú, ü, ñ, ¿, ¡'])
assert_output_lines(capfd, ['á, é, í, ó, ú, ü, ñ, ¿, ¡'])
def test_error(self, capfd):
"""
When a command exits with a non-zero return code, an error should be
raised with the correct information about the result of the command.
The stdout or stderr output should still be captured.
"""
args = ['awk', 'BEGIN { print "errored"; exit 1 }']
with ExpectedException(CalledProcessError, MatchesStructure(
cmd=Equals(args),
returncode=Equals(1),
output=Equals(b'errored\n'))):
cmd(args)
assert_output_lines(capfd, ['errored'], [])
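# Illustrative sketch only (an assumption, not the code under test): the behaviour the
# TestCmdFunc cases describe could look roughly like this under Python 3 -- run the
# command, forward the captured stdout/stderr to Python's own streams, and raise
# CalledProcessError carrying the captured output on a non-zero exit code.
def _sketch_cmd(args):
    import subprocess
    process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = process.communicate()
    sys.stdout.write(out.decode('utf-8'))
    sys.stderr.write(err.decode('utf-8'))
    if process.returncode != 0:
        raise subprocess.CalledProcessError(process.returncode, args, output=out)
    return out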
class TestGenerateTagsFunc(object):
def test_no_tags(self):
"""
When no parameters are provided, and an image name without a tag is
passed, a list should be returned with the given image name unchanged.
"""
tags = generate_tags('test-image')
assert_that(tags, Equals(['test-image']))
def test_no_tags_existing_tag(self):
"""
        When no parameters are provided, and an image name with a tag is passed,
a list should be returned with the given image tag unchanged.
"""
tags = generate_tags('test-image:abc')
assert_that(tags, Equals(['test-image:abc']))
def test_tags(self):
"""
When the tags parameter is provided, and an image name without a tag is
passed, a list of image tags should be returned with the tags appended.
"""
tags = generate_tags('test-image', tags=['abc', 'def'])
assert_that(tags, Equals(['test-image:abc', 'test-image:def']))
def test_tag_existing_tag(self):
"""
        When the tags parameter is provided, and an image name with a tag is
passed, a list of image tags should be returned with the tag replaced
by the new tags.
"""
tags = generate_tags('test-image:abc', tags=['def', 'ghi'])
assert_that(tags, Equals(['test-image:def', 'test-image:ghi']))
# FIXME?: The following 2 tests describe a weird, unintuitive edge case :-(
# Passing `--tag latest` with `--version <version>` but *not*
# `--version-latest` doesn't actually get you the tag 'latest' but rather
# effectively removes any existing tag.
def test_version_new_tag_is_latest(self, capfd):
"""
When a version is provided as well as a new tag, and the new tag is
'latest', then the image should be tagged with the new version only.
"""
version_tagger = VersionTagger(['1.2.3'])
tags = generate_tags(
'test-image:abc', tags=['latest'], version_tagger=version_tagger)
assert_that(tags, Equals(['test-image:1.2.3']))
def test_version_new_tag_is_latest_with_version(self, capfd):
"""
When a version is provided as well as a new tag, and the new tag is
'latest' plus the version, then the image should be tagged with the
new version only.
"""
version_tagger = VersionTagger(['1.2.3'])
tags = generate_tags('test-image:abc', tags=['1.2.3-latest'],
version_tagger=version_tagger)
assert_that(tags, Equals(['test-image:1.2.3']))
class TestDockerCiDeployRunner(object):
def test_tag(self, capfd):
"""
When ``tag`` is called, the Docker CLI should be called with the 'tag'
command and the source and target tags.
"""
runner = DockerCiDeployRunner(executable='echo')
runner.docker_tag('foo', 'bar')
assert_output_lines(capfd, ['tag foo bar'])
def test_tag_verbose(self, capfd):
"""
When ``tag`` is called, and verbose is True, a message should be
logged.
"""
runner = DockerCiDeployRunner(executable='echo', verbose=True)
runner.docker_tag('foo', 'bar')
assert_output_lines(
capfd, ['Tagging "foo" as "bar"...', 'tag foo bar'])
def test_tag_dry_run(self, capfd):
"""
When ``tag`` is called, and dry_run is True, the Docker command should
be printed but not executed.
"""
runner = DockerCiDeployRunner(dry_run=True)
runner.docker_tag('foo', 'bar')
assert_output_lines(capfd, ['docker tag foo bar'])
def test_tag_same_tag(self, capfd):
"""
When ``tag`` is called, and the output tag is the same as the input
tag, the command should not be executed.
"""
runner = DockerCiDeployRunner(executable='echo')
runner.docker_tag('bar', 'bar')
assert_output_lines(capfd, [], [])
def test_tag_same_tag_verbose(self, capfd):
"""
When ``tag`` is called, and the output tag is the same as the input
tag, and verbose is True, a message should be logged that explains that
no tagging will be done.
"""
runner = DockerCiDeployRunner(executable='echo', verbose=True)
runner.docker_tag('bar', 'bar')
assert_output_lines(capfd, ['Not tagging "bar" as itself'])
def test_push(self, capfd):
"""
When ``push`` is called, the Docker CLI should be called with the
'push' command and the image tag.
"""
runner = DockerCiDeployRunner(executable='echo')
runner.docker_push('foo')
assert_output_lines(capfd, ['push foo'])
def test_push_verbose(self, capfd):
"""
When ``push`` is called, and verbose is True, a message should be
logged.
"""
runner = DockerCiDeployRunner(executable='echo', verbose=True)
runner.docker_push('foo')
assert_output_lines(capfd, ['Pushing tag "foo"...', 'push foo'])
def test_push_dry_run(self, capfd):
"""
When ``push`` is called, and dry_run is True, the Docker command should
be printed but not executed.
"""
runner = DockerCiDeployRunner(dry_run=True)
runner.docker_push('foo')
assert_output_lines(capfd, ['docker push foo'])
class TestMainFunc(object):
def test_args(self, capfd):
"""
When the main function is given a set of common arguments, the script
should be run as expected.
"""
main([
'--registry', 'registry.example.com:5000',
'--executable', 'echo',
'test-image:abc'
])
assert_output_lines(capfd, [
'tag test-image:abc registry.example.com:5000/test-image:abc',
'push registry.example.com:5000/test-image:abc'
])
def test_version(self, capfd):
"""
When the --version flag is used, the version is added to the image tag.
"""
main([
'--executable', 'echo',
'--version', '1.2.3',
'test-image:abc'
])
assert_output_lines(capfd, [
'tag test-image:abc test-image:1.2.3-abc',
'push test-image:1.2.3-abc'
])
def test_semver_precision(self, capfd):
"""
When the --semver-precision option is used, the semver versions are
generated with the correct precision.
"""
main([
'--executable', 'echo',
'--version', '1.2.3',
'--version-semver',
'--semver-precision', '2',
'test-image:abc'
])
assert_output_lines(capfd, [
'tag test-image:abc test-image:1.2.3-abc',
'tag test-image:abc test-image:1.2-abc',
'push test-image:1.2.3-abc',
'push test-image:1.2-abc',
])
def test_semver_precision_default(self, capfd):
"""
When the --version-semver flag is used, but the --semver-precision
option is not, the semver precision should default to 1.
"""
main([
'--executable', 'echo',
'--version', '1.2.3',
'--version-semver',
'test-image:abc'
])
assert_output_lines(capfd, [
'tag test-image:abc test-image:1.2.3-abc',
'tag test-image:abc test-image:1.2-abc',
'tag test-image:abc test-image:1-abc',
'push test-image:1.2.3-abc',
'push test-image:1.2-abc',
'push test-image:1-abc',
])
def test_image_required(self, capfd):
"""
When the main function is given no image argument, it should exit with
a return code of 2 and inform the user of the missing argument.
"""
with ExpectedException(SystemExit, MatchesStructure(code=Equals(2))):
main(['--tag', 'abc'])
out, err = capfd.readouterr()
assert_that(out, Equals(''))
# More useful error message added to argparse in Python 3
if sys.version_info >= (3,):
# Use re.DOTALL so that '.*' also matches newlines
assert_that(err, MatchesRegex(
r'.*error: the following arguments are required: image$',
re.DOTALL
))
else:
assert_that(
err, MatchesRegex(r'.*error: too few arguments$', re.DOTALL))
def test_version_latest_requires_version(self, capfd):
"""
When the main function is given the `--version-latest` option but no
`--version` option, it should exit with a return code of 2 and inform
the user of the missing option.
"""
with ExpectedException(SystemExit, MatchesStructure(code=Equals(2))):
main(['--version-latest', 'test-image:abc'])
out, err = capfd.readouterr()
assert_that(out, Equals(''))
assert_that(err, MatchesRegex(
r'.*error: the --version-latest option requires --version$',
re.DOTALL
))
def test_version_latest_requires_non_empty_version(self, capfd):
"""
When the main function is given the `--version-latest` option and an
empty `--version` option, it should exit with a return code of 2 and
inform the user of the missing option.
"""
with ExpectedException(SystemExit, MatchesStructure(code=Equals(2))):
main(['--version-latest', '--version', '', 'test-image:abc'])
out, err = capfd.readouterr()
assert_that(out, Equals(''))
assert_that(err, MatchesRegex(
r'.*error: the --version-latest option requires --version$',
re.DOTALL
))
def test_version_semver_requires_version(self, capfd):
"""
When the main function is given the `--version-semver` option but no
`--version` option, it should exit with a return code of 2 and inform
the user of the missing option.
"""
with ExpectedException(SystemExit, MatchesStructure(code=Equals(2))):
main(['--version-semver', 'test-image:abc'])
out, err = capfd.readouterr()
assert_that(out, Equals(''))
assert_that(err, MatchesRegex(
r'.*error: the --version-semver option requires --version$',
re.DOTALL
))
def test_version_semver_requires_non_empty_version(self, capfd):
"""
When the main function is given the `--version-semver` option and an
empty `--version` option, it should exit with a return code of 2 and
inform the user of the missing option.
"""
with ExpectedException(SystemExit, MatchesStructure(code=Equals(2))):
main(['--version-semver', '--version', '', 'test-image:abc'])
out, err = capfd.readouterr()
assert_that(out, Equals(''))
assert_that(err, MatchesRegex(
r'.*error: the --version-semver option requires --version$',
re.DOTALL
))
def test_semver_precision_requires_version_semver(self, capfd):
"""
When the main function is given the `--semver-precision` option but no
`--version-semver` option, it should exit with a return code of 2 and
inform the user of the missing option.
"""
with ExpectedException(SystemExit, MatchesStructure(code=Equals(2))):
main(['--semver-precision', '2', 'test-image:abc'])
out, err = capfd.readouterr()
assert_that(out, Equals(''))
assert_that(err, MatchesRegex(
r'.*error: the --semver-precision option requires '
r'--version-semver$',
re.DOTALL
))
def test_semver_zero_requires_version_semver(self, capfd):
"""
When the main function is given the `--semver-zero` option but no
`--version-semver` option, it should exit with a return code of 2 and
inform the user of the missing option.
"""
with ExpectedException(SystemExit, MatchesStructure(code=Equals(2))):
main(['--semver-zero', 'test-image:abc'])
out, err = capfd.readouterr()
assert_that(out, Equals(''))
assert_that(err, MatchesRegex(
r'.*error: the --semver-zero option requires --version-semver$',
re.DOTALL
))
def test_many_tags(self, capfd):
"""
When the main function is given multiple tag arguments in different
ways, the tags should be correctly passed through to the runner.
"""
main([
'--tag', 'abc', 'def',
'-t', 'ghi',
'--executable', 'echo',
'test-image:xyz'
])
assert_output_lines(capfd, [
'tag test-image:xyz test-image:abc',
'tag test-image:xyz test-image:def',
'tag test-image:xyz test-image:ghi',
'push test-image:abc',
'push test-image:def',
'push test-image:ghi'
])
def test_tag_requires_arguments(self, capfd):
"""
When the main function is given the `--tag` option without any
arguments, an error should be raised.
"""
with ExpectedException(SystemExit, MatchesStructure(code=Equals(2))):
main(['--tag', '--', 'test-image'])
out, err = capfd.readouterr()
assert_that(out, Equals(''))
assert_that(err, MatchesRegex(
r'.*error: argument -t/--tag: expected at least one argument$',
re.DOTALL
))
def test_version_semver_requires_argument(self, capfd):
"""
        When the main function is given the `--semver-precision` option without
        an argument, an error should be raised.
"""
with ExpectedException(SystemExit, MatchesStructure(code=Equals(2))):
main([
'--version', '1.2.3',
'--version-semver',
'--semver-precision',
'--', 'test-image',
])
out, err = capfd.readouterr()
assert_that(out, Equals(''))
assert_that(err, MatchesRegex(
r'.*error: argument -P/--semver-precision: expected one argument$',
re.DOTALL
))
def test_registry_requires_argument(self, capfd):
"""
When the main function is given the `--registry` option without an
argument, an error should be raised.
"""
with ExpectedException(SystemExit, MatchesStructure(code=Equals(2))):
main(['--registry', '--', 'test-image'])
out, err = capfd.readouterr()
assert_that(out, Equals(''))
assert_that(err, MatchesRegex(
r'.*error: argument -r/--registry: expected one argument$',
re.DOTALL
))
def test_executable_requires_argument(self, capfd):
"""
When the main function is given the `--executable` option without an
argument, an error should be raised.
"""
with ExpectedException(SystemExit, MatchesStructure(code=Equals(2))):
main(['--executable', '--', 'test-image'])
out, err = capfd.readouterr()
assert_that(out, Equals(''))
assert_that(err, MatchesRegex(
r'.*error: argument --executable: expected one argument$',
re.DOTALL
))
def test_deprecated_tag_version(self, capfd):
"""
When the main function is given the `--tag-version` option, the option
should be used as the `--version` option and a deprecation warning
should be printed.
"""
main([
'--executable', 'echo',
'--tag-version', '1.2.3',
'test-image',
])
assert_output_lines(capfd, [
'tag test-image test-image:1.2.3',
'push test-image:1.2.3',
], [
('DEPRECATED: the --tag-version option is deprecated and will be '
'removed in the next release. Please use --version instead')
])
def test_deprecated_tag_latest(self, capfd):
"""
When the main function is given the `--tag-latest` option, the option
should be used as the `--version-latest` option and a deprecation
warning should be printed.
"""
main([
'--executable', 'echo',
'--version', '1.2.3',
'--tag-latest',
'test-image',
])
assert_output_lines(capfd, [
'tag test-image test-image:1.2.3',
'tag test-image test-image:latest',
'push test-image:1.2.3',
'push test-image:latest',
], [
('DEPRECATED: the --tag-latest option is deprecated and will be '
'removed in the next release. Please use --version-latest '
'instead')
])
def test_deprecated_tag_semver(self, capfd):
"""
When the main function is given the `--tag-semver` option, the option
should be used as the `--version-semver` option and a deprecation
warning should be printed.
"""
main([
'--executable', 'echo',
'--version', '1.2.3',
'--tag-semver',
'test-image',
])
assert_output_lines(capfd, [
'tag test-image test-image:1.2.3',
'tag test-image test-image:1.2',
'tag test-image test-image:1',
'push test-image:1.2.3',
'push test-image:1.2',
'push test-image:1',
], [
('DEPRECATED: the --tag-semver option is deprecated and will be '
'removed in the next release. Please use --version-semver '
'instead')
])
def test_version_take_precedence_over_deprecated_tag_version(self, capfd):
"""
When the main function is given the `--version` and `--tag-version`
options, the `--version` value takes precedence over the
`--tag-version` value.
"""
main([
'--executable', 'echo',
'--version', '1.2.3',
'--tag-version', '4.5.6',
'test-image',
])
assert_output_lines(capfd, [
'tag test-image test-image:1.2.3',
'push test-image:1.2.3',
], [
('DEPRECATED: the --tag-version option is deprecated and will be '
'removed in the next release. Please use --version instead')
])
| mit | 2,012,976,566,543,691,300 | 35.216247 | 79 | 0.57549 | false |
cmacmackin/isoft | plotting/layers.py | 1 | 1656 | #
# layers.py
# This file is part of ISOFT.
#
# Copyright 2018 Chris MacMackin <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
import numpy as np
import numpy.ma as ma
def compute_layers(shelf, vertical_resolution=300):
"""Computes the internal layers or age field for an ice shelf from
the Taylor coefficients.
"""
xc = shelf.grid
zc = np.linspace(np.min(shelf.b), np.max(shelf.s), vertical_resolution)
xx, zz = np.meshgrid(xc, zc)
shelf_domain = np.logical_or(np.greater(zz, shelf.s), np.less(zz, shelf.b))
x = ma.array(xx, mask=shelf_domain, copy=False)
z = ma.array(zz, mask=shelf_domain, copy=False)
kappa = shelf.kappa
# This isn't really the most efficient way to calculate the Taylor
# series, but array broadcasting was giving me a headache.
k = np.zeros_like(z)
for i in range(1, 1+kappa.shape[0]):
k += kappa[i-1] * (shelf.s - z)**i
return x, z, k
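# Sketch of an alternative to the power loop above (an illustration, not part of the
# original module): Horner's rule evaluates the same Taylor sum without recomputing
# (s - z)**i at every order, and relies on exactly the same broadcasting between the
# coefficients and (s - z) that the loop already uses.
def _taylor_sum_horner(kappa, s, z):
    """Evaluate sum_i kappa[i-1]*(s - z)**i via Horner's rule."""
    ds = s - z
    k = np.zeros_like(z)
    for coeff in kappa[::-1]:
        k = (k + coeff) * ds
    return k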
| gpl-3.0 | 8,813,709,389,383,055,000 | 35 | 79 | 0.689614 | false |
OCA/account-analytic | analytic_tag_dimension_enhanced/models/analytic.py | 1 | 6808 | # Copyright 2019 Ecosoft Co., Ltd (http://ecosoft.co.th/)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html)
from odoo import models, api, fields, _
from odoo.exceptions import ValidationError
class AccountAnalyticDimension(models.Model):
_inherit = 'account.analytic.dimension'
ref_model_id = fields.Many2one(
comodel_name='ir.model',
string='Ref Model',
help="Select model if you want to use it to create analytic tags, "
"each tag will have reference to the data record in that model.\n"
"For example, if you select Department (hr.department) then click "
"Create Tags button, tags will be created from each department "
" and also has resource_ref to the department record",
)
filtered_field_ids = fields.Many2many(
comodel_name='ir.model.fields',
string='Filtered by fields',
domain="[('model_id', '=', ref_model_id),"
"('ttype', '=', 'many2one')]",
help="Filtered listing tags by fields of this model, based on value "
"of selected analytic tags in working document",
)
required = fields.Boolean(
string='Required',
default=False,
help="If required, this dimension needed to be "
"selected in working document",
)
by_sequence = fields.Boolean(
default=False,
help="If checked, this dimemsion's tags will be available "
"only when previous dimension's tags is selected",
)
sequence = fields.Integer(
help="This field works with By Sequence",
)
@api.constrains('by_sequence', 'sequence')
def _check_sequence(self):
seq_list = self.search([('by_sequence', '=', True)]).mapped('sequence')
if len(seq_list) != len(set(seq_list)):
raise ValidationError(_('Duplicated dimension sequences'))
def create_analytic_tags(self):
"""Helper function to create tags based on ref_model_id"""
self.ensure_one()
if not self.ref_model_id:
return
Tag = self.env['account.analytic.tag']
model = self.ref_model_id.model
TagModel = self.env[model]
# Delete orphan tags
self.analytic_tag_ids.filtered(lambda l: not l.resource_ref or
l.resource_ref._name != model).unlink()
tag_res_ids = [x.resource_ref.id for x in self.analytic_tag_ids]
recs = TagModel.search([('id', 'not in', tag_res_ids)])
for rec in recs:
Tag.create({'name': rec.display_name,
'analytic_dimension_id': self.id,
'resource_ref': '%s,%s' % (model, rec.id)})
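        # Note (illustration): resource_ref is stored as '<model>,<id>', so a tag
        # created from the hr.department example in the field help above would
        # carry e.g. 'hr.department,7'.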
class AnalyticDimensionLine(models.AbstractModel):
_inherit = 'analytic.dimension.line'
domain_tag_ids = fields.Many2many(
comodel_name='account.analytic.tag',
compute='_compute_analytic_tags_domain',
help="Helper field, the filtered tags_ids when record is saved",
)
@api.depends(lambda self: (self._analytic_tag_field_name,)
if self._analytic_tag_field_name else ())
def _compute_analytic_tags_domain(self):
res = {}
for rec in self:
tag_ids = []
res = rec._dynamic_domain_analytic_tags()
if res['domain'][self._analytic_tag_field_name]:
tag_ids = res['domain'][self._analytic_tag_field_name][0][2]
rec.domain_tag_ids = tag_ids
return res
def _dynamic_domain_analytic_tags(self):
"""
        - Tags of dimensions without by_sequence are always shown
        - Tags of by_sequence dimensions are only shown for the next dimension in sequence
        - Optionally, the next dimension's tags are filtered by the previously selected tags
"""
Dimension = self.env['account.analytic.dimension']
Tag = self.env['account.analytic.tag']
        # If there is no dimension with by_sequence, there is nothing to filter; exit
count = Dimension.search_count([('by_sequence', '=', True)])
if count == 0:
return {'domain': {self._analytic_tag_field_name: []}}
        # Find the non-by_sequence tags, which are always shown
tags = Tag.search(['|', ('analytic_dimension_id', '=', False),
('analytic_dimension_id.by_sequence', '=', False)])
# Find next dimension by_sequence
selected_tags = self[self._analytic_tag_field_name]
sequences = selected_tags.mapped('analytic_dimension_id').\
filtered('by_sequence').mapped('sequence')
cur_sequence = sequences and max(sequences) or -1
next_dimension = Dimension.search(
[('by_sequence', '=', True), ('sequence', '>', cur_sequence)],
order='sequence', limit=1)
next_tag_ids = []
if next_dimension and next_dimension.filtered_field_ids:
            # Filtered by the previously selected tags
next_tag_list = []
for field in next_dimension.filtered_field_ids:
matched_tags = selected_tags.filtered(
lambda l: l.resource_ref and
l.resource_ref._name == field.relation)
tag_resources = matched_tags.mapped('resource_ref')
res_ids = tag_resources and tag_resources.ids or []
tag_ids = next_dimension.analytic_tag_ids.filtered(
lambda l: l.resource_ref[field.name].id in res_ids).ids
next_tag_list.append(set(tag_ids))
# "&" to all in next_tag_list
next_tag_ids = list(set.intersection(*map(set, next_tag_list)))
else:
next_tag_ids = next_dimension.analytic_tag_ids.ids
# Tags from non by_sequence dimension and next dimension
tag_ids = tags.ids + next_tag_ids
domain = [('id', 'in', tag_ids)]
return {'domain': {self._analytic_tag_field_name: domain}}
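        # Illustrative result (hypothetical ids and field name): with non-sequence
        # tags [1, 2] and next-in-sequence tags [7, 8] passing the filter, this
        # returns
        #     {'domain': {'analytic_tag_ids': [('id', 'in', [1, 2, 7, 8])]}}
        # assuming the concrete model's _analytic_tag_field_name is 'analytic_tag_ids'.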
class AccountAnalyticTag(models.Model):
_inherit = 'account.analytic.tag'
resource_ref = fields.Reference(
selection=lambda self: [(model.model, model.name)
for model in self.env['ir.model'].search([])],
string='Record',
)
def _check_analytic_dimension(self):
super()._check_analytic_dimension()
# Test all required dimension is selected
Dimension = self.env['account.analytic.dimension']
req_dimensions = Dimension.search([('required', '=', True)])
tags_dimension = self.filtered('analytic_dimension_id.required')
dimensions = tags_dimension.mapped('analytic_dimension_id')
missing = req_dimensions - dimensions
if missing:
raise ValidationError(
_("Following dimension(s) not selected: "
"%s") % ', '.join(missing.mapped('name')))
| agpl-3.0 | -2,963,247,440,782,647,300 | 42.641026 | 79 | 0.594301 | false |
digwanderlust/pants | src/python/pants/backend/jvm/tasks/scalastyle.py | 1 | 6152 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from pants.backend.jvm.tasks.jvm_tool_task_mixin import JvmToolTaskMixin
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.cache_manager import VersionedTargetSet
from pants.base.exceptions import TaskError
from pants.base.target import Target
from pants.process.xargs import Xargs
from pants.util.dirutil import touch
# TODO: Move somewhere more general?
class FileExcluder(object):
def __init__(self, excludes_path, log):
self.excludes = set()
if excludes_path:
if not os.path.exists(excludes_path):
raise TaskError('Excludes file does not exist: {0}'.format(excludes_path))
with open(excludes_path) as fh:
for line in fh.readlines():
pattern = line.strip()
if pattern and not pattern.startswith('#'):
self.excludes.add(re.compile(pattern))
log.debug('Exclude pattern: {pattern}'.format(pattern=pattern))
else:
log.debug('No excludes file specified. All scala sources will be checked.')
def should_include(self, source_filename):
for exclude in self.excludes:
if exclude.match(source_filename):
return False
return True
class Scalastyle(NailgunTask):
"""Checks scala source files to ensure they're stylish.
Scalastyle only checks scala sources in non-synthetic targets.
"""
class UnspecifiedConfig(TaskError):
def __init__(self):
super(Scalastyle.UnspecifiedConfig, self).__init__(
'Path to scalastyle config file must be specified.')
class MissingConfig(TaskError):
def __init__(self, path):
super(Scalastyle.MissingConfig, self).__init__(
'Scalastyle config file does not exist: {0}.'.format(path))
_SCALA_SOURCE_EXTENSION = '.scala'
_MAIN = 'org.scalastyle.Main'
@classmethod
def register_options(cls, register):
super(Scalastyle, cls).register_options(register)
register('--skip', action='store_true', help='Skip scalastyle.')
register('--config', advanced=True, help='Path to scalastyle config file.')
register('--excludes', advanced=True,
help='Path to optional scalastyle excludes file. Each line is a regex. (Blank lines '
'and lines starting with \'#\' are ignored.) A file is skipped if its path '
'(relative to the repo root) matches any of these regexes.')
register('--jvm-options', action='append', metavar='<option>...', advanced=True,
help='Run scalastyle with these extra jvm options.')
cls.register_jvm_tool(register, 'scalastyle')
@classmethod
def get_non_synthetic_scala_targets(cls, targets):
return filter(
lambda target: isinstance(target, Target)
and target.has_sources(cls._SCALA_SOURCE_EXTENSION)
and (not target.is_synthetic),
targets)
@classmethod
def get_non_excluded_scala_sources(cls, scalastyle_excluder, scala_targets):
# Get all the sources from the targets with the path relative to build root.
scala_sources = list()
for target in scala_targets:
scala_sources.extend(target.sources_relative_to_buildroot())
# make sure only the sources with the .scala extension stay.
scala_sources = filter(
lambda filename: filename.endswith(cls._SCALA_SOURCE_EXTENSION),
scala_sources)
# filter out all sources matching exclude patterns, if specified in config.
scala_sources = filter(scalastyle_excluder.should_include, scala_sources)
return scala_sources
def __init__(self, *args, **kwargs):
super(Scalastyle, self).__init__(*args, **kwargs)
self._results_dir = os.path.join(self.workdir, 'results')
def _create_result_file(self, target):
result_file = os.path.join(self._results_dir, target.id)
touch(result_file)
return result_file
@property
def cache_target_dirs(self):
return True
def execute(self):
if self.get_options().skip:
self.context.log.info('Skipping scalastyle.')
return
# Don't even try and validate options if we're irrelevant.
targets = self.get_non_synthetic_scala_targets(self.context.targets())
if not targets:
return
with self.invalidated(targets) as invalidation_check:
invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
scalastyle_config = self.validate_scalastyle_config()
scalastyle_excluder = self.create_file_excluder()
self.context.log.debug('Non synthetic scala targets to be checked:')
for target in invalid_targets:
self.context.log.debug(' {address_spec}'.format(address_spec=target.address.spec))
scala_sources = self.get_non_excluded_scala_sources(scalastyle_excluder, invalid_targets)
self.context.log.debug('Non excluded scala sources to be checked:')
for source in scala_sources:
self.context.log.debug(' {source}'.format(source=source))
if scala_sources:
def call(srcs):
cp = self.tool_classpath('scalastyle')
return self.runjava(classpath=cp,
main=self._MAIN,
jvm_options=self.get_options().jvm_options,
args=['-c', scalastyle_config] + srcs)
result = Xargs(call).execute(scala_sources)
if result != 0:
raise TaskError('java {entry} ... exited non-zero ({exit_code})'.format(
entry=Scalastyle._MAIN, exit_code=result))
def validate_scalastyle_config(self):
scalastyle_config = self.get_options().config
if not scalastyle_config:
raise Scalastyle.UnspecifiedConfig()
if not os.path.exists(scalastyle_config):
raise Scalastyle.MissingConfig(scalastyle_config)
return scalastyle_config
def create_file_excluder(self):
return FileExcluder(self.get_options().excludes, self.context.log)
| apache-2.0 | 5,968,127,118,785,450,000 | 36.975309 | 98 | 0.677503 | false |
wernwa/lwfa-las-cli-log | PV_CONN.py | 1 | 1648 |
import sys
sys.path.insert(0, './')
import epics
import time
#from epics_device import PowerSupply
#from physics_device import Magnet
#import json
import thread
class PV_CONN(epics.PV):
def __init__(self, *args, **kwargs):
super(PV_CONN, self).__init__(*args, **kwargs)
self.conn=False
self.connection_callbacks.append(self.onConnectionChange)
def onConnectionChange(self, pvname=None, conn= None, **kws):
#sys.stdout.write('PV connection status changed: %s %s\n' % (pvname, repr(conn)))
#sys.stdout.flush()
self.conn=conn
if conn==False:
#print 'connection lost'
thread.start_new_thread(self.reconnect,())
def reconnect(self):
try:
self.wait_for_connection()
#self.connect()
except Exception as e:
print 'Err: ',e
def get(self, *args, **kwargs):
if self.conn==True:
return super(PV_CONN, self).get(*args, **kwargs)
else:
return None
# increase the values only stepwise 0.1
# def put(self, new_v, *args, **kwargs):
# v = self.value
# diff = new_v-v
# if diff<0: step=-0.1
# elif diff>0: step=0.1
# else: return 0
#
# #print 'curr value',v
# #print 'new value',new_v
# #print 'diff',diff
#
# while abs(diff)>=0.1:
# v+=step
# ret = super(PV_CONN, self).put(v,*args, **kwargs)
# diff = v-new_v
# time.sleep(0.05)
# #print v
#
# if diff==0: return ret
#
# return super(PV_CONN, self).put(new_v,*args, **kwargs)
| gpl-3.0 | 6,725,211,146,869,991,000 | 24.75 | 90 | 0.546117 | false |
MilkywayPwns/infprj2 | infprj2/mainmenu.py | 1 | 2308 | # Main menu python file
import pygame
import translate
import database
import options
import packetevent
# Import button lib
import button
def update(game):
pass
def init(game):
pass
def loadgame(game):
game.players.clear()
res = database.execute_query("SELECT * FROM savegames ORDER BY id DESC")
game.load(res[0]["id"])
def draw(game):
bg = pygame.image.load("assets/img/bg.png")
game.screen.blit(bg,(0,0))
# button variables
btn_width = game.width / 5;
btn_height = game.height / 10;
btn_x_off = (game.width / 2) - (btn_width / 2)
btn_y_off = lambda idx: (game.height / 10) * (idx + 1) + (idx * 10)
# scr, x offset, y offset, width, height, text, fontsize, backcolor, frontcolor, callback
button.draw(game, btn_x_off, btn_y_off(0), int(btn_width), int(btn_height), "Start", 20, (25,25,25), (255,255,255), lambda x: game.set_state(2))
button.draw(game, btn_x_off, btn_y_off(1), int(btn_width), int(btn_height), translate.translate("LOAD"), 20, (25,25,25), (255,255,255), lambda x: loadgame(x))
# button.draw(game, btn_x_off, btn_y_off(2), int(btn_width), int(btn_height), "Multiplayer", 20, (25,25,25), (255,255,255), lambda x: game.set_state(6))
button.draw(game, btn_x_off, btn_y_off(2), int(btn_width), int(btn_height), "Multiplayer", 20, (25,25,25), (255,255,255), lambda x: packetevent.connect(x, "178.62.226.124", 61022))
button.draw(game, btn_x_off, btn_y_off(3), int(btn_width), int(btn_height), translate.translate("OPTIONS"), 20, (25,25,25), (255,255,255), lambda x: game.set_state(1))
button.draw(game, btn_x_off, btn_y_off(4), int(btn_width), int(btn_height), translate.translate("INSTRUCTIONS"), 20, (25,25,25), (255,255,255), lambda x: game.set_state(7))
button.draw(game, btn_x_off, btn_y_off(5), int(btn_width), int(btn_height), "Leaderboard", 20, (25,25,25), (255,255,255), lambda x: game.set_state(5))
button.draw(game, btn_x_off, btn_y_off(6), int(btn_width), int(btn_height), translate.translate("QUIT"), 20, (25,25,25), (255,255,255), lambda x: game.exit())
# button.draw(game, btn_x_off, btn_y_off(7), int(btn_width), int(btn_height), "Afsluiten", 20, (25,25,25), (255,255,255), game.exit())
| mit | 8,542,439,063,819,060,000 | 53.97619 | 193 | 0.623917 | false |
zhujzhuo/openstack-trove | trove/tests/unittests/cluster/test_cluster_controller.py | 1 | 14001 | # Copyright 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import jsonschema
from mock import MagicMock
from mock import Mock
from mock import patch
from testtools import TestCase
from testtools.matchers import Is, Equals
from trove.cluster import models
from trove.cluster.models import Cluster
from trove.cluster.service import ClusterController
from trove.cluster import views
import trove.common.cfg as cfg
from trove.common import exception
from trove.common.strategies.cluster import strategy
from trove.common import utils
from trove.datastore import models as datastore_models
class TestClusterController(TestCase):
def setUp(self):
super(TestClusterController, self).setUp()
self.controller = ClusterController()
self.cluster = {
"cluster": {
"name": "products",
"datastore": {
"type": "mongodb",
"version": "2.4.10"
},
"instances": [
{
"flavorRef": "7",
"volume": {
"size": 1
},
},
{
"flavorRef": "7",
"volume": {
"size": 1
},
},
{
"flavorRef": "7",
"volume": {
"size": 1
},
},
{
"flavorRef": "7",
"volume": {
"size": 1
},
},
{
"flavorRef": "7",
"volume": {
"size": 1
},
}
]
}
}
self.add_shard = {
"add_shard": {}
}
def test_get_schema_create(self):
schema = self.controller.get_schema('create', self.cluster)
self.assertIsNotNone(schema)
self.assertTrue('cluster' in schema['properties'])
def test_get_schema_action_add_shard(self):
schema = self.controller.get_schema('add_shard', self.add_shard)
self.assertIsNotNone(schema)
self.assertTrue('add_shard' in schema['properties'])
def test_validate_create(self):
body = self.cluster
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))
def test_validate_add_shard(self):
body = self.add_shard
schema = self.controller.get_schema('add_shard', body)
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))
def test_validate_create_blankname(self):
body = self.cluster
body['cluster']['name'] = " "
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertFalse(validator.is_valid(body))
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
self.assertThat(len(errors), Is(1))
self.assertThat(errors[0].message,
Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'"))
def test_validate_create_blank_datastore(self):
body = self.cluster
body['cluster']['datastore']['type'] = ""
schema = self.controller.get_schema('create', body)
validator = jsonschema.Draft4Validator(schema)
self.assertFalse(validator.is_valid(body))
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
error_messages = [error.message for error in errors]
error_paths = [error.path.pop() for error in errors]
self.assertThat(len(errors), Is(2))
self.assertIn("'' is too short", error_messages)
self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages)
self.assertIn("type", error_paths)
@patch.object(Cluster, 'create')
@patch.object(datastore_models, 'get_datastore_version')
def test_create_clusters_disabled(self,
mock_get_datastore_version,
mock_cluster_create):
body = self.cluster
tenant_id = Mock()
context = Mock()
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
datastore_version = Mock()
datastore_version.manager = 'mysql'
mock_get_datastore_version.return_value = (Mock(), datastore_version)
self.assertRaises(exception.ClusterDatastoreNotSupported,
self.controller.create,
req,
body,
tenant_id)
@patch.object(Cluster, 'create')
@patch.object(utils, 'get_id_from_href')
@patch.object(datastore_models, 'get_datastore_version')
def test_create_clusters(self,
mock_get_datastore_version,
mock_id_from_href,
mock_cluster_create):
body = self.cluster
tenant_id = Mock()
context = Mock()
req = Mock()
req.environ = Mock()
req.environ.__getitem__ = Mock(return_value=context)
datastore_version = Mock()
datastore_version.manager = 'mongodb'
datastore = Mock()
mock_get_datastore_version.return_value = (datastore,
datastore_version)
instances = [{'volume_size': 1, 'flavor_id': '1234'},
{'volume_size': 1, 'flavor_id': '1234'},
{'volume_size': 1, 'flavor_id': '1234'},
{'volume_size': 1, 'flavor_id': '1234'},
{'volume_size': 1, 'flavor_id': '1234'}]
mock_id_from_href.return_value = '1234'
mock_cluster = Mock()
mock_cluster.instances = []
mock_cluster.instances_without_server = []
mock_cluster.datastore_version.manager = 'mongodb'
mock_cluster_create.return_value = mock_cluster
self.controller.create(req, body, tenant_id)
mock_cluster_create.assert_called_with(context, 'products',
datastore, datastore_version,
instances)
@patch.object(Cluster, 'load')
def test_show_cluster(self,
mock_cluster_load):
tenant_id = Mock()
id = Mock()
context = Mock()
req = Mock()
req.environ = Mock()
req.environ.__getitem__ = Mock(return_value=context)
mock_cluster = Mock()
mock_cluster.instances = []
mock_cluster.instances_without_server = []
mock_cluster.datastore_version.manager = 'mongodb'
mock_cluster_load.return_value = mock_cluster
self.controller.show(req, tenant_id, id)
mock_cluster_load.assert_called_with(context, id)
@patch.object(Cluster, 'load')
@patch.object(Cluster, 'load_instance')
def test_show_cluster_instance(self,
mock_cluster_load_instance,
mock_cluster_load):
tenant_id = Mock()
cluster_id = Mock()
instance_id = Mock()
context = Mock()
req = Mock()
req.environ = Mock()
req.environ.__getitem__ = Mock(return_value=context)
cluster = Mock()
mock_cluster_load.return_value = cluster
cluster.id = cluster_id
self.controller.show_instance(req, tenant_id, cluster_id, instance_id)
mock_cluster_load_instance.assert_called_with(context, cluster.id,
instance_id)
@patch.object(Cluster, 'load')
def test_delete_cluster(self, mock_cluster_load):
tenant_id = Mock()
cluster_id = Mock()
req = MagicMock()
cluster = Mock()
mock_cluster_load.return_value = cluster
self.controller.delete(req, tenant_id, cluster_id)
        self.assertTrue(cluster.delete.called)
class TestClusterControllerWithStrategy(TestCase):
def setUp(self):
super(TestClusterControllerWithStrategy, self).setUp()
self.controller = ClusterController()
self.cluster = {
"cluster": {
"name": "products",
"datastore": {
"type": "mongodb",
"version": "2.4.10"
},
"instances": [
{
"flavorRef": "7",
"volume": {
"size": 1
},
},
{
"flavorRef": "7",
"volume": {
"size": 1
},
},
{
"flavorRef": "7",
"volume": {
"size": 1
},
},
{
"flavorRef": "7",
"volume": {
"size": 1
},
},
{
"flavorRef": "7",
"volume": {
"size": 1
},
}
]
}
}
def tearDown(self):
super(TestClusterControllerWithStrategy, self).tearDown()
cfg.CONF.clear_override('cluster_support', group='mongodb')
cfg.CONF.clear_override('api_strategy', group='mongodb')
@patch.object(datastore_models, 'get_datastore_version')
@patch.object(models.Cluster, 'create')
def test_create_clusters_disabled(self,
mock_cluster_create,
mock_get_datastore_version):
cfg.CONF.set_override('cluster_support', False, group='mongodb')
body = self.cluster
tenant_id = Mock()
context = Mock()
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
datastore_version = Mock()
datastore_version.manager = 'mongodb'
mock_get_datastore_version.return_value = (Mock(), datastore_version)
self.assertRaises(exception.TroveError, self.controller.create, req,
body, tenant_id)
@patch.object(views.ClusterView, 'data', return_value={})
@patch.object(datastore_models, 'get_datastore_version')
@patch.object(models.Cluster, 'create')
def test_create_clusters_enabled(self,
mock_cluster_create,
mock_get_datastore_version,
mock_cluster_view_data):
cfg.CONF.set_override('cluster_support', True, group='mongodb')
body = self.cluster
tenant_id = Mock()
context = Mock()
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
datastore_version = Mock()
datastore_version.manager = 'mongodb'
mock_get_datastore_version.return_value = (Mock(), datastore_version)
mock_cluster = Mock()
mock_cluster.datastore_version.manager = 'mongodb'
mock_cluster_create.return_value = mock_cluster
self.controller.create(req, body, tenant_id)
@patch.object(models.Cluster, 'load')
def test_controller_action_no_strategy(self,
mock_cluster_load):
body = {'do_stuff2': {}}
tenant_id = Mock()
context = Mock()
id = Mock()
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
cluster = Mock()
cluster.datastore_version.manager = 'mongodb'
mock_cluster_load.return_value = cluster
self.assertRaises(exception.TroveError, self.controller.action, req,
body, tenant_id, id)
@patch.object(strategy, 'load_api_strategy')
@patch.object(models.Cluster, 'load')
def test_controller_action_found(self,
mock_cluster_load,
mock_cluster_api_strategy):
body = {'do_stuff': {}}
tenant_id = Mock()
context = Mock()
id = Mock()
req = Mock()
req.environ = MagicMock()
req.environ.get = Mock(return_value=context)
cluster = Mock()
cluster.datastore_version.manager = 'mongodb'
mock_cluster_load.return_value = cluster
strat = Mock()
do_stuff_func = Mock()
strat.cluster_controller_actions = \
{'do_stuff': do_stuff_func}
mock_cluster_api_strategy.return_value = strat
self.controller.action(req, body, tenant_id, id)
self.assertEqual(1, do_stuff_func.call_count)
| apache-2.0 | 4,534,978,132,679,366,700 | 35.178295 | 79 | 0.512892 | false |
kubeflow/kfp-tekton-backend | components/aws/sagemaker/workteam/src/workteam.py | 1 | 2275 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from common import _utils
def create_parser():
  parser = argparse.ArgumentParser(description='SageMaker Workteam')
_utils.add_default_client_arguments(parser)
parser.add_argument('--team_name', type=str, required=True, help='The name of your work team.')
parser.add_argument('--description', type=str, required=True, help='A description of the work team.')
parser.add_argument('--user_pool', type=str, required=False, help='An identifier for a user pool. The user pool must be in the same region as the service that you are calling.', default='')
parser.add_argument('--user_groups', type=str, required=False, help='A list of identifiers for user groups separated by commas.', default='')
parser.add_argument('--client_id', type=str, required=False, help='An identifier for an application client. You must create the app client ID using Amazon Cognito.', default='')
parser.add_argument('--sns_topic', type=str, required=False, help='The ARN for the SNS topic to which notifications should be published.', default='')
parser.add_argument('--tags', type=_utils.yaml_or_json_str, required=False, help='An array of key-value pairs, to categorize AWS resources.', default={})
return parser
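# Illustrative invocation only (all values are placeholders; the --region and
# --endpoint_url flags are assumed to come from _utils.add_default_client_arguments):
#   python workteam.py --region us-east-1 \
#       --team_name my-labeling-team --description 'Private labeling workforce' \
#       --user_pool us-east-1_ExamplePool --user_groups labelers \
#       --client_id exampleclientid --tags '{"project": "demo"}'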
def main(argv=None):
parser = create_parser()
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
client = _utils.get_sagemaker_client(args.region, args.endpoint_url)
logging.info('Submitting a create workteam request to SageMaker...')
workteam_arn = _utils.create_workteam(client, vars(args))
logging.info('Workteam created.')
with open('/tmp/workteam_arn.txt', 'w') as f:
f.write(workteam_arn)
if __name__== "__main__":
main()
| apache-2.0 | -4,135,020,320,136,825,300 | 46.395833 | 191 | 0.736703 | false |
spaceninja/mltshp | models/conversation.py | 1 | 2988 | from lib.flyingcow import Model, Property
from datetime import datetime
from tornado.options import options
import comment
import sharedfile
class Conversation(Model):
user_id = Property()
sharedfile_id= Property()
muted = Property(default=0)
created_at = Property()
updated_at = Property()
def save(self, *args, **kwargs):
if options.readonly:
self.add_error('_', 'Site is read-only.')
return False
self._set_dates()
return super(Conversation, self).save(*args, **kwargs)
def _set_dates(self):
"""
        Sets the created_at and updated_at fields. This should really be
        handled by a subclass of Property during the save cycle.
"""
if self.id is None or self.created_at is None:
self.created_at = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
self.updated_at = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
def mute(self):
self.muted = 1
self.save()
def sharedfile(self):
"""
Associates sharedfile.
"""
return sharedfile.Sharedfile.get("id=%s", self.sharedfile_id)
def relevant_comments(self):
"""
Returns comments to display for the user's conversation. Returns
all comments that aren't deleted.
"""
return comment.Comment.where('sharedfile_id = %s and deleted = 0', self.sharedfile_id)
@classmethod
def for_user(self, user_id, type='all', page=1, per_page=10):
limit_start = (page-1) * per_page
filtering_by = ""
if type == 'myfiles':
filtering_by = "AND sharedfile.user_id = conversation.user_id"
elif type == 'mycomments':
filtering_by = "AND sharedfile.user_id != conversation.user_id"
select = """
SELECT conversation.* from conversation, sharedfile
WHERE conversation.user_id = %s
AND conversation.muted = 0
AND sharedfile.id = conversation.sharedfile_id
%s
ORDER BY sharedfile.activity_at desc
limit %s, %s
""" % (user_id, filtering_by, limit_start, per_page)
conversations = self.object_query(select)
return conversations
@classmethod
def for_user_count(self, user_id, type='all'):
filtering_by = ''
if type == 'myfiles':
filtering_by = "AND sharedfile.user_id = conversation.user_id"
elif type == 'mycomments':
filtering_by = "AND sharedfile.user_id != conversation.user_id"
select = """
SELECT count(conversation.id) as count from conversation, sharedfile
WHERE conversation.user_id = %s
AND sharedfile.id = conversation.sharedfile_id
AND conversation.muted = 0
%s
""" % (user_id, filtering_by)
result = self.query(select)
return result[0]['count']
| mpl-2.0 | 5,072,496,939,587,368,000 | 34.152941 | 94 | 0.583668 | false |
n-west/gnuradio | gr-uhd/apps/uhd_app.py | 1 | 15414 | #!/usr/bin/env python
#
# Copyright 2015-2016 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
USRP Helper Module: Common tasks for uhd-based apps.
"""
from __future__ import print_function
import sys
import time
import argparse
from gnuradio import eng_arg
from gnuradio import uhd
from gnuradio import gr
from gnuradio import gru
COMMAND_DELAY = .2 # Seconds
COMPACT_TPL = "{mb_id} ({mb_serial}), {db_subdev} ({subdev}, {ant}{db_serial})"
LONG_TPL = """{prefix} Motherboard: {mb_id} ({mb_serial})
{prefix} Daughterboard: {db_subdev}{db_serial}
{prefix} Subdev: {subdev}
{prefix} Antenna: {ant}
"""
class UHDApp(object):
" Base class for simple UHD-based applications "
def __init__(self, prefix=None, args=None):
self.prefix = prefix
self.args = args
self.verbose = args.verbose or 0
if self.args.sync == 'auto' and len(self.args.channels) > 1:
self.args.sync = 'pps'
self.antenna = None
self.gain_range = None
self.samp_rate = None
self.has_lo_sensor = None
self.async_msgq = None
self.async_src = None
self.async_rcv = None
self.tr = None
self.gain = None
self.freq = None
self.channels = None
self.cpu_format = None
def vprint(self, *args):
"""
        Print the given arguments with '[prefix]' prepended if self.verbose is True
"""
if self.verbose:
print("[{prefix}]".format(prefix=self.prefix), *args)
def get_usrp_info_string(self,
compact=False,
tx_or_rx='rx',
chan=0,
mboard=0,
):
"""
Return a nice textual description of the USRP we're using.
"""
assert tx_or_rx == 'rx' or tx_or_rx == 'tx'
try:
info_pp = {}
if self.prefix is None:
info_pp['prefix'] = ""
else:
info_pp['prefix'] = "[{prefix}] ".format(prefix=self.prefix)
usrp_info = self.usrp.get_usrp_info(chan)
info_pp['mb_id'] = usrp_info['mboard_id']
info_pp['mb_serial'] = usrp_info['mboard_serial']
if info_pp['mb_serial'] == "":
info_pp['mb_serial'] = "no serial"
info_pp['db_subdev'] = usrp_info["{xx}_subdev_name".format(xx=tx_or_rx)]
info_pp['db_serial'] = ", " + usrp_info["{xx}_serial".format(xx=tx_or_rx)]
if info_pp['db_serial'] == "":
info_pp['db_serial'] = "no serial"
info_pp['subdev'] = self.usrp.get_subdev_spec(mboard)
info_pp['ant'] = self.usrp.get_antenna(chan)
if info_pp['mb_id'] in ("B200", "B210", "E310"):
# In this case, this is meaningless
info_pp['db_serial'] = ""
tpl = LONG_TPL
if compact:
tpl = COMPACT_TPL
return tpl.format(**info_pp)
except:
return "Can't establish USRP info."
def normalize_antenna_sel(self, args):
"""
Make sure the --antenna option matches the --channels option.
"""
if args.antenna is None:
return None
antennas = [x.strip() for x in args.antenna.split(",")]
if len(antennas) != 1 and len(antennas) != len(args.channels):
raise ValueError("Invalid antenna setting for {n} channels: {a}".format(
n=len(self.channels), a=args.antenna,
))
if len(antennas) == 1:
antennas = [antennas[0],] * len(args.channels)
return antennas
def normalize_subdev_sel(self, spec):
"""
"""
if spec is None:
return None
specs = [x.strip() for x in spec.split(",")]
if len(specs) == 1:
return spec
elif len(specs) != self.usrp.get_num_mboards():
raise ValueError("Invalid subdev setting for {n} mboards: {a}".format(
n=len(self.usrp.get_num_mboards()), a=spec
))
return specs
def async_callback(self, msg):
"""
Call this when USRP async metadata needs printing.
"""
metadata = self.async_src.msg_to_async_metadata_t(msg)
print("[{prefix}] Channel: {chan} Time: {t} Event: {e}".format(
prefix=self.prefix,
chan=metadata.channel,
t=metadata.time_spec.get_real_secs(),
e=metadata.event_code,
))
def setup_usrp(self, ctor, args, cpu_format='fc32'):
"""
Instantiate a USRP object; takes care of all kinds of corner cases and settings.
Pop it and some args onto the class that calls this.
"""
self.channels = args.channels
self.cpu_format = cpu_format
# Create a UHD device object:
self.usrp = ctor(
device_addr=args.args,
stream_args=uhd.stream_args(
cpu_format,
args.otw_format,
args=args.stream_args,
channels=self.channels,
)
)
# Set the subdevice spec:
args.spec = self.normalize_subdev_sel(args.spec)
if args.spec:
for mb_idx in xrange(self.usrp.get_num_mboards()):
if isinstance(args.spec, list):
self.usrp.set_subdev_spec(args.spec[mb_idx], mb_idx)
else:
self.usrp.set_subdev_spec(args.spec, mb_idx)
# Set the clock and/or time source:
if args.clock_source is not None:
for mb_idx in xrange(self.usrp.get_num_mboards()):
self.usrp.set_clock_source(args.clock_source, mb_idx)
if args.time_source is not None:
for mb_idx in xrange(self.usrp.get_num_mboards()):
self.usrp.set_time_source(args.time_source, mb_idx)
# Sampling rate:
self.usrp.set_samp_rate(args.samp_rate)
self.samp_rate = self.usrp.get_samp_rate()
self.vprint("Using sampling rate: {rate}".format(rate=self.samp_rate))
# Set the antenna:
self.antenna = self.normalize_antenna_sel(args)
if self.antenna is not None:
for i, chan in enumerate(self.channels):
if not self.antenna[i] in self.usrp.get_antennas(i):
self.vprint("[ERROR] {} is not a valid antenna name for this USRP device!".format(self.antenna[i]))
exit(1)
self.usrp.set_antenna(self.antenna[i], i)
self.vprint("[{prefix}] Channel {chan}: Using antenna {ant}.".format(
prefix=self.prefix, chan=chan, ant=self.usrp.get_antenna(i)
))
self.antenna = self.usrp.get_antenna(0)
# Set receive daughterboard gain:
self.set_gain(args.gain)
self.gain_range = self.usrp.get_gain_range(0)
# Set frequency (tune request takes lo_offset):
if hasattr(args, 'lo_offset') and args.lo_offset is not None:
treq = uhd.tune_request(args.freq, args.lo_offset)
else:
treq = uhd.tune_request(args.freq)
self.has_lo_sensor = 'lo_locked' in self.usrp.get_sensor_names()
# Make sure tuning is synched:
command_time_set = False
if len(self.channels) > 1:
if args.sync == 'pps':
self.usrp.set_time_unknown_pps(uhd.time_spec())
cmd_time = self.usrp.get_time_now() + uhd.time_spec(COMMAND_DELAY)
try:
for mb_idx in xrange(self.usrp.get_num_mboards()):
self.usrp.set_command_time(cmd_time, mb_idx)
command_time_set = True
except RuntimeError:
sys.stderr.write('[{prefix}] [WARNING] Failed to set command times.\n'.format(prefix=self.prefix))
for i, chan in enumerate(self.channels):
self.tr = self.usrp.set_center_freq(treq, i)
if self.tr == None:
sys.stderr.write('[{prefix}] [ERROR] Failed to set center frequency on channel {chan}\n'.format(
prefix=self.prefix, chan=chan
))
exit(1)
if command_time_set:
for mb_idx in xrange(self.usrp.get_num_mboards()):
self.usrp.clear_command_time(mb_idx)
self.vprint("Syncing channels...".format(prefix=self.prefix))
time.sleep(COMMAND_DELAY)
self.freq = self.usrp.get_center_freq(0)
if args.show_async_msg:
self.async_msgq = gr.msg_queue(0)
self.async_src = uhd.amsg_source("", self.async_msgq)
self.async_rcv = gru.msgq_runner(self.async_msgq, self.async_callback)
def set_gain(self, gain):
"""
Safe gain-setter. Catches some special cases:
- If gain is None, set to mid-point in dB.
- If the USRP is multi-channel, set it on all channels.
"""
if gain is None:
if self.args.verbose:
self.vprint("Defaulting to mid-point gains:".format(prefix=self.prefix))
for i, chan in enumerate(self.channels):
self.usrp.set_normalized_gain(.5, i)
if self.args.verbose:
self.vprint("Channel {chan} gain: {g} dB".format(
prefix=self.prefix, chan=chan, g=self.usrp.get_gain(i)
))
else:
self.vprint("Setting gain to {g} dB.".format(g=gain))
            for chan in range(len(self.channels)):
self.usrp.set_gain(gain, chan)
self.gain = self.usrp.get_gain(0)
def set_freq(self, freq, skip_sync=False):
"""
Safely tune all channels to freq.
"""
self.vprint("Tuning all channels to {freq} MHz.".format(freq=freq/1e6))
# Set frequency (tune request takes lo_offset):
if hasattr(self.args, 'lo_offset') and self.args.lo_offset is not None:
treq = uhd.tune_request(freq, self.args.lo_offset)
else:
treq = uhd.tune_request(freq)
# Make sure tuning is synched:
command_time_set = False
if len(self.channels) > 1 and not skip_sync:
cmd_time = self.usrp.get_time_now() + uhd.time_spec(COMMAND_DELAY)
try:
for mb_idx in xrange(self.usrp.get_num_mboards()):
self.usrp.set_command_time(cmd_time, mb_idx)
command_time_set = True
except RuntimeError:
sys.stderr.write('[{prefix}] [WARNING] Failed to set command times.\n'.format(prefix=self.prefix))
        for i, chan in enumerate(self.channels):
self.tr = self.usrp.set_center_freq(treq, i)
            if self.tr is None:
sys.stderr.write('[{prefix}] [ERROR] Failed to set center frequency on channel {chan}\n'.format(
prefix=self.prefix, chan=chan
))
exit(1)
if command_time_set:
for mb_idx in xrange(self.usrp.get_num_mboards()):
self.usrp.clear_command_time(mb_idx)
self.vprint("Syncing channels...".format(prefix=self.prefix))
time.sleep(COMMAND_DELAY)
self.freq = self.usrp.get_center_freq(0)
self.vprint("First channel has freq: {freq} MHz.".format(freq=self.freq/1e6))
@staticmethod
def setup_argparser(
parser=None,
description='USRP App',
allow_mimo=True,
tx_or_rx="",
skip_freq=False,
):
"""
Create or amend an argument parser with typical USRP options.
"""
def cslist(string):
"""
For ArgParser: Turn a comma separated list into an actual list.
"""
try:
return [int(x.strip()) for x in string.split(",")]
            except ValueError:
raise argparse.ArgumentTypeError("Not a comma-separated list: {string}".format(string=string))
if parser is None:
parser = argparse.ArgumentParser(
description=description,
)
tx_or_rx = tx_or_rx.strip() + " "
group = parser.add_argument_group('USRP Arguments')
group.add_argument("-a", "--args", default="", help="UHD device address args")
group.add_argument("--spec", help="Subdevice(s) of UHD device where appropriate. Use a comma-separated list to set different boards to different specs.")
group.add_argument("-A", "--antenna", help="Select {xx}antenna(s) where appropriate".format(xx=tx_or_rx))
group.add_argument("-s", "--samp-rate", type=eng_arg.eng_float, default=1e6,
help="Sample rate")
group.add_argument("-g", "--gain", type=eng_arg.eng_float, default=None,
help="Gain (default is midpoint)")
group.add_argument("--gain-type", choices=('db', 'normalized'), default='db',
help="Gain Type (applies to -g)")
if not skip_freq:
group.add_argument("-f", "--freq", type=eng_arg.eng_float, default=None, required=True,
help="Set carrier frequency to FREQ",
metavar="FREQ")
group.add_argument("--lo-offset", type=eng_arg.eng_float, default=0.0,
help="Set daughterboard LO offset to OFFSET [default=hw default]")
if allow_mimo:
group.add_argument("-c", "--channels", default=[0,], type=cslist,
help="Select {xx} Channels".format(xx=tx_or_rx))
group.add_argument("--otw-format", choices=['sc16', 'sc12', 'sc8'], default='sc16',
help="Choose over-the-wire data format")
group.add_argument("--stream-args", default="", help="Set additional stream arguments")
group.add_argument("-m", "--amplitude", type=eng_arg.eng_float, default=0.15,
help="Set output amplitude to AMPL (0.0-1.0)", metavar="AMPL")
group.add_argument("-v", "--verbose", action="count", help="Use verbose console output")
group.add_argument("--show-async-msg", action="store_true",
help="Show asynchronous message notifications from UHD")
group.add_argument("--sync", choices=('default', 'pps', 'auto'),
default='auto', help="Set to 'pps' to sync devices to PPS")
group.add_argument("--clock-source",
help="Set the clock source; typically 'internal', 'external' or 'gpsdo'")
group.add_argument("--time-source",
help="Set the time source")
return parser
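# Illustrative usage sketch (not part of the original module); `MyUHDApp` is a
# placeholder for the enclosing application class, not an API defined here:
#
#   parser = MyUHDApp.setup_argparser(description='RX example', tx_or_rx='Rx')
#   args = parser.parse_args()   # e.g. -f 100e6 -s 1e6 -c 0,1 --sync pps
#   app = MyUHDApp(args)         # the setup code above then configures the USRP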
| gpl-3.0 | -5,175,482,330,298,451,000 | 42.91453 | 161 | 0.557675 | false |
Sumith1896/sympy | sympy/solvers/pde.py | 1 | 35549 | """
This module contains pdsolve() and different helper functions that it
uses. It is heavily inspired by the ode module and hence the basic
infrastructure remains the same.
**Functions in this module**
These are the user functions in this module:
- pdsolve() - Solves PDE's
- classify_pde() - Classifies PDEs into possible hints for dsolve().
- pde_separate() - Separate variables in partial differential equation either by
additive or multiplicative separation approach.
These are the helper functions in this module:
- pde_separate_add() - Helper function for searching additive separable solutions.
- pde_separate_mul() - Helper function for searching multiplicative
separable solutions.
**Currently implemented solver methods**
The following methods are implemented for solving partial differential
equations. See the docstrings of the various pde_hint() functions for
more information on each (run help(pde)):
- 1st order linear homogeneous partial differential equations
with constant coefficients.
- 1st order linear general partial differential equations
with constant coefficients.
- 1st order linear partial differential equations with
variable coefficients.
"""
from __future__ import print_function, division
from sympy.simplify import simplify
from sympy.core import Add, C, S
from sympy.core.compatibility import (reduce, combinations_with_replacement,
is_sequence, range)
from sympy.core.function import Function, expand, AppliedUndef, Subs
from sympy.core.relational import Equality, Eq
from sympy.core.symbol import Symbol, Wild, symbols
from sympy.functions import exp
from sympy.utilities.iterables import has_dups
from sympy.solvers.deutils import _preprocess, ode_order, _desolve
from sympy.solvers.solvers import solve
from sympy.simplify.simplify import collect
import operator
allhints = (
"1st_linear_constant_coeff_homogeneous",
"1st_linear_constant_coeff",
"1st_linear_constant_coeff_Integral",
"1st_linear_variable_coeff"
)
def pdsolve(eq, func=None, hint='default', dict=False, solvefun=None, **kwargs):
"""
Solves any (supported) kind of partial differential equation.
**Usage**
pdsolve(eq, f(x,y), hint) -> Solve partial differential equation
eq for function f(x,y), using method hint.
**Details**
``eq`` can be any supported partial differential equation (see
the pde docstring for supported methods). This can either
be an Equality, or an expression, which is assumed to be
equal to 0.
``f(x,y)`` is a function of two variables whose derivatives in that
variable make up the partial differential equation. In many
cases it is not necessary to provide this; it will be autodetected
(and an error raised if it couldn't be detected).
``hint`` is the solving method that you want pdsolve to use. Use
classify_pde(eq, f(x,y)) to get all of the possible hints for
a PDE. The default hint, 'default', will use whatever hint
is returned first by classify_pde(). See Hints below for
more options that you can use for hint.
``solvefun`` is the convention used for arbitrary functions returned
by the PDE solver. If not set by the user, it is set by default
to be F.
**Hints**
Aside from the various solving methods, there are also some
meta-hints that you can pass to pdsolve():
"default":
This uses whatever hint is returned first by
classify_pde(). This is the default argument to
pdsolve().
"all":
To make pdsolve apply all relevant classification hints,
use pdsolve(PDE, func, hint="all"). This will return a
dictionary of hint:solution terms. If a hint causes
pdsolve to raise the NotImplementedError, value of that
hint's key will be the exception object raised. The
dictionary will also include some special keys:
- order: The order of the PDE. See also ode_order() in
deutils.py
- default: The solution that would be returned by
default. This is the one produced by the hint that
appears first in the tuple returned by classify_pde().
"all_Integral":
This is the same as "all", except if a hint also has a
corresponding "_Integral" hint, it only returns the
"_Integral" hint. This is useful if "all" causes
pdsolve() to hang because of a difficult or impossible
integral. This meta-hint will also be much faster than
"all", because integrate() is an expensive routine.
See also the classify_pde() docstring for more info on hints,
and the pde docstring for a list of all supported hints.
**Tips**
- You can declare the derivative of an unknown function this way:
>>> from sympy import Function, Derivative
>>> from sympy.abc import x, y # x and y are the independent variables
>>> f = Function("f")(x, y) # f is a function of x and y
>>> # fx will be the partial derivative of f with respect to x
>>> fx = Derivative(f, x)
>>> # fy will be the partial derivative of f with respect to y
>>> fy = Derivative(f, y)
- See test_pde.py for many tests, which serves also as a set of
examples for how to use pdsolve().
- pdsolve always returns an Equality class (except for the case
when the hint is "all" or "all_Integral"). Note that it is not possible
to get an explicit solution for f(x, y) as in the case of ODE's
- Do help(pde.pde_hintname) to get help more information on a
specific hint
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, diff, Eq
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> u = f(x, y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)))
>>> pdsolve(eq)
Eq(f(x, y), F(3*x - 2*y)*exp(-2*x/13 - 3*y/13))
"""
given_hint = hint # hint given by the user.
if not solvefun:
solvefun = Function('F')
# See the docstring of _desolve for more details.
hints = _desolve(eq, func=func,
hint=hint, simplify=True, type='pde', **kwargs)
eq = hints.pop('eq', False)
all_ = hints.pop('all', False)
if all_:
# TODO : 'best' hint should be implemented when adequate
# number of hints are added.
pdedict = {}
failed_hints = {}
gethints = classify_pde(eq, dict=True)
pdedict.update({'order': gethints['order'],
'default': gethints['default']})
for hint in hints:
try:
rv = _helper_simplify(eq, hint, hints[hint]['func'],
hints[hint]['order'], hints[hint][hint], solvefun)
except NotImplementedError as detail:
failed_hints[hint] = detail
else:
pdedict[hint] = rv
pdedict.update(failed_hints)
return pdedict
else:
return _helper_simplify(eq, hints['hint'],
hints['func'], hints['order'], hints[hints['hint']], solvefun)
def _helper_simplify(eq, hint, func, order, match, solvefun):
"""Helper function of pdsolve that calls the respective
pde functions to solve for the partial differential
equations. This minimises the computation in
calling _desolve multiple times.
"""
if hint.endswith("_Integral"):
solvefunc = globals()[
"pde_" + hint[:-len("_Integral")]]
else:
solvefunc = globals()["pde_" + hint]
return _handle_Integral(solvefunc(eq, func, order,
match, solvefun), func, order, hint)
def _handle_Integral(expr, func, order, hint):
r"""
Converts a solution with integrals in it into an actual solution.
Simplifies the integral mainly using doit()
"""
if hint.endswith("_Integral"):
return expr
elif hint == "1st_linear_constant_coeff":
return simplify(expr.doit())
else:
return expr
def classify_pde(eq, func=None, dict=False, **kwargs):
"""
Returns a tuple of possible pdsolve() classifications for a PDE.
The tuple is ordered so that first item is the classification that
pdsolve() uses to solve the PDE by default. In general,
classifications near the beginning of the list will produce
better solutions faster than those near the end, though there are
always exceptions. To make pdsolve use a different classification,
use pdsolve(PDE, func, hint=<classification>). See also the pdsolve()
docstring for different meta-hints you can use.
If ``dict`` is true, classify_pde() will return a dictionary of
hint:match expression terms. This is intended for internal use by
pdsolve(). Note that because dictionaries are ordered arbitrarily,
this will most likely not be in the same order as the tuple.
You can get help on different hints by doing help(pde.pde_hintname),
where hintname is the name of the hint without "_Integral".
See sympy.pde.allhints or the sympy.pde docstring for a list of all
supported hints that can be returned from classify_pde.
Examples
========
>>> from sympy.solvers.pde import classify_pde
>>> from sympy import Function, diff, Eq
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> u = f(x, y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)))
>>> classify_pde(eq)
('1st_linear_constant_coeff_homogeneous',)
"""
prep = kwargs.pop('prep', True)
if func and len(func.args) != 2:
raise NotImplementedError("Right now only partial "
"differential equations of two variables are supported")
if prep or func is None:
prep, func_ = _preprocess(eq, func)
if func is None:
func = func_
if isinstance(eq, Equality):
if eq.rhs != 0:
return classify_pde(eq.lhs - eq.rhs, func)
eq = eq.lhs
f = func.func
x = func.args[0]
y = func.args[1]
fx = f(x,y).diff(x)
fy = f(x,y).diff(y)
# TODO : For now pde.py uses support offered by the ode_order function
# to find the order with respect to a multi-variable function. An
# improvement could be to classify the order of the PDE on the basis of
# individual variables.
order = ode_order(eq, f(x,y))
# hint:matchdict or hint:(tuple of matchdicts)
# Also will contain "default":<default hint> and "order":order items.
matching_hints = {'order': order}
if not order:
if dict:
matching_hints["default"] = None
return matching_hints
else:
return ()
eq = expand(eq)
a = Wild('a', exclude = [f(x,y)])
b = Wild('b', exclude = [f(x,y), fx, fy, x, y])
c = Wild('c', exclude = [f(x,y), fx, fy, x, y])
d = Wild('d', exclude = [f(x,y), fx, fy, x, y])
e = Wild('e', exclude = [f(x,y), fx, fy])
n = Wild('n', exclude = [x, y])
# Try removing the smallest power of f(x,y)
# from the highest partial derivatives of f(x,y)
reduced_eq = None
if eq.is_Add:
var = set(combinations_with_replacement((x,y), order))
dummyvar = var.copy()
power = None
for i in var:
coeff = eq.coeff(f(x,y).diff(*i))
if coeff != 1:
match = coeff.match(a*f(x,y)**n)
if match and match[a]:
power = match[n]
dummyvar.remove(i)
break
dummyvar.remove(i)
for i in dummyvar:
coeff = eq.coeff(f(x,y).diff(*i))
if coeff != 1:
match = coeff.match(a*f(x,y)**n)
if match and match[a] and match[n] < power:
power = match[n]
if power:
den = f(x,y)**power
reduced_eq = Add(*[arg/den for arg in eq.args])
if not reduced_eq:
reduced_eq = eq
if order == 1:
reduced_eq = collect(reduced_eq, f(x, y))
r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
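        # Illustrative example (not in the original source): for
        # reduced_eq = 2*fx + 3*fy + f(x, y) + 5 this match gives
        # {b: 2, c: 3, d: 1, e: 5} and selects the constant-coefficient hints below.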
if r:
if not r[e]:
## Linear first-order homogeneous partial-differential
## equation with constant coefficients
r.update({'b': b, 'c': c, 'd': d})
matching_hints["1st_linear_constant_coeff_homogeneous"] = r
else:
if r[b]**2 + r[c]**2 != 0:
## Linear first-order general partial-differential
## equation with constant coefficients
r.update({'b': b, 'c': c, 'd': d, 'e': e})
matching_hints["1st_linear_constant_coeff"] = r
matching_hints[
"1st_linear_constant_coeff_Integral"] = r
else:
b = Wild('b', exclude=[f(x, y), fx, fy])
c = Wild('c', exclude=[f(x, y), fx, fy])
d = Wild('d', exclude=[f(x, y), fx, fy])
r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
if r:
r.update({'b': b, 'c': c, 'd': d, 'e': e})
matching_hints["1st_linear_variable_coeff"] = r
# Order keys based on allhints.
retlist = []
for i in allhints:
if i in matching_hints:
retlist.append(i)
if dict:
# Dictionaries are ordered arbitrarily, so make note of which
# hint would come first for pdsolve(). Use an ordered dict in Py 3.
matching_hints["default"] = None
matching_hints["ordered_hints"] = tuple(retlist)
for i in allhints:
if i in matching_hints:
matching_hints["default"] = i
break
return matching_hints
else:
return tuple(retlist)
def checkpdesol(pde, sol, func=None, solve_for_func=True):
"""
Checks if the given solution satisfies the partial differential
equation.
pde is the partial differential equation which can be given in the
form of an equation or an expression. sol is the solution for which
the pde is to be checked. This can also be given in an equation or
an expression form. If the function is not provided, the helper
function _preprocess from deutils is used to identify the function.
If a sequence of solutions is passed, the same sort of container will be
used to return the result for each solution.
The following methods are currently being implemented to check if the
solution satisfies the PDE:
1. Directly substitute the solution in the PDE and check. If the
solution hasn't been solved for f, then it will solve for f
provided solve_for_func hasn't been set to False.
If the solution satisfies the PDE, then a tuple (True, 0) is returned.
Otherwise a tuple (False, expr) where expr is the value obtained
after substituting the solution in the PDE. However if a known solution
returns False, it may be due to the inability of doit() to simplify it to zero.
Examples
========
>>> from sympy import Function, symbols, diff
>>> from sympy.solvers.pde import checkpdesol, pdsolve
>>> x, y = symbols('x y')
>>> f = Function('f')
>>> eq = 2*f(x,y) + 3*f(x,y).diff(x) + 4*f(x,y).diff(y)
>>> sol = pdsolve(eq)
>>> assert checkpdesol(eq, sol)[0]
>>> eq = x*f(x,y) + f(x,y).diff(x)
>>> checkpdesol(eq, sol)
(False, (x*F(4*x - 3*y) - 6*F(4*x - 3*y)/25 + 4*Subs(Derivative(F(_xi_1), _xi_1), (_xi_1,), (4*x - 3*y,)))*exp(-6*x/25 - 8*y/25))
"""
# Converting the pde into an equation
if not isinstance(pde, Equality):
pde = Eq(pde, 0)
# If no function is given, try finding the function present.
if func is None:
try:
_, func = _preprocess(pde.lhs)
except ValueError:
funcs = [s.atoms(AppliedUndef) for s in (
sol if is_sequence(sol, set) else [sol])]
            funcs = set().union(*funcs)
if len(funcs) != 1:
raise ValueError(
'must pass func arg to checkpdesol for this case.')
func = funcs.pop()
# If the given solution is in the form of a list or a set
# then return a list or set of tuples.
if is_sequence(sol, set):
return type(sol)(map(lambda i: checkpdesol(pde, i,
solve_for_func=solve_for_func), sol))
# Convert solution into an equation
if not isinstance(sol, Equality):
sol = Eq(func, sol)
# Try solving for the function
if solve_for_func and not (sol.lhs == func and not sol.rhs.has(func)) and not \
(sol.rhs == func and not sol.lhs.has(func)):
try:
solved = solve(sol, func)
if not solved:
raise NotImplementedError
except NotImplementedError:
pass
else:
            if len(solved) == 1:
                return checkpdesol(pde, Eq(func, solved[0]),
                    solve_for_func=False)
            else:
                return checkpdesol(pde, [Eq(func, t) for t in solved],
                    solve_for_func=False)
# The first method includes direct substitution of the solution in
# the PDE and simplifying.
pde = pde.lhs - pde.rhs
if sol.lhs == func:
s = pde.subs(func, sol.rhs).doit()
elif sol.rhs == func:
s = pde.subs(func, sol.lhs).doit()
if s:
ss = simplify(s)
if ss:
return False, ss
else:
return True, 0
else:
return True, 0
def pde_1st_linear_constant_coeff_homogeneous(eq, func, order, match, solvefun):
r"""
Solves a first order linear homogeneous
partial differential equation with constant coefficients.
The general form of this partial differential equation is
.. math:: a \frac{df(x,y)}{dx} + b \frac{df(x,y)}{dy} + c f(x,y) = 0
where `a`, `b` and `c` are constants.
The general solution is of the form::
>>> from sympy.solvers import pdsolve
>>> from sympy.abc import x, y, a, b, c
>>> from sympy import Function, pprint
>>> f = Function('f')
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a*ux + b*uy + c*u
>>> pprint(genform)
d d
a*--(f(x, y)) + b*--(f(x, y)) + c*f(x, y)
dx dy
>>> pprint(pdsolve(genform))
-c*(a*x + b*y)
---------------
2 2
a + b
f(x, y) = F(-a*y + b*x)*e
Examples
========
>>> from sympy.solvers.pde import (
... pde_1st_linear_constant_coeff_homogeneous)
>>> from sympy import pdsolve
>>> from sympy import Function, diff, pprint
>>> from sympy.abc import x,y
>>> f = Function('f')
>>> pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y))
Eq(f(x, y), F(x - y)*exp(-x/2 - y/2))
>>> pprint(pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y)))
x y
- - - -
2 2
f(x, y) = F(x - y)*e
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
# TODO : For now homogeneous first order linear PDE's having
# two variables are implemented. Once there is support for
# solving systems of ODE's, this can be extended to n variables.
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
return Eq(f(x,y), exp(-S(d)/(b**2 + c**2)*(b*x + c*y))*solvefun(c*x - b*y))
def pde_1st_linear_constant_coeff(eq, func, order, match, solvefun):
r"""
Solves a first order linear partial differential equation
with constant coefficients.
The general form of this partial differential equation is
.. math:: a \frac{df(x,y)}{dx} + b \frac{df(x,y)}{dy} + c f(x,y) = G(x,y)
where `a`, `b` and `c` are constants and `G(x, y)` can be an arbitrary
function in `x` and `y`.
The general solution of the PDE is::
>>> from sympy.solvers import pdsolve
>>> from sympy.abc import x, y, a, b, c
>>> from sympy import Function, pprint
>>> f = Function('f')
>>> G = Function('G')
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a*u + b*ux + c*uy - G(x,y)
>>> pprint(genform)
d d
a*f(x, y) + b*--(f(x, y)) + c*--(f(x, y)) - G(x, y)
dx dy
>>> pprint(pdsolve(genform, hint='1st_linear_constant_coeff_Integral'))
// b*x + c*y \
|| / |
|| | |
|| | a*xi |
|| | ------- |
|| | 2 2 |
|| | /b*xi + c*eta -b*eta + c*xi\ b + c |
|| | G|------------, -------------|*e d(xi)|
|| | | 2 2 2 2 | |
|| | \ b + c b + c / |
|| | |
|| / |
|| |
f(x, y) = ||F(eta) + -------------------------------------------------------|*
|| 2 2 |
\\ b + c /
<BLANKLINE>
\|
||
||
||
||
||
||
||
||
-a*xi ||
-------||
2 2||
b + c ||
e ||
||
/|eta=-b*y + c*x, xi=b*x + c*y
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, diff, pprint, exp
>>> from sympy.abc import x,y
>>> f = Function('f')
>>> eq = -2*f(x,y).diff(x) + 4*f(x,y).diff(y) + 5*f(x,y) - exp(x + 3*y)
>>> pdsolve(eq)
Eq(f(x, y), (F(4*x + 2*y) + exp(x/2 + 4*y)/15)*exp(x/2 - y))
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
# TODO : For now homogeneous first order linear PDE's having
# two variables are implemented. Once there is support for
# solving systems of ODE's, this can be extended to n variables.
xi, eta = symbols("xi eta")
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
e = -match[match['e']]
expterm = exp(-S(d)/(b**2 + c**2)*xi)
functerm = solvefun(eta)
solvedict = solve((b*x + c*y - xi, c*x - b*y - eta), x, y)
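    # xi = b*x + c*y increases along the transport direction (b, c), while
    # eta = c*x - b*y is constant along characteristics; solvedict inverts this
    # change of variables so the inhomogeneity can be rewritten in (xi, eta).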
# Integral should remain as it is in terms of xi,
# doit() should be done in _handle_Integral.
genterm = (1/S(b**2 + c**2))*C.Integral(
(1/expterm*e).subs(solvedict), (xi, b*x + c*y))
return Eq(f(x,y), Subs(expterm*(functerm + genterm),
(eta, xi), (c*x - b*y, b*x + c*y)))
def pde_1st_linear_variable_coeff(eq, func, order, match, solvefun):
r"""
Solves a first order linear partial differential equation
with variable coefficients. The general form of this partial differential equation is
    .. math:: a(x, y) \frac{df(x, y)}{dx} + b(x, y) \frac{df(x, y)}{dy}
+ c(x, y) f(x, y) - G(x, y)
where `a(x, y)`, `b(x, y)`, `c(x, y)` and `G(x, y)` are arbitrary functions
in `x` and `y`. This PDE is converted into an ODE by making the following transformation.
1] `\xi` as `x`
2] `\eta` as the constant in the solution to the differential equation
    `\frac{dy}{dx} = \frac{b}{a}`
Making the following substitutions reduces it to the linear ODE
    .. math:: a(\xi, \eta)\frac{du}{d\xi} + c(\xi, \eta)u - G(\xi, \eta) = 0
which can be solved using dsolve.
The general form of this PDE is::
>>> from sympy.solvers.pde import pdsolve
>>> from sympy.abc import x, y
>>> from sympy import Function, pprint
>>> a, b, c, G, f= [Function(i) for i in ['a', 'b', 'c', 'G', 'f']]
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a(x, y)*u + b(x, y)*ux + c(x, y)*uy - G(x,y)
>>> pprint(genform)
d d
-G(x, y) + a(x, y)*f(x, y) + b(x, y)*--(f(x, y)) + c(x, y)*--(f(x, y))
dx dy
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, diff, pprint, exp
>>> from sympy.abc import x,y
    >>> f = Function('f')
    >>> u = f(x, y)
    >>> eq = x*(u.diff(x)) - y*(u.diff(y)) + y**2*u - y**2
>>> pdsolve(eq)
Eq(f(x, y), F(x*y)*exp(y**2/2) + 1)
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
from sympy.integrals.integrals import integrate
from sympy.solvers.ode import dsolve
xi, eta = symbols("xi eta")
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
e = -match[match['e']]
if not d:
# To deal with cases like b*ux = e or c*uy = e
if not (b and c):
if c:
try:
tsol = integrate(e/c, y)
except NotImplementedError:
raise NotImplementedError("Unable to find a solution"
" due to inability of integrate")
else:
return Eq(f(x,y), solvefun(x) + tsol)
if b:
try:
tsol = integrate(e/b, x)
except NotImplementedError:
raise NotImplementedError("Unable to find a solution"
" due to inability of integrate")
else:
return Eq(f(x,y), solvefun(y) + tsol)
if not c:
# To deal with cases when c is 0, a simpler method is used.
# The PDE reduces to b*(u.diff(x)) + d*u = e, which is a linear ODE in x
plode = f(x).diff(x)*b + d*f(x) - e
sol = dsolve(plode, f(x))
syms = sol.free_symbols - plode.free_symbols - set([x, y])
rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, y)
return Eq(f(x, y), rhs)
if not b:
# To deal with cases when b is 0, a simpler method is used.
# The PDE reduces to c*(u.diff(y)) + d*u = e, which is a linear ODE in y
plode = f(y).diff(y)*c + d*f(y) - e
sol = dsolve(plode, f(y))
syms = sol.free_symbols - plode.free_symbols - set([x, y])
rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, x)
return Eq(f(x, y), rhs)
dummy = Function('d')
h = (c/b).subs(y, dummy(x))
sol = dsolve(dummy(x).diff(x) - h, dummy(x))
if isinstance(sol, list):
sol = sol[0]
solsym = sol.free_symbols - h.free_symbols - set([x, y])
if len(solsym) == 1:
solsym = solsym.pop()
etat = (solve(sol, solsym)[0]).subs(dummy(x), y)
ysub = solve(eta - etat, y)[0]
deq = (b*(f(x).diff(x)) + d*f(x) - e).subs(y, ysub)
final = (dsolve(deq, f(x), hint='1st_linear')).rhs
if isinstance(final, list):
final = final[0]
finsyms = final.free_symbols - deq.free_symbols - set([x, y])
rhs = _simplify_variable_coeff(final, finsyms, solvefun, etat)
return Eq(f(x, y), rhs)
else:
raise NotImplementedError("Cannot solve the partial differential equation due"
" to inability of constantsimp")
def _simplify_variable_coeff(sol, syms, func, funcarg):
r"""
Helper function to replace constants by functions in 1st_linear_variable_coeff
"""
eta = Symbol("eta")
if len(syms) == 1:
sym = syms.pop()
final = sol.subs(sym, func(funcarg))
else:
fname = func.__name__
for key, sym in enumerate(syms):
tempfun = Function(fname + str(key))
final = sol.subs(sym, func(funcarg))
return simplify(final.subs(eta, funcarg))
def pde_separate(eq, fun, sep, strategy='mul'):
"""Separate variables in partial differential equation either by additive
or multiplicative separation approach. It tries to rewrite an equation so
that one of the specified variables occurs on a different side of the
equation than the others.
:param eq: Partial differential equation
:param fun: Original function F(x, y, z)
:param sep: List of separated functions [X(x), u(y, z)]
:param strategy: Separation strategy. You can choose between additive
separation ('add') and multiplicative separation ('mul') which is
default.
Examples
========
>>> from sympy import E, Eq, Function, pde_separate, Derivative as D
>>> from sympy.abc import x, t
>>> u, X, T = map(Function, 'uXT')
>>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))
>>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='add')
[exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]
>>> eq = Eq(D(u(x, t), x, 2), D(u(x, t), t, 2))
>>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='mul')
[Derivative(X(x), x, x)/X(x), Derivative(T(t), t, t)/T(t)]
See Also
========
pde_separate_add, pde_separate_mul
"""
do_add = False
if strategy == 'add':
do_add = True
elif strategy == 'mul':
do_add = False
else:
        raise ValueError('Unknown strategy: %s' % strategy)
if isinstance(eq, Equality):
if eq.rhs != 0:
return pde_separate(Eq(eq.lhs - eq.rhs), fun, sep, strategy)
if eq.rhs != 0:
raise ValueError("Value should be 0")
# Handle arguments
orig_args = list(fun.args)
subs_args = []
for s in sep:
for j in range(0, len(s.args)):
subs_args.append(s.args[j])
if do_add:
functions = reduce(operator.add, sep)
else:
functions = reduce(operator.mul, sep)
# Check whether variables match
if len(subs_args) != len(orig_args):
raise ValueError("Variable counts do not match")
# Check for duplicate arguments like [X(x), u(x, y)]
if has_dups(subs_args):
raise ValueError("Duplicate substitution arguments detected")
# Check whether the variables match
if set(orig_args) != set(subs_args):
raise ValueError("Arguments do not match")
# Substitute original function with separated...
result = eq.lhs.subs(fun, functions).doit()
# Divide by terms when doing multiplicative separation
if not do_add:
eq = 0
for i in result.args:
eq += i/functions
result = eq
svar = subs_args[0]
dvar = subs_args[1:]
return _separate(result, svar, dvar)
def pde_separate_add(eq, fun, sep):
"""
Helper function for searching additive separable solutions.
Consider an equation of two independent variables x, y and a dependent
    variable w, we look for a sum of two functions depending on different
    arguments:
    `w(x, y, z) = X(x) + u(y, z)`
Examples
========
>>> from sympy import E, Eq, Function, pde_separate_add, Derivative as D
>>> from sympy.abc import x, t
>>> u, X, T = map(Function, 'uXT')
>>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))
>>> pde_separate_add(eq, u(x, t), [X(x), T(t)])
[exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]
"""
return pde_separate(eq, fun, sep, strategy='add')
def pde_separate_mul(eq, fun, sep):
"""
Helper function for searching multiplicative separable solutions.
Consider an equation of two independent variables x, y and a dependent
variable w, we look for the product of two functions depending on different
arguments:
`w(x, y, z) = X(x)*u(y, z)`
Examples
========
>>> from sympy import Function, Eq, pde_separate_mul, Derivative as D
>>> from sympy.abc import x, y
>>> u, X, Y = map(Function, 'uXY')
>>> eq = Eq(D(u(x, y), x, 2), D(u(x, y), y, 2))
>>> pde_separate_mul(eq, u(x, y), [X(x), Y(y)])
[Derivative(X(x), x, x)/X(x), Derivative(Y(y), y, y)/Y(y)]
"""
return pde_separate(eq, fun, sep, strategy='mul')
def _separate(eq, dep, others):
"""Separate expression into two parts based on dependencies of variables."""
# FIRST PASS
# Extract derivatives depending our separable variable...
terms = set()
for term in eq.args:
if term.is_Mul:
for i in term.args:
if i.is_Derivative and not i.has(*others):
terms.add(term)
continue
elif term.is_Derivative and not term.has(*others):
terms.add(term)
# Find the factor that we need to divide by
div = set()
for term in terms:
ext, sep = term.expand().as_independent(dep)
# Failed?
if sep.has(*others):
return None
div.add(ext)
# FIXME: Find lcm() of all the divisors and divide with it, instead of
# current hack :(
# https://github.com/sympy/sympy/issues/4597
if len(div) > 0:
final = 0
for term in eq.args:
eqn = 0
for i in div:
eqn += term / i
final += simplify(eqn)
eq = final
# SECOND PASS - separate the derivatives
div = set()
lhs = rhs = 0
for term in eq.args:
# Check, whether we have already term with independent variable...
if not term.has(*others):
lhs += term
continue
# ...otherwise, try to separate
temp, sep = term.expand().as_independent(dep)
# Failed?
if sep.has(*others):
return None
# Extract the divisors
div.add(sep)
rhs -= term.expand()
# Do the division
fulldiv = reduce(operator.add, div)
lhs = simplify(lhs/fulldiv).expand()
rhs = simplify(rhs/fulldiv).expand()
# ...and check whether we were successful :)
if lhs.has(*others) or rhs.has(dep):
return None
return [lhs, rhs]
| bsd-3-clause | -8,731,429,320,929,213,000 | 34.62024 | 133 | 0.541703 | false |
jupyterhub/oauthenticator | oauthenticator/globus.py | 1 | 10431 | """
Custom Authenticator to use Globus OAuth2 with JupyterHub
"""
import base64
import os
import pickle
import urllib
from jupyterhub.auth import LocalAuthenticator
from tornado.httpclient import HTTPRequest
from tornado.web import HTTPError
from traitlets import Bool
from traitlets import default
from traitlets import List
from traitlets import Unicode
from .oauth2 import OAuthenticator
from .oauth2 import OAuthLogoutHandler
class GlobusLogoutHandler(OAuthLogoutHandler):
"""
Handle custom logout URLs and token revocation. If a custom logout url
is specified, the 'logout' button will log the user out of that identity
provider in addition to clearing the session with Jupyterhub, otherwise
only the Jupyterhub session is cleared.
"""
async def get(self):
# Ensure self.handle_logout() is called before self.default_handle_logout()
# If default_handle_logout() is called first, the user session is popped and
        # it's no longer possible to call get_auth_state() to revoke tokens.
# See https://github.com/jupyterhub/jupyterhub/blob/master/jupyterhub/handlers/login.py # noqa
await self.handle_logout()
await self.default_handle_logout()
if self.authenticator.logout_redirect_url:
# super().get() will attempt to render a logout page. Make sure we
# return after the redirect to avoid exceptions.
self.redirect(self.authenticator.logout_redirect_url)
return
await super().get()
async def handle_logout(self):
"""Overridden method for custom logout functionality. Should be called by
Jupyterhub on logout just before destroying the users session to log them out."""
await super().handle_logout()
if self.current_user and self.authenticator.revoke_tokens_on_logout:
await self.clear_tokens(self.current_user)
async def clear_tokens(self, user):
"""Revoke and clear user tokens from the database"""
state = await user.get_auth_state()
if state:
await self.authenticator.revoke_service_tokens(state.get('tokens'))
self.log.info(
'Logout: Revoked tokens for user "{}" services: {}'.format(
user.name, ','.join(state['tokens'].keys())
)
)
state['tokens'] = {}
await user.save_auth_state(state)
class GlobusOAuthenticator(OAuthenticator):
"""The Globus OAuthenticator handles both authorization and passing
transfer tokens to the spawner."""
login_service = 'Globus'
logout_handler = GlobusLogoutHandler
@default("userdata_url")
def _userdata_url_default(self):
return "https://auth.globus.org/v2/oauth2/userinfo"
@default("authorize_url")
def _authorize_url_default(self):
return "https://auth.globus.org/v2/oauth2/authorize"
@default("revocation_url")
def _revocation_url_default(self):
return "https://auth.globus.org/v2/oauth2/token/revoke"
revocation_url = Unicode(help="Globus URL to revoke live tokens.").tag(config=True)
@default("token_url")
def _token_url_default(self):
return "https://auth.globus.org/v2/oauth2/token"
identity_provider = Unicode(
help="""Restrict which institution a user
can use to login (GlobusID, University of Hogwarts, etc.). This should
be set in the app at developers.globus.org, but this acts as an additional
check to prevent unnecessary account creation."""
).tag(config=True)
def _identity_provider_default(self):
return os.getenv('IDENTITY_PROVIDER', '')
exclude_tokens = List(
help="""Exclude tokens from being passed into user environments
when they start notebooks, Terminals, etc."""
).tag(config=True)
def _exclude_tokens_default(self):
return ['auth.globus.org']
def _scope_default(self):
return [
'openid',
'profile',
'urn:globus:auth:scope:transfer.api.globus.org:all',
]
globus_local_endpoint = Unicode(
help="""If Jupyterhub is also a Globus
endpoint, its endpoint id can be specified here."""
).tag(config=True)
def _globus_local_endpoint_default(self):
return os.getenv('GLOBUS_LOCAL_ENDPOINT', '')
revoke_tokens_on_logout = Bool(
help="""Revoke tokens so they cannot be used again. Single-user servers
MUST be restarted after logout in order to get a fresh working set of
tokens."""
).tag(config=True)
def _revoke_tokens_on_logout_default(self):
return False
async def pre_spawn_start(self, user, spawner):
"""Add tokens to the spawner whenever the spawner starts a notebook.
This will allow users to create a transfer client:
globus-sdk-python.readthedocs.io/en/stable/tutorial/#tutorial-step4
"""
spawner.environment['GLOBUS_LOCAL_ENDPOINT'] = self.globus_local_endpoint
state = await user.get_auth_state()
if state:
globus_data = base64.b64encode(pickle.dumps(state))
spawner.environment['GLOBUS_DATA'] = globus_data.decode('utf-8')
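    # Rough sketch (not part of the original source) of how code inside the spawned
    # single-user server could unpack these tokens using only the standard library:
    #
    #   import os, base64, pickle
    #   state = pickle.loads(base64.b64decode(os.environ['GLOBUS_DATA']))
    #   transfer_token = state['tokens']['transfer.api.globus.org']['access_token']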
async def authenticate(self, handler, data=None):
"""
Authenticate with globus.org. Usernames (and therefore Jupyterhub
        accounts) will correspond to a Globus User ID, so a Globus ID like
        foouser@globusid.org will have the 'foouser' account in Jupyterhub.
"""
# Complete login and exchange the code for tokens.
params = dict(
redirect_uri=self.get_callback_url(handler),
code=handler.get_argument("code"),
grant_type='authorization_code',
)
req = HTTPRequest(
self.token_url,
method="POST",
headers=self.get_client_credential_headers(),
body=urllib.parse.urlencode(params),
)
token_json = await self.fetch(req)
# Fetch user info at Globus's oauth2/userinfo/ HTTP endpoint to get the username
user_headers = self.get_default_headers()
user_headers['Authorization'] = 'Bearer {}'.format(token_json['access_token'])
req = HTTPRequest(self.userdata_url, method='GET', headers=user_headers)
user_resp = await self.fetch(req)
username = self.get_username(user_resp)
# Each token should have these attributes. Resource server is optional,
# and likely won't be present.
token_attrs = [
'expires_in',
'resource_server',
'scope',
'token_type',
'refresh_token',
'access_token',
]
# The Auth Token is a bit special, it comes back at the top level with the
# id token. The id token has some useful information in it, but nothing that
# can't be retrieved with an Auth token.
# Repackage the Auth token into a dict that looks like the other tokens
auth_token_dict = {
attr_name: token_json.get(attr_name) for attr_name in token_attrs
}
# Make sure only the essentials make it into tokens. Other items, such as 'state' are
# not needed after authentication and can be discarded.
other_tokens = [
{attr_name: token_dict.get(attr_name) for attr_name in token_attrs}
for token_dict in token_json['other_tokens']
]
tokens = other_tokens + [auth_token_dict]
# historically, tokens have been organized by resource server for convenience.
# If multiple scopes are requested from the same resource server, they will be
# combined into a single token from Globus Auth.
by_resource_server = {
token_dict['resource_server']: token_dict
for token_dict in tokens
if token_dict['resource_server'] not in self.exclude_tokens
}
return {
'name': username,
'auth_state': {
'client_id': self.client_id,
'tokens': by_resource_server,
},
}
def get_username(self, user_data):
# It's possible for identity provider domains to be namespaced
# https://docs.globus.org/api/auth/specification/#identity_provider_namespaces # noqa
username, domain = user_data.get('preferred_username').split('@', 1)
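        # e.g. a preferred_username of 'jdoe@uchicago.edu' (illustrative value) splits
        # into username 'jdoe' and domain 'uchicago.edu'; the domain is then checked
        # against identity_provider below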
if self.identity_provider and domain != self.identity_provider:
raise HTTPError(
403,
'This site is restricted to {} accounts. Please link your {}'
' account at {}.'.format(
self.identity_provider,
self.identity_provider,
'globus.org/app/account',
),
)
return username
def get_default_headers(self):
return {"Accept": "application/json", "User-Agent": "JupyterHub"}
def get_client_credential_headers(self):
headers = self.get_default_headers()
b64key = base64.b64encode(
bytes("{}:{}".format(self.client_id, self.client_secret), "utf8")
)
headers["Authorization"] = "Basic {}".format(b64key.decode("utf8"))
return headers
async def revoke_service_tokens(self, services):
"""Revoke live Globus access and refresh tokens. Revoking inert or
non-existent tokens does nothing. Services are defined by dicts
returned by tokens.by_resource_server, for example:
services = { 'transfer.api.globus.org': {'access_token': 'token'}, ...
<Additional services>...
}
"""
access_tokens = [
token_dict.get('access_token') for token_dict in services.values()
]
refresh_tokens = [
token_dict.get('refresh_token') for token_dict in services.values()
]
all_tokens = [tok for tok in access_tokens + refresh_tokens if tok is not None]
for token in all_tokens:
req = HTTPRequest(
self.revocation_url,
method="POST",
headers=self.get_client_credential_headers(),
body=urllib.parse.urlencode({'token': token}),
)
await self.fetch(req)
class LocalGlobusOAuthenticator(LocalAuthenticator, GlobusOAuthenticator):
"""A version that mixes in local system user creation"""
pass
| bsd-3-clause | 4,859,216,885,162,764,000 | 38.214286 | 103 | 0.627936 | false |
aisthesis/opttrack | opttrack/lib/quoteextractor.py | 1 | 2198 | """
.. Copyright (c) 2016 Marshall Farrier
license http://opensource.org/licenses/MIT
Extract specific quotes from a comprehensive DataFrame
Example entry:
{
'Underlying': 'NFLX',
'Strike': 100.0,
'Expiry': datetime.datetime(2016, 3, 18, 23, 0, tzinfo=<bson.tz_util.FixedOffset object at 0x10c4860b8>),
'Opt_Type': 'put',
'Opt_Symbol': 'NFLX160318P00100000',
'Last': 10.25,
'Bid': 9.7,
'Ask': 10.05,
'Vol': 260,
'Open_Int': 23567,
'Quote_Time': datetime.datetime(2016, 2, 22, 16, 0, tzinfo=<DstTzInfo 'US/Eastern' EST-1 day, 19:00:00 STD>)
}
"""
from . import constants
class QuoteExtractor(object):
def __init__(self, logger, underlying, opts, tznyse):
self.logger = logger
self.tznyse = tznyse
self.underlying = underlying
self.opts = opts
def get(self, specs):
return self._extract_all(specs)
def _extract_all(self, specs):
entries = []
self.logger.info('getting {} quote(s) for {}'.format(len(specs), self.underlying))
for spec in specs:
try:
entry = self._extract_one(spec)
except KeyError:
continue
else:
entries.append(entry)
return entries
def _extract_one(self, spec):
entry = spec.copy()
selection = (spec['Strike'], spec['Expiry'].astimezone(self.tznyse).replace(tzinfo=None,
hour=0, minute=0, second=0), spec['Opt_Type'],)
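        # selection addresses the (Strike, Expiry, Type) levels of the options
        # DataFrame's MultiIndex, with the expiry normalized to midnight NYSE time;
        # this index layout is assumed from the .loc lookups below.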
try:
entry['Opt_Symbol'] = self.opts.data.loc[selection, :].index[0]
opt = self.opts.data.loc[selection, :].iloc[0]
except KeyError as e:
self.logger.exception('option not found for {} with {}'
.format(self.opts.data.iloc[0, :].loc['Underlying'], selection))
raise
entry['Quote_Time'] = self.tznyse.localize(opt['Quote_Time'].to_datetime())
entry['Underlying'] = opt['Underlying']
for key in constants.INT_COLS:
entry[key] = int(opt[key])
for key in constants.FLOAT_COLS:
entry[key] = float(opt[key])
self.logger.debug(entry)
return entry
| mit | -8,155,537,419,479,421,000 | 31.80597 | 112 | 0.575523 | false |
CIRCL/bgpranking-redis-api | example/export/day_ips/consumer.py | 1 | 1090 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import redis
import bgpranking
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Prepare the information to dump.')
parser.add_argument('-f', '--full', action="store_true", default=False,
help='Do a full dump (asn, block, ip, sources).')
args = parser.parse_args()
r = redis.Redis(unix_socket_path='./redis_export.sock')
date = r.get('date')
weights = bgpranking.get_all_weights(date)
while True:
asn_b = r.spop('asn_b')
if asn_b is None:
break
asn, block = asn_b.split("_")
ip_descs = bgpranking.get_ips_descs(asn, block, date)
if len(ip_descs.get(block)) != 0:
p = r.pipeline(False)
for ip, sources in ip_descs.get(block).iteritems():
p.zincrby('ips', ip, sum([float(weights[s]) for s in sources]))
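                # each IP's score is incremented by the summed weights of the
                # sources reporting it, so widely-reported IPs rank higher in 'ips'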
if args.full:
p.hmset(ip, {'asn': asn, 'block':block,
'sources': '|'.join(sources)})
p.execute()
| bsd-2-clause | 404,525,801,695,069,440 | 31.058824 | 84 | 0.547706 | false |
ndexbio/ndex-enrich | network_gene_analyzer.py | 1 | 4628 | __author__ = 'dexter'
import requests
from bson.json_util import dumps
class NetworkGeneAnalyzer:
def __init__(self, network_id, ndex):
self.ndex = ndex
self.network = self.ndex.get_complete_network(network_id)
self.identifiers = []
self.node_map = self.network.get("nodes")
self.base_term_map = self.network.get("baseTerms")
self.function_term_map = self.network.get("functionTerms")
self.scrubbed_terms = []
def get_genes(self):
self.get_network_identifiers()
self.get_genes_for_identifiers()
scrub_list = []
for scrub_item in self.scrubbed_terms:
scrub_list.append(scrub_item.get('symbol'))
return scrub_list
def get_genes_for_identifiers(self):
#print "TODO"
IDMAP_URL = 'http://ec2-52-34-209-69.us-west-2.compute.amazonaws.com:3000/idmapping'
payload = {'ids':self.identifiers}
headers = {'Content-Type': 'application/json',
'Accept': 'application/json',
'Cache-Control': 'no-cache',
}
r = requests.post(IDMAP_URL, data=dumps(payload), headers=headers)
result_dictionary = r.json()
print dumps(result_dictionary)
#scrubbed_terms = []
if(result_dictionary['matched'] is not None and len(result_dictionary['matched']) > 0):
dedup_list = []
#===========================================================
# if the term we entered is already a gene symbol or a gene
# id the response JSON will identify it in the inType field
#
# de-dup the list
#===========================================================
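            # Illustrative shape of one matched entry, inferred from the handling
            # below (not taken from the service documentation):
            #   {"in": "TP53", "inType": "Symbol", "matches": {"GeneID": "7157"}}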
for term_match in result_dictionary['matched']:
add_this_term = {'symbol': '', 'geneid':''}
if(term_match['inType'] == 'Symbol'):
if(term_match['matches']['GeneID'] not in dedup_list):
add_this_term['symbol'] = term_match['in']
add_this_term['geneid'] = term_match['matches']['GeneID']
dedup_list.append(term_match['matches']['GeneID'])
self.scrubbed_terms.append(add_this_term)
elif(term_match['inType'] == 'GeneID'):
if(term_match['in'] not in dedup_list):
add_this_term['symbol'] = term_match['matches']['Symbol']
add_this_term['geneid'] = term_match['in']
dedup_list.append(term_match['in'])
self.scrubbed_terms.append(add_this_term)
else:
if(term_match['matches']['GeneID'] not in dedup_list):
add_this_term['symbol'] = term_match['matches']['Symbol']
add_this_term['geneid'] = term_match['matches']['GeneID']
dedup_list.append(term_match['matches']['GeneID'])
self.scrubbed_terms.append(add_this_term)
#print dumps(self.scrubbed_terms)
def get_network_identifiers(self):
for node in self.node_map.values():
# get ids from represents
represents_id = node.get('represents')
if represents_id:
self.get_identifiers_from_term_id(represents_id)
            # check aliases, take the first that resolves to a gene
alias_ids = node.get('aliases')
for alias_id in alias_ids:
self.get_identifiers_from_term_id(alias_id)
# finally, add the name
name = node.get("name")
if name:
self.identifiers.append(name)
def get_identifiers_from_term_id(self, term_id):
if self.identifier_from_base_term_id(term_id):
return True
elif self.get_identifiers_from_function_term_id(term_id):
return True
else:
return False
def get_identifiers_from_function_term_id(self, function_term_id):
# if it is a function term, process all genes mentioned
function_term = self.function_term_map.get(function_term_id)
if function_term:
for parameter in function_term.get('parameters'):
self.get_identifiers_from_term_id(parameter)
return True
else:
return False
    def identifier_from_base_term_id(self, base_term_id):
        base_term = self.base_term_map.get(base_term_id)
        if base_term:
            self.identifiers.append(base_term.get('name'))
            return True
        return False
| bsd-2-clause | -3,326,323,250,897,176,000 | 38.896552 | 95 | 0.536949 | false |
lukasdragon/PythonScripts | vote.py | 1 | 1741 | # -*- coding: utf-8 -*-
#!/usr/bin/python
#===============================#
#CODE PROPERTY OF LUKAS G. OLSON#
#https://github.com/lukasdragon #
#===============================#
#Color variables!
COKBLUE = '\033[94m';
COKGREEN = '\033[92m';
CWARNING = '\033[93m';
CFAIL = '\033[91m';
CRESET ='\033[0m';
CBOLD = '\033[01m';
CREVERSE = '\033[07m';
COTHER = '\033[33m';
#Helper methods
#header
print "{0}{1}=================".format(COTHER, CBOLD);
print "Voting Calculator";
print "================={0}".format(CRESET);
Canadian = True if raw_input(COKGREEN + "Are you Canadian? [Yes|No]: " + CRESET).lower() == 'yes' else False
if Canadian:
#Age
Age = raw_input("{0}How old are you?: {1}".format(COKGREEN,CRESET));
Fetus = 0;
Baby = 4;
Child = 17;
Adult = 130;
Vampire = 130;
#parse
try:
Age = int(Age);
    except ValueError:
        print CFAIL + "ERROR! INCORRECT VALUE";
        exit(1);
#Check Age
if Age <= Fetus:
print CBOLD + "You are aren't born yet! Come back later!";
elif Age <= Baby:
print CBOLD + "Get away from me baby! You shouldn't even be able to read this!";
elif Age <= Child:
print CBOLD + "You are a child! You cannot vote! Come back when you become a human!";
elif Age <= Adult:
if raw_input(COKGREEN + "Are you a registered voter? [Yes|No]: " + CRESET).lower() == 'yes':
print CBOLD + "Congratulations! You are an adult and are able to vote!";
else:
print CFAIL + CBOLD + "YOU NEED TO REGISTER TO VOTE! GO DO THAT ASAP!!!!";
else:
print CBOLD + CWARNING + "GET AWAY FROM ME SATAN!";
else:
print CFAIL + "GET OUT OF HERE YOU FILTHY FOREIGNER!"
print CRESET;
| unlicense | -6,972,836,603,200,723,000 | 23.180556 | 108 | 0.552556 | false |
crashfrog/Dispatch | spades_careful_assembler.py | 1 | 4808 | #!/usr/bin/env python
import subprocess
import tempfile
import shutil
import os
import re
import fasta_statter
"""
Assembly Dispatch bindings for SPAdes Single-Cell assembler (Bankevitch et al,
J Comput Biol, 2012), a de Brujin graph assembler for paired-end single-cell
sequencing.
Trying something new here - using file-like object to try to parse SPAdes output as it
runs. Also SPAdes has a Python script wrapper, so this one is a little different - we
actually import the SPAdes module and invoke its main method with an arg vector.
Jan 6 2014: No, we don't. That was stupid from the get-go.
"""
class SpadesParser(object):
"File-like object to parse IO streams and invoke a function in response to a regex match."
def __init__(self, function, regex, buffer):
self.func = function
self.re = re.compile(regex, re.MULTILINE)
self.buffer = buffer
self.passthrough = False
def write(self, s):
"Capture a text stream, look for regex matches; if found call func with the match"
if 'Error' in s:
self.passthrough=True
r = self.re.search(s)
if r and not self.passthrough:
self.func(r.groups()[0])
if self.passthrough:
self.func(s)
return self.buffer.write(s)
def fileno(self):
return 0
def flush(self):
pass
def close(self):
pass
description = "SPAdes, a de Brujin graph assembler with advanced error correction and handling of paired-end data."
core_load = 1 #number of cores this assembler will max out
#spades_exec = '/home/justin.payne/SPAdes-2.5.0-Linux/bin/'
#spades_exec = '/home/justin.payne/SPAdes-3.0.0-Linux/bin/'
spades_exec = ''
supports = ('MiSeq', 'IonTorrent')
def assemble(path, accession, fasta_file_name=None, callback=lambda s: None, update_callback=lambda d: None, debug=True, **kwargs):
import sys
if not fasta_file_name:
fasta_file_name = "{}.spades.fasta".format(accession)
kwargs['temp_dir'] = tempfile.mkdtemp()
d = {'assembly_version':'SPAdes v. 3.0.0',
'average_coverage':'',
'num_contigs':'',
'n50':'',
'num_bases':'',
'fasta_file':fasta_file_name,
'lib_insert_length':'Not determined',
'matched':'-'
}
try:
#import the spades python wrapper
#sys.path.append(spades_exec)
#import spades
#get version
#d['assembly_version'] = "SPAdes v. {}".format(spades.spades_version.replace("\n", ""))
#assemble
#print "version:", d['assembly_version']
callback("running SPAdes in {temp_dir}...".format(**kwargs))
def status_callback(r):
callback("running: " + r)
if not debug:
sys.stdout = SpadesParser(status_callback, r"^ *=+ ([A-Za-z0-9;:., ]+)", open(os.devnull, 'w'))
#argv = "--disable-gzip-output --careful -t 8 -m 64 -1 {reads1} -2 {reads2} -o {temp_dir}".format(**kwargs).split(" ")
#if debug:
# callback("spades {}".format(" ".join(argv)))
#spades.main(argv)
if 'reads2' in kwargs and kwargs['reads2']:
subprocess.check_call("{spades_exec}spades -t 8 -m 64 -1 {reads1} -2 {reads2} -o {temp_dir} --careful".format(spades_exec=spades_exec, **kwargs), shell=True)
else:
subprocess.check_call("{spades_exec}spades --iontorrent -t 8 -m 64 -s {reads1} -o {temp_dir} --careful".format(spades_exec=spades_exec, **kwargs), shell=True)
update_callback({"k_value":'21, 33, 55, 77, 99, 127'})
callback("Copying {temp_dir}/contigs.fasta...".format(**kwargs))
shutil.copyfile("{temp_dir}/contigs.fasta".format(**kwargs), "{}/{}".format(path, fasta_file_name))
shutil.copyfile("{temp_dir}/spades.log".format(**kwargs), "{}/spades.log".format(path))
d.update(fasta_statter.stat_velvet(os.path.join(path, fasta_file_name), 33))
# except subprocess.CalledProcessError as e:
# raise ValueError(str(type(e)) + str(e) + str(e.output))
except Exception as e:
if debug:
import traceback
traceback.print_exc(sys.stdout)
raise e
raise ValueError("SPAdes assembly failure.")
finally:
callback("Cleaning up {temp_dir}...".format(**kwargs))
shutil.rmtree(kwargs['temp_dir'])
sys.stdout = sys.__stdout__
return d
if __name__ == "__main__":
import sys
import datetime
def cb(s):
print "[{}]".format(datetime.datetime.today().ctime()), s
def ucb(d):
for (key, value) in d.items():
print key, ":", value
print assemble(path='/home/justin.payne/',
reads1='/shared/gn2/CFSANgenomes/CFSAN001656/CFSAN001656_01/CFSAN001656_S8_L001_R1_001.fastq',
reads2='/shared/gn2/CFSANgenomes/CFSAN001656/CFSAN001656_01/CFSAN001656_S8_L001_R2_001.fastq',
accession='CFSAN001656_01',
callback=cb,
update_callback=ucb,
debug=True)
print assemble(path='/home/justin.payne',
reads1='/shared/gn2/CFSANgenomes/CFSAN006329/CFSAN006329_01/CFSAN006329.reads.fastq',
accession='CFSAN006329_01',
callback=cb,
debug=True,
update_callback=ucb) | unlicense | -3,152,752,227,748,538,000 | 29.436709 | 161 | 0.677829 | false |
LeBarbouze/tunacell | tunacell/base/cell.py | 1 | 15188 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This module defines how cells are stored as tunacell's objects
"""
from __future__ import print_function
import numpy as np
import warnings
import treelib as tlib
from tunacell.base.observable import Observable, FunctionalObservable
from tunacell.base.datatools import (Coordinates, compute_rates,
extrapolate_endpoints,
derivative, logderivative, ExtrapolationError)
class CellError(Exception):
pass
class CellChildsError(CellError):
pass
class CellParentError(CellError):
pass
class CellDivisionError(CellError):
pass
class Cell(tlib.Node):
"""General class to handle cell data structure.
Inherits from treelib.Node class to facilitate tree building.
Parameters
----------
identifier : str
cell identifier
container : :class:`Container` instance
container to which cell belongs
Attributes
----------
container : :class:`Container` instance
container to chich cell belongs
childs : list of :class:`Cell` instances
daughter cells of current cell
parent : :class:`Cell` instance
mother cell of current cell
birth_time : float (default None)
time of cell birth (needs to be computed)
division_time : float (default None)
time of cell division (needs to be computed)
Methods
-------
    set_division_event()
        computes birth/division times when possible
    build(obs)
        builds timeseries, uses one of the following methods depending on obs
    build_timelapse(obs)
        builds and stores timeseries associated with obs, in 'dynamics' mode
    compute_cyclized(obs)
        builds and stores the cell-cycle value associated with obs, for
        non-'dynamics' modes
"""
def __init__(self, identifier=None, container=None):
tlib.Node.__init__(self, identifier=identifier)
self._childs = []
self._parent = None
self._birth_time = None
self._division_time = None
self._sdata = {} # dictionary to contain computed data
self._protected_against_build = set() # set of obs not to re-build
self.container = container # point to Container instance
# cells are built from a specific container instance
# container can be a given field of view, a channel, a microcolony, ...
return
# We add few definitions to be able to chain between Cell instances
@property
def childs(self):
"Get list of child instances."
return self._childs
@childs.setter
def childs(self, value):
if value is None:
self._childs = []
elif isinstance(value, list):
for item in value:
self.childs = item
elif isinstance(value, Cell):
self._childs.append(value)
else:
raise CellChildsError
@property
def parent(self):
"Get parent instance."
return self._parent
@parent.setter
def parent(self, pcell):
if pcell is None:
self._parent = None
elif isinstance(pcell, Cell):
self._parent = pcell
else:
raise CellParentError
@property
def birth_time(self):
"Get cell cycle start time. See below for Setter."
return self._birth_time
@birth_time.setter
def birth_time(self, value):
"Set cell cycle start time. See above for Getter."
self._birth_time = value
@property
def division_time(self):
"Get cell cycle end time. See below for Setter."
return self._division_time
@division_time.setter
def division_time(self, value):
"Set cell cycle end time. See above for Getter."
if self.birth_time is not None:
if value < self.birth_time:
raise CellDivisionError
self._division_time = value
def set_division_event(self):
"method to call when parent is identified"
previous_frame = None
if (self.parent is not None) and (self.parent.data is not None):
previous_frame = self.parent.data['time'][-1]
first_frame = None
if self.data is not None:
first_frame = self.data['time'][0]
if previous_frame is not None and first_frame is not None:
div_time = (previous_frame + first_frame)/2. # halfway
self.birth_time = div_time
self.parent.division_time = div_time
return
def __repr__(self):
cid = str(self.identifier)
if self.parent:
pid = str(self.parent.identifier)
else:
pid = '-'
if self.childs:
ch = ','.join(['{}'.format(c.identifier) for c in self.childs])
else:
ch = '-'
return cid+';p:'+pid+';ch:'+ch
def info(self):
dic = {}
dic['a. Identifier'] = '{}'.format(self.identifier)
pid = 'None'
if self.parent:
pid = '{}'.format(self.parent.identifier)
dic['b. Parent id'] = pid
chids = 'None'
if self.childs:
chids = ', '.join(['{}'.format(ch.identifier)
for ch in self.childs])
dic['c. Childs'] = chids
dic['d. Birth time'] = '{}'.format(self.birth_time)
dic['e. Division time'] = '{}'.format(self.division_time)
if self.data is not None:
dic['f. N_frames'] = '{}'.format(len(self.data))
return dic
def protect_against_build(self, obs):
"""Protect current cell against building obs array/value"""
self._protected_against_build.add(obs)
return
def build(self, obs):
"""Builds timeseries"""
if obs in self._protected_against_build:
return
if isinstance(obs, FunctionalObservable):
# first build every single Observable
for item in obs.observables:
self.build(item)
arrays = [self._sdata[item.label] for item in obs.observables]
self._sdata[obs.label] = obs.f(*arrays)
elif isinstance(obs, Observable):
if obs.mode == 'dynamics':
self.build_timelapse(obs)
else:
self.compute_cyclized(obs)
else:
raise TypeError('obs must be of type Observable or FunctionalObservable')
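    # Hedged sketch of the FunctionalObservable branch of build() above,
    # assuming FunctionalObservable accepts a combining function and the
    # Observables it acts on (names below are illustrative only):
    #
    #     width = Observable(name='width', raw='width')
    #     length = Observable(name='length', raw='length')
    #     area = FunctionalObservable(name='area', f=lambda w, l: w * l,
    #                                 observables=[width, length])
    #     cell.build(area)  # builds 'width' and 'length', then applies f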
def build_timelapse(self, obs):
"""Builds timeseries corresponding to observable of mode 'dynamics'.
        Result is an array of the same length as the time array, stored in the
        dictionary _sdata, whose keys are obs.label. When using sliding
        windows, estimates in a given cell update data in its parent cell, if
        and only if the parent has not been updated before (check for disjoint
        time intervals).
Parameters
----------
obs : Observable instance
mode must be 'dynamics'
Note
-----
        Some observables carry the 'local_fit' option set to True. In this
        case, local fits over shifting time-windows are performed. If only the
        given cell's data were used, the constraints on the shifting
        time-window would leave some 'empty' times at which no evaluation
        could be performed. This is solved by also using the parent cell's
        data: the operation computes time-window fitted data over the parent
        cell's cycle.
Two precautions must then be taken:
1. a given cell's data must be used only once for evaluating parent
cell's data,
2. when data has been used from one daughter cell, concatenate
the current cell's evaluated data to it.
.. warning::
For some computations, the time interval between consecutive
acquisitions is needed. If it's defined in the container or the
experiment metadata, this parameter will be imported; otherwise if
there are at least 2 consecutive values, it will be inferred from
data (at the risk of making mistakes if there are too many missing
values)
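        Example
        -------
        Hedged sketch of the local-fit case (the Observable arguments are
        assumptions about its API, not taken from this file)::

            mu = Observable(name='mu', raw='length', differentiate=True,
                            scale='log', local_fit=True, time_window=15.)
            cell.build_timelapse(mu)
            cell._sdata[mu.label]         # estimates over this cell's cycle
            cell.parent._sdata[mu.label]  # may be completed from this cell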
"""
label = str(obs.label)
raw = obs.raw
coords = Coordinates(self.data['time'], self.data[raw])
if self.parent is not None and len(self.parent.data) > 0:
anteriors = Coordinates(self.parent.data['time'],
self.parent.data[raw])
else:
anteriors = Coordinates(np.array([], dtype=float),
np.array([], dtype=float))
# if empty, return empty array of appropriate type
if len(self.data) == 0: # there is no data, but it has some dtype
return Coordinates(np.array([], dtype=float),
np.array([], dtype=float))
dt = self.container.period
if dt is None:
# automatically finds dt
if len(self.data) > 1:
arr = self.data['time']
time_increments = arr[1:] - arr[:-1]
dt = np.round(np.amin(np.abs(time_increments)), decimals=2)
# case : no local fit, use data, or finite differences
if not obs.local_fit:
if obs.differentiate:
if obs.scale == 'linear':
new = derivative(coords)
elif obs.scale == 'log':
new = logderivative(coords)
else:
new = coords
self._sdata[label] = new.y
# case : local estimates using compute_rates
else:
r, f, ar, af, xx, yy = compute_rates(coords.x, coords.y,
x_break=self.birth_time,
anterior_x=anteriors.x,
anterior_y=anteriors.y,
scale=obs.scale,
time_window=obs.time_window,
dt=dt,
join_points=obs.join_points)
if obs.differentiate:
to_cell = r
to_parent = ar
if len(ar) != len(anteriors.x):
                    warnings.warn('anterior rate estimates do not align '
                                  'with the parent cell time points')
else:
to_cell = f
to_parent = af
self._sdata[label] = to_cell
if self.parent is not None and (not np.all(np.isnan(to_parent))):
if label not in self.parent._sdata.keys():
self.parent._sdata[label] = to_parent
else:
existing = self.parent._sdata[label]
                    # where existing entries are nan, fill them with the
                    # newly computed values
self.parent._sdata[label] = np.where(np.isnan(existing), to_parent, existing)
return
def compute_cyclized(self, obs):
"""Computes observable when mode is different from 'dynamics'.
Parameters
----------
obs : Observable instance
mode must be different from 'dynamics'
Raises
------
ValueError
when Observable mode is 'dynamics'
Note
----
To compute a cell-cycle observable (e.g. birth growth rate), it is
necessary to know the value of the timelapse counterpart (e.g. growth
rate here). The timelapse observable may work by joining values at
divisions, and hence a single call to Cell.build_timelapse() will
        produce a different array than when it has also been called in a
        daughter cell (potentially affecting values toward the end of the
        current cell cycle). Hence, when continuity is used to join timeseries
        at divisions, enhancing results with fitting over sliding windows, it
        is the user's task to first compute the timelapse observable over the
        entire lineage, and only then evaluate
cell-cycle values. This is why the function below tries first to read
        an already present array for the timelapse counterpart, and only if
        that fails does it compute one using only the current cell's data.
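        Example
        -------
        Hedged sketch: with an observable such as
        ``Observable(name='length_birth', raw='length', mode='birth')``
        (arguments assumed from the Observable API), the value stored in
        _sdata is the timelapse 'length' series extrapolated to
        self.birth_time.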
"""
scale = obs.scale
npts = obs.join_points
label = obs.label
if obs.mode == 'dynamics':
            raise ValueError('Called compute_cyclized for dynamics mode')
# associate timelapse counterpart
cobs = obs.as_timelapse()
clabel = cobs.label
time = self.data['time']
# if it has been computed already, the clabel key exists in sdata
try:
array = self._sdata[clabel]
# otherwise compute the timelapse counterpart
except KeyError:
self.build_timelapse(cobs)
array = self._sdata[clabel]
# get value
try:
if obs.mode == 'birth':
value = extrapolate_endpoints(time, array, self.birth_time,
scale=scale, join_points=npts)
elif obs.mode == 'division':
value = extrapolate_endpoints(time, array, self.division_time,
scale=scale, join_points=npts)
elif 'net-increase' in obs.mode:
dval = extrapolate_endpoints(time, array, self.division_time,
scale=scale, join_points=npts)
bval = extrapolate_endpoints(time, array, self.birth_time,
scale=scale, join_points=npts)
if obs.mode == 'net-increase-additive':
value = dval - bval
elif obs.mode == 'net-increase-multiplicative':
value = dval/bval
elif obs.mode == 'average':
value = np.nanmean(array)
            elif obs.mode == 'rate':
                if len(array) < 2:
                    value = np.nan  # not enough values to estimate rate
                else:
                    if obs.scale == 'log':
                        array = np.log(array)
                    value, intercept = np.polyfit(time, array, 1)
except ExtrapolationError as err:
# msg = '{}'.format(err)
# warnings.warn(msg)
value = np.nan # missing information
self._sdata[label] = value
return
def _disjoint_time_sets(ts1, ts2):
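    """Return True when the two time arrays cover disjoint time ranges."""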
if len(ts1) == 0 or len(ts2) == 0:
return True
min1, min2 = map(np.nanmin, [ts1, ts2])
max1, max2 = map(np.nanmax, [ts1, ts2])
return max1 < min2 or max2 < min1
def filiate_from_bpointer(cells):
"""Build in place parent/childs attributes in a set of filiated cells
Parameters
----------
cells : list of Cell instances
"""
for cell in cells:
childs = []
for cc in cells:
if cc.bpointer == cell.identifier:
childs.append(cc)
cc.parent = cell
cc.set_division_event()
cell.childs = childs | mit | 1,801,948,970,167,984,400 | 36.046341 | 97 | 0.55906 | false |
vjousse/viserlalune | blogebook/templatetags/blog_epub.py | 1 | 1532 | from django import template
from ebooklib import epub
register = template.Library()
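# Usage sketch (hedged): once this tag library is loaded in a template --
# presumably {% load blog_epub %}, i.e. the module name -- the tag registered
# below can be rendered as {% posts_epub_link posts %} to build the EPUB file
# and output a link to it.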
def posts_epub_link(posts):
book = epub.EpubBook()
# add metadata
book.set_title('Articles de Vincent Jousse')
book.set_language('fr')
book.add_author('Vincent Jousse')
for post in posts:
        print(post.title)
c1 = epub.EpubHtml(title=post.title, file_name='%s.xhtml' % post.slug, lang='fr')
c1.content=u'<html><head></head><body><h1>Introduction</h1><p>Voici une belle introduction.</p></body></html>'
book.add_item(c1)
# add navigation files
book.add_item(epub.EpubNcx())
book.add_item(epub.EpubNav())
# define css style
style = '''
@namespace epub "http://www.idpf.org/2007/ops";
body {
font-family: Cambria, Liberation Serif, Bitstream Vera Serif, Georgia, Times, Times New Roman, serif;
}
h2 {
text-align: left;
text-transform: uppercase;
font-weight: 200;
}
ol {
list-style-type: none;
}
ol > li:first-child {
margin-top: 0.3em;
}
nav[epub|type~='toc'] > ol > li > ol {
list-style-type:square;
}
nav[epub|type~='toc'] > ol > li > ol > li {
margin-top: 0.3em;
}
'''
# add css file
nav_css = epub.EpubItem(uid="style_nav", file_name="style/nav.css", media_type="text/css", content=style)
book.add_item(nav_css)
# create spine
book.spine = ['nav', c1 ]
# create epub file
epub.write_epub('test.epub', book, {})
return "/test.epub"
register.simple_tag(posts_epub_link)
| mit | 8,783,613,191,321,780,000 | 19.426667 | 118 | 0.619452 | false |
springload/madewithwagtail | core/migrations/0006_wagtail_1_6_upgrade.py | 1 | 1067 | # -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_auto_20161004_1431'),
]
operations = [
migrations.AlterField(
model_name='submitformfield',
name='choices',
field=models.TextField(help_text='Comma separated list of choices. Only applicable in checkboxes, radio and dropdown.', verbose_name='choices', blank=True),
),
migrations.AlterField(
model_name='submitformpage',
name='to_address',
field=models.CharField(help_text='Optional - form submissions will be emailed to these addresses. Separate multiple addresses by comma.', max_length=255, verbose_name='to address', blank=True),
),
migrations.AlterField(
model_name='wagtailcompanypage',
name='company_url',
field=models.URLField(help_text=b'The URL of your site, something like "https://www.springload.co.nz"', null=True, blank=True),
),
]
| mit | 5,708,785,225,295,185,000 | 35.793103 | 205 | 0.626054 | false |
fablab-ka/labtags | blot-gateway/tagscanner.py | 1 | 2414 | import threading, time, traceback
from bluepy import btle
from tag import Tag
from messages import DiscoverTagMessage, GWStartupMessage, GWShutdownMessage
from utils import ANSI_RED, ANSI_OFF
class TagScanner:
def __init__(self):
#btle.Debugging = True
self.timeout = 4
self.hci = 0
self.scanner = btle.Scanner(self.hci)
def scan(self):
result = []
devices = self.scanner.scan(self.timeout)
self.scanner.clear()
for d in devices:
if not d.connectable:
print(ANSI_RED + "[TagScanner] Device not connectable", d.addr + ANSI_OFF )
continue
#print(ANSI_RED + "[TagScanner] Tag found '%s' '%s' '%s'" % (d.addr, d.addrType, d.rssi) + ANSI_OFF)
#print(ANSI_RED + "[TagScanner] " + str(d.getScanData()) + ANSI_OFF)
name = d.getValueText(9)
result.append(Tag(d.addr, d.addrType, name, d.rssi,-1))
return result
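# Minimal usage sketch (hedged): the gateway presumably wires the scan loop
# below roughly like this, where the message queue and the tag cache (an
# object offering getData/hasTagByMac/append/remove) come from the rest of
# the project; TagCache is a hypothetical name:
#
#     message_queue = Queue()
#     tag_cache = TagCache()
#     ScanLoopThread(message_queue, tag_cache).start()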
class ScanLoopThread(threading.Thread):
def __init__(self, messageQueue, tagCache):
threading.Thread.__init__(self)
self.messageQueue = messageQueue
self.tagCache = tagCache
self.scanner = TagScanner()
self.rediscoverTimeout = 5
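        # tags not re-seen within this many seconds are pruned from the cache
        # by pruneTagCache() and will be re-announced by discoverTags() later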
def pruneTagCache(self):
now = time.time()
for tag in self.tagCache.getData():
if (now - tag.discovered) > self.rediscoverTimeout:
self.tagCache.remove(tag)
def discoverTags(self):
tags = self.scanner.scan()
for tag in tags:
if not self.tagCache.hasTagByMac(tag.mac):
print(ANSI_RED + "[ScanThread] discovered Tag '" + str(tag.mac) + "' name: '" + str(tag.name) + "'" + ANSI_OFF)
self.tagCache.append(tag)
self.messageQueue.put(DiscoverTagMessage(tag))
def run(self):
print(ANSI_RED + "[ScanThread] scan loop start" + ANSI_OFF)
#self.messageQueue.put(GWStartupMessage()) #todo
while True:
try:
self.pruneTagCache()
self.discoverTags()
except:
print(ANSI_RED + "[ScanThread] " + str(traceback.format_exc()) + ANSI_OFF)
time.sleep(0.1)
#self.messageQueue.put(GWShutdownMessage()) #todo
print(ANSI_RED + "[ScanThread] scan loop shutdown" + ANSI_OFF) #Ralf: Diese Meldung kommt imo nie ! #todo
| mit | -2,211,310,556,143,237,000 | 30.763158 | 127 | 0.584093 | false |
AEDA-Solutions/matweb | backend/Models/Curso/PedidoEditar.py | 1 | 1688 | from Framework.Pedido import Pedido
from Framework.ErroNoHTTP import ErroNoHTTP
class PedidoEditar(Pedido):
def __init__(self,variaveis_do_ambiente):
super(PedidoEditar, self).__init__(variaveis_do_ambiente)
try:
self.id = self.corpo['id']
self.nome = self.corpo['nome']
self.codigo = self.corpo['codigo']
self.id_grau = self.corpo['id_grau']
self.id_campus = self.corpo['id_campus']
self.permanencia_minima = self.corpo['permanencia_minima']
self.permanencia_maxima = self.corpo['permanencia_maxima']
self.creditos_formatura = self.corpo['creditos_formatura']
self.creditos_optativos_conexa = self.corpo['creditos_optativos_conexa']
self.creditos_optativos_concentracao = self.corpo['creditos_optativos_concentracao']
self.creditos_livres_maximo = self.corpo['creditos_livres_maximo']
self.mec = self.corpo['mec']
except:
raise ErroNoHTTP(400)
def getId(self):
return self.id
def getNome(self):
return self.nome
def getCodigo(self):
return self.codigo
def getId_grau(self):
return self.id_grau
def getId_campus(self):
return self.id_campus
def getPermanencia_minima(self):
return self.permanencia_minima
def getPermanencia_maxima(self):
return self.permanencia_maxima
def getCreditos_formatura(self):
return self.creditos_formatura
	def getCreditos_optativos_conexa(self):
return self.creditos_optativos_conexa
	def getCreditos_optativos_concentracao(self):
return self.creditos_optativos_concentracao
	def getCreditos_livres_maximo(self):
return self.creditos_livres_maximo
	def getMec(self):
return self.mec
| mit | 2,563,948,748,711,751,700 | 23.114286 | 87 | 0.709716 | false |
gautam1858/tensorflow | tensorflow/python/kernel_tests/tensor_array_ops_test.py | 1 | 62058 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tensor_array_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_grad
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def _make_converter(tf_dtype):
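  """Return a function casting test data to tf_dtype's numpy representation."""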
def _converter(x):
if tf_dtype == dtypes.string:
# In Python3, np.str is unicode, while we always want bytes
return np.asarray(x).astype("|S")
x = np.asarray(x).astype(tf_dtype.as_numpy_dtype)
if tf_dtype.is_complex:
# Add a non-zero imaginary component to x.
x -= 1j * x
return x
return _converter
def _make_ta(size, name, dtype=dtypes.float32, infer_shape=False):
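  """Shorthand used by the tests below to construct a TensorArray."""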
return tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name=name, size=size, infer_shape=infer_shape)
@test_util.run_all_in_graph_and_eager_modes
@test_util.with_control_flow_v2
class TensorArrayTest(test.TestCase):
@classmethod
def setUpClass(cls):
super(TensorArrayTest, cls).setUpClass()
cls._workers, _ = test.create_local_cluster(num_workers=3, num_ps=0)
@classmethod
def tearDownClass(cls):
super(TensorArrayTest, cls).tearDownClass()
session_lib.Session.reset(cls._workers[0].target)
@test_util.run_in_graph_and_eager_modes
def testTensorArrayWriteRead(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
def _testTensorArrayWritePack(self, tf_dtype):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
convert = _make_converter(tf_dtype)
w0 = ta.write(0, convert([[4.0, 5.0]]))
w1 = w0.write(1, convert([[6.0, 7.0]]))
w2 = w1.write(2, convert([[8.0, 9.0]]))
c0 = w2.stack()
c0 = self.evaluate(c0)
self.assertAllEqual(
convert([[[4.0, 5.0]], [[6.0, 7.0]], [[8.0, 9.0]]]), c0)
def _testTensorArrayWritePackMaybeLegacy(self):
self._testTensorArrayWritePack(dtypes.float32)
self._testTensorArrayWritePack(dtypes.float64)
self._testTensorArrayWritePack(dtypes.int32)
self._testTensorArrayWritePack(dtypes.int64)
self._testTensorArrayWritePack(dtypes.complex64)
self._testTensorArrayWritePack(dtypes.complex128)
self._testTensorArrayWritePack(dtypes.string)
def testTensorArrayWritePack(self):
self._testTensorArrayWritePackMaybeLegacy()
def testEmptyTensorArrayPack(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
empty_element = np.zeros((0, 1), dtype=np.float32)
w0 = ta.write(0, empty_element)
w1 = w0.write(1, empty_element)
w2 = w1.write(2, empty_element)
c0 = w2.stack()
c0 = self.evaluate(c0)
self.assertAllEqual([3, 0, 1], c0.shape)
def _testTensorArrayWriteConcat(self, tf_dtype):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3, infer_shape=False)
convert = _make_converter(tf_dtype)
w0 = ta.write(0, convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0]]))
w1 = w0.write(1, convert([[6.0, 7.0], [106.0, 107.0]]))
w2 = w1.write(2, convert([[8.0, 9.0]]))
c0 = w2.concat()
c0 = self.evaluate(c0)
self.assertAllEqual(
convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0], [6.0, 7.0],
[106.0, 107.0], [8.0, 9.0]]), c0)
@test_util.run_deprecated_v1
def testTensorArrayWriteConcat(self):
self._testTensorArrayWriteConcat(dtypes.float32)
self._testTensorArrayWriteConcat(dtypes.float64)
self._testTensorArrayWriteConcat(dtypes.int32)
self._testTensorArrayWriteConcat(dtypes.int64)
self._testTensorArrayWriteConcat(dtypes.complex64)
self._testTensorArrayWriteConcat(dtypes.complex128)
self._testTensorArrayWriteConcat(dtypes.string)
def _testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros(self):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
element_shape=tensor_shape.TensorShape([1, 2]))
self.assertAllEqual([[0.0, 0.0]], self.evaluate(ta.read(0)))
self.assertAllEqual([[[0.0, 0.0]], [[4.0, 5.0]], [[0.0, 0.0]]],
self.evaluate(ta.write(1, [[4.0, 5.0]]).stack()))
self.assertAllEqual([[0.0, 0.0], [4.0, 5.0], [0.0, 0.0]],
self.evaluate(ta.write(1, [[4.0, 5.0]]).concat()))
@test_util.disable_control_flow_v2("b/122324791")
@test_util.run_v1_only("b/122324791")
def testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros(self):
self._testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros()
def _testTensorArrayReadOrPackNotAllValuesAvailableInferShapeFillsZeros(self):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3)
self.assertAllEqual(
[[0.0, 0.0]], self.evaluate(ta.write(1, [[4.0, 5.0]]).read(0)))
self.assertAllEqual([[[0.0, 0.0]], [[4.0, 5.0]], [[0.0, 0.0]]],
self.evaluate(ta.write(1, [[4.0, 5.0]]).stack()))
self.assertAllEqual([[0.0, 0.0], [4.0, 5.0], [0.0, 0.0]],
self.evaluate(ta.write(1, [[4.0, 5.0]]).concat()))
@test_util.disable_control_flow_v2("b/122324791")
@test_util.run_v1_only("b/122324791")
def testTensorArrayReadOrPackNotAllValuesAvailableInferShapeFillsZeros(self):
self._testTensorArrayReadOrPackNotAllValuesAvailableInferShapeFillsZeros()
def _testTensorArrayUnpackRead(self, tf_dtype):
with self.cached_session(use_gpu=True):
convert = _make_converter(tf_dtype)
ta = _make_ta(3, "foo", dtype=tf_dtype)
# Unpack a vector into scalars
w0 = ta.unstack(convert([1.0, 2.0, 3.0]))
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert(1.0), d0)
self.assertAllEqual(convert(2.0), d1)
self.assertAllEqual(convert(3.0), d2)
# Unpack a matrix into vectors
w1 = ta.unstack(convert([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]]))
r0 = w1.read(0)
r1 = w1.read(1)
r2 = w1.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert([1.0, 1.1]), d0)
self.assertAllEqual(convert([2.0, 2.1]), d1)
self.assertAllEqual(convert([3.0, 3.1]), d2)
# Try unpacking an empty matrix, which should not cause an error.
w2 = ta.unstack(convert([[], [], []]))
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
def _testTensorArrayUnpackReadMaybeLegacy(self):
self._testTensorArrayUnpackRead(dtypes.float32)
self._testTensorArrayUnpackRead(dtypes.float64)
self._testTensorArrayUnpackRead(dtypes.int32)
self._testTensorArrayUnpackRead(dtypes.int64)
self._testTensorArrayUnpackRead(dtypes.complex64)
self._testTensorArrayUnpackRead(dtypes.complex128)
self._testTensorArrayUnpackRead(dtypes.string)
def testTensorArrayUnpackRead(self):
self._testTensorArrayUnpackReadMaybeLegacy()
def _testTensorArraySplitRead(self, tf_dtype):
with self.cached_session(use_gpu=True):
convert = _make_converter(tf_dtype)
# Split an empty vector
ta = _make_ta(3, "foo", dtype=tf_dtype)
lengths = constant_op.constant([0, 0, 0])
w0 = ta.split(convert([]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
# Split a vector
lengths = constant_op.constant([2, 0, 1])
w0 = ta.split(convert([1.0, 2.0, 3.0]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert([1.0, 2.0]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([3.0]), d2)
# Split a matrix
lengths = constant_op.constant([2, 0, 1])
w0 = ta.split(
convert([[1.0, 101.0], [2.0, 201.0], [3.0, 301.0]]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert([[1.0, 101.0], [2.0, 201.0]]), d0)
self.assertAllEqual(convert([]).reshape(0, 2), d1)
self.assertAllEqual(convert([[3.0, 301.0]]), d2)
@test_util.run_deprecated_v1
def testTensorArraySplitRead(self):
self._testTensorArraySplitRead(dtypes.float32)
self._testTensorArraySplitRead(dtypes.float64)
self._testTensorArraySplitRead(dtypes.int32)
self._testTensorArraySplitRead(dtypes.int64)
self._testTensorArraySplitRead(dtypes.complex64)
self._testTensorArraySplitRead(dtypes.complex128)
self._testTensorArraySplitRead(dtypes.string)
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
@test_util.run_v1_only("v2 does not support TensorArray.grad.")
def testSkipEagerTensorGradArrayWriteRead(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
g_ta = ta.grad("grad")
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
g_w0 = g_ta.write(0, [[5.0, 6.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, -2.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
d0, d1, d2, g_d0, g_d1, g_d2 = session.run([r0, r1, r2, g_r0, g_r1, g_r2])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
self.assertAllEqual([[5.0, 6.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual(-2.0, g_d2)
@test_util.run_deprecated_v1
def testSkipEagerTensorArrayGradGrad(self):
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.skipTest("Legacy TensorArray does not support double derivatives.")
with self.test_session(use_gpu=True) as session:
x = constant_op.constant(4.0)
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=1,
infer_shape=False)
w0 = ta.write(0, x)
r0 = w0.read(0)
y = r0 * r0
g1 = gradients_impl.gradients(ys=[y], xs=[x])
g2 = gradients_impl.gradients(ys=[g1], xs=[x])
self.assertAllEqual([2.0], session.run(g2))
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
@test_util.run_v1_only("v2 does not support TensorArray.grad.")
def testSkipEagerTensorGradArrayDynamicWriteRead(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
g_ta = w2.grad("grad") # Get gradient array here so we know the shape
s = w2.size()
g_s = g_ta.size()
g_w0 = g_ta.write(0, [[5.0, 6.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, -2.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
d0, d1, d2, g_d0, g_d1, g_d2, vs, g_vs = session.run(
[r0, r1, r2, g_r0, g_r1, g_r2, s, g_s])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
self.assertAllEqual([[5.0, 6.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual(-2.0, g_d2)
self.assertAllEqual(3, vs)
self.assertAllEqual(3, g_vs)
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
@test_util.run_v1_only("v2 does not support TensorArray.grad.")
def testSkipEagerTensorGradAccessTwiceReceiveSameObject(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
g_ta_0 = ta.grad("grad")
g_ta_1 = ta.grad("grad")
with ops.control_dependencies([g_ta_0.write(0, [[4.0, 5.0]]).flow]):
# Write with one gradient handle, read with another copy of it
r1_0 = g_ta_1.read(0)
t_g_ta_0, t_g_ta_1, d_r1_0 = session.run(
[g_ta_0.handle.op, g_ta_1.handle.op, r1_0])
self.assertAllEqual(t_g_ta_0, t_g_ta_1)
self.assertAllEqual([[4.0, 5.0]], d_r1_0)
def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
with self.session(use_gpu=True):
ta = _make_ta(3, "foo", dtype=dtypes.float32)
# Test writing the wrong datatype
if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly()):
error_msg = ("Invalid data types; op elements string but list elements "
"float")
else:
error_msg = (
"TensorArray dtype is (float|float32) but Op is trying to write "
"dtype string")
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.write(0, "wrong_type_scalar").flow)
if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly()):
error_msg = "Trying to modify element -1 in a list with 3 elements."
else:
error_msg = "index -1"
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.write(-1, 3.0).flow)
if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly()):
error_msg = "Trying to modify element 3 in a list with 3 elements"
else:
error_msg = ("Tried to write to index 3 but array is not "
"resizeable and size is: 3")
# Test reading from too large an index
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.write(3, 3.0).flow)
def testTensorArrayReadWrongIndexOrDataTypeFails(self):
with self.session(use_gpu=True):
ta = _make_ta(3, "foo", dtype=dtypes.float32)
w0 = ta.write(0, [[4.0, 5.0]])
# Test reading wrong datatype (only possible when constructing graphs).
if (not context.executing_eagerly() and
not control_flow_util.ENABLE_CONTROL_FLOW_V2):
r0_bad = gen_data_flow_ops.tensor_array_read_v3(
handle=w0.handle, index=0, dtype=dtypes.float64, flow_in=w0.flow)
with self.assertRaisesOpError(
"TensorArray dtype is float but Op requested dtype double."):
self.evaluate(r0_bad)
if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly()):
error_msg = "Trying to access element -1 in a list with 3 elements."
else:
error_msg = "index -1"
# Test reading from a negative index, which is not allowed
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.read(-1))
if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly()):
error_msg = "Trying to access element 3 in a list with 3 elements."
else:
error_msg = "Tried to read from index 3 but array size is: 3"
# Test reading from too large an index
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.read(3))
@test_util.disable_control_flow_v2("v2 allows multiple writes.")
@test_util.run_v1_only("v2 allows multiple writes.")
def testSkipEagerTensorArrayWriteMultipleFails(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
with self.assertRaisesOpError(
"Could not write to TensorArray index 2 because "
"it has already been written to."):
self.evaluate(ta.write(2, 3.0).write(2, 3.0).flow)
def testTensorArrayConcatIncompatibleShapesFails(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w1 = ta.write(0, 3.0)
w2 = w1.write(1, 4.0)
w3 = w2.write(2, [3.0])
with self.assertRaisesOpError(
"Concat saw a scalar shape at index 0 but requires at least vectors"):
self.evaluate(w3.concat())
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w1 = ta.write(0, [3.0])
w2 = w1.write(1, [4.0])
w3 = w2.write(2, [[3.0]])
# The exact error messages differ between eager execution and graph
# construction as the former bubbles up the error from array_op.concat.
with self.assertRaisesOpError("shape"):
self.evaluate(w3.concat())
def testTensorArraySplitIncompatibleShapesFails(self):
with self.session(use_gpu=True):
in_eager_mode = context.executing_eagerly()
ta = _make_ta(3, "foo")
with self.assertRaisesOpError(
r"Expected lengths to be a vector, received shape: \[\]"):
if in_eager_mode:
self.evaluate(ta.split([1.0, 2.0, 3.0], 1))
else:
lengths = array_ops.placeholder(dtypes.int64)
ta.split([1.0, 2.0, 3.0], lengths).flow.eval(feed_dict={lengths: 1})
error_msg = ("Unused values in tensor. Length of tensor: 3 Values used: 1"
if control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not in_eager_mode else
r"Expected sum of lengths to be equal to values.shape\[0\], "
r"but sum of lengths is 1 and value's shape is: \[3\]")
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.split([1.0, 2.0, 3.0], [1]).flow)
ta = _make_ta(1, "baz")
if control_flow_util.ENABLE_CONTROL_FLOW_V2 and not in_eager_mode:
with self.assertRaisesRegexp(
ValueError, "Shape must be at least rank 1 but is rank 0"):
self.evaluate(ta.split(1.0, [1]).flow)
else:
with self.assertRaisesOpError(
r"Expected value to be at least a vector, but received shape: \[\]"
):
self.evaluate(ta.split(1.0, [1]).flow)
if not control_flow_util.ENABLE_CONTROL_FLOW_V2 or in_eager_mode:
ta = _make_ta(2, "buz")
with self.assertRaisesOpError(
r"TensorArray's size is not equal to the size of lengths "
r"\(2 vs. 1\), and the TensorArray is not marked as "
r"dynamically resizeable"):
self.evaluate(ta.split([1.0], [1]).flow)
def _testTensorArrayWriteGradientAddMultipleAdds(self, dtype):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name="foo", size=3, infer_shape=False)
ta_grad = ta.grad("grad")
c = lambda x: np.asarray(x, dtype=dtype.as_numpy_dtype)
w0 = ta.write(2, c(3.0))
w1 = w0.write(2, c(4.0))
w0_grad = ta_grad.write(2, c(3.0))
w1_grad = w0_grad.write(2, c(4.0))
w2_grad = w1_grad.write(2, c(5.0))
# Assert that aggregation works correctly
self.assertAllEqual(c(12.00), w2_grad.read(2).eval())
# Assert that if multiple_writes_aggregate is not enabled,
# multiple writes raise an exception.
with self.assertRaisesOpError(
r"TensorArray foo_.*: Could not write to TensorArray index 2 because "
r"it has already been written to."):
w1.flow.eval()
# Using differing shapes causes an exception
wb0_grad = ta_grad.write(1, c(1.0))
wb1_grad = wb0_grad.write(1, c([1.0]))
with self.assertRaisesOpError(
r"Could not aggregate to TensorArray index 1 because the "
r"existing shape is \[\] but the new input shape is \[1\]"):
wb1_grad.flow.eval()
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
@test_util.run_v1_only("v2 does not support TensorArray.grad.")
def testSkipEagerTensorArrayWriteGradientAddMultipleAdds(self):
for dtype in (dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128):
self._testTensorArrayWriteGradientAddMultipleAdds(dtype)
@test_util.disable_control_flow_v2("Low level legacy TA op test.")
@test_util.run_v1_only("Low level legacy TA op test.")
def testSkipEagerTensorArrayGradWithShapeKnownElementShape(self):
with self.session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
size=3,
dtype=dtypes.float32,
element_shape=tensor_shape.TensorShape([2, 3]))
handle, flow = data_flow_ops.tensor_array_grad_with_shape(
handle=ta.handle,
flow_in=ta.flow,
shape_to_prepend=tensor_shape.TensorShape([4, 5]),
source="source")
ta_grad = tensor_array_ops.TensorArray(
dtypes.float32, handle=handle, flow=flow)
value = array_ops.placeholder(dtypes.float32)
ta_grad = ta_grad.write(0, value)
read_value = ta_grad.read(0)
# Make sure shape inference worked.
self.assertAllEqual([None, None, 2, 3], read_value.shape.as_list())
# Writing with wrong shape should not work.
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Could not write to TensorArray"):
fed_value = np.random.random([2, 3])
sess.run(read_value, feed_dict={value: fed_value})
# Writing with correct shape should work.
fed_value = np.random.random([4, 5, 2, 3])
self.assertAllClose(fed_value,
sess.run(read_value, feed_dict={value: fed_value}))
@test_util.disable_control_flow_v2("Low level legacy TA op test.")
@test_util.run_v1_only("Low level legacy TA op test.")
def testSkipEagerTensorArrayGradWithShapeUnknownElementShape(self):
with self.session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
size=3, dtype=dtypes.float32,
element_shape=None) # Note that element_shape is unknown
handle, flow = data_flow_ops.tensor_array_grad_with_shape(
handle=ta.handle,
flow_in=ta.flow,
shape_to_prepend=tensor_shape.TensorShape([4, 5]),
source="source")
ta_grad = tensor_array_ops.TensorArray(
dtypes.float32, handle=handle, flow=flow)
value = array_ops.placeholder(dtypes.float32)
ta_grad = ta_grad.write(0, value)
read_value = ta_grad.read(0)
# Make sure shape inference worked.
self.assertIsNone(read_value.shape.ndims)
# Write with some shape and check read value.
fed_value = np.random.random([4, 5, 7])
self.assertAllClose(fed_value,
sess.run(read_value, feed_dict={value: fed_value}))
def testMultiTensorArray(self):
with self.session(use_gpu=True):
h1 = tensor_array_ops.TensorArray(
size=1, dtype=dtypes.float32, tensor_array_name="foo")
w1 = h1.write(0, 4.0)
r1 = w1.read(0)
h2 = tensor_array_ops.TensorArray(
size=1, dtype=dtypes.float32, tensor_array_name="bar")
w2 = h2.write(0, 5.0)
r2 = w2.read(0)
r = r1 + r2
val = self.evaluate(r)
self.assertAllClose(9.0, val)
def _testTensorArrayGradientWriteReadType(self, dtype):
with self.cached_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.as_dtype(dtype),
tensor_array_name="foo",
size=3,
infer_shape=False)
c = lambda x: np.array(x, dtype=dtype)
value_0 = constant_op.constant(c([[4.0, 5.0]]))
value_1 = constant_op.constant(c(3.0))
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
r0 = w1.read(0)
r1 = w1.read(1)
r0_2 = w1.read(0)
# Test individual components' gradients
grad_just_r0 = gradients_impl.gradients(
ys=[r0], xs=[value_0], grad_ys=[c([[2.0, 3.0]])])
grad_just_r0_vals = session.run(grad_just_r0)
self.assertAllEqual(c([[2.0, 3.0]]), grad_just_r0_vals[0])
grad_r0_r0_2 = gradients_impl.gradients(
ys=[r0, r0_2],
xs=[value_0],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]])])
grad_r0_r0_2_vals = session.run(grad_r0_r0_2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_r0_r0_2_vals[0])
grad_just_r1 = gradients_impl.gradients(
ys=[r1], xs=[value_1], grad_ys=[c(-2.0)])
grad_just_r1_vals = session.run(grad_just_r1)
self.assertAllEqual(c(-2.0), grad_just_r1_vals[0])
# Test combined gradients
grad = gradients_impl.gradients(
ys=[r0, r0_2, r1],
xs=[value_0, value_1],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]]), c(-2.0)])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_vals[0])
self.assertAllEqual(c(-2.0), grad_vals[1])
@test_util.run_deprecated_v1
def testSkipEagerTensorArrayGradientWriteRead(self):
for dtype in (np.float32, np.float64, np.complex64, np.complex128):
self._testTensorArrayGradientWriteReadType(dtype)
def _testTensorArrayGradientWritePackConcatAndRead(self):
with self.cached_session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
value_0 = constant_op.constant([-1.0, 1.0])
value_1 = constant_op.constant([-10.0, 10.0])
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
p0 = w1.stack()
r0 = w1.read(0)
s0 = w1.concat()
# Test gradient accumulation between read(0), pack(), and concat()
with ops.control_dependencies([p0, r0, s0]):
grad_r = gradients_impl.gradients(
ys=[p0, r0, s0],
xs=[value_0, value_1],
grad_ys=[
[[2.0, 3.0], [4.0, 5.0]], # pack gradient
[-0.5, 1.5], # read(0) gradient
[20.0, 30.0, 40.0, 50.0]
]) # concat gradient
grad_vals = self.evaluate(grad_r) # 2 + 2 entries
self.assertAllClose([2.0 - 0.5 + 20.0, 3.0 + 1.5 + 30.0], grad_vals[0])
self.assertAllEqual([4.0 + 40.0, 5.0 + 50.0], grad_vals[1])
@test_util.run_deprecated_v1
def testSkipEagerTensorArrayGradientWritePackConcatAndRead(self):
self._testTensorArrayGradientWritePackConcatAndRead()
@test_util.disable_control_flow_v2("v2 does not support clear_after_read.")
@test_util.run_v1_only("v2 does not support clear_after_read.")
def testTensorArrayReadTwice(self):
with self.session(use_gpu=True):
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
ta_readonce = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=2)
w_readonce = ta_readonce.unstack(value)
r0_readonce = w_readonce.read(0)
with self.assertRaisesOpError(
r"Could not read index 0 twice because it was cleared after a "
r"previous read \(perhaps try setting clear_after_read = false\?\)"):
with ops.control_dependencies([r0_readonce]):
self.evaluate(w_readonce.read(0))
ta_readtwice = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
w_readtwice = ta_readtwice.unstack(value)
r0_readtwice = w_readtwice.read(0)
with ops.control_dependencies([r0_readtwice]):
r1_readtwice = w_readtwice.read(0)
self.assertAllEqual([1.0, -1.0], self.evaluate(r1_readtwice))
def _testTensorArrayGradientUnpackRead(self):
with self.cached_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unstack(value)
r0 = w.read(0)
r0_1 = w.read(0)
r1 = w.read(1)
# Test combined gradients + aggregation of read(0)
grad = gradients_impl.gradients(
ys=[r0, r0_1, r1],
xs=[value],
grad_ys=[[2.0, 3.0], [-1.5, 1.5], [4.0, 5.0]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0 - 1.5, 3.0 + 1.5], [4.0, 5.0]], grad_vals[0])
@test_util.run_deprecated_v1
def testSkipEagerTensorArrayGradientUnpackRead(self):
self._testTensorArrayGradientUnpackRead()
@test_util.run_deprecated_v1
def testSkipEagerTensorArrayGradientSplitConcat(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=2,
infer_shape=False)
value = constant_op.constant(
[[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
w = ta.split(value, [2, 1])
r = w.concat()
# Test combined gradients
grad = gradients_impl.gradients(
ys=[r],
xs=[value],
grad_ys=[[[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]],
grad_vals[0])
def _testTensorArrayGradientDynamicUnpackRead(self):
with self.cached_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True)
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unstack(value)
r0 = w.read(0)
r1 = w.read(1)
# Test combined gradients + aggregation of read(0)
grad = gradients_impl.gradients(
ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
@test_util.run_deprecated_v1
def testSkipEagerTensorArrayGradientDynamicUnpackRead(self):
self._testTensorArrayGradientDynamicUnpackRead()
def testCloseTensorArray(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
self.evaluate(ta.close())
def testSizeTensorArray(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
s = ta.size()
self.assertAllEqual(3, self.evaluate(s))
def testWriteCloseTensorArray(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [3.0])
self.evaluate(w1.close()) # Expected to run without problems
def _testWhileLoopWritePackGradients(self, dynamic_size, dtype):
np_dtype = dtype.as_numpy_dtype
with self.cached_session(use_gpu=True):
def func(v0, state0, var):
ta = tensor_array_ops.TensorArray(
dtype=dtype,
tensor_array_name="foo",
size=0 if dynamic_size else 3,
dynamic_size=dynamic_size)
time_0 = array_ops.identity(0)
def body(time, ta_t, state):
sliced = array_ops.slice(
v0, begin=array_ops.stack([time, 0]), size=[1, -1])
sliced = array_ops.squeeze(sliced)
out = sliced + var + state
state += sliced
ta_t = ta_t.write(time, out)
return (time + 1, ta_t, state)
(unused_0, h_final, unused_2) = control_flow_ops.while_loop(
cond=lambda time, unused_1, unused_2: time < 3,
body=body,
loop_vars=(time_0, ta, state0),
shape_invariants=(time_0.get_shape(), tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()),
parallel_iterations=3)
vout = h_final.stack()
return vout
v0 = array_ops.identity(np.arange(3 * 5, dtype=np_dtype).reshape(3, 5))
state0 = array_ops.identity(np.array([1] * 5, dtype=np_dtype))
init_val = np.arange(100, 105, dtype=np_dtype)
var = variable_scope.get_variable(
"var",
shape=init_val.shape,
dtype=np_dtype,
initializer=init_ops.constant_initializer(init_val))
vout = func(v0, state0, var)
grad_val = -np.arange(3 * 5, dtype=np_dtype).reshape(3, 5)
if context.executing_eagerly():
grad_fn = backprop.gradients_function(func)
v0_grad, state0_grad, var_grad = grad_fn(v0, state0, var, dy=grad_val)
else:
v0_grad = gradients_impl.gradients([vout], [v0], [grad_val])[0]
state0_grad = gradients_impl.gradients([vout], [state0], [grad_val])[0]
var_grad = gradients_impl.gradients([vout], [var], [grad_val])[0]
self.evaluate(variables.global_variables_initializer())
state0_t, var_t, v0_t, vout_t, v0_grad_t, var_grad_t, state0_grad_t = (
self.evaluate(
([state0, var, v0, vout, v0_grad, var_grad, state0_grad])))
just_v0_grad_t = self.evaluate(v0_grad)
# state = [ state0 | state0 + v0[0] | state0 + v0[0] + v0[1] ]
# vout = [ v0[0] + var + state[0] |
# v0[1] + var + state[1] |
# v0[2] + var + state[2] ]
# = [ v0[0] + var + state0 |
# v0[1] + var + state0 + v0[0] |
# v0[2] + var + state0 + v0[0] + v0[1] ]
#
# d(vout[0])/d(v0) = [1 | 0 | 0 ]
# d(vout[1])/d(v0) = [1 | 1 | 0 ]
# d(vout[2])/d(v0) = [1 | 1 | 1 ]
# d(vout)/d(var) = [1 | 1 | 1]
# d(vout)/d(state0) = [ 1 | 1 | 1 ]
state_per_time = np.array(
[state0_t, state0_t + v0_t[0, :], state0_t + v0_t[0, :] + v0_t[1, :]])
# Compare forward prop
self.assertAllClose(v0_t + var_t + state_per_time, vout_t)
# Compare backward prop
expected_v0_grad_t = np.array([
grad_val[0, :] + grad_val[1, :] + grad_val[2, :],
grad_val[1, :] + grad_val[2, :], grad_val[2, :]
])
self.assertAllEqual(expected_v0_grad_t, v0_grad_t)
self.assertAllEqual(expected_v0_grad_t, just_v0_grad_t)
self.assertAllClose(grad_val.sum(axis=0), var_grad_t)
self.assertAllClose(grad_val.sum(axis=0), state0_grad_t)
def testWhileLoopWritePackGradients(self):
self._testWhileLoopWritePackGradients(
dynamic_size=False, dtype=dtypes.float32)
# TODO(ebrevdo): re-enable when While supports non-float32 gradients.
# self._testWhileLoopWritePackGradients(
# dynamic_size=False, dtype=tf.int64)
@test_util.run_v1_only("b/117943489")
def testSkipEagerWhileLoopDynamicWritePackGradients(self):
self._testWhileLoopWritePackGradients(
dynamic_size=True, dtype=dtypes.float32)
def testGradSerialTwoLoops(self):
with self.session(use_gpu=True):
def loop(x):
num_steps = 100
acc = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
size=num_steps,
clear_after_read=False,
element_shape=tensor_shape.scalar())
i = constant_op.constant(0, name="i")
c = lambda i, acc: i < 5
def b(i, acc):
x1 = control_flow_ops.cond(
math_ops.equal(i, 0), lambda: x,
lambda: math_ops.multiply(acc.read(i - 1), 2.0))
return i + 1, acc.write(i, x1)
i1, acc1 = control_flow_ops.while_loop(c, b, [i, acc])
z = constant_op.constant(0.0)
def fn(i, acc):
return i + 1, acc.write(i, z)
_, acc2 = control_flow_ops.while_loop(lambda i, acc: i < num_steps, fn,
[i1, acc1])
r = acc2.stack()
return r
x = constant_op.constant(2.0, name="x")
if context.executing_eagerly():
grad = backprop.gradients_function(loop)(x)[0]
else:
grad = gradients_impl.gradients(loop(x), [x])[0]
self.assertAllClose(31.0, self.evaluate(grad))
@test_util.run_deprecated_v1
def testSkipEagerSumOfTwoReadVariablesWithoutRepeatGrad(self):
with self.session(use_gpu=True) as session:
a = array_ops.identity(
np.arange(
3 * 5, dtype=np.float32).reshape(3, 5) + 1)
b = array_ops.identity(
np.arange(
3 * 5, dtype=np.float32).reshape(3, 5) + 1 + 3 * 5)
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
ta = ta.write(0, a, name="write_a")
ta = ta.write(1, b, name="write_b")
c = (
ta.read(
0, name="read_a_0") + # a + b
ta.read(
1, name="read_b_0"))
g0 = -(np.arange(3 * 5, dtype=np.float32).reshape(3, 5) + 1)
grad_a = gradients_impl.gradients([c], [a], [g0])[0] # d(a+b)/da = 1
grad_b = gradients_impl.gradients([c], [b], [g0])[0] # d(a+b)/db = 1
# Test gradients calculated individually
grad_a_t, = session.run([grad_a])
self.assertAllEqual(grad_a_t, g0)
grad_b_t, = session.run([grad_b])
self.assertAllEqual(grad_b_t, g0)
# Test gradients calculated jointly
joint_grad_a_t, joint_grad_b_t = session.run([grad_a, grad_b])
self.assertAllEqual(joint_grad_a_t, g0)
self.assertAllEqual(joint_grad_b_t, g0)
def _grad_source_for_name(self, name):
return tensor_array_grad._GetGradSource(constant_op.constant(0, name=name))
@test_util.run_deprecated_v1
def testSkipEagerGetGradSource_Invalid(self):
with self.assertRaises(ValueError):
self._grad_source_for_name("")
with self.assertRaises(ValueError):
self._grad_source_for_name("foo")
with self.assertRaises(ValueError):
self._grad_source_for_name("foo/bar")
@test_util.run_deprecated_v1
def testSkipEagerGetGradSource_NoEnclosingScope(self):
self.assertEqual("gradients:0", self._grad_source_for_name("gradients"))
self.assertEqual("gradients_0:0", self._grad_source_for_name("gradients_0"))
self.assertEqual("gradients", self._grad_source_for_name("gradients/foo"))
self.assertEqual("gradients_0",
self._grad_source_for_name("gradients_0/foo"))
self.assertEqual("gradients",
self._grad_source_for_name("gradients/foo/bar"))
self.assertEqual("gradients_0",
self._grad_source_for_name("gradients_0/foo/bar"))
@test_util.run_deprecated_v1
def testSkipEagerGetGradSource_EnclosingScope(self):
self.assertEqual("foo/gradients:0",
self._grad_source_for_name("foo/gradients"))
self.assertEqual("foo/gradients_0:0",
self._grad_source_for_name("foo/gradients_0"))
self.assertEqual("foo/gradients",
self._grad_source_for_name("foo/gradients/bar"))
self.assertEqual("foo/gradients_0",
self._grad_source_for_name("foo/gradients_0/bar"))
self.assertEqual("foo/bar/gradients",
self._grad_source_for_name("foo/bar/gradients/baz"))
self.assertEqual("foo/bar/gradients_0",
self._grad_source_for_name("foo/bar/gradients_0/baz"))
@test_util.run_deprecated_v1
def testSkipEagerGetGradSource_NestedUsesInnermost(self):
self.assertEqual(
"foo/gradients/bar/gradients_0",
self._grad_source_for_name("foo/gradients/bar/gradients_0/baz"))
@test_util.run_deprecated_v1
def testSkipEagerWriteShape(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c0 = constant_op.constant([4.0, 5.0])
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c1 = constant_op.constant([6.0, 7.0])
w1 = w0.write(1, c1)
r0 = w1.read(0)
r1 = w1.read(1)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
self.assertAllEqual(c1.get_shape(), r1.get_shape())
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c2 = constant_op.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w0.write(0, c2)
@test_util.run_deprecated_v1
def testSkipEagerPartlyUnknownShape(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=6)
c0 = array_ops.placeholder(dtypes.float32, [None, None, None, 3])
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual([None, None, None, 3], r0.get_shape().as_list())
c1 = array_ops.placeholder(dtypes.float32, [None, None, None, 3])
w1 = w0.write(1, c1)
r1 = w1.read(0)
self.assertAllEqual([None, None, None, 3], r1.get_shape().as_list())
# Writing less specific shape (doesn't change type.)
c2 = array_ops.placeholder(dtypes.float32, [None, None, None, None])
w2 = w1.write(2, c2)
r2 = w2.read(0)
self.assertAllEqual([None, None, None, 3], r2.get_shape().as_list())
# Writing more specific shape in one dimension and less specific in
# another.
c3 = array_ops.placeholder(dtypes.float32, [None, None, 2, None])
w3 = w2.write(3, c3)
r3 = w3.read(0)
self.assertAllEqual([None, None, 2, 3], r3.get_shape().as_list())
# Writing partly defined shape using TensorArray.scatter.
c4 = array_ops.placeholder(dtypes.float32, [2, None, 4, 2, 3])
w4 = w3.scatter([4, 5], c4)
r4 = w4.read(0)
self.assertAllEqual([None, 4, 2, 3], r4.get_shape().as_list())
# Writing fully defined shape using TensorArray.split.
c5 = array_ops.placeholder(dtypes.float32, [10, 4, 2, 3])
w5 = w4.split(c5, constant_op.constant([5, 5]))
r5 = w5.read(0)
self.assertAllEqual([5, 4, 2, 3], r5.get_shape().as_list())
def _testUnpackShape(self):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True,
infer_shape=True)
value = constant_op.constant(
[[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
w0 = ta.unstack(value)
r0 = w0.read(0)
self.assertAllEqual((2,), r0.get_shape())
c1 = constant_op.constant([4.0, 5.0])
w1 = w0.write(3, c1)
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
# TensorArray v2 does not support clear_after_read.
with self.assertRaisesOpError(
r"Could not read index 0 twice because it was cleared after a "
r"previous read \(perhaps try setting clear_after_read = false\?\)"
):
with ops.control_dependencies([r0]):
self.evaluate(w1.read(0))
r1 = w1.read(1)
self.assertAllEqual(c1.get_shape(), r1.shape)
c2 = constant_op.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w1.write(4, c2)
@test_util.run_v1_only("b/117943489")
def testUnpackShape(self):
self._testUnpackShape()
@test_util.run_deprecated_v1
def testSplitShape(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True,
infer_shape=True)
value = constant_op.constant([[1.0, -1.0], [2.0, -2.0], [3.0, -3.0]])
w0 = ta.split(value, [1, 1, 1])
r0 = w0.read(0)
self.assertAllEqual((1, 2), r0.get_shape())
ta1 = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo1",
size=0,
dynamic_size=True,
infer_shape=True)
w0 = ta1.split(value, [1, 2])
r0 = w0.read(0)
if context.executing_eagerly():
self.assertEqual((1, 2), r0.get_shape())
self.assertEqual((2, 2), w0.read(1).get_shape())
else:
self.assertEqual(r0.get_shape().ndims, None)
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.assertEqual(
tensor_shape.TensorShape(
ta1.handle.op.get_attr("element_shape")).ndims, None)
@test_util.run_deprecated_v1
def testSkipEagerWriteUnknownShape(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=True)
c0 = array_ops.placeholder(dtypes.float32)
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
def _testGradientWhenNotAllComponentsRead(self):
with self.cached_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
x = constant_op.constant([2.0, 3.0])
w = ta.unstack(x)
r0 = w.read(0)
# calculate (dr0/dx0, dr0/dx1). since r0 = x0, gradients are (1, 0).
grad_r0 = gradients_impl.gradients(ys=[r0], xs=[x], grad_ys=[1.0])
grad_r0_vals = session.run(grad_r0)[0]
self.assertAllEqual(grad_r0_vals, [1.0, 0.0])
# TODO(srbs): Figure out how to enable this. This is probably failing
# because we are trying to stack a TensorList with invalid tensors.
# That is because we do not receive gradients for all list indices.
# Figure out how TensorArray handles this.
def disabletestGradientWhenNotAllComponentsRead(self):
self._testGradientWhenNotAllComponentsRead()
def _testTensorArrayUnpackDynamic(self):
with self.cached_session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=3, dynamic_size=True)
x = constant_op.constant([1.0, 2.0, 3.0])
w0 = ta.unstack(x)
w1 = w0.write(3, 4.0)
r = w1.stack()
self.assertAllEqual(np.array([1.0, 2.0, 3.0, 4.0]), self.evaluate(r))
grad = gradients_impl.gradients(ys=[r], xs=[x])
self.assertAllEqual(np.array([1.0, 1.0, 1.0]), self.evaluate(grad)[0])
@test_util.run_v1_only("b/117943489")
def testSkipEagerTensorArrayUnpackDynamic(self):
self._testTensorArrayUnpackDynamic()
@test_util.run_v1_only("b/117943489")
def testSkipEagerTensorArraySplitDynamic(self):
with self.session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=3, dynamic_size=True)
x = constant_op.constant([1.0, 2.0, 3.0])
w0 = ta.split(x, [1, 1, 1])
w1 = w0.write(3, [4.0])
r = w1.concat()
self.assertAllEqual(np.array([1.0, 2.0, 3.0, 4.0]), self.evaluate(r))
grad = gradients_impl.gradients(ys=[r], xs=[x])
self.assertAllEqual(np.array([1.0, 1.0, 1.0]), self.evaluate(grad)[0])
def _testTensorArrayEvalEmpty(self):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=False)
v2_msg = ("Tried to stack elements of a empty list with non-fully-defined"
" element_shape")
v1_msg = (
"TensorArray has size zero, but element shape <unknown> is not "
"fully defined. Currently only static shapes are supported when "
"packing zero-size TensorArrays.")
with self.assertRaisesOpError(
v2_msg if control_flow_util.ENABLE_CONTROL_FLOW_V2 else v1_msg):
ta.stack().eval()
@test_util.run_v1_only("b/120545219")
def testSkipEagerTensorArrayEvalEmpty(self):
self._testTensorArrayEvalEmpty()
  # This test is ill-defined for Eager mode --- unpacking an empty tensor
  # gives an empty list, and there is no equivalent of "mark_used" in Eager.
def _testTensorArrayEvalEmptyWithDefault(self):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=True)
self.assertEqual(0, ta.size().eval())
# Don't actually perform the pack. This stores the static shape.
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
ta = ta.unstack(array_ops.zeros([0, 3, 5]))
else:
ta.unstack(array_ops.zeros([0, 3, 5])).mark_used()
packed = ta.stack()
concatenated = ta.concat()
self.assertAllEqual([0, 3, 5], self.evaluate(packed).shape)
# Concatenating zero tensors along their first dimension gives a
# first dimension of zero
self.assertAllEqual([0, 5], self.evaluate(concatenated).shape)
@test_util.run_v1_only("b/117943489")
def testSkipEagerTensorArrayEvalEmptyWithDefault(self):
self._testTensorArrayEvalEmptyWithDefault()
@test_util.run_v1_only("b/117943489")
def testSkipEagerTensorArrayScatterReadAndGradients(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True)
indices = constant_op.constant([1, 8])
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.scatter(indices, value)
r0 = w.read(1)
r1 = w.read(8)
# Test combined gradients + aggregation of read(0)
grad = gradients_impl.gradients(
ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
read_vals, grad_vals = session.run([[r0, r1], grad])
self.assertEqual(len(read_vals), 2)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([1.0, -1.0], read_vals[0])
self.assertAllEqual([10.0, -10.0], read_vals[1])
self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
@test_util.run_v1_only("b/118890905")
def testTensorArrayWriteGatherAndGradients(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True)
def func(values):
indices = constant_op.constant([1, 8])
w = ta.unstack(values)
g = w.gather(indices)
return g
values = constant_op.constant([[1.0 * x, -1.0 * x] for x in range(10)])
g = func(values)
grad_ys = [[[2.0, 3.0], [4.0, 5.0]]]
# Test combined gradients + aggregation of read(0)
if context.executing_eagerly():
g_vals = [g]
grad_vals = backprop.gradients_function(func)(
values, dy=constant_op.constant(grad_ys[0], dtype=dtypes.float32))
else:
grad = gradients_impl.gradients(ys=[g], xs=[values], grad_ys=grad_ys)
g_vals, grad_vals = session.run([[g], grad])
# Gradients for 8 of the 10 unread components are zero.
expected_grad = np.zeros((10, 2))
expected_grad[1] = [2.0, 3.0]
expected_grad[8] = [4.0, 5.0]
self.assertEqual(len(g_vals), 1)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[1.0, -1.0], [8.0, -8.0]], g_vals[0])
self.assertAllEqual(expected_grad, grad_vals[0])
@test_util.disable_control_flow_v2("colocate_with not supported in v2.")
@test_util.run_v1_only("b/120545219")
def testSkipEagerTensorArrayGetsDeviceFromFirstWrite(self):
with ops.device("/job:worker/task:0/cpu:0"):
# this initial device will be ignored.
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
with ops.device("/job:worker/task:1/cpu:0"):
# the first write sets the op's device.
ta = ta.write(0, 1.0)
with ops.device("/job:worker/task:2/cpu:0"):
# subsequent writes do not modify the op's device.
ta = ta.write(1, 1.0)
# The gradient TA will sit on the same device as the forward TA.
ta_grad = ta.grad("grad")
flows = [ta.flow, ta_grad.flow]
# Similar tests for unpack and split
with ops.device("/job:worker/task:0/cpu:0"):
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=3)
with ops.device("/job:worker/task:1/cpu:0"):
ta = ta.unstack([1.0, 2.0])
with ops.device("/job:worker/task:2/cpu:0"):
ta = ta.write(2, 3.0)
flows.append(ta.flow)
with ops.device("/job:worker/task:0/cpu:0"):
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
with ops.device("/job:worker/task:1/cpu:0"):
ta = ta.split([1.0, 2.0], [1, 1])
flows.append(ta.flow)
session = session_lib.Session(self._workers[0].target)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
session.run(flows, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField("step_stats"))
dev_stats = {d.device: d.node_stats
for d in run_metadata.step_stats.dev_stats}
for d in dev_stats:
if "/task:1/" in d:
self.assertTrue(
[s for s in dev_stats[d] if "/TensorArray" in s.node_name])
else:
self.assertFalse(
[s for s in dev_stats[d] if "/TensorArray" in s.node_name])
@test_util.disable_control_flow_v2("colocate_with not supported in v2.")
@test_util.run_v1_only("b/120545219")
def testSkipEagerTensorArrayGetsDeviceFromFirstWriteInWhileLoop(self):
with ops.device("/job:worker/task:0/cpu:0"):
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
def _body(i, ta_i):
with ops.device("/job:worker/task:1/cpu:0"):
return i + 1, ta_i.write(i, constant_op.constant(0.0))
_, ta_out = control_flow_ops.while_loop(
lambda i, ta: i < 2, _body, loop_vars=[0, ta])
session = session_lib.Session(self._workers[0].target)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
session.run(ta_out.flow, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField("step_stats"))
dev_stats = {d.device: d.node_stats
for d in run_metadata.step_stats.dev_stats}
for d in dev_stats:
if "/task:1/" in d:
self.assertTrue(
[s for s in dev_stats[d] if "TensorArray" == s.node_name])
else:
self.assertFalse(
[s for s in dev_stats[d] if "TensorArray" == s.node_name])
@test_util.disable_control_flow_v2("colocate_with not supported in v2.")
@test_util.run_v1_only("b/120545219")
def testSkipEagerTensorArrayDisabledColocateWithFirstWriteCall(self):
with ops.device("/job:worker/task:0/cpu:0"):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, colocate_with_first_write_call=False)
def _body(i, ta_i):
with ops.device("/job:worker/task:1/cpu:0"):
return i + 1, ta_i.write(i, constant_op.constant(0.0))
_, ta_out = control_flow_ops.while_loop(
lambda i, ta: i < 2, _body, loop_vars=[0, ta])
session = session_lib.Session(self._workers[0].target)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
session.run(ta_out.flow, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField("step_stats"))
dev_stats = {d.device: list(d.node_stats)
for d in run_metadata.step_stats.dev_stats}
for d in dev_stats:
if "/task:0/" in d and "CPU" in d: # Skip any GPU node stats
self.assertTrue(
[s for s in dev_stats[d] if "TensorArray" == s.node_name])
else:
self.assertFalse(
[s for s in dev_stats[d] if "TensorArray" == s.node_name])
def testTensorArrayIdentity(self):
with self.session(use_gpu=True):
ta0 = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2,
infer_shape=False)
ta1 = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=4,
infer_shape=True)
ta0 = ta0.write(0, 0.)
ta1 = ta1.write(0, 1)
v0 = variable_scope.get_variable(
"v0", shape=(), initializer=init_ops.zeros_initializer())
v1 = variable_scope.get_variable(
"v1", shape=(), initializer=init_ops.zeros_initializer())
with ops.control_dependencies([v0.assign_add(1)]):
ta0 = ta0.identity()
with ops.control_dependencies([v1.assign_add(1)]):
ta1 = ta1.identity()
read0 = ta0.read(0)
read1 = ta1.read(0)
size0 = ta0.size()
size1 = ta1.size()
# Tests correct properties on new TensorArrays.
self.assertEqual(dtypes.float32, ta0.dtype)
self.assertEqual(dtypes.int32, ta1.dtype)
if context.executing_eagerly():
self.assertEqual(tensor_shape.scalar(), read0.get_shape())
else:
self.assertEqual(tensor_shape.unknown_shape(), read0.get_shape())
self.assertEqual(tensor_shape.scalar(), read1.get_shape())
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
read0_v, read1_v, size0_v, size1_v = self.evaluate((read0, read1, size0,
size1))
# Tests that the control dependencies was added and executed.
self.assertEqual(1, self.evaluate(v0))
self.assertEqual(1, self.evaluate(v1))
# Tests correct TensorArray.
self.assertEqual(read0_v, 0)
self.assertEqual(read1_v, 1)
self.assertEqual(size0_v, 2)
self.assertEqual(size1_v, 4)
@test_util.run_deprecated_v1
def testSkipEagerTensorArrayGradYsInCorrectScope(self):
n_time = 1
n_dim = 1
x = constant_op.constant([[1.42]])
dy = constant_op.constant([[2.42]])
ta = tensor_array_ops.TensorArray(
dtypes.float32, size=n_time, element_shape=[n_dim])
for t in range(n_time):
ta = ta.write(index=t, value=x[t])
y = ta.stack()
# dy is outside of the gradients name scope; tf.gradients must
# wrap it in the correct name scope.
dx, = gradients_impl.gradients(ys=[y], xs=[x], grad_ys=[dy])
with self.cached_session(use_gpu=True) as sess:
vdx, vdy = self.evaluate([dx, dy])
self.assertAllClose(vdx, vdy)
def testSkipEagerTensorArrayInt64GPU(self):
if not test.is_gpu_available():
return
with self.session(use_gpu=True, force_gpu=True) as sess:
value = array_ops.placeholder(dtypes.int64)
ta = tensor_array_ops.TensorArray(dtype=dtypes.int64, size=2)
ta = ta.scatter([0, 1], value)
r0 = ta.read(0)
r1 = ta.read(1)
v0, v1 = sess.run([r0, r1], feed_dict={value: [-3, 100]})
self.assertAllEqual(v0, -3)
self.assertAllEqual(v1, 100)
if __name__ == "__main__":
test.main()
| apache-2.0 | -7,141,327,032,126,883,000 | 37.213054 | 80 | 0.616407 | false |
Mihai925/EduCoding | LandingPage/migrations/0001_initial.py | 1 | 1234 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Feature',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=100, verbose_name=b'Title')),
('description', models.TextField(verbose_name=b'Description')),
('glyphicon', models.CharField(max_length=100, verbose_name=b'Glyphicon')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='LandingPage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', models.TextField(verbose_name=b'Description')),
('features', models.ManyToManyField(related_name='Features', to='LandingPage.Feature')),
],
options={
},
bases=(models.Model,),
),
]
| mit | 7,593,169,094,580,280,000 | 33.277778 | 114 | 0.546191 | false |
EricssonResearch/calvin-base | calvin/actorstore/systemactors/sensor/Accelerometer.py | 1 | 1566 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, manage, condition, stateguard, calvinsys
class Accelerometer(Actor):
"""
Measure the acceleration. Takes the period of measurements, in microseconds, as input.
Outputs:
acceleration : Acceleration as a dict with the x,y, and z directions.
"""
@manage(['level', 'period'])
def init(self, period):
self.period = period
self.level = calvinsys.open(self, "io.accelerometer", period=self.period)
@stateguard(lambda self: calvinsys.can_read(self.level))
@condition([], ['acceleration'])
def read_measurement(self):
level = calvinsys.read(self.level)
return (level,)
action_priority = (read_measurement,)
requires = ['io.accelerometer']
test_kwargs = {'period': 10}
test_calvinsys = {'io.accelerometer': {'read': [10, 12, 0, 5]}}
test_set = [
{
'outports': {'acceleration': [10, 12, 0, 5]}
}
]
| apache-2.0 | 3,497,507,540,760,737,300 | 31.625 | 90 | 0.666667 | false |
SylvainTakerkart/vobi_one | examples/scripts_ui_reproducibility/script1_lm_tbin1_sbin2_estimatedtaufh02_alldata_script.py | 1 | 5108 | # Author: Flavien Garcia <[email protected]>
# Sylvain Takerkart <[email protected]>
# License: BSD Style.
"""
Description
-----------
This script processes the oidata functions on some selected raw files.
The process is decomposed in 2 steps :
1. Model construction from a parameter file
2. Application of the model on all trials
This script requires that the data files have already been imported.
Notes
-----
1. Copy the script in a temporary directory of your choice and cd into this directory
2. Change parameters directly in the script itself
3. Write on a shell : brainvisa --noMainWindow --shell.
4. Write : %run script1_linear_model.py
"""
############################## SECTION TO CHANGE ###############################
DATABASE = '/riou/work/crise/takerkart/vodev_0.3_scripts_gui/' # Database
protocol='protocol_sc' # Protocol name
subject='subject_sc_tbin1_sbin2_alldata' # Subject name
session_date='080313' # Sesion date
analysis_name='_estimatedtaufh02_script' # Analysis name
# Linear model parameters definition
param=(
110.0, # Sampling frequency
0.999, # Trial duration
4.75, # Tau for dye bleaching
'(2.22,10,40,41,50)', # Frequencies with heartbeat frequency in first
'(1,1,1,1,1)', # Orders' frequencies
10, # L = Number of main components used
'(0.02,0.,0.03,0.45,0.06,0.,0.,0.)', # Alphas min
'(0.12,0.,0.3,0.6,0.15,0.,0.,0.)') # Alphas max
# corners of the region on the image to define a rectangle
# the figure plots the results averaged on all pixels of this rectangle
corner0=(62,125) # Top left-hand corner for parameters estimation
corner1=(100,150) # Bottom right_hand corner for parameters estimation
format='.nii'
################################################################################
########################## DO NOT TOUCH THIS SECTION ###########################
# Imports
import os
import numpy as np
import oidata.oitrial_processes as oitrial_processes # Trial-level processing functions
import oidata.oisession_preprocesses as oisession_preprocesses # Session-level processing functions
print('CONDITIONS FILE RECOVERY')
info_file_list=[] # Path initialization
try: # Verify if a conditions file already exists
# Conditions file path creation
path_cond=os.path.join(DATABASE\
,protocol\
,subject\
,'session_'+session_date \
,'oitrials_analysis/conditions.txt')
# Conditions file path recovery
raw_name,experiences,trials,conditions,selected=np.loadtxt(path_cond\
,delimiter='\t'\
,unpack=True\
,dtype=str)
    # Recovery of file information
for name in raw_name: # For each trial
session=name[1:7] # Session recovery
exp=name[9:11] # Experiment recovery
trial=name[13:17] # Trial recovery
condition=name[19:22] # Conditions recovery
path=os.path.join(os.path.split(path_cond)[0]\
,'exp'+exp,'trial'+trial,'raw',name) # Path creation
info_file={'session':session\
,'exp':exp,'trial':trial\
,'condition':condition,'path':path} # Put them on info_file
info_file_list.append(info_file) # Add info file
except: # If not, data import is needed
    raise ImportError('This script requires that the data files have already been imported')
print('MODEL CONSTRUCTION')
# Linear Model construction
info_model_files=oisession_preprocesses.construct_model_process(
database=DATABASE, # Database path
protocol=protocol, # Protocol name
subject=subject, # Subject name
session='session_'+session_date, # Session
    param=param, # Parameters
pathX=os.path.join(DATABASE,protocol,subject,'session_'+session_date\
,'oisession_analysis','glm_based'+analysis_name,'glm.txt'), # GLM matrix path
pathParam=os.path.join(DATABASE,protocol,subject,'session_'+session_date\
,'oisession_analysis','glm_based'+analysis_name,'param.npz'), # Parameters file path
analysis='glm_based'+analysis_name,
mode=True,
script=True)
print('APPLICATION OF THE MODEL')
current_img=0 # Index of current image
for name in raw_name: # For each trial
# Linear Model application
oitrial_processes.estimate_model_process(
info_file_list[current_img]['path'], # Raw data path
glm=info_model_files['path_def'], # GLM matrix path
analysis='glm_based'+analysis_name,
format=format,
data_graph=os.path.join(os.path.split(os.path.split(info_file_list[current_img]['path'])[0])[0],'glm_based'+analysis_name,name[:-4]+'.png'),
corner0=corner0,
corner1=corner1,
mode=True,
script=True)
current_img+=1
print('\tProcessed trials:'+str(current_img)+'/'+str(len(raw_name)))
print('Script was successfully executed')
| gpl-3.0 | 7,174,634,605,701,501,000 | 39.539683 | 148 | 0.618637 | false |
peterjanes/dosage | dosagelib/plugins/v.py | 1 | 3147 | # -*- coding: utf-8 -*-
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
from __future__ import absolute_import, division, print_function
from re import compile
from ..scraper import _BasicScraper, _ParserScraper
from ..helpers import bounceStarter, indirectStarter, xpath_class
from ..util import tagre
class Vexxarr(_ParserScraper):
baseUrl = 'http://www.vexxarr.com/'
url = baseUrl + 'Index.php'
stripUrl = baseUrl + 'archive.php?seldate=%s'
firstStripUrl = stripUrl % '010105'
imageSearch = '//p/img'
prevSearch = '//a[./img[contains(@src, "previous")]]'
nextSearch = '//a[./img[contains(@src, "next")]]'
starter = bounceStarter
def namer(self, imageUrl, pageUrl):
page = pageUrl.rsplit('=', 1)[-1]
return '20%s-%s-%s' % (page[4:6], page[0:2], page[2:4])
class VGCats(_BasicScraper):
url = 'http://www.vgcats.com/comics/'
stripUrl = url + '?strip_id=%s'
firstStripUrl = stripUrl % '0'
imageSearch = compile(tagre("img", "src", r'(images/\d{6}\.[^"]+)'))
prevSearch = compile(tagre("a", "href", r'(\?strip_id=\d+)') +
tagre("img", "src", r"back\.gif"))
help = 'Index format: n (unpadded)'
class VGCatsAdventure(VGCats):
name = 'VGCats/Adventure'
url = 'http://www.vgcats.com/ffxi/'
stripUrl = url + '?strip_id=%s'
class VGCatsSuper(VGCats):
name = 'VGCats/Super'
url = 'http://www.vgcats.com/super/'
stripUrl = url + '?strip_id=%s'
class VickiFox(_ParserScraper):
url = 'http://www.vickifox.com/comic/strip'
stripUrl = url + '?id=%s'
firstStripUrl = stripUrl % '001'
imageSearch = '//img[contains(@src, "comic/")]'
prevSearch = '//button[@id="btnPrev"]/@value'
def getPrevUrl(self, url, data):
return self.stripUrl % self.getPage(url).xpath(self.prevSearch)[0]
class VictimsOfTheSystem(_BasicScraper):
url = 'http://www.votscomic.com/'
stripUrl = url + '?id=%s.jpg'
firstStripUrl = stripUrl % '070103-002452'
imageSearch = compile(tagre("img", "src", r'(comicpro/strips/[^"]+)'))
prevSearch = compile(tagre("a", "href", r'(\?id=\d+-\d+\.jpg)') +
"Previous")
help = 'Index format: nnn-nnn'
class ViiviJaWagner(_ParserScraper):
url = 'http://www.hs.fi/viivijawagner/'
imageSearch = '//meta[@property="og:image"]/@content'
prevSearch = '//a[%s]' % xpath_class('prev')
latestSearch = '//div[%s]//a' % xpath_class('cartoon-content')
starter = indirectStarter
lang = 'fi'
def namer(self, image_url, page_url):
return page_url.rsplit('-', 1)[1].split('.')[0]
class VirmirWorld(_ParserScraper):
url = 'http://world.virmir.com/'
stripUrl = url + 'comic.php?story=%s&page=%s'
firstStripUrl = stripUrl % ('1', '1')
imageSearch = '//div[@class="comic"]//img'
prevSearch = '//a[contains(@class, "prev")]'
def getIndexStripUrl(self, index):
index = index.split('-')
return self.stripUrl % (index[0], index[1])
| mit | -6,427,210,326,709,256,000 | 32.478723 | 74 | 0.616142 | false |
robwarm/gpaw-symm | doc/install/Linux/Niflheim/el6-x3455-tm-gfortran-openmpi-1.6.3-openblaso-0.2.8.1-sl-hdf5-1.8.10.py | 1 | 1993 | nodetype = 'x3455'
scalapack = True
compiler = 'gcc'
libraries =[
'gfortran',
'scalapack',
'mpiblacs',
'mpiblacsCinit',
'openblaso',
'hdf5',
'xc',
'mpi',
'mpi_f77',
]
library_dirs =[
'/home/opt/el6/' + nodetype + '/openmpi-1.6.3-' + nodetype + '-tm-gfortran-1/lib',
'/home/opt/el6/' + nodetype + '/blacs-1.1-' + nodetype + '-tm-gfortran-openmpi-1.6.3-1/lib',
'/home/opt/el6/' + nodetype + '/scalapack-2.0.2-' + nodetype + '-tm-gfortran-openmpi-1.6.3-acml-4.4.0-1/lib',
'/home/opt/el6/common/openblas-0.2.8-1/lib64',
'/home/opt/el6/' + nodetype + '/hdf5-1.8.10-' + nodetype + '-tm-gfortran-openmpi-1.6.3-1/lib',
'/home/opt/el6/' + nodetype + '/libxc-2.2.0-' + nodetype + '-gfortran-1/lib',
]
include_dirs +=[
'/home/opt/el6/' + nodetype + '/openmpi-1.6.3-' + nodetype + '-tm-gfortran-1/include',
'/home/opt/el6/' + nodetype + '/hdf5-1.8.10-' + nodetype + '-tm-gfortran-openmpi-1.6.3-1/include',
'/home/opt/el6/' + nodetype + '/libxc-2.2.0-' + nodetype + '-gfortran-1/include',
]
extra_link_args =[
'-Wl,-rpath=/home/opt/el6/' + nodetype + '/openmpi-1.6.3-' + nodetype + '-tm-gfortran-1/lib'
',-rpath=/home/opt/el6/' + nodetype + '/blacs-1.1-' + nodetype + '-tm-gfortran-openmpi-1.6.3-1/lib'
',-rpath=/home/opt/el6/' + nodetype + '/scalapack-2.0.2-' + nodetype + '-tm-gfortran-openmpi-1.6.3-acml-4.4.0-1/lib'
',-rpath=/home/opt/el6/common/openblas-0.2.8-1/lib64'
',-rpath=/home/opt/el6/' + nodetype + '/hdf5-1.8.10-' + nodetype + '-tm-gfortran-openmpi-1.6.3-1/lib'
',-rpath=/home/opt/el6/' + nodetype + '/libxc-2.2.0-' + nodetype + '-gfortran-1/lib'
]
extra_compile_args =['-O3', '-std=c99', '-fPIC', '-Wall']
define_macros += [('GPAW_NO_UNDERSCORE_CBLACS', '1')]
define_macros += [('GPAW_NO_UNDERSCORE_CSCALAPACK', '1')]
mpicompiler = '/home/opt/el6/' + nodetype + '/openmpi-1.6.3-' + nodetype + '-tm-gfortran-1/bin/mpicc'
mpilinker = mpicompiler
platform_id = nodetype
hdf5 = True
| gpl-3.0 | -1,347,084,126,252,147,700 | 46.452381 | 120 | 0.594581 | false |
partofthething/home-assistant | homeassistant/components/spaceapi/__init__.py | 1 | 10380 | """Support for the SpaceAPI."""
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_LOCATION,
ATTR_NAME,
ATTR_STATE,
ATTR_UNIT_OF_MEASUREMENT,
CONF_ADDRESS,
CONF_EMAIL,
CONF_ENTITY_ID,
CONF_SENSORS,
CONF_STATE,
CONF_URL,
)
import homeassistant.core as ha
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
ATTR_ADDRESS = "address"
ATTR_SPACEFED = "spacefed"
ATTR_CAM = "cam"
ATTR_STREAM = "stream"
ATTR_FEEDS = "feeds"
ATTR_CACHE = "cache"
ATTR_PROJECTS = "projects"
ATTR_RADIO_SHOW = "radio_show"
ATTR_LAT = "lat"
ATTR_LON = "lon"
ATTR_API = "api"
ATTR_CLOSE = "close"
ATTR_CONTACT = "contact"
ATTR_ISSUE_REPORT_CHANNELS = "issue_report_channels"
ATTR_LASTCHANGE = "lastchange"
ATTR_LOGO = "logo"
ATTR_OPEN = "open"
ATTR_SENSORS = "sensors"
ATTR_SPACE = "space"
ATTR_UNIT = "unit"
ATTR_URL = "url"
ATTR_VALUE = "value"
ATTR_SENSOR_LOCATION = "location"
CONF_CONTACT = "contact"
CONF_HUMIDITY = "humidity"
CONF_ICON_CLOSED = "icon_closed"
CONF_ICON_OPEN = "icon_open"
CONF_ICONS = "icons"
CONF_IRC = "irc"
CONF_ISSUE_REPORT_CHANNELS = "issue_report_channels"
CONF_LOCATION = "location"
CONF_SPACEFED = "spacefed"
CONF_SPACENET = "spacenet"
CONF_SPACESAML = "spacesaml"
CONF_SPACEPHONE = "spacephone"
CONF_CAM = "cam"
CONF_STREAM = "stream"
CONF_M4 = "m4"
CONF_MJPEG = "mjpeg"
CONF_USTREAM = "ustream"
CONF_FEEDS = "feeds"
CONF_FEED_BLOG = "blog"
CONF_FEED_WIKI = "wiki"
CONF_FEED_CALENDAR = "calendar"
CONF_FEED_FLICKER = "flicker"
CONF_FEED_TYPE = "type"
CONF_FEED_URL = "url"
CONF_CACHE = "cache"
CONF_CACHE_SCHEDULE = "schedule"
CONF_PROJECTS = "projects"
CONF_RADIO_SHOW = "radio_show"
CONF_RADIO_SHOW_NAME = "name"
CONF_RADIO_SHOW_URL = "url"
CONF_RADIO_SHOW_TYPE = "type"
CONF_RADIO_SHOW_START = "start"
CONF_RADIO_SHOW_END = "end"
CONF_LOGO = "logo"
CONF_PHONE = "phone"
CONF_SIP = "sip"
CONF_KEYMASTERS = "keymasters"
CONF_KEYMASTER_NAME = "name"
CONF_KEYMASTER_IRC_NICK = "irc_nick"
CONF_KEYMASTER_PHONE = "phone"
CONF_KEYMASTER_EMAIL = "email"
CONF_KEYMASTER_TWITTER = "twitter"
CONF_TWITTER = "twitter"
CONF_FACEBOOK = "facebook"
CONF_IDENTICA = "identica"
CONF_FOURSQUARE = "foursquare"
CONF_ML = "ml"
CONF_JABBER = "jabber"
CONF_ISSUE_MAIL = "issue_mail"
CONF_SPACE = "space"
CONF_TEMPERATURE = "temperature"
DATA_SPACEAPI = "data_spaceapi"
DOMAIN = "spaceapi"
ISSUE_REPORT_CHANNELS = [CONF_EMAIL, CONF_ISSUE_MAIL, CONF_ML, CONF_TWITTER]
SENSOR_TYPES = [CONF_HUMIDITY, CONF_TEMPERATURE]
SPACEAPI_VERSION = "0.13"
URL_API_SPACEAPI = "/api/spaceapi"
LOCATION_SCHEMA = vol.Schema({vol.Optional(CONF_ADDRESS): cv.string})
SPACEFED_SCHEMA = vol.Schema(
{
vol.Optional(CONF_SPACENET): cv.boolean,
vol.Optional(CONF_SPACESAML): cv.boolean,
vol.Optional(CONF_SPACEPHONE): cv.boolean,
}
)
STREAM_SCHEMA = vol.Schema(
{
vol.Optional(CONF_M4): cv.url,
vol.Optional(CONF_MJPEG): cv.url,
vol.Optional(CONF_USTREAM): cv.url,
}
)
FEED_SCHEMA = vol.Schema(
{vol.Optional(CONF_FEED_TYPE): cv.string, vol.Required(CONF_FEED_URL): cv.url}
)
FEEDS_SCHEMA = vol.Schema(
{
vol.Optional(CONF_FEED_BLOG): FEED_SCHEMA,
vol.Optional(CONF_FEED_WIKI): FEED_SCHEMA,
vol.Optional(CONF_FEED_CALENDAR): FEED_SCHEMA,
vol.Optional(CONF_FEED_FLICKER): FEED_SCHEMA,
}
)
CACHE_SCHEMA = vol.Schema(
{
vol.Required(CONF_CACHE_SCHEDULE): cv.matches_regex(
r"(m.02|m.05|m.10|m.15|m.30|h.01|h.02|h.04|h.08|h.12|d.01)"
)
}
)
RADIO_SHOW_SCHEMA = vol.Schema(
{
vol.Required(CONF_RADIO_SHOW_NAME): cv.string,
vol.Required(CONF_RADIO_SHOW_URL): cv.url,
vol.Required(CONF_RADIO_SHOW_TYPE): cv.matches_regex(r"(mp3|ogg)"),
vol.Required(CONF_RADIO_SHOW_START): cv.string,
vol.Required(CONF_RADIO_SHOW_END): cv.string,
}
)
KEYMASTER_SCHEMA = vol.Schema(
{
vol.Optional(CONF_KEYMASTER_NAME): cv.string,
vol.Optional(CONF_KEYMASTER_IRC_NICK): cv.string,
vol.Optional(CONF_KEYMASTER_PHONE): cv.string,
vol.Optional(CONF_KEYMASTER_EMAIL): cv.string,
vol.Optional(CONF_KEYMASTER_TWITTER): cv.string,
}
)
CONTACT_SCHEMA = vol.Schema(
{
vol.Optional(CONF_EMAIL): cv.string,
vol.Optional(CONF_IRC): cv.string,
vol.Optional(CONF_ML): cv.string,
vol.Optional(CONF_PHONE): cv.string,
vol.Optional(CONF_TWITTER): cv.string,
vol.Optional(CONF_SIP): cv.string,
vol.Optional(CONF_FACEBOOK): cv.string,
vol.Optional(CONF_IDENTICA): cv.string,
vol.Optional(CONF_FOURSQUARE): cv.string,
vol.Optional(CONF_JABBER): cv.string,
vol.Optional(CONF_ISSUE_MAIL): cv.string,
vol.Optional(CONF_KEYMASTERS): vol.All(
cv.ensure_list, [KEYMASTER_SCHEMA], vol.Length(min=1)
),
},
required=False,
)
STATE_SCHEMA = vol.Schema(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Inclusive(CONF_ICON_CLOSED, CONF_ICONS): cv.url,
vol.Inclusive(CONF_ICON_OPEN, CONF_ICONS): cv.url,
},
required=False,
)
SENSOR_SCHEMA = vol.Schema(
{vol.In(SENSOR_TYPES): [cv.entity_id], cv.string: [cv.entity_id]}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CONTACT): CONTACT_SCHEMA,
vol.Required(CONF_ISSUE_REPORT_CHANNELS): vol.All(
cv.ensure_list, [vol.In(ISSUE_REPORT_CHANNELS)]
),
vol.Optional(CONF_LOCATION): LOCATION_SCHEMA,
vol.Required(CONF_LOGO): cv.url,
vol.Required(CONF_SPACE): cv.string,
vol.Required(CONF_STATE): STATE_SCHEMA,
vol.Required(CONF_URL): cv.string,
vol.Optional(CONF_SENSORS): SENSOR_SCHEMA,
vol.Optional(CONF_SPACEFED): SPACEFED_SCHEMA,
vol.Optional(CONF_CAM): vol.All(
cv.ensure_list, [cv.url], vol.Length(min=1)
),
vol.Optional(CONF_STREAM): STREAM_SCHEMA,
vol.Optional(CONF_FEEDS): FEEDS_SCHEMA,
vol.Optional(CONF_CACHE): CACHE_SCHEMA,
vol.Optional(CONF_PROJECTS): vol.All(cv.ensure_list, [cv.url]),
vol.Optional(CONF_RADIO_SHOW): vol.All(
cv.ensure_list, [RADIO_SHOW_SCHEMA]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
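# Illustrative configuration.yaml snippet accepted by CONFIG_SCHEMA above; all
# values are made-up placeholders rather than the settings of a real space:
#
# spaceapi:
#   space: Example Space
#   logo: https://example.org/logo.png
#   url: https://example.org
#   location:
#     address: "Somewhere 1, 12345 Example City"
#   contact:
#     email: [email protected]
#   issue_report_channels:
#     - email
#   state:
#     entity_id: binary_sensor.space_open
#     icon_open: https://example.org/open.png
#     icon_closed: https://example.org/closed.png
#   sensors:
#     temperature:
#       - sensor.space_temperature
#     humidity:
#       - sensor.space_humidity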
def setup(hass, config):
"""Register the SpaceAPI with the HTTP interface."""
hass.data[DATA_SPACEAPI] = config[DOMAIN]
hass.http.register_view(APISpaceApiView)
return True
class APISpaceApiView(HomeAssistantView):
"""View to provide details according to the SpaceAPI."""
url = URL_API_SPACEAPI
name = "api:spaceapi"
@staticmethod
def get_sensor_data(hass, spaceapi, sensor):
"""Get data from a sensor."""
sensor_state = hass.states.get(sensor)
if not sensor_state:
return None
sensor_data = {ATTR_NAME: sensor_state.name, ATTR_VALUE: sensor_state.state}
if ATTR_SENSOR_LOCATION in sensor_state.attributes:
sensor_data[ATTR_LOCATION] = sensor_state.attributes[ATTR_SENSOR_LOCATION]
else:
sensor_data[ATTR_LOCATION] = spaceapi[CONF_SPACE]
# Some sensors don't have a unit of measurement
if ATTR_UNIT_OF_MEASUREMENT in sensor_state.attributes:
sensor_data[ATTR_UNIT] = sensor_state.attributes[ATTR_UNIT_OF_MEASUREMENT]
return sensor_data
@ha.callback
def get(self, request):
"""Get SpaceAPI data."""
hass = request.app["hass"]
spaceapi = dict(hass.data[DATA_SPACEAPI])
is_sensors = spaceapi.get("sensors")
location = {ATTR_LAT: hass.config.latitude, ATTR_LON: hass.config.longitude}
try:
location[ATTR_ADDRESS] = spaceapi[ATTR_LOCATION][CONF_ADDRESS]
except KeyError:
pass
except TypeError:
pass
state_entity = spaceapi["state"][ATTR_ENTITY_ID]
space_state = hass.states.get(state_entity)
if space_state is not None:
state = {
ATTR_OPEN: space_state.state != "off",
ATTR_LASTCHANGE: dt_util.as_timestamp(space_state.last_updated),
}
else:
state = {ATTR_OPEN: "null", ATTR_LASTCHANGE: 0}
try:
state[ATTR_ICON] = {
ATTR_OPEN: spaceapi["state"][CONF_ICON_OPEN],
ATTR_CLOSE: spaceapi["state"][CONF_ICON_CLOSED],
}
except KeyError:
pass
data = {
ATTR_API: SPACEAPI_VERSION,
ATTR_CONTACT: spaceapi[CONF_CONTACT],
ATTR_ISSUE_REPORT_CHANNELS: spaceapi[CONF_ISSUE_REPORT_CHANNELS],
ATTR_LOCATION: location,
ATTR_LOGO: spaceapi[CONF_LOGO],
ATTR_SPACE: spaceapi[CONF_SPACE],
ATTR_STATE: state,
ATTR_URL: spaceapi[CONF_URL],
}
try:
data[ATTR_CAM] = spaceapi[CONF_CAM]
except KeyError:
pass
try:
data[ATTR_SPACEFED] = spaceapi[CONF_SPACEFED]
except KeyError:
pass
try:
data[ATTR_STREAM] = spaceapi[CONF_STREAM]
except KeyError:
pass
try:
data[ATTR_FEEDS] = spaceapi[CONF_FEEDS]
except KeyError:
pass
try:
data[ATTR_CACHE] = spaceapi[CONF_CACHE]
except KeyError:
pass
try:
data[ATTR_PROJECTS] = spaceapi[CONF_PROJECTS]
except KeyError:
pass
try:
data[ATTR_RADIO_SHOW] = spaceapi[CONF_RADIO_SHOW]
except KeyError:
pass
if is_sensors is not None:
sensors = {}
for sensor_type in is_sensors:
sensors[sensor_type] = []
for sensor in spaceapi["sensors"][sensor_type]:
sensor_data = self.get_sensor_data(hass, spaceapi, sensor)
sensors[sensor_type].append(sensor_data)
data[ATTR_SENSORS] = sensors
return self.json(data)
| mit | -2,758,130,002,359,127,000 | 28.405099 | 86 | 0.607803 | false |
emillynge/python-remoteexecution | remoteexecution/ClientSide.py | 1 | 16737 | from __future__ import (absolute_import, print_function, unicode_literals, division)
__author__ = 'emil'
from .ServerSide import (Manager)
from .Utils import (InvalidUserInput, DummyLogger, RemoteExecutionLogger, WrappedProxy, Commandline)
from .Environments import (communication_environment, execution_environment, EnvironmentFactory)
from Pyro4 import errors as pyro_errors
from time import sleep
import Pyro4
from collections import namedtuple
from subprocess import Popen
import abc
HPC_Time = namedtuple("HPC_Time", ['h', 'm', 's'])
HPC_Time.__new__.__defaults__ = (0, 0, 0)
HPC_resources = namedtuple("HPC_resources", ['nodes', 'ppn', 'gpus', 'pvmem', 'vmem'])
HPC_resources.__new__.__defaults__ = (1, 1, 0, None, None)
ClassInfo = namedtuple('ClassInfo', ['module', 'class_name'])
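# The __new__.__defaults__ assignments above make all namedtuple fields optional,
# e.g. (illustrative values only):
#   HPC_Time(h=2)                -> HPC_Time(h=2, m=0, s=0)
#   HPC_resources(ppn=8, gpus=1) -> HPC_resources(nodes=1, ppn=8, gpus=1, pvmem=None, vmem=None)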
class Client(object):
def __init__(self, logger=None):
self.logger = logger or RemoteExecutionLogger(logger_name='Client')
InvalidUserInput.isinstance('logger', RemoteExecutionLogger, self.logger)
self.remote_commandline = communication_environment().client2manager_side_cli
self.remote_commandline.logger = self.logger.duplicate(logger_name='CLI/Client')
self.manager_proxy = None
self.start()
def start(self):
if not self.isup_manager():
self.start_manager()
else:
self.get_proxy()
if not self.manager_proxy.is_alive():
self.logger.error('could not start manager')
raise Exception("Could not start manager")
comm_env = communication_environment()
host, port = comm_env.client2manager_tunnel()
self.logger.info("Successfully connected to Manager on {0}".format(port))
def instance_generator(self, object_descriptor=None, rel_dir=".", **requested_resources):
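        """Create a RemoteInstance wrapping object_descriptor.

        The environment's script generator is configured with rel_dir and the
        requested resources (for the HPC generator these are wc, hw_ressources,
        add_modules and username). The returned instance is typically used as a
        context manager, which submits the job and yields a proxy to the remote
        object.
        """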
script_generator = execution_environment().script_generator
script_generator.logger = self.logger.duplicate(logger_name='Script')
assert isinstance(script_generator, (HPCScriptGenerator, SimpleScriptGenerator))
script_generator.execution_settings(rel_dir=rel_dir, **requested_resources)
instance = RemoteInstance(self.manager_proxy, self, logger=self.logger,
object_descriptor=object_descriptor, script_generator=script_generator)
return instance
def get_proxy(self):
comm_env = communication_environment()
host, port = comm_env.client2manager_tunnel()
self.manager_proxy = WrappedProxy('remote_execution.manager', host, port,
logger=self.logger.duplicate(append_name='Manager'))
def isup_manager(self):
self.remote_commandline('-i -f mylog -s isup manager')
EnvironmentFactory.set_settings(manager_ip=self.remote_commandline.get('ip')[0])
return self.remote_commandline.get('return')[0]
def start_manager(self):
self.remote_commandline('-i -f mylog -s start manager')
self.get_proxy()
@staticmethod
    def get_manager():
        return Manager()
def stop_manager(self):
self.manager_proxy.shutdown()
try:
while True:
self.manager_proxy.is_alive()
sleep(1)
except pyro_errors.CommunicationError:
pass
self.manager_proxy.release_socket()
del self.manager_proxy
def restart_manager(self):
self.stop_manager()
sleep(3)
for _ in range(5):
try:
self.start_manager()
e = None
break
except Commandline.CommandLineException as e:
if 'Errno 98' in e.message:
sleep(5)
else:
raise e
if e:
raise e
self.logger.info("Manager restarted")
class BaseScriptGenerator(object):
__metaclass__ = abc.ABCMeta
def __init__(self, base_dir):
self.req_resources = dict()
self.base_dir = base_dir
self.rel_dir = '.'
self.logger = DummyLogger()
self._lines = list()
def execution_settings(self, rel_dir='.', **requested_resources):
"""
called by user or parent
:param rel_dir: where to execute script relative to basedir set in assemble
        :param requested_resources: key-value pairs that describe the resources needed for execution
:return: None
"""
self.rel_dir = rel_dir
self.req_resources = requested_resources
self.check_ressources(**requested_resources)
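    # Example call with hypothetical values, targeting the HPC generator defined
    # below (its check_ressources() expects wc, hw_ressources, add_modules, username):
    #   generator.execution_settings(rel_dir='runs/exp1',
    #                                wc=HPC_Time(h=1),
    #                                hw_ressources=HPC_resources(nodes=1, ppn=4),
    #                                add_modules={}, username='s123456')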
@abc.abstractmethod
def check_ressources(self, **requested_resources):
"""
        Input check on the requested resources, to be implemented by subclasses.
        Should raise InvalidUserInput if invalid resources are requested.
:param requested_resources:
:return: None
"""
@property
def work_dir(self):
return self.base_dir + '/' + self.rel_dir
def generate_submit_script(self, execute_commands, log_file, sub_id):
""" generate a script that sets up execution on the server side
"""
self._lines = list()
self._write_lines(execute_commands, log_file, sub_id)
return self.get_script()
@abc.abstractmethod
def _write_lines(self, execute_command, log_file, sub_id):
""" Dummy method, must be overridden by subclassing. writes lines for the submission script """
pass
def get_script(self):
if not self._lines:
raise Exception('No script generated yet')
return '\n'.join(self._lines)
class SimpleScriptGenerator(BaseScriptGenerator):
def _write_lines(self, execute_command, log_file, sub_id):
#self._lines.append('#!/bin/sh')
#self._lines.append('cd ' + self.work_dir)
self._lines.append(execute_command)
def check_ressources(self, **requested_resources):
pass
class HPCScriptGenerator(BaseScriptGenerator):
"""
    Subclass that writes an execution script tailored to the DTU HPC qsub system.
"""
def __init__(self, base_modules, base_dir, manager):
self.base_modules = base_modules
self.manager = manager
self.mod2script = {'cuda': """if [ -n "$PBS_GPUFILE" ] ; then
export CUDA_DEVICE=`cat $PBS_GPUFILE | rev | cut -d"-" -f1 | rev | tr -cd [:digit:]` ; fi"""}
super(HPCScriptGenerator, self).__init__(base_dir)
def check_ressources(self, wc=HPC_Time(), hw_ressources=HPC_resources(), add_modules=None, username=None):
assert isinstance(wc, HPC_Time)
assert isinstance(hw_ressources, HPC_resources)
if not self.manager.env_call('execution', 'path_exists', self.work_dir):
raise InvalidUserInput("Work directory {0} doesn't exist.".format(self.work_dir))
try:
if hw_ressources.nodes < 1 or hw_ressources.ppn < 1:
raise InvalidUserInput('A job must have at least 1 node and 1 processor', argname='resources',
found=hw_ressources)
if not any(wc):
raise InvalidUserInput('No wall clock time assigned to job', argname='wallclock', found=wc)
self.check_modules()
InvalidUserInput.compare(argname='username', expect=None, found=username, equal=False)
except InvalidUserInput as e:
self.logger.error('Invalid parameters passed to Qsub', exc_info=True)
raise e
@property
def modules(self):
for item in self.req_resources['add_modules'].iteritems():
yield item
for module_name, version in self.base_modules.iteritems():
if module_name in self.req_resources['add_modules']:
continue
yield (module_name, version)
raise StopIteration()
def check_modules(self):
avail_modules = self.manager.env_call('execution', 'available_modules')
for (module, version) in self.modules:
if module not in avail_modules:
raise InvalidUserInput("Required module {0} is not available".format(module))
if version and version not in avail_modules[module]:
raise InvalidUserInput("Required module version {0} is not available for module {1}".format(version,
module))
self.logger.debug("module {0}, version {1} is available".format(module, version if version else "default"))
def _write_lines(self, execute_commands, log_file, sub_id):
self._lines.append('#!/bin/sh')
# noinspection PyTypeChecker
self.write_resources(self.req_resources['hw_ressources'])
# noinspection PyTypeChecker
self.write_wallclock(self.req_resources['wc'])
self.write_name('Remote execution {0}'.format(sub_id))
self.write_mail(self.req_resources['username'] + '@student.dtu.dk')
self.append_pbs_pragma('e', log_file + ".e")
self.append_pbs_pragma('o', log_file + ".o")
        for module_name, version in self.modules: # base modules merged with add_modules
self._lines.append('module load ' + module_name)
if version:
self._lines[-1] += '/' + version
if module_name in self.mod2script:
self._lines.append(self.mod2script[module_name])
self._lines.append("export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{0}/lib".format(self.base_dir))
self._lines.append('cd {0}'.format(self.work_dir))
if isinstance(execute_commands, list):
self._lines.append('\n'.join(execute_commands))
else:
self._lines.append(execute_commands)
@staticmethod
def make_pbs_pragma(flag, line):
return "#PBS -" + flag.strip(' ') + ' ' + line
def append_pbs_pragma(self, flag, line):
self._lines.append(self.make_pbs_pragma(flag, line))
def write_name(self, name):
self.append_pbs_pragma('N ', name)
def write_mail(self, mail_address):
self.append_pbs_pragma('M', mail_address)
self.append_pbs_pragma('m', 'a')
def write_resources(self, resources):
assert isinstance(resources, HPC_resources)
self.append_pbs_pragma('l', 'nodes={1}:ppn={0}'.format(resources.ppn, resources.nodes))
if resources.gpus:
self.append_pbs_pragma('l', 'gpus={0}'.format(resources.gpus))
if resources.pvmem:
self.append_pbs_pragma('l', 'pvmem={0}'.format(resources.pvmem))
if resources.vmem:
self.append_pbs_pragma('l', 'vmem={0}'.format(resources.vmem))
def write_wallclock(self, wallclock):
assert isinstance(wallclock, HPC_Time)
self.append_pbs_pragma("l", "walltime={0}:{1}:{2}".format(wallclock.h, wallclock.m, wallclock.s))
class RemoteInstance(object):
def __init__(self, manager_proxy, client, script_generator, logger=DummyLogger(), object_descriptor=None):
assert isinstance(client, Client)
self.args = tuple()
self.kwargs = dict()
self.obj_descriptor = object_descriptor
self.manager_proxy = manager_proxy
self.client = client
self.script_generator = script_generator
(self.sub_id, self.logfile) = self.manager_proxy.sub_id_request()
self.logger = logger.duplicate(logger_name='Instance {0}'.format(self.sub_id))
self.logger.info("sub_id {0} received".format(self.sub_id))
self.remote_obj = None
self.executor_local_host = None
self.executor_local_port = None
self.proxy_info = None
self.orphan = False
self.submitted = False
self.execution_controller = None
self.stage_submission()
def __call__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
return self
def stage_submission(self):
kwargs = {'manager_ip': communication_environment().manager_host,
'sub_id': self.sub_id}
ex_env = execution_environment()
exe = "{1} -f {0}.log -L DEBUG start executor ".format(self.logfile, ex_env.executor_command_line_prefix)
exe += "manager_ip={manager_ip} sub_id={sub_id}".format(**kwargs)
script = self.script_generator.generate_submit_script(exe, self.logfile, self.sub_id)
self.manager_proxy.sub_stage(self.sub_id, script)
def make_tunnel(self):
if self.proxy_info:
comm_env = communication_environment()
self.executor_local_host, self.executor_local_port = comm_env.client2executor_tunnel(
self.proxy_info['host'],
self.proxy_info['port'])
else:
raise Exception('Cannot make tunnel without a ready executor. Have you submitted?')
def get_execution_controller(self):
if self.manager_proxy.in_state(self.sub_id, 'ready') and not self.execution_controller:
self.proxy_info = self.manager_proxy.get_proxy_info(self.sub_id)
self.make_tunnel()
self._get_execution_controller()
def _get_execution_controller(self):
self.execution_controller = WrappedProxy('remote_execution.executor.controller', self.executor_local_host,
self.executor_local_port,
logger=self.logger.duplicate(append_name='Exec'))
def make_obj(self, obj_descriptor):
if not all([self.manager_proxy.in_state(self.sub_id, 'ready'), self.execution_controller]):
raise Exception('Execution controller not ready')
prototype_set = False
if isinstance(obj_descriptor, ClassInfo):
self.execution_controller.set_prototype(cls=obj_descriptor.class_name,
module=obj_descriptor.module)
prototype_set = True
elif hasattr(obj_descriptor, '__name__') and hasattr(obj_descriptor, '__module__'):
self.execution_controller.set_prototype(cls=obj_descriptor.__name__,
module=obj_descriptor.__module__)
prototype_set = True
if prototype_set:
obj_info = self.execution_controller.register_new_object(*self.args, **self.kwargs)
return self._get_obj(obj_info)
else:
raise InvalidUserInput('Descriptor matches no valid ways of setting the prototype',
argname='obj_descriptor',
found=obj_descriptor)
def _get_obj(self, obj_info):
return WrappedProxy(obj_info['object_id'], self.executor_local_host, self.executor_local_port,
logger=self.logger.duplicate(append_name='RemoteObj'))
def wait_for_state(self, target_state, iter_limit=100):
state, t = self.manager_proxy.sub_stat(self.sub_id)
i = 0
while not self.manager_proxy.has_reached_state(self.sub_id, target_state):
# raise KeyboardInterrupt()
if t > 0:
self.client.logger.debug(
'Waiting for remote object to get to {2}.\n\t Current state: {0}\n\t Seconds left: {1}'.format(
state, t, target_state))
sleep(min([t, 30]))
i = 0
elif i > iter_limit:
raise Exception('iter limit reached. no progression.')
i += 1
state, t = self.manager_proxy.sub_stat(self.sub_id)
return state
def submit(self, no_wait=False):
self.manager_proxy.sub_start(self.sub_id)
self.submitted = True
if not no_wait:
self.wait_for_state('ready')
def __enter__(self):
try:
self.submit()
self.get_execution_controller()
return self.make_obj(self.obj_descriptor)
except Exception:
self.close()
raise
# noinspection PyBroadException
def close(self):
if not self.orphan and self.submitted:
try:
self.manager_proxy.sub_shut(self.sub_id)
except Exception as e:
self.logger.warning('Error during sub shut: {0}'.format(e.message))
ex_env = execution_environment()
Popen(ex_env.client_command_line_prefix.split(' ') + ['-r', 'stop', 'executor',
'sub_id={0}'.format(self.sub_id)])
# noinspection PyUnusedLocal
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def __del__(self):
self.__exit__(1, 2, 3)
| gpl-3.0 | 26,321,684,994,956,988 | 40.12285 | 119 | 0.602378 | false |