repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses 15 values) | hash (int64 -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64 6.51-99.9) | line_max (int64 15-997) | alpha_frac (float64 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
aringh/odl | examples/tomo/backends/astra_performance_cuda_parallel_2d_cg.py | 1 | 2880 |
"""Performance example of running native ASTRA vs using ODL for reconstruction.
In this example, a 512x512 image is reconstructed using the Conjugate Gradient
Least Squares method on the GPU.
In general, ASTRA is faster than ODL since it does not need to perform any
copies and all arithmetic is performed on the GPU. Despite this, ODL is not
much slower. In this example, the overhead is about 60 %, depending on the
hardware used.
"""
import astra
import numpy as np
import matplotlib.pyplot as plt
import scipy.misc
import odl
# Common geometry parameters
domain_size = np.array([512, 512])
n_angles = 180
det_size = 362
niter = 50
phantom = np.rot90(scipy.misc.ascent().astype('float'), -1)
# --- ASTRA ---
# Define ASTRA geometry
vol_geom = astra.create_vol_geom(domain_size[0], domain_size[1])
proj_geom = astra.create_proj_geom('parallel',
np.linalg.norm(domain_size) / det_size,
det_size,
np.linspace(0, np.pi, n_angles))
# Create ASTRA projector
proj_id = astra.create_projector('cuda', proj_geom, vol_geom)
# Create sinogram
sinogram_id, sinogram = astra.create_sino(phantom, proj_id)
# Create a data object for the reconstruction
rec_id = astra.data2d.create('-vol', vol_geom)
# Set up the parameters for a reconstruction algorithm using the CUDA backend
cfg = astra.astra_dict('CGLS_CUDA')
cfg['ReconstructionDataId'] = rec_id
cfg['ProjectionDataId'] = sinogram_id
cfg['ProjectorId'] = proj_id
# Create the algorithm object from the configuration structure
alg_id = astra.algorithm.create(cfg)
with odl.util.Timer('ASTRA run'):
# Run the algorithm
astra.algorithm.run(alg_id, niter)
# Get the result
rec = astra.data2d.get(rec_id)
# Clean up.
astra.algorithm.delete(alg_id)
astra.data2d.delete(rec_id)
astra.data2d.delete(sinogram_id)
astra.projector.delete(proj_id)
# --- ODL ---
# Create reconstruction space
reco_space = odl.uniform_discr(-domain_size / 2, domain_size / 2, domain_size)
# Create geometry
geometry = odl.tomo.parallel_beam_geometry(reco_space, n_angles, det_size)
# Create ray transform
ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl='astra_cuda')
# Create sinogram
data = ray_trafo(phantom)
# Solve with CGLS (aka CGN)
x = reco_space.zero()
with odl.util.Timer('ODL run'):
odl.solvers.conjugate_gradient_normal(ray_trafo, x, data, niter=niter)
# Display results for comparison
plt.figure('Phantom')
plt.imshow(phantom.T, origin='lower', cmap='bone')
plt.figure('ASTRA sinogram')
plt.imshow(sinogram.T, origin='lower', cmap='bone')
plt.figure('ASTRA reconstruction')
plt.imshow(rec.T, origin='lower', cmap='bone')
plt.figure('ODL sinogram')
plt.imshow(data.asarray().T, origin='lower', cmap='bone')
plt.figure('ODL reconstruction')
plt.imshow(x.asarray().T, origin='lower', cmap='bone')
plt.show()
| mpl-2.0 | -7,807,791,797,508,731,000 | 28.090909 | 79 | 0.711111 | false |
piotrmaslanka/satella | satella/configuration/sources/from_dict.py | 1 | 2451 |
import copy
import importlib
import warnings
from satella.coding.recast_exceptions import rethrow_as
from satella.configuration import sources
from satella.configuration.sources.base import BaseSource
from satella.exceptions import ConfigurationError, ConfigurationMisconfiguredError
__all__ = [
'load_source_from_dict',
'load_source_from_list'
]
def handle_import(dct: dict):
def convert(v):
if 'cast_before' in dct:
v = EXTRA_TYPES[dct['cast_before']['type']](dct['cast_before'])(v)
return getattr(importlib.import_module(dct['module']), dct['attribute'])(v)
return convert
EXTRA_TYPES = {
'binary': lambda dct: dct['value'].encode(dct.get('encoding', 'ascii')),
'lambda': lambda dct: eval('lambda x: ' + dct['operation'], globals(),
locals()),
'import': handle_import,
}
@rethrow_as(Exception, ConfigurationError)
def load_source_from_dict(dct: dict) -> BaseSource:
"""
obj has a form of
{
"type": "BaseSource",
"args": [] # optional
... kwargs
}
:raises ConfigurationError: upon failure to instantiate
"""
dct = copy.copy(dct)
type_ = dct.pop('type') # type: str
if 'arg' in dct:
args = dct.pop('arg'),
else:
args = dct.pop('args', []) # type: tp.List
optional = dct.pop('optional', False) # type: bool
def to_arg(arg):
if isinstance(arg, dict) and 'type' in arg:
a_type = arg['type']
if a_type in EXTRA_TYPES:
return EXTRA_TYPES[a_type](arg)
elif a_type in sources.__dict__:
return load_source_from_dict(arg)
else:
                warnings.warn(
                    'Unknown type %s while parsing a dict with a "type" key, '
                    'returning original value' % (a_type,), UserWarning)
return arg
else:
return arg
args = map(to_arg, args)
kwargs = {k: to_arg(v) for k, v in dct.items()}
try:
s = sources.__dict__[type_](*args, **kwargs)
except KeyError as e:
raise ConfigurationMisconfiguredError('unknown type %s' % (type_,))
if optional:
s = sources.OptionalSource(s)
return s
def load_source_from_list(obj: list) -> 'sources.MergingSource':
"""
Builds a MergingSource from dict-ed objects
"""
return sources.MergingSource(*map(load_source_from_dict, obj))
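# Illustrative sketch (not part of the original module): the kind of dict this
# loader expects. It names a source class exported by satella.configuration.sources;
# "JSONSource" below is only an assumed example of such a class.
#
#   load_source_from_dict({
#       'type': 'JSONSource',
#       'args': ['{"key": "value"}'],
#       'optional': True,
#   })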
| bsd-3-clause | 1,726,175,575,730,562,800 | 26.852273 | 98 | 0.586699 | false |
lalpert/gradsearch-scala | scrapers/gradsearch/spiders/princetonee_spider.py | 1 | 1145 |
import scrapy
import urlparse
from gradsearch.items import Professor
class PrincetonEESpider(scrapy.Spider):
name = "princeton_ee"
allowed_domains = ["princeton.edu"]
start_urls = ["http://ee.princeton.edu/people/faculty"]
def cleanup(self, sel):
return sel.xpath('normalize-space(text())').extract()
def parse(self, response):
for prof_box in response.css(".views-row"):
href = prof_box.xpath('./div/span/a/@href').extract()
if href:
yield scrapy.Request(urlparse.urljoin(response.url, href[0]), callback = self.parse_prof)
def parse_prof(self, response):
name = response.css('.node').xpath('.//h1/text()').extract()[0]
keywords = response.css('h4.core-areas').xpath('./a/text()').extract() # TODO: can also get "application thrusts"
research_summary = ''.join(response.css('.field').xpath('./div/div/node()').extract()[1:])
image = response.css('.node').xpath('.//img/@src').extract()[0]
department = "Electrical Engineering"
yield Professor(
name = name,
keywords = keywords,
school = "Princeton",
image = image,
research_summary = research_summary,
department = department)
| mit | -9,096,574,537,370,510,000 | 32.676471 | 115 | 0.676856 | false |
alexbruy/QGIS | python/plugins/processing/gui/GetScriptsAndModels.py | 1 | 14193 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
GetScriptsAndModels.py
---------------------
Date : June 2014
Copyright : (C) 2014 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'June 2014'
__copyright__ = '(C) 2014, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import json
from functools import partial
from qgis.PyQt import uic
from qgis.PyQt.QtCore import Qt, QCoreApplication, QUrl
from qgis.PyQt.QtGui import QIcon, QCursor
from qgis.PyQt.QtWidgets import QApplication, QTreeWidgetItem, QPushButton
from qgis.PyQt.QtNetwork import QNetworkReply, QNetworkRequest
from qgis.utils import iface, show_message_log
from qgis.core import QgsNetworkAccessManager, QgsMessageLog
from qgis.gui import QgsMessageBar
from processing.core.alglist import algList
from processing.gui.ToolboxAction import ToolboxAction
from processing.gui import Help2Html
from processing.gui.Help2Html import getDescription, ALG_DESC, ALG_VERSION, ALG_CREATOR
from processing.script.ScriptUtils import ScriptUtils
from processing.algs.r.RUtils import RUtils
from processing.modeler.ModelerUtils import ModelerUtils
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'DlgGetScriptsAndModels.ui'))
class GetScriptsAction(ToolboxAction):
def __init__(self):
self.name, self.i18n_name = self.trAction('Get scripts from on-line scripts collection')
self.group, self.i18n_group = self.trAction('Tools')
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'script.png'))
def execute(self):
dlg = GetScriptsAndModelsDialog(GetScriptsAndModelsDialog.SCRIPTS)
dlg.exec_()
if dlg.updateProvider:
algList.reloadProvider('script')
class GetRScriptsAction(ToolboxAction):
def __init__(self):
self.name, self.i18n_name = self.trAction('Get R scripts from on-line scripts collection')
self.group, self.i18n_group = self.trAction('Tools')
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'r.svg'))
def execute(self):
dlg = GetScriptsAndModelsDialog(GetScriptsAndModelsDialog.RSCRIPTS)
dlg.exec_()
if dlg.updateProvider:
self.toolbox.updateProvider('r')
class GetModelsAction(ToolboxAction):
def __init__(self):
self.name, self.i18n_name = self.trAction('Get models from on-line scripts collection')
self.group, self.i18n_group = self.trAction('Tools')
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'model.png'))
def execute(self):
dlg = GetScriptsAndModelsDialog(GetScriptsAndModelsDialog.MODELS)
dlg.exec_()
if dlg.updateProvider:
algList.reloadProvider('model')
class GetScriptsAndModelsDialog(BASE, WIDGET):
HELP_TEXT = QCoreApplication.translate('GetScriptsAndModelsDialog',
'<h3> Processing resources manager </h3>'
'<p>Check/uncheck algorithms in the tree to select the ones that you '
'want to install or remove</p>'
'<p>Algorithms are divided in 3 groups:</p>'
'<ul><li><b>Installed:</b> Algorithms already in your system, with '
'the latest version available</li>'
'<li><b>Updatable:</b> Algorithms already in your system, but with '
'a newer version available in the server</li>'
'<li><b>Not installed:</b> Algorithms not installed in your '
'system</li></ul>')
MODELS = 0
SCRIPTS = 1
RSCRIPTS = 2
tr_disambiguation = {0: 'GetModelsAction',
1: 'GetScriptsAction',
2: 'GetRScriptsAction'}
def __init__(self, resourceType):
super(GetScriptsAndModelsDialog, self).__init__(iface.mainWindow())
self.setupUi(self)
if hasattr(self.leFilter, 'setPlaceholderText'):
self.leFilter.setPlaceholderText(self.tr('Search...'))
self.manager = QgsNetworkAccessManager.instance()
self.resourceType = resourceType
if self.resourceType == self.MODELS:
self.folder = ModelerUtils.modelsFolders()[0]
self.urlBase = 'https://raw.githubusercontent.com/qgis/QGIS-Processing/master/models/'
self.icon = QIcon(os.path.join(pluginPath, 'images', 'model.png'))
elif self.resourceType == self.SCRIPTS:
self.folder = ScriptUtils.scriptsFolders()[0]
self.urlBase = 'https://raw.githubusercontent.com/qgis/QGIS-Processing/master/scripts/'
self.icon = QIcon(os.path.join(pluginPath, 'images', 'script.png'))
else:
self.folder = RUtils.RScriptsFolders()[0]
self.urlBase = 'https://raw.githubusercontent.com/qgis/QGIS-Processing/master/rscripts/'
self.icon = QIcon(os.path.join(pluginPath, 'images', 'r.svg'))
self.lastSelectedItem = None
self.updateProvider = False
self.data = None
self.populateTree()
self.buttonBox.accepted.connect(self.okPressed)
self.buttonBox.rejected.connect(self.cancelPressed)
self.tree.currentItemChanged.connect(self.currentItemChanged)
self.leFilter.textChanged.connect(self.fillTree)
def popupError(self, error=None, url=None):
"""Popups an Error message bar for network errors."""
disambiguation = self.tr_disambiguation[self.resourceType]
widget = iface.messageBar().createMessage(self.tr('Connection problem', disambiguation),
self.tr('Could not connect to scripts/models repository', disambiguation))
if error and url:
QgsMessageLog.logMessage(self.tr(u"Network error code: {} on URL: {}").format(error, url), self.tr(u"Processing"), QgsMessageLog.CRITICAL)
button = QPushButton(QCoreApplication.translate("Python", "View message log"), pressed=show_message_log)
widget.layout().addWidget(button)
iface.messageBar().pushWidget(widget, level=QgsMessageBar.CRITICAL, duration=5)
def grabHTTP(self, url, loadFunction, arguments=None):
"""Grab distant content via QGIS internal classes and QtNetwork."""
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
request = QUrl(url)
reply = self.manager.get(QNetworkRequest(request))
if arguments:
reply.finished.connect(partial(loadFunction, reply, arguments))
else:
reply.finished.connect(partial(loadFunction, reply))
while not reply.isFinished():
QCoreApplication.processEvents()
def populateTree(self):
self.grabHTTP(self.urlBase + 'list.txt', self.treeLoaded)
def treeLoaded(self, reply):
"""
update the tree of scripts/models whenever
HTTP request is finished
"""
QApplication.restoreOverrideCursor()
if reply.error() != QNetworkReply.NoError:
self.popupError(reply.error(), reply.request().url().toString())
else:
resources = unicode(reply.readAll()).splitlines()
resources = [r.split(',') for r in resources]
self.resources = {f: (v, n) for f, v, n in resources}
reply.deleteLater()
self.fillTree()
def fillTree(self):
self.tree.clear()
self.uptodateItem = QTreeWidgetItem()
self.uptodateItem.setText(0, self.tr('Installed'))
self.toupdateItem = QTreeWidgetItem()
self.toupdateItem.setText(0, self.tr('Updatable'))
self.notinstalledItem = QTreeWidgetItem()
self.notinstalledItem.setText(0, self.tr('Not installed'))
self.toupdateItem.setIcon(0, self.icon)
self.uptodateItem.setIcon(0, self.icon)
self.notinstalledItem.setIcon(0, self.icon)
text = unicode(self.leFilter.text())
for i in sorted(self.resources.keys(), key=lambda kv: kv[2].lower()):
filename = i
version = self.resources[filename][0]
name = self.resources[filename][1]
treeBranch = self.getTreeBranchForState(filename, float(version))
if text == '' or text.lower() in filename.lower():
item = TreeItem(filename, name, self.icon)
treeBranch.addChild(item)
if treeBranch != self.notinstalledItem:
item.setCheckState(0, Qt.Checked)
self.tree.addTopLevelItem(self.toupdateItem)
self.tree.addTopLevelItem(self.notinstalledItem)
self.tree.addTopLevelItem(self.uptodateItem)
if text != '':
self.tree.expandAll()
self.txtHelp.setHtml(self.HELP_TEXT)
def setHelp(self, reply, item):
"""Change the HTML content"""
QApplication.restoreOverrideCursor()
if reply.error() != QNetworkReply.NoError:
html = self.tr('<h2>No detailed description available for this script</h2>')
else:
content = unicode(reply.readAll())
descriptions = json.loads(content)
html = '<h2>%s</h2>' % item.name
html += self.tr('<p><b>Description:</b> %s</p>') % getDescription(ALG_DESC, descriptions)
html += self.tr('<p><b>Created by:</b> %s') % getDescription(ALG_CREATOR, descriptions)
html += self.tr('<p><b>Version:</b> %s') % getDescription(ALG_VERSION, descriptions)
reply.deleteLater()
self.txtHelp.setHtml(html)
def currentItemChanged(self, item, prev):
if isinstance(item, TreeItem):
url = self.urlBase + item.filename.replace(' ', '%20') + '.help'
self.grabHTTP(url, self.setHelp, item)
else:
self.txtHelp.setHtml(self.HELP_TEXT)
def getTreeBranchForState(self, filename, version):
if not os.path.exists(os.path.join(self.folder, filename)):
return self.notinstalledItem
else:
helpFile = os.path.join(self.folder, filename + '.help')
try:
with open(helpFile) as f:
helpContent = json.load(f)
currentVersion = float(helpContent[Help2Html.ALG_VERSION])
except Exception:
currentVersion = 0
if version > currentVersion:
return self.toupdateItem
else:
return self.uptodateItem
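    # Illustrative note (not from the original file): each downloadable script or
    # model has a companion "<filename>.help" JSON file; its "ALG_VERSION" entry
    # is what gets compared against the server-side version, e.g. {"ALG_VERSION": 1.5}.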
def cancelPressed(self):
super(GetScriptsAndModelsDialog, self).reject()
def storeFile(self, reply, filename):
"""store a script/model that has been downloaded"""
QApplication.restoreOverrideCursor()
if reply.error() != QNetworkReply.NoError:
if os.path.splitext(filename)[1].lower() == '.help':
content = '{"ALG_VERSION" : %s}' % self.resources[filename[:-5]][0]
else:
self.popupError(reply.error(), reply.request().url().toString())
content = None
else:
content = reply.readAll()
reply.deleteLater()
if content:
path = os.path.join(self.folder, filename)
with open(path, 'w') as f:
f.write(content)
self.progressBar.setValue(self.progressBar.value() + 1)
def okPressed(self):
toDownload = []
for i in xrange(self.toupdateItem.childCount()):
item = self.toupdateItem.child(i)
if item.checkState(0) == Qt.Checked:
toDownload.append(item.filename)
for i in xrange(self.notinstalledItem.childCount()):
item = self.notinstalledItem.child(i)
if item.checkState(0) == Qt.Checked:
toDownload.append(item.filename)
if toDownload:
self.progressBar.setMaximum(len(toDownload) * 2)
for i, filename in enumerate(toDownload):
QCoreApplication.processEvents()
url = self.urlBase + filename.replace(' ', '%20')
self.grabHTTP(url, self.storeFile, filename)
url += '.help'
self.grabHTTP(url, self.storeFile, filename + '.help')
toDelete = []
for i in xrange(self.uptodateItem.childCount()):
item = self.uptodateItem.child(i)
if item.checkState(0) == Qt.Unchecked:
toDelete.append(item.filename)
# Remove py and help files if they exist
for filename in toDelete:
for pathname in (filename, filename + u".help"):
path = os.path.join(self.folder, pathname)
if os.path.exists(path):
os.remove(path)
self.updateProvider = len(toDownload) + len(toDelete) > 0
super(GetScriptsAndModelsDialog, self).accept()
class TreeItem(QTreeWidgetItem):
def __init__(self, filename, name, icon):
QTreeWidgetItem.__init__(self)
self.name = name
self.filename = filename
self.setText(0, name)
self.setIcon(0, icon)
self.setCheckState(0, Qt.Unchecked)
| gpl-2.0 | -6,995,422,623,564,464,000 | 40.258721 | 150 | 0.593462 | false |
italiangrid/grinder-load-testsuite | storm/base/rf.py | 1 | 1096 |
from common import TestID, log_surl_call_result
from eu.emi.security.authn.x509.impl import PEMCredential
from exceptions import Exception
from jarray import array
from java.io import FileInputStream
from javax.net.ssl import X509ExtendedKeyManager
from net.grinder.plugin.http import HTTPRequest
from net.grinder.script import Test
from net.grinder.script.Grinder import grinder
from org.italiangrid.srm.client import SRMClient, SRMClientFactory
import random
import traceback
error = grinder.logger.error
info = grinder.logger.info
debug = grinder.logger.debug
props = grinder.properties
def rf(surl, token, client):
debug("Releasing file %s with token %s" % (surl,token))
res= client.srmReleaseFiles(token,[surl])
debug("File released")
return res
class TestRunner:
def __call__(self, surl, token, client):
if client is None:
raise Exception("Please set a non-null SRM client!")
test = Test(TestID.RF, "StoRM RF")
test.record(rf)
try:
return rf(surl, token, client)
except Exception:
error("Error executing srmRf: %s" % traceback.format_exc())
raise
| apache-2.0 | -7,394,840,733,900,756,000 | 23.355556 | 66 | 0.760036 | false |
snap-stanford/ogb | examples/lsc/mag240m/label_prop.py | 1 | 2884 |
# NOTE: More than 256GB CPU memory required to run this script.
# Use `--low-memory` to reduce memory consumption by using half-precision
import os.path as osp
import time
import argparse
import torch
import torch.nn.functional as F
from torch_sparse import SparseTensor
from torch_geometric.nn import LabelPropagation
from torch_geometric.nn.conv.gcn_conv import gcn_norm
from ogb.lsc import MAG240MDataset, MAG240MEvaluator
from root import ROOT
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--num_layers', type=int, default=3),
parser.add_argument('--alpha', type=float, default=0.9),
parser.add_argument('--low-memory', action='store_true'),
args = parser.parse_args()
print(args)
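    # Example invocation (illustrative, not part of the original script):
    #   python label_prop.py --num_layers 3 --alpha 0.9 --low-memory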
dataset = MAG240MDataset(ROOT)
evaluator = MAG240MEvaluator()
t = time.perf_counter()
print('Reading adjacency matrix...', end=' ', flush=True)
path = f'{dataset.dir}/paper_to_paper_symmetric.pt'
if osp.exists(path):
adj_t = torch.load(path)
else:
edge_index = dataset.edge_index('paper', 'cites', 'paper')
edge_index = torch.from_numpy(edge_index)
adj_t = SparseTensor(
row=edge_index[0], col=edge_index[1],
sparse_sizes=(dataset.num_papers, dataset.num_papers),
is_sorted=True)
adj_t = adj_t.to_symmetric()
torch.save(adj_t, path)
adj_t = gcn_norm(adj_t, add_self_loops=False)
if args.low_memory:
adj_t = adj_t.to(torch.half)
print(f'Done! [{time.perf_counter() - t:.2f}s]')
train_idx = dataset.get_idx_split('train')
valid_idx = dataset.get_idx_split('valid')
test_idx = dataset.get_idx_split('test')
y_train = torch.from_numpy(dataset.paper_label[train_idx]).to(torch.long)
y_valid = torch.from_numpy(dataset.paper_label[valid_idx]).to(torch.long)
model = LabelPropagation(args.num_layers, args.alpha)
N, C = dataset.num_papers, dataset.num_classes
t = time.perf_counter()
print('Propagating labels...', end=' ', flush=True)
if args.low_memory:
y = torch.zeros(N, C, dtype=torch.half)
y[train_idx] = F.one_hot(y_train, C).to(torch.half)
out = model(y, adj_t, post_step=lambda x: x)
y_pred = out.argmax(dim=-1)
else:
y = torch.zeros(N, C)
y[train_idx] = F.one_hot(y_train, C).to(torch.float)
out = model(y, adj_t)
y_pred = out.argmax(dim=-1)
print(f'Done! [{time.perf_counter() - t:.2f}s]')
train_acc = evaluator.eval({
'y_true': y_train,
'y_pred': y_pred[train_idx]
})['acc']
valid_acc = evaluator.eval({
'y_true': y_valid,
'y_pred': y_pred[valid_idx]
})['acc']
print(f'Train: {train_acc:.4f}, Valid: {valid_acc:.4f}')
res = {'y_pred': y_pred[test_idx]}
evaluator.save_test_submission(res, 'results/label_prop')
| mit | 2,963,788,015,815,800,000 | 34.170732 | 79 | 0.626907 | false |
kmuehlbauer/wradlib | wradlib/qual.py | 1 | 8000 |
#!/usr/bin/env python
# Copyright (c) 2011-2018, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
Data Quality
^^^^^^^^^^^^
This module will serve two purposes:
#. provide routines to create simple radar data quality related fields.
#. provide routines to decide which radar pixel to choose based on the
competing information in different quality fields.
Data is supposed to be stored in 'aligned' arrays. Aligned here means that
all fields are structured such that in each field the data for a certain index
is representative for the same physical target.
Therefore no assumptions are made on the dimensions or shape of the input
fields except that they exhibit the numpy ndarray interface.
.. autosummary::
:nosignatures:
:toctree: generated/
pulse_volume
beam_block_frac
cum_beam_block_frac
get_bb_ratio
"""
import numpy as np
def pulse_volume(ranges, h, theta):
"""Calculates the sampling volume of the radar beam per bin depending on \
range and aperture.
We assume a cone frustum which has the volume
:math:`V=(\\pi/3) \\cdot h \\cdot (R^2 + R \\cdot r + r^2)`.
R and r are the radii of the two frustum surface circles. Assuming that the
pulse width is small compared to the range, we get
:math:`R=r= \\tan ( 0.5 \\cdot \\theta \\cdot \\pi/180 ) \\cdot range`
with theta being the aperture angle (beam width).
Thus, the pulse volume simply becomes the volume of a cylinder with
:math:`V=\\pi \\cdot h \\cdot range^2 \\cdot \\tan(
0.5 \\cdot \\theta \\cdot \\pi/180)^2`
Parameters
----------
ranges : :class:`numpy:numpy.ndarray`
the distances of each bin from the radar [m]
h : float
pulse width (which corresponds to the range resolution [m])
theta : float
the aperture angle (beam width) of the radar beam [degree]
Returns
-------
output : :class:`numpy:numpy.ndarray`
Volume of radar bins at each range in `ranges` [:math:`m^3`]
Examples
--------
See :ref:`/notebooks/workflow/recipe1.ipynb`.
"""
return np.pi * h * (ranges ** 2) * (np.tan(np.radians(theta/2.))) ** 2
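# Rough usage sketch (not from the original module; the numbers are illustrative):
# a bin 10 km from the radar, with a 100 m pulse width and a 1 degree beam width,
# has a volume of roughly pi * 100 * (1e4)**2 * tan(0.5 deg)**2, i.e. about 2.4e6 m^3.
#
#   pulse_volume(np.array([10000.0]), 100.0, 1.0)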
def beam_block_frac(th, bh, a):
"""Partial beam blockage fraction.
Note
----
Code was migrated from https://github.com/nguy/PyRadarMet.
From Bech et al. (2003), Eqn 2 and Appendix
Parameters
----------
th : float | :class:`numpy:numpy.ndarray` of floats
Terrain height [m]
bh : float | :class:`numpy:numpy.ndarray` of floats
Beam height [m]
a : float | :class:`numpy:numpy.ndarray` of floats
Half power beam radius [m]
Returns
-------
pbb : float
Partial beam blockage fraction [unitless]
Examples
--------
>>> pbb = beam_block_frac(th,bh,a) #doctest: +SKIP
See :ref:`/notebooks/beamblockage/wradlib_beamblock.ipynb`.
Note
----
This procedure uses a simplified interception function where no vertical
gradient of refractivity is considered. Other algorithms treat this
more thoroughly. However, this is accurate in most cases other than
the super-refractive case.
See the the half_power_radius function to calculate variable `a`.
The heights must be the same units!
"""
isfloat = (isinstance(th, float)
and isinstance(bh, float)
and isinstance(a, float))
# convert to numpy array in any case
th = np.atleast_1d(th)
bh = np.atleast_1d(bh)
a = np.atleast_1d(a)
# First find the difference between the terrain and height of
# radar beam (Bech et al. (2003), Fig.3)
y = th - bh
# check if beam is clear or blocked
ya = y / a
clear = ya < -1.
block = ya > 1.
numer = (ya * np.sqrt(a ** 2 - y ** 2)) + \
(a * np.arcsin(ya)) + (np.pi * a / 2.)
denom = np.pi * a
pbb = numer / denom
pbb[clear] = 0.
pbb[block] = 1.
if isfloat:
return pbb[0]
else:
return pbb
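# Illustrative example (not part of the original module): a terrain height of
# 200 m, a beam centre height of 200 m and a half-power beam radius of 100 m
# gives a partial blockage fraction of 0.5, since the beam centre grazes the
# terrain top.
#
#   beam_block_frac(200.0, 200.0, 100.0)  # -> 0.5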
def cum_beam_block_frac(pbb):
"""Cumulative beam blockage fraction along a beam.
Computes the cumulative beam blockage (cbb) along a beam from the partial
beam blockage (pbb) fraction of each bin along that beam. CBB in one bin
along a beam will always be at least as high as the maximum PBB of the
    preceding bins.
Parameters
----------
pbb : :class:`numpy:numpy.ndarray`
2-D array of floats of shape (num beams, num range bins)
Partial beam blockage fraction of a bin along a beam [m]
Returns
-------
cbb : :class:`numpy:numpy.ndarray`
Array of floats of the same shape as pbb
Cumulative partial beam blockage fraction [unitless]
Examples
--------
>>> pbb = beam_block_frac(th, bh, a) #doctest: +SKIP
>>> cbb = cum_beam_block_frac(pbb) #doctest: +SKIP
See :ref:`/notebooks/beamblockage/wradlib_beamblock.ipynb`.
"""
# This is the index of the maximum PBB along each beam
maxindex = np.nanargmax(pbb, axis=1)
cbb = np.copy(pbb)
# Iterate over all beams
for ii, index in enumerate(maxindex):
premax = 0.
for jj in range(index):
# Only iterate to max index to make this faster
if pbb[ii, jj] > premax:
cbb[ii, jj] = pbb[ii, jj]
premax = pbb[ii, jj]
else:
cbb[ii, jj] = premax
# beyond max index, everything is max anyway
cbb[ii, index:] = pbb[ii, index]
return cbb
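# Illustrative example (not part of the original module): along a single beam the
# cumulative fraction never drops once the running maximum has been passed.
#
#   cum_beam_block_frac(np.array([[0.0, 0.3, 0.1, 0.6, 0.2]]))
#   # -> array([[0. , 0.3, 0.3, 0.6, 0.6]])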
def get_bb_ratio(bb_height, bb_width, quality, zp_r):
"""Returns the Bright Band ratio of each PR bin
With *SR*, we refer to precipitation radars based on space-born platforms
such as TRMM or GPM.
This function basically applies the Bright Band (BB) information as
provided by the corresponding SR datasets per beam, namely BB height and
width, as well as quality flags of the SR beams. A BB ratio of <= 0
indicates that a bin is located below the melting layer (ML), >=1
above the ML, and in between 0 and 1 inside the ML.
Parameters
----------
bb_height : :class:`numpy:numpy.ndarray`
Array of shape (nscans, nbeams) containing the SR beams' BB heights
in meters.
bb_width : :class:`numpy:numpy.ndarray`
Array of shape (nscans, nbeams) containing the SR beams' BB widths
in meters.
quality : :class:`numpy:numpy.ndarray`
Array of shape (nscans, nbeams) containing the SR beams' BB quality
index.
zp_r : :class:`numpy:numpy.ndarray`
Array of SR bin altitudes of shape (nscans, nbeams, nbins).
Returns
-------
ratio : :class:`numpy:numpy.ndarray`
Array of shape (nscans, nbeams, nbins) containing the BB ratio of
every SR bin.
- ratio <= 0: below ml
- 0 < ratio < 1: between ml
- 1 <= ratio: above ml
ibb : :class:`numpy:numpy.ndarray`
Boolean array containing the indices of SR bins connected to the
BB.
"""
# parameters for bb detection
ibb = (bb_height > 0) & (bb_width > 0) & (quality == 1)
# set non-bb-pixels to np.nan
bb_height = bb_height.copy()
bb_height[~ibb] = np.nan
bb_width = bb_width.copy()
bb_width[~ibb] = np.nan
# get median of bb-pixels
bb_height_m = np.nanmedian(bb_height)
bb_width_m = np.nanmedian(bb_width)
# approximation of melting layer top and bottom
zmlt = bb_height_m + bb_width_m / 2.
zmlb = bb_height_m - bb_width_m / 2.
# get ratio connected to brightband height
ratio = (zp_r - zmlb) / (zmlt - zmlb)
return ratio, ibb
if __name__ == '__main__':
print('wradlib: Calling module <qual> as main...')
| mit | 8,926,593,673,090,248,000 | 29.128405 | 79 | 0.6045 | false |
dstaple/z3test | scripts/mk_copyright.py | 3 | 1766 |
# Copyright (c) 2015 Microsoft Corporation
import os
import re
cr = re.compile("Copyright")
aut = re.compile("Automatically generated")
aut2 = re.compile("auto-generated")
cr_notice = """
/*++
Copyright (c) 2015 Microsoft Corporation
--*/
"""
smt2_cr_notice = """
; Copyright (c) 2015 Microsoft Corporation
"""
py_cr_notice = """
# Copyright (c) 2015 Microsoft Corporation
"""
def has_cr(file):
ins = open(file)
lines = 0
line = ins.readline()
while line and lines < 20:
m = cr.search(line)
if m:
ins.close()
return True
m = aut.search(line)
if m:
ins.close()
return True
m = aut2.search(line)
if m:
ins.close()
return True
line = ins.readline()
ins.close()
return False
def add_cr(file):
tmp = "%s.tmp" % file
ins = open(file)
ous = open(tmp,'w')
if file.endswith("smt2"):
ous.write(smt2_cr_notice)
elif file.endswith("py"):
ous.write(py_cr_notice)
else:
ous.write(cr_notice)
line = ins.readline()
while line:
ous.write(line)
line = ins.readline()
ins.close()
ous.close()
os.system("move %s %s" % (tmp, file))
def add_missing_cr(dir):
for root, dirs, files in os.walk(dir):
for f in files:
if f.endswith('.cpp') or f.endswith('.h') or f.endswith('.c') or f.endswith('.cs') or f.endswith('.py') or f.endswith('.smt2'):
path = "%s\\%s" % (root, f)
if not has_cr(path):
print("Missing CR for %s" % path)
add_cr(path)
add_missing_cr('regressions')
#add_missing_cr('old-regressions')
#add_missing_cr('ClusterExperiment')
| mit | 3,061,600,513,663,548,000 | 21.641026 | 139 | 0.543601 | false |
sburnett/seattle | autograder/emulab/sample_client.py | 1 | 2122 |
#from remote_emulab import *
import remote_emulab
# This is a very rough sample of what a client
# to the remote_emulab.py library will look like
#
# Everything below is hardcoded and is not intended
# to be executed (it will fail because it assumes a
# new and unique exp name). It is just an exmaple
# senerio.
# if you want to run it and see it work, first you'll
# need to set up keys and your login for remote_emulab.py
# then just change exp below from "helloworld"+n to
# "helloworld"+(n+1) so that the name will be unique.
# You should probably be polite and permanently remove the
# the new exp from emulab when you are done.
# SETUP SOME CONSTANTS
# specify the emulab proj name, this is always 'Seattle'
proj = "Seattle"
# specify the exp name, this is unique for any class assignment
exp = "lantest"
#specify the name of an ns file being used
mynsfn = "hello.ns"
# EXECUTE A BASIC SCENARIO
# read the ns file into a string
mynsfobj = open(mynsfn)
mynsfilestr = mynsfobj.read()
mynsfobj.close()
# check the ns file for errors
(passed,message) = remote_emulab.checkNS(mynsfilestr)
# did the parsing fail?
if (not passed):
print message
print "checkNS failed, please fix the ns file and try again"
else:
# start a new exp in non-batchmode
print "starting a new exp..."
remote_emulab.startexp(proj,exp,mynsfilestr)
# wait for the exp to go active
# by default times out in 10 minutes
print "exp started, waiting for active..."
remote_emulab.wait_for_active(proj,exp)
print "now active... getting mapping"
mapping = remote_emulab.get_mapping(proj,exp)
print "mapping: "+str(mapping)
  simple_mapping = remote_emulab.get_ips(mapping)  # assumes remote_emulab exposes get_ips
print " got mapping, getting links"
print "links: "+str(remote_emulab.get_links(proj,exp))
# exit this code, go and do your experiment
# when the exp is done we'll swap it out
print "finished exp, swapping out"
#remote_emulab.swapOUT(proj,exp)
print "swaped out"
# Some additional notes.
# Since we did a swap out and not an endexp
# the exp will still exisit in emulab
# we can re run it, or modify it and re run it
| mit | 8,734,794,021,364,859,000 | 25.525 | 63 | 0.719604 | false |
AlexanderSavelyev/rdkit | Contrib/mmpa/indexing.py | 1 | 21045 |
# Copyright (c) 2012, GlaxoSmithKline Research & Development Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of GlaxoSmithKline Research & Development Ltd.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Jameed Hussain, September 2012
from __future__ import print_function
import sys
import re
from rdkit import Chem
from optparse import OptionParser
def heavy_atom_count(smi):
m = Chem.MolFromSmiles(smi)
return m.GetNumAtoms()
def add_to_index(smi,attachments,cmpd_heavy):
result = False
core_size = heavy_atom_count(smi) - attachments
if(use_ratio):
core_ratio = float(core_size) / float(cmpd_heavy)
if(core_ratio <= ratio ):
result = True
else:
if(core_size <= max_size):
result = True
return result
def get_symmetry_class(smi):
symmetry = []
m = Chem.MolFromSmiles(smi)
#determine the symmetry class
#see: http://www.mail-archive.com/[email protected]/msg01894.html
#A thanks to Greg (and Alan)
Chem.AssignStereochemistry(m,cleanIt=True,force=True,flagPossibleStereoCenters=True)
    #get the symmetry class of the attachment points
#Note: 1st star is the zero index,
#2nd star is first index, etc
for atom in m.GetAtoms():
if(atom.GetMass() == 0):
symmetry.append(atom.GetProp('_CIPRank'))
return symmetry
def cansmirk(lhs,rhs,context):
#cansmirk algorithm
#1) cansmi the LHS.
#2) For the LHS the 1st star will have label 1, 2nd star will have label 2 and so on
#3) Do a symmetry check of lhs and rhs and use that to decide if the labels on
# RHS or/and context need to change.
    #4) For the rhs, if you have a choice (ie. two attachment points are symmetrically
    # equivalent), always put the label with lower numerical value on the earlier
    # attachment point on the cansmi-ed smiles
#print "in: %s,%s" % (lhs,rhs)
isotope_track={}
#if the star count of lhs/context/rhs is 1, single cut
stars = lhs.count("*")
if(stars > 1):
#get the symmetry class of stars of lhs and rhs
lhs_sym = get_symmetry_class(lhs)
rhs_sym = get_symmetry_class(rhs)
#deal with double cuts
if(stars == 2):
#simple cases
#unsymmetric lhs and unsymmetric rhs
if( (lhs_sym[0] != lhs_sym[1]) and (rhs_sym[0] != rhs_sym[1]) ):
#get 1st and 2nd labels and store the new label for it in isotope_track
#structure: isotope_track[old_label]=new_label (as strings)
isotope_track = build_track_dictionary(lhs,stars)
#switch labels using isotope track
lhs = switch_labels_on_position(lhs)
rhs = switch_labels(isotope_track,stars,rhs)
context = switch_labels(isotope_track,stars,context)
#symmetric lhs and symmetric rhs
elif( (lhs_sym[0] == lhs_sym[1]) and (rhs_sym[0] == rhs_sym[1]) ):
#the points are all equivalent so change labels on lhs and rhs based on position
#labels on context don't need to change
lhs = switch_labels_on_position(lhs)
rhs = switch_labels_on_position(rhs)
#more difficult cases..
#symmetric lhs and unsymmetric rhs
elif( (lhs_sym[0] == lhs_sym[1]) and (rhs_sym[0] != rhs_sym[1]) ):
#switch labels lhs based on position
lhs = switch_labels_on_position(lhs)
#change labels on rhs based on position but need to record
                #the changes as need to apply them to the context
isotope_track = build_track_dictionary(rhs,stars)
rhs = switch_labels_on_position(rhs)
context = switch_labels(isotope_track,stars,context)
#unsymmetric lhs and symmetric rhs
elif( (lhs_sym[0] != lhs_sym[1]) and (rhs_sym[0] == rhs_sym[1]) ):
#change labels on lhs based on position but need to record
                #the changes as need to apply them to the context
isotope_track = build_track_dictionary(lhs,stars)
lhs = switch_labels_on_position(lhs)
context = switch_labels(isotope_track,stars,context)
#as rhs is symmetric, positions are equivalent so change labels on position
rhs = switch_labels_on_position(rhs)
#deal with triple cut
#unwieldy code but most readable I can make it
elif(stars == 3):
#simple cases
#completely symmetric lhs and completely symmetric rhs
if( ( (lhs_sym[0] == lhs_sym[1]) and (lhs_sym[1] == lhs_sym[2]) and (lhs_sym[0] == lhs_sym[2]) ) and
( (rhs_sym[0] == rhs_sym[1]) and (rhs_sym[1] == rhs_sym[2]) and (rhs_sym[0] == rhs_sym[2]) ) ):
#the points are all equivalent so change labels on lhs and rhs based on position
#labels on context don't need to change
lhs = switch_labels_on_position(lhs)
rhs = switch_labels_on_position(rhs)
#completely symmetric lhs and completely unsymmetric rhs
elif( ( (lhs_sym[0] == lhs_sym[1]) and (lhs_sym[1] == lhs_sym[2]) and (lhs_sym[0] == lhs_sym[2]) ) and
( (rhs_sym[0] != rhs_sym[1]) and (rhs_sym[1] != rhs_sym[2]) and (rhs_sym[0] != rhs_sym[2]) ) ):
#alter lhs in usual way
lhs = switch_labels_on_position(lhs)
#change labels on rhs based on position but need to record
                #the changes as need to apply them to the context
isotope_track = build_track_dictionary(rhs,stars)
rhs = switch_labels_on_position(rhs)
context = switch_labels(isotope_track,stars,context)
#completely unsymmetric lhs and completely unsymmetric rhs
elif( ( (lhs_sym[0] != lhs_sym[1]) and (lhs_sym[1] != lhs_sym[2]) and (lhs_sym[0] != lhs_sym[2]) ) and
( (rhs_sym[0] != rhs_sym[1]) and (rhs_sym[1] != rhs_sym[2]) and (rhs_sym[0] != rhs_sym[2]) ) ):
#build the isotope track
isotope_track = build_track_dictionary(lhs,stars)
#alter lhs in usual way
lhs = switch_labels_on_position(lhs)
#change rhs and context based on isotope_track
rhs = switch_labels(isotope_track,stars,rhs)
context = switch_labels(isotope_track,stars,context)
#completely unsymmetric lhs and completely symmetric rhs
elif( ( (lhs_sym[0] != lhs_sym[1]) and (lhs_sym[1] != lhs_sym[2]) and (lhs_sym[0] != lhs_sym[2]) ) and
( (rhs_sym[0] == rhs_sym[1]) and (rhs_sym[1] == rhs_sym[2]) and (rhs_sym[0] == rhs_sym[2]) ) ):
                #build isotope track on lhs
isotope_track = build_track_dictionary(lhs,stars)
#alter lhs in usual way
lhs = switch_labels_on_position(lhs)
#change labels on context
context = switch_labels(isotope_track,stars,context)
#all positions on rhs equivalent so add labels on position
rhs = switch_labels_on_position(rhs)
#more difficult cases, partial symmetry
#completely unsymmetric on lhs and partial symmetry on rhs
elif( (lhs_sym[0] != lhs_sym[1]) and (lhs_sym[1] != lhs_sym[2]) and (lhs_sym[0] != lhs_sym[2]) ):
#build the isotope track
isotope_track = build_track_dictionary(lhs,stars)
#alter lhs in usual way
lhs = switch_labels_on_position(lhs)
#change rhs and context based on isotope_track
rhs = switch_labels(isotope_track,stars,rhs)
context = switch_labels(isotope_track,stars,context)
#tweak positions on rhs based on symmetry
#rhs 1,2 equivalent
if(rhs_sym[0] == rhs_sym[1]):
#tweak rhs position 1 and 2 as they are symmetric
rhs = switch_specific_labels_on_symmetry(rhs,rhs_sym,1,2)
#rhs 2,3 equivalent
elif(rhs_sym[1] == rhs_sym[2]):
#tweak rhs position 1 and 2 as they are symmetric
rhs = switch_specific_labels_on_symmetry(rhs,rhs_sym,2,3)
#rhs 1,3 equivalent - try for larger set in future
elif(rhs_sym[0] == rhs_sym[2]):
#tweak rhs position 1 and 2 as they are symmetric
rhs = switch_specific_labels_on_symmetry(rhs,rhs_sym,1,3)
#now we are left with things with partial symmetry on lhs and not completely symmetric or unsymmetric on rhs
else:
#lhs 1,2,3 equivalent and any sort of partial symmetry on rhs
if( (lhs_sym[0] == lhs_sym[1]) and (lhs_sym[1] == lhs_sym[2]) and (lhs_sym[0] == lhs_sym[2]) ):
#alter lhs in usual way
lhs = switch_labels_on_position(lhs)
#change labels on rhs based on position but need to record
                    #the changes as need to apply them to the context
isotope_track = build_track_dictionary(rhs,stars)
rhs = switch_labels_on_position(rhs)
context = switch_labels(isotope_track,stars,context)
#now deal partial symmetry on lhs or rhs.
#Cases where:
#lhs 1,2 equivalent
#lhs 2,3 equivalent
#lhs 1,3 equivalent
else:
#build isotope track on lhs
isotope_track = build_track_dictionary(lhs,stars)
#alter lhs in usual way
lhs = switch_labels_on_position(lhs)
#change rhs and context based on isotope_track
rhs = switch_labels(isotope_track,stars,rhs)
context = switch_labels(isotope_track,stars,context)
#tweak positions on rhs based on symmetry
#lhs 1,2 equivalent
if(lhs_sym[0] == lhs_sym[1]):
#tweak rhs position 1 and 2 as they are symmetric on lhs
rhs = switch_specific_labels_on_symmetry(rhs,rhs_sym,1,2)
#lhs 2,3 equivalent
elif(lhs_sym[1] == lhs_sym[2]):
#tweak rhs position 1 and 2 as they are symmetric on lhs
rhs = switch_specific_labels_on_symmetry(rhs,rhs_sym,2,3)
#lhs 1,3 equivalent - try for larger set in future
elif(lhs_sym[0] == lhs_sym[2]):
#tweak rhs position 1 and 2 as they are symmetric on lhs
rhs = switch_specific_labels_on_symmetry(rhs,rhs_sym,1,3)
smirk = "%s>>%s" % (lhs,rhs)
return smirk,context
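# Illustrative example (not from the original script): for a single cut the
# labels are left untouched and the context passes through unchanged.
#
#   cansmirk('[*:1]C', '[*:1]N', '[*:1]c1ccccc1')
#   # -> ('[*:1]C>>[*:1]N', '[*:1]c1ccccc1')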
def switch_specific_labels_on_symmetry(smi,symmetry_class,a,b):
#check if a and b positions are symmetrically equivalent
#if equivalent, swap labels if the lower numerical label is not on the
#1st symmetrically equivalent attachment points in the smi
if(symmetry_class[a-1] == symmetry_class[b-1]):
#what are the labels on a and b
matchObj = re.search( r'\[\*\:([123])\].*\[\*\:([123])\].*\[\*\:([123])\]', smi )
if matchObj:
#if the higher label comes first, fix
if(int(matchObj.group(a)) > int(matchObj.group(b))):
#if(int(matchObj.group(1)) > int(matchObj.group(2))):
smi = re.sub(r'\[\*\:'+matchObj.group(a)+'\]', '[*:XX' + matchObj.group(b) + 'XX]' , smi)
smi = re.sub(r'\[\*\:'+matchObj.group(b)+'\]', '[*:XX' + matchObj.group(a) + 'XX]' , smi)
smi = re.sub('XX', '' , smi)
return smi
def switch_labels_on_position(smi):
#move the labels in order of position
smi = re.sub(r'\[\*\:[123]\]', '[*:XX1XX]' , smi, 1)
smi = re.sub(r'\[\*\:[123]\]', '[*:XX2XX]' , smi, 1)
smi = re.sub(r'\[\*\:[123]\]', '[*:XX3XX]' , smi, 1)
smi = re.sub('XX', '' , smi)
return smi
def switch_labels(track,stars,smi):
#switch labels based on the input dictionary track
if(stars > 1):
#for k in track:
# print "old: %s, new: %s" % (k,track[k])
if(track['1'] != '1'):
smi = re.sub(r'\[\*\:1\]', '[*:XX' + track['1'] + 'XX]' , smi)
if(track['2'] != '2'):
smi = re.sub(r'\[\*\:2\]', '[*:XX' + track['2'] + 'XX]' , smi)
if(stars == 3):
if(track['3'] != '3'):
smi = re.sub(r'\[\*\:3\]', '[*:XX' + track['3'] + 'XX]' , smi)
#now remove the XX
smi = re.sub('XX', '' , smi)
return smi
def build_track_dictionary(smi,stars):
isotope_track = {}
#find 1st label, record it in isotope_track as key, with value being the
#new label based on its position (1st star is 1, 2nd star 2 etc.)
if(stars ==2):
matchObj = re.search( r'\[\*\:([123])\].*\[\*\:([123])\]', smi )
if matchObj:
isotope_track[matchObj.group(1)] = '1'
isotope_track[matchObj.group(2)] = '2'
elif(stars ==3):
matchObj = re.search( r'\[\*\:([123])\].*\[\*\:([123])\].*\[\*\:([123])\]', smi )
if matchObj:
isotope_track[matchObj.group(1)] = '1'
isotope_track[matchObj.group(2)] = '2'
isotope_track[matchObj.group(3)] = '3'
return isotope_track
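# Illustrative example (not from the original script): for a double cut where
# the second star in the SMILES happens to carry label 1,
#
#   build_track_dictionary('[*:2]CCO[*:1]', 2)  # -> {'2': '1', '1': '2'}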
def index_hydrogen_change():
#Algorithm details
#have an index of common fragment(key) => fragments conected to it (values)
#Need to add *-H to the values where appropriate - and its
#appropriate when the key is what you would get if you chopped a H off a cmpd.
#Therefore simply need to check if key with the * replaced with a H is
#the same as any full smiles in the set
#
#Specific details:
#1) Loop through keys of index
#2) If key is the result of a single cut (so contains only 1 *) replace the * with H, and cansmi
#3) If full smiles matches key in hash above, add *-H to that fragment index.
for key in index:
attachments = key.count('*')
#print attachments
if(attachments==1):
smi = key
#simple method
smi = re.sub(r'\[\*\:1\]', '[H]' , smi)
#now cansmi it
temp = Chem.MolFromSmiles(smi)
if(temp == None):
sys.stderr.write('Error with key: %s, Added H: %s\n' %(key,smi) )
else:
c_smi = Chem.MolToSmiles( temp, isomericSmiles=True )
if(c_smi in smi_to_id):
core = "[*:1][H]"
id = smi_to_id[c_smi]
value = "%s;t%s" % (id,core)
#add to index
index[key].append(value)
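# Illustrative note (not from the original script): if the index holds the
# single-cut key "[*:1]c1ccccc1" and plain benzene ("c1ccccc1") was one of the
# input molecules, the value "<benzene id>;t[*:1][H]" gets appended to that key,
# so hydrogen-for-fragment replacements are also captured as pairs.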
if __name__=='__main__':
#note max heavy atom count does not
#include the attachement points (*)
max_size = 10
ratio = 0.3
use_ratio = False
index={}
smi_to_id={}
id_to_smi={}
id_to_heavy={}
#set up the command line options
#parser = OptionParser()
parser = OptionParser(description="Program to generate MMPs")
parser.add_option('-s', '--symmetric', default=False, action='store_true', dest='sym',
help='Output symmetrically equivalent MMPs, i.e output both cmpd1,cmpd2, SMIRKS:A>>B and cmpd2,cmpd1, SMIRKS:B>>A')
parser.add_option('-m','--maxsize',action='store', dest='maxsize', type='int',
help='Maximum size of change (in heavy atoms) allowed in matched molecular pairs identified. DEFAULT=10. \
Note: This option overrides the ratio option if both are specified.')
parser.add_option('-r','--ratio',action='store', dest='ratio', type='float',
help='Maximum ratio of change allowed in matched molecular pairs identified. The ratio is: size of change / \
size of cmpd (in terms of heavy atoms). DEFAULT=0.3. Note: If this option is used with the maxsize option, the maxsize option will be used.')
#parse the command line options
(options, args) = parser.parse_args()
#print options
if(options.maxsize != None):
max_size = options.maxsize
elif(options.ratio != None):
ratio = options.ratio
if(ratio >= 1):
print("Ratio specified: %s. Ratio needs to be less than 1.")
sys.exit(1)
use_ratio = True
#read the STDIN
for line in sys.stdin:
line = line.rstrip()
smi,id,core,context = line.split(',')
#fill in dictionaries
smi_to_id[smi]=id
id_to_smi[id]=smi
#if using the ratio option, check if heavy atom
#of mol already calculated. If not, calculate and store
cmpd_heavy = None
if(use_ratio):
if( (id in id_to_heavy) == False):
id_to_heavy[id] = heavy_atom_count(smi)
cmpd_heavy = id_to_heavy[id]
#deal with cmpds that have not been fragmented
if(len(core) == 0) and (len(context) == 0):
continue
#deal with single cuts
if(len(core) == 0):
side_chains = context.split('.')
            #minus 1 for the attachment pt
if( add_to_index(side_chains[1],1,cmpd_heavy)==True ):
context = side_chains[0]
core = side_chains[1]
value = "%s;t%s" % (id,core)
#add the array if no key exists
#add the context with id to index
index.setdefault(context, []).append(value)
            #minus 1 for the attachment pt
if( add_to_index(side_chains[0],1,cmpd_heavy)==True ):
context = side_chains[1]
core = side_chains[0]
value = "%s;t%s" % (id,core)
#add the array if no key exists
#add the context with id to index
index.setdefault(context, []).append(value)
#double or triple cut
else:
attachments = core.count('*')
if( add_to_index(core,attachments,cmpd_heavy)==True ):
value = "%s;t%s" % (id,core)
#add the array if no key exists
#add the context with id to index
index.setdefault(context, []).append(value)
#index the H change
index_hydrogen_change()
#Now index is ready
#loop through the index
for key in index:
total = len(index[key])
#check if have more than one value
if(total == 1):
continue
for xa in xrange(total):
for xb in xrange(xa, total):
if(xa != xb):
#now generate the pairs
id_a,core_a = index[key][xa].split(";t")
id_b,core_b = index[key][xb].split(";t")
#make sure pairs are not same molecule
if(id_a != id_b):
#make sure LHS and RHS of SMIRKS are not the same
if(core_a != core_b):
smirks,context = cansmirk(core_a,core_b,key)
print("%s,%s,%s,%s,%s,%s" % ( id_to_smi[id_a], id_to_smi[id_b], id_a, id_b, smirks, context ))
#deal with symmetry switch
if(options.sym == True):
smirks,context = cansmirk(core_b,core_a,key)
print("%s,%s,%s,%s,%s,%s" % ( id_to_smi[id_b], id_to_smi[id_a], id_b, id_a, smirks, context ))
| bsd-3-clause | 3,132,523,979,875,397,600 | 39.627413 | 163 | 0.5665 | false |
rahul-ramadas/BagOfTricks | InsertMarkdownLink.py | 1 | 1987 |
import sublime
import sublime_plugin
MARKDOWN_LINK_SNIPPET = "[${{1:{}}}](${{2:{}}})"
class InsertMarkdownLinkCommand(sublime_plugin.TextCommand):
def decode_page(self, page_bytes, potential_encoding=None):
if potential_encoding:
try:
text = page_bytes.decode(potential_encoding)
return text
except:
pass
encodings_to_try = ["utf-8", "iso-8859-1"]
for encoding in encodings_to_try:
if encoding == potential_encoding:
continue
try:
text = page_bytes.decode(encoding)
return text
except:
pass
        raise UnicodeDecodeError(
            "unknown", page_bytes, 0, len(page_bytes),
            "could not decode page with any attempted encoding")
def run(self, edit):
import re
def on_done(link):
import urllib.request
request = urllib.request.Request(link, headers={'User-Agent' : 'Google Internal-Only Browser'})
with urllib.request.urlopen(request) as page:
encoding = page.headers.get_content_charset()
text = self.decode_page(page.read(), encoding)
match = re.search("<title>(.+?)</title>", text, re.IGNORECASE | re.DOTALL)
if match is None:
title = link
else:
title = match.group(1).strip()
markdown_link = MARKDOWN_LINK_SNIPPET.format(title, link)
self.view.run_command("insert_snippet", {"contents": markdown_link})
clipboard_text = sublime.get_clipboard(2000)
if re.match("https?://", clipboard_text, re.IGNORECASE) is not None:
initial_text = clipboard_text
else:
initial_text = ""
input_view = self.view.window().show_input_panel("Link", initial_text, on_done, None, None)
input_view.sel().clear()
input_view.sel().add(sublime.Region(0, input_view.size()))
| unlicense | -25,537,689,219,243,780 | 32.859649 | 107 | 0.535984 | false |
djaodjin/djaodjin-signup | signup/serializers.py | 1 | 13054 |
# Copyright (c) 2021, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from django.core import validators
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
import phonenumbers
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from .models import Activity, Contact, Notification
from .utils import get_account_model, has_invalid_password
from .validators import (validate_email_or_phone,
validate_username_or_email_or_phone)
LOGGER = logging.getLogger(__name__)
class PhoneField(serializers.CharField):
def to_internal_value(self, data):
"""
Returns a formatted phone number as a string.
"""
if self.required:
try:
phone_number = phonenumbers.parse(data, None)
except phonenumbers.NumberParseException as err:
LOGGER.info("tel %s:%s", data, err)
phone_number = None
if not phone_number:
try:
phone_number = phonenumbers.parse(data, "US")
except phonenumbers.NumberParseException:
raise ValidationError(self.error_messages['invalid'])
if phone_number and not phonenumbers.is_valid_number(phone_number):
raise ValidationError(self.error_messages['invalid'])
return phonenumbers.format_number(
phone_number, phonenumbers.PhoneNumberFormat.E164)
return None
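# Illustrative note (not part of the original module): when the field is
# required, free-form input is parsed with phonenumbers (falling back to the
# "US" region when no country code is given) and returned in E.164 form, e.g.
# a 10-digit US number comes back as "+1XXXXXXXXXX"; numbers that fail
# phonenumbers validation raise a ValidationError.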
class CommField(serializers.CharField):
"""
Either an e-mail address or a phone number
"""
default_error_messages = {
'invalid': _('Enter a valid email address or phone number.')
}
def __init__(self, **kwargs):
super(CommField, self).__init__(**kwargs)
self.validators.append(validate_email_or_phone)
class UsernameOrCommField(serializers.CharField):
"""
Either a username, e-mail address or a phone number
"""
default_error_messages = {
'invalid': _('Enter a valid username, email address or phone number.')
}
def __init__(self, **kwargs):
super(UsernameOrCommField, self).__init__(**kwargs)
self.validators.append(validate_username_or_email_or_phone)
class NoModelSerializer(serializers.Serializer):
def create(self, validated_data):
raise RuntimeError('`create()` should not be called.')
def update(self, instance, validated_data):
raise RuntimeError('`update()` should not be called.')
class ActivateUserSerializer(serializers.ModelSerializer):
username = serializers.CharField(required=False,
help_text=_("Username to identify the account"))
new_password = serializers.CharField(required=False, write_only=True,
style={'input_type': 'password'}, help_text=_("Password with which"\
" a user can authenticate with the service"))
full_name = serializers.CharField(required=False,
help_text=_("Full name (effectively first name followed by last name)"))
class Meta:
model = get_user_model()
fields = ('username', 'new_password', 'full_name')
class ActivitySerializer(serializers.ModelSerializer):
account = serializers.SlugRelatedField(allow_null=True,
slug_field='slug', queryset=get_account_model().objects.all(),
help_text=_("Account the activity is associated to"))
created_by = serializers.SlugRelatedField(
read_only=True, slug_field='username',
help_text=_("User that created the activity"))
class Meta:
model = Activity
fields = ('created_at', 'created_by', 'text', 'account')
read_only_fields = ('created_at', 'created_by')
class AuthenticatedUserPasswordSerializer(NoModelSerializer):
password = serializers.CharField(write_only=True,
style={'input_type': 'password'},
help_text=_("Password of the user making the HTTP request"))
class Meta:
fields = ('password',)
class APIKeysSerializer(NoModelSerializer):
"""
username and password for authentication through API.
"""
secret = serializers.CharField(max_length=128, read_only=True,
help_text=_("Secret API Key used to authenticate user on every HTTP"\
" request"))
class Meta:
fields = ('secret',)
class PublicKeySerializer(AuthenticatedUserPasswordSerializer):
"""
Updates a user public key
"""
pubkey = serializers.CharField(max_length=500,
style={'input_type': 'password'},
help_text=_("New public key for the user referenced in the URL"))
class ContactSerializer(serializers.ModelSerializer):
"""
This serializer is used in lists and other places where a Contact/User
profile is referenced.
For a detailed profile, see `ContactDetailSerializer`.
"""
printable_name = serializers.CharField(
help_text=_("Printable name"), read_only=True)
credentials = serializers.SerializerMethodField(read_only=True,
help_text=_("True if the user has valid login credentials"))
class Meta:
model = Contact
fields = ('slug', 'printable_name', 'picture', 'email', 'created_at',
'credentials',)
read_only_fields = ('slug', 'printable_name', 'created_at',
'credentials',)
@staticmethod
def get_credentials(obj):
return (not has_invalid_password(obj.user)) if obj.user else False
class ContactDetailSerializer(ContactSerializer):
"""
This serializer is used in APIs where a single Contact/User
profile is returned.
For a summary profile, see `ContactSerializer`.
"""
activities = ActivitySerializer(many=True, read_only=True)
class Meta(ContactSerializer.Meta):
fields = ContactSerializer.Meta.fields + ('phone',
'full_name', 'nick_name', 'lang', 'extra', 'activities',)
read_only_fields = ContactSerializer.Meta.read_only_fields + (
'activities',)
class StringListField(serializers.ListField):
child = serializers.CharField()
class NotificationsSerializer(serializers.ModelSerializer):
notifications = StringListField(allow_empty=True,
help_text=_("List of notifications from %s") %
', '.join([item[0] for item in Notification.NOTIFICATION_TYPE]))
class Meta:
model = get_user_model()
fields = ('notifications',)
class CredentialsSerializer(NoModelSerializer):
"""
username and password for authentication through API.
"""
username = UsernameOrCommField(
help_text=_("Username, e-mail address or phone number to identify"\
" the account"))
password = serializers.CharField(write_only=True,
style={'input_type': 'password'},
help_text=_("Secret password for the account"))
code = serializers.IntegerField(required=False, write_only=True,
style={'input_type': 'password'},
help_text=_("One-time code. This field will be checked against"\
" an expected code when multi-factor authentication (MFA)"\
" is enabled."))
class CreateUserSerializer(serializers.ModelSerializer):
username = serializers.CharField(required=False,
help_text=_("Username to identify the account"))
password = serializers.CharField(required=False, write_only=True,
style={'input_type': 'password'}, help_text=_("Password with which"\
" a user can authenticate with the service"))
email = serializers.EmailField(
help_text=_("Primary e-mail to contact user"), required=False)
phone = PhoneField(
help_text=_("Primary phone number to contact user"), required=False)
full_name = serializers.CharField(
help_text=_("Full name (effectively first name followed by last name)"))
lang = serializers.CharField(
help_text=_("Preferred communication language"), required=False)
class Meta:
model = get_user_model()
fields = ('username', 'password', 'email', 'phone', 'full_name', 'lang')
class PasswordResetConfirmSerializer(NoModelSerializer):
new_password = serializers.CharField(write_only=True,
style={'input_type': 'password'},
help_text=_("New password for the user referenced in the URL"))
class PasswordChangeSerializer(PasswordResetConfirmSerializer):
password = serializers.CharField(write_only=True,
style={'input_type': 'password'},
help_text=_("Password of the user making the HTTP request"))
class PasswordResetSerializer(NoModelSerializer):
"""
Serializer to send an e-mail to a user in order to recover her account.
"""
email = CommField(
help_text=_("Email or phone number to recover the account"))
class TokenSerializer(NoModelSerializer):
"""
token to verify or refresh.
"""
token = serializers.CharField(
help_text=_("Token used to authenticate user on every HTTP request"))
class ValidationErrorSerializer(NoModelSerializer):
"""
Details on why token is invalid.
"""
detail = serializers.CharField(help_text=_("Describes the reason for"\
" the error in plain text"))
class UploadBlobSerializer(NoModelSerializer):
"""
Upload a picture or other POD content
"""
location = serializers.URLField(
help_text=_("URL to uploaded content"))
class UserSerializer(serializers.ModelSerializer):
"""
This serializer is a substitute for `ContactSerializer` whose intent is to
facilitate composition of this App with other Django Apps which references
a `django.contrib.auth.User model`. It is not used in this App.
XXX currently used in `api.auth.JWTBase` for payloads.
"""
# Only way I found out to remove the ``UniqueValidator``. We are not
# interested to create new instances here.
slug = serializers.CharField(source='username', validators=[
validators.RegexValidator(r'^[\w.@+-]+$', _("Enter a valid username."),
'invalid')],
help_text=_("Username"))
printable_name = serializers.CharField(source='get_full_name',
help_text=_("Full name"))
picture = serializers.SerializerMethodField(read_only=True,
help_text=_("Picture"))
email = serializers.EmailField(
help_text=_("Primary e-mail to contact user"), required=False)
phone = PhoneField(
help_text=_("Primary phone number to contact user"), required=False)
created_at = serializers.DateTimeField(source='date_joined',
help_text=_("date at which the account was created"))
credentials = serializers.SerializerMethodField(read_only=True,
help_text=_("True if the user has valid login credentials"))
# XXX username and full_name are duplicates of slug and printable_name
# respectively. They are still included in this version for backward
# compatibility.
username = serializers.CharField(validators=[
validators.RegexValidator(r'^[\w.@+-]+$', _("Enter a valid username."),
'invalid')],
help_text=_("Username"))
full_name = serializers.CharField(source='get_full_name',
help_text=_("Full name"))
class Meta:
model = get_user_model()
fields = ('slug', 'printable_name', 'picture', 'email', 'phone',
'created_at', 'credentials', 'username', 'full_name')
read_only_fields = ('slug', 'printable_name', 'created_at',
'credentials',)
@staticmethod
def get_credentials(obj):
return not has_invalid_password(obj)
@staticmethod
def get_picture(obj):
contact = obj.contacts.filter(picture__isnull=False).order_by(
'created_at').first()
if contact:
return contact.picture
return None
| bsd-2-clause | 8,961,634,846,136,696,000 | 36.085227 | 80 | 0.673816 | false |
mariocannistra/radio-astronomy | findsessionrange.py | 1 | 1973 | #!/usr/bin/python
# this source is part of my Hackster.io project: https://www.hackster.io/mariocannistra/radio-astronomy-with-rtl-sdr-raspberrypi-and-amazon-aws-iot-45b617
# this program will determine the overall range of signal strengths received during the whole session.
# this program can be run standalone but is usually run at end of session by doscan.py
# Its output will be stored in 2 files:
# dbminmax.txt and session-overview.png . The first contains two rows of text with just the maximum
# and minimum of the whole session. The second contains a chart of all the min and max values for each of
# the scan files
from glob import glob
import numpy as np
import radioConfig
import subprocess
import os
import datetime
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
globmax = -9000
globmin = 9000
sessmin = np.empty(shape=[0, 1])
sessmax = np.empty(shape=[0, 1])
scantimeline = np.empty(shape=[0, 1])
files_in_dir = sorted(glob("*.csv"))
for fname in files_in_dir:
dbs = np.genfromtxt(fname,dtype='float',delimiter = ',', skip_header=0, skip_footer=0, usecols=(6,),usemask=True)
thismin=dbs.min()
thismax=dbs.max()
scantime=str(fname)[11:17]
print scantime,thismin,thismax
if thismin < globmin:
globmin = thismin
if thismax > globmax:
globmax = thismax
sessmin = np.append(sessmin, thismin)
sessmax = np.append(sessmax, thismax)
scantimeline = np.append(scantimeline, scantime)
mytitle = 'Signal strength range: min %f .. max %f' % (globmin,globmax)
print mytitle
xs = range(len(scantimeline))
plt.plot(xs,sessmin )
plt.plot(xs,sessmax )
plt.xticks(xs,scantimeline,rotation=70)
plt.grid()
plt.title(mytitle)
#plt.show()
plt.savefig('session-overview.png')
sessfile = open("dbminmax.txt", "w")
sessfile.write(str(globmax))
sessfile.write("\n")
sessfile.write(str(globmin))
sessfile.write("\n")
sessfile.close()
| mit | -5,841,826,111,265,500,000 | 28.893939 | 155 | 0.729346 | false |
r39132/airflow | airflow/operators/check_operator.py | 1 | 9963 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from builtins import zip
from builtins import str
from typing import Iterable
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class CheckOperator(BaseOperator):
"""
Performs checks against a db. The ``CheckOperator`` expects
a sql query that will return a single row. Each value on that
first row is evaluated using python ``bool`` casting. If any of the
values return ``False`` the check is failed and errors out.
Note that Python bool casting evals the following as ``False``:
* ``False``
* ``0``
* Empty string (``""``)
* Empty list (``[]``)
* Empty dictionary or set (``{}``)
Given a query like ``SELECT COUNT(*) FROM foo``, it will fail only if
the count ``== 0``. You can craft much more complex query that could,
for instance, check that the table has the same number of rows as
the source table upstream, or that the count of today's partition is
greater than yesterday's partition, or that a set of metrics are less
than 3 standard deviation for the 7 day average.
This operator can be used as a data quality check in your pipeline, and
depending on where you put it in your DAG, you have the choice to
stop the critical path, preventing from
publishing dubious data, or on the side and receive email alerts
without stopping the progress of the DAG.
    Note that this is an abstract class and get_db_hook
    needs to be defined, where get_db_hook is the hook that gets a
    single record from an external source.
:param sql: the sql to be executed. (templated)
:type sql: str
"""
template_fields = ('sql',) # type: Iterable[str]
template_ext = ('.hql', '.sql',) # type: Iterable[str]
ui_color = '#fff7e6'
@apply_defaults
def __init__(
self, sql,
conn_id=None,
*args, **kwargs):
super(CheckOperator, self).__init__(*args, **kwargs)
self.conn_id = conn_id
self.sql = sql
def execute(self, context=None):
self.log.info('Executing SQL check: %s', self.sql)
records = self.get_db_hook().get_first(self.sql)
self.log.info('Record: %s', records)
if not records:
raise AirflowException("The query returned None")
elif not all([bool(r) for r in records]):
exceptstr = "Test failed.\nQuery:\n{q}\nResults:\n{r!s}"
raise AirflowException(exceptstr.format(q=self.sql, r=records))
self.log.info("Success.")
def get_db_hook(self):
return BaseHook.get_hook(conn_id=self.conn_id)
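# Illustrative usage of CheckOperator (hypothetical task in a DAG; not part of the original
# module): fail the pipeline when today's partition of ``my_table`` is empty.
#
#   check = CheckOperator(
#       task_id='my_table_not_empty',
#       sql="SELECT COUNT(*) FROM my_table WHERE ds='{{ ds }}'",
#       conn_id='my_db',
#       dag=dag)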
def _convert_to_float_if_possible(s):
"""
A small helper function to convert a string to a numeric value
if appropriate
:param s: the string to be converted
:type s: str
"""
try:
ret = float(s)
except (ValueError, TypeError):
ret = s
return ret
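# For reference: _convert_to_float_if_possible("1.5") -> 1.5, while non-numeric input is
# returned unchanged, e.g. "abc" -> "abc" and None -> None.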
class ValueCheckOperator(BaseOperator):
"""
Performs a simple value check using sql code.
    Note that this is an abstract class and get_db_hook
    needs to be defined, where get_db_hook is the hook that gets a
    single record from an external source.
:param sql: the sql to be executed. (templated)
:type sql: str
"""
__mapper_args__ = {
'polymorphic_identity': 'ValueCheckOperator'
}
template_fields = ('sql', 'pass_value',) # type: Iterable[str]
template_ext = ('.hql', '.sql',) # type: Iterable[str]
ui_color = '#fff7e6'
@apply_defaults
def __init__(
self, sql, pass_value, tolerance=None,
conn_id=None,
*args, **kwargs):
super(ValueCheckOperator, self).__init__(*args, **kwargs)
self.sql = sql
self.conn_id = conn_id
self.pass_value = str(pass_value)
tol = _convert_to_float_if_possible(tolerance)
self.tol = tol if isinstance(tol, float) else None
self.has_tolerance = self.tol is not None
def execute(self, context=None):
self.log.info('Executing SQL check: %s', self.sql)
records = self.get_db_hook().get_first(self.sql)
if not records:
raise AirflowException("The query returned None")
pass_value_conv = _convert_to_float_if_possible(self.pass_value)
is_numeric_value_check = isinstance(pass_value_conv, float)
tolerance_pct_str = None
if self.tol is not None:
tolerance_pct_str = str(self.tol * 100) + '%'
except_temp = ("Test failed.\nPass value:{pass_value_conv}\n"
"Tolerance:{tolerance_pct_str}\n"
"Query:\n{self.sql}\nResults:\n{records!s}")
if not is_numeric_value_check:
tests = [str(r) == pass_value_conv for r in records]
elif is_numeric_value_check:
try:
num_rec = [float(r) for r in records]
except (ValueError, TypeError):
cvestr = "Converting a result to float failed.\n"
raise AirflowException(cvestr + except_temp.format(**locals()))
if self.has_tolerance:
tests = [
pass_value_conv * (1 - self.tol) <=
r <= pass_value_conv * (1 + self.tol)
for r in num_rec]
else:
tests = [r == pass_value_conv for r in num_rec]
if not all(tests):
raise AirflowException(except_temp.format(**locals()))
def get_db_hook(self):
return BaseHook.get_hook(conn_id=self.conn_id)
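# Illustrative usage of ValueCheckOperator (hypothetical values; not part of the original
# module): assert that a metric equals 100 within a 10% tolerance.
#
#   value_check = ValueCheckOperator(
#       task_id='row_count_close_to_100',
#       sql="SELECT COUNT(*) FROM my_table WHERE ds='{{ ds }}'",
#       pass_value=100,
#       tolerance=0.1,
#       conn_id='my_db',
#       dag=dag)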
class IntervalCheckOperator(BaseOperator):
"""
Checks that the values of metrics given as SQL expressions are within
a certain tolerance of the ones from days_back before.
    Note that this is an abstract class and get_db_hook
    needs to be defined, where get_db_hook is the hook that gets a
    single record from an external source.
:param table: the table name
:type table: str
:param days_back: number of days between ds and the ds we want to check
against. Defaults to 7 days
:type days_back: int
    :param metrics_thresholds: a dictionary of ratios indexed by metrics
:type metrics_threshold: dict
"""
__mapper_args__ = {
'polymorphic_identity': 'IntervalCheckOperator'
}
template_fields = ('sql1', 'sql2') # type: Iterable[str]
template_ext = ('.hql', '.sql',) # type: Iterable[str]
ui_color = '#fff7e6'
@apply_defaults
def __init__(
self, table, metrics_thresholds,
date_filter_column='ds', days_back=-7,
conn_id=None,
*args, **kwargs):
super(IntervalCheckOperator, self).__init__(*args, **kwargs)
self.table = table
self.metrics_thresholds = metrics_thresholds
self.metrics_sorted = sorted(metrics_thresholds.keys())
self.date_filter_column = date_filter_column
self.days_back = -abs(days_back)
self.conn_id = conn_id
sqlexp = ', '.join(self.metrics_sorted)
sqlt = ("SELECT {sqlexp} FROM {table}"
" WHERE {date_filter_column}=").format(**locals())
self.sql1 = sqlt + "'{{ ds }}'"
self.sql2 = sqlt + "'{{ macros.ds_add(ds, " + str(self.days_back) + ") }}'"
def execute(self, context=None):
hook = self.get_db_hook()
self.log.info('Executing SQL check: %s', self.sql2)
row2 = hook.get_first(self.sql2)
self.log.info('Executing SQL check: %s', self.sql1)
row1 = hook.get_first(self.sql1)
if not row2:
raise AirflowException("The query {q} returned None".format(q=self.sql2))
if not row1:
raise AirflowException("The query {q} returned None".format(q=self.sql1))
current = dict(zip(self.metrics_sorted, row1))
reference = dict(zip(self.metrics_sorted, row2))
ratios = {}
test_results = {}
for m in self.metrics_sorted:
if current[m] == 0 or reference[m] == 0:
ratio = None
else:
ratio = float(max(current[m], reference[m])) / \
min(current[m], reference[m])
self.log.info("Ratio for %s: %s \n Ratio threshold : %s", m, ratio, self.metrics_thresholds[m])
ratios[m] = ratio
test_results[m] = ratio < self.metrics_thresholds[m]
if not all(test_results.values()):
failed_tests = [it[0] for it in test_results.items() if not it[1]]
j = len(failed_tests)
n = len(self.metrics_sorted)
self.log.warning("The following %s tests out of %s failed:", j, n)
for k in failed_tests:
self.log.warning(
"'%s' check failed. %s is above %s", k, ratios[k], self.metrics_thresholds[k]
)
raise AirflowException("The following tests have failed:\n {0}".format(", ".join(failed_tests)))
self.log.info("All tests have passed")
def get_db_hook(self):
return BaseHook.get_hook(conn_id=self.conn_id)
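# Illustrative usage of IntervalCheckOperator (hypothetical values; not part of the original
# module): require today's metrics to stay within the given ratios of the values 7 days back.
#
#   interval_check = IntervalCheckOperator(
#       task_id='metrics_vs_last_week',
#       table='fact_orders',
#       metrics_thresholds={'COUNT(*)': 1.5, 'SUM(amount)': 2.0},
#       conn_id='my_db',
#       dag=dag)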
| apache-2.0 | 3,269,989,418,193,832,000 | 37.467181 | 108 | 0.614072 | false |
rwaldron/Espruino | boards/STM32F429IDISCOVERY.py | 1 | 5051 | #!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <[email protected]>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# This file contains information for a specific board - the available pins, and where LEDs,
# Buttons, and other in-built peripherals are. It is used to build documentation as well
# as various source and header files for Espruino.
# ----------------------------------------------------------------------------------------
import pinutils;
info = {
'name' : "STM32 F429 Discovery",
'link' : [ "http://www.st.com/web/catalog/tools/FM116/SC959/SS1532/LN1199/PF259090" ],
'default_console' : "EV_SERIAL1",
'variables' : 5450,
'binary_name' : 'espruino_%v_stm32f429idiscovery.bin',
};
chip = {
'part' : "STM32F429ZIT6",
'family' : "STM32F4",
'package' : "LQFP144",
'ram' : 128,#256,
'flash' : 512, #2048,
'speed' : 168,
'usart' : 6,
'spi' : 3,
'i2c' : 3,
'adc' : 3,
'dac' : 2,
};
# left-right, or top-bottom order
board = {
'left' : [ ], # fixme
'left2' : [ ],
'right2' : [ ],
'right' : [ ],
};
devices = {
'OSC' : { 'pin_1' : 'H0',
'pin_2' : 'H1' },
'OSC_RTC' : { 'pin_1' : 'C14',
'pin_2' : 'C15' },
'LED1' : { 'pin' : 'G13' }, # green
'LED2' : { 'pin' : 'G14' }, # red
'BTN1' : { 'pin' : 'A0' },
'USB' : { 'pin_dm' : 'B14',
'pin_dp' : 'B15',
'pin_vbus' : 'B13',
'pin_id' : 'B12',
'pin_pso' : 'C4', # Power supply enable
'pin_oc' : 'C5', # Overcurrent
},
'MEMS' : { 'device' : 'L3GD20',
'pin_cs' : 'C1',
'pin_int1' : 'A1',
'pin_int2' : 'A2',
'pin_mosi' : 'F9',
'pin_miso' : 'F8',
'pin_sck' : 'F7' },
'TOUCHSCREEN' : {
'pin_irq' : 'A15',
'pin_cs' : '',
'pin_scl' : 'A8',
'pin_sda' : 'C9',
},
'LCD' : {
'width' : 320, 'height' : 240, 'bpp' : 16, 'controller' : 'fsmc', 'controller2' : 'ili9341',
'pin_d0' : 'D6',
'pin_d1' : 'G11',
'pin_d2' : 'G12',
'pin_d3' : 'A3',
'pin_d4' : 'B8',
'pin_d5' : 'B9',
'pin_d6' : 'A6',
'pin_d7' : 'G10',
'pin_d8' : 'B10',
'pin_d9' : 'B11',
'pin_d10' : 'C7',
'pin_d11' : 'D3',
'pin_d12' : 'C10',
'pin_d13' : 'B0',
'pin_d14' : 'A11',
'pin_d15' : 'A12',
'pin_d16' : 'B1',
            'pin_d16' : 'G6', # NOTE: duplicate key - this entry overrides the 'pin_d16' ('B1') defined on the previous line
'pin_rd' : 'D12', # RDX
'pin_wr' : 'D13',# WRQ
'pin_cs' : 'C2', # CSX
'pin_en' : 'F10',
'pin_vsync' : 'A4',
'pin_hsync' : 'C6',
'pin_dotlck' : 'G7',
'pin_dc' : 'F7', # DCX
'pin_sda' : 'F9',
'pin_im0' : 'D2', # pulled to 0
'pin_im1' : 'D4', # pulled to 1
'pin_im2' : 'D5', # pulled to 1
'pin_im3' : 'D7', # pulled to 0
},
'SDRAM' : {
'pin_sdcke1' : 'B5',
'pin_sdne1' : 'B6',
'pin_sdnwe' : 'C0',
'pin_d2' : 'D0',
'pin_d3' : 'D1',
'pin_d13' : 'D8',
'pin_d14' : 'D9',
'pin_d15' : 'D10',
'pin_d0' : 'D14',
'pin_d1' : 'D15',
'pin_nbl0' : 'E0',
'pin_nbl1' : 'E1',
'pin_d4' : 'E7',
'pin_d5' : 'E8',
'pin_d6' : 'E9',
'pin_d7' : 'E10',
'pin_d8' : 'E11',
'pin_d9' : 'E12',
'pin_d10' : 'E13',
'pin_d11' : 'E14',
'pin_d12' : 'E15',
'pin_a0' : 'F0',
'pin_a1' : 'F1',
'pin_a2' : 'F2',
'pin_a3' : 'F3',
'pin_a4' : 'F4',
'pin_a5' : 'F5',
'pin_sdnras' : 'F11',
'pin_a6' : 'F12',
'pin_a7' : 'F13',
'pin_a8' : 'F14',
'pin_a9' : 'F15',
'pin_a10' : 'G0',
'pin_a11' : 'G1',
'pin_ba0' : 'G4',
'pin_ba1' : 'G5',
'pin_sdclk' : 'G8',
'pin_sdncas' : 'G15',
},
};
board_css = """
#board {
width: 680px;
height: 1020px;
left: 200px;
background-image: url(img/STM32F429IDISCOVERY.jpg);
}
#boardcontainer {
height: 1020px;
}
#left {
top: 375px;
right: 590px;
}
#left2 {
top: 375px;
left: 105px;
}
#right {
top: 375px;
left: 550px;
}
#right2 {
top: 375px;
right: 145px;
}
""";
def get_pins():
pins = pinutils.scan_pin_file([], 'stm32f40x.csv', 6, 9, 10)
return pinutils.only_from_package(pinutils.fill_gaps_in_pin_list(pins), chip["package"])
| mpl-2.0 | 6,059,209,135,743,455,000 | 27.061111 | 104 | 0.402891 | false |
TAMU-CPT/galaxy-tools | tools/genome_viz/brigaid.py | 1 | 36126 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AUTHOR
Pedro Cerqueira
github: @pedrorvc
DESCRIPTION
    This script serves to create xml files containing the information necessary
    for the execution of BRIG (Blast Ring Image Generator), reducing the time
    spent performing the tedious task of setting up all the information on the GUI,
    and provides a quick way to produce an image.
    The arguments for this script provide some (but not all)
    of the available options in BRIG, which are the ones I tended to change the most.
USAGE:
brigaid.py -q reference_sequence.fna -rfd path/to/reference/dir -od path/to/output/dir -of path/to/output/dir/output_file
-oi path/to/output/BRIG/output_image -t Image_title -a annotation_file.gbk --genes genes_of_interest.txt
--contig-order contig_order.tsv
"""
import argparse
import csv
import os
import xml.etree.ElementTree as ET
from collections import OrderedDict
from xml.dom import minidom
from Bio import SeqIO
from matplotlib import cm
def listdir_fullpath(path):
""" Gets the full path of the files from a directory
Args:
path (str): full path to a directory
Returns:
list containing the full path of every file contained in the input directory
"""
return [os.path.join(path, f) for f in os.listdir(path)]
def ring_attributes(colour, name, position):
""" Creates ring attributes.
Args:
colour (str): color of the ring.
name (str): name of the ring.
position (str): position of the ring.
Returns:
ring_attrs (dict): attributes of any regular ring of the BRIG xml.
"""
ring_attrs = {"colour" : colour,
"name": name,
"position" : position,
"upperInt" : "90",
"lowerInt" : "70",
"legend" : "yes",
"size" : "30",
"labels" : "no",
"blastType" : "blastn"}
return ring_attrs
def annotation_ring_attributes(position):
""" Creates annotation ring attributes.
Args:
position (str): position of the ring.
Returns:
annotation_ring_attrs (dict): attributes of the annotation ring of the BRIG xml.
"""
annotation_ring_attrs = {"colour" : '172,14,225',
"name": 'null',
"position" : position,
"upperInt" : "70",
"lowerInt" : "50",
"legend" : "yes",
"size" : "30",
"labels" : "no",
"blastType" : "blastn"}
return annotation_ring_attrs
def create_feature_attrs(label, colour, decoration, start, stop):
""" Create attributes for the Feature SubElements of the annotation ring.
Args:
label (str): name of the gene/CDS to annotate
colour (str): colour of the decoration for the annotation
decoration (str): shape of the gene/CDS to annotate, for example, 'clockwise-arrow'
start (str): start of the gene/CDS to annotate
stop (str): stop of the gene/CDS to annotate
    Returns:
feature_element_attrs (dict): attributes of the feature element.
feature_range_element_attrs (dict): attributes of the feature range element
"""
feature_element_attrs = {'label' : label,
'colour' : colour,
'decoration' : decoration}
feature_range_element_attrs = {'start' : start,
'stop' : stop}
return feature_element_attrs, feature_range_element_attrs
def create_annotation_ring_tsv(annotation_ring, annotation_file):
""" Uses a tsv file to annotate the reference genome.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
annotation_file (str): Full path to the file containing annotations for the reference genome.
"""
with open(annotation_file) as tsvfile:
reader = csv.DictReader(tsvfile, dialect='excel-tab')
# Obtain the annotations from the file contents
for row in reader:
start = row['#START']
stop = row['STOP']
label = row['Label']
colour = row['Colour']
decoration = row['Decoration']
# Create xml attributes
feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, colour, decoration, start, stop)
# Create xml elements
feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs)
feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs)
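# Example of the expected tab-delimited annotation file (illustrative values; the header
# names must match the keys read above):
#
#   #START  STOP    Label   Colour  Decoration
#   100     1500    gyrA    black   clockwise-arrow
#   2000    2600    recA    black   counterclockwise-arrow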
def annotation_ring_feature_elements_gbk_concat(annotation_ring, record, genome_size=False):
""" Creates the annotation ring feature elements, using a concatenated Genbank annotation file.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
record (SeqRecord): Object of BioPython containing the information of the input Genbank.
genome_size (bool): Size of genome. Integer when a Genbank divided by contigs is provided.
Boolean (False) when a concatenated Genbank is provided.
"""
#if type(genome_size) == int:
# Obtain the features of the Genbank file records
for fea in record.features:
# Get the start and end position of the genome
# Also get the strand
if fea.type == 'CDS':
start = str(fea.location.start.position)
end = str(fea.location.end.position)
strand = fea.location.strand
# Get the label of the gene or product
if 'gene' in fea.qualifiers:
label = str(fea.qualifiers['gene'][0])
elif 'product' in fea.qualifiers:
product = fea.qualifiers['product'][0]
label = str(product)
else:
continue
# Define the decoration of the annotation based on the strand
if strand == -1:
decoration = 'counterclockwise-arrow'
elif strand == 1:
decoration = 'clockwise-arrow'
# Create xml attributes
feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, "black", decoration, start, end)
# Create xml elements
feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs)
feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs)
# If a genome size is provided, get the size of the records
if type(genome_size) == int:
if fea.type == 'source':
size = fea.location.end.position
try:
size
genome_size += size
return genome_size
except NameError:
pass
def annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, record, genes, genome_size=False):
""" Creates the annotation ring feature elements, using a concatenated Genbank annotation file
and specific gene annotations.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
record (SeqRecord): Object of BioPython containing the information of the input Genbank.
genome_size (bool): Size of genome. Integer when a Genbank divided by contigs is provided.
Boolean (False) when a concatenated Genbank is provided.
"""
for f in record.features:
if f.type == 'CDS':
# Find the 'gene' tag and determine if the gene belongs to the specified genes to be annotated
if 'gene' in f.qualifiers and f.qualifiers['gene'][0] in genes:
label = f.qualifiers['gene'][0]
elif 'product' in f.qualifiers and f.qualifiers['product'][0] in genes:
product = f.qualifiers['product'][0]
label = product
else:
continue
# Determine the start, stop and strand of the gene
start = str(f.location.start.position + genome_size)
end = str(f.location.end.position + genome_size)
strand = f.location.strand
# Define the decoration of the annotation based on the strand
if strand == -1:
decoration = 'counterclockwise-arrow'
elif strand == 1:
decoration = 'clockwise-arrow'
# Create xml attributes
feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, "black", decoration, start, end)
# Create xml elements
feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs)
feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs)
# If a genome size is provided, get the size of the records
if type(genome_size) == int:
if f.type == "source":
size = f.location.end.position
try:
size
genome_size += size
return genome_size
except NameError:
pass
def create_annotation_ring_gbk_concat(annotation_ring, annotation_file, genes_of_interest, records):
""" Create annotation ring using a concatenated Genbank annotation file.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
annotation_file (str): Full path to the file containing annotations for the reference genome.
genes_of_interest (str): Full path to the file containing the genes to search for in the Genbank file.
records (SeqRecord): Object of BioPython containing the information of the input Genbank.
"""
if genes_of_interest != []:
# Get the genes to serach in the Genbank file
with open(genes_of_interest, "r") as f:
genes = f.readlines()
genes = [gene.rstrip() for gene in genes]
# Create feature elements of the annotation ring
for seq_record in records:
annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, seq_record, genes)
else:
for seq_record in records:
annotation_ring_feature_elements_gbk_concat(annotation_ring, seq_record)
def create_annotation_ring_gbk_contigs(annotation_ring, annotation_file, records, genes_of_interest, contig_order):
""" Create annotation ring using a Genbank annotation file divided by contigs.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
annotation_file (str): Full path to the file containing annotations for the reference genome.
genes_of_interest (str): Full path to the file containing the genes to search for in the Genbank file.
records (SeqRecord): Object of BioPython containing the information of the input Genbank.
contig_order (str): Full path to the file containing the order of the contigs.
"""
if contig_order != []:
with open(contig_order) as tsvfile:
reader = csv.DictReader(tsvfile, dialect='excel-tab')
# Create an OrderedDict with the contents of the file
# The keys are the order are a number representing the order of the contig
# The values are the names of the contigs
content_dict = OrderedDict()
for r in reader:
content_dict[r["order"]] = r["contig"]
# Create an OrderedDict with the content of each contig
# The keys are the names of the contigs
# The values are SeqRecord objects from BipPython
seq_records_dict = OrderedDict()
for record in records:
seq_records_dict[record.id] = record
if genes_of_interest != []:
with open(genes_of_interest, "r") as f:
genes = f.readlines()
genes = [gene.rstrip() for gene in genes]
genome_size = 0
for i in range(1, len(records)+1):
ord_record = seq_records_dict[content_dict[str(i)]]
gsize = annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, ord_record, genes, genome_size)
genome_size = gsize
else:
genome_size = 0
for i in range(1, len(records)+1):
ord_record = seq_records_dict[content_dict[str(i)]]
gsize = annotation_ring_feature_elements_gbk_concat(annotation_ring, ord_record, genome_size)
genome_size = gsize
else:
if genes_of_interest != []:
with open(genes_of_interest, "r") as f:
genes = f.readlines()
genes = [gene.rstrip() for gene in genes]
for seq_record in records:
annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, seq_record, genes)
else:
for seq_record in records:
annotation_ring_feature_elements_gbk_concat(annotation_ring, seq_record)
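# Example of the contig order file (tab-delimited, illustrative), matching the "order" and
# "contig" columns read above:
#
#   order   contig
#   1       Contig8
#   2       Contig3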
def write_xml(root_elem, output_file):
""" Writes a xml file.
Args:
root_elem is a ElementTree Element object containing all the information
required for the output file.
output_file (str): full path to the output file
"""
xml_file = ET.tostring(root_elem, encoding='utf8').decode('utf8')
pretty_xml_file = minidom.parseString(xml_file).toprettyxml(indent=' ')
output_file = output_file + ".xml"
with open(output_file, "w") as f:
f.write(pretty_xml_file)
####### Create xml elemnts
# Create root element
def create_root_element(blast_options, legend_position, query_file,
output_folder, image_output_file, title, image_format):
"""
Creates the root element of the xml file and its attributes.
Args:
blast_options (str): additional options for blast, for example, -evalue or num_threads
legend_position (str): position of the legend on the image
query_file (str): full path to the query file
output_folder (str): full path to the output folder
image_output_file (str): full path to the image output file
title (str): title of the output image
image_format (str): format of the image output file
Returns:
root: ElementTree Element object containing the BRIG tag and its attributes
"""
root_attrs = {"blastOptions" : blast_options,
"legendPosition" : legend_position,
"queryFile" : query_file,
"outputFolder" : output_folder,
"blastPlus" : "yes",
"outputFile" : os.path.join(output_folder, image_output_file),
"title" : title,
"imageFormat" : image_format,
"queryFastaFile" : query_file,
"cgXML" : os.path.join(output_folder + "/scratch", os.path.basename(query_file) + ".xml")}
root = ET.Element('BRIG', attrib=root_attrs)
return root
#### Create root children
# Create cgview_settings element
def create_cgview_settings_element(root, height, width):
""" Creates the cgview_settings element of the xml file and its attributes.
Args:
root: ElementTree Element object containing the BRIG tag and its attributes.
height (str): height of the output image in pixels
width (str): width of the output image in pixels
Returns:
cgview_settings: ElementTree SubElement object containing the cgview settings tag and its attributes
"""
cgview_settings_attrs = {"arrowheadLength" : "medium",
"backboneColor" : "black",
"backboneRadius" : "600",
"backboneThickness" : "medium",
"backgroundColor" : "white",
"borderColor" : "black",
"featureSlotSpacing" : "medium",
"featureThickness" : "30",
"giveFeaturePositions" : "false",
"globalLabel" : "true",
"height" : height,
"isLinear" : "false",
"labelFont" : "SansSerif,plain,25",
"labelLineLength" : "medium",
"labelLineThickness" : "medium",
"labelPlacementQuality" : "best",
"labelsToKeep" : "1000",
"longTickColor" : "black",
"minimumFeatureLength" : "medium",
"moveInnerLabelsToOuter" :"true",
"origin" : "12",
"rulerFont" : "SansSerif,plain,35",
"rulerFontColor" : "black",
"rulerPadding" : "40",
"rulerUnits" : "bases",
"shortTickColor" : "black",
"shortTickThickness" : "medium",
"showBorder" : "false",
"showShading" : "true",
"showWarning" : "false",
"tickDensity" : "0.2333",
"tickThickness" : "medium",
"titleFont" : "SansSerif,plain,45",
"titleFontColor" : "black",
"useColoredLabelBackgrounds" : "false",
"useInnerLabels" : "true",
"warningFont" : "Default,plain,35",
"warningFontColor" : "black",
"width" : width,
"zeroTickColor" : "black",
"tickLength" : "medium"}
cgview_settings = ET.SubElement(root, 'cgview_settings', attrib=cgview_settings_attrs)
return cgview_settings
# Create brig_settings element
def create_brig_settings_element(root, java_memory):
""" Creates the brig_settings element of the xml file and its attributes.
Args:
root: ElementTree Element object containing the BRIG tag and its attributes.
java_memory (str): amount of memory (in bytes) java is allowed to use for BRIG
Returns:
brig_settings: ElementTree SubElement object containing the brig settings tag and its attributes
"""
brig_settings_attrs = {"Ring1" : "172,14,225",
"Ring2" : "222,149,220",
"Ring3" : "161,221,231",
"Ring4" : "49,34,221",
"Ring5" : "116,152,226",
"Ring6" : "224,206,38",
"Ring7" : "40,191,140",
"Ring8" : "158,223,139",
"Ring9" : "226,38,122",
"Ring10" :"211,41,77",
"defaultUpper" : "70",
"defaultLower" : "50",
"defaultMinimum" : "50",
"genbankFiles" : "gbk,gb,genbank",
"fastaFiles" : "fna,faa,fas,fasta,fa",
"emblFiles" : "embl",
"blastLocation" : "",
"divider" : "3",
"multiplier" : "3",
"memory" : java_memory,
"defaultSpacer" : "0"}
brig_settings = ET.SubElement(root,
"brig_settings",
attrib=brig_settings_attrs)
return brig_settings
## Create special element
def create_special_element(root):
"""Creates the 'special' element of the xml file and its attributes
Args:
root: ElementTree Element object containing the BRIG tag and its attributes.
Returns:
gc_content_special: ElementTree SubElement object containing the 'special' tag and its attributes
gc_skew_special: ElementTree SubElement object containing the 'special' tag and its attributes
"""
gc_content_special = ET.SubElement(root, 'special', attrib={'value' : 'GC Content'})
gc_skew_special = ET.SubElement(root, 'special', attrib={'value' : 'GC Skew'})
return gc_content_special, gc_skew_special
# Create reference dir element
def create_reference_directory_element(root, reference_directory):
""" Creates the 'reference directory' element of the xml file and its attributes.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
reference_directory (str): full path to the reference directory that contains
the fasta files used to build the rings.
Returns:
ref_file: ElementTree SubElement object containing the 'refFile' tag and its attributes
"""
ref_dir = ET.SubElement(root,
"refDir",
attrib={"location" : reference_directory})
# Obtain the full path for all the files in the directory
ref_dir_list = listdir_fullpath(reference_directory)
for f in ref_dir_list:
ref_file = ET.SubElement(ref_dir,
"refFile",
attrib={"location" : f})
return ref_file
# Create the ring where the annotations are defined
def create_annotation_ring(root, reference_directory, annotation_file, genes_of_interest, contig_order):
""" Creates the ring that will contain the annotations for the reference genome.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
reference_directory (str): full path to the reference directory that contains
the fasta files used to build the rings.
annotation_file (str): Full path to the file containing annotations for the reference genome.
genes_of_interest (str): Full path to the file containing a list of specific genes.
contig_order (str): Full path to the tab-delimited file containing the order of the contigs.
"""
# Determine the position of the annotation ring, which will be the position after the last reference genome
ring_position = len(os.listdir(reference_directory)) + 2
# Create the annotation ring element
annotation_ring = ET.SubElement(root, 'ring', attrib=annotation_ring_attributes(str(ring_position)))
# Check for tab-delimited annotation file input
if list(SeqIO.parse(annotation_file, "genbank")) == []:
create_annotation_ring_tsv(annotation_ring, annotation_file)
else:
# Get the records of the Genbank file
records = [r for r in SeqIO.parse(annotation_file, "genbank")]
### Check if a contig order file has been provided
if len(records) > 1: # If more than 1 record exists, then the Genbank file is divided by contigs
create_annotation_ring_gbk_contigs(annotation_ring, annotation_file, records, genes_of_interest, contig_order)
else:
create_annotation_ring_gbk_concat(annotation_ring, annotation_file, genes_of_interest, records)
## Create remaining rings
def create_ring_element(root, reference_directory, colormap):
""" Creates the ring elements of the xml file, containing the position and color of the rings.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
reference_directory (str): full path to the reference directory that contains
the fasta files used to build the rings.
colormap (str): name of the colormap (available in matplotlib) to use for the color of the rings
Returns:
ring_number_element: ElementTree SubElement object containing the 'ring' tag and its attributes
ring_sequence_element: ElementTree SubElement object containing the 'sequence' tag and its attributes
"""
ref_dir_list = listdir_fullpath(reference_directory)
# Gets the colormap from matplotlib with as many colors as the number of files
cmap = cm.get_cmap(colormap, len(ref_dir_list))
list_colormap = cmap.colors.tolist()
# Remove the fourth element (transparency) because it is not necessary
colors_to_use = []
for l in list_colormap:
convert = [round(x * 255) for x in l]
convert.pop()
colors_to_use.append(convert)
#reversed_colors_to_use = colors_to_use[::-1]
# Check if the user provided an order for the rings
has_digit = [os.path.basename(x).split("_")[0].isdigit() for x in ref_dir_list]
if True in has_digit:
# Obtain the ring positions
ring_positions = [os.path.basename(x).split("_")[0] for x in ref_dir_list]
# Reverse sort the positions of the rings, because they will be created
# in a descending order of their positions
ring_positions.sort(reverse=True)
ref_dir_list.sort(reverse=True)
for ring in range(len(ref_dir_list)):
# The ring positions start at 2 due to the special rings (GC Content and GC Skew)
ring_position = int(ring_positions[ring]) + 1
# Select a color for the ring
ring_color = ",".join([str(e) for e in colors_to_use[ring]])
# Define the name of the ring
ring_name = os.path.basename(ref_dir_list[ring]).split("_")[1]
# Create the xml elements
ring_number_element = ET.SubElement(root,
'ring',
ring_attributes(ring_color, ring_name, str(ring_position)))
ring_sequence_element = ET.SubElement(ring_number_element,
"sequence",
attrib={"location" : ref_dir_list[ring]})
else:
# Sort files by lowercase
ref_dir_list.sort(key=lambda y: y.lower())
# The number of rings starts at 2 due to the GC Content and GC Skew
ring_number = len(ref_dir_list) + 1
for ring in range(len(ref_dir_list)):
# Select a color for the ring
ring_color = ",".join([str(e) for e in colors_to_use[ring]])
# Define the name of the ring
ring_name = os.path.basename(ref_dir_list[ring]).split("_")[0]
# Create the xml elements
ring_number_element = ET.SubElement(root,
'ring',
ring_attributes(ring_color, ring_name, str(ring_number)))
ring_sequence_element = ET.SubElement(ring_number_element,
"sequence",
attrib={"location" : ref_dir_list[ring]})
ring_number -= 1
return ring_number_element, ring_sequence_element
## Create special rings
def create_special_ring_element(root):
""" Create the 'special' ring element and its attributes.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
Returns:
gc_content_location: ElementTree SubElement object containing the 'sequence' tag and its attributes
gc_skew_location: ElementTree SubElement object containing the 'sequence' tag and its attributes
"""
# Create ring attributes
gc_content_ring_attrs = ring_attributes('225,0,0', "GC Content", "0")
gc_skew_ring_attrs = ring_attributes('225,0,0', "GC Skew", "1")
# Add ring element to root
gc_skew_ring = ET.SubElement(root, 'ring', attrib=gc_skew_ring_attrs)
gc_content_ring = ET.SubElement(root, 'ring', attrib=gc_content_ring_attrs)
# Add sequence element to ring
gc_content_location = ET.SubElement(gc_content_ring, 'sequence', attrib={'location' : 'GC Content'})
gc_skew_location = ET.SubElement(gc_skew_ring, 'sequence', attrib={'location' : 'GC Skew'})
return gc_content_location, gc_skew_location
def main(query_file, reference_directory, output_folder, output_xml, image_output_file, title, annotation_file,
genes_of_interest, contig_order, blast_options, legend_position, image_format, height, width, java_memory, colormap):
root = create_root_element(blast_options, legend_position, query_file,
output_folder, image_output_file, title, image_format)
cgview_settings = create_cgview_settings_element(root, height, width)
brig_settings = create_brig_settings_element(root, java_memory)
special = create_special_element(root)
refdir = create_reference_directory_element(root, reference_directory)
if annotation_file:
create_annotation_ring(root, reference_directory, annotation_file, genes_of_interest, contig_order)
rings = create_ring_element(root, reference_directory, colormap)
special_ring = create_special_ring_element(root)
write_xml(root, output_xml)
print("\n File written to {}".format(output_xml))
def parse_arguments():
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-q', '--query', type=str, required=True, dest='query_file',
help='Path to the query/reference FASTA file.')
parser.add_argument('-rfd', '--ref_dir', type=str, required=True, dest='reference_directory',
help='Path to the directory where the FASTA files to compare against the reference are located.')
parser.add_argument('-od', '--out_dir', type=str, required=True, dest='output_folder',
help='Path to the output directory for the results of BRIG.')
parser.add_argument('-of', '--out_xml', type=str, required=True, dest='output_file',
help='Path to the output of this script.')
parser.add_argument('-oi', '--out_img', type=str, required=True, dest='image_output_file',
help='Path to the output file of the resulting image of BRIG.')
parser.add_argument('-t', '--title', type=str, required=True, dest='title',
help='Title of the resulting image from BRIG.')
parser.add_argument('-a', '--annotation', type=str, required=False, dest='annotation_file', default=False,
help='File containing annotations for the reference genome. '
'The annoation file can be a tab-delimited file (.tsv) or a Genbank format file (.gbk, .gb)')
parser.add_argument('--genes', type=str, required=False, dest='genes_of_interest', default=[],
help='File containing a list of specific genes (one gene per line) to search when a Genbank annotation file is provided. ')
parser.add_argument('--contig_order', type=str, required=False, dest='contig_order', default=[],
help='Tab-delimited file containing the order of the contigs when a Genbank (divided by contigs) annotation file is provided. '
'Example: order contig '
'1 Contig8')
parser.add_argument('-b', '--blast_options', type=str, required=False, dest="blast_options", default="-evalue 0.001 -num_threads 6",
help='Options for running BLAST.')
parser.add_argument('-l', '--legend_pos', type=str, required=False, dest="legend_position", default="middle-right",
help='Positon of the legend on the resulting image.'
'The options available are upper, center or lower, '
'paired with left, center or right')
parser.add_argument('-if', '--image_format', type=str, required=False, dest="image_format", default="jpg",
help='Format of the resulting image file.'
'The available options are: jpg, png, svg or svgz.')
parser.add_argument('-ht', '--height', type=str, required=False, dest="height", default="3000",
help='Height (in pixels) of the resulting image.')
parser.add_argument('-wd', '--width', type=str, required=False, dest="width", default="3000",
help='Width (in pixels) of the resulting image.')
parser.add_argument('-jm', '--java_memory', type=str, required=False, dest="java_memory", default="1500",
help='Amount of memory (in bytes) that Java is allowed to use for BRIG.')
parser.add_argument('-cm', '--colormap', type=str, required=False, dest="colormap", default="viridis",
help='Colormap from matplotlib to use for the color of the rings. '
'The available options are: viridis, plasma, inferno, magma and cividis.'
'More options for colormaps at: '
'https://matplotlib.org/users/colormaps.html')
args = parser.parse_args()
return [args.query_file, args.reference_directory, args.output_folder, args.output_file,
args.image_output_file, args.title, args.annotation_file, args.genes_of_interest, args.contig_order,
args.blast_options, args.legend_position, args.image_format, args.height, args.width, args.java_memory, args.colormap]
if __name__ == '__main__':
args = parse_arguments()
main(args[0], args[1], args[2], args[3], args[4], args[5], args[6],
args[7], args[8], args[9], args[10], args[11], args[12], args[13],
args[14], args[15])
| gpl-3.0 | 5,687,784,411,646,860,000 | 40.8125 | 151 | 0.560455 | false |
GaneshPandey/alex-scraper | alexscrapper/spiders/luckyshops_spider.py | 1 | 3002 | # -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request, FormRequest
from scrapy.spiders import CrawlSpider
from alexscrapper.items import *
from datetime import datetime
from scrapy.conf import settings
import urllib
import csv
import json
import re
from datetime import datetime, timedelta
from dateutil import parser
from urllib import urlencode
from HTMLParser import HTMLParser
import requests
class LuckyshopsSider(CrawlSpider):
store_name = "Lucky Shops"
name = "luckyshops"
allowed_domains = ["rewards.luckyshops.com"]
start_urls = ['http://rewards.luckyshops.com/shopping/b____alpha.htm']
base_url = 'http://rewards.luckyshops.com'
headers = {
'User-Agent': 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.10) Firefox/3.6.10 GTB7.1',
'Accept-Language': 'en-us,en;q=0.5'
}
def __init__(self, *args, **kwargs):
super(LuckyshopsSider, self).__init__(*args, **kwargs)
settings.set('RETRY_HTTP_CODES', [500, 503, 504, 400, 408, 404] )
settings.set('RETRY_TIMES', 5 )
settings.set('REDIRECT_ENABLED', True)
settings.set('METAREFRESH_ENABLED', True)
settings.set('USER_AGENT', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36')
def start_requests(self):
for url in self.start_urls:
yield Request(url=url, callback=self.parse_product, headers=self.headers)
def parse_product(self, response):
item = Yaging()
pattern = ur'([\d.]+)'
store = response.xpath('//ul[@class="mn_splitListRt" or @class="mn_splitListLt"]/li')
for data in store:
name = str(data.xpath('a[2]/text()').extract()[0])
cashback = str(data.xpath('span').extract()[0])
link = str([(self.base_url + self.parse_link(link)) for link in data.xpath('a/@href').extract()][:][1])
item['name'] = name.replace("'", "''")
item['link'] = link
cashback = cashback.replace("<span>", "").replace("</span>", "")
if "$" in cashback:
cashback = "$"+ str(self.getNumbers(cashback))
elif "%" in cashback:
cashback = str(self.getNumbers(cashback)) + "%"
else:
pass
item['cashback'] = cashback.replace("'", "''")
item['sid'] = self.store_name
item['ctype'] = 1
item['numbers'] = self.getNumbers(cashback).replace('%','').replace('$','')
item['domainurl'] = self.base_url
yield item
def parse_link(self, jstring):
start = jstring.find("../") + 2
return jstring[start:]
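    # Illustrative behaviour of getNumbers below, based on its regex r'\d+(?:\.\d+)?':
    #   getNumbers("Up to 5.5% Cash Back") -> "5.5"
    #   getNumbers("$10 Cash Back")        -> "10"
    #   getNumbers("Cash Back")            -> "100"  (fallback when no digits are found)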
def getNumbers(self, cashback):
cash = cashback
pattern = r'\d+(?:\.\d+)?'
ret = re.findall(pattern, cash)
if len(ret):
return ret[0]
else:
return "100" | gpl-3.0 | 8,431,650,365,857,330,000 | 33.125 | 146 | 0.570286 | false |
jrsmith3/gits | test/test_fs_utils.py | 1 | 6045 | # -*- coding: utf-8 -*-
import os
import shutil
import unittest
from gits import fs_utils
test_dir_root = os.path.dirname(os.path.realpath(__file__))
class MethodsInput(unittest.TestCase):
"""
Tests behavior of methods which take input arguments.
"""
scratch_dir = os.path.join(test_dir_root, "scratch")
path_to_dummy_file = os.path.join(scratch_dir, "dummy.txt")
good_input_dict = {"dir1": {}}
def setUp(self):
"""
Creates the scratch dir.
Creates a dummy file in the scratch dir.
"""
os.mkdir(self.scratch_dir)
with open(self.path_to_dummy_file, "w"):
pass
def tearDown(self):
"""
Removes scratch dir and contents.
"""
shutil.rmtree(self.scratch_dir)
def test_dict_to_fs_fs_dict_non_dict(self):
"""
First argument to dict_to_fs must be a dictionary.
"""
self.assertRaises(TypeError, fs_utils.dict_to_fs, "not a dict", self.scratch_dir)
def test_dict_to_fs_fs_dict_values_non_dict_string(self):
"""
Values of fs_dict must be either strings or dictionaries.
"""
bad_input = {"neither_string_nor_dict": 42.}
self.assertRaises(TypeError, fs_utils.dict_to_fs, bad_input, self.scratch_dir)
def test_dict_to_fs_fqpn_root_non_str(self):
"""
Second argument to dict_to_fs must be a string.
"""
self.assertRaises(TypeError, fs_utils.dict_to_fs, self.good_input_dict, 42.)
def test_dict_to_fs_fqpn_root_string(self):
"""
Second argument to dict_to_fs can be str.
"""
try:
fs_utils.dict_to_fs(self.good_input_dict, str(self.scratch_dir))
except:
self.fail("An exception was raised, so this method can't handle strings.")
def test_dict_to_fs_fqpn_root_unicode(self):
"""
Second argument to dict_to_fs can be unicode.
"""
try:
fs_utils.dict_to_fs(self.good_input_dict, unicode(self.scratch_dir))
except:
self.fail("An exception was raised, so this method can't handle unicode.")
def test_dict_to_fs_fqpn_root_nonexistant_path(self):
"""
Second arg to dict_to_fs must correspond to exitant path.
"""
nonexistant_subdir = "does_not_exist"
bad_fqpn_root = os.path.join(self.scratch_dir, nonexistant_subdir)
self.assertRaises(OSError, fs_utils.dict_to_fs, self.good_input_dict, bad_fqpn_root)
def test_dict_to_fs_fqpn_root_non_directory_path(self):
"""
Second arg to dict_to_fs must correspond to a dir, not a file.
"""
self.assertRaises(OSError, fs_utils.dict_to_fs, self.good_input_dict, self.path_to_dummy_file)
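# For reference (illustrative): fs_utils.dict_to_fs maps a nested dict onto the filesystem,
# e.g. {"dir1": {"notes.txt": "hello"}} creates dir1/ containing a notes.txt file whose
# content is "hello"; dict values are directories, string values are file contents.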
class MethodsFunctionality(unittest.TestCase):
"""
Tests proper functionality of the methods.
"""
scratch_dir = os.path.join(test_dir_root, "scratch")
def setUp(self):
"""
Creates a scratch directory for the tests.
"""
os.mkdir(self.scratch_dir)
def tearDown(self):
"""
Removes the scratch dir (and its contents).
"""
shutil.rmtree(self.scratch_dir)
def test_dict_to_fs_filename(self):
"""
dict_to_fs should be able to create a file with a specified filename.
"""
fs_dict = {"dummy.txt": ""}
fs_utils.dict_to_fs(fs_dict, self.scratch_dir)
scratch_names = os.listdir(self.scratch_dir)
self.assertEqual(scratch_names, fs_dict.keys())
def test_dict_to_fs_isfile(self):
"""
dict_to_fs should be able to create a file.
"""
dummy_filename = "dummy.txt"
fs_dict = {dummy_filename: ""}
fs_utils.dict_to_fs(fs_dict, self.scratch_dir)
dummy_fqpn = os.path.join(self.scratch_dir, dummy_filename)
self.assertTrue(os.path.isfile(dummy_fqpn))
def test_dict_to_fs_empty_file(self):
"""
An empty string should generate an empty file.
"""
dummy_filename = "dummy.txt"
fs_dict = {dummy_filename: ""}
fs_utils.dict_to_fs(fs_dict, self.scratch_dir)
dummy_fqpn = os.path.join(self.scratch_dir, dummy_filename)
self.assertEqual(os.path.getsize(dummy_fqpn), 0)
def test_dict_to_fs_nonempty_file(self):
"""
A nonempty string should generate a nonempty file.
"""
dummy_filename = "dummy.txt"
fs_dict = {dummy_filename: "Hello world.\n"}
fs_utils.dict_to_fs(fs_dict, self.scratch_dir)
dummy_fqpn = os.path.join(self.scratch_dir, dummy_filename)
self.assertTrue(os.path.getsize(dummy_fqpn) > 0)
def test_dict_to_fs_isdir(self):
"""
dict_to_fs should be able to create a directory.
"""
dummy_dirname = "dummy"
fs_dict = {dummy_dirname: {}}
fs_utils.dict_to_fs(fs_dict, self.scratch_dir)
dummy_fqpn = os.path.join(self.scratch_dir, dummy_dirname)
self.assertTrue(os.path.isdir(dummy_fqpn))
def test_dict_to_fs_dir_isempty(self):
"""
dict_to_fs should be able to create an empty directory.
"""
dummy_dirname = "dummy"
fs_dict = {dummy_dirname: {}}
fs_utils.dict_to_fs(fs_dict, self.scratch_dir)
dummy_fqpn = os.path.join(self.scratch_dir, dummy_dirname)
should_be_empty_list = os.listdir(os.path.join(self.scratch_dir, dummy_dirname))
self.assertEqual(should_be_empty_list, [])
def test_dict_to_fs_dir_nonempty(self):
"""
dict_to_fs should be able to create a populated directory.
"""
dummy_dirname = "dummy"
fs_dict = {dummy_dirname: {"test_file.txt":""}}
fs_utils.dict_to_fs(fs_dict, self.scratch_dir)
dummy_fqpn = os.path.join(self.scratch_dir, dummy_dirname)
should_not_be_empty_list = os.listdir(os.path.join(self.scratch_dir, dummy_dirname))
self.assertTrue(len(should_not_be_empty_list) > 0)
| mit | 2,823,540,586,295,067,600 | 30.815789 | 102 | 0.599504 | false |
luca-morreale/reinforcement-gym | generalizer/state_generalizer.py | 1 | 1668 | # -*- coding: utf-8 -*-
import numpy as np
from state_action import StateAction
class StateGeneralizer:
""" Creates the Generalizer.
Args:
updater: object in charge of update the value of actions
"""
def __init__(self, m):
self.Q = {}
self.m = m
def getRepresentation(self, state_action):
        raise NotImplementedError()
""" Returns the StateAction estimated value.
Args:
state_action: the state to look for
Returns:
number
"""
def getQValue(self, state_action):
        raise NotImplementedError()
def getCombinedValue(self, state, action):
return self.getQValue(StateAction(state, action))
""" Returns an array containing the value of the corrisponding action.
Args:
obs: the state to look for
Returns:
array of numbers
"""
def getPossibleActions(self, obs):
actions = np.zeros(self.m)
for i in range(self.m):
actions[i] = self.getQValue(StateAction(obs, i))
return actions
""" Update the value of a state-action pair adding the given value.
Args:
state_action: object representing the state-action
value: value to add to the current value
"""
def addDeltaToQValue(self, state_action, value):
        raise NotImplementedError()
def newEpisode(self):
pass
"""
Prints the content of Q in a readable way
"""
def prettyPrintQ(self):
for key in self.Q:
print(str(key) + "-> ", end="")
for v in self.Q[key]:
print(str(v) + " ", end="")
print()
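# Minimal illustrative subclass (hypothetical; the attribute names of StateAction are assumed
# and not taken from the original project):
#
# class TabularGeneralizer(StateGeneralizer):
#     def getRepresentation(self, state_action):
#         return (tuple(state_action.state), state_action.action)
#     def getQValue(self, state_action):
#         return self.Q.get(self.getRepresentation(state_action), 0.0)
#     def addDeltaToQValue(self, state_action, value):
#         key = self.getRepresentation(state_action)
#         self.Q[key] = self.Q.get(key, 0.0) + value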
| gpl-3.0 | -5,542,995,542,859,738,000 | 25.903226 | 74 | 0.585731 | false |
asoplata/dynasim-benchmark-brette-2007 | Brian2/brian2_benchmark_COBAHH_nosyn_compiled_500.py | 1 | 3481 | """
# Notes:
- This simulation seeks to emulate the COBAHH benchmark simulations of (Brette
et al. 2007) using the Brian2 simulator for speed benchmark comparison to
DynaSim. However, this simulation does NOT include synapses, for better
comparison to Figure 5 of (Goodman and Brette, 2008) - although it uses the
COBAHH model of (Brette et al. 2007), not CUBA.
- The time taken to simulate will be indicated in the stdout log file
'~/batchdirs/brian2_benchmark_COBAHH_nosyn_compiled_500/pbsout/brian2_benchmark_COBAHH_nosyn_compiled_500.out'
- Note that this code has been slightly modified from the original (Brette et
al. 2007) benchmarking code, available here on ModelDB:
https://senselab.med.yale.edu/modeldb/showModel.cshtml?model=83319
in order to work with version 2 of the Brian simulator (aka Brian2), and also
modified to change the model being benchmarked, etc.
# References:
- Brette R, Rudolph M, Carnevale T, Hines M, Beeman D, Bower JM, et al.
Simulation of networks of spiking neurons: A review of tools and strategies.
Journal of Computational Neuroscience 2007;23:349–98.
doi:10.1007/s10827-007-0038-6.
- Goodman D, Brette R. Brian: a simulator for spiking neural networks in Python.
Frontiers in Neuroinformatics 2008;2. doi:10.3389/neuro.11.005.2008.
"""
from brian2 import *
set_device('cpp_standalone')
prefs.codegen.cpp.extra_compile_args = ['-w', '-O3', '-ffast-math', '-march=native']
# Parameters
cells = 500
defaultclock.dt = 0.01*ms
area = 20000*umetre**2
Cm = (1*ufarad*cmetre**-2) * area
gl = (5e-5*siemens*cmetre**-2) * area
El = -60*mV
EK = -90*mV
ENa = 50*mV
g_na = (100*msiemens*cmetre**-2) * area
g_kd = (30*msiemens*cmetre**-2) * area
VT = -63*mV
# # Time constants
# taue = 5*ms
# taui = 10*ms
# # Reversal potentials
# Ee = 0*mV
# Ei = -80*mV
# we = 6*nS # excitatory synaptic weight
# wi = 67*nS # inhibitory synaptic weight
# The model
eqs = Equations('''
dv/dt = (gl*(El-v)-
g_na*(m*m*m)*h*(v-ENa)-
g_kd*(n*n*n*n)*(v-EK))/Cm : volt
dm/dt = alpha_m*(1-m)-beta_m*m : 1
dn/dt = alpha_n*(1-n)-beta_n*n : 1
dh/dt = alpha_h*(1-h)-beta_h*h : 1
alpha_m = 0.32*(mV**-1)*(13*mV-v+VT)/
(exp((13*mV-v+VT)/(4*mV))-1.)/ms : Hz
beta_m = 0.28*(mV**-1)*(v-VT-40*mV)/
(exp((v-VT-40*mV)/(5*mV))-1)/ms : Hz
alpha_h = 0.128*exp((17*mV-v+VT)/(18*mV))/ms : Hz
beta_h = 4./(1+exp((40*mV-v+VT)/(5*mV)))/ms : Hz
alpha_n = 0.032*(mV**-1)*(15*mV-v+VT)/
(exp((15*mV-v+VT)/(5*mV))-1.)/ms : Hz
beta_n = .5*exp((10*mV-v+VT)/(40*mV))/ms : Hz
''')
# dv/dt = (gl*(El-v)+ge*(Ee-v)+gi*(Ei-v)-
# dge/dt = -ge*(1./taue) : siemens
# dgi/dt = -gi*(1./taui) : siemens
P = NeuronGroup(cells, model=eqs, threshold='v>-20*mV', refractory=3*ms,
method='euler')
proportion=int(0.8*cells)
Pe = P[:proportion]
Pi = P[proportion:]
# Ce = Synapses(Pe, P, on_pre='ge+=we')
# Ci = Synapses(Pi, P, on_pre='gi+=wi')
# Ce.connect(p=0.98)
# Ci.connect(p=0.98)
# Initialization
P.v = 'El + (randn() * 5 - 5)*mV'
# P.ge = '(randn() * 1.5 + 4) * 10.*nS'
# P.gi = '(randn() * 12 + 20) * 10.*nS'
# Record a few traces
trace = StateMonitor(P, 'v', record=[1, 10, 100])
totaldata = StateMonitor(P, 'v', record=True)
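# Note: recording every neuron at dt = 0.01 ms over the 0.5 s run below keeps
# roughly 50,000 samples per neuron (about 25 million values for 500 cells) in
# memory, which is worth keeping in mind when scaling the benchmark up.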
run(0.5 * second, report='text')
# plot(trace.t/ms, trace[1].v/mV)
# plot(trace.t/ms, trace[10].v/mV)
# plot(trace.t/ms, trace[100].v/mV)
# xlabel('t (ms)')
# ylabel('v (mV)')
# show()
# print("Saving TC cell voltages!")
# numpy.savetxt("foo_totaldata.csv", totaldata.v/mV, delimiter=",")
| gpl-3.0 | 3,238,071,321,243,270,700 | 31.212963 | 110 | 0.641851 | false |
LilithWittmann/bootev-tickets | tickets/conference/migrations/0001_initial.py | 1 | 1388 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Conference',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('name', models.CharField(verbose_name='name', max_length=80)),
('image', models.ImageField(verbose_name='logo', upload_to='media/')),
('description', models.CharField(verbose_name='description', max_length=200, blank=True)),
('start_date', models.DateField()),
('end_date', models.DateField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('active', models.BooleanField(default=False)),
('slug', models.SlugField()),
('owner', models.ForeignKey(verbose_name='owner', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Conference',
'verbose_name_plural': 'Conferences',
},
),
]
| gpl-2.0 | 476,390,771,540,662,000 | 38.657143 | 114 | 0.564121 | false |
ssdi-drive/nuxeo-drive | nuxeo-drive-client/nxdrive/engine/next/engine_next.py | 1 | 1582 | # coding: utf-8
""" Evolution to try new engine solution. """
from nxdrive.client.remote_document_client import RemoteDocumentClient
from nxdrive.client.remote_file_system_client import RemoteFileSystemClient
from nxdrive.client.remote_filtered_file_system_client import \
RemoteFilteredFileSystemClient
from nxdrive.engine.engine import Engine
from nxdrive.logging_config import get_logger
from nxdrive.options import Options
log = get_logger(__name__)
class EngineNext(Engine):
def __init__(self, manager, definition, binder=None, processors=5,
remote_doc_client_factory=RemoteDocumentClient,
remote_fs_client_factory=RemoteFileSystemClient,
remote_filtered_fs_client_factory=RemoteFilteredFileSystemClient):
super(EngineNext, self).__init__(manager, definition, binder, processors,
remote_doc_client_factory, remote_fs_client_factory, remote_filtered_fs_client_factory)
self._type = "NXDRIVENEXT"
def create_processor(self, item_getter, name=None):
from nxdrive.engine.next.processor import Processor
return Processor(self, item_getter, name=name)
def _create_queue_manager(self, processors):
from nxdrive.engine.next.queue_manager import QueueManager
if Options.debug:
return QueueManager(self, self._dao, max_file_processors=2)
return QueueManager(self, self._dao)
def _create_local_watcher(self):
from nxdrive.engine.next.simple_watcher import SimpleWatcher
return SimpleWatcher(self, self._dao)
| lgpl-2.1 | 5,264,211,132,705,381,000 | 41.756757 | 104 | 0.721871 | false |
AmI-2015/python-intermediate | metrics.py | 1 | 2662 | '''
Created on Mar 19, 2014
@author: Dario Bonino <[email protected]>
Copyright (c) 2014 Dario Bonino
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License
'''
import os,psutil,time,tts
def print_sys_metrics():
'''
    Prints some system metrics in an OS-independent way
'''
#get uname data
uname = os.uname()
# print the operating system information
print "OS Type:%s\nHost:%s\nKernel:%s %s\nArch:%s\n"%uname
#get the current system load average (last min, 5min, 15min)
load = os.getloadavg()
#print the load average
print "load_avg:\n \t%f (1min)\n \t%f (5min)\n \t%f (15min)"%(load)
#get the current virtual memory statistics
virtual_memory = psutil.virtual_memory()
#print total memory
print "Total memory:\n \t%s"%virtual_memory.total
#print available memory
print "Available memory:\n \t%s"%virtual_memory.available
#print free memory
print "Free memory:\n \t%s"%virtual_memory.available
#print cpu usage
print "CPU usage:\n \t%f"%psutil.cpu_percent(None, False)
#get disk counters
disk_io = psutil.disk_io_counters(False)
#print the number of reads and corresponding bytes
print "Reads: %d (%d bytes)"%(disk_io.read_count,disk_io.read_bytes)
#print the number of writes and the corresponding bytes
print "Writes: %d (%d bytes)"%(disk_io.write_count, disk_io.write_bytes)
'''
Monitors the CPU usage and, if it rises over a given threshold, calls a specified function
'''
def monitor_cpu(threshold,interval,callback=None):
while(True):
#get the cpu percentage
percent = psutil.cpu_percent()
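        # With no interval argument, psutil reports the usage measured since
        # the previous call; the very first call returns a meaningless 0.0.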
        #check the threshold
if(percent > threshold):
#callback
callback(percent)
#debug
print "calling callback: %s"%percent
#wait for the given time
time.sleep(interval)
if __name__ == '__main__':
#print the system metrics
print_sys_metrics()
#monitors the current cpu status
monitor_cpu(10, 1, lambda x: tts.say("warning, CPU percent raised up to %s"%x)) | apache-2.0 | -8,028,598,815,019,101,000 | 28.588889 | 92 | 0.654771 | false |
scribblemaniac/MCEdit2Blender | blocks/Transparent.py | 1 | 3837 | import bpy
import mathutils
from Block import Block
class Transparent(Block):
"""A block with a texture that contains transparent or translucent pixels"""
def makeObject(self, x, y, z, metadata):
mesh = bpy.data.meshes.new(name="Block")
mesh.from_pydata([[-0.5,-0.5,-0.5],[0.5,-0.5,-0.5],[-0.5,0.5,-0.5],[0.5,0.5,-0.5],[-0.5,-0.5,0.5],[0.5,-0.5,0.5],[-0.5,0.5,0.5],[0.5,0.5,0.5]],[],[[0,1,3,2],[4,5,7,6],[0,1,5,4],[0,2,6,4],[2,3,7,6],[1,3,7,5]])
mesh.update()
obj = bpy.data.objects.new("Block", mesh)
obj.location.x = x + 0.5
obj.location.y = y + 0.5
obj.location.z = z + 0.5
obj.scale = (0.9998999834060669, 0.9998999834060669, 0.9998999834060669) # workaround for overlapping object shading issue
obj.blockId = self._id
obj.blockMetadata = metadata
bpy.context.scene.objects.link(obj)
activeObject = bpy.context.scene.objects.active
bpy.context.scene.objects.active = obj
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.normals_make_consistent(inside=False)
bpy.ops.object.editmode_toggle()
bpy.context.scene.objects.active = activeObject
return obj
def applyMaterial(self, obj, metadata):
try:
mat = bpy.data.materials[self._unlocalizedName]
except KeyError:
mat = bpy.data.materials.new(self._unlocalizedName)
mat.preview_render_type = "CUBE"
mat.use_nodes = True
mat.node_tree.nodes["Material Output"].location = [400, 0]
mat.node_tree.nodes["Diffuse BSDF"].location = [0, -75]
mat.node_tree.links.remove(mat.node_tree.links[0])
#Mix Shader
mat.node_tree.nodes.new(type="ShaderNodeMixShader")
mat.node_tree.nodes["Mix Shader"].location = [200, 0]
mat.node_tree.links.new(mat.node_tree.nodes["Diffuse BSDF"].outputs[0], mat.node_tree.nodes["Mix Shader"].inputs[2])
mat.node_tree.links.new(mat.node_tree.nodes["Mix Shader"].outputs[0], mat.node_tree.nodes["Material Output"].inputs[0])
#Transparent Shader
mat.node_tree.nodes.new(type="ShaderNodeBsdfTransparent")
mat.node_tree.nodes["Transparent BSDF"].location = [0, 100]
mat.node_tree.links.new(mat.node_tree.nodes["Transparent BSDF"].outputs[0], mat.node_tree.nodes["Mix Shader"].inputs[1])
#Initialize Texture
try:
tex = bpy.data.images[self._unlocalizedName]
except KeyError:
tex = bpy.data.images.load(self.getBlockTexturePath(self._textureName))
tex.name = self._unlocalizedName
#First Image Texture
mat.node_tree.nodes.new(type="ShaderNodeTexImage")
mat.node_tree.nodes["Image Texture"].location = [-200, 75]
mat.node_tree.nodes["Image Texture"].image = tex
mat.node_tree.nodes["Image Texture"].interpolation = "Closest"
mat.node_tree.nodes["Image Texture"].projection = "FLAT"
mat.node_tree.links.new(mat.node_tree.nodes["Image Texture"].outputs[0], mat.node_tree.nodes["Diffuse BSDF"].inputs[0])
mat.node_tree.links.new(mat.node_tree.nodes["Image Texture"].outputs[1], mat.node_tree.nodes["Mix Shader"].inputs[0])
#UV Map
mat.node_tree.nodes.new(type="ShaderNodeUVMap")
mat.node_tree.nodes["UV Map"].location = [-400, 0]
mat.node_tree.nodes["UV Map"].uv_map = "UVMap"
mat.node_tree.links.new(mat.node_tree.nodes["UV Map"].outputs[0], mat.node_tree.nodes["Image Texture"].inputs[0])
obj.data.materials.append(mat)
| gpl-3.0 | 8,452,518,236,062,519,000 | 49.486842 | 216 | 0.594996 | false |
ambyte/Vertaler | src/modules/startupapp.py | 1 | 2108 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright (c) 2011 Sergey Gulyaev <[email protected]>
#
# This file is part of Vertaler.
#
# Vertaler is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Vertaler is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
# ----------------------------------------------------------------------------
""" startup application when start Windows """
import os
if os.name == "nt":
import winshell
import sys
def is_start_up():
try:
startup = winshell.startup(1)
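        # winshell.startup(1) is expected to return the all-users ("common")
        # Startup folder rather than the per-user one.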
if os.path.exists(startup + '\\Vertaler.lnk'):
return True
else:
return False
except Exception:
pass
def set_startup():
try:
# get path and file name for application
startFile = os.path.abspath(sys.argv[0])
# get startup folder
startup = winshell.startup(1)
# create shortcut in startup folder
winshell.CreateShortcut(
Path=os.path.join(startup, "Vertaler.lnk"),
Target=startFile,
Icon=(startFile, 0),
Description="Vertaler",
StartIn=os.path.abspath(None)
)
except Exception:
pass
def delete_startup():
try:
startup = winshell.startup(1)
# remove shortcut from startup folder
if os.path.isfile(startup + '\\Vertaler.lnk'):
os.remove(startup + '\\Vertaler.lnk')
except Exception:
pass
| gpl-2.0 | -8,542,072,036,966,153,000 | 28.690141 | 78 | 0.591556 | false |
SoCdesign/EHA | Tools/Minimization_Tool/essential_checker_extraction.py | 1 | 7029 | # copyright 2016 Siavoosh Payandeh Azad and Behrad Niazmand
import package_file
import copy
def extract_checker_info(name_string):
package_file.list_of_detection_info_sa0[name_string] = []
package_file.list_of_detection_info_sa1[name_string] = []
package_file.list_of_true_misses_sa0[name_string] = []
package_file.list_of_true_misses_sa1[name_string] = []
area_report_file = open("coverage_results/fstat" + str(name_string), 'r')
line = area_report_file.readline()
while line != "":
line = area_report_file.readline()
if ".CHECKERS DETECTION INFO - amount of detections" in line:
line = area_report_file.readline()
for item in line.split(" "):
if "|" in item:
package_file.list_of_detection_info_sa0[name_string].append(item.split("|")[0])
package_file.list_of_detection_info_sa1[name_string].append(item.split("|")[1])
if "amount of True Misses" in line:
line = area_report_file.readline()
for item in line.split(" "):
if "|" in item:
package_file.list_of_true_misses_sa0[name_string].append(item.split("|")[0])
package_file.list_of_true_misses_sa1[name_string].append(item.split("|")[1])
# print package_file.list_of_detection_info_sa0
# print package_file.list_of_detection_info_sa1
return None
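# Illustrative call order (checker names below are hypothetical):
#     for name in ("checker_a", "checker_b"):
#         extract_checker_info(name)
#     essential, to_optimize = find_essential_checker()
# extract_checker_info() must run once per checker so the package_file tables
# are populated before find_essential_checker() is called.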
def find_essential_checker():
print "------------------------------------------------------------------------------------------------"
print " Extracting essential checkers"
print "------------------------------------------------------------------------------------------------"
temp_copy_sa0 = copy.deepcopy(package_file.list_of_true_misses_sa0)
temp_copy_sa1 = copy.deepcopy(package_file.list_of_true_misses_sa1)
random_item = temp_copy_sa0.keys()[0]
selected_checkers_sa0 = []
selected_checkers_sa1 = []
checkers_for_optimization = []
for node in range(0, len(temp_copy_sa0[random_item])):
best_checker = None
best_true_miss_rate = float('inf')
for checker in temp_copy_sa0:
true_miss_rate = int(temp_copy_sa0[checker][node])
if int(package_file.list_of_detection_info_sa0[str(checker)][node]) > 0:
if true_miss_rate >= 0:
if true_miss_rate < best_true_miss_rate:
best_true_miss_rate = true_miss_rate
best_checker = checker
# if best_true_miss_rate == 0:
count = 0
for checker in temp_copy_sa0:
if int(package_file.list_of_true_misses_sa0[checker][node]) == best_true_miss_rate:
if int(package_file.list_of_detection_info_sa0[str(checker)][node]) > 0:
temp_copy_sa0[checker][node] = 1
count += 1
else:
temp_copy_sa0[checker][node] = 0
else:
temp_copy_sa0[checker][node] = 0
if count == 1:
if best_checker not in selected_checkers_sa0:
selected_checkers_sa0.append(best_checker)
# else:
# for checker in temp_copy_sa0:
# temp_copy_sa0[checker][node] = 0
print "single dominant checkers for sta0:", selected_checkers_sa0
for node in range(0, len(temp_copy_sa1[random_item])):
best_checker = None
best_true_miss_rate = float('inf')
for checker in temp_copy_sa1:
true_miss_rate = int(temp_copy_sa1[checker][node])
if int(package_file.list_of_detection_info_sa1[str(checker)][node]) > 0:
# print checker, int(package_file.list_of_detection_info_sa1[str(checker)][node])
if true_miss_rate >= 0:
if true_miss_rate < best_true_miss_rate:
best_true_miss_rate = true_miss_rate
best_checker = checker
# if best_true_miss_rate == 0:
count = 0
for checker in temp_copy_sa1:
if int(package_file.list_of_true_misses_sa1[checker][node]) == best_true_miss_rate:
if int(package_file.list_of_detection_info_sa1[str(checker)][node]) > 0:
temp_copy_sa1[checker][node] = 1
count += 1
else:
temp_copy_sa1[checker][node] = 0
else:
temp_copy_sa1[checker][node] = 0
# print "best checker", best_checker
if count == 1:
if best_checker not in selected_checkers_sa1:
selected_checkers_sa1.append(best_checker)
# else:
# for checker in temp_copy_sa1:
# temp_copy_sa1[checker][node] = 0
print "single dominant checkers for sta1:", selected_checkers_sa1
for checker in selected_checkers_sa0:
for node in range(0, len(temp_copy_sa0[checker])):
if temp_copy_sa0[checker][node] == 1:
for checker2 in temp_copy_sa0.keys():
if checker2 not in selected_checkers_sa0:
if temp_copy_sa0[checker2][node] == 1:
temp_copy_sa0[checker2][node] = 0
for checker in selected_checkers_sa1:
for node in range(0, len(temp_copy_sa1[checker])):
if temp_copy_sa1[checker][node] == 1:
for checker2 in temp_copy_sa1.keys():
if checker2 not in selected_checkers_sa1:
if temp_copy_sa1[checker2][node] == 1:
temp_copy_sa1[checker2][node] = 0
if package_file.debug:
print "-----------------"
print "printing the checkers true misses table (0-1)"
print "stuck at 0:"
for checker in sorted(temp_copy_sa0.keys()):
print checker,
for item in temp_copy_sa0[checker]:
print item,
print ""
print "-----------------"
print "printing the checkers true misses table (0-1)"
print "stuck at 1:"
for checker in sorted(temp_copy_sa1.keys()):
print checker,
for item in temp_copy_sa1[checker]:
print item,
print ""
print "-----------------"
final_selected_list = []
for item in selected_checkers_sa0:
final_selected_list.append(str(item))
for item in selected_checkers_sa1:
if item not in selected_checkers_sa0:
final_selected_list.append(str(item))
for item in temp_copy_sa0.keys():
if str(item) not in final_selected_list:
if str(item) not in checkers_for_optimization:
checkers_for_optimization.append(str(item))
print "selected single dominant checkers:", final_selected_list
print "selected checkers for optimization:", checkers_for_optimization
return final_selected_list, checkers_for_optimization
| gpl-3.0 | 3,876,197,300,479,161,300 | 43.487342 | 108 | 0.543463 | false |
funkbit/django-funky-user | funky_user/managers.py | 1 | 1568 | from datetime import datetime
from django.contrib.auth.models import BaseUserManager as DjangoBaseUserManager
from django.utils import timezone
class UserManager(DjangoBaseUserManager):
"""
Default manager for the User model.
"""
###################################
# Required Django manager methods #
###################################
def create_user(self, email, password=None, **extra_fields):
"""
Creates and saves a User with the given email and password.
"""
# We set last login in the past so we know which users has logged in once
last_login_date = datetime(1970, 1, 1).replace(tzinfo=timezone.utc)
if not email:
raise ValueError('The given email must be set')
email = self.normalize_email(email)
user = self.model(
email=email,
is_staff=False,
is_active=False,
last_login=last_login_date,
date_joined=timezone.now(),
**extra_fields
)
user.set_password(password)
user.save(using=self._db)
return user
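    # Illustrative usage sketch (model, email and password names are hypothetical):
    #     user = UserModel.objects.create_user('jane@example.com', 's3cret')
    # Note that accounts are created inactive and must be activated separately.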
def create_superuser(self, email, password, **extra_fields):
user = self.create_user(email, password, **extra_fields)
user.is_staff = True
user.is_active = True
user.save(using=self._db)
return user
##################
# Custom methods #
##################
def active(self):
"""
Returns only active users.
"""
return self.filter(is_active=True)
| bsd-2-clause | -2,770,687,478,345,300,000 | 26.508772 | 81 | 0.55676 | false |
tdozat/Parser | dataset.py | 1 | 5660 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright 2016 Timothy Dozat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from collections import Counter
from lib.etc.k_means import KMeans
from configurable import Configurable
from vocab import Vocab
from metabucket import Metabucket
#***************************************************************
class Dataset(Configurable):
""""""
#=============================================================
def __init__(self, filename, vocabs, builder, *args, **kwargs):
""""""
super(Dataset, self).__init__(*args, **kwargs)
self._file_iterator = self.file_iterator(filename)
self._train = (filename == self.train_file)
self._metabucket = Metabucket(self._config, n_bkts=self.n_bkts)
self._data = None
self.vocabs = vocabs
self.rebucket()
self.inputs = tf.placeholder(dtype=tf.int32, shape=(None,None,None), name='inputs')
self.targets = tf.placeholder(dtype=tf.int32, shape=(None,None,None), name='targets')
self.builder = builder()
#=============================================================
def file_iterator(self, filename):
""""""
with open(filename) as f:
if self.lines_per_buffer > 0:
buff = [[]]
while True:
line = f.readline()
while line:
line = line.strip().split()
if line:
buff[-1].append(line)
else:
if len(buff) < self.lines_per_buffer:
if buff[-1]:
buff.append([])
else:
break
line = f.readline()
if not line:
f.seek(0)
else:
buff = self._process_buff(buff)
yield buff
line = line.strip().split()
if line:
buff = [[line]]
else:
buff = [[]]
else:
buff = [[]]
for line in f:
line = line.strip().split()
if line:
buff[-1].append(line)
else:
if buff[-1]:
buff.append([])
if buff[-1] == []:
buff.pop()
buff = self._process_buff(buff)
while True:
yield buff
#=============================================================
def _process_buff(self, buff):
""""""
words, tags, rels = self.vocabs
for i, sent in enumerate(buff):
for j, token in enumerate(sent):
word, tag1, tag2, head, rel = token[words.conll_idx], token[tags.conll_idx[0]], token[tags.conll_idx[1]], token[6], token[rels.conll_idx]
buff[i][j] = (word,) + words[word] + tags[tag1] + tags[tag2] + (int(head),) + rels[rel]
sent.insert(0, ('root', Vocab.ROOT, Vocab.ROOT, Vocab.ROOT, Vocab.ROOT, 0, Vocab.ROOT))
return buff
#=============================================================
def reset(self, sizes):
""""""
self._data = []
self._targets = []
self._metabucket.reset(sizes)
return
#=============================================================
def rebucket(self):
""""""
buff = self._file_iterator.next()
len_cntr = Counter()
for sent in buff:
len_cntr[len(sent)] += 1
self.reset(KMeans(self.n_bkts, len_cntr).splits)
for sent in buff:
self._metabucket.add(sent)
self._finalize()
return
#=============================================================
def _finalize(self):
""""""
self._metabucket._finalize()
return
#=============================================================
def get_minibatches(self, batch_size, input_idxs, target_idxs, shuffle=True):
""""""
minibatches = []
for bkt_idx, bucket in enumerate(self._metabucket):
if batch_size == 0:
n_splits = 1
else:
n_tokens = len(bucket) * bucket.size
n_splits = max(n_tokens // batch_size, 1)
if shuffle:
range_func = np.random.permutation
else:
range_func = np.arange
arr_sp = np.array_split(range_func(len(bucket)), n_splits)
for bkt_mb in arr_sp:
minibatches.append( (bkt_idx, bkt_mb) )
if shuffle:
np.random.shuffle(minibatches)
for bkt_idx, bkt_mb in minibatches:
feed_dict = {}
data = self[bkt_idx].data[bkt_mb]
sents = self[bkt_idx].sents[bkt_mb]
maxlen = np.max(np.sum(np.greater(data[:,:,0], 0), axis=1))
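      # maxlen is the longest non-padded sentence in this slice (column 0 holds
      # word ids, with 0 marking padding), so the feeds below drop unused padding.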
feed_dict.update({
self.inputs: data[:,:maxlen,input_idxs],
self.targets: data[:,:maxlen,target_idxs]
})
yield feed_dict, sents
#=============================================================
@property
def n_bkts(self):
if self._train:
return super(Dataset, self).n_bkts
else:
return super(Dataset, self).n_valid_bkts
#=============================================================
def __getitem__(self, key):
return self._metabucket[key]
def __len__(self):
return len(self._metabucket)
| apache-2.0 | 8,636,058,868,906,911,000 | 29.928962 | 145 | 0.510247 | false |
SWENG500-Team1/FitnessForSplunk | misc/python-oauth2-test/python_server.py | 1 | 1192 | from bottle import route, run, request # Python server library
import sys
import httplib2, urllib
import base64
# Hello World route example
@route('/hello')
def hello():
return "Hello World!"
# Fitbit callback route
@route('/auth/fitbit/callback')
def fitbit_callback():
# Edit these variables to suit you
clientID = '227MVJ'
clientSecret = 'df8009bd0ddcb975f9a812e3587e54dd'
encoded = base64.b64encode( (clientID + ':' + clientSecret) )
callback_url = 'https://localhost:8089/services/fitness_for_splunk/fitbit_callback'
    authCode = '' # Need to fill in auth code
# Request for a token
url = 'https://api.fitbit.com/oauth2/token'
authHeader_value = ('Basic ' + encoded)
headers = {'Authorization': authHeader_value, 'Content-Type': 'application/x-www-form-urlencoded'}
data = {'clientId': clientID, 'grant_type': 'authorization_code', 'redirect_uri': callback_url, 'code': authCode}
body = urllib.urlencode(data)
http = httplib2.Http()
resp, cont = http.request(url, 'POST', headers=headers, body=body)
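    # The response body should be JSON carrying the access_token and
    # refresh_token for the authorizing user (standard OAuth2
    # authorization_code grant); it is returned to the browser below.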
# Print response content (token) to screen
return cont
run(host='localhost', port=3000, debug=True) | mit | -3,756,327,715,078,418,000 | 33.085714 | 117 | 0.687081 | false |
HEP-DL/root2hdf5 | root2hdf5/plugins/larcv/pmt.py | 1 | 1321 | from root2hdf5.data_types.base import BaseData
import numpy as np
import logging
class PMTData(BaseData):
logger = logging.getLogger('root2hdf5.data_types.pmt')
tree_name = 'image2d_pmt_tree'
def __init__(self, _file, output_file):
super(PMTData, self).__init__(_file)
from larcv import larcv
self.array_converter = larcv.as_ndarray
self.dataset = output_file.create_dataset("image2d/pmt", (10,1,1500,32), maxshape=(None,1,1500,32),
chunks=(10,1,1500,32), dtype='f',compression="gzip")
self.dataset.attrs['name'] = 'image2d_pmt'
self.dataset.attrs['index0_name'] = 'eventN'
self.dataset.attrs['index1_name'] = 'layerN'
self.dataset.attrs['index3_name'] = 'pixelX'
self.dataset.attrs['index4_name'] = 'pixelY'
self.logger.info("Setting Up PMT Data Stream")
self.buffer = np.ndarray((10,1,1500,32), dtype='H')
self.buffer_index=0
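    # Events are staged in this 10-event in-memory buffer and flushed to the
    # HDF5 dataset once every 10 processed events (see process_branch below).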
def process_branch(self, branch):
layerimage = self.array_converter(branch.at(0))
layerimage.resize(1500,32)
self.buffer[self.buffer_index, 0] = layerimage
self.buffer_index+=1
if self.event_index %10==0:
self.buffer_index=0
self.dataset.resize( (self.event_index+10,1,1500,32) )
self.dataset[self.event_index:self.event_index+10,:,:,:] = self.buffer
| gpl-3.0 | -8,310,864,700,203,725,000 | 40.28125 | 103 | 0.657835 | false |
jolyonb/edx-platform | lms/djangoapps/courseware/management/commands/clean_xml.py | 1 | 4518 | from __future__ import print_function
import os
import sys
import traceback
import lxml.etree
from django.core.management.base import BaseCommand
from fs.osfs import OSFS
from path import Path as path
from xmodule.modulestore.xml import XMLModuleStore
def traverse_tree(course):
"""
Load every descriptor in course. Return bool success value.
"""
queue = [course]
while len(queue) > 0:
node = queue.pop()
queue.extend(node.get_children())
return True
def export(course, export_dir):
"""
Export the specified course to course_dir. Creates dir if it doesn't
exist. Overwrites files, does not clean out dir beforehand.
"""
fs = OSFS(export_dir, create=True)
if not fs.isdirempty('.'):
print(u'WARNING: Directory {dir} not-empty. May clobber/confuse things'.format(dir=export_dir))
try:
course.runtime.export_fs = fs
root = lxml.etree.Element('root')
course.add_xml_to_node(root)
with fs.open('course.xml', mode='w') as f:
root.write(f)
return True
except:
print('Export failed!')
traceback.print_exc()
return False
def import_with_checks(course_dir):
all_ok = True
print(u'Attempting to load "{}"'.format(course_dir))
course_dir = path(course_dir)
data_dir = course_dir.dirname()
source_dirs = [course_dir.basename()]
# No default class--want to complain if it doesn't find plugins for any
# module.
modulestore = XMLModuleStore(
data_dir,
default_class=None,
source_dirs=source_dirs
)
def str_of_err(tpl):
(msg, exc_str) = tpl
return '{msg}\n{exc}'.format(msg=msg, exc=exc_str)
courses = modulestore.get_courses()
n = len(courses)
if n != 1:
print(u'ERROR: Expect exactly 1 course. Loaded {n}: {lst}'.format(n=n, lst=courses))
return (False, None)
course = courses[0]
errors = modulestore.get_course_errors(course.id)
if len(errors) != 0:
all_ok = False
print(
'\n' +
'========================================' +
'ERRORs during import:' +
'\n'.join(map(str_of_err, errors)) +
'========================================' +
'\n'
)
# print course
validators = (
traverse_tree,
)
print('========================================')
print('Running validators...')
for validate in validators:
print(u'Running {}'.format(validate.__name__))
all_ok = validate(course) and all_ok
if all_ok:
print('Course passes all checks!')
else:
print('Course fails some checks. See above for errors.')
return all_ok, course
def check_roundtrip(course_dir):
"""
Check that import->export leaves the course the same
"""
print('====== Roundtrip import =======')
(ok, course) = import_with_checks(course_dir)
if not ok:
raise Exception('Roundtrip import failed!')
print('====== Roundtrip export =======')
export_dir = course_dir + '.rt'
export(course, export_dir)
# dircmp doesn't do recursive diffs.
# diff = dircmp(course_dir, export_dir, ignore=[], hide=[])
print('======== Roundtrip diff: =========')
sys.stdout.flush() # needed to make diff appear in the right place
os.system(u'diff -r {} {}'.format(course_dir, export_dir))
print('======== ideally there is no diff above this =======')
class Command(BaseCommand):
help = 'Imports specified course, validates it, then exports it in a canonical format.'
def add_arguments(self, parser):
parser.add_argument('course_dir',
help='path to the input course directory')
parser.add_argument('output_dir',
help='path to the output course directory')
parser.add_argument('--force',
action='store_true',
help='export course even if there were import errors')
def handle(self, *args, **options):
course_dir = options['course_dir']
output_dir = options['output_dir']
force = options['force']
(ok, course) = import_with_checks(course_dir)
if ok or force:
if not ok:
print('WARNING: Exporting despite errors')
export(course, output_dir)
check_roundtrip(output_dir)
else:
print('Did NOT export')
| agpl-3.0 | -2,033,734,661,561,714,200 | 27.77707 | 104 | 0.568172 | false |
ocr-doacao/ocr | ocrDoacao/testes/teste_notafiscal.py | 1 | 2960 | from django.test import TestCase
from ocrDoacao.models import NotaFiscal
import datetime
class NotaFiscalTest(TestCase):
def test_validacao_cnpj(self):
validacao = NotaFiscal.validaCNPJ('11.111.111/1111-11')
self.assertEqual(validacao, False)
validacao = NotaFiscal.validaCNPJ('12.aaa11a/1111-a11')
self.assertEqual(validacao, False)
validacao = NotaFiscal.validaCNPJ('36.564.851/0001-04')
self.assertEqual(validacao, True)
def test_validacao_coo(self):
validacao = NotaFiscal.validaCOO('1')
self.assertEqual(validacao, False)
validacao = NotaFiscal.validaCOO('12')
self.assertEqual(validacao, False)
validacao = NotaFiscal.validaCOO('123')
self.assertEqual(validacao, False)
validacao = NotaFiscal.validaCOO('1234')
self.assertEqual(validacao, False)
validacao = NotaFiscal.validaCOO('12345')
self.assertEqual(validacao, False)
validacao = NotaFiscal.validaCOO('123456')
self.assertEqual(validacao, True)
validacao = NotaFiscal.validaCOO('abcs23')
self.assertEqual(validacao, False)
def test_validacao_valor(self):
validacao = NotaFiscal.validaValor('0')
self.assertEqual(validacao, False)
validacao = NotaFiscal.validaValor('a.01')
self.assertEqual(validacao, False)
validacao = NotaFiscal.validaValor('-10')
self.assertEqual(validacao, False)
validacao = NotaFiscal.validaValor('0.1')
self.assertEqual(validacao, True)
validacao = NotaFiscal.validaValor('10')
self.assertEqual(validacao, True)
def test_validacao_data(self):
hoje = datetime.datetime.now()
amanha = datetime.datetime(hoje.year, hoje.month, hoje.day + 1)
mes_passado = datetime.datetime(hoje.year, hoje.month - 1, 1)
dois_meses_atras = datetime.datetime(hoje.year, hoje.month - 2, 1)
formato_data = '%d/%m/%Y'
validacao = NotaFiscal.validaData(hoje.strftime('%d-%m-%Y'))
self.assertEqual(validacao, 0)
validacao = NotaFiscal.validaData('')
self.assertEqual(validacao, 0)
validacao = NotaFiscal.validaData('string')
self.assertEqual(validacao, 0)
validacao = NotaFiscal.validaData(dois_meses_atras.strftime(formato_data))
self.assertEqual(validacao, 1)
validacao = NotaFiscal.validaData('01/01/2015')
self.assertEqual(validacao, 1)
if (hoje.day <= 20):
validacao = NotaFiscal.validaData(mes_passado.strftime(formato_data))
self.assertEqual(validacao, 2)
else:
validacao = NotaFiscal.validaData(mes_passado.strftime(formato_data))
self.assertEqual(validacao, 1)
validacao = NotaFiscal.validaData(hoje.strftime(formato_data))
self.assertEqual(validacao, 2) | apache-2.0 | -59,007,315,650,297,800 | 40.125 | 82 | 0.645946 | false |
juiceinc/mandoline | mandoline/test/test.py | 1 | 2314 | import glob
from nose.tools import with_setup
import os
from mandoline import *
from mandoline import FieldCleaner as _
# def TestCleaner(TestCase):
# def setup(self):
# pass
#
# def teardown(self):
# pass
#
# def test_cleaner(self):
# cleaner = MandolineCleaner()
# cleaner.files("Sample.xlsx").set_fields(_('Organization'),
# _('Facility'),
# _('Department'),
# _('Job Category'),
# _('Course'),
# _('Date', Date()),
# _('Completion Count', Int()),
# _('City'),
# _('State'),
# _('Zip')).clean().refine_fieldnames().to_json()
#
# def testit(self):
# assert 1== 1
def datapath(fn):
""" Returns a filename in the data directory
"""
full_path = os.path.realpath(__file__)
path, file = os.path.split(full_path)
return os.path.join(path, "data", fn)
def testpath(fn):
pass
def setup():
""" Remove all files from the data directory except for our test files
"""
full_path = os.path.realpath(__file__)
path, file = os.path.split(full_path)
for fn in glob.glob(os.path.join(path, "data", "*")):
p, f = os.path.split(fn)
if f not in ("Sample.csv", "Sample.xlsx"):
os.remove(fn)
def teardown():
pass
@with_setup(setup, teardown)
def test():
full_path = os.path.realpath(__file__)
path, file = os.path.split(full_path)
fn = os.path.join(path, "data", "Sample.xlsx")
cleaner = MandolineCleaner()
cleaner.files(fn).set_fields(_('Organization'),
_('Facility'),
_('Department'),
_('Job Category'),
_('Course'),
_('Date', Date()),
_('Completion Count', Int()),
_('City'),
_('State'),
_('Zip')).clean().refine_fieldnames().to_json("output.json")
try:
print datapath("output.json")
# open(datapath("output.json"))
except IOError:
assert 1 == 0, "File does not exist"
| mit | -3,489,284,772,643,382,000 | 28.666667 | 84 | 0.468453 | false |
lmaycotte/quark | quark/plugin_modules/ports.py | 1 | 26370 | # Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron.extensions import securitygroup as sg_ext
from neutron import quota
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
from quark.db import api as db_api
from quark.drivers import registry
from quark.environment import Capabilities
from quark import exceptions as q_exc
from quark import ipam
from quark import network_strategy
from quark import plugin_views as v
from quark import tags
from quark import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
PORT_TAG_REGISTRY = tags.PORT_TAG_REGISTRY
STRATEGY = network_strategy.STRATEGY
# HACK(amir): RM9305: do not allow a tenant to associate a network to a port
# that does not belong to them unless it is publicnet or servicenet
# NOTE(blogan): allow advanced services, such as lbaas, the ability
# to associate a network to a port that does not belong to them
def _raise_if_unauthorized(context, net):
if (not STRATEGY.is_provider_network(net["id"]) and
net["tenant_id"] != context.tenant_id and
not context.is_advsvc):
raise n_exc.NotAuthorized()
def _get_net_driver(network, port=None):
port_driver = None
if port and port.get("network_plugin"):
port_driver = port.get("network_plugin")
try:
return registry.DRIVER_REGISTRY.get_driver(
network["network_plugin"], port_driver=port_driver)
except Exception as e:
raise n_exc.BadRequest(resource="ports",
msg="invalid network_plugin: %s" % e)
def _get_ipam_driver(network, port=None):
network_id = network["id"]
network_strategy = network["ipam_strategy"]
# Ask the net driver for a IPAM strategy to use
# with the given network/default strategy.
net_driver = _get_net_driver(network, port=port)
strategy = net_driver.select_ipam_strategy(
network_id, network_strategy)
# If the driver has no opinion about which strategy to use,
# we use the one specified by the network.
if not strategy:
strategy = network_strategy
try:
return ipam.IPAM_REGISTRY.get_strategy(strategy)
except Exception as e:
raise n_exc.BadRequest(resource="ports",
msg="invalid ipam_strategy: %s" % e)
# NOTE(morgabra) Backend driver operations return a lot of stuff. We use a
# small subset of this data, so we filter out things we don't care about
# so we can avoid any collisions with real port data.
def _filter_backend_port(backend_port):
# Collect a list of allowed keys in the driver response
required_keys = ["uuid", "bridge"]
tag_keys = [tag for tag in PORT_TAG_REGISTRY.tags.keys()]
allowed_keys = required_keys + tag_keys
for k in backend_port.keys():
if k not in allowed_keys:
del backend_port[k]
def split_and_validate_requested_subnets(context, net_id, segment_id,
fixed_ips):
subnets = []
ip_addresses = {}
for fixed_ip in fixed_ips:
subnet_id = fixed_ip.get("subnet_id")
ip_address = fixed_ip.get("ip_address")
if not subnet_id:
raise n_exc.BadRequest(resource="fixed_ips",
msg="subnet_id required")
if ip_address:
ip_addresses[ip_address] = subnet_id
else:
subnets.append(subnet_id)
subnets = ip_addresses.values() + subnets
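    # ip_addresses maps each explicitly requested IP to its subnet id; the
    # combined subnets list puts those subnets first, then bare subnet_id
    # requests, and both are validated against the target network below.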
sub_models = db_api.subnet_find(context, id=subnets, scope=db_api.ALL)
if len(sub_models) == 0:
raise n_exc.SubnetNotFound(subnet_id=subnets)
for s in sub_models:
if s["network_id"] != net_id:
raise n_exc.InvalidInput(
error_message="Requested subnet doesn't belong to requested "
"network")
if segment_id and segment_id != s["segment_id"]:
raise q_exc.AmbiguousNetworkId(net_id=net_id)
return ip_addresses, subnets
def create_port(context, port):
"""Create a port
Create a port which is a connection point of a device (e.g., a VM
NIC) to attach to a L2 Neutron network.
: param context: neutron api request context
: param port: dictionary describing the port, with keys
as listed in the RESOURCE_ATTRIBUTE_MAP object in
neutron/api/v2/attributes.py. All keys will be populated.
"""
LOG.info("create_port for tenant %s" % context.tenant_id)
port_attrs = port["port"]
admin_only = ["mac_address", "device_owner", "bridge", "admin_state_up",
"use_forbidden_mac_range", "network_plugin",
"instance_node_id"]
utils.filter_body(context, port_attrs, admin_only=admin_only)
port_attrs = port["port"]
mac_address = utils.pop_param(port_attrs, "mac_address", None)
use_forbidden_mac_range = utils.pop_param(port_attrs,
"use_forbidden_mac_range", False)
segment_id = utils.pop_param(port_attrs, "segment_id")
fixed_ips = utils.pop_param(port_attrs, "fixed_ips")
if "device_id" not in port_attrs:
port_attrs['device_id'] = ""
device_id = port_attrs['device_id']
# NOTE(morgabra) This should be instance.node from nova, only needed
# for ironic_driver.
if "instance_node_id" not in port_attrs:
port_attrs['instance_node_id'] = ""
instance_node_id = port_attrs['instance_node_id']
net_id = port_attrs["network_id"]
port_id = uuidutils.generate_uuid()
net = db_api.network_find(context, None, None, None, False, id=net_id,
scope=db_api.ONE)
if not net:
raise n_exc.NetworkNotFound(net_id=net_id)
_raise_if_unauthorized(context, net)
# NOTE (Perkins): If a device_id is given, try to prevent multiple ports
# from being created for a device already attached to the network
if device_id:
existing_ports = db_api.port_find(context,
network_id=net_id,
device_id=device_id,
scope=db_api.ONE)
if existing_ports:
raise n_exc.BadRequest(
resource="port", msg="This device is already connected to the "
"requested network via another port")
# Try to fail early on quotas and save ourselves some db overhead
if fixed_ips:
quota.QUOTAS.limit_check(context, context.tenant_id,
fixed_ips_per_port=len(fixed_ips))
if not STRATEGY.is_provider_network(net_id):
# We don't honor segmented networks when they aren't "shared"
segment_id = None
port_count = db_api.port_count_all(context, network_id=[net_id],
tenant_id=[context.tenant_id])
quota.QUOTAS.limit_check(
context, context.tenant_id,
ports_per_network=port_count + 1)
else:
if not segment_id:
raise q_exc.AmbiguousNetworkId(net_id=net_id)
network_plugin = utils.pop_param(port_attrs, "network_plugin")
if not network_plugin:
network_plugin = net["network_plugin"]
port_attrs["network_plugin"] = network_plugin
ipam_driver = _get_ipam_driver(net, port=port_attrs)
net_driver = _get_net_driver(net, port=port_attrs)
# NOTE(morgabra) It's possible that we select a driver different than
# the one specified by the network. However, we still might need to use
# this for some operations, so we also fetch it and pass it along to
# the backend driver we are actually using.
base_net_driver = _get_net_driver(net)
# TODO(anyone): security groups are not currently supported on port create.
# Please see JIRA:NCP-801
security_groups = utils.pop_param(port_attrs, "security_groups")
if security_groups is not None:
raise q_exc.SecurityGroupsNotImplemented()
group_ids, security_groups = _make_security_group_list(context,
security_groups)
quota.QUOTAS.limit_check(context, context.tenant_id,
security_groups_per_port=len(group_ids))
addresses = []
backend_port = None
with utils.CommandManager().execute() as cmd_mgr:
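        # Each @cmd_mgr.do step below has a matching @cmd_mgr.undo handler, so
        # a failure part-way through rolls back MAC, IP, backend and db work.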
@cmd_mgr.do
def _allocate_ips(fixed_ips, net, port_id, segment_id, mac):
fixed_ip_kwargs = {}
if fixed_ips:
if (STRATEGY.is_provider_network(net_id) and
not context.is_admin):
raise n_exc.NotAuthorized()
ips, subnets = split_and_validate_requested_subnets(context,
net_id,
segment_id,
fixed_ips)
fixed_ip_kwargs["ip_addresses"] = ips
fixed_ip_kwargs["subnets"] = subnets
ipam_driver.allocate_ip_address(
context, addresses, net["id"], port_id,
CONF.QUARK.ipam_reuse_after, segment_id=segment_id,
mac_address=mac, **fixed_ip_kwargs)
@cmd_mgr.undo
def _allocate_ips_undo(addr):
LOG.info("Rolling back IP addresses...")
if addresses:
for address in addresses:
try:
with context.session.begin():
ipam_driver.deallocate_ip_address(context, address)
except Exception:
LOG.exception("Couldn't release IP %s" % address)
@cmd_mgr.do
def _allocate_mac(net, port_id, mac_address,
use_forbidden_mac_range=False):
mac = ipam_driver.allocate_mac_address(
context, net["id"], port_id, CONF.QUARK.ipam_reuse_after,
mac_address=mac_address,
use_forbidden_mac_range=use_forbidden_mac_range)
return mac
@cmd_mgr.undo
def _allocate_mac_undo(mac):
LOG.info("Rolling back MAC address...")
if mac:
try:
with context.session.begin():
ipam_driver.deallocate_mac_address(context,
mac["address"])
except Exception:
LOG.exception("Couldn't release MAC %s" % mac)
@cmd_mgr.do
def _allocate_backend_port(mac, addresses, net, port_id):
backend_port = net_driver.create_port(
context, net["id"],
port_id=port_id,
security_groups=group_ids,
device_id=device_id,
instance_node_id=instance_node_id,
mac_address=mac,
addresses=addresses,
base_net_driver=base_net_driver)
_filter_backend_port(backend_port)
return backend_port
@cmd_mgr.undo
def _allocate_back_port_undo(backend_port):
LOG.info("Rolling back backend port...")
try:
backend_port_uuid = None
if backend_port:
backend_port_uuid = backend_port.get("uuid")
net_driver.delete_port(context, backend_port_uuid)
except Exception:
LOG.exception(
"Couldn't rollback backend port %s" % backend_port)
@cmd_mgr.do
def _allocate_db_port(port_attrs, backend_port, addresses, mac):
port_attrs["network_id"] = net["id"]
port_attrs["id"] = port_id
port_attrs["security_groups"] = security_groups
LOG.info("Including extra plugin attrs: %s" % backend_port)
port_attrs.update(backend_port)
with context.session.begin():
new_port = db_api.port_create(
context, addresses=addresses, mac_address=mac["address"],
backend_key=backend_port["uuid"], **port_attrs)
return new_port
@cmd_mgr.undo
def _allocate_db_port_undo(new_port):
LOG.info("Rolling back database port...")
if not new_port:
return
try:
with context.session.begin():
db_api.port_delete(context, new_port)
except Exception:
LOG.exception(
"Couldn't rollback db port %s" % backend_port)
# addresses, mac, backend_port, new_port
mac = _allocate_mac(net, port_id, mac_address,
use_forbidden_mac_range=use_forbidden_mac_range)
_allocate_ips(fixed_ips, net, port_id, segment_id, mac)
backend_port = _allocate_backend_port(mac, addresses, net, port_id)
new_port = _allocate_db_port(port_attrs, backend_port, addresses, mac)
return v._make_port_dict(new_port)
def update_port(context, id, port):
"""Update values of a port.
: param context: neutron api request context
: param id: UUID representing the port to update.
: param port: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for 'allow_put'
as listed in the RESOURCE_ATTRIBUTE_MAP object in
neutron/api/v2/attributes.py.
"""
LOG.info("update_port %s for tenant %s" % (id, context.tenant_id))
port_db = db_api.port_find(context, id=id, scope=db_api.ONE)
if not port_db:
raise n_exc.PortNotFound(port_id=id)
port_dict = port["port"]
fixed_ips = port_dict.pop("fixed_ips", None)
admin_only = ["mac_address", "device_owner", "bridge", "admin_state_up",
"device_id"]
always_filter = ["network_id", "backend_key", "network_plugin"]
utils.filter_body(context, port_dict, admin_only=admin_only,
always_filter=always_filter)
# Pre-check the requested fixed_ips before making too many db trips.
# Note that this is the only check we need, since this call replaces
# the entirety of the IP addresses document if fixed_ips are provided.
if fixed_ips:
quota.QUOTAS.limit_check(context, context.tenant_id,
fixed_ips_per_port=len(fixed_ips))
new_security_groups = utils.pop_param(port_dict, "security_groups")
if new_security_groups is not None:
if (Capabilities.TENANT_NETWORK_SG not in
CONF.QUARK.environment_capabilities):
if not STRATEGY.is_provider_network(port_db["network_id"]):
raise q_exc.TenantNetworkSecurityGroupRulesNotEnabled()
if new_security_groups is not None and not port_db["device_id"]:
raise q_exc.SecurityGroupsRequireDevice()
group_ids, security_group_mods = _make_security_group_list(
context, new_security_groups)
quota.QUOTAS.limit_check(context, context.tenant_id,
security_groups_per_port=len(group_ids))
if fixed_ips is not None:
# NOTE(mdietz): we want full control over IPAM since
# we're allocating by subnet instead of
# network.
ipam_driver = ipam.IPAM_REGISTRY.get_strategy(
ipam.QuarkIpamANY.get_name())
addresses, subnet_ids = [], []
ip_addresses = {}
for fixed_ip in fixed_ips:
subnet_id = fixed_ip.get("subnet_id")
ip_address = fixed_ip.get("ip_address")
if not (subnet_id or ip_address):
raise n_exc.BadRequest(
resource="fixed_ips",
msg="subnet_id or ip_address required")
if ip_address and not subnet_id:
raise n_exc.BadRequest(
resource="fixed_ips",
msg="subnet_id required for ip_address allocation")
if subnet_id and ip_address:
ip_netaddr = None
try:
ip_netaddr = netaddr.IPAddress(ip_address).ipv6()
except netaddr.AddrFormatError:
raise n_exc.InvalidInput(
error_message="Invalid format provided for ip_address")
ip_addresses[ip_netaddr] = subnet_id
else:
subnet_ids.append(subnet_id)
port_ips = set([netaddr.IPAddress(int(a["address"]))
for a in port_db["ip_addresses"]])
new_ips = set([a for a in ip_addresses.keys()])
ips_to_allocate = list(new_ips - port_ips)
ips_to_deallocate = list(port_ips - new_ips)
for ip in ips_to_allocate:
if ip in ip_addresses:
# NOTE: Fix for RM10187 - we were losing the list of IPs if
# more than one IP was to be allocated. Track an
# aggregate list instead, and add it to the running total
# after each allocate
allocated = []
ipam_driver.allocate_ip_address(
context, allocated, port_db["network_id"],
port_db["id"], reuse_after=None, ip_addresses=[ip],
subnets=[ip_addresses[ip]])
addresses.extend(allocated)
for ip in ips_to_deallocate:
ipam_driver.deallocate_ips_by_port(
context, port_db, ip_address=ip)
for subnet_id in subnet_ids:
ipam_driver.allocate_ip_address(
context, addresses, port_db["network_id"], port_db["id"],
reuse_after=CONF.QUARK.ipam_reuse_after,
subnets=[subnet_id])
# Need to return all existing addresses and the new ones
if addresses:
port_dict["addresses"] = port_db["ip_addresses"]
port_dict["addresses"].extend(addresses)
# NOTE(morgabra) Updating network_plugin on port objects is explicitly
# disallowed in the api, so we use whatever exists in the db.
net_driver = _get_net_driver(port_db.network, port=port_db)
base_net_driver = _get_net_driver(port_db.network)
# TODO(anyone): What do we want to have happen here if this fails? Is it
# ok to continue to keep the IPs but fail to apply security
# groups? Is there a clean way to have a multi-status? Since
# we're in a beta-y status, I'm going to let this sit for
# a future patch where we have time to solve it well.
kwargs = {}
if new_security_groups is not None:
kwargs["security_groups"] = security_group_mods
net_driver.update_port(context, port_id=port_db["backend_key"],
mac_address=port_db["mac_address"],
device_id=port_db["device_id"],
base_net_driver=base_net_driver,
**kwargs)
port_dict["security_groups"] = security_group_mods
with context.session.begin():
port = db_api.port_update(context, port_db, **port_dict)
# NOTE(mdietz): fix for issue 112, we wanted the IPs to be in
# allocated_at order, so get a fresh object every time
if port_db in context.session:
context.session.expunge(port_db)
port_db = db_api.port_find(context, id=id, scope=db_api.ONE)
return v._make_port_dict(port_db)
def get_port(context, id, fields=None):
"""Retrieve a port.
: param context: neutron api request context
: param id: UUID representing the port to fetch.
: param fields: a list of strings that are valid keys in a
port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
"""
LOG.info("get_port %s for tenant %s fields %s" %
(id, context.tenant_id, fields))
results = db_api.port_find(context, id=id, fields=fields,
scope=db_api.ONE)
if not results:
raise n_exc.PortNotFound(port_id=id)
return v._make_port_dict(results)
def get_ports(context, limit=None, sorts=None, marker=None, page_reverse=False,
filters=None, fields=None):
"""Retrieve a list of ports.
The contents of the list depends on the identity of the user
making the request (as indicated by the context) as well as any
filters.
: param context: neutron api request context
: param filters: a dictionary with keys that are valid keys for
a port as listed in the RESOURCE_ATTRIBUTE_MAP object
in neutron/api/v2/attributes.py. Values in this dictionary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
: param fields: a list of strings that are valid keys in a
port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
"""
LOG.info("get_ports for tenant %s filters %s fields %s" %
(context.tenant_id, filters, fields))
if filters is None:
filters = {}
if "ip_address" in filters:
if not context.is_admin:
raise n_exc.NotAuthorized()
ips = []
try:
ips = [netaddr.IPAddress(ip) for ip in filters.pop("ip_address")]
except netaddr.AddrFormatError:
raise n_exc.InvalidInput(
error_message="Invalid format provided for ip_address")
query = db_api.port_find_by_ip_address(context, ip_address=ips,
scope=db_api.ALL, **filters)
ports = []
for ip in query:
ports.extend(ip.ports)
else:
ports = db_api.port_find(context, limit, sorts, marker,
fields=fields, join_security_groups=True,
**filters)
return v._make_ports_list(ports, fields)
def get_ports_count(context, filters=None):
"""Return the number of ports.
The result depends on the identity of the user making the request
(as indicated by the context) as well as any filters.
: param context: neutron api request context
: param filters: a dictionary with keys that are valid keys for
a port as listed in the RESOURCE_ATTRIBUTE_MAP object
in neutron/api/v2/attributes.py. Values in this dictionary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
NOTE: this method is optional, as it was not part of the originally
defined plugin API.
"""
LOG.info("get_ports_count for tenant %s filters %s" %
(context.tenant_id, filters))
return db_api.port_count_all(context, join_security_groups=True, **filters)
def delete_port(context, id):
"""Delete a port.
: param context: neutron api request context
: param id: UUID representing the port to delete.
"""
LOG.info("delete_port %s for tenant %s" % (id, context.tenant_id))
port = db_api.port_find(context, id=id, scope=db_api.ONE)
if not port:
raise n_exc.PortNotFound(port_id=id)
if 'device_id' in port: # false is weird, but ignore that
LOG.info("delete_port %s for tenant %s has device %s" %
(id, context.tenant_id, port['device_id']))
backend_key = port["backend_key"]
mac_address = netaddr.EUI(port["mac_address"]).value
ipam_driver = _get_ipam_driver(port["network"], port=port)
ipam_driver.deallocate_mac_address(context, mac_address)
ipam_driver.deallocate_ips_by_port(
context, port, ipam_reuse_after=CONF.QUARK.ipam_reuse_after)
net_driver = _get_net_driver(port["network"], port=port)
base_net_driver = _get_net_driver(port["network"])
net_driver.delete_port(context, backend_key, device_id=port["device_id"],
mac_address=port["mac_address"],
base_net_driver=base_net_driver)
with context.session.begin():
db_api.port_delete(context, port)
def _diag_port(context, port, fields):
p = v._make_port_dict(port)
net_driver = _get_net_driver(port.network, port=port)
if 'config' in fields:
p.update(net_driver.diag_port(
context, port["backend_key"], get_status='status' in fields))
return p
def diagnose_port(context, id, fields):
if not context.is_admin:
raise n_exc.NotAuthorized()
if id == "*":
return {'ports': [_diag_port(context, port, fields) for
port in db_api.port_find(context).all()]}
db_port = db_api.port_find(context, id=id, scope=db_api.ONE)
if not db_port:
raise n_exc.PortNotFound(port_id=id)
port = _diag_port(context, db_port, fields)
return {'ports': port}
def _make_security_group_list(context, group_ids):
if not group_ids or not utils.attr_specified(group_ids):
return ([], [])
group_ids = list(set(group_ids))
groups = []
for gid in group_ids:
group = db_api.security_group_find(context, id=gid,
scope=db_api.ONE)
if not group:
raise sg_ext.SecurityGroupNotFound(id=gid)
groups.append(group)
return (group_ids, groups)
| apache-2.0 | 2,644,880,706,528,607,000 | 39.382848 | 79 | 0.594539 | false |
ctmil/meli_oerp | models/posting.py | 1 | 7027 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import fields, osv, models, api
from odoo.tools.translate import _
import logging
from ..melisdk.meli import Meli
import logging
_logger = logging.getLogger(__name__)
from .meli_oerp_config import *
from dateutil.parser import *
from datetime import *
def _ml_datetime(datestr):
try:
#return parse(datestr).isoformat().replace("T"," ")
return parse(datestr).strftime('%Y-%m-%d %H:%M:%S')
except:
return ""
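# Illustrative example (editor's addition, not in the original source):
#     _ml_datetime("2017-02-19T08:22:34-08:00")  ->  "2017-02-19 08:22:34"
# Unparsable input falls through to the empty string returned above.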
class mercadolibre_posting_update(models.TransientModel):
_name = "mercadolibre.posting.update"
_description = "Update Posting Questions"
def posting_update(self, context=None ):
context = context or self.env.context
posting_ids = False
_logger.info("context:")
_logger.info(context)
if ('active_ids' in context):
posting_ids = context['active_ids']
#_logger.info("ids %s", ''.join(ids))
#posting_ids = ids
posting_obj = self.env['mercadolibre.posting']
if (posting_ids):
for posting_id in posting_ids:
# _logger.info("posting_update: %s " % (posting_id) )
posting = posting_obj.browse(posting_id)
posting.posting_query_questions()
return {}
mercadolibre_posting_update()
class mercadolibre_posting(models.Model):
_name = "mercadolibre.posting"
_description = "Posting en MercadoLibre"
def _posting_update( self ):
company = self.env.user.company_id
posting_obj = self.env['mercadolibre.posting']
for posting in self:
update_status = "ok"
posting.posting_update = update_status
posting.posting_query_questions()
#res = {}
#res[posting.id] = update_status
#return res
def posting_query_questions( self ):
#get with an item id
company = self.env.user.company_id
posting_obj = self.env['mercadolibre.posting']
for posting in self:
log_msg = 'posting_query_questions: %s' % (posting.meli_id)
#_logger.info(log_msg)
CLIENT_ID = company.mercadolibre_client_id
CLIENT_SECRET = company.mercadolibre_secret_key
ACCESS_TOKEN = company.mercadolibre_access_token
REFRESH_TOKEN = company.mercadolibre_refresh_token
#
meli = Meli(client_id=CLIENT_ID,client_secret=CLIENT_SECRET, access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN )
if not posting.meli_id:
continue
response = meli.get("/items/"+posting.meli_id, {'access_token':meli.access_token})
product_json = response.json()
#_logger.info( product_json )
if "error" in product_json:
ML_status = product_json["error"]
else:
ML_status = product_json["status"]
ML_permalink = product_json["permalink"]
ML_price = product_json["price"]
#ML_sku = product_json["seller_custom_field"]
posting.write( { 'meli_status': ML_status, 'meli_permalink': ML_permalink, 'meli_price': ML_price } )
if (not company.mercadolibre_cron_get_questions):
return {}
response = meli.get("/questions/search?item_id="+posting.meli_id, {'access_token':meli.access_token})
questions_json = response.json()
questions_obj = self.env['mercadolibre.questions']
if 'questions' in questions_json:
questions = questions_json['questions']
#_logger.info( questions )
cn = 0
for Question in questions:
cn = cn + 1
question_answer = Question['answer']
question_fields = {
'posting_id': posting.id,
'question_id': Question['id'],
'date_created': _ml_datetime(Question['date_created']),
'item_id': Question['item_id'],
'seller_id': Question['seller_id'],
'text': str(Question['text'].encode("utf-8")),
'status': Question['status'],
}
if (question_answer):
question_fields['answer_text'] = str(question_answer['text'].encode("utf-8"))
question_fields['answer_status'] = question_answer['status']
question_fields['answer_date_created'] = _ml_datetime(question_answer['date_created'])
question = questions_obj.search( [('question_id','=',question_fields['question_id'])])
if not question:
question = questions_obj.create( ( question_fields ))
else:
if question:
question.write( (question_fields) )
return {}
def posting_query_all_questions( self, cr, uid, ids, context=None ):
return {}
posting_date = fields.Date('Fecha del posting')
name = fields.Char('Name')
meli_id = fields.Char('Id del item asignado por Meli', size=256)
product_id = fields.Many2one('product.product', 'product_id')
meli_status = fields.Char(string="Estado del producto en MLA", size=256)
meli_permalink = fields.Char(string="Permalink en MercadoLibre", size=512)
meli_price = fields.Char(string='Precio de venta', size=128)
posting_questions = fields.One2many('mercadolibre.questions', 'posting_id', 'Questions')
posting_update = fields.Char(compute=_posting_update, string="Posting Update", store=False)
meli_seller_custom_field = fields.Char('Select Custom Field or SKU', size=256)
mercadolibre_posting()
| agpl-3.0 | -4,238,164,900,268,997,000 | 37.700565 | 129 | 0.556994 | false |
neozhangthe1/coverage_model | build/lib/groundhog/mainLoop.py | 1 | 13913 | """
Main loop (early stopping).
TODO: write more documentation
"""
__docformat__ = 'restructedtext en'
__authors__ = ("Razvan Pascanu "
"KyungHyun Cho "
"Caglar Gulcehre ")
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
class Unbuffered:
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
import sys
import traceback
sys.stdout = Unbuffered(sys.stdout)
# Generic imports
import numpy
import pickle
import gzip
import time
import signal
from groundhog.utils import print_mem, print_time
class MainLoop(object):
def __init__(self,
train_data,
valid_data,
test_data,
model,
algo,
state,
channel,
hooks=None,
reset=-1,
train_cost=False,
validate_postprocess=None,
l2_params=False):
"""
:type train_data: groundhog dataset object
:param train_data: data iterator used for training
:type valid_data: groundhog dataset object
:param valid_data: data iterator used for validation
:type test_data: groundhog dataset object
:param test_data: data iterator used for testing
:type model: groundhog model object
:param model: the model that is supposed to be trained
:type algo: groundhog trainer object
:param algo: optimization algorithm used to optimized the model
:type state: dictionary (or jobman dictionary)
:param state: dictionary containing various hyper-param choices,
but also the current state of the job (the dictionary is used by
jobman to fill in a psql table)
:type channel: jobman handler
:param channel: jobman handler used to communicate with a psql
server
:type hooks: function or list of functions
:param hooks: list of functions that are called every `hookFreq`
steps to carry on various diagnostics
:type reset: int
:param reset: if larger than 0, the train_data iterator position is
reseted to 0 every `reset` number of updates
:type train_cost: bool
:param train_cost: flag saying if the training error (over the
entire training set) should be computed every time the validation
error is computed
:type validate_postprocess: None or function
:param validate_postprocess: function called on the validation cost
every time before applying the logic of the early stopper
:type l2_params: bool
:param l2_params: save parameter norms at each step
"""
###################
# Step 0. Set parameters
###################
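# Editor's sketch (not in the original code): `state` is assumed to be a plain
# dict of hyper-parameters read throughout this class, for example
#     state = {'patience': 5, 'validFreq': -1, 'trainFreq': -1,
#              'loopIters': 100000, 'lr': 0.1, 'minlr': 1e-5, 'prefix': 'run_'}
# Negative validFreq/trainFreq values are replaced by the dataset length below.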
self.train_data = train_data
self.valid_data = valid_data
self.test_data = test_data
self.state = state
self.channel = channel
self.model = model
self.algo = algo
self.valid_id = 0
self.old_cost = 1e21
self.validate_postprocess = validate_postprocess
self.patience = state['patience']
self.l2_params = l2_params
self.train_cost = train_cost
if hooks and not isinstance(hooks, (list, tuple)):
hooks = [hooks]
if self.state['validFreq'] < 0:
self.state['validFreq'] = self.train_data.get_length()
print('Validation computed every', self.state['validFreq'])
elif self.state['validFreq'] > 0:
print('Validation computed every', self.state['validFreq'])
if self.state['trainFreq'] < 0:
self.state['trainFreq'] = self.train_data.get_length()
print('Train frequency set to ', self.state['trainFreq'])
state['bvalidcost'] = 1e21
for (pname, _) in model.properties:
self.state[pname] = 1e20
n_elems = state['loopIters'] // state['trainFreq'] + 1
self.timings = {'step' : 0, 'next_offset' : -1}
for name in self.algo.return_names:
self.timings[name] = numpy.zeros((n_elems,), dtype='float32')
if self.l2_params:
for param in model.params:
self.timings["l2_" + param.name] = numpy.zeros(n_elems, dtype="float32")
n_elems = state['loopIters'] // state['validFreq'] + 1
for pname in model.valid_costs:
self.state['valid'+pname] = 1e20
self.state['test'+pname] = 1e20
self.timings['fulltrain'+pname] = numpy.zeros((n_elems,),
dtype='float32')
self.timings['valid'+pname] = numpy.zeros((n_elems,),
dtype='float32')
self.timings['test'+pname] = numpy.zeros((n_elems,),
dtype='float32')
if self.channel is not None:
self.channel.save()
self.hooks = hooks
self.reset = reset
self.start_time = time.time()
self.batch_start_time = time.time()
def validate(self):
rvals = self.model.validate(self.valid_data)
msg = '** %d validation:' % self.valid_id
self.valid_id += 1
self.batch_start_time = time.time()
pos = self.step // self.state['validFreq']
for k, v in rvals:
msg = msg + ' ' + k + ':%f ' % float(v)
self.timings['valid'+k][pos] = float(v)
self.state['valid'+k] = float(v)
msg += 'whole time %s' % print_time(time.time() - self.start_time)
msg += ' patience %d' % self.patience
print(msg)
if self.train_cost:
valid_rvals = rvals
rvals = self.model.validate(self.train_data, True)
msg = '** %d train:' % (self.valid_id - 1)
for k, v in rvals:
msg = msg + ' ' + k + ':%6.3f ' % float(v)
self.timings['fulltrain' + k] = float(v)
self.state['fulltrain' + k] = float(v)
print(msg)
rvals = valid_rvals
self.state['validtime'] = float(time.time() - self.start_time)/60.
# Just pick the first thing that the cost returns
cost = rvals[0][1]
if self.state['bvalidcost'] > cost:
self.state['bvalidcost'] = float(cost)
for k, v in rvals:
self.state['bvalid'+k] = float(v)
self.state['bstep'] = int(self.step)
self.state['btime'] = int(time.time() - self.start_time)
self.test()
else:
print('No testing', cost, '>', self.state['bvalidcost'])
for k, v in list(self.state.items()):
if 'test' in k:
print(k, v)
print_mem('validate')
if self.validate_postprocess:
return self.validate_postprocess(cost)
return cost
def test(self):
self.model.best_params = [(x.name, x.get_value()) for x in
self.model.params]
numpy.savez(self.state['prefix'] + '_best_params',
**dict(self.model.best_params))
self.state['best_params_pos'] = self.step
if self.test_data is not None:
rvals = self.model.validate(self.test_data)
else:
rvals = []
msg = '>>> Test'
pos = self.step // self.state['validFreq']
for k, v in rvals:
msg = msg + ' ' + k + ':%6.3f ' % v
self.timings['test' + k][pos] = float(v)
self.state['test' + k] = float(v)
print(msg)
self.state['testtime'] = float(time.time()-self.start_time)/60.
def save(self):
start = time.time()
print("Saving the model...")
# ignore keyboard interrupt while saving
s = signal.signal(signal.SIGINT, signal.SIG_IGN)
numpy.savez(self.state['prefix']+'timing.npz',
**self.timings)
if self.state['overwrite']:
self.model.save(self.state['prefix']+'model.npz')
else:
self.model.save(self.state['prefix'] +
'model%d.npz' % self.save_iter)
pickle.dump(self.state, open(self.state['prefix']+'state.pkl', 'wb'))
self.save_iter += 1
signal.signal(signal.SIGINT, s)
print("Model saved, took {}".format(time.time() - start))
# FIXME
def load(self, model_path=None, timings_path=None, skip_timing=False):
if model_path is None:
model_path = self.state['prefix'] + 'model.npz'
if timings_path is None:
timings_path = self.state['prefix'] + 'timing.npz'
try:
self.model.load(model_path)
except Exception:
print('mainLoop: Corrupted model file')
traceback.print_exc()
if not skip_timing:
try:
self.timings = dict(iter(numpy.load(timings_path).items()))
print(self.timings)
except Exception:
print('mainLoop: Corrupted timings file')
traceback.print_exc()
def main(self):
assert self.reset == -1
print_mem('start')
self.state['gotNaN'] = 0
start_time = time.time()
self.start_time = start_time
self.batch_start_time = time.time()
self.step = int(self.timings['step'])
self.algo.step = self.step
self.save_iter = 0
self.save()
if self.channel is not None:
self.channel.save()
self.save_time = time.time()
last_cost = 1.
self.state['clr'] = self.state['lr']
self.train_data.start(self.timings['next_offset']
if 'next_offset' in self.timings
else -1)
while (self.step < self.state['loopIters'] and
last_cost > .1*self.state['minerr'] and
(time.time() - start_time)/60. < self.state['timeStop'] and
self.state['lr'] > self.state['minlr']):
if self.step > 0 and (time.time() - self.save_time)/60. >= self.state['saveFreq']:
self.save()
if self.channel is not None:
self.channel.save()
self.save_time = time.time()
st = time.time()
try:
rvals = self.algo()
self.state['traincost'] = float(rvals['cost'])
self.state['step'] = self.step
last_cost = rvals['cost']
for name in list(rvals.keys()):
self.timings[name][self.step] = float(numpy.array(rvals[name]))
if self.l2_params:
for param in self.model.params:
self.timings["l2_" + param.name][self.step] =\
numpy.mean(param.get_value() ** 2) ** 0.5
if (numpy.isinf(rvals['cost']) or
numpy.isnan(rvals['cost'])) and\
self.state['on_nan'] == 'raise':
self.state['gotNaN'] = 1
self.save()
if self.channel:
self.channel.save()
print('Got NaN while training')
last_cost = 0
if self.valid_data is not None and\
self.step % self.state['validFreq'] == 0 and\
self.step > 1:
valcost = self.validate()
if valcost > self.old_cost * self.state['cost_threshold']:
self.patience -= 1
if 'lr_start' in self.state and\
self.state['lr_start'] == 'on_error':
self.state['lr_start'] = self.step
elif valcost < self.old_cost:
self.patience = self.state['patience']
self.old_cost = valcost
if self.state['divide_lr'] and \
self.patience < 1:
# Divide lr by 2
self.algo.lr = self.algo.lr / self.state['divide_lr']
bparams = dict(self.model.best_params)
self.patience = self.state['patience']
for p in self.model.params:
p.set_value(bparams[p.name])
if self.state['hookFreq'] > 0 and \
self.step % self.state['hookFreq'] == 0 and \
self.hooks:
[fn() for fn in self.hooks]
if self.reset > 0 and self.step > 1 and \
self.step % self.reset == 0:
print('Resetting the data iterator')
self.train_data.reset()
self.step += 1
self.timings['step'] = self.step
self.timings['next_offset'] = self.train_data.next_offset
except KeyboardInterrupt:
break
self.state['wholetime'] = float(time.time() - start_time)
if self.valid_data is not None:
self.validate()
self.save()
if self.channel:
self.channel.save()
print('Took', (time.time() - start_time)/60., 'min')
avg_step = self.timings['time_step'][:self.step].mean()
avg_cost2expl = self.timings['log2_p_expl'][:self.step].mean()
print("Average step took {}".format(avg_step))
print("That amounts to {} sentences in a day".format(1 / avg_step * 86400 * self.state['bs']))
print("Average log2 per example is {}".format(avg_cost2expl))
| bsd-3-clause | -9,135,636,892,475,876,000 | 37.327824 | 102 | 0.51707 | false |
japsu/desugaala | status/models.py | 1 | 1102 | from collections import defaultdict
from django.db import models
from vote.models import Category, Option
class Watch(models.Model):
category = models.ForeignKey(Category)
def evaluate(self):
results = dict((wo.option, 0) for wo in self.watchoption_set.all())
for ballot_category in self.category.ballotcategory_set.all():
for ballot_option in ballot_category.ballotoption_set.all().order_by('order'):
if self.watchoption_set.filter(option=ballot_option.option):
results[ballot_option.option] += 1
break
results = list(i for i in results.iteritems())
results.sort(key=lambda (option, num_votes): -num_votes)
return results
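# Illustrative result (editor's example, not in the original): for a Watch over
# two options A and B, evaluate() returns something like
#     [(<Option: A>, 12), (<Option: B>, 7)]
# i.e. each watched option paired with the number of ballots on which it is the
# highest-ranked watched option, sorted by descending count.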
def __unicode__(self):
options = u" vs. ".join(i.option.title for i in self.watchoption_set.all())
category = self.category.title if self.category else u"None"
return u"{category}: {options}".format(**locals())
class WatchOption(models.Model):
watch = models.ForeignKey(Watch)
option = models.ForeignKey(Option)
def __unicode__(self):
return self.option.title if self.option else u"None" | mit | -4,102,352,009,066,950,000 | 31.441176 | 84 | 0.702359 | false |
beatrizChagas/scrm-solutions | extracao/rsoservices/service_preprocessing_twitter_v1_ACS.py | 1 | 5056 | # -*- coding: UTF-8 -*-
import sys
import emoji
import mysql.connector
from extracao.rsoservices.config import config
from extracao.rsoservices.emoji_dict import emoticon_dict
from extracao.rsoservices.preprocessing_dict import EMOJI_CARACTER
add_message_table0 = ("INSERT INTO extracao_processamento_tweet "
"(tweet_id, workspace_id, tweet_origin, tweet_tratament, tweet_demojize, tweet_process) "
"VALUES (%s, %s, %s, %s, %s, %s)")
def preprocessamento_tweets(workspace_id):
conex = mysql.connector.connect(**config)
con = conex.cursor()
con.execute("SELECT id, tweet FROM extracao_tweet WHERE workspace_id=%s;", (workspace_id,))
try:
mensagens = con.fetchall()
for msn in mensagens:
id_tweet=msn[0]
message_origin=msn[1]
con.execute("SELECT tweet_id FROM extracao_processamento_tweet WHERE tweet_id=%s;", (id_tweet,))
if con.fetchall():
continue
else:
message_tratament = tratament(message_origin)
if message_tratament == None:
message_origin=None
elif message_tratament == message_origin:
message = emoji(message_tratament)
message_demojize = None
message_tratament=None
con.execute(add_message_table0, (id_tweet, workspace_id, message_origin, message_tratament, message_demojize, message))
conex.commit()
else:
message_demojize = None
message = emoji(message_tratament)
con.execute(add_message_table0, (id_tweet, workspace_id, message_origin, message_tratament, message_demojize, message))
conex.commit()
continue
continue
except Exception as e:
print("EXCEPTION!!! insert into db failed:", e)
conex.close()
print("fim")
def tratament(s):
if (s == '') or (s == None):
s = None
else:
s = s.replace('\n', ' ')
s = s.replace('\r', ' ')
s = s.replace('\t', ' ')
s = s.replace('\v', ' ')
s = s.replace(",),", ' ')
s = s.replace("('", ' ')
s = s.replace(",)]", ' ')
s = s.replace("'", ' ')
s = s.replace('("', ' ')
return s
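# Editor's note (illustrative, not original commentary): tratament() is a light
# scrubber for raw tweet text, e.g. it replaces newlines/tabs, stray quotes and
# tuple punctuation such as "('" or ",)]" with spaces, and maps empty or None
# input to None.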
def emoji(origin):
try:
import emoji
s = emoji.demojize(origin)
s = s.replace('::', ': :')
lista_texto = s.split()
print(lista_texto)
lista_demoj=[]
for palavra in lista_texto:
parada=False
cont=0
while not parada:
for group in EMOJI_CARACTER.items():
cont+=1
qtd_emojis=EMOJI_CARACTER.__len__()
chave=group[0]
valor=group[1]
if chave != palavra:
if chave in palavra:
palavra=palavra.split(chave)
palavra=''.join(palavra)
lista_demoj.append(palavra)
lista_demoj.append(valor)
#print(lista_demoj)
#demoj=''.join(lista_demoj)
parada=True
break
else:
if palavra in lista_demoj:
parada=True
break
elif palavra==chave:
lista_demoj.append(valor)
parada=True
break
elif chave not in palavra and cont <= qtd_emojis:
continue
else:
lista_demoj.append(palavra)
#demoj=''.join(lista_demoj)
parada=True
break
#print(lista_demoj)
#demoj=''.join(lista_demoj)
#print(demoj)
else:
lista_demoj.append(valor)
#print(lista_demoj)
#demoj=''.join(lista_demoj)
parada=True
break
demoj=' '.join(lista_demoj)
print(origin)
print(demoj)
if demoj == origin:
demoj=None
return demoj
else:
return demoj
except Exception as e:
print(e)
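# Editor's sketch of the intent (assumptions, not original comments): the
# demojize step rewrites emoji as ":alias:" tokens (e.g. a snake emoji becomes
# ":snake:"), '::' is split so adjacent aliases separate, and the word loop
# above swaps any token matching a key of EMOJI_CARACTER for its mapped value
# before re-joining with spaces; if nothing changed, None is returned.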
def process_tweet(workspace_id):
preprocessamento_tweets(workspace_id)
if (__name__ == '__main__'):
process_tweet() | gpl-3.0 | 3,370,406,689,246,566,000 | 35.746269 | 139 | 0.433347 | false |
mattman00000/inventory-tweaks | invtweaks_docs/conf.py | 1 | 7895 | # -*- coding: utf-8 -*-
#
# Inventory Tweaks documentation build configuration file, created by
# sphinx-quickstart on Sat May 12 12:32:31 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# INVENTORY TWEAKS VERSION
invtweaks_version = '1.58-dev'
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Inventory Tweaks'
copyright = u'2012, Jimeo Wan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = invtweaks_version
# The full version, including alpha/beta/rc tags.
release = invtweaks_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature-custom'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'InventoryTweaksdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'InventoryTweaks.tex', u'Inventory Tweaks Documentation',
u'Jimeo Wan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'inventorytweaks', u'Inventory Tweaks Documentation',
[u'Jimeo Wan'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'InventoryTweaks', u'Inventory Tweaks Documentation',
u'Jimeo Wan', 'InventoryTweaks', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit | -6,287,224,849,585,732,000 | 30.834677 | 80 | 0.707283 | false |
sio2project/filetracker | setup.py | 1 | 1364 | from os import path
import io
from setuptools import setup, find_packages
with io.open(
path.join(path.abspath(path.dirname(__file__)), 'README.md'), encoding='utf-8'
) as f:
long_description = f.read()
setup(
name='filetracker',
version='2.1.5',
author='SIO2 Project Team',
author_email='[email protected]',
description='Filetracker caching file storage',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/sio2project/filetracker',
license='GPL',
packages=find_packages(),
install_requires=[
'bsddb3==6.2.7',
'flup6',
'gunicorn==19.9.0',
'gevent==1.3.1',
'greenlet==0.4.13', # for compatibility with this version of gevent
'progressbar2',
'requests',
'six',
],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest',
],
entry_points={
'console_scripts': [
'filetracker = filetracker.client.shell:main',
'filetracker-server = filetracker.servers.run:main',
'filetracker-cache-cleaner = filetracker.scripts.cachecleaner:main',
'filetracker-migrate = filetracker.scripts.migrate:main',
'filetracker-recover = filetracker.scripts.recover:main',
],
},
)
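# Editor's usage note (not part of the original setup.py): after installation
# (e.g. `pip install .`), each console_scripts entry above becomes a shell
# command such as `filetracker-server`, dispatching to the listed
# module:function pair (filetracker.servers.run:main in that example).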
| gpl-3.0 | 2,672,298,170,505,186,000 | 28.652174 | 82 | 0.611437 | false |
mohamedhagag/community-addons | analytic_resource_plan/model/analytic_resource_plan.py | 1 | 10135 | # -*- coding: utf-8 -*-
# © 2015 Eficent Business and IT Consulting Services S.L.
# (Jordi Ballester Alomar)
#
# © 2015 Serpent Consulting Services Pvt. Ltd.
# (Sudhir Arya)
#
# © 2016 Matmoz d.o.o.
# (Matjaž Mozetič)
#
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
import time
from openerp import api, fields, models
from openerp.tools.translate import _
from openerp.exceptions import Warning as UserError
from openerp.exceptions import ValidationError
class AnalyticResourcePlanLine(models.Model):
_name = 'analytic.resource.plan.line'
_description = "Analytic Resource Planning lines"
_inherit = ['mail.thread', 'ir.needaction_mixin']
@api.multi
@api.depends('child_ids')
def _has_child(self):
for line in self:
line.has_child = bool(line.child_ids)
account_id = fields.Many2one(
'account.analytic.account',
'Analytic Account',
required=True,
ondelete='cascade',
select=True,
domain=[('type', '<>', 'view')],
readonly=True,
states={'draft': [('readonly', False)]}
)
name = fields.Char(
'Activity description',
required=True,
readonly=True,
states={'draft': [('readonly', False)]}
)
date = fields.Date(
'Date',
required=True,
select=True,
readonly=True,
states={'draft': [('readonly', False)]},
default=lambda *a: time.strftime('%Y-%m-%d')
)
state = fields.Selection(
[
('draft', 'Draft'),
('confirm', 'Confirmed')
],
'Status',
select=True,
required=True,
readonly=True,
help=' * The \'Draft\' status is '
'used when a user is encoding a new and '
'unconfirmed resource plan line. \n* '
'The \'Confirmed\' status is used for to confirm '
'the execution of the resource plan lines.',
default='draft'
)
product_id = fields.Many2one(
'product.product',
'Product',
readonly=True,
required=True,
states={'draft': [('readonly', False)]}
)
product_uom_id = fields.Many2one(
'product.uom',
'UoM',
required=True,
readonly=True,
states={'draft': [('readonly', False)]}
)
unit_amount = fields.Float(
'Planned Quantity',
readonly=True,
required=True,
states={'draft': [('readonly', False)]},
help='Specifies the quantity that has '
'been planned.',
default=1
)
notes = fields.Text(
'Notes'
)
parent_id = fields.Many2one(
'analytic.resource.plan.line',
'Parent',
readonly=True,
ondelete='cascade'
)
child_ids = fields.One2many(
comodel_name='analytic.resource.plan.line',
inverse_name='parent_id',
string='Child lines'
)
has_child = fields.Boolean(
compute='_has_child',
string="Child lines"
)
analytic_line_plan_ids = fields.One2many(
'account.analytic.line.plan',
'resource_plan_id',
'Planned costs',
readonly=True
)
price_unit = fields.Float(
string='Cost Price',
groups='project.group_project_manager',
)
price_total = fields.Float(
store=False,
compute='_compute_get_price_total',
string='Total Cost',
groups='project.group_project_manager',
)
resource_type = fields.Selection(
selection=[('task', 'Task'), ('procurement', 'Procurement')],
string='Type',
required=True,
default='task'
)
user_id = fields.Many2one(
comodel_name='res.users',
string='Assign To',
ondelete='set null'
)
@api.multi
def copy(self, default=None):
self.ensure_one()
if default is None:
default = {}
default['parent_id'] = False
default['analytic_line_plan_ids'] = []
res = super(AnalyticResourcePlanLine, self).copy(default)
return res
# TODO: Solve TypeError: can only concatenate list (not "NoneType") to list
# on raise error
@api.model
def _prepare_analytic_lines(self):
plan_version_obj = self.env['account.analytic.plan.version']
journal_id = (
self.product_id.expense_analytic_plan_journal_id
and self.product_id.expense_analytic_plan_journal_id.id
or False
)
general_account_id = (
self.product_id.product_tmpl_id.property_account_expense.id
)
if not general_account_id:
general_account_id = (
self.product_id.categ_id.property_account_expense_categ.id
)
if not general_account_id:
raise UserError(
_(
'There is no expense account defined '
'for this product: "%s" (id:%d)'
) % (self.product_id.name, self.product_id.id,)
)
default_plan = plan_version_obj.search(
[('default_resource_plan', '=', True)],
limit=1
)
if not default_plan:
raise UserError(
_(
'No active planning version for resource '
'plan exists.'
)
)
return [{
'resource_plan_id': self.id,
'account_id': self.account_id.id,
'name': self.name,
'date': self.date,
'product_id': self.product_id.id,
'product_uom_id': self.product_uom_id.id,
'unit_amount': self.unit_amount,
'amount': -1 * self.product_id.standard_price * self.unit_amount,
'general_account_id': general_account_id,
'journal_id': journal_id,
'notes': self.notes,
'version_id': default_plan.id,
'currency_id': self.account_id.company_id.currency_id.id,
# 'amount_currency': (
# -1 * self.product_id.standard_price * self.unit_amount
# ),
}]
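# Editor's sketch (not in the original module): the single dict above is what
# create_analytic_lines() passes to account.analytic.line.plan.create(); the
# planned amount is the negated cost, e.g. a standard_price of 50.0 with
# unit_amount 2 yields amount == -100.0.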
@api.model
def create_analytic_lines(self):
res = []
line_plan_obj = self.env['account.analytic.line.plan']
lines_vals = self._prepare_analytic_lines()
for line_vals in lines_vals:
res.append(line_plan_obj.create(line_vals))
return res
@api.model
def _delete_analytic_lines(self):
line_plan_obj = self.env['account.analytic.line.plan']
ana_line = line_plan_obj.search([('resource_plan_id', '=', self.id)])
ana_line.unlink()
return True
@api.multi
def action_button_draft(self):
for line in self:
for child in line.child_ids:
if child.state not in ('draft', 'plan'):
raise UserError(
_(
'All the child resource plan lines must '
' be in Draft state.'
)
)
line._delete_analytic_lines()
return self.write({'state': 'draft'})
@api.multi
def action_button_confirm(self):
for line in self:
if line.unit_amount == 0:
raise UserError(
_(
'Quantity should be greater than 0.'
)
)
if not line.child_ids:
line.create_analytic_lines()
return self.write({'state': 'confirm'})
@api.onchange('product_id')
def on_change_product_id(self):
if self.product_id:
self.name = self.product_id.name
self.product_uom_id = (
self.product_id.uom_id
and self.product_id.uom_id.id
or False
)
self.price_unit = self.product_id.standard_price
@api.onchange('account_id')
def on_change_account_id(self):
if self.account_id:
if self.account_id.date:
self.date = self.account_id.date
@api.multi
def write(self, vals):
analytic_obj = self.env['account.analytic.account']
if 'account_id' in vals:
analytic = analytic_obj.browse(vals['account_id'])
if vals.get('date', False):
vals['date'] = analytic.date
return super(AnalyticResourcePlanLine, self).write(vals)
@api.multi
def unlink(self):
for line in self:
if line.analytic_line_plan_ids:
raise UserError(
_(
'You cannot delete a record that refers to '
'analytic plan lines!'
)
)
return super(AnalyticResourcePlanLine, self).unlink()
# PRICE DEFINITIONS
@api.multi
@api.depends('price_unit', 'unit_amount')
def _compute_get_price_total(self):
for resource in self:
resource.price_total = resource.price_unit * resource.unit_amount
@api.multi
def _get_pricelist(self):
self.ensure_one()
partner_id = self._get_partner()
if partner_id:
if partner_id.property_product_pricelist:
return partner_id.property_product_pricelist
else:
return False
# RESOURCE TYPE
@api.onchange('resource_type')
def resource_type_change(self):
if self.resource_type == 'procurement':
self.user_id = False
@api.multi
@api.constrains('resource_type', 'product_uom_id')
def _check_description(self):
for resource in self:
if resource.resource_type == 'task' and (
resource.product_uom_id.category_id != (
self.env.ref('product.uom_categ_wtime'))):
raise ValidationError(_(
"When resource type is task, "
"the uom category should be time"))
| agpl-3.0 | -7,276,925,191,341,370,000 | 30.169231 | 79 | 0.531096 | false |
petershvets/spark-etl | spark_etl_extract.py | 1 | 9475 | #!/usr/bin/python
from simple_salesforce import Salesforce
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from pyspark import Row
from pyspark.sql.types import *
from pyspark.sql.functions import udf  # needed for the UDF definitions below
from optparse import OptionParser
from pyspark.sql import DataFrameWriter
import json
import re
import os
from datetime import datetime
# *** SPARK-ETL packages
import util
import udf_spark_etl
def main(sc, sqlContext, properties_file, spark_etl_logger):
""" This is main data extraction functionality
Data is extracted from SFDC and loaded into Spark SQL temp tables
"""
startTime = datetime.now()
# Enable logging
spark_etl_logger.info("***** Main process execution started at: "+str(startTime))
# Get app environment variables
d_app_variables = util.get_app_variables()
spark_etl_logger.info("Application environment variables: %s" %(d_app_variables))
spark_etl_logger.info("Processing Spark ETL properties file: %s" %(properties_file))
##### Get table properties defined in respective table ETL config file ####
# Store table properties in local dictionary for servicing the script
#No need to pass SPARK_ETL_CONF_DIR variable as driver script passes file with absolute path
dict_tbl_properties = util.get_json_config('', properties_file)
##### Defined SOQL statement takes precedence over list of source columns #####
##### SOQL statement will be proccessed and related metadata will be extracted from it
if len(dict_tbl_properties["soql_query"]) > 0:
# Process SOQL query if it is defined in config file
soqlStmt = dict_tbl_properties["soql_query"]
spark_etl_logger.info("Defined SOQL statement: "+ soqlStmt)
# Process list of fields and define schema for creating RDD
schemaCol = re.findall('SELECT\s(.+)\sFROM', dict_tbl_properties["soql_query"], flags=re.IGNORECASE)[0]
spark_etl_logger.info("Columns extracted from SOQL: " + schemaCol)
# Removing extra whitespaces from string elements while converting
schemaList = [rec.strip() for rec in schemaCol.split(',')]
# Convert column names into StructType for RDD
fields = [StructField(field_name, StringType(), True) for field_name in schemaList]
schema = StructType(fields)
# Define source table name - extract from SOQL Query
src_tbl_name = re.findall("FROM\s(\S+)", soqlStmt, flags=re.IGNORECASE)[0]
spark_etl_logger.info("Source table name: " + src_tbl_name)
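# Editor's illustration (hypothetical query, not from any real config): for
#     soql_query = "SELECT Id, Name FROM Account LIMIT 10"
# the regexes above give schemaCol == "Id, Name", schemaList == ['Id', 'Name']
# and src_tbl_name == "Account".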
# Define target table name
tgt_table_name = dict_tbl_properties["tgt_table"]
spark_etl_logger.info("Target table name: " + tgt_table_name)
else:
spark_etl_logger.info("SOQL statement is not defined, will process src_table and src_columns properties")
# Constructing SOQL statement from properties provided, converting list to str
soqlStmt = "SELECT " + ', '.join(dict_tbl_properties["src_columns"]) \
+ " FROM " \
+ dict_tbl_properties["src_table"] \
+ " " + dict_tbl_properties["where"] \
+ " " + dict_tbl_properties["limit"]
spark_etl_logger.info("Constructed SOQL statement: %s" %(soqlStmt))
# Process list of fields and define schema for creating RDD
schemaList = dict_tbl_properties["src_columns"]
spark_etl_logger.info("Schema from config file: %s" %(schemaList))
fields = [StructField(field_name, StringType(), True) for field_name in schemaList]
schema = StructType(fields)
# Define source table name
src_tbl_name = dict_tbl_properties["src_table"]
spark_etl_logger.info("Source table name: " + src_tbl_name)
# Define target table name for load into target data storage of your choice
tgt_table_name = dict_tbl_properties["tgt_table"]
spark_etl_logger.info("Target table name: " + tgt_table_name)
################### End process table properties defined in table ETL config file ##################
# Get Salesforce connection details from connections json file
spark_etl_logger.info("Processing SFDC connections information file sfdc_connections.json")
d_sfdc_conn = util.get_json_config(d_app_variables['SPARK_ETL_CONN_DIR'], "sfdc_connections.json")
spark_etl_logger.info("SFDC Connections: %s" %(list(d_sfdc_conn.keys())))
# Process SFDC Connection details
spark_etl_logger.info("SFDC Connection details: %s" %(d_sfdc_conn[dict_tbl_properties["sfdc_connection"]]))
# Establish connection to Salesforce. Using Simple-Salesforce package
exec("sf=" + util.get_sfdc_conn(**d_sfdc_conn[dict_tbl_properties["sfdc_connection"]]), globals())
###### Retrieve source table properties - use it to define target table DDL ####
#
# Store object description in list of dictionaries
# This structure returned by Simple-Salesforce
exec("tblDesc = sf."+src_tbl_name+".describe()", globals())
lColProperties = ['name', 'type', 'length', 'precision', 'custom', 'scale']
columnProperties = list()
for line in tblDesc['fields']: # Iterate through the list of dictionaries
# Keep only needed properties listed in lColProperties list and
# columns mapped in config properties file and remove the rest
rec = {k:line[k] for k in (lColProperties) if line["name"] in list(dict_tbl_properties["columns_map"].keys())}
if len(rec) == 0:continue
columnProperties.append(rec)
spark_etl_logger.info("Column properties: %s" %(rec))
# Record table properties in json file
with open(os.path.join(d_app_variables['SPARK_ETL_LOG_DIR'],tgt_table_name+"_schema.json"), "w") as tableMetadata_file:
json.dump(columnProperties, tableMetadata_file)
# Build DDL in order to create table in MySQL db
for record in columnProperties:
spark_etl_logger.info("Column MySQL datatype: " + record["name"]+" Type:"+record["type"]+" New: "+util.get_sfdc_mysql_dt(record["type"], str(record["length"]), str(record["precision"]), str(record["scale"])))
#*********************** Start Data Acquisition **************************#
#
# Extract data from SFDC - run SOQL statement.
# sf.query returns a list of OrderedDict
queryResultRaw = sf.query_all(soqlStmt)
#*********************** End Data Acquisition ****************************#
#********************* Clean up dataset *************************#
# Remove unrelated record metadata provided by SFDC
queryResult = list()
for line in queryResultRaw['records']:
rec = [(k, str(v)) for k, v in line.items() if k != "attributes"]
queryResult.append(rec)
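# Editor's note (illustrative record, not actual data): Simple-Salesforce rows
# look roughly like
#     OrderedDict([('attributes', {...}), ('Id', '001xx...'), ('Name', 'Acme')])
# and the comprehension above keeps only [('Id', '001xx...'), ('Name', 'Acme')].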
# Create RDD
v_rdd = sc.parallelize(queryResult)
rddElemCount = v_rdd.count()
spark_etl_logger.info("RDD was successfully created")
spark_etl_logger.info("Dataset contains: "+ str(rddElemCount) + " records")
# Create DataFrame from RDD
global sqlDataFrame, sqlDFPK
sqlDataFrame = v_rdd.map(lambda l: Row(**dict(l))).toDF()
spark_etl_logger.info("Generating PK")
sqlDFPK = udf_spark_etl.generate_pk('WID', sqlDataFrame)
#sqlDFPK = sqlDataFrame.withColumn('WID', monotonicallyIncreasingId()+1)
spark_etl_logger.info("Done generating PK")
spark_etl_logger.info("Created dataframe with extracted data:: ")
sqlDFPK.printSchema()
sqlDFPK.show()
####################### UDF functions #########################
# Create UDFs
#
# logic to handle null values
slen = udf(lambda s: 0 if s is None else len(s), IntegerType())
StrConcat = udf(lambda s: "ADD_SOMETHING"+s, StringType())
####################### End UDF functions #########################
######################## Mapping columns ############################
# Create a dict out of column list in form
for k,v in sorted(dict_tbl_properties["columns_map"].items()):
spark_etl_logger.info("Column mapping: "+k+":"+v)
# Construct command for column mapping
wCol =''
v_dfSQL_col = ''
for k,v in sorted(dict_tbl_properties["columns_map"].items()):
#wCol = wCol + ".withColumn(\'"+v+"\' , "+dfColumnsOrig+"."+k+")"
wCol = wCol + ".withColumnRenamed(\'"+k+"\' , \'"+v+"\')"
v_dfSQL_col = v_dfSQL_col + "\""+v+"\","
dfSQL_col = v_dfSQL_col.rstrip(',')
spark_etl_logger.info("The following command will be executed: dfRemapped = sqlDFPK %s" %(wCol))
# exec(dfColumnsRenamed+" = "+dfColumnsOrig+wCol, globals())
exec("global dfRemapped; dfRemapped = sqlDFPK"+wCol, globals())
dfRemapped.printSchema()
dfRemapped.show()
######################## End mapping columns ########################
# Generate PK
# Sample
#df0 = sc.parallelize(range(2), 2).mapPartitions(lambda x: [(1,), (2,), (3,)]).toDF(['col1'])
#df0.select(monotonicallyIncreasingId().alias('id')).collect()
#################### Register DataFrame as Temp Table for SQL operatoins ####################
spark_etl_logger.info("Registering remapped data frame as Spark SQL temp table")
dfRemapped.registerTempTable(tgt_table_name)
# Run SQL (returns RDD)
rddSQL = sqlContext.sql("SELECT * FROM "+ tgt_table_name)
# Write DataFrame into AWS S3 bucket
print("Serialize DF into S3")
# dfRemapped.repartition(1).write.save("s3n://hive-qs-data/"+tgt_table_name+".json", "json", )
# dfRemapped.write.mode('append').json("s3n://hive-qs-data/"+tgt_table_name)
# rddSQL.rdd.saveAsTextFile(tgt_table_name+".csv")
# dfRemapped.rdd.map(lambda rec: ",".join([str(col) for col in rec])).saveAsTextFile("s3n://hive-qs-data/"+tgt_table_name)
# dfRemapped.repartition(1).rdd.map(lambda rec: ",".join([str(col) for col in rec])).saveAsTextFile("s3n://hive-qs-data/"+tgt_table_name)
print("Done serialize DF into S3")
endTime = datetime.now()
spark_etl_logger.info("***** Main process execution completed at: " + str(endTime))
spark_etl_logger.info("***** Main process execution took: " + str(endTime - startTime))
| apache-2.0 | -803,472,950,311,587,300 | 45.446078 | 210 | 0.688021 | false |
JensAstrup/pyEchosign | tests/test_agreement.py | 1 | 8056 | from unittest import TestCase
from six import StringIO
from pyEchosign.exceptions.echosign import PermissionDenied
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
from pyEchosign.classes.agreement import Agreement
from pyEchosign.classes.account import EchosignAccount
from pyEchosign.exceptions.internal import ApiError
class TestAccount(TestCase):
@classmethod
def setup_class(cls):
cls.mock_get_patcher = patch('pyEchosign.classes.account.requests.get')
cls.mock_get = cls.mock_get_patcher.start()
cls.mock_put_patcher = patch('pyEchosign.classes.agreement.requests.put')
cls.mock_put = cls.mock_put_patcher.start()
cls.mock_post_patcher = patch('pyEchosign.classes.agreement.requests.post')
cls.mock_post = cls.mock_post_patcher.start()
def test_cancel_agreement_passes(self):
mock_response = Mock()
self.mock_get_patcher = patch('pyEchosign.classes.account.requests.get')
self.mock_get = self.mock_get_patcher.start()
e = EchosignAccount('a string')
e.api_access_point = 'http://echosign.com'
agreement = Agreement(account=e)
agreement.name = 'Test Agreement'
agreement.fully_retrieved = False
agreement.echosign_id = '123'
agreement.date = '2017-02-19T08:22:34-08:00'
mock_response.status_code = 200
# Assign our mock response as the result of our patched function
self.mock_put.return_value = mock_response
agreement.cancel()
def test_cancel_agreement_401_raises_error(self):
mock_response = Mock()
self.mock_get_patcher = patch('pyEchosign.classes.account.requests.get')
self.mock_get = self.mock_get_patcher.start()
e = EchosignAccount('an invalid string')
e.api_access_point = 'http://echosign.com'
agreement = Agreement(account=e)
agreement.name = 'Test Agreement'
agreement.fully_retrieved = False
agreement.echosign_id = '123'
agreement.date = '2017-02-19T08:22:34-08:00'
mock_response.status_code = 401
# Assign our mock response as the result of our patched function
self.mock_put.return_value = mock_response
with self.assertRaises(PermissionDenied):
agreement.cancel()
def test_cancel_agreement_500_raises_error(self):
""" Test that an invalid response due to an issue with the API, not the package, raises an Exception """
mock_response = Mock()
self.mock_get_patcher = patch('pyEchosign.classes.account.requests.get')
self.mock_get = self.mock_get_patcher.start()
account = EchosignAccount('an invalid string')
account.api_access_point = 'http://echosign.com'
agreement = Agreement(account=account)
agreement.name = 'Test Agreement'
agreement.fully_retrieved = False
agreement.echosign_id = '123'
agreement.date = '2017-02-19T08:22:34-08:00'
mock_response.status_code = 500
# Assign our mock response as the result of our patched function
self.mock_put.return_value = mock_response
with self.assertRaises(ApiError):
agreement.cancel()
def test_delete_agreement_passes(self):
mock_response = Mock()
self.mock_get_patcher = patch('pyEchosign.classes.account.requests.get')
self.mock_get = self.mock_get_patcher.start()
account = EchosignAccount('an invalid string')
account.api_access_point = 'http://echosign.com'
agreement = Agreement(account=account)
agreement.name = 'Test Agreement'
agreement.fully_retrieved = False
agreement.echosign_id = '123'
agreement.date = '2017-02-19T08:22:34-08:00'
mock_response.status_code = 200
# Assign our mock response as the result of our patched function
self.mock_put.return_value = mock_response
agreement.cancel()
def test_delete_agreement_401_raises_error(self):
mock_response = Mock()
self.mock_get_patcher = patch('pyEchosign.classes.account.requests.get')
self.mock_get = self.mock_get_patcher.start()
account = EchosignAccount('an invalid string')
account.api_access_point = 'http://echosign.com'
agreement = Agreement(account=account)
agreement.name = 'Test Agreement'
agreement.fully_retrieved = False
agreement.echosign_id = '123'
agreement.date = '2017-02-19T08:22:34-08:00'
mock_response.status_code = 401
# Assign our mock response as the result of our patched function
self.mock_put.return_value = mock_response
with self.assertRaises(PermissionDenied):
agreement.cancel()
def test_create_agreement(self):
json_response = dict(userAgreementList=[dict(displayDate='2017-09-09T09:33:53-07:00', esign=True, displayUserSetInfos=[
{'displayUserSetMemberInfos': [{'email': '[email protected]'}]}], agreementId='123', name='test_agreement',
latestVersionId='v1', status='WAITING_FOR_MY_SIGNATURE')])
mock_response = Mock()
self.mock_get_patcher = patch('pyEchosign.classes.account.requests.get')
self.mock_get = self.mock_get_patcher.start()
account = EchosignAccount('account')
account.api_access_point = 'http://echosign.com'
mock_response.json.return_value = json_response
mock_response.status_code = 200
mock_agreement_get_patcher = patch('pyEchosign.classes.agreement.requests.get')
mock_agreement_get = mock_agreement_get_patcher.start()
mock_agreement_get.return_value = mock_response
agreements = account.get_agreements()
agreements = list(agreements)
self.assertEqual(len(agreements), 1)
self.assertEqual(agreements[0].name, 'test_agreement')
# Reset the patch for the Account - otherwise exceptions will ensue
self.mock_get_patcher = patch('pyEchosign.classes.account.requests.get')
self.mock_get = self.mock_get_patcher.start()
def test_send_reminder(self):
""" Test that reminders are sent without exceptions """
mock_response = Mock()
account = EchosignAccount('account')
account.api_access_point = 'http://echosign.com'
mock_response.status_code = 200
self.mock_post.return_value = mock_response
agreement = Agreement(account=account)
agreement.name = 'Test Agreement'
agreement.fully_retrieved = False
agreement.echosign_id = '123'
agreement.date = '2017-02-19T08:22:34-08:00'
agreement.send_reminder()
agreement.send_reminder('Test')
agreement.send_reminder(None)
def test_get_form_data(self):
""" Test that form data is retrieved and returned correctly """
mock_response = Mock()
account = EchosignAccount('account')
account.api_access_point = 'http://echosign.com'
mock_response.status_code = 200
agreement = Agreement(account=account)
agreement.name = 'Test Agreement'
agreement.fully_retrieved = False
agreement.echosign_id = '123'
agreement.date = '2017-02-19T08:22:34-08:00'
mock_response.text = 'Column,Column2,Column3'
mock_response.status_code = 200
mock_get_patcher = patch('pyEchosign.classes.agreement.requests.get')
mock_get = mock_get_patcher.start()
mock_get.return_value = mock_response
form_data = agreement.get_form_data()
self.assertIsInstance(form_data, StringIO)
data = form_data.read()
self.assertEqual(data, mock_response.text)
mock_get_patcher.stop() | mit | -2,151,252,365,031,162,400 | 34.963303 | 127 | 0.636172 | false |
numb3r33/StumbpleUponChallenge | src/data/make_dataset.py | 1 | 1475 | import pandas as pd
import numpy as np
import json
from unidecode import unidecode
def extract_domain(url):
# extract domains
domain = url.lower().split('/')[2]
domain_parts = domain.split('.')
# e.g. co.uk
if domain_parts[-2] not in ['com', 'co']:
return '.'.join(domain_parts[-2:])
else:
return '.'.join(domain_parts[-3:])
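# Illustrative examples (editor's addition, not in the original):
#     extract_domain("http://www.example.com/page")  -> "example.com"
#     extract_domain("http://news.bbc.co.uk/story")  -> "bbc.co.uk"
# The second branch keeps three labels for compound, co.uk-style domains.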
def load_csv(filename):
return pd.read_table(filename)
def parse_data(df):
data = []
columns = df.columns
for key, row in df.iterrows():
item = {}
for column in columns:
item[column] = row[column]
# parse url
item['real_url'] = row['url'].lower()
item['domain'] = extract_domain(row['url'])
item['tld'] = item['domain'].split('.')[-1]
# parse boilerplate
boilerplate = json.loads(row['boilerplate'])
for f in ['title', 'url', 'body']:
item[f] = boilerplate[f] if f in boilerplate else u''
item[f] = unidecode(item[f]) if item[f] else ''
if 'label' in row:
item['label'] = row['label']
else:
item['label'] = np.nan
data.append(item)
return data
def get_train():
train = load_csv('../data/raw/train.tsv')
return (parse_data(train))
def get_test():
test = load_csv('../data/raw/test.tsv')
return (parse_data(test))
| mit | 4,679,320,032,139,094,000 | 21.692308 | 65 | 0.51661 | false |
brunoliveira8/managyment | project/gym_app/migrations/0001_initial.py | 1 | 7312 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Athlete',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('level', models.CharField(default=b'BG', max_length=2, choices=[(b'BG', b'Beginner'), (b'IN', b'Intermediate'), (b'AD', b'Advanced')])),
('training_period', models.CharField(default=b'MO', max_length=2, choices=[(b'MO', b'Morning'), (b'AF', b'Afternoon'), (b'NI', b'Night')])),
('gender', models.CharField(default=b'M', max_length=2, choices=[(b'M', b'Male'), (b'F', b'Female')])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BodyScreening',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('screeningDate', models.DateField(default=datetime.datetime.now)),
('triceps', models.IntegerField(default=0, max_length=3)),
('biceps', models.IntegerField(default=0, max_length=3)),
('subscapular', models.IntegerField(default=0, max_length=3)),
('supraspinale', models.IntegerField(default=0, max_length=3)),
('suprailic', models.IntegerField(default=0, max_length=3)),
('abdominal', models.IntegerField(default=0, max_length=3)),
('chest', models.IntegerField(default=0, max_length=3)),
('thigh', models.IntegerField(default=0, max_length=3)),
('calf', models.IntegerField(default=0, max_length=3)),
('weight', models.IntegerField(default=0, max_length=4)),
('feet', models.IntegerField(default=0, max_length=4)),
('inches', models.IntegerField(default=0, max_length=4)),
('bodyfat', models.DecimalField(default=0, max_digits=6, decimal_places=2)),
('bmi', models.DecimalField(default=0, max_digits=6, decimal_places=1)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Exercise',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('weight', models.IntegerField(default=1, max_length=4)),
('repetition', models.IntegerField(default=1, max_length=4)),
('sets', models.IntegerField(default=1, max_length=4)),
('day', models.IntegerField(default=1, max_length=7, choices=[(1, b'Day 1'), (2, b'Day 2'), (3, b'Day 3'), (4, b'Day 4'), (5, b'Day 5'), (6, b'Day 6'), (7, b'Day 7')])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MailBox',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('owner', models.CharField(max_length=50)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sbj', models.CharField(max_length=50)),
('body', models.TextField(max_length=500)),
('src', models.CharField(max_length=50)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PersonalTrainer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('gender', models.CharField(default=b'M', max_length=2, choices=[(b'M', b'Male'), (b'F', b'Female')])),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=32)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Tracker',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('startWeightDate', models.DateField(auto_now_add=True)),
('startWeight', models.IntegerField(default=0, max_length=4)),
('previousWeightDate', models.DateField(auto_now=True)),
('previousWeight', models.IntegerField(default=0, max_length=4)),
('currentWeightDate', models.DateField(auto_now=True)),
('currentWeight', models.IntegerField(default=170, max_length=4)),
('goalWeight', models.IntegerField(default=160, max_length=4)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='WorkoutPlan',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('exercises', models.ManyToManyField(to='gym_app.Exercise')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='mailbox',
name='messages',
field=models.ManyToManyField(to='gym_app.Message'),
preserve_default=True,
),
migrations.AddField(
model_name='exercise',
name='task',
field=models.ForeignKey(to='gym_app.Task'),
preserve_default=True,
),
migrations.AddField(
model_name='athlete',
name='screenings',
field=models.ManyToManyField(to='gym_app.BodyScreening'),
preserve_default=True,
),
migrations.AddField(
model_name='athlete',
name='tracker',
field=models.OneToOneField(to='gym_app.Tracker'),
preserve_default=True,
),
migrations.AddField(
model_name='athlete',
name='user',
field=models.OneToOneField(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='athlete',
name='workout_plan',
field=models.OneToOneField(to='gym_app.WorkoutPlan'),
preserve_default=True,
),
]
| mit | 6,572,833,183,942,440,000 | 42.011765 | 185 | 0.526258 | false |
cobbler/cobbler | cobbler/cobbler_collections/distros.py | 1 | 4476 | """
Copyright 2006-2009, Red Hat, Inc and Others
Michael DeHaan <michael.dehaan AT gmail>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import os.path
import glob
from cobbler.cobbler_collections import collection
from cobbler.items import distro
from cobbler import utils
from cobbler.cexceptions import CX
class Distros(collection.Collection):
"""
A distro represents a network bootable matched set of kernels and initrd files.
"""
@staticmethod
def collection_type() -> str:
return "distro"
@staticmethod
def collection_types() -> str:
return "distros"
def factory_produce(self, api, item_dict):
"""
Return a Distro forged from item_dict
"""
new_distro = distro.Distro(api)
new_distro.from_dict(item_dict)
return new_distro
def remove(self, name, with_delete: bool = True, with_sync: bool = True, with_triggers: bool = True,
recursive: bool = False):
"""
Remove element named 'name' from the collection
:raises CX: In case any subitem (profiles or systems) would be orphaned. If the option ``recursive`` is set then
the orphaned items would be removed automatically.
"""
name = name.lower()
        # first see if any profiles use this distro
if not recursive:
for profile in self.api.profiles():
if profile.distro and profile.distro.name.lower() == name:
raise CX("removal would orphan profile: %s" % profile.name)
obj = self.find(name=name)
if obj is not None:
kernel = obj.kernel
if recursive:
kids = obj.get_children()
for k in kids:
self.api.remove_profile(k, recursive=recursive, delete=with_delete, with_triggers=with_triggers)
if with_delete:
if with_triggers:
utils.run_triggers(self.api, obj, "/var/lib/cobbler/triggers/delete/distro/pre/*", [])
if with_sync:
lite_sync = self.api.get_sync()
lite_sync.remove_single_distro(name)
self.lock.acquire()
try:
del self.listing[name]
finally:
self.lock.release()
self.collection_mgr.serialize_delete(self, obj)
if with_delete:
if with_triggers:
utils.run_triggers(self.api, obj, "/var/lib/cobbler/triggers/delete/distro/post/*", [])
utils.run_triggers(self.api, obj, "/var/lib/cobbler/triggers/change/*", [])
# look through all mirrored directories and find if any directory is holding this particular distribution's
# kernel and initrd
settings = self.api.settings()
possible_storage = glob.glob(settings.webdir + "/distro_mirror/*")
path = None
for storage in possible_storage:
if os.path.dirname(obj.kernel).find(storage) != -1:
path = storage
continue
# if we found a mirrored path above, we can delete the mirrored storage /if/ no other object is using the
# same mirrored storage.
if with_delete and path is not None and os.path.exists(path) and kernel.find(settings.webdir) != -1:
# this distro was originally imported so we know we can clean up the associated storage as long as
# nothing else is also using this storage.
found = False
distros = self.api.distros()
for d in distros:
if d.kernel.find(path) != -1:
found = True
if not found:
utils.rmtree(path)
| gpl-2.0 | -4,830,930,041,022,205,000 | 37.586207 | 120 | 0.605004 | false |
bennylope/django-site-contacts | contact/migrations/0001_initial.py | 1 | 4272 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.conf import settings
USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
USER_MODEL_NAME = USER_MODEL.split('.')[1]
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Recipient'
db.create_table('contact_recipient', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm[USER_MODEL], unique=True)),
))
db.send_create_signal('contact', ['Recipient'])
def backwards(self, orm):
# Deleting model 'Recipient'
db.delete_table('contact_recipient')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
USER_MODEL: {
'Meta': {'object_name': USER_MODEL_NAME},
#'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
#'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
#'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
#'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
#'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
#'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
#'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
#'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
#'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
#'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
#'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
#'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contact.recipient': {
'Meta': {'object_name': 'Recipient'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['{0}']".format(USER_MODEL), 'unique': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['contact']
| bsd-3-clause | 1,464,142,631,011,156,500 | 58.333333 | 182 | 0.562968 | false |
nathantypanski/zombierl | character.py | 1 | 2941 | from libtcod import libtcodpy as libtcod
import object as O
import map_vars as M
import status as S
import random
class Character (O.Object):
def __init__ (self, name, max_health, x, y, char, color, npc=False,
strength=5, to_hit=0.8, view_distance=10):
self.name = name
self.health = max_health
self.max_health = max_health
self._x = x
self._y = y
self.char = char
self.color = color
self.items = []
self.hand = None
self.npc = npc
self.strength = strength
self.to_hit = to_hit
self.view_distance=view_distance
M.gameworld[self.x][self.y].characters.append(self)
def move (self, dx, dy):
if (M.gameworld[self.x + dx][self.y + dy].characters
or not M.gameworld[self.x + dx][self.y + dy].is_floor()):
characters = M.gameworld[self.x + dx][self.y + dy].characters
if characters:
for character in characters:
if not character.npc:
self.attack(character)
else:
M.gameworld[self.x][self.y].characters.remove(self)
self.x = self.x + dx
self.y = self.y + dy
M.gameworld[self.x][self.y].characters.append(self)
def pick_up(self):
if M.gameworld[self.x][self.y].items:
item = M.gameworld[self.x][self.y].items.pop()
self.items.append(item)
def drop(self):
if self.items:
item = self.items.pop()
M.gameworld[self.x][self.y].items.append(item)
def drop_all(self):
for item in self.items:
self.items.remove(item)
M.gameworld[self.x][self.y].items.append(item)
# Moves toward coordinates. Only moves one step.
def move_to_coordinates (self, dx, dy):
if dx > self.x:
newx = 1
elif dx < self.x:
newx = -1
else:
newx = 0
if dy > self.y:
newy = 1
elif dy < self.y:
newy = -1
else:
newy = 0
self.move(newx, newy)
# Set the character's health.
def set_health (self, health):
self.health = health
def attack (self, character):
damage = self.strength*random.randint(self.strength//2, self.strength*2)
if random.random() <= self.to_hit:
S.add_status("%s hits %s!" % (self.name, character.name))
if damage > (0.5*character.max_health):
S.add_status("It's super effective!")
character.take_damage(damage)
else:
S.add_status("%s swings and misses." % (self.name))
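    # Rough damage sketch based on the formula above: with the default strength of 5,
    # damage = 5 * randint(2, 10), i.e. 10-50 per successful hit, and the default
    # to_hit of 0.8 means roughly four out of five swings connect.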
def take_damage (self, damage):
self.health -= damage
if 0 > self.health:
S.add_status("%s is killed!" % (self.name))
self.health = 0
M.gameworld[self.x][self.y].characters.remove(self)
self.drop_all()
def compute_fov(self):
for x in range (M.MAP_WIDTH):
for y in range (M.MAP_HEIGHT):
if M.gameworld[x][y].is_floor():
libtcod.map_set_properties (self.fov, x , y, True, True)
libtcod.map_compute_fov (self.fov, self.x, self.y, self.view_distance,
True,libtcod.FOV_DIAMOND)
| gpl-3.0 | -7,985,737,944,965,132,000 | 28.707071 | 76 | 0.606256 | false |
matthagy/Jamenson | jamenson/compiler/Attic/constant_reduction.py | 1 | 4970 | '''Evaluate constant expressions in ir
'''
from __future__ import absolute_import
from __future__ import with_statement
import operator as O
from ..runtime.multimethod import MultiMethod, defmethod, around
from .resolution import compile_time_resolve, UnresolvableError
from .walk import propigate_location
from . import ir as I
from . import codegen
constant_reduce = MultiMethod('constant_reduce',
signature='node',
doc='''If possible reduce expression to simpler expression.
Called after children nodes have been reduced to simpler nodes
''')
def reduce_constants(node):
#reduce children first
for child in list(I.iter_children(node)):
r_child = reduce_constants(child)
if r_child is not child:
I.replace_child(child, r_child)
return constant_reduce(node)
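# Illustrative sketch (hypothetical ir nodes): given an expression tree such as
# add(constant(1), constant(2)), reduce_constants first reduces the children and
# then replaces the add node with constant(3) via reduce_through_function below.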
class NotConstant(Exception):
pass
no_default = object()
def as_value(op, default=no_default):
if op is None and default is not no_default:
return default
if not isinstance(op, I.constant):
raise NotConstant
return op.value
def catch_notconstant(func):
def inner(node, *args, **kwds):
try:
return func(node, *args, **kwds)
except NotConstant:
return node
return inner
def mkcnst(node, value):
return propigate_location(node, I.make_constant(value))
@catch_notconstant
def reduce_through_function(node, func):
return mkcnst(node, evaluate_catch(node, func, *map(as_value, I.iter_children(node))))
def evaluate_catch(node, func, *args):
try:
return func(*args)
except Exception:
#could insert code to handle errors here
raise
#by default do nothing
@defmethod(constant_reduce, [I.node])
def meth(node):
return node
unary_functions = {
I.neg : O.neg,
I.pos : O.pos,
I.not_ : O.not_,
I.convert : repr,
I.invert : O.invert,
I.get_iter : iter,
}
@defmethod(constant_reduce, [I.unary_base])
def meth(node):
return reduce_through_function(node, unary_functions[type(node)])
binary_functions = {
I.add : O.add,
I.subtract : O.sub,
I.multiply : O.mul,
I.divide : O.div,
I.floor_divide : O.floordiv,
I.true_divide : O.truediv,
I.modulo : O.mod,
I.iadd : O.iadd,
I.isubtract : O.isub,
I.imultiply : O.imul,
I.idivide : O.idiv,
I.ifloor_divide : O.ifloordiv,
I.itrue_divide : O.itruediv,
I.imodulo : O.imod,
I.lshift : O.lshift,
I.rshift : O.rshift,
I.binand : O.and_,
I.binor : O.or_,
I.binxor : O.xor,
I.ibinand : O.iand,
I.ibinor : O.ior,
I.ibinxor : O.ixor,
I.gt : O.gt,
I.ge : O.ge,
I.eq : O.eq,
I.le : O.le,
I.lt : O.lt,
I.in_ : O.contains,
I.notin : lambda x,seq: x not in seq,
I.is_ : O.is_,
I.isnot : O.is_not,
I.exception_match : isinstance,
}
@defmethod(constant_reduce, [I.binary_base])
def meth(node):
return reduce_through_function(node, binary_functions[type(node)])
@defmethod(constant_reduce, [I.attrget])
@catch_notconstant
def meth(node):
return evaluate_catch(node, getattr, as_value(node.obj), node.name)
@defmethod(constant_reduce, [I.getitem])
@catch_notconstant
def meth(node):
return evaluate_catch(node, lambda op, item: op[item], as_value(node.op), as_value(node.item))
@defmethod(constant_reduce, [I.progn])
@catch_notconstant
def meth(node):
if not node.exprs:
return I.copy_loc(I.make_nop(), node)
for expr in node.exprs:
value = as_value(expr)
return mkcnst(node, value)
@defmethod(constant_reduce, [I.call])
@catch_notconstant
def meth(node):
callee = as_value(node.callee)
star_args = as_value(node.star_args, [])
star_kwds = as_value(node.star_kwds, {})
args = map(as_value, node.args)
kwds = dict(zip(node.kwd_names, map(as_value, node.kwd_values)))
def perform_call():
if set(kwds) & set(star_kwds):
raise ValueError("multiple values for same keyword")
kwds.update(star_kwds)
return callee(*(args + star_args), **kwds)
return mkcnst(node, evaluate_catch(node, perform_call))
@defmethod(constant_reduce, [I.if_])
@catch_notconstant
def meth(node):
return node.then if as_value(node.condition) else node.else_
@defmethod(constant_reduce, [I.function])
@catch_notconstant
def meth(func):
if codegen.get_function_free_bindings(func):
return func
map(as_value, func.defaults)
#must import here to prevent cyclic imports
from .function import make_function
return mkcnst(func, make_function(func))
| apache-2.0 | 2,729,221,520,801,868,300 | 27.895349 | 98 | 0.602817 | false |
frappe/frappe | frappe/utils/file_manager.py | 1 | 13378 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import frappe
import os, base64, re, json
import hashlib
import mimetypes
import io
from frappe.utils import get_hook_method, get_files_path, random_string, encode, cstr, call_hook_method, cint
from frappe import _
from frappe import conf
from copy import copy
from urllib.parse import unquote
class MaxFileSizeReachedError(frappe.ValidationError):
pass
def get_file_url(file_data_name):
data = frappe.db.get_value("File", file_data_name, ["file_name", "file_url"], as_dict=True)
return data.file_url or data.file_name
def upload():
# get record details
dt = frappe.form_dict.doctype
dn = frappe.form_dict.docname
file_url = frappe.form_dict.file_url
filename = frappe.form_dict.filename
frappe.form_dict.is_private = cint(frappe.form_dict.is_private)
if not filename and not file_url:
frappe.msgprint(_("Please select a file or url"),
raise_exception=True)
file_doc = get_file_doc()
comment = {}
if dt and dn:
comment = frappe.get_doc(dt, dn).add_comment("Attachment",
_("added {0}").format("<a href='{file_url}' target='_blank'>{file_name}</a>{icon}".format(**{
"icon": ' <i class="fa fa-lock text-warning"></i>' \
if file_doc.is_private else "",
"file_url": file_doc.file_url.replace("#", "%23") \
if file_doc.file_name else file_doc.file_url,
"file_name": file_doc.file_name or file_doc.file_url
})))
return {
"name": file_doc.name,
"file_name": file_doc.file_name,
"file_url": file_doc.file_url,
"is_private": file_doc.is_private,
"comment": comment.as_dict() if comment else {}
}
def get_file_doc(dt=None, dn=None, folder=None, is_private=None, df=None):
'''returns File object (Document) from given parameters or form_dict'''
r = frappe.form_dict
if dt is None: dt = r.doctype
if dn is None: dn = r.docname
if df is None: df = r.docfield
if folder is None: folder = r.folder
if is_private is None: is_private = r.is_private
if r.filedata:
file_doc = save_uploaded(dt, dn, folder, is_private, df)
elif r.file_url:
file_doc = save_url(r.file_url, r.filename, dt, dn, folder, is_private, df)
return file_doc
def save_uploaded(dt, dn, folder, is_private, df=None):
fname, content = get_uploaded_content()
if content:
return save_file(fname, content, dt, dn, folder, is_private=is_private, df=df)
else:
raise Exception
def save_url(file_url, filename, dt, dn, folder, is_private, df=None):
# if not (file_url.startswith("http://") or file_url.startswith("https://")):
# frappe.msgprint("URL must start with 'http://' or 'https://'")
# return None, None
file_url = unquote(file_url)
file_size = frappe.form_dict.file_size
f = frappe.get_doc({
"doctype": "File",
"file_url": file_url,
"file_name": filename,
"attached_to_doctype": dt,
"attached_to_name": dn,
"attached_to_field": df,
"folder": folder,
"file_size": file_size,
"is_private": is_private
})
f.flags.ignore_permissions = True
try:
f.insert()
except frappe.DuplicateEntryError:
return frappe.get_doc("File", f.duplicate_entry)
return f
def get_uploaded_content():
# should not be unicode when reading a file, hence using frappe.form
if 'filedata' in frappe.form_dict:
if "," in frappe.form_dict.filedata:
frappe.form_dict.filedata = frappe.form_dict.filedata.rsplit(",", 1)[1]
frappe.uploaded_content = base64.b64decode(frappe.form_dict.filedata)
frappe.uploaded_filename = frappe.form_dict.filename
return frappe.uploaded_filename, frappe.uploaded_content
else:
frappe.msgprint(_('No file attached'))
return None, None
def save_file(fname, content, dt, dn, folder=None, decode=False, is_private=0, df=None):
if decode:
if isinstance(content, str):
content = content.encode("utf-8")
if b"," in content:
content = content.split(b",")[1]
content = base64.b64decode(content)
file_size = check_max_file_size(content)
content_hash = get_content_hash(content)
content_type = mimetypes.guess_type(fname)[0]
fname = get_file_name(fname, content_hash[-6:])
file_data = get_file_data_from_hash(content_hash, is_private=is_private)
if not file_data:
call_hook_method("before_write_file", file_size=file_size)
write_file_method = get_hook_method('write_file', fallback=save_file_on_filesystem)
file_data = write_file_method(fname, content, content_type=content_type, is_private=is_private)
file_data = copy(file_data)
file_data.update({
"doctype": "File",
"attached_to_doctype": dt,
"attached_to_name": dn,
"attached_to_field": df,
"folder": folder,
"file_size": file_size,
"content_hash": content_hash,
"is_private": is_private
})
f = frappe.get_doc(file_data)
f.flags.ignore_permissions = True
try:
f.insert()
except frappe.DuplicateEntryError:
return frappe.get_doc("File", f.duplicate_entry)
return f
def get_file_data_from_hash(content_hash, is_private=0):
for name in frappe.db.sql_list("select name from `tabFile` where content_hash=%s and is_private=%s", (content_hash, is_private)):
b = frappe.get_doc('File', name)
return {k: b.get(k) for k in frappe.get_hooks()['write_file_keys']}
return False
def save_file_on_filesystem(fname, content, content_type=None, is_private=0):
fpath = write_file(content, fname, is_private)
if is_private:
file_url = "/private/files/{0}".format(fname)
else:
file_url = "/files/{0}".format(fname)
return {
'file_name': os.path.basename(fpath),
'file_url': file_url
}
def get_max_file_size():
return conf.get('max_file_size') or 10485760
def check_max_file_size(content):
max_file_size = get_max_file_size()
file_size = len(content)
if file_size > max_file_size:
frappe.msgprint(_("File size exceeded the maximum allowed size of {0} MB").format(
max_file_size / 1048576),
raise_exception=MaxFileSizeReachedError)
return file_size
def write_file(content, fname, is_private=0):
"""write file to disk with a random name (to compare)"""
file_path = get_files_path(is_private=is_private)
# create directory (if not exists)
frappe.create_folder(file_path)
# write the file
if isinstance(content, str):
content = content.encode()
with open(os.path.join(file_path.encode('utf-8'), fname.encode('utf-8')), 'wb+') as f:
f.write(content)
return get_files_path(fname, is_private=is_private)
def remove_all(dt, dn, from_delete=False):
"""remove all files in a transaction"""
try:
for fid in frappe.db.sql_list("""select name from `tabFile` where
attached_to_doctype=%s and attached_to_name=%s""", (dt, dn)):
remove_file(fid, dt, dn, from_delete)
except Exception as e:
		if e.args[0]!=1054: raise  # ignore MySQL "unknown column" (1054) errors; temporary until patched
def remove_file_by_url(file_url, doctype=None, name=None):
if doctype and name:
fid = frappe.db.get_value("File", {"file_url": file_url,
"attached_to_doctype": doctype, "attached_to_name": name})
else:
fid = frappe.db.get_value("File", {"file_url": file_url})
if fid:
return remove_file(fid)
def remove_file(fid, attached_to_doctype=None, attached_to_name=None, from_delete=False):
"""Remove file and File entry"""
file_name = None
if not (attached_to_doctype and attached_to_name):
attached = frappe.db.get_value("File", fid,
["attached_to_doctype", "attached_to_name", "file_name"])
if attached:
attached_to_doctype, attached_to_name, file_name = attached
ignore_permissions, comment = False, None
if attached_to_doctype and attached_to_name and not from_delete:
doc = frappe.get_doc(attached_to_doctype, attached_to_name)
ignore_permissions = doc.has_permission("write") or False
if frappe.flags.in_web_form:
ignore_permissions = True
if not file_name:
file_name = frappe.db.get_value("File", fid, "file_name")
comment = doc.add_comment("Attachment Removed", _("Removed {0}").format(file_name))
frappe.delete_doc("File", fid, ignore_permissions=ignore_permissions)
return comment
def delete_file_data_content(doc, only_thumbnail=False):
method = get_hook_method('delete_file_data_content', fallback=delete_file_from_filesystem)
method(doc, only_thumbnail=only_thumbnail)
def delete_file_from_filesystem(doc, only_thumbnail=False):
"""Delete file, thumbnail from File document"""
if only_thumbnail:
delete_file(doc.thumbnail_url)
else:
delete_file(doc.file_url)
delete_file(doc.thumbnail_url)
def delete_file(path):
"""Delete file from `public folder`"""
if path:
if ".." in path.split("/"):
frappe.msgprint(_("It is risky to delete this file: {0}. Please contact your System Manager.").format(path))
parts = os.path.split(path.strip("/"))
if parts[0]=="files":
path = frappe.utils.get_site_path("public", "files", parts[-1])
else:
path = frappe.utils.get_site_path("private", "files", parts[-1])
path = encode(path)
if os.path.exists(path):
os.remove(path)
def get_file(fname):
"""Returns [`file_name`, `content`] for given file name `fname`"""
file_path = get_file_path(fname)
# read the file
with io.open(encode(file_path), mode='rb') as f:
content = f.read()
try:
# for plain text files
content = content.decode()
except UnicodeDecodeError:
# for .png, .jpg, etc
pass
return [file_path.rsplit("/", 1)[-1], content]
def get_file_path(file_name):
"""Returns file path from given file name"""
f = frappe.db.sql("""select file_url from `tabFile`
where name=%s or file_name=%s""", (file_name, file_name))
if f:
file_name = f[0][0]
file_path = file_name
if "/" not in file_path:
file_path = "/files/" + file_path
if file_path.startswith("/private/files/"):
file_path = get_files_path(*file_path.split("/private/files/", 1)[1].split("/"), is_private=1)
elif file_path.startswith("/files/"):
file_path = get_files_path(*file_path.split("/files/", 1)[1].split("/"))
else:
frappe.throw(_("There is some problem with the file url: {0}").format(file_path))
return file_path
def get_content_hash(content):
if isinstance(content, str):
content = content.encode()
return hashlib.md5(content).hexdigest()
def get_file_name(fname, optional_suffix):
# convert to unicode
fname = cstr(fname)
n_records = frappe.db.sql("select name from `tabFile` where file_name=%s", fname)
if len(n_records) > 0 or os.path.exists(encode(get_files_path(fname))):
f = fname.rsplit('.', 1)
if len(f) == 1:
partial, extn = f[0], ""
else:
partial, extn = f[0], "." + f[1]
return '{partial}{suffix}{extn}'.format(partial=partial, extn=extn, suffix=optional_suffix)
return fname
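# Illustrative behaviour of get_file_name (hypothetical names): if "report.pdf" is
# already taken, get_file_name("report.pdf", "a1b2c3") yields "reporta1b2c3.pdf";
# if the name is free, it is returned unchanged.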
@frappe.whitelist()
def download_file(file_url):
"""
Download file using token and REST API. Valid session or
token is required to download private files.
Method : GET
Endpoint : frappe.utils.file_manager.download_file
URL Params : file_name = /path/to/file relative to site path
"""
file_doc = frappe.get_doc("File", {"file_url":file_url})
file_doc.check_permission("read")
path = os.path.join(get_files_path(), os.path.basename(file_url))
with open(path, "rb") as fileobj:
filedata = fileobj.read()
frappe.local.response.filename = os.path.basename(file_url)
frappe.local.response.filecontent = filedata
frappe.local.response.type = "download"
def extract_images_from_doc(doc, fieldname):
content = doc.get(fieldname)
content = extract_images_from_html(doc, content)
if frappe.flags.has_dataurl:
doc.set(fieldname, content)
def extract_images_from_html(doc, content):
frappe.flags.has_dataurl = False
def _save_file(match):
data = match.group(1)
data = data.split("data:")[1]
headers, content = data.split(",")
if "filename=" in headers:
filename = headers.split("filename=")[-1]
# decode filename
if not isinstance(filename, str):
filename = str(filename, 'utf-8')
else:
mtype = headers.split(";")[0]
filename = get_random_filename(content_type=mtype)
doctype = doc.parenttype if doc.parent else doc.doctype
name = doc.parent or doc.name
if doc.doctype == "Comment":
doctype = doc.reference_doctype
name = doc.reference_name
# TODO fix this
file_url = save_file(filename, content, doctype, name, decode=True).get("file_url")
if not frappe.flags.has_dataurl:
frappe.flags.has_dataurl = True
return '<img src="{file_url}"'.format(file_url=file_url)
if content:
content = re.sub(r'<img[^>]*src\s*=\s*["\'](?=data:)(.*?)["\']', _save_file, content)
return content
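# Sketch of the transformation performed above: an inline <img> whose src is a
# base64 data URL (hypothetical markup) is decoded, stored through save_file()
# against the parent document, and the tag is rewritten to reference the stored
# file's /files/... URL instead of the embedded data.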
def get_random_filename(extn=None, content_type=None):
if extn:
if not extn.startswith("."):
extn = "." + extn
elif content_type:
extn = mimetypes.guess_extension(content_type)
return random_string(7) + (extn or "")
@frappe.whitelist(allow_guest=True)
def validate_filename(filename):
from frappe.utils import now_datetime
timestamp = now_datetime().strftime(" %Y-%m-%d %H:%M:%S")
fname = get_file_name(filename, timestamp)
return fname
@frappe.whitelist()
def add_attachments(doctype, name, attachments):
'''Add attachments to the given DocType'''
if isinstance(attachments, str):
attachments = json.loads(attachments)
# loop through attachments
files =[]
for a in attachments:
if isinstance(a, str):
attach = frappe.db.get_value("File", {"name":a}, ["file_name", "file_url", "is_private"], as_dict=1)
# save attachments to new doc
f = save_url(attach.file_url, attach.file_name, doctype, name, "Home/Attachments", attach.is_private)
files.append(f)
return files
| mit | -1,703,635,252,731,318,000 | 28.662971 | 130 | 0.690537 | false |
lig/picket_deadend | apps/picket/middleware.py | 1 | 1686 | """
Copyright 2010 Serge Matveenko
This file is part of Picket.
Picket is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Picket is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Picket. If not, see <http://www.gnu.org/licenses/>.
"""
from mongoengine import ValidationError
from documents import Project, Department
class PicketMiddleware(object):
def process_request(self, request):
# set project
if 'set_project' in request.GET:
request.session['current_project'] = request.GET['set_project']
# attach project object to request
current_project_id = request.session.get('current_project')
# current_project_id could be None for all projects
current_project = (current_project_id and
Project.objects.with_id(current_project_id))
# current_project could be None after lookup
request.project = current_project
# get headed departments and managed projects
if request.user.is_authenticated():
request.my_departments = Department.objects(head=request.user)
request.my_projects = Project.objects(manager=request.user)
else:
request.my_departments, request.my_projects = None, None
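# Usage sketch (hypothetical URL): a request such as /tickets/?set_project=<id>
# stores that id in the session, so later requests see the matching Project (or
# None for "all projects") attached as request.project by this middleware.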
| gpl-3.0 | 5,305,808,671,908,993,000 | 35.652174 | 75 | 0.708185 | false |
whatitslike/spiders | zhihu/roundtables.py | 1 | 1046 | from .agent import do_request
from .base import BaseSource
from .types import Types
class RoundTables(BaseSource):
def __init__(self):
super(RoundTables, self).__init__()
self._start_urls = [
'https://api.zhihu.com/roundtables?excerpt_len=75'
]
def _parse(self, json_objs):
urls = []
for obj in json_objs['data']:
t = obj.get('type')
if t != 'roundtable':
continue
urls.append(obj['url'])
questions_url = [u + '/questions?excerpt_len=75' for u in urls]
for url in questions_url:
objs = do_request(url)
while not objs['paging']['is_end']:
for obj in objs['data']:
if obj['type'] != 'question':
continue
self.publish(obj['url'], Types.QUESTION)
self.get_answer_url_by_question_url(obj['url'])
next_url = objs['paging']['next']
objs = do_request(next_url)
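# Pagination sketch: each roundtable's /questions feed above is walked page by page
# via paging['next'] until paging['is_end'] is true, publishing every question URL
# and requesting its answer URLs through get_answer_url_by_question_url.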
| gpl-3.0 | -4,115,169,817,327,442,000 | 28.055556 | 71 | 0.5 | false |
adelina-t/compute-hyperv | hyperv/nova/livemigrationops.py | 1 | 5536 | # Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for live migration VM operations.
"""
import functools
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from hyperv.i18n import _
from hyperv.nova import imagecache
from hyperv.nova import serialconsoleops
from hyperv.nova import utilsfactory
from hyperv.nova import vmops
from hyperv.nova import volumeops
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('use_cow_images', 'nova.virt.driver')
def check_os_version_requirement(function):
@functools.wraps(function)
def wrapper(self, *args, **kwds):
if not self._livemigrutils:
raise NotImplementedError(_('Live migration is supported '
'starting with Hyper-V Server '
'2012'))
return function(self, *args, **kwds)
return wrapper
class LiveMigrationOps(object):
def __init__(self):
# Live migration is supported starting from Hyper-V Server 2012
if utilsfactory.get_hostutils().check_min_windows_version(6, 2):
self._livemigrutils = utilsfactory.get_livemigrationutils()
else:
self._livemigrutils = None
self._pathutils = utilsfactory.get_pathutils()
self._vmops = vmops.VMOps()
self._volumeops = volumeops.VolumeOps()
self._serial_console_ops = serialconsoleops.SerialConsoleOps()
self._imagecache = imagecache.ImageCache()
@check_os_version_requirement
def live_migration(self, context, instance_ref, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
LOG.debug("live_migration called", instance=instance_ref)
instance_name = instance_ref["name"]
try:
self._vmops.copy_vm_dvd_disks(instance_name, dest)
# We must make sure that the console log workers are stopped,
# otherwise we won't be able to delete / move VM log files.
self._serial_console_ops.stop_console_handler(instance_name)
self._pathutils.copy_vm_console_logs(instance_name, dest)
self._livemigrutils.live_migrate_vm(instance_name,
dest)
except Exception:
with excutils.save_and_reraise_exception():
LOG.debug("Calling live migration recover_method "
"for instance: %s", instance_name)
recover_method(context, instance_ref, dest, block_migration)
LOG.debug("Calling live migration post_method for instance: %s",
instance_name)
post_method(context, instance_ref, dest, block_migration)
@check_os_version_requirement
def pre_live_migration(self, context, instance, block_device_info,
network_info):
LOG.debug("pre_live_migration called", instance=instance)
self._livemigrutils.check_live_migration_config()
if CONF.use_cow_images:
boot_from_volume = self._volumeops.ebs_root_in_block_devices(
block_device_info)
if not boot_from_volume and instance.image_ref:
self._imagecache.get_cached_image(context, instance)
self._volumeops.initialize_volumes_connection(block_device_info)
@check_os_version_requirement
def post_live_migration(self, context, instance, block_device_info):
self._volumeops.disconnect_volumes(block_device_info)
self._pathutils.get_instance_dir(instance.name,
create_dir=False,
remove_dir=True)
@check_os_version_requirement
def post_live_migration_at_destination(self, ctxt, instance_ref,
network_info, block_migration):
LOG.debug("post_live_migration_at_destination called",
instance=instance_ref)
@check_os_version_requirement
def check_can_live_migrate_destination(self, ctxt, instance_ref,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
LOG.debug("check_can_live_migrate_destination called", instance_ref)
return {}
@check_os_version_requirement
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
LOG.debug("check_can_live_migrate_destination_cleanup called")
@check_os_version_requirement
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
LOG.debug("check_can_live_migrate_source called", instance_ref)
return dest_check_data
| apache-2.0 | 8,935,619,094,054,086,000 | 40.62406 | 78 | 0.621568 | false |
JimDuggan/SDMR | pysd2r/pysd2r_scripts/models/SIR.py | 1 | 4264 | """
Python model "SIR.py"
Translated using PySD version 0.9.0
"""
from __future__ import division
import numpy as np
from pysd import utils
import xarray as xr
from pysd.py_backend.functions import cache
from pysd.py_backend import functions
_subscript_dict = {}
_namespace = {
'TIME': 'time',
'Time': 'time',
'Contact Rate': 'contact_rate',
'Infected': 'infected',
'Infectivity': 'infectivity',
'IR': 'ir',
'Net Flow': 'net_flow',
'R Delay': 'r_delay',
'R0': 'r0',
'Recovered': 'recovered',
'RR': 'rr',
'Susceptible': 'susceptible',
'Total Population': 'total_population',
'FINAL TIME': 'final_time',
'INITIAL TIME': 'initial_time',
'SAVEPER': 'saveper',
'TIME STEP': 'time_step'
}
__pysd_version__ = "0.9.0"
@cache('run')
def contact_rate():
"""
Real Name: b'Contact Rate'
Original Eqn: b'4'
Units: b''
Limits: (None, None)
Type: constant
b''
"""
return 4
@cache('step')
def infected():
"""
Real Name: b'Infected'
Original Eqn: b'INTEG ( IR-RR, 1)'
Units: b''
Limits: (None, None)
Type: component
b''
"""
return integ_infected()
@cache('run')
def infectivity():
"""
Real Name: b'Infectivity'
Original Eqn: b'0.25'
Units: b''
Limits: (None, None)
Type: constant
b''
"""
return 0.25
@cache('step')
def ir():
"""
Real Name: b'IR'
Original Eqn: b'Contact Rate*Susceptible*(Infected/Total Population)*Infectivity'
Units: b''
Limits: (None, None)
Type: component
b''
"""
return contact_rate() * susceptible() * (infected() / total_population()) * infectivity()
@cache('step')
def net_flow():
"""
Real Name: b'Net Flow'
Original Eqn: b'IR-RR'
Units: b''
Limits: (None, None)
Type: component
b''
"""
return ir() - rr()
@cache('run')
def r_delay():
"""
Real Name: b'R Delay'
Original Eqn: b'2'
Units: b''
Limits: (None, None)
Type: constant
b''
"""
return 2
@cache('step')
def r0():
"""
Real Name: b'R0'
Original Eqn: b'Contact Rate*Infectivity*R Delay'
Units: b''
Limits: (None, None)
Type: component
b''
"""
return contact_rate() * infectivity() * r_delay()
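# Note: with the constant inputs defined in this file (contact rate 4, infectivity
# 0.25, recovery delay 2), r0() evaluates to 4 * 0.25 * 2 = 2, i.e. each infected
# person passes the infection on to two others on average.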
@cache('step')
def recovered():
"""
Real Name: b'Recovered'
Original Eqn: b'INTEG ( RR, 0)'
Units: b''
Limits: (None, None)
Type: component
b''
"""
return integ_recovered()
@cache('step')
def rr():
"""
Real Name: b'RR'
Original Eqn: b'Infected/R Delay'
Units: b''
Limits: (None, None)
Type: component
b''
"""
return infected() / r_delay()
@cache('step')
def susceptible():
"""
Real Name: b'Susceptible'
Original Eqn: b'INTEG ( -IR, 9999)'
Units: b''
Limits: (None, None)
Type: component
b''
"""
return integ_susceptible()
@cache('run')
def total_population():
"""
Real Name: b'Total Population'
Original Eqn: b'10000'
Units: b''
Limits: (None, None)
Type: constant
b''
"""
return 10000
@cache('run')
def final_time():
"""
Real Name: b'FINAL TIME'
Original Eqn: b'100'
Units: b'Month'
Limits: (None, None)
Type: constant
b'The final time for the simulation.'
"""
return 100
@cache('run')
def initial_time():
"""
Real Name: b'INITIAL TIME'
Original Eqn: b'0'
Units: b'Month'
Limits: (None, None)
Type: constant
b'The initial time for the simulation.'
"""
return 0
@cache('step')
def saveper():
"""
Real Name: b'SAVEPER'
Original Eqn: b'TIME STEP'
Units: b'Month'
Limits: (0.0, None)
Type: component
b'The frequency with which output is stored.'
"""
return time_step()
@cache('run')
def time_step():
"""
Real Name: b'TIME STEP'
Original Eqn: b'0.0078125'
Units: b'Month'
Limits: (0.0, None)
Type: constant
b'The time step for the simulation.'
"""
return 0.0078125
integ_infected = functions.Integ(lambda: ir() - rr(), lambda: 1)
integ_recovered = functions.Integ(lambda: rr(), lambda: 0)
integ_susceptible = functions.Integ(lambda: -ir(), lambda: 9999)
| mit | 5,341,325,515,212,766,000 | 15.920635 | 93 | 0.56379 | false |
bennylope/sysenv | sysenv/__init__.py | 1 | 1422 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Ben Lopatin'
__email__ = '[email protected]'
__version__ = '0.1.0'
import os
import re
import logging
from .data import EnvDict
logger = logging.getLogger(__name__)
def read_file_values(env_file, fail_silently=True):
"""
Borrowed from Honcho
"""
env_data = {}
try:
with open(env_file) as f:
content = f.read()
except IOError:
if fail_silently:
            logger.error("Could not read file '{0}'".format(env_file))
return env_data
raise
for line in content.splitlines():
m1 = re.match(r'\A([A-Za-z_0-9]+)=(.*)\Z', line)
if m1:
key, val = m1.group(1), m1.group(2)
m2 = re.match(r"\A'(.*)'\Z", val)
if m2:
val = m2.group(1)
m3 = re.match(r'\A"(.*)"\Z', val)
if m3:
val = re.sub(r'\\(.)', r'\1', m3.group(1))
env_data[key] = val
return env_data
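# Illustrative parse (hypothetical .env content), matching the regexes above:
#   DEBUG=true              -> {'DEBUG': 'true'}
#   NAME='quoted value'     -> {'NAME': 'quoted value'}
#   MSG="say \"hi\""        -> {'MSG': 'say "hi"'}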
def load(env_file=None, fail_silently=True, load_globally=True, **kwargs):
"""
Returns an instance of EnvDict after reading the system environment an
optionally provided file.
"""
data = {}
data.update(os.environ)
if env_file:
data.update(read_file_values(env_file, fail_silently))
if load_globally:
os.environ.update(data)
return EnvDict(data, **kwargs)
| bsd-3-clause | 1,805,632,655,056,492,500 | 22.311475 | 74 | 0.539381 | false |
FEniCS/dolfin | python/dolfin/function/functionspace.py | 1 | 8437 | # -*- coding: utf-8 -*-
"""Main module for DOLFIN"""
# Copyright (C) 2017 Chris N. Richardson and Garth N. Wells
#
# Distributed under the terms of the GNU Lesser Public License (LGPL),
# either version 3 of the License, or (at your option) any later
# version.
import types
import ffc
import ufl
import dolfin.cpp as cpp
from . import function
class FunctionSpace(ufl.FunctionSpace):
def __init__(self, *args, **kwargs):
"""Create finite element function space."""
if len(args) == 1:
            # Do we really want to do it this way? Can we get the
# sub-element from UFL?
self._init_from_cpp(*args, **kwargs)
else:
if len(args) == 0 or not isinstance(args[0], cpp.mesh.Mesh):
#cpp.dolfin_error("functionspace.py",
# "create function space",
# "Illegal argument, not a mesh: "
# + str(args[0]))
pass
elif len(args) == 2:
self._init_from_ufl(*args, **kwargs)
else:
self._init_convenience(*args, **kwargs)
def _init_from_ufl(self, mesh, element, constrained_domain=None):
        # Initialize the ufl.FunctionSpace first so that the mesh/element
        # combination is sanity-checked
ufl.FunctionSpace.__init__(self, mesh.ufl_domain(), element)
# Compile dofmap and element
ufc_element, ufc_dofmap = ffc.jit(element, parameters=None)
ufc_element = cpp.fem.make_ufc_finite_element(ufc_element)
# Create DOLFIN element and dofmap
dolfin_element = cpp.fem.FiniteElement(ufc_element)
ufc_dofmap = cpp.fem.make_ufc_dofmap(ufc_dofmap)
if constrained_domain is None:
dolfin_dofmap = cpp.fem.DofMap(ufc_dofmap, mesh)
else:
dolfin_dofmap = cpp.fem.DofMap(ufc_dofmap, mesh,
constrained_domain)
# Initialize the cpp.FunctionSpace
self._cpp_object = cpp.function.FunctionSpace(mesh,
dolfin_element,
dolfin_dofmap)
def _init_from_cpp(self, cppV, **kwargs):
"""
if not isinstance(cppV, cpp.FunctionSpace):
cpp.dolfin_error("functionspace.py",
"create function space",
"Illegal argument for C++ function space, "
"not a cpp.FunctionSpace: " + str(cppV))
# We don't want to support copy construction. This would
        # indicate internal deficiency in the library
if isinstance(cppV, FunctionSpace):
cpp.dolfin_error("functionspace.py",
"create function space",
"Illegal argument for C++ function space, "
"should not be functions.functionspace.FunctionSpace: " + str(cppV))
if len(kwargs) > 0:
cpp.dolfin_error("functionspace.py",
"create function space",
"Illegal arguments, did not expect C++ "
"function space and **kwargs: " + str(kwargs))
"""
# Reconstruct UFL element from signature
ufl_element = eval(cppV.element().signature(), ufl.__dict__)
# Get mesh
ufl_domain = cppV.mesh().ufl_domain()
# Initialize the ufl.FunctionSpace (not calling cpp.Function.__init__)
self._cpp_object = cppV
# Initialize the ufl.FunctionSpace
ufl.FunctionSpace.__init__(self, ufl_domain, ufl_element)
def _init_convenience(self, mesh, family, degree, form_degree=None,
constrained_domain=None, restriction=None):
# Create UFL element
element = ufl.FiniteElement(family, mesh.ufl_cell(), degree,
form_degree=form_degree)
self._init_from_ufl(mesh, element, constrained_domain=constrained_domain)
def dolfin_element(self):
"Return the DOLFIN element."
return self._cpp_object.element()
def num_sub_spaces(self):
"Return the number of sub spaces"
return self.dolfin_element().num_sub_elements()
def sub(self, i):
"Return the i-th sub space"
# FIXME: Should we have a more extensive check other than
        # what's included in the cpp code?
if not isinstance(i, int):
raise TypeError("expected an int for 'i'")
if self.num_sub_spaces() == 1:
raise ValueError("no SubSpaces to extract")
if i >= self.num_sub_spaces():
raise ValueError("Can only extract SubSpaces with i = 0 ... %d" % \
(self.num_sub_spaces() - 1))
assert hasattr(self.ufl_element(), "sub_elements")
# Extend with the python layer
return FunctionSpace(cpp.function.FunctionSpace.sub(self._cpp_object, i))
def component(self):
return self._cpp_object.component()
def contains(self, V):
"Check whether a function is in the FunctionSpace"
return self._cpp_object.contains(V._cpp_object)
#if isinstance(u, cpp.function.Function):
# return u._in(self)
#elif isinstance(u, function.Function):
# return u._cpp_object._in(self)
#return False
def __contains__(self, u):
"Check whether a function is in the FunctionSpace"
if isinstance(u, cpp.function.Function):
return u._in(self._cpp_object)
elif isinstance(u, function.Function):
return u._cpp_object._in(self._cpp_object)
return False
def __eq__(self, other):
"Comparison for equality."
return ufl.FunctionSpace.__eq__(self, other) and self._cpp_object == other._cpp_object
def __ne__(self, other):
"Comparison for inequality."
return ufl.FunctionSpace.__ne__(self, other) or self._cpp_object != other._cpp_object
def ufl_cell(self):
return self._cpp_object.mesh().ufl_cell()
def ufl_function_space(self):
return self
def dim(self):
return self._cpp_object.dim()
def id(self):
return self._cpp_object.id()
def element(self):
return self._cpp_object.element()
def dofmap(self):
return self._cpp_object.dofmap()
def mesh(self):
return self._cpp_object.mesh()
def set_x(self, basis, x, component):
return self._cpp_object.set_x(basis, x, component)
def collapse(self, collapsed_dofs=False):
"""Collapse a subspace and return a new function space and a map from
new to old dofs
*Arguments*
collapsed_dofs (bool)
Return the map from new to old dofs
*Returns*
_FunctionSpace_
The new function space.
dict
The map from new to old dofs (optional)
"""
# Get the cpp version of the FunctionSpace
cpp_space, dofs = self._cpp_object.collapse()
# Extend with the python layer
V = FunctionSpace(cpp_space)
if collapsed_dofs:
return V, dofs
else:
return V
def extract_sub_space(self, component):
V = self._cpp_object.extract_sub_space(component)
return FunctionSpace(V)
def tabulate_dof_coordinates(self):
return self._cpp_object.tabulate_dof_coordinates()
def VectorFunctionSpace(mesh, family, degree, dim=None, form_degree=None,
constrained_domain=None, restriction=None):
"""Create finite element function space."""
# Create UFL element
element = ufl.VectorElement(family, mesh.ufl_cell(), degree,
form_degree=form_degree, dim=dim)
# Return (Py)DOLFIN FunctionSpace
return FunctionSpace(mesh, element, constrained_domain=constrained_domain)
def TensorFunctionSpace(mesh, family, degree, shape=None, symmetry=None,
constrained_domain=None, restriction=None):
"""Create finite element function space."""
# Create UFL element
element = ufl.TensorElement(family, mesh.ufl_cell(), degree,
shape, symmetry)
# Return (Py)DOLFIN FunctionSpace
return FunctionSpace(mesh, element, constrained_domain=constrained_domain)
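# Minimal usage sketch (assumes the rest of the dolfin Python layer is importable;
# names like UnitSquareMesh come from dolfin itself, not from this module):
#   mesh = UnitSquareMesh(8, 8)
#   V = FunctionSpace(mesh, "Lagrange", 1)        # scalar P1 space
#   W = VectorFunctionSpace(mesh, "Lagrange", 2)  # vector P2 space
#   T = TensorFunctionSpace(mesh, "Lagrange", 1)  # tensor P1 space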
| lgpl-3.0 | -3,374,285,540,439,778,300 | 34.902128 | 97 | 0.576271 | false |
peterlharding/PDQ | examples/ppa_1998/chap3/multi_class.py | 1 | 2445 | #!/usr/bin/env python
###############################################################################
# Copyright (C) 1994 - 2009, Performance Dynamics Company #
# #
# This software is licensed as described in the file COPYING, which #
# you should have received as part of this distribution. The terms #
# are also available at http://www.perfdynamics.com/Tools/copyright.html. #
# #
# You may opt to use, copy, modify, merge, publish, distribute and/or sell #
# copies of the Software, and permit persons to whom the Software is #
# furnished to do so, under the terms of the COPYING file. #
# #
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY #
# KIND, either express or implied. #
###############################################################################
import pdq
# Based on closed_center.c
#
# Illustrate import of PDQ solver for multiclass workload.
#---- Model specific variables -------------------------------------------------
think = 0.0
#---- Initialize the model -----------------------------------------------------
tech = pdq.APPROX
if (tech == pdq.EXACT):
technique = "EXACT"
else:
technique = "APPROX"
print "**** %s Solution ****:\n" % technique
print " N R (w1) R (w2)"
for pop in range(1, 10):
pdq.Init("Test_Exact_calc")
#---- Define the workload and circuit type ----------------------------------
pdq.streams = pdq.CreateClosed("w1", pdq.TERM, 1.0 * pop, think)
pdq.streams = pdq.CreateClosed("w2", pdq.TERM, 1.0 * pop, think)
#---- Define the queueing center --------------------------------------------
pdq.nodes = pdq.CreateNode("node", pdq.CEN, pdq.FCFS)
#---- service demand --------------------------------------------------------
pdq.SetDemand("node", "w1", 1.0)
pdq.SetDemand("node", "w2", 0.5)
#---- Solve the model -------------------------------------------------------
pdq.Solve(tech)
print "%3.0f %8.4f %8.4f" % (pop,
pdq.GetResponse(pdq.TERM, "w1"),
pdq.GetResponse(pdq.TERM, "w2"));
| mit | -8,317,115,764,989,448,000 | 34.716418 | 80 | 0.42454 | false |
kleinfeld/medpy | medpy/graphcut/generate.py | 1 | 16523 | """
@package medpy.graphcut.generate
Provides functionality to generate graphs efficiently from nD label-images and image voxels.
Functions:
- def graph_from_labels(label_image,
fg_markers,
bg_markers,
regional_term = False,
boundary_term = False,
regional_term_args = False,
boundary_term_args = False): Creates a Graph object from a nD label image.
- def graph_from_voxels(fg_markers,
bg_markers,
regional_term = False,
boundary_term = False,
regional_term_args = False,
boundary_term_args = False): Creates a Graph object from the voxels of an image.
@author Oskar Maier
@version r0.3.0
@since 2012-01-18
@status Release
"""
# build-in modules
import inspect
# third-party modules
import scipy
# own modules
from ..core import Logger
from ..graphcut import GCGraph
def graph_from_voxels(fg_markers,
bg_markers,
regional_term = False,
boundary_term = False,
regional_term_args = False,
boundary_term_args = False):
"""
Create a graphcut.maxflow.GraphDouble object for all voxels of an image with a
ndim * 2 neighbourhood.
Every voxel of the image is regarded as a node. They are connected to their immediate
    neighbours via arcs. Whether two voxels are neighbours is determined using
    ndim*2-connectedness (e.g. 3*2=6 for 3D). In the next step the arc weights
(n-weights) are computed using the supplied boundary_term function.
Implicitly the graph holds two additional nodes: the source and the sink, so called
terminal nodes. These are connected with all other nodes through arcs of an initial
weight (t-weight) of zero.
All voxels that are under the foreground markers are considered to be tightly bound
to the source: The t-weight of the arc from source to these nodes is set to a maximum
value. The same goes for the background markers: The covered voxels receive a maximum
(graphcut.graph.GCGraph.MAX) t-weight for their arc towards the sink.
@note If a voxel is marked as both, foreground and background, the background marker
is given higher priority.
@note all arcs whose weight is not explicitly set are assumed to carry a weight of
zero.
@param fg_markers The foreground markers as binary array of the same shape as the original image.
@type fg_markers ndarray
@param bg_markers The background markers as binary array of the same shape as the original image.
@type bg_markers ndarray
@param regional_term This can be either
False - all t-weights are set to 0, except for the nodes that are
directly connected to the source or sink.
, or a function -
The supplied function is used to compute the t_edges. It has to
have the following signature
regional_term(graph, regional_term_args),
and is supposed to compute (source_t_weight, sink_t_weight) for
all voxels of the image and add these to the passed graph.GCGraph
object. The weights have only to be computed for nodes where
they do not equal zero. Additional parameters can be passed via
the regional_term_args argument.
@type regional_term function
@param boundary_term This can be either
False -
In which case the weight of all n_edges i.e. between all nodes
that are not source or sink, are set to 0.
, or a function -
In which case it is used to compute the edges weights. The
supplied function has to have the following signature
fun(graph, boundary_term_args), and is supposed to compute the
edges between the graphs node and to add them to the supplied
graph.GCGraph object. Additional parameters
can be passed via the boundary_term_args argument.
@type boundary_term function
@param regional_term_args Use this to pass some additional parameters to the
regional_term function.
@param boundary_term_args Use this to pass some additional parameters to the
boundary_term function.
@return the created graph
@rtype graphcut.maxflow.GraphDouble
    @raise AttributeError If an argument is malformed.
@raise FunctionError If one of the supplied functions returns unexpected results.
"""
# prepare logger
logger = Logger.getInstance()
# prepare result graph
logger.debug('Assuming {} nodes and {} edges for image of shape {}'.format(fg_markers.size, __voxel_4conectedness(fg_markers.shape), fg_markers.shape))
graph = GCGraph(fg_markers.size, __voxel_4conectedness(fg_markers.shape))
logger.info('Performing attribute tests...')
# check, set and convert all supplied parameters
fg_markers = scipy.asarray(fg_markers, dtype=scipy.bool_)
bg_markers = scipy.asarray(bg_markers, dtype=scipy.bool_)
# set dummy functions if not supplied
if not regional_term: regional_term = __regional_term_voxel
if not boundary_term: boundary_term = __boundary_term_voxel
# check supplied functions and their signature
if not hasattr(regional_term, '__call__') or not 2 == len(inspect.getargspec(regional_term)[0]):
raise AttributeError('regional_term has to be a callable object which takes two parameter.')
if not hasattr(boundary_term, '__call__') or not 2 == len(inspect.getargspec(boundary_term)[0]):
raise AttributeError('boundary_term has to be a callable object which takes two parameters.')
logger.debug('#nodes={}, #hardwired-nodes source/sink={}/{}'.format(fg_markers.size,
len(fg_markers.ravel().nonzero()[0]),
len(bg_markers.ravel().nonzero()[0])))
# compute the weights of all edges from the source and to the sink i.e.
# compute the weights of the t_edges Wt
logger.info('Computing and adding terminal edge weights...')
regional_term(graph, regional_term_args)
# compute the weights of the edges between the neighbouring nodes i.e.
# compute the weights of the n_edges Wr
logger.info('Computing and adding inter-node edge weights...')
boundary_term(graph, boundary_term_args)
# collect all voxels that are under the foreground resp. background markers i.e.
# collect all nodes that are connected to the source resp. sink
logger.info('Setting terminal weights for the markers...')
if not 0 == scipy.count_nonzero(fg_markers):
graph.set_source_nodes(fg_markers.ravel().nonzero()[0])
if not 0 == scipy.count_nonzero(bg_markers):
graph.set_sink_nodes(bg_markers.ravel().nonzero()[0])
return graph.get_graph()
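# Illustrative sketch: a typical call to graph_from_voxels. The boundary term
# name and its argument below are hypothetical; any callable with the
# documented signature fun(graph, boundary_term_args) that adds the n-edge
# weights to the passed GCGraph object can be used.
#
#   def my_boundary_term(graph, args):            # hypothetical helper
#       original_image = args
#       ...                                        # add n-edge weights to graph
#
#   gcgraph = graph_from_voxels(fg_markers, bg_markers,
#                               boundary_term=my_boundary_term,
#                               boundary_term_args=original_image)
#   # gcgraph is a graphcut.maxflow.GraphDouble, ready for the max-flow run.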
def graph_from_labels(label_image,
fg_markers,
bg_markers,
regional_term = False,
boundary_term = False,
regional_term_args = False,
boundary_term_args = False):
"""
Create a graphcut.maxflow.GraphDouble object from a nD label image.
Every region of the label image is regarded as a node. They are connected to their
    immediate neighbours by arcs. Whether two regions are neighbours is determined using
ndim*2-connectedness (e.g. 3*2=6 for 3D).
In the next step the arcs weights (n-weights) are computed using the supplied
boundary_term function.
Implicitly the graph holds two additional nodes: the source and the sink, so called
terminal nodes. These are connected with all other nodes through arcs of an initial
weight (t-weight) of zero.
All regions that are under the foreground markers are considered to be tightly bound
to the source: The t-weight of the arc from source to these nodes is set to a maximum
value. The same goes for the background markers: The covered regions receive a
maximum (graphcut.graph.GCGraph.MAX) t-weight for their arc towards the sink.
    @note If a region is marked as both foreground and background, the background marker
is given higher priority.
@note all arcs whose weight is not explicitly set are assumed to carry a weight of
zero.
@param label_image The label image as an array containing uint values. Note that the
region labels have to start from 1 and be continuous (filter.label.relabel()).
@type label_image numpy.ndarray
@param fg_markers The foreground markers as binary array of the same shape as the label image.
@type fg_markers ndarray
@param bg_markers The background markers as binary array of the same shape as the label image.
@type bg_markers ndarray
@param regional_term This can be either
False - all t-weights are set to 0, except for the nodes that are
directly connected to the source or sink.
, or a function -
The supplied function is used to compute the t_edges. It has to
have the following signature
regional_term(graph, label_image, regional_term_args), and is
supposed to compute the weights between the regions of the
label_image and the sink resp. source. The computed values it
should add directly to the supplied graph.GCGraph object.
Additional parameters can be passed via the regional_term_args
argument.
@type regional_term function
@param boundary_term This can be either
False -
In which case the weight of all n_edges i.e. between all nodes
that are not source or sink, are set to 0.
, or a function -
In which case it is used to compute the edges weights. The
supplied function has to have the following signature
fun(graph, label_image, boundary_term_args), and is supposed to
compute the (directed or undirected) edges between any two
                            adjacent regions of the label image. These computed weights it
adds directly to the supplied graph.GCGraph object. Additional
parameters can be passed via the boundary_term_args argument.
@type boundary_term function
@param regional_term_args Use this to pass some additional parameters to the
regional_term function.
@param boundary_term_args Use this to pass some additional parameters to the
boundary_term function.
@return the created graph
@rtype graphcut.maxflow.GraphDouble
    @raise AttributeError If an argument is malformed.
@raise FunctionError If one of the supplied functions returns unexpected results.
"""
# prepare logger
logger = Logger.getInstance()
logger.info('Performing attribute tests...')
# check, set and convert all supplied parameters
label_image = scipy.asarray(label_image)
fg_markers = scipy.asarray(fg_markers, dtype=scipy.bool_)
bg_markers = scipy.asarray(bg_markers, dtype=scipy.bool_)
# check supplied labels image
if not 1 == min(label_image.flat):
        raise AttributeError('The supplied label image either contains no regions or the regions are not labeled consecutively starting from 1.')
# set dummy functions if not supplied
if not regional_term: regional_term = __regional_term_label
if not boundary_term: boundary_term = __boundary_term_label
# check supplied functions and their signature
if not hasattr(regional_term, '__call__') or not 3 == len(inspect.getargspec(regional_term)[0]):
raise AttributeError('regional_term has to be a callable object which takes three parameters.')
if not hasattr(boundary_term, '__call__') or not 3 == len(inspect.getargspec(boundary_term)[0]):
raise AttributeError('boundary_term has to be a callable object which takes three parameters.')
logger.info('Determining number of nodes and edges.')
# compute number of nodes and edges
nodes = len(scipy.unique(label_image))
# POSSIBILITY 1: guess the number of edges (in the best situation is faster but requires a little bit more memory. In the worst is slower.)
edges = 10 * nodes
logger.debug('guessed: #nodes={} nodes / #edges={}'.format(nodes, edges))
# POSSIBILITY 2: compute the edges (slow)
#edges = len(__compute_edges(label_image))
#logger.debug('computed: #nodes={} nodes / #edges={}'.format(nodes, edges))
# prepare result graph
graph = GCGraph(nodes, edges)
logger.debug('#hardwired-nodes source/sink={}/{}'.format(len(scipy.unique(label_image[fg_markers])),
len(scipy.unique(label_image[bg_markers]))))
#logger.info('Extracting the regions bounding boxes...')
# extract the bounding boxes
#bounding_boxes = find_objects(label_image)
# compute the weights of all edges from the source and to the sink i.e.
# compute the weights of the t_edges Wt
logger.info('Computing and adding terminal edge weights...')
#regions = set(graph.get_nodes()) - set(graph.get_source_nodes()) - set(graph.get_sink_nodes())
regional_term(graph, label_image, regional_term_args) # bounding boxes indexed from 0 # old version: regional_term(graph, label_image, regions, bounding_boxes, regional_term_args)
# compute the weights of the edges between the neighbouring nodes i.e.
# compute the weights of the n_edges Wr
logger.info('Computing and adding inter-node edge weights...')
boundary_term(graph, label_image, boundary_term_args)
# collect all regions that are under the foreground resp. background markers i.e.
# collect all nodes that are connected to the source resp. sink
logger.info('Setting terminal weights for the markers...')
graph.set_source_nodes(scipy.unique(label_image[fg_markers] - 1)) # requires -1 to adapt to node id system
graph.set_sink_nodes(scipy.unique(label_image[bg_markers] - 1))
return graph.get_graph()
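# Illustrative sketch: graph_from_labels expects the same kind of marker arrays
# as the voxel variant, but its term functions take the label image as an extra
# argument, i.e. fun(graph, label_image, term_args). The function and variable
# names below are hypothetical.
#
#   def my_label_boundary_term(graph, label_image, args):
#       original_image = args
#       ...                    # add a weight for each pair of adjacent regions
#
#   gcgraph = graph_from_labels(label_image, fg_markers, bg_markers,
#                               boundary_term=my_label_boundary_term,
#                               boundary_term_args=original_image)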
def __regional_term_voxel(graph, regional_term_args):
"""Fake regional_term function with the appropriate signature."""
return {}
def __regional_term_label(graph, label_image, regional_term_args):
"""Fake regional_term function with the appropriate signature."""
return {}
def __boundary_term_voxel(graph, boundary_term_args):
"""Fake regional_term function with the appropriate signature."""
# supplying no boundary term contradicts the whole graph cut idea.
return {}
def __boundary_term_label(graph, label_image, boundary_term_args):
"""Fake regional_term function with the appropriate signature."""
# supplying no boundary term contradicts the whole graph cut idea.
return {}
def __voxel_4conectedness(shape):
"""
Returns the number of edges for the supplied image shape assuming 4-connectedness.
    The name of the function is historical. Essentially it returns the number
of edges assuming 4-connectedness only for 2D. For 3D it assumes 6-connectedness,
etc.
@param shape the shape of the image
@type shape sequence
@return the number of edges
@rtype int
"""
shape = list(shape)
while 1 in shape: shape.remove(1) # empty resp. 1-sized dimensions have to be removed (equal to scipy.squeeze on the array)
return int(round(sum([(dim - 1)/float(dim) for dim in shape]) * scipy.prod(shape)))
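# Worked example: for a 2x3 image the formula yields
# ((2-1)/2 + (3-1)/3) * 6 = 7 edges, which matches the 3 + 4 neighbour pairs
# along the two axes of a 4-connected 2x3 grid.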
| gpl-3.0 | 8,061,059,661,752,991,000 | 50.313665 | 183 | 0.638988 | false |
twisted/mantissa | xmantissa/_recordattr.py | 1 | 6833 | # -*- test-case-name: xmantissa.test.test_recordattr -*-
"""
Utility support for attributes on items which compose multiple Axiom attributes
into a single epsilon.structlike.record attribute. This can be handy when
composing a simple, common set of columns that several tables share into a
recognizable object that is not an item itself. For example, the pair of
'localpart', 'domain' into a user object, or the tuple of 'realname',
'nickname', 'hostmask', 'network' into an IRC nickname. This functionality is
currently used to make L{sharing.Identifier} objects.
This is a handy utility that should really be moved to L{axiom.attributes} and
made public as soon as a few conditions are met:
* L{WithRecordAttributes} needs to be integrated into L{Item}, or
otherwise made obsolete such that normal item instantiation works and
users don't need to call a bogus classmethod.
* L{RecordAttribute} needs to implement the full set of comparison
operators required by the informal axiom constraint language (__gt__,
__lt__, __ge__, __le__, probably some other stuff). It would also be
great if that informal language got documented somewhere.
"""
from axiom.attributes import AND
class RecordAttribute(object):
"""
A descriptor which maps a group of axiom attributes into a single attribute
which returns a record composing them all.
Use this within an Item class definition, like so::
class Address(record('localpart domain')):
'An email address.'
class Email(Item, WithRecordAttributes):
senderLocalpart = text()
senderDomain = text()
            recipientLocalpart = text()
recipientDomain = text()
body = text()
sender = RecordAttribute(Address, senderLocalpart, senderDomain)
recipient = RecordAttribute(Address, recipientLocalpart,
recipientDomain)
# ...
myEmail = Email._recordCreate(sender=Address(localpart=u'hello',
domain=u'example.com'),
recipient=Address(localpart=u'goodbye',
domain=u'example.com'))
print myEmail.sender.localpart
Note: the ugly _recordCreate method is required to create items which use
this feature due to some problems with Axiom's initialization order. See
L{WithRecordAttributes} for details.
"""
def __init__(self, recordType, attrs):
"""
Create a L{RecordAttribute} for a certain record type and set of Axiom
attributes.
@param recordType: the result, or a subclass of the result, of
L{axiom.structlike.record}.
@param attrs: a tuple of L{axiom.attributes.SQLAttribute} instances
that were defined as part of the schema on the same item type.
"""
self.recordType = recordType
self.attrs = attrs
def __get__(self, oself, type=None):
"""
Retrieve this compound attribute from the given item.
@param oself: an L{axiom.item.Item} instance, of a type which has this
L{RecordAttribute}'s L{attrs} defined in its schema.
"""
if oself is None:
return self
constructData = {}
for n, attr in zip(self.recordType.__names__, self.attrs):
constructData[n] = attr.__get__(oself, type)
return self.recordType(**constructData)
def _decompose(self, value):
"""
Decompose an instance of our record type into a dictionary mapping
attribute names to values.
@param value: an instance of self.recordType
@return: L{dict} containing the keys declared on L{record}.
"""
data = {}
for n, attr in zip(self.recordType.__names__, self.attrs):
data[attr.attrname] = getattr(value, n)
return data
def __set__(self, oself, value):
"""
Set each component attribute of this L{RecordAttribute} in turn.
@param oself: an instance of the type where this attribute is defined.
@param value: an instance of self.recordType whose values should be
used.
"""
for n, attr in zip(self.recordType.__names__, self.attrs):
attr.__set__(oself, getattr(value, n))
def __eq__(self, other):
"""
@return: a comparison object resulting in all of the component
attributes of this attribute being equal to all of the attribute values
on the other object.
@rtype: L{IComparison}
"""
return AND(*[attr == getattr(other, name)
for attr, name
in zip(self.attrs, self.recordType.__names__)])
def __ne__(self, other):
"""
@return: a comparison object resulting in all of the component
attributes of this attribute being unequal to all of the attribute
values on the other object.
@rtype: L{IComparison}
"""
return AND(*[attr != getattr(other, name)
for attr, name
in zip(self.attrs, self.recordType.__names__)])
class WithRecordAttributes(object):
"""
Axiom has an unfortunate behavior, which is a rather deep-seated bug in the
way Item objects are initialized. Default parameters are processed before
the attributes in the constructor's dictionary are actually set. In other
words, if you have a custom descriptor like L{RecordAttribute}, it can't be
passed in the constructor; if the public way to fill in a required
attribute's value is via such an API, it becomes impossible to properly
construct an object.
This mixin implements a temporary workaround, by adding a classmethod for
creating instances of classes that use L{RecordAttribute} by explicitly
    decomposing the structured record instances into their constituent values
before actually passing them on to L{Item.__init__}.
This workaround needs to be promoted to a proper resolution before this can
be a public API; users should be able to create their own descriptors that
modify underlying database state and have them behave in the expected way
during item creation.
"""
def create(cls, **kw):
"""
Create an instance of this class, first cleaning up the keyword
arguments so they will fill in any required values.
@return: an instance of C{cls}
"""
for k, v in kw.items():
attr = getattr(cls, k, None)
if isinstance(attr, RecordAttribute):
kw.pop(k)
kw.update(attr._decompose(v))
return cls(**kw)
create = classmethod(create)
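# Illustrative sketch, reusing the hypothetical Address and Email classes from
# the RecordAttribute docstring above: items are built via the create()
# classmethod so that record attributes are decomposed before Item.__init__
# runs, and the __eq__ comparison can be used in axiom queries.
#
#   email = Email.create(store=store,
#                        sender=Address(localpart=u'hello', domain=u'example.com'),
#                        recipient=Address(localpart=u'goodbye', domain=u'example.com'),
#                        body=u'hi')
#   store.query(Email,
#               Email.sender == Address(localpart=u'hello', domain=u'example.com'))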
| mit | 748,060,930,660,187,400 | 36.961111 | 79 | 0.635299 | false |
TheHonestGene/thehonestgene-pipeline | thehonestgenepipeline/riskprediction.py | 1 | 1383 | from celery.utils.log import get_task_logger
from celery.signals import after_setup_task_logger
from thehonestgenepipeline.celery import celery
from riskpredictor.core import predictor as pred
from os import path
from . import GENOTYPE_FOLDER,DATA_FOLDER
from . import get_platform_from_genotype
from .progress_logger import CeleryProgressLogHandler
import h5py
import logging
logger = get_task_logger(pred.__name__)
# pass through environment
@after_setup_task_logger.connect
def setup_task_logger(**kwargs):
progress_handler = CeleryProgressLogHandler(celery,'riskprediction')
logger.addHandler(progress_handler)
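# Illustrative sketch: as a Celery task, run() is normally queued
# asynchronously rather than called directly, e.g.
#
#   run.delay('some-genotype-id', 'some-trait')   # both arguments hypothetical
#
# The worker then reads GENOTYPE_FOLDER/IMPUTED/<id>.hdf5 and the matching
# DATA_FOLDER/PRED_DATA/<trait>/<platform>/ folder as shown below.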
@celery.task(serializer='json')
def run(id,trait):
try:
log_extra={'id':id,'progress':0,'data':trait}
logger.info('Starting Risk Prediction',extra=log_extra)
        genotype_file = '%s/IMPUTED/%s.hdf5' % (GENOTYPE_FOLDER, id)
platform = get_platform_from_genotype(genotype_file)
trait_folder = '%s/PRED_DATA/%s/%s/' % (DATA_FOLDER,trait,platform)
risk = pred.predict(genotype_file,trait_folder,log_extra=log_extra)
result = {'trait':trait,'risk':risk}
logger.info('Finished Risk Prediction',extra={'id':id,'progress':100,'state':'FINISHED','data':trait})
except Exception as err:
logger.error('Error calculating risk prediction',extra=log_extra)
raise err
    return result
 | mit | -3,424,874,194,230,446,000 | 37.444444 | 110 | 0.723066 | false |
kaushik94/sympy | sympy/core/function.py | 1 | 112629 | """
There are three types of functions implemented in SymPy:
1) defined functions (in the sense that they can be evaluated) like
exp or sin; they have a name and a body:
f = exp
2) undefined functions which have a name but no body. Undefined
functions can be defined using a Function class as follows:
f = Function('f')
(the result will be a Function instance)
3) anonymous functions (or lambda functions) which have a body (defined
with dummy variables) but have no name:
f = Lambda(x, exp(x)*x)
f = Lambda((x, y), exp(x)*y)
The fourth type of functions are composites, like (sin + cos)(x); these work in
SymPy core, but are not yet part of SymPy.
Examples
========
>>> import sympy
>>> f = sympy.Function("f")
>>> from sympy.abc import x
>>> f(x)
f(x)
>>> print(sympy.srepr(f(x).func))
Function('f')
>>> f(x).args
(x,)
"""
from __future__ import print_function, division
from .add import Add
from .assumptions import ManagedProperties
from .basic import Basic, _atomic
from .cache import cacheit
from .compatibility import iterable, is_sequence, as_int, ordered, Iterable
from .decorators import _sympifyit
from .expr import Expr, AtomicExpr
from .numbers import Rational, Float
from .operations import LatticeOp
from .rules import Transform
from .singleton import S
from .sympify import sympify
from sympy.core.compatibility import string_types, with_metaclass, PY3, range
from sympy.core.containers import Tuple, Dict
from sympy.core.evaluate import global_evaluate
from sympy.core.logic import fuzzy_and
from sympy.utilities import default_sort_key
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import has_dups, sift
from sympy.utilities.misc import filldedent
import mpmath
import mpmath.libmp as mlib
import inspect
from collections import Counter
def _coeff_isneg(a):
"""Return True if the leading Number is negative.
Examples
========
>>> from sympy.core.function import _coeff_isneg
>>> from sympy import S, Symbol, oo, pi
>>> _coeff_isneg(-3*pi)
True
>>> _coeff_isneg(S(3))
False
>>> _coeff_isneg(-oo)
True
>>> _coeff_isneg(Symbol('n', negative=True)) # coeff is 1
False
For matrix expressions:
>>> from sympy import MatrixSymbol, sqrt
>>> A = MatrixSymbol("A", 3, 3)
>>> _coeff_isneg(-sqrt(2)*A)
True
>>> _coeff_isneg(sqrt(2)*A)
False
"""
if a.is_MatMul:
a = a.args[0]
if a.is_Mul:
a = a.args[0]
return a.is_Number and a.is_extended_negative
class PoleError(Exception):
pass
class ArgumentIndexError(ValueError):
def __str__(self):
return ("Invalid operation with argument number %s for Function %s" %
(self.args[1], self.args[0]))
class BadSignatureError(TypeError):
'''Raised when a Lambda is created with an invalid signature'''
pass
class BadArgumentsError(TypeError):
'''Raised when a Lambda is called with an incorrect number of arguments'''
pass
# Python 2/3 version that does not raise a Deprecation warning
def arity(cls):
"""Return the arity of the function if it is known, else None.
When default values are specified for some arguments, they are
optional and the arity is reported as a tuple of possible values.
Examples
========
>>> from sympy.core.function import arity
>>> from sympy import log
>>> arity(lambda x: x)
1
>>> arity(log)
(1, 2)
>>> arity(lambda *x: sum(x)) is None
True
"""
eval_ = getattr(cls, 'eval', cls)
if PY3:
parameters = inspect.signature(eval_).parameters.items()
if [p for _, p in parameters if p.kind == p.VAR_POSITIONAL]:
return
p_or_k = [p for _, p in parameters if p.kind == p.POSITIONAL_OR_KEYWORD]
# how many have no default and how many have a default value
no, yes = map(len, sift(p_or_k,
lambda p:p.default == p.empty, binary=True))
return no if not yes else tuple(range(no, no + yes + 1))
else:
cls_ = int(hasattr(cls, 'eval')) # correction for cls arguments
evalargspec = inspect.getargspec(eval_)
if evalargspec.varargs:
return
else:
evalargs = len(evalargspec.args) - cls_
if evalargspec.defaults:
# if there are default args then they are optional; the
# fewest args will occur when all defaults are used and
# the most when none are used (i.e. all args are given)
fewest = evalargs - len(evalargspec.defaults)
return tuple(range(fewest, evalargs + 1))
return evalargs
class FunctionClass(ManagedProperties):
"""
Base class for function classes. FunctionClass is a subclass of type.
Use Function('<function name>' [ , signature ]) to create
undefined function classes.
"""
_new = type.__new__
def __init__(cls, *args, **kwargs):
# honor kwarg value or class-defined value before using
# the number of arguments in the eval function (if present)
nargs = kwargs.pop('nargs', cls.__dict__.get('nargs', arity(cls)))
# Canonicalize nargs here; change to set in nargs.
if is_sequence(nargs):
if not nargs:
raise ValueError(filldedent('''
Incorrectly specified nargs as %s:
if there are no arguments, it should be
`nargs = 0`;
if there are any number of arguments,
it should be
`nargs = None`''' % str(nargs)))
nargs = tuple(ordered(set(nargs)))
elif nargs is not None:
nargs = (as_int(nargs),)
cls._nargs = nargs
super(FunctionClass, cls).__init__(*args, **kwargs)
@property
def __signature__(self):
"""
Allow Python 3's inspect.signature to give a useful signature for
Function subclasses.
"""
# Python 3 only, but backports (like the one in IPython) still might
# call this.
try:
from inspect import signature
except ImportError:
return None
# TODO: Look at nargs
return signature(self.eval)
@property
def free_symbols(self):
return set()
@property
def xreplace(self):
# Function needs args so we define a property that returns
# a function that takes args...and then use that function
# to return the right value
return lambda rule, **_: rule.get(self, self)
@property
def nargs(self):
"""Return a set of the allowed number of arguments for the function.
Examples
========
>>> from sympy.core.function import Function
>>> from sympy.abc import x, y
>>> f = Function('f')
If the function can take any number of arguments, the set of whole
numbers is returned:
>>> Function('f').nargs
Naturals0
If the function was initialized to accept one or more arguments, a
corresponding set will be returned:
>>> Function('f', nargs=1).nargs
FiniteSet(1)
>>> Function('f', nargs=(2, 1)).nargs
FiniteSet(1, 2)
The undefined function, after application, also has the nargs
attribute; the actual number of arguments is always available by
checking the ``args`` attribute:
>>> f = Function('f')
>>> f(1).nargs
Naturals0
>>> len(f(1).args)
1
"""
from sympy.sets.sets import FiniteSet
# XXX it would be nice to handle this in __init__ but there are import
# problems with trying to import FiniteSet there
return FiniteSet(*self._nargs) if self._nargs else S.Naturals0
def __repr__(cls):
return cls.__name__
class Application(with_metaclass(FunctionClass, Basic)):
"""
Base class for applied functions.
Instances of Application represent the result of applying an application of
any type to any object.
"""
is_Function = True
@cacheit
def __new__(cls, *args, **options):
from sympy.sets.fancysets import Naturals0
from sympy.sets.sets import FiniteSet
args = list(map(sympify, args))
evaluate = options.pop('evaluate', global_evaluate[0])
# WildFunction (and anything else like it) may have nargs defined
# and we throw that value away here
options.pop('nargs', None)
if options:
raise ValueError("Unknown options: %s" % options)
if evaluate:
evaluated = cls.eval(*args)
if evaluated is not None:
return evaluated
obj = super(Application, cls).__new__(cls, *args, **options)
# make nargs uniform here
sentinel = object()
objnargs = getattr(obj, "nargs", sentinel)
if objnargs is not sentinel:
# things passing through here:
# - functions subclassed from Function (e.g. myfunc(1).nargs)
# - functions like cos(1).nargs
# - AppliedUndef with given nargs like Function('f', nargs=1)(1).nargs
# Canonicalize nargs here
if is_sequence(objnargs):
nargs = tuple(ordered(set(objnargs)))
elif objnargs is not None:
nargs = (as_int(objnargs),)
else:
nargs = None
else:
# things passing through here:
# - WildFunction('f').nargs
# - AppliedUndef with no nargs like Function('f')(1).nargs
nargs = obj._nargs # note the underscore here
# convert to FiniteSet
obj.nargs = FiniteSet(*nargs) if nargs else Naturals0()
return obj
@classmethod
def eval(cls, *args):
"""
Returns a canonical form of cls applied to arguments args.
The eval() method is called when the class cls is about to be
instantiated and it should return either some simplified instance
(possible of some other class), or if the class cls should be
unmodified, return None.
Examples of eval() for the function "sign"
---------------------------------------------
.. code-block:: python
@classmethod
def eval(cls, arg):
if arg is S.NaN:
return S.NaN
if arg.is_zero: return S.Zero
if arg.is_positive: return S.One
if arg.is_negative: return S.NegativeOne
if isinstance(arg, Mul):
coeff, terms = arg.as_coeff_Mul(rational=True)
if coeff is not S.One:
return cls(coeff) * cls(terms)
"""
return
@property
def func(self):
return self.__class__
def _eval_subs(self, old, new):
if (old.is_Function and new.is_Function and
callable(old) and callable(new) and
old == self.func and len(self.args) in new.nargs):
return new(*[i._subs(old, new) for i in self.args])
class Function(Application, Expr):
"""
Base class for applied mathematical functions.
It also serves as a constructor for undefined function classes.
Examples
========
First example shows how to use Function as a constructor for undefined
function classes:
>>> from sympy import Function, Symbol
>>> x = Symbol('x')
>>> f = Function('f')
>>> g = Function('g')(x)
>>> f
f
>>> f(x)
f(x)
>>> g
g(x)
>>> f(x).diff(x)
Derivative(f(x), x)
>>> g.diff(x)
Derivative(g(x), x)
Assumptions can be passed to Function, and if function is initialized with a
Symbol, the function inherits the name and assumptions associated with the Symbol:
>>> f_real = Function('f', real=True)
>>> f_real(x).is_real
True
>>> f_real_inherit = Function(Symbol('f', real=True))
>>> f_real_inherit(x).is_real
True
Note that assumptions on a function are unrelated to the assumptions on
the variable it is called on. If you want to add a relationship, subclass
Function and define the appropriate ``_eval_is_assumption`` methods.
In the following example Function is used as a base class for
``my_func`` that represents a mathematical function *my_func*. Suppose
that it is well known, that *my_func(0)* is *1* and *my_func* at infinity
goes to *0*, so we want those two simplifications to occur automatically.
Suppose also that *my_func(x)* is real exactly when *x* is real. Here is
an implementation that honours those requirements:
>>> from sympy import Function, S, oo, I, sin
>>> class my_func(Function):
...
... @classmethod
... def eval(cls, x):
... if x.is_Number:
... if x.is_zero:
... return S.One
... elif x is S.Infinity:
... return S.Zero
...
... def _eval_is_real(self):
... return self.args[0].is_real
...
>>> x = S('x')
>>> my_func(0) + sin(0)
1
>>> my_func(oo)
0
>>> my_func(3.54).n() # Not yet implemented for my_func.
my_func(3.54)
>>> my_func(I).is_real
False
In order for ``my_func`` to become useful, several other methods would
need to be implemented. See source code of some of the already
implemented functions for more complete examples.
Also, if the function can take more than one argument, then ``nargs``
must be defined, e.g. if ``my_func`` can take one or two arguments
then,
>>> class my_func(Function):
... nargs = (1, 2)
...
>>>
"""
@property
def _diff_wrt(self):
return False
@cacheit
def __new__(cls, *args, **options):
# Handle calls like Function('f')
if cls is Function:
return UndefinedFunction(*args, **options)
n = len(args)
if n not in cls.nargs:
# XXX: exception message must be in exactly this format to
# make it work with NumPy's functions like vectorize(). See,
# for example, https://github.com/numpy/numpy/issues/1697.
# The ideal solution would be just to attach metadata to
# the exception and change NumPy to take advantage of this.
temp = ('%(name)s takes %(qual)s %(args)s '
'argument%(plural)s (%(given)s given)')
raise TypeError(temp % {
'name': cls,
'qual': 'exactly' if len(cls.nargs) == 1 else 'at least',
'args': min(cls.nargs),
'plural': 's'*(min(cls.nargs) != 1),
'given': n})
evaluate = options.get('evaluate', global_evaluate[0])
result = super(Function, cls).__new__(cls, *args, **options)
if evaluate and isinstance(result, cls) and result.args:
pr2 = min(cls._should_evalf(a) for a in result.args)
if pr2 > 0:
pr = max(cls._should_evalf(a) for a in result.args)
result = result.evalf(mlib.libmpf.prec_to_dps(pr))
return result
@classmethod
def _should_evalf(cls, arg):
"""
Decide if the function should automatically evalf().
By default (in this implementation), this happens if (and only if) the
ARG is a floating point number.
This function is used by __new__.
Returns the precision to evalf to, or -1 if it shouldn't evalf.
"""
from sympy.core.evalf import pure_complex
if arg.is_Float:
return arg._prec
if not arg.is_Add:
return -1
m = pure_complex(arg)
if m is None or not (m[0].is_Float or m[1].is_Float):
return -1
l = [i._prec for i in m if i.is_Float]
l.append(-1)
return max(l)
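    # Illustrative note: this is why cos(1) stays symbolic while cos(1.0)
    # evaluates straight to a Float: a Float argument reports its precision
    # here, which triggers the evalf call in Function.__new__.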
@classmethod
def class_key(cls):
from sympy.sets.fancysets import Naturals0
funcs = {
'exp': 10,
'log': 11,
'sin': 20,
'cos': 21,
'tan': 22,
'cot': 23,
'sinh': 30,
'cosh': 31,
'tanh': 32,
'coth': 33,
'conjugate': 40,
're': 41,
'im': 42,
'arg': 43,
}
name = cls.__name__
try:
i = funcs[name]
except KeyError:
i = 0 if isinstance(cls.nargs, Naturals0) else 10000
return 4, i, name
@property
def is_commutative(self):
"""
Returns whether the function is commutative.
"""
if all(getattr(t, 'is_commutative') for t in self.args):
return True
else:
return False
def _eval_evalf(self, prec):
def _get_mpmath_func(fname):
"""Lookup mpmath function based on name"""
if isinstance(self, AppliedUndef):
# Shouldn't lookup in mpmath but might have ._imp_
return None
if not hasattr(mpmath, fname):
from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
fname = MPMATH_TRANSLATIONS.get(fname, None)
if fname is None:
return None
return getattr(mpmath, fname)
func = _get_mpmath_func(self.func.__name__)
# Fall-back evaluation
if func is None:
imp = getattr(self, '_imp_', None)
if imp is None:
return None
try:
return Float(imp(*[i.evalf(prec) for i in self.args]), prec)
except (TypeError, ValueError):
return None
# Convert all args to mpf or mpc
# Convert the arguments to *higher* precision than requested for the
# final result.
# XXX + 5 is a guess, it is similar to what is used in evalf.py. Should
# we be more intelligent about it?
try:
args = [arg._to_mpmath(prec + 5) for arg in self.args]
def bad(m):
from mpmath import mpf, mpc
# the precision of an mpf value is the last element
# if that is 1 (and m[1] is not 1 which would indicate a
# power of 2), then the eval failed; so check that none of
# the arguments failed to compute to a finite precision.
# Note: An mpc value has two parts, the re and imag tuple;
# check each of those parts, too. Anything else is allowed to
# pass
if isinstance(m, mpf):
m = m._mpf_
return m[1] !=1 and m[-1] == 1
elif isinstance(m, mpc):
m, n = m._mpc_
return m[1] !=1 and m[-1] == 1 and \
n[1] !=1 and n[-1] == 1
else:
return False
if any(bad(a) for a in args):
raise ValueError # one or more args failed to compute with significance
except ValueError:
return
with mpmath.workprec(prec):
v = func(*args)
return Expr._from_mpmath(v, prec)
def _eval_derivative(self, s):
# f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s)
i = 0
l = []
for a in self.args:
i += 1
da = a.diff(s)
if da.is_zero:
continue
try:
df = self.fdiff(i)
except ArgumentIndexError:
df = Function.fdiff(self, i)
l.append(df * da)
return Add(*l)
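    # Illustrative note: this implements the chain rule at the level of applied
    # functions, e.g. f(g(x)).diff(x) produces
    # Derivative(f(g(x)), g(x))*Derivative(g(x), x), as shown in the Derivative
    # docstring later in this module.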
def _eval_is_commutative(self):
return fuzzy_and(a.is_commutative for a in self.args)
def as_base_exp(self):
"""
Returns the method as the 2-tuple (base, exponent).
"""
return self, S.One
def _eval_aseries(self, n, args0, x, logx):
"""
Compute an asymptotic expansion around args0, in terms of self.args.
This function is only used internally by _eval_nseries and should not
be called directly; derived classes can overwrite this to implement
asymptotic expansions.
"""
from sympy.utilities.misc import filldedent
raise PoleError(filldedent('''
Asymptotic expansion of %s around %s is
not implemented.''' % (type(self), args0)))
def _eval_nseries(self, x, n, logx):
"""
This function does compute series for multivariate functions,
but the expansion is always in terms of *one* variable.
Examples
========
>>> from sympy import atan2
>>> from sympy.abc import x, y
>>> atan2(x, y).series(x, n=2)
atan2(0, y) + x/y + O(x**2)
>>> atan2(x, y).series(y, n=2)
-y/x + atan2(x, 0) + O(y**2)
This function also computes asymptotic expansions, if necessary
and possible:
>>> from sympy import loggamma
>>> loggamma(1/x)._eval_nseries(x,0,None)
-1/x - log(x)/x + log(x)/2 + O(1)
"""
from sympy import Order
from sympy.sets.sets import FiniteSet
args = self.args
args0 = [t.limit(x, 0) for t in args]
if any(t.is_finite is False for t in args0):
from sympy import oo, zoo, nan
# XXX could use t.as_leading_term(x) here but it's a little
# slower
a = [t.compute_leading_term(x, logx=logx) for t in args]
a0 = [t.limit(x, 0) for t in a]
if any([t.has(oo, -oo, zoo, nan) for t in a0]):
return self._eval_aseries(n, args0, x, logx)
# Careful: the argument goes to oo, but only logarithmically so. We
# are supposed to do a power series expansion "around the
# logarithmic term". e.g.
# f(1+x+log(x))
# -> f(1+logx) + x*f'(1+logx) + O(x**2)
# where 'logx' is given in the argument
a = [t._eval_nseries(x, n, logx) for t in args]
z = [r - r0 for (r, r0) in zip(a, a0)]
p = [Dummy() for _ in z]
q = []
v = None
for ai, zi, pi in zip(a0, z, p):
if zi.has(x):
if v is not None:
raise NotImplementedError
q.append(ai + pi)
v = pi
else:
q.append(ai)
e1 = self.func(*q)
if v is None:
return e1
s = e1._eval_nseries(v, n, logx)
o = s.getO()
s = s.removeO()
s = s.subs(v, zi).expand() + Order(o.expr.subs(v, zi), x)
return s
if (self.func.nargs is S.Naturals0
or (self.func.nargs == FiniteSet(1) and args0[0])
or any(c > 1 for c in self.func.nargs)):
e = self
e1 = e.expand()
if e == e1:
#for example when e = sin(x+1) or e = sin(cos(x))
#let's try the general algorithm
term = e.subs(x, S.Zero)
if term.is_finite is False or term is S.NaN:
raise PoleError("Cannot expand %s around 0" % (self))
series = term
fact = S.One
_x = Dummy('x')
e = e.subs(x, _x)
for i in range(n - 1):
i += 1
fact *= Rational(i)
e = e.diff(_x)
subs = e.subs(_x, S.Zero)
if subs is S.NaN:
# try to evaluate a limit if we have to
subs = e.limit(_x, S.Zero)
if subs.is_finite is False:
raise PoleError("Cannot expand %s around 0" % (self))
term = subs*(x**i)/fact
term = term.expand()
series += term
return series + Order(x**n, x)
return e1.nseries(x, n=n, logx=logx)
arg = self.args[0]
l = []
g = None
# try to predict a number of terms needed
nterms = n + 2
cf = Order(arg.as_leading_term(x), x).getn()
if cf != 0:
nterms = int(nterms / cf)
for i in range(nterms):
g = self.taylor_term(i, arg, g)
g = g.nseries(x, n=n, logx=logx)
l.append(g)
return Add(*l) + Order(x**n, x)
def fdiff(self, argindex=1):
"""
Returns the first derivative of the function.
"""
if not (1 <= argindex <= len(self.args)):
raise ArgumentIndexError(self, argindex)
ix = argindex - 1
A = self.args[ix]
if A._diff_wrt:
if len(self.args) == 1:
return Derivative(self, A)
if A.is_Symbol:
for i, v in enumerate(self.args):
if i != ix and A in v.free_symbols:
# it can't be in any other argument's free symbols
# issue 8510
break
else:
return Derivative(self, A)
else:
free = A.free_symbols
for i, a in enumerate(self.args):
if ix != i and a.free_symbols & free:
break
else:
                    # there is no possible interaction between args
return Derivative(self, A)
# See issue 4624 and issue 4719, 5600 and 8510
D = Dummy('xi_%i' % argindex, dummy_index=hash(A))
args = self.args[:ix] + (D,) + self.args[ix + 1:]
return Subs(Derivative(self.func(*args), D), D, A)
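    # Illustrative note: concrete subclasses such as sin override fdiff to
    # return the derivative function directly (sin(x).fdiff() gives cos(x));
    # this generic implementation returns an unevaluated form, e.g.
    # Function('f')(x).fdiff() gives Derivative(f(x), x).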
def _eval_as_leading_term(self, x):
"""Stub that should be overridden by new Functions to return
the first non-zero term in a series if ever an x-dependent
argument whose leading term vanishes as x -> 0 might be encountered.
See, for example, cos._eval_as_leading_term.
"""
from sympy import Order
args = [a.as_leading_term(x) for a in self.args]
o = Order(1, x)
if any(x in a.free_symbols and o.contains(a) for a in args):
# Whereas x and any finite number are contained in O(1, x),
# expressions like 1/x are not. If any arg simplified to a
# vanishing expression as x -> 0 (like x or x**2, but not
# 3, 1/x, etc...) then the _eval_as_leading_term is needed
# to supply the first non-zero term of the series,
#
# e.g. expression leading term
# ---------- ------------
# cos(1/x) cos(1/x)
# cos(cos(x)) cos(1)
# cos(x) 1 <- _eval_as_leading_term needed
# sin(x) x <- _eval_as_leading_term needed
#
raise NotImplementedError(
'%s has no _eval_as_leading_term routine' % self.func)
else:
return self.func(*args)
def _sage_(self):
import sage.all as sage
fname = self.func.__name__
func = getattr(sage, fname, None)
args = [arg._sage_() for arg in self.args]
# In the case the function is not known in sage:
if func is None:
import sympy
if getattr(sympy, fname, None) is None:
# abstract function
return sage.function(fname)(*args)
else:
# the function defined in sympy is not known in sage
# this exception is caught in sage
raise AttributeError
return func(*args)
class AppliedUndef(Function):
"""
Base class for expressions resulting from the application of an undefined
function.
"""
is_number = False
def __new__(cls, *args, **options):
args = list(map(sympify, args))
u = [a.name for a in args if isinstance(a, UndefinedFunction)]
if u:
raise TypeError('Invalid argument: expecting an expression, not UndefinedFunction%s: %s' % (
's'*(len(u) > 1), ', '.join(u)))
obj = super(AppliedUndef, cls).__new__(cls, *args, **options)
return obj
def _eval_as_leading_term(self, x):
return self
def _sage_(self):
import sage.all as sage
fname = str(self.func)
args = [arg._sage_() for arg in self.args]
func = sage.function(fname)(*args)
return func
@property
def _diff_wrt(self):
"""
Allow derivatives wrt to undefined functions.
Examples
========
>>> from sympy import Function, Symbol
>>> f = Function('f')
>>> x = Symbol('x')
>>> f(x)._diff_wrt
True
>>> f(x).diff(x)
Derivative(f(x), x)
"""
return True
class UndefSageHelper(object):
"""
Helper to facilitate Sage conversion.
"""
def __get__(self, ins, typ):
import sage.all as sage
if ins is None:
return lambda: sage.function(typ.__name__)
else:
args = [arg._sage_() for arg in ins.args]
return lambda : sage.function(ins.__class__.__name__)(*args)
_undef_sage_helper = UndefSageHelper()
class UndefinedFunction(FunctionClass):
"""
The (meta)class of undefined functions.
"""
def __new__(mcl, name, bases=(AppliedUndef,), __dict__=None, **kwargs):
from .symbol import _filter_assumptions
# Allow Function('f', real=True)
# and/or Function(Symbol('f', real=True))
assumptions, kwargs = _filter_assumptions(kwargs)
if isinstance(name, Symbol):
assumptions = name._merge(assumptions)
name = name.name
elif not isinstance(name, string_types):
raise TypeError('expecting string or Symbol for name')
else:
commutative = assumptions.get('commutative', None)
assumptions = Symbol(name, **assumptions).assumptions0
if commutative is None:
assumptions.pop('commutative')
__dict__ = __dict__ or {}
# put the `is_*` for into __dict__
__dict__.update({'is_%s' % k: v for k, v in assumptions.items()})
# You can add other attributes, although they do have to be hashable
# (but seriously, if you want to add anything other than assumptions,
# just subclass Function)
__dict__.update(kwargs)
# add back the sanitized assumptions without the is_ prefix
kwargs.update(assumptions)
# Save these for __eq__
__dict__.update({'_kwargs': kwargs})
# do this for pickling
__dict__['__module__'] = None
obj = super(UndefinedFunction, mcl).__new__(mcl, name, bases, __dict__)
obj.name = name
obj._sage_ = _undef_sage_helper
return obj
def __instancecheck__(cls, instance):
return cls in type(instance).__mro__
_kwargs = {}
def __hash__(self):
return hash((self.class_key(), frozenset(self._kwargs.items())))
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.class_key() == other.class_key() and
self._kwargs == other._kwargs)
def __ne__(self, other):
return not self == other
@property
def _diff_wrt(self):
return False
class WildFunction(Function, AtomicExpr):
"""
A WildFunction function matches any function (with its arguments).
Examples
========
>>> from sympy import WildFunction, Function, cos
>>> from sympy.abc import x, y
>>> F = WildFunction('F')
>>> f = Function('f')
>>> F.nargs
Naturals0
>>> x.match(F)
>>> F.match(F)
{F_: F_}
>>> f(x).match(F)
{F_: f(x)}
>>> cos(x).match(F)
{F_: cos(x)}
>>> f(x, y).match(F)
{F_: f(x, y)}
To match functions with a given number of arguments, set ``nargs`` to the
desired value at instantiation:
>>> F = WildFunction('F', nargs=2)
>>> F.nargs
FiniteSet(2)
>>> f(x).match(F)
>>> f(x, y).match(F)
{F_: f(x, y)}
To match functions with a range of arguments, set ``nargs`` to a tuple
containing the desired number of arguments, e.g. if ``nargs = (1, 2)``
then functions with 1 or 2 arguments will be matched.
>>> F = WildFunction('F', nargs=(1, 2))
>>> F.nargs
FiniteSet(1, 2)
>>> f(x).match(F)
{F_: f(x)}
>>> f(x, y).match(F)
{F_: f(x, y)}
>>> f(x, y, 1).match(F)
"""
include = set()
def __init__(cls, name, **assumptions):
from sympy.sets.sets import Set, FiniteSet
cls.name = name
nargs = assumptions.pop('nargs', S.Naturals0)
if not isinstance(nargs, Set):
# Canonicalize nargs here. See also FunctionClass.
if is_sequence(nargs):
nargs = tuple(ordered(set(nargs)))
elif nargs is not None:
nargs = (as_int(nargs),)
nargs = FiniteSet(*nargs)
cls.nargs = nargs
def matches(self, expr, repl_dict={}, old=False):
if not isinstance(expr, (AppliedUndef, Function)):
return None
if len(expr.args) not in self.nargs:
return None
repl_dict = repl_dict.copy()
repl_dict[self] = expr
return repl_dict
class Derivative(Expr):
"""
Carries out differentiation of the given expression with respect to symbols.
Examples
========
>>> from sympy import Derivative, Function, symbols, Subs
>>> from sympy.abc import x, y
>>> f, g = symbols('f g', cls=Function)
>>> Derivative(x**2, x, evaluate=True)
2*x
Denesting of derivatives retains the ordering of variables:
>>> Derivative(Derivative(f(x, y), y), x)
Derivative(f(x, y), y, x)
Contiguously identical symbols are merged into a tuple giving
the symbol and the count:
>>> Derivative(f(x), x, x, y, x)
Derivative(f(x), (x, 2), y, x)
If the derivative cannot be performed, and evaluate is True, the
order of the variables of differentiation will be made canonical:
>>> Derivative(f(x, y), y, x, evaluate=True)
Derivative(f(x, y), x, y)
Derivatives with respect to undefined functions can be calculated:
>>> Derivative(f(x)**2, f(x), evaluate=True)
2*f(x)
Such derivatives will show up when the chain rule is used to
    evaluate a derivative:
>>> f(g(x)).diff(x)
Derivative(f(g(x)), g(x))*Derivative(g(x), x)
Substitution is used to represent derivatives of functions with
arguments that are not symbols or functions:
>>> f(2*x + 3).diff(x) == 2*Subs(f(y).diff(y), y, 2*x + 3)
True
Notes
=====
Simplification of high-order derivatives:
Because there can be a significant amount of simplification that can be
done when multiple differentiations are performed, results will be
automatically simplified in a fairly conservative fashion unless the
keyword ``simplify`` is set to False.
>>> from sympy import cos, sin, sqrt, diff, Function, symbols
>>> from sympy.abc import x, y, z
>>> f, g = symbols('f,g', cls=Function)
>>> e = sqrt((x + 1)**2 + x)
>>> diff(e, (x, 5), simplify=False).count_ops()
136
>>> diff(e, (x, 5)).count_ops()
30
Ordering of variables:
If evaluate is set to True and the expression cannot be evaluated, the
list of differentiation symbols will be sorted, that is, the expression is
assumed to have continuous derivatives up to the order asked.
Derivative wrt non-Symbols:
For the most part, one may not differentiate wrt non-symbols.
For example, we do not allow differentiation wrt `x*y` because
there are multiple ways of structurally defining where x*y appears
in an expression: a very strict definition would make
(x*y*z).diff(x*y) == 0. Derivatives wrt defined functions (like
cos(x)) are not allowed, either:
>>> (x*y*z).diff(x*y)
Traceback (most recent call last):
...
ValueError: Can't calculate derivative wrt x*y.
To make it easier to work with variational calculus, however,
derivatives wrt AppliedUndef and Derivatives are allowed.
For example, in the Euler-Lagrange method one may write
F(t, u, v) where u = f(t) and v = f'(t). These variables can be
written explicitly as functions of time::
>>> from sympy.abc import t
>>> F = Function('F')
>>> U = f(t)
>>> V = U.diff(t)
The derivative wrt f(t) can be obtained directly:
>>> direct = F(t, U, V).diff(U)
When differentiation wrt a non-Symbol is attempted, the non-Symbol
is temporarily converted to a Symbol while the differentiation
is performed and the same answer is obtained:
>>> indirect = F(t, U, V).subs(U, x).diff(x).subs(x, U)
>>> assert direct == indirect
The implication of this non-symbol replacement is that all
functions are treated as independent of other functions and the
symbols are independent of the functions that contain them::
>>> x.diff(f(x))
0
>>> g(x).diff(f(x))
0
It also means that derivatives are assumed to depend only
on the variables of differentiation, not on anything contained
within the expression being differentiated::
>>> F = f(x)
>>> Fx = F.diff(x)
>>> Fx.diff(F) # derivative depends on x, not F
0
>>> Fxx = Fx.diff(x)
>>> Fxx.diff(Fx) # derivative depends on x, not Fx
0
The last example can be made explicit by showing the replacement
of Fx in Fxx with y:
>>> Fxx.subs(Fx, y)
Derivative(y, x)
Since that in itself will evaluate to zero, differentiating
wrt Fx will also be zero:
>>> _.doit()
0
Replacing undefined functions with concrete expressions
One must be careful to replace undefined functions with expressions
that contain variables consistent with the function definition and
    the variables of differentiation or else inconsistent results will
be obtained. Consider the following example:
>>> eq = f(x)*g(y)
>>> eq.subs(f(x), x*y).diff(x, y).doit()
y*Derivative(g(y), y) + g(y)
>>> eq.diff(x, y).subs(f(x), x*y).doit()
y*Derivative(g(y), y)
The results differ because `f(x)` was replaced with an expression
that involved both variables of differentiation. In the abstract
case, differentiation of `f(x)` by `y` is 0; in the concrete case,
the presence of `y` made that derivative nonvanishing and produced
the extra `g(y)` term.
Defining differentiation for an object
An object must define ._eval_derivative(symbol) method that returns
the differentiation result. This function only needs to consider the
non-trivial case where expr contains symbol and it should call the diff()
method internally (not _eval_derivative); Derivative should be the only
one to call _eval_derivative.
Any class can allow derivatives to be taken with respect to
itself (while indicating its scalar nature). See the
docstring of Expr._diff_wrt.
See Also
========
_sort_variable_count
"""
is_Derivative = True
@property
def _diff_wrt(self):
"""An expression may be differentiated wrt a Derivative if
it is in elementary form.
Examples
========
>>> from sympy import Function, Derivative, cos
>>> from sympy.abc import x
>>> f = Function('f')
>>> Derivative(f(x), x)._diff_wrt
True
>>> Derivative(cos(x), x)._diff_wrt
False
>>> Derivative(x + 1, x)._diff_wrt
False
A Derivative might be an unevaluated form of what will not be
a valid variable of differentiation if evaluated. For example,
>>> Derivative(f(f(x)), x).doit()
Derivative(f(x), x)*Derivative(f(f(x)), f(x))
Such an expression will present the same ambiguities as arise
when dealing with any other product, like ``2*x``, so ``_diff_wrt``
is False:
>>> Derivative(f(f(x)), x)._diff_wrt
False
"""
return self.expr._diff_wrt and isinstance(self.doit(), Derivative)
def __new__(cls, expr, *variables, **kwargs):
from sympy.matrices.common import MatrixCommon
from sympy import Integer, MatrixExpr
from sympy.tensor.array import Array, NDimArray
from sympy.utilities.misc import filldedent
expr = sympify(expr)
symbols_or_none = getattr(expr, "free_symbols", None)
has_symbol_set = isinstance(symbols_or_none, set)
if not has_symbol_set:
raise ValueError(filldedent('''
Since there are no variables in the expression %s,
it cannot be differentiated.''' % expr))
# determine value for variables if it wasn't given
if not variables:
variables = expr.free_symbols
if len(variables) != 1:
if expr.is_number:
return S.Zero
if len(variables) == 0:
raise ValueError(filldedent('''
Since there are no variables in the expression,
the variable(s) of differentiation must be supplied
to differentiate %s''' % expr))
else:
raise ValueError(filldedent('''
Since there is more than one variable in the
expression, the variable(s) of differentiation
must be supplied to differentiate %s''' % expr))
# Standardize the variables by sympifying them:
variables = list(sympify(variables))
# Split the list of variables into a list of the variables we are diff
# wrt, where each element of the list has the form (s, count) where
# s is the entity to diff wrt and count is the order of the
# derivative.
variable_count = []
array_likes = (tuple, list, Tuple)
for i, v in enumerate(variables):
if isinstance(v, Integer):
if i == 0:
raise ValueError("First variable cannot be a number: %i" % v)
count = v
prev, prevcount = variable_count[-1]
if prevcount != 1:
raise TypeError("tuple {0} followed by number {1}".format((prev, prevcount), v))
if count == 0:
variable_count.pop()
else:
variable_count[-1] = Tuple(prev, count)
else:
if isinstance(v, array_likes):
if len(v) == 0:
# Ignore empty tuples: Derivative(expr, ... , (), ... )
continue
if isinstance(v[0], array_likes):
# Derive by array: Derivative(expr, ... , [[x, y, z]], ... )
if len(v) == 1:
v = Array(v[0])
count = 1
else:
v, count = v
v = Array(v)
else:
v, count = v
if count == 0:
continue
elif isinstance(v, UndefinedFunction):
raise TypeError(
"cannot differentiate wrt "
"UndefinedFunction: %s" % v)
else:
count = 1
variable_count.append(Tuple(v, count))
# light evaluation of contiguous, identical
# items: (x, 1), (x, 1) -> (x, 2)
merged = []
for t in variable_count:
v, c = t
if c.is_negative:
raise ValueError(
'order of differentiation must be nonnegative')
if merged and merged[-1][0] == v:
c += merged[-1][1]
if not c:
merged.pop()
else:
merged[-1] = Tuple(v, c)
else:
merged.append(t)
variable_count = merged
        # sanity check of variables of differentiation; we waited
# until the counts were computed since some variables may
# have been removed because the count was 0
for v, c in variable_count:
# v must have _diff_wrt True
if not v._diff_wrt:
__ = '' # filler to make error message neater
raise ValueError(filldedent('''
Can't calculate derivative wrt %s.%s''' % (v,
__)))
# We make a special case for 0th derivative, because there is no
# good way to unambiguously print this.
if len(variable_count) == 0:
return expr
evaluate = kwargs.get('evaluate', False)
if evaluate:
if isinstance(expr, Derivative):
expr = expr.canonical
variable_count = [
(v.canonical if isinstance(v, Derivative) else v, c)
for v, c in variable_count]
# Look for a quick exit if there are symbols that don't appear in
# expression at all. Note, this cannot check non-symbols like
# Derivatives as those can be created by intermediate
# derivatives.
zero = False
free = expr.free_symbols
for v, c in variable_count:
vfree = v.free_symbols
if c.is_positive and vfree:
if isinstance(v, AppliedUndef):
# these match exactly since
# x.diff(f(x)) == g(x).diff(f(x)) == 0
# and are not created by differentiation
D = Dummy()
if not expr.xreplace({v: D}).has(D):
zero = True
break
elif isinstance(v, MatrixExpr):
zero = False
break
elif isinstance(v, Symbol) and v not in free:
zero = True
break
else:
if not free & vfree:
# e.g. v is IndexedBase or Matrix
zero = True
break
if zero:
if isinstance(expr, (MatrixCommon, NDimArray)):
return expr.zeros(*expr.shape)
elif isinstance(expr, MatrixExpr):
from sympy import ZeroMatrix
return ZeroMatrix(*expr.shape)
elif expr.is_scalar:
return S.Zero
# make the order of symbols canonical
#TODO: check if assumption of discontinuous derivatives exist
variable_count = cls._sort_variable_count(variable_count)
# denest
if isinstance(expr, Derivative):
variable_count = list(expr.variable_count) + variable_count
expr = expr.expr
return Derivative(expr, *variable_count, **kwargs)
# we return here if evaluate is False or if there is no
# _eval_derivative method
if not evaluate or not hasattr(expr, '_eval_derivative'):
# return an unevaluated Derivative
if evaluate and variable_count == [(expr, 1)] and expr.is_scalar:
# special hack providing evaluation for classes
# that have defined is_scalar=True but have no
# _eval_derivative defined
return S.One
return Expr.__new__(cls, expr, *variable_count)
# evaluate the derivative by calling _eval_derivative method
# of expr for each variable
# -------------------------------------------------------------
nderivs = 0 # how many derivatives were performed
unhandled = []
for i, (v, count) in enumerate(variable_count):
old_expr = expr
old_v = None
is_symbol = v.is_symbol or isinstance(v,
(Iterable, Tuple, MatrixCommon, NDimArray))
if not is_symbol:
old_v = v
v = Dummy('xi')
expr = expr.xreplace({old_v: v})
# Derivatives and UndefinedFunctions are independent
# of all others
clashing = not (isinstance(old_v, Derivative) or \
isinstance(old_v, AppliedUndef))
                if v not in expr.free_symbols and not clashing:
return expr.diff(v) # expr's version of 0
if not old_v.is_scalar and not hasattr(
old_v, '_eval_derivative'):
# special hack providing evaluation for classes
# that have defined is_scalar=True but have no
# _eval_derivative defined
expr *= old_v.diff(old_v)
# Evaluate the derivative `n` times. If
# `_eval_derivative_n_times` is not overridden by the current
# object, the default in `Basic` will call a loop over
# `_eval_derivative`:
obj = expr._eval_derivative_n_times(v, count)
if obj is not None and obj.is_zero:
return obj
nderivs += count
if old_v is not None:
if obj is not None:
# remove the dummy that was used
obj = obj.subs(v, old_v)
# restore expr
expr = old_expr
if obj is None:
# we've already checked for quick-exit conditions
# that give 0 so the remaining variables
# are contained in the expression but the expression
# did not compute a derivative so we stop taking
# derivatives
unhandled = variable_count[i:]
break
expr = obj
# what we have so far can be made canonical
expr = expr.replace(
lambda x: isinstance(x, Derivative),
lambda x: x.canonical)
if unhandled:
if isinstance(expr, Derivative):
unhandled = list(expr.variable_count) + unhandled
expr = expr.expr
expr = Expr.__new__(cls, expr, *unhandled)
if (nderivs > 1) == True and kwargs.get('simplify', True):
from sympy.core.exprtools import factor_terms
from sympy.simplify.simplify import signsimp
expr = factor_terms(signsimp(expr))
return expr
@property
def canonical(cls):
return cls.func(cls.expr,
*Derivative._sort_variable_count(cls.variable_count))
@classmethod
def _sort_variable_count(cls, vc):
"""
Sort (variable, count) pairs into canonical order while
retaining order of variables that do not commute during
differentiation:
* symbols and functions commute with each other
* derivatives commute with each other
* a derivative doesn't commute with anything it contains
* any other object is not allowed to commute if it has
free symbols in common with another object
Examples
========
>>> from sympy import Derivative, Function, symbols, cos
>>> vsort = Derivative._sort_variable_count
>>> x, y, z = symbols('x y z')
>>> f, g, h = symbols('f g h', cls=Function)
Contiguous items are collapsed into one pair:
>>> vsort([(x, 1), (x, 1)])
[(x, 2)]
>>> vsort([(y, 1), (f(x), 1), (y, 1), (f(x), 1)])
[(y, 2), (f(x), 2)]
Ordering is canonical.
>>> def vsort0(*v):
... # docstring helper to
... # change vi -> (vi, 0), sort, and return vi vals
... return [i[0] for i in vsort([(i, 0) for i in v])]
>>> vsort0(y, x)
[x, y]
>>> vsort0(g(y), g(x), f(y))
[f(y), g(x), g(y)]
Symbols are sorted as far to the left as possible but never
move to the left of a derivative having the same symbol in
its variables; the same applies to AppliedUndef which are
always sorted after Symbols:
>>> dfx = f(x).diff(x)
>>> assert vsort0(dfx, y) == [y, dfx]
>>> assert vsort0(dfx, x) == [dfx, x]
"""
from sympy.utilities.iterables import uniq, topological_sort
if not vc:
return []
vc = list(vc)
if len(vc) == 1:
return [Tuple(*vc[0])]
V = list(range(len(vc)))
E = []
v = lambda i: vc[i][0]
D = Dummy()
def _block(d, v, wrt=False):
# return True if v should not come before d else False
if d == v:
return wrt
if d.is_Symbol:
return False
if isinstance(d, Derivative):
                # a derivative blocks if any of its variables contain
# v; the wrt flag will return True for an exact match
# and will cause an AppliedUndef to block if v is in
# the arguments
if any(_block(k, v, wrt=True)
for k in d._wrt_variables):
return True
return False
if not wrt and isinstance(d, AppliedUndef):
return False
if v.is_Symbol:
return v in d.free_symbols
if isinstance(v, AppliedUndef):
return _block(d.xreplace({v: D}), D)
return d.free_symbols & v.free_symbols
for i in range(len(vc)):
for j in range(i):
if _block(v(j), v(i)):
E.append((j,i))
# this is the default ordering to use in case of ties
O = dict(zip(ordered(uniq([i for i, c in vc])), range(len(vc))))
ix = topological_sort((V, E), key=lambda i: O[v(i)])
# merge counts of contiguously identical items
merged = []
for v, c in [vc[i] for i in ix]:
if merged and merged[-1][0] == v:
merged[-1][1] += c
else:
merged.append([v, c])
return [Tuple(*i) for i in merged]
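    # Illustrative sketch: the _block relation above is what yields the
    # docstring behaviour that a symbol never drifts left past a derivative
    # containing it, e.g. with dfx = f(x).diff(x):
    #     vsort0(dfx, y) -> [y, dfx]   (y is free to move left)
    #     vsort0(dfx, x) -> [dfx, x]   (x is blocked by dfx)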
def _eval_is_commutative(self):
return self.expr.is_commutative
def _eval_derivative(self, v):
# If v (the variable of differentiation) is not in
# self.variables, we might be able to take the derivative.
if v not in self._wrt_variables:
dedv = self.expr.diff(v)
if isinstance(dedv, Derivative):
return dedv.func(dedv.expr, *(self.variable_count + dedv.variable_count))
# dedv (d(self.expr)/dv) could have simplified things such that the
# derivative wrt things in self.variables can now be done. Thus,
# we set evaluate=True to see if there are any other derivatives
# that can be done. The most common case is when dedv is a simple
# number so that the derivative wrt anything else will vanish.
return self.func(dedv, *self.variables, evaluate=True)
# In this case v was in self.variables so the derivative wrt v has
# already been attempted and was not computed, either because it
# couldn't be or evaluate=False originally.
variable_count = list(self.variable_count)
variable_count.append((v, 1))
return self.func(self.expr, *variable_count, evaluate=False)
def doit(self, **hints):
expr = self.expr
if hints.get('deep', True):
expr = expr.doit(**hints)
hints['evaluate'] = True
rv = self.func(expr, *self.variable_count, **hints)
        if rv != self and rv.has(Derivative):
rv = rv.doit(**hints)
return rv
@_sympifyit('z0', NotImplementedError)
def doit_numerically(self, z0):
"""
Evaluate the derivative at z numerically.
When we can represent derivatives at a point, this should be folded
into the normal evalf. For now, we need a special method.
"""
if len(self.free_symbols) != 1 or len(self.variables) != 1:
raise NotImplementedError('partials and higher order derivatives')
z = list(self.free_symbols)[0]
def eval(x):
f0 = self.expr.subs(z, Expr._from_mpmath(x, prec=mpmath.mp.prec))
f0 = f0.evalf(mlib.libmpf.prec_to_dps(mpmath.mp.prec))
return f0._to_mpmath(mpmath.mp.prec)
return Expr._from_mpmath(mpmath.diff(eval,
z0._to_mpmath(mpmath.mp.prec)),
mpmath.mp.prec)
@property
def expr(self):
return self._args[0]
@property
def _wrt_variables(self):
# return the variables of differentiation without
# respect to the type of count (int or symbolic)
return [i[0] for i in self.variable_count]
@property
def variables(self):
# TODO: deprecate? YES, make this 'enumerated_variables' and
# name _wrt_variables as variables
# TODO: support for `d^n`?
rv = []
for v, count in self.variable_count:
if not count.is_Integer:
raise TypeError(filldedent('''
Cannot give expansion for symbolic count. If you just
want a list of all variables of differentiation, use
_wrt_variables.'''))
rv.extend([v]*count)
return tuple(rv)
@property
def variable_count(self):
return self._args[1:]
@property
def derivative_count(self):
return sum([count for var, count in self.variable_count], 0)
@property
def free_symbols(self):
ret = self.expr.free_symbols
# Add symbolic counts to free_symbols
for var, count in self.variable_count:
ret.update(count.free_symbols)
return ret
def _eval_subs(self, old, new):
# The substitution (old, new) cannot be done inside
# Derivative(expr, vars) for a variety of reasons
# as handled below.
if old in self._wrt_variables:
# first handle the counts
expr = self.func(self.expr, *[(v, c.subs(old, new))
for v, c in self.variable_count])
if expr != self:
return expr._eval_subs(old, new)
# quick exit case
if not getattr(new, '_diff_wrt', False):
# case (0): new is not a valid variable of
# differentiation
if isinstance(old, Symbol):
# don't introduce a new symbol if the old will do
return Subs(self, old, new)
else:
xi = Dummy('xi')
return Subs(self.xreplace({old: xi}), xi, new)
# If both are Derivatives with the same expr, check if old is
# equivalent to self or if old is a subderivative of self.
if old.is_Derivative and old.expr == self.expr:
if self.canonical == old.canonical:
return new
# collections.Counter doesn't have __le__
def _subset(a, b):
return all((a[i] <= b[i]) == True for i in a)
old_vars = Counter(dict(reversed(old.variable_count)))
self_vars = Counter(dict(reversed(self.variable_count)))
if _subset(old_vars, self_vars):
return Derivative(new, *(self_vars - old_vars).items()).canonical
args = list(self.args)
newargs = list(x._subs(old, new) for x in args)
if args[0] == old:
# complete replacement of self.expr
# we already checked that the new is valid so we know
# it won't be a problem should it appear in variables
return Derivative(*newargs)
if newargs[0] != args[0]:
# case (1) can't change expr by introducing something that is in
# the _wrt_variables if it was already in the expr
# e.g.
# for Derivative(f(x, g(y)), y), x cannot be replaced with
# anything that has y in it; for f(g(x), g(y)).diff(g(y))
# g(x) cannot be replaced with anything that has g(y)
syms = {vi: Dummy() for vi in self._wrt_variables
if not vi.is_Symbol}
wrt = set(syms.get(vi, vi) for vi in self._wrt_variables)
forbidden = args[0].xreplace(syms).free_symbols & wrt
nfree = new.xreplace(syms).free_symbols
ofree = old.xreplace(syms).free_symbols
if (nfree - ofree) & forbidden:
return Subs(self, old, new)
viter = ((i, j) for ((i, _), (j, _)) in zip(newargs[1:], args[1:]))
if any(i != j for i, j in viter): # a wrt-variable change
# case (2) can't change vars by introducing a variable
# that is contained in expr, e.g.
# for Derivative(f(z, g(h(x), y)), y), y cannot be changed to
# x, h(x), or g(h(x), y)
for a in _atomic(self.expr, recursive=True):
for i in range(1, len(newargs)):
vi, _ = newargs[i]
if a == vi and vi != args[i][0]:
return Subs(self, old, new)
# more arg-wise checks
vc = newargs[1:]
oldv = self._wrt_variables
newe = self.expr
subs = []
for i, (vi, ci) in enumerate(vc):
if not vi._diff_wrt:
# case (3) invalid differentiation expression so
# create a replacement dummy
xi = Dummy('xi_%i' % i)
# replace the old valid variable with the dummy
# in the expression
newe = newe.xreplace({oldv[i]: xi})
# and replace the bad variable with the dummy
vc[i] = (xi, ci)
# and record the dummy with the new (invalid)
# differentiation expression
subs.append((xi, vi))
if subs:
# handle any residual substitution in the expression
newe = newe._subs(old, new)
# return the Subs-wrapped derivative
return Subs(Derivative(newe, *vc), *zip(*subs))
# everything was ok
return Derivative(*newargs)
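    # Illustrative sketch: case (0) above is what wraps a substitution by a
    # non-differentiation-variable in Subs instead of discarding the
    # derivative, e.g.
    #     f(x).diff(x).subs(x, 0) -> Subs(Derivative(f(x), x), x, 0)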
def _eval_lseries(self, x, logx):
dx = self.variables
for term in self.expr.lseries(x, logx=logx):
yield self.func(term, *dx)
def _eval_nseries(self, x, n, logx):
arg = self.expr.nseries(x, n=n, logx=logx)
o = arg.getO()
dx = self.variables
rv = [self.func(a, *dx) for a in Add.make_args(arg.removeO())]
if o:
rv.append(o/x)
return Add(*rv)
def _eval_as_leading_term(self, x):
series_gen = self.expr.lseries(x)
d = S.Zero
for leading_term in series_gen:
d = diff(leading_term, *self.variables)
if d != 0:
break
return d
def _sage_(self):
import sage.all as sage
args = [arg._sage_() for arg in self.args]
return sage.derivative(*args)
def as_finite_difference(self, points=1, x0=None, wrt=None):
""" Expresses a Derivative instance as a finite difference.
Parameters
==========
points : sequence or coefficient, optional
If sequence: discrete values (length >= order+1) of the
independent variable used for generating the finite
difference weights.
If it is a coefficient, it will be used as the step-size
for generating an equidistant sequence of length order+1
centered around ``x0``. Default: 1 (step-size 1)
x0 : number or Symbol, optional
the value of the independent variable (``wrt``) at which the
derivative is to be approximated. Default: same as ``wrt``.
wrt : Symbol, optional
"with respect to" the variable for which the (partial)
        derivative is to be approximated. If not provided it
is required that the derivative is ordinary. Default: ``None``.
Examples
========
>>> from sympy import symbols, Function, exp, sqrt, Symbol
>>> x, h = symbols('x h')
>>> f = Function('f')
>>> f(x).diff(x).as_finite_difference()
-f(x - 1/2) + f(x + 1/2)
The default step size and number of points are 1 and
``order + 1`` respectively. We can change the step size by
passing a symbol as a parameter:
>>> f(x).diff(x).as_finite_difference(h)
-f(-h/2 + x)/h + f(h/2 + x)/h
We can also specify the discretized values to be used in a
sequence:
>>> f(x).diff(x).as_finite_difference([x, x+h, x+2*h])
-3*f(x)/(2*h) + 2*f(h + x)/h - f(2*h + x)/(2*h)
The algorithm is not restricted to use equidistant spacing, nor
do we need to make the approximation around ``x0``, but we can get
an expression estimating the derivative at an offset:
>>> e, sq2 = exp(1), sqrt(2)
>>> xl = [x-h, x+h, x+e*h]
>>> f(x).diff(x, 1).as_finite_difference(xl, x+h*sq2) # doctest: +ELLIPSIS
2*h*((h + sqrt(2)*h)/(2*h) - (-sqrt(2)*h + h)/(2*h))*f(E*h + x)/...
To approximate ``Derivative`` around ``x0`` using a non-equidistant
spacing step, the algorithm supports assignment of undefined
functions to ``points``:
>>> dx = Function('dx')
>>> f(x).diff(x).as_finite_difference(points=dx(x), x0=x-h)
-f(-h + x - dx(-h + x)/2)/dx(-h + x) + f(-h + x + dx(-h + x)/2)/dx(-h + x)
Partial derivatives are also supported:
>>> y = Symbol('y')
>>> d2fdxdy=f(x,y).diff(x,y)
>>> d2fdxdy.as_finite_difference(wrt=x)
-Derivative(f(x - 1/2, y), y) + Derivative(f(x + 1/2, y), y)
We can apply ``as_finite_difference`` to ``Derivative`` instances in
compound expressions using ``replace``:
>>> (1 + 42**f(x).diff(x)).replace(lambda arg: arg.is_Derivative,
... lambda arg: arg.as_finite_difference())
42**(-f(x - 1/2) + f(x + 1/2)) + 1
See also
========
sympy.calculus.finite_diff.apply_finite_diff
sympy.calculus.finite_diff.differentiate_finite
sympy.calculus.finite_diff.finite_diff_weights
"""
from ..calculus.finite_diff import _as_finite_diff
return _as_finite_diff(self, points, x0, wrt)
class Lambda(Expr):
"""
Lambda(x, expr) represents a lambda function similar to Python's
'lambda x: expr'. A function of several variables is written as
Lambda((x, y, ...), expr).
A simple example:
>>> from sympy import Lambda
>>> from sympy.abc import x
>>> f = Lambda(x, x**2)
>>> f(4)
16
For multivariate functions, use:
>>> from sympy.abc import y, z, t
>>> f2 = Lambda((x, y, z, t), x + y**z + t**z)
>>> f2(1, 2, 3, 4)
73
It is also possible to unpack tuple arguments:
>>> f = Lambda( ((x, y), z) , x + y + z)
>>> f((1, 2), 3)
6
A handy shortcut for lots of arguments:
>>> p = x, y, z
>>> f = Lambda(p, x + y*z)
>>> f(*p)
x + y*z
"""
is_Function = True
def __new__(cls, signature, expr):
if iterable(signature) and not isinstance(signature, (tuple, Tuple)):
SymPyDeprecationWarning(
feature="non tuple iterable of argument symbols to Lambda",
useinstead="tuple of argument symbols",
issue=17474,
deprecated_since_version="1.5").warn()
signature = tuple(signature)
sig = signature if iterable(signature) else (signature,)
sig = sympify(sig)
cls._check_signature(sig)
if len(sig) == 1 and sig[0] == expr:
return S.IdentityFunction
return Expr.__new__(cls, sig, sympify(expr))
@classmethod
def _check_signature(cls, sig):
syms = set()
def rcheck(args):
for a in args:
if a.is_symbol:
if a in syms:
raise BadSignatureError("Duplicate symbol %s" % a)
syms.add(a)
elif isinstance(a, Tuple):
rcheck(a)
else:
raise BadSignatureError("Lambda signature should be only tuples"
" and symbols, not %s" % a)
if not isinstance(sig, Tuple):
raise BadSignatureError("Lambda signature should be a tuple not %s" % sig)
# Recurse through the signature:
rcheck(sig)
@property
def signature(self):
"""The expected form of the arguments to be unpacked into variables"""
return self._args[0]
@property
def expr(self):
"""The return value of the function"""
return self._args[1]
@property
def variables(self):
"""The variables used in the internal representation of the function"""
def _variables(args):
if isinstance(args, Tuple):
for arg in args:
for a in _variables(arg):
yield a
else:
yield args
return tuple(_variables(self.signature))
@property
def nargs(self):
from sympy.sets.sets import FiniteSet
return FiniteSet(len(self.signature))
bound_symbols = variables
@property
def free_symbols(self):
return self.expr.free_symbols - set(self.variables)
def __call__(self, *args):
n = len(args)
if n not in self.nargs: # Lambda only ever has 1 value in nargs
# XXX: exception message must be in exactly this format to
# make it work with NumPy's functions like vectorize(). See,
# for example, https://github.com/numpy/numpy/issues/1697.
# The ideal solution would be just to attach metadata to
# the exception and change NumPy to take advantage of this.
## XXX does this apply to Lambda? If not, remove this comment.
temp = ('%(name)s takes exactly %(args)s '
'argument%(plural)s (%(given)s given)')
raise BadArgumentsError(temp % {
'name': self,
'args': list(self.nargs)[0],
'plural': 's'*(list(self.nargs)[0] != 1),
'given': n})
d = self._match_signature(self.signature, args)
return self.expr.xreplace(d)
def _match_signature(self, sig, args):
symargmap = {}
def rmatch(pars, args):
for par, arg in zip(pars, args):
if par.is_symbol:
symargmap[par] = arg
elif isinstance(par, Tuple):
if not isinstance(arg, (tuple, Tuple)) or len(args) != len(pars):
raise BadArgumentsError("Can't match %s and %s" % (args, pars))
rmatch(par, arg)
rmatch(sig, args)
return symargmap
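    # Illustrative sketch: rmatch above recurses through nested tuples, so a
    # signature like ((x, y), z) called with ((1, 2), 3) produces the mapping
    # {x: 1, y: 2, z: 3}, which is how Lambda(((x, y), z), x + y + z)((1, 2), 3)
    # evaluates to 6 in the class docstring.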
def __eq__(self, other):
if not isinstance(other, Lambda):
return False
if self.nargs != other.nargs:
return False
try:
d = self._match_signature(other.signature, self.signature)
except BadArgumentsError:
return False
return self.args == other.xreplace(d).args
def __hash__(self):
return super(Lambda, self).__hash__()
def _hashable_content(self):
return (self.expr.xreplace(self.canonical_variables),)
@property
def is_identity(self):
"""Return ``True`` if this ``Lambda`` is an identity function. """
return self.signature == self.expr
class Subs(Expr):
"""
Represents unevaluated substitutions of an expression.
``Subs(expr, x, x0)`` receives 3 arguments: an expression, a variable or
list of distinct variables and a point or list of evaluation points
corresponding to those variables.
``Subs`` objects are generally useful to represent unevaluated derivatives
calculated at a point.
The variables may be expressions, but they are subjected to the limitations
of subs(), so it is usually a good practice to use only symbols for
variables, since in that case there can be no ambiguity.
There's no automatic expansion - use the method .doit() to effect all
possible substitutions of the object and also of objects inside the
expression.
When evaluating derivatives at a point that is not a symbol, a Subs object
is returned. One is also able to calculate derivatives of Subs objects - in
this case the expression is always expanded (for the unevaluated form, use
Derivative()).
Examples
========
>>> from sympy import Subs, Function, sin, cos
>>> from sympy.abc import x, y, z
>>> f = Function('f')
Subs are created when a particular substitution cannot be made. The
x in the derivative cannot be replaced with 0 because 0 is not a
    valid variable of differentiation:
>>> f(x).diff(x).subs(x, 0)
Subs(Derivative(f(x), x), x, 0)
Once f is known, the derivative and evaluation at 0 can be done:
>>> _.subs(f, sin).doit() == sin(x).diff(x).subs(x, 0) == cos(0)
True
Subs can also be created directly with one or more variables:
>>> Subs(f(x)*sin(y) + z, (x, y), (0, 1))
Subs(z + f(x)*sin(y), (x, y), (0, 1))
>>> _.doit()
z + f(0)*sin(1)
Notes
=====
In order to allow expressions to combine before doit is done, a
representation of the Subs expression is used internally to make
expressions that are superficially different compare the same:
>>> a, b = Subs(x, x, 0), Subs(y, y, 0)
>>> a + b
2*Subs(x, x, 0)
This can lead to unexpected consequences when using methods
like `has` that are cached:
>>> s = Subs(x, x, 0)
>>> s.has(x), s.has(y)
(True, False)
>>> ss = s.subs(x, y)
>>> ss.has(x), ss.has(y)
(True, False)
>>> s, ss
(Subs(x, x, 0), Subs(y, y, 0))
"""
def __new__(cls, expr, variables, point, **assumptions):
from sympy import Symbol
if not is_sequence(variables, Tuple):
variables = [variables]
variables = Tuple(*variables)
if has_dups(variables):
repeated = [str(v) for v, i in Counter(variables).items() if i > 1]
__ = ', '.join(repeated)
raise ValueError(filldedent('''
The following expressions appear more than once: %s
''' % __))
point = Tuple(*(point if is_sequence(point, Tuple) else [point]))
if len(point) != len(variables):
raise ValueError('Number of point values must be the same as '
'the number of variables.')
if not point:
return sympify(expr)
# denest
if isinstance(expr, Subs):
variables = expr.variables + variables
point = expr.point + point
expr = expr.expr
else:
expr = sympify(expr)
# use symbols with names equal to the point value (with prepended _)
# to give a variable-independent expression
pre = "_"
pts = sorted(set(point), key=default_sort_key)
from sympy.printing import StrPrinter
class CustomStrPrinter(StrPrinter):
def _print_Dummy(self, expr):
return str(expr) + str(expr.dummy_index)
def mystr(expr, **settings):
p = CustomStrPrinter(settings)
return p.doprint(expr)
while 1:
s_pts = {p: Symbol(pre + mystr(p)) for p in pts}
reps = [(v, s_pts[p])
for v, p in zip(variables, point)]
# if any underscore-prepended symbol is already a free symbol
# and is a variable with a different point value, then there
# is a clash, e.g. _0 clashes in Subs(_0 + _1, (_0, _1), (1, 0))
# because the new symbol that would be created is _1 but _1
# is already mapped to 0 so __0 and __1 are used for the new
# symbols
if any(r in expr.free_symbols and
r in variables and
Symbol(pre + mystr(point[variables.index(r)])) != r
for _, r in reps):
pre += "_"
continue
break
obj = Expr.__new__(cls, expr, Tuple(*variables), point)
obj._expr = expr.xreplace(dict(reps))
return obj
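    # Illustrative sketch: the underscore-prefixed point symbols built above
    # give a variable-independent internal form, which is why superficially
    # different objects combine, e.g. Subs(x, x, 0) + Subs(y, y, 0) gives
    # 2*Subs(x, x, 0) as shown in the class docstring.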
def _eval_is_commutative(self):
return self.expr.is_commutative
def doit(self, **hints):
e, v, p = self.args
# remove self mappings
for i, (vi, pi) in enumerate(zip(v, p)):
if vi == pi:
v = v[:i] + v[i + 1:]
p = p[:i] + p[i + 1:]
if not v:
return self.expr
if isinstance(e, Derivative):
# apply functions first, e.g. f -> cos
undone = []
for i, vi in enumerate(v):
if isinstance(vi, FunctionClass):
e = e.subs(vi, p[i])
else:
undone.append((vi, p[i]))
if not isinstance(e, Derivative):
e = e.doit()
if isinstance(e, Derivative):
# do Subs that aren't related to differentiation
undone2 = []
D = Dummy()
for vi, pi in undone:
if D not in e.xreplace({vi: D}).free_symbols:
e = e.subs(vi, pi)
else:
undone2.append((vi, pi))
undone = undone2
# differentiate wrt variables that are present
wrt = []
D = Dummy()
expr = e.expr
free = expr.free_symbols
for vi, ci in e.variable_count:
if isinstance(vi, Symbol) and vi in free:
expr = expr.diff((vi, ci))
elif D in expr.subs(vi, D).free_symbols:
expr = expr.diff((vi, ci))
else:
wrt.append((vi, ci))
# inject remaining subs
rv = expr.subs(undone)
# do remaining differentiation *in order given*
for vc in wrt:
rv = rv.diff(vc)
else:
# inject remaining subs
rv = e.subs(undone)
else:
rv = e.doit(**hints).subs(list(zip(v, p)))
if hints.get('deep', True) and rv != self:
rv = rv.doit(**hints)
return rv
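    # Illustrative sketch: function substitutions are applied before the
    # derivative is re-evaluated, so for s = Subs(Derivative(f(x), x), x, 0)
    # the docstring chain s.subs(f, sin).doit() == cos(0) goes through the
    # Derivative branch above.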
def evalf(self, prec=None, **options):
return self.doit().evalf(prec, **options)
n = evalf
@property
def variables(self):
"""The variables to be evaluated"""
return self._args[1]
bound_symbols = variables
@property
def expr(self):
"""The expression on which the substitution operates"""
return self._args[0]
@property
def point(self):
"""The values for which the variables are to be substituted"""
return self._args[2]
@property
def free_symbols(self):
return (self.expr.free_symbols - set(self.variables) |
set(self.point.free_symbols))
@property
def expr_free_symbols(self):
return (self.expr.expr_free_symbols - set(self.variables) |
set(self.point.expr_free_symbols))
def __eq__(self, other):
if not isinstance(other, Subs):
return False
return self._hashable_content() == other._hashable_content()
def __ne__(self, other):
return not(self == other)
def __hash__(self):
return super(Subs, self).__hash__()
def _hashable_content(self):
return (self._expr.xreplace(self.canonical_variables),
) + tuple(ordered([(v, p) for v, p in
zip(self.variables, self.point) if not self.expr.has(v)]))
def _eval_subs(self, old, new):
# Subs doit will do the variables in order; the semantics
# of subs for Subs is have the following invariant for
# Subs object foo:
# foo.doit().subs(reps) == foo.subs(reps).doit()
pt = list(self.point)
if old in self.variables:
if _atomic(new) == set([new]) and not any(
i.has(new) for i in self.args):
# the substitution is neutral
return self.xreplace({old: new})
# any occurrence of old before this point will get
# handled by replacements from here on
i = self.variables.index(old)
for j in range(i, len(self.variables)):
pt[j] = pt[j]._subs(old, new)
return self.func(self.expr, self.variables, pt)
v = [i._subs(old, new) for i in self.variables]
if v != list(self.variables):
return self.func(self.expr, self.variables + (old,), pt + [new])
expr = self.expr._subs(old, new)
pt = [i._subs(old, new) for i in self.point]
return self.func(expr, v, pt)
def _eval_derivative(self, s):
# Apply the chain rule of the derivative on the substitution variables:
val = Add.fromiter(p.diff(s) * Subs(self.expr.diff(v), self.variables, self.point).doit() for v, p in zip(self.variables, self.point))
# Check if there are free symbols in `self.expr`:
# First get the `expr_free_symbols`, which returns the free symbols
# that are directly contained in an expression node (i.e. stop
# searching if the node isn't an expression). At this point turn the
# expressions into `free_symbols` and check if there are common free
# symbols in `self.expr` and the deriving factor.
fs1 = {j for i in self.expr_free_symbols for j in i.free_symbols}
if len(fs1 & s.free_symbols) > 0:
val += Subs(self.expr.diff(s), self.variables, self.point).doit()
return val
def _eval_nseries(self, x, n, logx):
if x in self.point:
# x is the variable being substituted into
apos = self.point.index(x)
other = self.variables[apos]
else:
other = x
arg = self.expr.nseries(other, n=n, logx=logx)
o = arg.getO()
terms = Add.make_args(arg.removeO())
rv = Add(*[self.func(a, *self.args[1:]) for a in terms])
if o:
rv += o.subs(other, x)
return rv
def _eval_as_leading_term(self, x):
if x in self.point:
ipos = self.point.index(x)
xvar = self.variables[ipos]
return self.expr.as_leading_term(xvar)
if x in self.variables:
# if `x` is a dummy variable, it means it won't exist after the
# substitution has been performed:
return self
# The variable is independent of the substitution:
return self.expr.as_leading_term(x)
def diff(f, *symbols, **kwargs):
"""
Differentiate f with respect to symbols.
This is just a wrapper to unify .diff() and the Derivative class; its
interface is similar to that of integrate(). You can use the same
shortcuts for multiple variables as with Derivative. For example,
diff(f(x), x, x, x) and diff(f(x), x, 3) both return the third derivative
of f(x).
You can pass evaluate=False to get an unevaluated Derivative class. Note
    that if there are 0 symbols (such as diff(f(x), x, 0)), then the result will
be the function (the zeroth derivative), even if evaluate=False.
Examples
========
>>> from sympy import sin, cos, Function, diff
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> diff(sin(x), x)
cos(x)
>>> diff(f(x), x, x, x)
Derivative(f(x), (x, 3))
>>> diff(f(x), x, 3)
Derivative(f(x), (x, 3))
>>> diff(sin(x)*cos(y), x, 2, y, 2)
sin(x)*cos(y)
>>> type(diff(sin(x), x))
cos
>>> type(diff(sin(x), x, evaluate=False))
<class 'sympy.core.function.Derivative'>
>>> type(diff(sin(x), x, 0))
sin
>>> type(diff(sin(x), x, 0, evaluate=False))
sin
>>> diff(sin(x))
cos(x)
>>> diff(sin(x*y))
Traceback (most recent call last):
...
ValueError: specify differentiation variables to differentiate sin(x*y)
Note that ``diff(sin(x))`` syntax is meant only for convenience
in interactive sessions and should be avoided in library code.
References
==========
http://reference.wolfram.com/legacy/v5_2/Built-inFunctions/AlgebraicComputation/Calculus/D.html
See Also
========
Derivative
idiff: computes the derivative implicitly
"""
if hasattr(f, 'diff'):
return f.diff(*symbols, **kwargs)
kwargs.setdefault('evaluate', True)
return Derivative(f, *symbols, **kwargs)
def expand(e, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
r"""
Expand an expression using methods given as hints.
Hints evaluated unless explicitly set to False are: ``basic``, ``log``,
    ``multinomial``, ``mul``, ``power_base``, and ``power_exp``. The following
hints are supported but not applied unless set to True: ``complex``,
``func``, and ``trig``. In addition, the following meta-hints are
supported by some or all of the other hints: ``frac``, ``numer``,
``denom``, ``modulus``, and ``force``. ``deep`` is supported by all
hints. Additionally, subclasses of Expr may define their own hints or
meta-hints.
The ``basic`` hint is used for any special rewriting of an object that
should be done automatically (along with the other hints like ``mul``)
when expand is called. This is a catch-all hint to handle any sort of
expansion that may not be described by the existing hint names. To use
this hint an object should override the ``_eval_expand_basic`` method.
Objects may also define their own expand methods, which are not run by
default. See the API section below.
If ``deep`` is set to ``True`` (the default), things like arguments of
functions are recursively expanded. Use ``deep=False`` to only expand on
the top level.
If the ``force`` hint is used, assumptions about variables will be ignored
in making the expansion.
Hints
=====
These hints are run by default
mul
---
Distributes multiplication over addition:
>>> from sympy import cos, exp, sin
>>> from sympy.abc import x, y, z
>>> (y*(x + z)).expand(mul=True)
x*y + y*z
multinomial
-----------
Expand (x + y + ...)**n where n is a positive integer.
>>> ((x + y + z)**2).expand(multinomial=True)
x**2 + 2*x*y + 2*x*z + y**2 + 2*y*z + z**2
power_exp
---------
Expand addition in exponents into multiplied bases.
>>> exp(x + y).expand(power_exp=True)
exp(x)*exp(y)
>>> (2**(x + y)).expand(power_exp=True)
2**x*2**y
power_base
----------
Split powers of multiplied bases.
This only happens by default if assumptions allow, or if the
``force`` meta-hint is used:
>>> ((x*y)**z).expand(power_base=True)
(x*y)**z
>>> ((x*y)**z).expand(power_base=True, force=True)
x**z*y**z
>>> ((2*y)**z).expand(power_base=True)
2**z*y**z
Note that in some cases where this expansion always holds, SymPy performs
it automatically:
>>> (x*y)**2
x**2*y**2
log
---
    Pull out power of an argument as a coefficient and split log products
into sums of logs.
Note that these only work if the arguments of the log function have the
proper assumptions--the arguments must be positive and the exponents must
be real--or else the ``force`` hint must be True:
>>> from sympy import log, symbols
>>> log(x**2*y).expand(log=True)
log(x**2*y)
>>> log(x**2*y).expand(log=True, force=True)
2*log(x) + log(y)
>>> x, y = symbols('x,y', positive=True)
>>> log(x**2*y).expand(log=True)
2*log(x) + log(y)
basic
-----
This hint is intended primarily as a way for custom subclasses to enable
expansion by default.
These hints are not run by default:
complex
-------
Split an expression into real and imaginary parts.
>>> x, y = symbols('x,y')
>>> (x + y).expand(complex=True)
re(x) + re(y) + I*im(x) + I*im(y)
>>> cos(x).expand(complex=True)
-I*sin(re(x))*sinh(im(x)) + cos(re(x))*cosh(im(x))
Note that this is just a wrapper around ``as_real_imag()``. Most objects
that wish to redefine ``_eval_expand_complex()`` should consider
redefining ``as_real_imag()`` instead.
func
----
Expand other functions.
>>> from sympy import gamma
>>> gamma(x + 1).expand(func=True)
x*gamma(x)
trig
----
Do trigonometric expansions.
>>> cos(x + y).expand(trig=True)
-sin(x)*sin(y) + cos(x)*cos(y)
>>> sin(2*x).expand(trig=True)
2*sin(x)*cos(x)
Note that the forms of ``sin(n*x)`` and ``cos(n*x)`` in terms of ``sin(x)``
and ``cos(x)`` are not unique, due to the identity `\sin^2(x) + \cos^2(x)
= 1`. The current implementation uses the form obtained from Chebyshev
polynomials, but this may change. See `this MathWorld article
<http://mathworld.wolfram.com/Multiple-AngleFormulas.html>`_ for more
information.
Notes
=====
- You can shut off unwanted methods::
>>> (exp(x + y)*(x + y)).expand()
x*exp(x)*exp(y) + y*exp(x)*exp(y)
>>> (exp(x + y)*(x + y)).expand(power_exp=False)
x*exp(x + y) + y*exp(x + y)
>>> (exp(x + y)*(x + y)).expand(mul=False)
(x + y)*exp(x)*exp(y)
- Use deep=False to only expand on the top level::
>>> exp(x + exp(x + y)).expand()
exp(x)*exp(exp(x)*exp(y))
>>> exp(x + exp(x + y)).expand(deep=False)
exp(x)*exp(exp(x + y))
- Hints are applied in an arbitrary, but consistent order (in the current
implementation, they are applied in alphabetical order, except
multinomial comes before mul, but this may change). Because of this,
some hints may prevent expansion by other hints if they are applied
first. For example, ``mul`` may distribute multiplications and prevent
``log`` and ``power_base`` from expanding them. Also, if ``mul`` is
      applied before ``multinomial``, the expression might not be fully
distributed. The solution is to use the various ``expand_hint`` helper
functions or to use ``hint=False`` to this function to finely control
which hints are applied. Here are some examples::
>>> from sympy import expand, expand_mul, expand_power_base
>>> x, y, z = symbols('x,y,z', positive=True)
>>> expand(log(x*(y + z)))
log(x) + log(y + z)
Here, we see that ``log`` was applied before ``mul``. To get the mul
expanded form, either of the following will work::
>>> expand_mul(log(x*(y + z)))
log(x*y + x*z)
>>> expand(log(x*(y + z)), log=False)
log(x*y + x*z)
A similar thing can happen with the ``power_base`` hint::
>>> expand((x*(y + z))**x)
(x*y + x*z)**x
To get the ``power_base`` expanded form, either of the following will
work::
>>> expand((x*(y + z))**x, mul=False)
x**x*(y + z)**x
>>> expand_power_base((x*(y + z))**x)
x**x*(y + z)**x
>>> expand((x + y)*y/x)
y + y**2/x
The parts of a rational expression can be targeted::
>>> expand((x + y)*y/x/(x + 1), frac=True)
(x*y + y**2)/(x**2 + x)
>>> expand((x + y)*y/x/(x + 1), numer=True)
(x*y + y**2)/(x*(x + 1))
>>> expand((x + y)*y/x/(x + 1), denom=True)
y*(x + y)/(x**2 + x)
- The ``modulus`` meta-hint can be used to reduce the coefficients of an
expression post-expansion::
>>> expand((3*x + 1)**2)
9*x**2 + 6*x + 1
>>> expand((3*x + 1)**2, modulus=5)
4*x**2 + x + 1
- Either ``expand()`` the function or ``.expand()`` the method can be
used. Both are equivalent::
>>> expand((x + 1)**2)
x**2 + 2*x + 1
>>> ((x + 1)**2).expand()
x**2 + 2*x + 1
API
===
Objects can define their own expand hints by defining
``_eval_expand_hint()``. The function should take the form::
def _eval_expand_hint(self, **hints):
# Only apply the method to the top-level expression
...
See also the example below. Objects should define ``_eval_expand_hint()``
methods only if ``hint`` applies to that specific object. The generic
``_eval_expand_hint()`` method defined in Expr will handle the no-op case.
Each hint should be responsible for expanding that hint only.
Furthermore, the expansion should be applied to the top-level expression
only. ``expand()`` takes care of the recursion that happens when
``deep=True``.
You should only call ``_eval_expand_hint()`` methods directly if you are
100% sure that the object has the method, as otherwise you are liable to
get unexpected ``AttributeError``s. Note, again, that you do not need to
recursively apply the hint to args of your object: this is handled
automatically by ``expand()``. ``_eval_expand_hint()`` should
generally not be used at all outside of an ``_eval_expand_hint()`` method.
If you want to apply a specific expansion from within another method, use
the public ``expand()`` function, method, or ``expand_hint()`` functions.
In order for expand to work, objects must be rebuildable by their args,
i.e., ``obj.func(*obj.args) == obj`` must hold.
Expand methods are passed ``**hints`` so that expand hints may use
'metahints'--hints that control how different expand methods are applied.
For example, the ``force=True`` hint described above that causes
``expand(log=True)`` to ignore assumptions is such a metahint. The
``deep`` meta-hint is handled exclusively by ``expand()`` and is not
passed to ``_eval_expand_hint()`` methods.
Note that expansion hints should generally be methods that perform some
kind of 'expansion'. For hints that simply rewrite an expression, use the
.rewrite() API.
Examples
========
>>> from sympy import Expr, sympify
>>> class MyClass(Expr):
... def __new__(cls, *args):
... args = sympify(args)
... return Expr.__new__(cls, *args)
...
... def _eval_expand_double(self, **hints):
... '''
... Doubles the args of MyClass.
...
    ...         If there are more than four args, doubling is not performed,
... unless force=True is also used (False by default).
... '''
... force = hints.pop('force', False)
... if not force and len(self.args) > 4:
... return self
... return self.func(*(self.args + self.args))
...
>>> a = MyClass(1, 2, MyClass(3, 4))
>>> a
MyClass(1, 2, MyClass(3, 4))
>>> a.expand(double=True)
MyClass(1, 2, MyClass(3, 4, 3, 4), 1, 2, MyClass(3, 4, 3, 4))
>>> a.expand(double=True, deep=False)
MyClass(1, 2, MyClass(3, 4), 1, 2, MyClass(3, 4))
>>> b = MyClass(1, 2, 3, 4, 5)
>>> b.expand(double=True)
MyClass(1, 2, 3, 4, 5)
>>> b.expand(double=True, force=True)
MyClass(1, 2, 3, 4, 5, 1, 2, 3, 4, 5)
See Also
========
expand_log, expand_mul, expand_multinomial, expand_complex, expand_trig,
expand_power_base, expand_power_exp, expand_func, sympy.simplify.hyperexpand.hyperexpand
"""
# don't modify this; modify the Expr.expand method
hints['power_base'] = power_base
hints['power_exp'] = power_exp
hints['mul'] = mul
hints['log'] = log
hints['multinomial'] = multinomial
hints['basic'] = basic
return sympify(e).expand(deep=deep, modulus=modulus, **hints)
# This is a special application of two hints
def _mexpand(expr, recursive=False):
# expand multinomials and then expand products; this may not always
# be sufficient to give a fully expanded expression (see
# test_issue_8247_8354 in test_arit)
if expr is None:
return
was = None
while was != expr:
was, expr = expr, expand_mul(expand_multinomial(expr))
if not recursive:
break
return expr
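# Illustrative sketch: a single (non-recursive) call is just
# expand_mul(expand_multinomial(expr)); with recursive=True the pair is
# re-applied until the expression stops changing, e.g.
#     _mexpand((x + y)**2*(x + 1)) == expand_mul(expand_multinomial((x + y)**2*(x + 1)))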
# These are simple wrappers around single hints.
def expand_mul(expr, deep=True):
"""
Wrapper around expand that only uses the mul hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import symbols, expand_mul, exp, log
>>> x, y = symbols('x,y', positive=True)
>>> expand_mul(exp(x+y)*(x+y)*log(x*y**2))
x*exp(x + y)*log(x*y**2) + y*exp(x + y)*log(x*y**2)
"""
return sympify(expr).expand(deep=deep, mul=True, power_exp=False,
power_base=False, basic=False, multinomial=False, log=False)
def expand_multinomial(expr, deep=True):
"""
Wrapper around expand that only uses the multinomial hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import symbols, expand_multinomial, exp
>>> x, y = symbols('x y', positive=True)
>>> expand_multinomial((x + exp(x + 1))**2)
x**2 + 2*x*exp(x + 1) + exp(2*x + 2)
"""
return sympify(expr).expand(deep=deep, mul=False, power_exp=False,
power_base=False, basic=False, multinomial=True, log=False)
def expand_log(expr, deep=True, force=False):
"""
Wrapper around expand that only uses the log hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import symbols, expand_log, exp, log
>>> x, y = symbols('x,y', positive=True)
>>> expand_log(exp(x+y)*(x+y)*log(x*y**2))
(x + y)*(log(x) + 2*log(y))*exp(x + y)
"""
return sympify(expr).expand(deep=deep, log=True, mul=False,
power_exp=False, power_base=False, multinomial=False,
basic=False, force=force)
def expand_func(expr, deep=True):
"""
Wrapper around expand that only uses the func hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import expand_func, gamma
>>> from sympy.abc import x
>>> expand_func(gamma(x + 2))
x*(x + 1)*gamma(x)
"""
return sympify(expr).expand(deep=deep, func=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_trig(expr, deep=True):
"""
Wrapper around expand that only uses the trig hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import expand_trig, sin
>>> from sympy.abc import x, y
>>> expand_trig(sin(x+y)*(x+y))
(x + y)*(sin(x)*cos(y) + sin(y)*cos(x))
"""
return sympify(expr).expand(deep=deep, trig=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_complex(expr, deep=True):
"""
Wrapper around expand that only uses the complex hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import expand_complex, exp, sqrt, I
>>> from sympy.abc import z
>>> expand_complex(exp(z))
I*exp(re(z))*sin(im(z)) + exp(re(z))*cos(im(z))
>>> expand_complex(sqrt(I))
sqrt(2)/2 + sqrt(2)*I/2
See Also
========
sympy.core.expr.Expr.as_real_imag
"""
return sympify(expr).expand(deep=deep, complex=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_power_base(expr, deep=True, force=False):
"""
Wrapper around expand that only uses the power_base hint.
See the expand docstring for more information.
A wrapper to expand(power_base=True) which separates a power with a base
that is a Mul into a product of powers, without performing any other
expansions, provided that assumptions about the power's base and exponent
allow.
deep=False (default is True) will only apply to the top-level expression.
force=True (default is False) will cause the expansion to ignore
assumptions about the base and exponent. When False, the expansion will
only happen if the base is non-negative or the exponent is an integer.
>>> from sympy.abc import x, y, z
>>> from sympy import expand_power_base, sin, cos, exp
>>> (x*y)**2
x**2*y**2
>>> (2*x)**y
(2*x)**y
>>> expand_power_base(_)
2**y*x**y
>>> expand_power_base((x*y)**z)
(x*y)**z
>>> expand_power_base((x*y)**z, force=True)
x**z*y**z
>>> expand_power_base(sin((x*y)**z), deep=False)
sin((x*y)**z)
>>> expand_power_base(sin((x*y)**z), force=True)
sin(x**z*y**z)
>>> expand_power_base((2*sin(x))**y + (2*cos(x))**y)
2**y*sin(x)**y + 2**y*cos(x)**y
>>> expand_power_base((2*exp(y))**x)
2**x*exp(y)**x
>>> expand_power_base((2*cos(x))**y)
2**y*cos(x)**y
Notice that sums are left untouched. If this is not the desired behavior,
apply full ``expand()`` to the expression:
>>> expand_power_base(((x+y)*z)**2)
z**2*(x + y)**2
>>> (((x+y)*z)**2).expand()
x**2*z**2 + 2*x*y*z**2 + y**2*z**2
>>> expand_power_base((2*y)**(1+z))
2**(z + 1)*y**(z + 1)
>>> ((2*y)**(1+z)).expand()
2*2**z*y*y**z
"""
return sympify(expr).expand(deep=deep, log=False, mul=False,
power_exp=False, power_base=True, multinomial=False,
basic=False, force=force)
def expand_power_exp(expr, deep=True):
"""
Wrapper around expand that only uses the power_exp hint.
See the expand docstring for more information.
Examples
========
>>> from sympy import expand_power_exp
>>> from sympy.abc import x, y
>>> expand_power_exp(x**(y + 2))
x**2*x**y
"""
return sympify(expr).expand(deep=deep, complex=False, basic=False,
log=False, mul=False, power_exp=True, power_base=False, multinomial=False)
def count_ops(expr, visual=False):
"""
Return a representation (integer or expression) of the operations in expr.
If ``visual`` is ``False`` (default) then the sum of the coefficients of the
visual expression will be returned.
If ``visual`` is ``True`` then the number of each type of operation is shown
with the core class types (or their virtual equivalent) multiplied by the
number of times they occur.
If expr is an iterable, the sum of the op counts of the
items will be returned.
Examples
========
>>> from sympy.abc import a, b, x, y
>>> from sympy import sin, count_ops
Although there isn't a SUB object, minus signs are interpreted as
either negations or subtractions:
>>> (x - y).count_ops(visual=True)
SUB
>>> (-x).count_ops(visual=True)
NEG
Here, there are two Adds and a Pow:
>>> (1 + a + b**2).count_ops(visual=True)
2*ADD + POW
In the following, an Add, Mul, Pow and two functions:
>>> (sin(x)*x + sin(x)**2).count_ops(visual=True)
ADD + MUL + POW + 2*SIN
for a total of 5:
>>> (sin(x)*x + sin(x)**2).count_ops(visual=False)
5
Note that "what you type" is not always what you get. The expression
1/x/y is translated by sympy into 1/(x*y) so it gives a DIV and MUL rather
than two DIVs:
>>> (1/x/y).count_ops(visual=True)
DIV + MUL
The visual option can be used to demonstrate the difference in
operations for expressions in different forms. Here, the Horner
representation is compared with the expanded form of a polynomial:
>>> eq=x*(1 + x*(2 + x*(3 + x)))
>>> count_ops(eq.expand(), visual=True) - count_ops(eq, visual=True)
-MUL + 3*POW
The count_ops function also handles iterables:
>>> count_ops([x, sin(x), None, True, x + 2], visual=False)
2
>>> count_ops([x, sin(x), None, True, x + 2], visual=True)
ADD + SIN
>>> count_ops({x: sin(x), x + 2: y + 1}, visual=True)
2*ADD + SIN
"""
from sympy import Integral, Symbol
from sympy.core.relational import Relational
from sympy.simplify.radsimp import fraction
from sympy.logic.boolalg import BooleanFunction
from sympy.utilities.misc import func_name
expr = sympify(expr)
if isinstance(expr, Expr) and not expr.is_Relational:
ops = []
args = [expr]
NEG = Symbol('NEG')
DIV = Symbol('DIV')
SUB = Symbol('SUB')
ADD = Symbol('ADD')
while args:
a = args.pop()
if a.is_Rational:
#-1/3 = NEG + DIV
if a is not S.One:
if a.p < 0:
ops.append(NEG)
if a.q != 1:
ops.append(DIV)
continue
elif a.is_Mul or a.is_MatMul:
if _coeff_isneg(a):
ops.append(NEG)
if a.args[0] is S.NegativeOne:
a = a.as_two_terms()[1]
else:
a = -a
n, d = fraction(a)
if n.is_Integer:
ops.append(DIV)
if n < 0:
ops.append(NEG)
args.append(d)
continue # won't be -Mul but could be Add
elif d is not S.One:
if not d.is_Integer:
args.append(d)
ops.append(DIV)
args.append(n)
continue # could be -Mul
elif a.is_Add or a.is_MatAdd:
aargs = list(a.args)
negs = 0
for i, ai in enumerate(aargs):
if _coeff_isneg(ai):
negs += 1
args.append(-ai)
if i > 0:
ops.append(SUB)
else:
args.append(ai)
if i > 0:
ops.append(ADD)
if negs == len(aargs): # -x - y = NEG + SUB
ops.append(NEG)
elif _coeff_isneg(aargs[0]): # -x + y = SUB, but already recorded ADD
ops.append(SUB - ADD)
continue
if a.is_Pow and a.exp is S.NegativeOne:
ops.append(DIV)
args.append(a.base) # won't be -Mul but could be Add
continue
if (a.is_Mul or
a.is_Pow or
a.is_Function or
isinstance(a, Derivative) or
isinstance(a, Integral)):
o = Symbol(a.func.__name__.upper())
# count the args
if (a.is_Mul or isinstance(a, LatticeOp)):
ops.append(o*(len(a.args) - 1))
else:
ops.append(o)
if not a.is_Symbol:
args.extend(a.args)
elif isinstance(expr, Dict):
ops = [count_ops(k, visual=visual) +
count_ops(v, visual=visual) for k, v in expr.items()]
elif iterable(expr):
ops = [count_ops(i, visual=visual) for i in expr]
elif isinstance(expr, (Relational, BooleanFunction)):
ops = []
for arg in expr.args:
ops.append(count_ops(arg, visual=True))
o = Symbol(func_name(expr, short=True).upper())
ops.append(o)
elif not isinstance(expr, Basic):
ops = []
else: # it's Basic not isinstance(expr, Expr):
if not isinstance(expr, Basic):
raise TypeError("Invalid type of expr")
else:
ops = []
args = [expr]
while args:
a = args.pop()
if a.args:
o = Symbol(a.func.__name__.upper())
if a.is_Boolean:
ops.append(o*(len(a.args)-1))
else:
ops.append(o)
args.extend(a.args)
if not ops:
if visual:
return S.Zero
return 0
ops = Add(*ops)
if visual:
return ops
if ops.is_Number:
return int(ops)
return sum(int((a.args or [1])[0]) for a in Add.make_args(ops))
def nfloat(expr, n=15, exponent=False, dkeys=False):
"""Make all Rationals in expr Floats except those in exponents
    (unless the exponent flag is set to True). When processing
dictionaries, don't modify the keys unless ``dkeys=True``.
Examples
========
>>> from sympy.core.function import nfloat
>>> from sympy.abc import x, y
>>> from sympy import cos, pi, sqrt
>>> nfloat(x**4 + x/2 + cos(pi/3) + 1 + sqrt(y))
x**4 + 0.5*x + sqrt(y) + 1.5
>>> nfloat(x**4 + sqrt(y), exponent=True)
x**4.0 + y**0.5
Container types are not modified:
>>> type(nfloat((1, 2))) is tuple
True
"""
from sympy.core.power import Pow
from sympy.polys.rootoftools import RootOf
from sympy import MatrixBase
kw = dict(n=n, exponent=exponent, dkeys=dkeys)
if isinstance(expr, MatrixBase):
return expr.applyfunc(lambda e: nfloat(e, **kw))
# handling of iterable containers
if iterable(expr, exclude=string_types):
if isinstance(expr, (dict, Dict)):
if dkeys:
args = [tuple(map(lambda i: nfloat(i, **kw), a))
for a in expr.items()]
else:
args = [(k, nfloat(v, **kw)) for k, v in expr.items()]
if isinstance(expr, dict):
return type(expr)(args)
else:
return expr.func(*args)
elif isinstance(expr, Basic):
return expr.func(*[nfloat(a, **kw) for a in expr.args])
return type(expr)([nfloat(a, **kw) for a in expr])
rv = sympify(expr)
if rv.is_Number:
return Float(rv, n)
elif rv.is_number:
# evalf doesn't always set the precision
rv = rv.n(n)
if rv.is_Number:
rv = Float(rv.n(n), n)
else:
pass # pure_complex(rv) is likely True
return rv
elif rv.is_Atom:
return rv
# watch out for RootOf instances that don't like to have
# their exponents replaced with Dummies and also sometimes have
# problems with evaluating at low precision (issue 6393)
rv = rv.xreplace({ro: ro.n(n) for ro in rv.atoms(RootOf)})
if not exponent:
reps = [(p, Pow(p.base, Dummy())) for p in rv.atoms(Pow)]
rv = rv.xreplace(dict(reps))
rv = rv.n(n)
if not exponent:
rv = rv.xreplace({d.exp: p.exp for p, d in reps})
else:
# Pow._eval_evalf special cases Integer exponents so if
        # exponent is supposed to be handled we have to do so here
rv = rv.xreplace(Transform(
lambda x: Pow(x.base, Float(x.exp, n)),
lambda x: x.is_Pow and x.exp.is_Integer))
return rv.xreplace(Transform(
lambda x: x.func(*nfloat(x.args, n, exponent)),
lambda x: isinstance(x, Function)))
from sympy.core.symbol import Dummy, Symbol
| bsd-3-clause | 1,533,985,883,591,207,000 | 33.35906 | 142 | 0.548837 | false |
ioram7/keystone-federado-pgid2013 | build/sqlalchemy/test/sql/test_types.py | 1 | 69356 | # coding: utf-8
from test.lib.testing import eq_, assert_raises, assert_raises_message
import decimal
import datetime, os, re
from sqlalchemy import *
from sqlalchemy import exc, types, util, schema, dialects
for name in dialects.__all__:
__import__("sqlalchemy.dialects.%s" % name)
from sqlalchemy.sql import operators, column, table
from test.lib.testing import eq_
import sqlalchemy.engine.url as url
from sqlalchemy.engine import default
from test.lib.schema import Table, Column
from test.lib import *
from test.lib.util import picklers
from sqlalchemy.util.compat import decimal
from test.lib.util import round_decimal
from test.lib import fixtures
class AdaptTest(fixtures.TestBase):
def _all_dialect_modules(self):
return [
getattr(dialects, d)
for d in dialects.__all__
if not d.startswith('_')
]
def _all_dialects(self):
return [d.base.dialect() for d in
self._all_dialect_modules()]
def _types_for_mod(self, mod):
for key in dir(mod):
typ = getattr(mod, key)
if not isinstance(typ, type) or not issubclass(typ, types.TypeEngine):
continue
yield typ
def _all_types(self):
for typ in self._types_for_mod(types):
yield typ
for dialect in self._all_dialect_modules():
for typ in self._types_for_mod(dialect):
yield typ
def test_uppercase_importable(self):
import sqlalchemy as sa
for typ in self._types_for_mod(types):
if typ.__name__ == typ.__name__.upper():
assert getattr(sa, typ.__name__) is typ
assert typ.__name__ in types.__all__
def test_uppercase_rendering(self):
"""Test that uppercase types from types.py always render as their
type.
As of SQLA 0.6, using an uppercase type means you want specifically
that type. If the database in use doesn't support that DDL, it (the DB
backend) should raise an error - it means you should be using a
lowercased (genericized) type.
"""
for dialect in self._all_dialects():
for type_, expected in (
(REAL, "REAL"),
(FLOAT, "FLOAT"),
(NUMERIC, "NUMERIC"),
(DECIMAL, "DECIMAL"),
(INTEGER, "INTEGER"),
(SMALLINT, "SMALLINT"),
(TIMESTAMP, ("TIMESTAMP", "TIMESTAMP WITHOUT TIME ZONE")),
(DATETIME, "DATETIME"),
(DATE, "DATE"),
(TIME, ("TIME", "TIME WITHOUT TIME ZONE")),
(CLOB, "CLOB"),
(VARCHAR(10), ("VARCHAR(10)","VARCHAR(10 CHAR)")),
(NVARCHAR(10), ("NVARCHAR(10)", "NATIONAL VARCHAR(10)",
"NVARCHAR2(10)")),
(CHAR, "CHAR"),
(NCHAR, ("NCHAR", "NATIONAL CHAR")),
(BLOB, ("BLOB", "BLOB SUB_TYPE 0")),
(BOOLEAN, ("BOOLEAN", "BOOL", "INTEGER"))
):
if isinstance(expected, str):
expected = (expected, )
try:
compiled = types.to_instance(type_).\
compile(dialect=dialect)
except NotImplementedError:
continue
assert compiled in expected, \
"%r matches none of %r for dialect %s" % \
(compiled, expected, dialect.name)
assert str(types.to_instance(type_)) in expected, \
"default str() of type %r not expected, %r" % \
(type_, expected)
@testing.uses_deprecated()
def test_adapt_method(self):
"""ensure all types have a working adapt() method,
which creates a distinct copy.
The distinct copy ensures that when we cache
the adapted() form of a type against the original
in a weak key dictionary, a cycle is not formed.
This test doesn't test type-specific arguments of
adapt() beyond their defaults.
"""
for typ in self._all_types():
if typ in (types.TypeDecorator, types.TypeEngine, types.Variant):
continue
elif typ is dialects.postgresql.ARRAY:
t1 = typ(String)
else:
t1 = typ()
for cls in [typ] + typ.__subclasses__():
if not issubclass(typ, types.Enum) and \
issubclass(cls, types.Enum):
continue
t2 = t1.adapt(cls)
assert t1 is not t2
for k in t1.__dict__:
if k == 'impl':
continue
# assert each value was copied, or that
# the adapted type has a more specific
# value than the original (i.e. SQL Server
# applies precision=24 for REAL)
assert \
getattr(t2, k) == t1.__dict__[k] or \
t1.__dict__[k] is None
def test_python_type(self):
eq_(types.Integer().python_type, int)
eq_(types.Numeric().python_type, decimal.Decimal)
eq_(types.Numeric(asdecimal=False).python_type, float)
# Py3K
#eq_(types.LargeBinary().python_type, bytes)
# Py2K
eq_(types.LargeBinary().python_type, str)
# end Py2K
eq_(types.Float().python_type, float)
eq_(types.Interval().python_type, datetime.timedelta)
eq_(types.Date().python_type, datetime.date)
eq_(types.DateTime().python_type, datetime.datetime)
# Py3K
#eq_(types.String().python_type, unicode)
# Py2K
eq_(types.String().python_type, str)
# end Py2K
eq_(types.Unicode().python_type, unicode)
eq_(types.String(convert_unicode=True).python_type, unicode)
assert_raises(
NotImplementedError,
lambda: types.TypeEngine().python_type
)
@testing.uses_deprecated()
def test_repr(self):
for typ in self._all_types():
if typ in (types.TypeDecorator, types.TypeEngine, types.Variant):
continue
elif typ is dialects.postgresql.ARRAY:
t1 = typ(String)
else:
t1 = typ()
repr(t1)
def test_plain_init_deprecation_warning(self):
for typ in (Integer, Date, SmallInteger):
assert_raises_message(
exc.SADeprecationWarning,
"Passing arguments to type object "
"constructor %s is deprecated" % typ,
typ, 11
)
class TypeAffinityTest(fixtures.TestBase):
def test_type_affinity(self):
for type_, affin in [
(String(), String),
(VARCHAR(), String),
(Date(), Date),
(LargeBinary(), types._Binary)
]:
eq_(type_._type_affinity, affin)
for t1, t2, comp in [
(Integer(), SmallInteger(), True),
(Integer(), String(), False),
(Integer(), Integer(), True),
(Text(), String(), True),
(Text(), Unicode(), True),
(LargeBinary(), Integer(), False),
(LargeBinary(), PickleType(), True),
(PickleType(), LargeBinary(), True),
(PickleType(), PickleType(), True),
]:
eq_(t1._compare_type_affinity(t2), comp, "%s %s" % (t1, t2))
def test_decorator_doesnt_cache(self):
from sqlalchemy.dialects import postgresql
class MyType(TypeDecorator):
impl = CHAR
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(postgresql.UUID())
else:
return dialect.type_descriptor(CHAR(32))
t1 = MyType()
d = postgresql.dialect()
assert t1._type_affinity is String
assert t1.dialect_impl(d)._type_affinity is postgresql.UUID
class PickleMetadataTest(fixtures.TestBase):
def testmeta(self):
for loads, dumps in picklers():
column_types = [
Column('Boo', Boolean()),
Column('Str', String()),
Column('Tex', Text()),
Column('Uni', Unicode()),
Column('Int', Integer()),
Column('Sma', SmallInteger()),
Column('Big', BigInteger()),
Column('Num', Numeric()),
Column('Flo', Float()),
Column('Dat', DateTime()),
Column('Dat', Date()),
Column('Tim', Time()),
Column('Lar', LargeBinary()),
Column('Pic', PickleType()),
Column('Int', Interval()),
Column('Enu', Enum('x','y','z', name="somename")),
]
for column_type in column_types:
#print column_type
meta = MetaData()
Table('foo', meta, column_type)
ct = loads(dumps(column_type))
mt = loads(dumps(meta))
class UserDefinedTest(fixtures.TablesTest, AssertsCompiledSQL):
"""tests user-defined types."""
def test_processing(self):
users = self.tables.users
users.insert().execute(
user_id=2, goofy='jack', goofy2='jack', goofy4=u'jack',
goofy7=u'jack', goofy8=12, goofy9=12)
users.insert().execute(
user_id=3, goofy='lala', goofy2='lala', goofy4=u'lala',
goofy7=u'lala', goofy8=15, goofy9=15)
users.insert().execute(
user_id=4, goofy='fred', goofy2='fred', goofy4=u'fred',
goofy7=u'fred', goofy8=9, goofy9=9)
l = users.select().order_by(users.c.user_id).execute().fetchall()
for assertstr, assertint, assertint2, row in zip(
["BIND_INjackBIND_OUT", "BIND_INlalaBIND_OUT", "BIND_INfredBIND_OUT"],
[1200, 1500, 900],
[1800, 2250, 1350],
l
):
for col in list(row)[1:5]:
eq_(col, assertstr)
eq_(row[5], assertint)
eq_(row[6], assertint2)
for col in row[3], row[4]:
assert isinstance(col, unicode)
def test_typedecorator_impl(self):
for impl_, exp, kw in [
(Float, "FLOAT", {}),
(Float, "FLOAT(2)", {'precision':2}),
(Float(2), "FLOAT(2)", {'precision':4}),
(Numeric(19, 2), "NUMERIC(19, 2)", {}),
]:
for dialect_ in (dialects.postgresql, dialects.mssql, dialects.mysql):
dialect_ = dialect_.dialect()
raw_impl = types.to_instance(impl_, **kw)
class MyType(types.TypeDecorator):
impl = impl_
dec_type = MyType(**kw)
eq_(dec_type.impl.__class__, raw_impl.__class__)
raw_dialect_impl = raw_impl.dialect_impl(dialect_)
dec_dialect_impl = dec_type.dialect_impl(dialect_)
eq_(dec_dialect_impl.__class__, MyType)
                eq_(raw_dialect_impl.__class__, dec_dialect_impl.impl.__class__)
self.assert_compile(
MyType(**kw),
exp,
dialect=dialect_
)
def test_user_defined_typedec_impl(self):
class MyType(types.TypeDecorator):
impl = Float
def load_dialect_impl(self, dialect):
if dialect.name == 'sqlite':
return String(50)
else:
return super(MyType, self).load_dialect_impl(dialect)
sl = dialects.sqlite.dialect()
pg = dialects.postgresql.dialect()
t = MyType()
self.assert_compile(t, "VARCHAR(50)", dialect=sl)
self.assert_compile(t, "FLOAT", dialect=pg)
eq_(
t.dialect_impl(dialect=sl).impl.__class__,
String().dialect_impl(dialect=sl).__class__
)
eq_(
t.dialect_impl(dialect=pg).impl.__class__,
Float().dialect_impl(pg).__class__
)
def test_user_defined_typedec_impl_bind(self):
class TypeOne(types.TypeEngine):
def bind_processor(self, dialect):
def go(value):
return value + " ONE"
return go
class TypeTwo(types.TypeEngine):
def bind_processor(self, dialect):
def go(value):
return value + " TWO"
return go
class MyType(types.TypeDecorator):
impl = TypeOne
def load_dialect_impl(self, dialect):
if dialect.name == 'sqlite':
return TypeOne()
else:
return TypeTwo()
def process_bind_param(self, value, dialect):
return "MYTYPE " + value
sl = dialects.sqlite.dialect()
pg = dialects.postgresql.dialect()
t = MyType()
eq_(
t._cached_bind_processor(sl)('foo'),
"MYTYPE foo ONE"
)
eq_(
t._cached_bind_processor(pg)('foo'),
"MYTYPE foo TWO"
)
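    # Illustrative note on the test above: for a TypeDecorator the decorator's
    # process_bind_param() runs first and its result is then handed to the
    # dialect-selected impl's bind_processor(), e.g. on sqlite:
    #     'foo' -> process_bind_param -> 'MYTYPE foo' -> TypeOne -> 'MYTYPE foo ONE'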
def test_user_defined_dialect_specific_args(self):
class MyType(types.UserDefinedType):
def __init__(self, foo='foo', **kwargs):
super(MyType, self).__init__()
self.foo = foo
self.dialect_specific_args = kwargs
def adapt(self, cls):
return cls(foo=self.foo, **self.dialect_specific_args)
t = MyType(bar='bar')
a = t.dialect_impl(testing.db.dialect)
eq_(a.foo, 'foo')
eq_(a.dialect_specific_args['bar'], 'bar')
@testing.provide_metadata
def test_type_coerce(self):
"""test ad-hoc usage of custom types with type_coerce()."""
metadata = self.metadata
class MyType(types.TypeDecorator):
impl = String
def process_bind_param(self, value, dialect):
return value[0:-8]
def process_result_value(self, value, dialect):
return value + "BIND_OUT"
t = Table('t', metadata, Column('data', String(50)))
metadata.create_all()
t.insert().values(data=type_coerce('d1BIND_OUT',MyType)).execute()
eq_(
select([type_coerce(t.c.data, MyType)]).execute().fetchall(),
[('d1BIND_OUT', )]
)
eq_(
select([t.c.data, type_coerce(t.c.data, MyType)]).execute().fetchall(),
[('d1', 'd1BIND_OUT')]
)
eq_(
select([t.c.data, type_coerce(t.c.data, MyType)]).\
where(type_coerce(t.c.data, MyType) == 'd1BIND_OUT').\
execute().fetchall(),
[('d1', 'd1BIND_OUT')]
)
eq_(
select([t.c.data, type_coerce(t.c.data, MyType)]).\
where(t.c.data == type_coerce('d1BIND_OUT', MyType)).\
execute().fetchall(),
[('d1', 'd1BIND_OUT')]
)
eq_(
select([t.c.data, type_coerce(t.c.data, MyType)]).\
where(t.c.data == type_coerce(None, MyType)).\
execute().fetchall(),
[]
)
eq_(
select([t.c.data, type_coerce(t.c.data, MyType)]).\
where(type_coerce(t.c.data, MyType) == None).\
execute().fetchall(),
[]
)
@classmethod
def define_tables(cls, metadata):
class MyType(types.UserDefinedType):
def get_col_spec(self):
return "VARCHAR(100)"
def bind_processor(self, dialect):
def process(value):
return "BIND_IN"+ value
return process
def result_processor(self, dialect, coltype):
def process(value):
return value + "BIND_OUT"
return process
def adapt(self, typeobj):
return typeobj()
class MyDecoratedType(types.TypeDecorator):
impl = String
def bind_processor(self, dialect):
impl_processor = super(MyDecoratedType, self).bind_processor(dialect)\
or (lambda value:value)
def process(value):
return "BIND_IN"+ impl_processor(value)
return process
def result_processor(self, dialect, coltype):
impl_processor = super(MyDecoratedType, self).result_processor(dialect, coltype)\
or (lambda value:value)
def process(value):
return impl_processor(value) + "BIND_OUT"
return process
def copy(self):
return MyDecoratedType()
class MyNewUnicodeType(types.TypeDecorator):
impl = Unicode
def process_bind_param(self, value, dialect):
return "BIND_IN" + value
def process_result_value(self, value, dialect):
return value + "BIND_OUT"
def copy(self):
return MyNewUnicodeType(self.impl.length)
class MyNewIntType(types.TypeDecorator):
impl = Integer
def process_bind_param(self, value, dialect):
return value * 10
def process_result_value(self, value, dialect):
return value * 10
def copy(self):
return MyNewIntType()
class MyNewIntSubClass(MyNewIntType):
def process_result_value(self, value, dialect):
return value * 15
def copy(self):
return MyNewIntSubClass()
class MyUnicodeType(types.TypeDecorator):
impl = Unicode
def bind_processor(self, dialect):
impl_processor = super(MyUnicodeType, self).bind_processor(dialect)\
or (lambda value:value)
def process(value):
return "BIND_IN"+ impl_processor(value)
return process
def result_processor(self, dialect, coltype):
impl_processor = super(MyUnicodeType, self).result_processor(dialect, coltype)\
or (lambda value:value)
def process(value):
return impl_processor(value) + "BIND_OUT"
return process
def copy(self):
return MyUnicodeType(self.impl.length)
Table('users', metadata,
Column('user_id', Integer, primary_key = True),
              # totally custom type
Column('goofy', MyType, nullable = False),
              # decorated type with an argument, so it's a String
Column('goofy2', MyDecoratedType(50), nullable = False),
Column('goofy4', MyUnicodeType(50), nullable = False),
Column('goofy7', MyNewUnicodeType(50), nullable = False),
Column('goofy8', MyNewIntType, nullable = False),
Column('goofy9', MyNewIntSubClass, nullable = False),
)
class VariantTest(fixtures.TestBase, AssertsCompiledSQL):
def setup(self):
class UTypeOne(types.UserDefinedType):
def get_col_spec(self):
return "UTYPEONE"
def bind_processor(self, dialect):
def process(value):
return value + "UONE"
return process
class UTypeTwo(types.UserDefinedType):
def get_col_spec(self):
return "UTYPETWO"
def bind_processor(self, dialect):
def process(value):
return value + "UTWO"
return process
class UTypeThree(types.UserDefinedType):
def get_col_spec(self):
return "UTYPETHREE"
self.UTypeOne = UTypeOne
self.UTypeTwo = UTypeTwo
self.UTypeThree = UTypeThree
self.variant = self.UTypeOne().with_variant(
self.UTypeTwo(), 'postgresql')
self.composite = self.variant.with_variant(
self.UTypeThree(), 'mysql')
def test_illegal_dupe(self):
v = self.UTypeOne().with_variant(
self.UTypeTwo(), 'postgresql'
)
assert_raises_message(
exc.ArgumentError,
"Dialect 'postgresql' is already present "
"in the mapping for this Variant",
lambda: v.with_variant(self.UTypeThree(), 'postgresql')
)
def test_compile(self):
self.assert_compile(
self.variant,
"UTYPEONE",
use_default_dialect=True
)
self.assert_compile(
self.variant,
"UTYPEONE",
dialect=dialects.mysql.dialect()
)
self.assert_compile(
self.variant,
"UTYPETWO",
dialect=dialects.postgresql.dialect()
)
def test_compile_composite(self):
self.assert_compile(
self.composite,
"UTYPEONE",
use_default_dialect=True
)
self.assert_compile(
self.composite,
"UTYPETHREE",
dialect=dialects.mysql.dialect()
)
self.assert_compile(
self.composite,
"UTYPETWO",
dialect=dialects.postgresql.dialect()
)
def test_bind_process(self):
eq_(
self.variant._cached_bind_processor(
dialects.mysql.dialect())('foo'),
'fooUONE'
)
eq_(
self.variant._cached_bind_processor(
default.DefaultDialect())('foo'),
'fooUONE'
)
eq_(
self.variant._cached_bind_processor(
dialects.postgresql.dialect())('foo'),
'fooUTWO'
)
def test_bind_process_composite(self):
assert self.composite._cached_bind_processor(
dialects.mysql.dialect()) is None
eq_(
self.composite._cached_bind_processor(
default.DefaultDialect())('foo'),
'fooUONE'
)
eq_(
self.composite._cached_bind_processor(
dialects.postgresql.dialect())('foo'),
'fooUTWO'
)
class UnicodeTest(fixtures.TestBase, AssertsExecutionResults):
"""tests the Unicode type. also tests the TypeDecorator with instances in the types package."""
@classmethod
def setup_class(cls):
global unicode_table, metadata
metadata = MetaData(testing.db)
unicode_table = Table('unicode_table', metadata,
Column('id', Integer, Sequence('uni_id_seq', optional=True), primary_key=True),
Column('unicode_varchar', Unicode(250)),
Column('unicode_text', UnicodeText),
)
metadata.create_all()
@classmethod
def teardown_class(cls):
metadata.drop_all()
@engines.close_first
def teardown(self):
unicode_table.delete().execute()
def test_native_unicode(self):
"""assert expected values for 'native unicode' mode"""
if \
(testing.against('mssql+pyodbc') and not testing.db.dialect.freetds):
assert testing.db.dialect.returns_unicode_strings == 'conditional'
return
if testing.against('mssql+pymssql'):
assert testing.db.dialect.returns_unicode_strings == ('charset' in testing.db.url.query)
return
assert testing.db.dialect.returns_unicode_strings == \
((testing.db.name, testing.db.driver) in \
(
('postgresql','psycopg2'),
('postgresql','pypostgresql'),
('postgresql','pg8000'),
('postgresql','zxjdbc'),
('mysql','oursql'),
('mysql','zxjdbc'),
('mysql','mysqlconnector'),
('mysql','pymysql'),
('sqlite','pysqlite'),
('oracle','zxjdbc'),
('oracle','cx_oracle'),
)), \
"name: %s driver %s returns_unicode_strings=%s" % \
(testing.db.name,
testing.db.driver,
testing.db.dialect.returns_unicode_strings)
def test_round_trip(self):
unicodedata = u"Alors vous imaginez ma surprise, au lever du jour, "\
u"quand une drôle de petite voix m’a réveillé. Elle "\
u"disait: « S’il vous plaît… dessine-moi un mouton! »"
unicode_table.insert().execute(unicode_varchar=unicodedata,unicode_text=unicodedata)
x = unicode_table.select().execute().first()
assert isinstance(x['unicode_varchar'], unicode)
assert isinstance(x['unicode_text'], unicode)
eq_(x['unicode_varchar'], unicodedata)
eq_(x['unicode_text'], unicodedata)
def test_round_trip_executemany(self):
# cx_oracle was producing different behavior for cursor.executemany()
# vs. cursor.execute()
unicodedata = u"Alors vous imaginez ma surprise, au lever du jour, quand "\
u"une drôle de petite voix m’a réveillé. "\
u"Elle disait: « S’il vous plaît… dessine-moi un mouton! »"
unicode_table.insert().execute(
dict(unicode_varchar=unicodedata,unicode_text=unicodedata),
dict(unicode_varchar=unicodedata,unicode_text=unicodedata)
)
x = unicode_table.select().execute().first()
assert isinstance(x['unicode_varchar'], unicode)
eq_(x['unicode_varchar'], unicodedata)
assert isinstance(x['unicode_text'], unicode)
eq_(x['unicode_text'], unicodedata)
def test_union(self):
"""ensure compiler processing works for UNIONs"""
unicodedata = u"Alors vous imaginez ma surprise, au lever du jour, quand "\
u"une drôle de petite voix m’a réveillé. "\
u"Elle disait: « S’il vous plaît… dessine-moi un mouton! »"
unicode_table.insert().execute(unicode_varchar=unicodedata,unicode_text=unicodedata)
x = union(
select([unicode_table.c.unicode_varchar]),
select([unicode_table.c.unicode_varchar])
).execute().first()
assert isinstance(x['unicode_varchar'], unicode)
eq_(x['unicode_varchar'], unicodedata)
@testing.fails_on('oracle', 'oracle converts empty strings to a blank space')
def test_blank_strings(self):
unicode_table.insert().execute(unicode_varchar=u'')
assert select([unicode_table.c.unicode_varchar]).scalar() == u''
def test_unicode_warnings(self):
"""test the warnings raised when SQLA must coerce unicode binds,
*and* is using the Unicode type.
"""
unicodedata = u"Alors vous imaginez ma surprise, au lever du jour, quand "\
u"une drôle de petite voix m’a réveillé. "\
u"Elle disait: « S’il vous plaît… dessine-moi un mouton! »"
        # using Unicode explicitly - warning should be emitted
u = Unicode()
uni = u.dialect_impl(testing.db.dialect).bind_processor(testing.db.dialect)
if testing.db.dialect.supports_unicode_binds:
# Py3K
#assert_raises(exc.SAWarning, uni, b'x')
#assert isinstance(uni(unicodedata), str)
# Py2K
assert_raises(exc.SAWarning, uni, 'x')
assert isinstance(uni(unicodedata), unicode)
# end Py2K
eq_(uni(unicodedata), unicodedata)
else:
# Py3K
#assert_raises(exc.SAWarning, uni, b'x')
#assert isinstance(uni(unicodedata), bytes)
# Py2K
assert_raises(exc.SAWarning, uni, 'x')
assert isinstance(uni(unicodedata), str)
# end Py2K
eq_(uni(unicodedata), unicodedata.encode('utf-8'))
# using convert unicode at engine level -
# this should not be raising a warning
unicode_engine = engines.utf8_engine(options={'convert_unicode':True,})
unicode_engine.dialect.supports_unicode_binds = False
s = String()
uni = s.dialect_impl(unicode_engine.dialect).bind_processor(unicode_engine.dialect)
# this is not the unicode type - no warning
# Py3K
#uni(b'x')
#assert isinstance(uni(unicodedata), bytes)
# Py2K
uni('x')
assert isinstance(uni(unicodedata), str)
# end Py2K
eq_(uni(unicodedata), unicodedata.encode('utf-8'))
# Py3K
#@testing.fails_if(
# lambda: testing.db_spec("postgresql+pg8000")(testing.db),
# "pg8000 appropriately does not accept 'bytes' for a VARCHAR column."
# )
def test_ignoring_unicode_error(self):
"""checks String(unicode_error='ignore') is passed to underlying codec."""
unicodedata = u"Alors vous imaginez ma surprise, au lever du jour, quand "\
u"une drôle de petite voix m’a réveillé. "\
u"Elle disait: « S’il vous plaît… dessine-moi un mouton! »"
asciidata = unicodedata.encode('ascii', 'ignore')
m = MetaData()
table = Table('unicode_err_table', m,
Column('sort', Integer),
Column('plain_varchar_no_coding_error', \
String(248, convert_unicode='force', unicode_error='ignore'))
)
m2 = MetaData()
utf8_table = Table('unicode_err_table', m2,
Column('sort', Integer),
Column('plain_varchar_no_coding_error', \
String(248, convert_unicode=True))
)
engine = engines.testing_engine(options={'encoding':'ascii'})
m.create_all(engine)
try:
# insert a row that should be ascii and
# coerce from unicode with ignore on the bind side
engine.execute(
table.insert(),
sort=1,
plain_varchar_no_coding_error=unicodedata
)
# switch to utf-8
engine.dialect.encoding = 'utf-8'
from binascii import hexlify
# the row that we put in was stored as hexlified ascii
row = engine.execute(utf8_table.select()).first()
x = row['plain_varchar_no_coding_error']
connect_opts = engine.dialect.create_connect_args(testing.db.url)[1]
if isinstance(x, unicode):
x = x.encode('utf-8')
a = hexlify(x)
b = hexlify(asciidata)
eq_(a, b)
# insert another row which will be stored with
# utf-8 only chars
engine.execute(
utf8_table.insert(),
sort=2,
plain_varchar_no_coding_error=unicodedata
)
# switch back to ascii
engine.dialect.encoding = 'ascii'
# one row will be ascii with ignores,
# the other will be either ascii with the ignores
# or just the straight unicode+ utf8 value if the
# dialect just returns unicode
result = engine.execute(table.select().order_by(table.c.sort))
ascii_row = result.fetchone()
utf8_row = result.fetchone()
result.close()
x = ascii_row['plain_varchar_no_coding_error']
# on python3 "x" comes back as string (i.e. unicode),
# hexlify requires bytes
a = hexlify(x.encode('utf-8'))
b = hexlify(asciidata)
eq_(a, b)
x = utf8_row['plain_varchar_no_coding_error']
if testing.against('mssql+pyodbc') and not testing.db.dialect.freetds:
# TODO: no clue what this is
eq_(
x,
u'Alors vous imaginez ma surprise, au lever du jour, quand une '
u'drle de petite voix ma rveill. Elle disait: Sil vous plat '
u'dessine-moi un mouton! '
)
elif engine.dialect.returns_unicode_strings:
eq_(x, unicodedata)
else:
a = hexlify(x)
eq_(a, b)
finally:
m.drop_all(engine)
class EnumTest(fixtures.TestBase):
@classmethod
def setup_class(cls):
global enum_table, non_native_enum_table, metadata
metadata = MetaData(testing.db)
enum_table = Table('enum_table', metadata,
Column("id", Integer, primary_key=True),
Column('someenum', Enum('one','two','three', name='myenum'))
)
non_native_enum_table = Table('non_native_enum_table', metadata,
Column("id", Integer, primary_key=True),
Column('someenum', Enum('one','two','three', native_enum=False)),
)
metadata.create_all()
def teardown(self):
enum_table.delete().execute()
non_native_enum_table.delete().execute()
@classmethod
def teardown_class(cls):
metadata.drop_all()
@testing.fails_on('postgresql+zxjdbc',
'zxjdbc fails on ENUM: column "XXX" is of type XXX '
'but expression is of type character varying')
@testing.fails_on('postgresql+pg8000',
'zxjdbc fails on ENUM: column "XXX" is of type XXX '
'but expression is of type text')
def test_round_trip(self):
enum_table.insert().execute([
{'id':1, 'someenum':'two'},
{'id':2, 'someenum':'two'},
{'id':3, 'someenum':'one'},
])
eq_(
enum_table.select().order_by(enum_table.c.id).execute().fetchall(),
[
(1, 'two'),
(2, 'two'),
(3, 'one'),
]
)
def test_non_native_round_trip(self):
non_native_enum_table.insert().execute([
{'id':1, 'someenum':'two'},
{'id':2, 'someenum':'two'},
{'id':3, 'someenum':'one'},
])
eq_(
non_native_enum_table.select().
order_by(non_native_enum_table.c.id).execute().fetchall(),
[
(1, 'two'),
(2, 'two'),
(3, 'one'),
]
)
def test_adapt(self):
from sqlalchemy.dialects.postgresql import ENUM
e1 = Enum('one','two','three', native_enum=False)
eq_(e1.adapt(ENUM).native_enum, False)
e1 = Enum('one','two','three', native_enum=True)
eq_(e1.adapt(ENUM).native_enum, True)
e1 = Enum('one','two','three', name='foo', schema='bar')
eq_(e1.adapt(ENUM).name, 'foo')
eq_(e1.adapt(ENUM).schema, 'bar')
@testing.crashes('mysql',
'Inconsistent behavior across various OS/drivers'
)
def test_constraint(self):
assert_raises(exc.DBAPIError,
enum_table.insert().execute,
{'id':4, 'someenum':'four'}
)
@testing.fails_on('mysql',
"the CHECK constraint doesn't raise an exception for unknown reason")
def test_non_native_constraint(self):
assert_raises(exc.DBAPIError,
non_native_enum_table.insert().execute,
{'id':4, 'someenum':'four'}
)
def test_mock_engine_no_prob(self):
"""ensure no 'checkfirst' queries are run when enums
are created with checkfirst=False"""
e = engines.mock_engine()
t = Table('t1', MetaData(),
Column('x', Enum("x", "y", name="pge"))
)
t.create(e, checkfirst=False)
# basically looking for the start of
# the constraint, or the ENUM def itself,
# depending on backend.
assert "('x'," in e.print_sql()
class BinaryTest(fixtures.TestBase, AssertsExecutionResults):
__excluded_on__ = (
('mysql', '<', (4, 1, 1)), # screwy varbinary types
)
@classmethod
def setup_class(cls):
global binary_table, MyPickleType, metadata
class MyPickleType(types.TypeDecorator):
impl = PickleType
def process_bind_param(self, value, dialect):
if value:
value.stuff = 'this is modified stuff'
return value
def process_result_value(self, value, dialect):
if value:
value.stuff = 'this is the right stuff'
return value
metadata = MetaData(testing.db)
binary_table = Table('binary_table', metadata,
Column('primary_id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', LargeBinary),
Column('data_slice', LargeBinary(100)),
Column('misc', String(30)),
Column('pickled', PickleType),
Column('mypickle', MyPickleType)
)
metadata.create_all()
@engines.close_first
def teardown(self):
binary_table.delete().execute()
@classmethod
def teardown_class(cls):
metadata.drop_all()
def test_round_trip(self):
testobj1 = pickleable.Foo('im foo 1')
testobj2 = pickleable.Foo('im foo 2')
testobj3 = pickleable.Foo('im foo 3')
stream1 =self.load_stream('binary_data_one.dat')
stream2 =self.load_stream('binary_data_two.dat')
binary_table.insert().execute(
primary_id=1,
misc='binary_data_one.dat',
data=stream1,
data_slice=stream1[0:100],
pickled=testobj1,
mypickle=testobj3)
binary_table.insert().execute(
primary_id=2,
misc='binary_data_two.dat',
data=stream2,
data_slice=stream2[0:99],
pickled=testobj2)
binary_table.insert().execute(
primary_id=3,
misc='binary_data_two.dat',
data=None,
data_slice=stream2[0:99],
pickled=None)
for stmt in (
binary_table.select(order_by=binary_table.c.primary_id),
text(
"select * from binary_table order by binary_table.primary_id",
typemap={'pickled':PickleType,
'mypickle':MyPickleType,
'data':LargeBinary, 'data_slice':LargeBinary},
bind=testing.db)
):
l = stmt.execute().fetchall()
eq_(stream1, l[0]['data'])
eq_(stream1[0:100], l[0]['data_slice'])
eq_(stream2, l[1]['data'])
eq_(testobj1, l[0]['pickled'])
eq_(testobj2, l[1]['pickled'])
eq_(testobj3.moredata, l[0]['mypickle'].moredata)
eq_(l[0]['mypickle'].stuff, 'this is the right stuff')
@testing.fails_on('oracle+cx_oracle', 'oracle fairly grumpy about binary '
'data, not really known how to make this work')
def test_comparison(self):
"""test that type coercion occurs on comparison for binary"""
expr = binary_table.c.data == 'foo'
assert isinstance(expr.right.type, LargeBinary)
data = os.urandom(32)
binary_table.insert().execute(data=data)
eq_(binary_table.select().where(binary_table.c.data==data).alias().count().scalar(), 1)
def load_stream(self, name):
f = os.path.join(os.path.dirname(__file__), "..", name)
return open(f, mode='rb').read()
class ExpressionTest(fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL):
__dialect__ = 'default'
@classmethod
def setup_class(cls):
global test_table, meta, MyCustomType, MyTypeDec
class MyCustomType(types.UserDefinedType):
def get_col_spec(self):
return "INT"
def bind_processor(self, dialect):
def process(value):
return value * 10
return process
def result_processor(self, dialect, coltype):
def process(value):
return value / 10
return process
def adapt_operator(self, op):
return {operators.add:operators.sub, operators.sub:operators.add}.get(op, op)
class MyTypeDec(types.TypeDecorator):
impl = String
def process_bind_param(self, value, dialect):
return "BIND_IN" + str(value)
def process_result_value(self, value, dialect):
return value + "BIND_OUT"
meta = MetaData(testing.db)
test_table = Table('test', meta,
Column('id', Integer, primary_key=True),
Column('data', String(30)),
Column('atimestamp', Date),
Column('avalue', MyCustomType),
Column('bvalue', MyTypeDec(50)),
)
meta.create_all()
test_table.insert().execute({
'id':1,
'data':'somedata',
'atimestamp':datetime.date(2007, 10, 15),
'avalue':25, 'bvalue':'foo'})
@classmethod
def teardown_class(cls):
meta.drop_all()
def test_control(self):
assert testing.db.execute("select avalue from test").scalar() == 250
eq_(
test_table.select().execute().fetchall(),
[(1, 'somedata', datetime.date(2007, 10, 15), 25,
'BIND_INfooBIND_OUT')]
)
def test_bind_adapt(self):
# test an untyped bind gets the left side's type
expr = test_table.c.atimestamp == bindparam("thedate")
eq_(expr.right.type._type_affinity, Date)
eq_(
testing.db.execute(
select([test_table.c.id, test_table.c.data, test_table.c.atimestamp])
.where(expr),
{"thedate":datetime.date(2007, 10, 15)}).fetchall(),
[(1, 'somedata', datetime.date(2007, 10, 15))]
)
expr = test_table.c.avalue == bindparam("somevalue")
eq_(expr.right.type._type_affinity, MyCustomType)
eq_(
testing.db.execute(test_table.select().where(expr),
{'somevalue': 25}).fetchall(),
[(1, 'somedata', datetime.date(2007, 10, 15), 25,
'BIND_INfooBIND_OUT')]
)
expr = test_table.c.bvalue == bindparam("somevalue")
eq_(expr.right.type._type_affinity, String)
eq_(
testing.db.execute(test_table.select().where(expr),
{"somevalue":"foo"}).fetchall(),
[(1, 'somedata',
datetime.date(2007, 10, 15), 25, 'BIND_INfooBIND_OUT')]
)
def test_literal_adapt(self):
# literals get typed based on the types dictionary, unless
# compatible with the left side type
expr = column('foo', String) == 5
eq_(expr.right.type._type_affinity, Integer)
expr = column('foo', String) == "asdf"
eq_(expr.right.type._type_affinity, String)
expr = column('foo', CHAR) == 5
eq_(expr.right.type._type_affinity, Integer)
expr = column('foo', CHAR) == "asdf"
eq_(expr.right.type.__class__, CHAR)
@testing.fails_on('firebird', 'Data type unknown on the parameter')
@testing.fails_on('mssql', 'int is unsigned ? not clear')
def test_operator_adapt(self):
"""test type-based overloading of operators"""
# test string concatenation
expr = test_table.c.data + "somedata"
eq_(testing.db.execute(select([expr])).scalar(), "somedatasomedata")
expr = test_table.c.id + 15
eq_(testing.db.execute(select([expr])).scalar(), 16)
# test custom operator conversion
expr = test_table.c.avalue + 40
assert expr.type.__class__ is test_table.c.avalue.type.__class__
# value here is calculated as (250 - 40) / 10 = 21
# because "40" is an integer, not an "avalue"
eq_(testing.db.execute(select([expr.label('foo')])).scalar(), 21)
expr = test_table.c.avalue + literal(40, type_=MyCustomType)
# + operator converted to -
# value is calculated as: (250 - (40 * 10)) / 10 == -15
eq_(testing.db.execute(select([expr.label('foo')])).scalar(), -15)
# this one relies upon anonymous labeling to assemble result
# processing rules on the column.
eq_(testing.db.execute(select([expr])).scalar(), -15)
def test_typedec_operator_adapt(self):
expr = test_table.c.bvalue + "hi"
assert expr.type.__class__ is MyTypeDec
assert expr.right.type.__class__ is MyTypeDec
eq_(
testing.db.execute(select([expr.label('foo')])).scalar(),
"BIND_INfooBIND_INhiBIND_OUT"
)
def test_typedec_righthand_coercion(self):
class MyTypeDec(types.TypeDecorator):
impl = String
def process_bind_param(self, value, dialect):
return "BIND_IN" + str(value)
def process_result_value(self, value, dialect):
return value + "BIND_OUT"
tab = table('test', column('bvalue', MyTypeDec))
expr = tab.c.bvalue + 6
self.assert_compile(
expr,
"test.bvalue || :bvalue_1",
use_default_dialect=True
)
assert expr.type.__class__ is MyTypeDec
eq_(
testing.db.execute(select([expr.label('foo')])).scalar(),
"BIND_INfooBIND_IN6BIND_OUT"
)
def test_bind_typing(self):
from sqlalchemy.sql import column
class MyFoobarType(types.UserDefinedType):
pass
class Foo(object):
pass
# unknown type + integer, right hand bind
# is an Integer
expr = column("foo", MyFoobarType) + 5
assert expr.right.type._type_affinity is types.Integer
# untyped bind - it gets assigned MyFoobarType
expr = column("foo", MyFoobarType) + bindparam("foo")
assert expr.right.type._type_affinity is MyFoobarType
expr = column("foo", MyFoobarType) + bindparam("foo", type_=Integer)
assert expr.right.type._type_affinity is types.Integer
# unknown type + unknown, right hand bind
# coerces to the left
expr = column("foo", MyFoobarType) + Foo()
assert expr.right.type._type_affinity is MyFoobarType
# including for non-commutative ops
expr = column("foo", MyFoobarType) - Foo()
assert expr.right.type._type_affinity is MyFoobarType
expr = column("foo", MyFoobarType) - datetime.date(2010, 8, 25)
assert expr.right.type._type_affinity is types.Date
def test_date_coercion(self):
from sqlalchemy.sql import column
expr = column('bar', types.NULLTYPE) - column('foo', types.TIMESTAMP)
eq_(expr.type._type_affinity, types.NullType)
expr = func.sysdate() - column('foo', types.TIMESTAMP)
eq_(expr.type._type_affinity, types.Interval)
expr = func.current_date() - column('foo', types.TIMESTAMP)
eq_(expr.type._type_affinity, types.Interval)
def test_numerics_coercion(self):
from sqlalchemy.sql import column
import operator
for op in (
operator.add,
operator.mul,
operator.truediv,
operator.sub
):
for other in (Numeric(10, 2), Integer):
expr = op(
column('bar', types.Numeric(10, 2)),
column('foo', other)
)
assert isinstance(expr.type, types.Numeric)
expr = op(
column('foo', other),
column('bar', types.Numeric(10, 2))
)
assert isinstance(expr.type, types.Numeric)
def test_null_comparison(self):
eq_(
str(column('a', types.NullType()) + column('b', types.NullType())),
"a + b"
)
def test_expression_typing(self):
expr = column('bar', Integer) - 3
eq_(expr.type._type_affinity, Integer)
expr = bindparam('bar') + bindparam('foo')
eq_(expr.type, types.NULLTYPE)
def test_distinct(self):
s = select([distinct(test_table.c.avalue)])
eq_(testing.db.execute(s).scalar(), 25)
s = select([test_table.c.avalue.distinct()])
eq_(testing.db.execute(s).scalar(), 25)
assert distinct(test_table.c.data).type == test_table.c.data.type
assert test_table.c.data.distinct().type == test_table.c.data.type
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
def test_default_compile(self):
"""test that the base dialect of the type object is used
for default compilation.
"""
for type_, expected in (
(String(), "VARCHAR"),
(Integer(), "INTEGER"),
(dialects.postgresql.INET(), "INET"),
(dialects.postgresql.FLOAT(), "FLOAT"),
(dialects.mysql.REAL(precision=8, scale=2), "REAL(8, 2)"),
(dialects.postgresql.REAL(), "REAL"),
(INTEGER(), "INTEGER"),
(dialects.mysql.INTEGER(display_width=5), "INTEGER(5)")
):
self.assert_compile(type_, expected,
allow_dialect_select=True)
class DateTest(fixtures.TestBase, AssertsExecutionResults):
@classmethod
def setup_class(cls):
global users_with_date, insert_data
db = testing.db
if testing.against('oracle'):
insert_data = [
(7, 'jack',
datetime.datetime(2005, 11, 10, 0, 0),
datetime.date(2005,11,10),
datetime.datetime(2005, 11, 10, 0, 0, 0, 29384)),
(8, 'roy',
datetime.datetime(2005, 11, 10, 11, 52, 35),
datetime.date(2005,10,10),
datetime.datetime(2006, 5, 10, 15, 32, 47, 6754)),
(9, 'foo',
datetime.datetime(2006, 11, 10, 11, 52, 35),
datetime.date(1970,4,1),
datetime.datetime(2004, 9, 18, 4, 0, 52, 1043)),
(10, 'colber', None, None, None),
]
fnames = ['user_id', 'user_name', 'user_datetime',
'user_date', 'user_time']
collist = [Column('user_id', INT, primary_key=True),
Column('user_name', VARCHAR(20)),
Column('user_datetime', DateTime),
Column('user_date', Date),
Column('user_time', TIMESTAMP)]
else:
datetime_micro = 54839
time_micro = 999
# Missing or poor microsecond support:
if testing.against('mssql', 'mysql', 'firebird', '+zxjdbc'):
datetime_micro, time_micro = 0, 0
# No microseconds for TIME
elif testing.against('maxdb'):
time_micro = 0
insert_data = [
(7, 'jack',
datetime.datetime(2005, 11, 10, 0, 0),
datetime.date(2005, 11, 10),
datetime.time(12, 20, 2)),
(8, 'roy',
datetime.datetime(2005, 11, 10, 11, 52, 35),
datetime.date(2005, 10, 10),
datetime.time(0, 0, 0)),
(9, 'foo',
datetime.datetime(2005, 11, 10, 11, 52, 35, datetime_micro),
datetime.date(1970, 4, 1),
datetime.time(23, 59, 59, time_micro)),
(10, 'colber', None, None, None),
]
fnames = ['user_id', 'user_name', 'user_datetime',
'user_date', 'user_time']
collist = [Column('user_id', INT, primary_key=True),
Column('user_name', VARCHAR(20)),
Column('user_datetime', DateTime(timezone=False)),
Column('user_date', Date),
Column('user_time', Time)]
if testing.against('sqlite', 'postgresql'):
insert_data.append(
(11, 'historic',
datetime.datetime(1850, 11, 10, 11, 52, 35, datetime_micro),
datetime.date(1727,4,1),
None),
)
users_with_date = Table('query_users_with_date',
MetaData(testing.db), *collist)
users_with_date.create()
insert_dicts = [dict(zip(fnames, d)) for d in insert_data]
for idict in insert_dicts:
users_with_date.insert().execute(**idict)
@classmethod
def teardown_class(cls):
users_with_date.drop()
def testdate(self):
global insert_data
l = map(tuple,
users_with_date.select().order_by(users_with_date.c.user_id).execute().fetchall())
self.assert_(l == insert_data,
'DateTest mismatch: got:%s expected:%s' % (l, insert_data))
def testtextdate(self):
x = testing.db.execute(text(
"select user_datetime from query_users_with_date",
typemap={'user_datetime':DateTime})).fetchall()
self.assert_(isinstance(x[0][0], datetime.datetime))
x = testing.db.execute(text(
"select * from query_users_with_date where user_datetime=:somedate",
bindparams=[bindparam('somedate', type_=types.DateTime)]),
somedate=datetime.datetime(2005, 11, 10, 11, 52, 35)).fetchall()
def testdate2(self):
meta = MetaData(testing.db)
t = Table('testdate', meta,
Column('id', Integer,
Sequence('datetest_id_seq', optional=True),
primary_key=True),
Column('adate', Date), Column('adatetime', DateTime))
t.create(checkfirst=True)
try:
d1 = datetime.date(2007, 10, 30)
t.insert().execute(adate=d1, adatetime=d1)
d2 = datetime.datetime(2007, 10, 30)
t.insert().execute(adate=d2, adatetime=d2)
x = t.select().execute().fetchall()[0]
eq_(x.adate.__class__, datetime.date)
eq_(x.adatetime.__class__, datetime.datetime)
t.delete().execute()
# test mismatched date/datetime
t.insert().execute(adate=d2, adatetime=d2)
eq_(select([t.c.adate, t.c.adatetime], t.c.adate==d1).execute().fetchall(), [(d1, d2)])
eq_(select([t.c.adate, t.c.adatetime], t.c.adate==d1).execute().fetchall(), [(d1, d2)])
finally:
t.drop(checkfirst=True)
class StringTest(fixtures.TestBase):
@testing.requires.unbounded_varchar
def test_nolength_string(self):
metadata = MetaData(testing.db)
foo = Table('foo', metadata, Column('one', String))
foo.create()
foo.drop()
class NumericTest(fixtures.TestBase):
def setup(self):
global metadata
metadata = MetaData(testing.db)
def teardown(self):
metadata.drop_all()
@testing.emits_warning(r".*does \*not\* support Decimal objects natively")
def _do_test(self, type_, input_, output, filter_=None, check_scale=False):
t = Table('t', metadata, Column('x', type_))
t.create()
t.insert().execute([{'x':x} for x in input_])
result = set([row[0] for row in t.select().execute()])
output = set(output)
if filter_:
result = set(filter_(x) for x in result)
output = set(filter_(x) for x in output)
#print result
#print output
eq_(result, output)
if check_scale:
eq_(
[str(x) for x in result],
[str(x) for x in output],
)
def test_numeric_as_decimal(self):
self._do_test(
Numeric(precision=8, scale=4),
[15.7563, decimal.Decimal("15.7563"), None],
[decimal.Decimal("15.7563"), None],
)
def test_numeric_as_float(self):
if testing.against("oracle+cx_oracle"):
filter_ = lambda n:n is not None and round(n, 5) or None
else:
filter_ = None
self._do_test(
Numeric(precision=8, scale=4, asdecimal=False),
[15.7563, decimal.Decimal("15.7563"), None],
[15.7563, None],
filter_ = filter_
)
def test_float_as_decimal(self):
self._do_test(
Float(precision=8, asdecimal=True),
[15.7563, decimal.Decimal("15.7563"), None],
[decimal.Decimal("15.7563"), None],
filter_ = lambda n:n is not None and round(n, 5) or None
)
def test_float_as_float(self):
self._do_test(
Float(precision=8),
[15.7563, decimal.Decimal("15.7563")],
[15.7563],
filter_ = lambda n:n is not None and round(n, 5) or None
)
@testing.fails_on('mssql+pymssql', 'FIXME: improve pymssql dec handling')
def test_precision_decimal(self):
numbers = set([
decimal.Decimal("54.234246451650"),
decimal.Decimal("0.004354"),
decimal.Decimal("900.0"),
])
self._do_test(
Numeric(precision=18, scale=12),
numbers,
numbers,
)
@testing.fails_on('mssql+pymssql', 'FIXME: improve pymssql dec handling')
def test_enotation_decimal(self):
"""test exceedingly small decimals.
Decimal reports values with E notation when the exponent
is greater than 6.
"""
numbers = set([
decimal.Decimal('1E-2'),
decimal.Decimal('1E-3'),
decimal.Decimal('1E-4'),
decimal.Decimal('1E-5'),
decimal.Decimal('1E-6'),
decimal.Decimal('1E-7'),
decimal.Decimal('1E-8'),
decimal.Decimal("0.01000005940696"),
decimal.Decimal("0.00000005940696"),
decimal.Decimal("0.00000000000696"),
decimal.Decimal("0.70000000000696"),
decimal.Decimal("696E-12"),
])
self._do_test(
Numeric(precision=18, scale=14),
numbers,
numbers
)
@testing.fails_on("sybase+pyodbc",
"Don't know how do get these values through FreeTDS + Sybase")
@testing.fails_on("firebird", "Precision must be from 1 to 18")
def test_enotation_decimal_large(self):
"""test exceedingly large decimals.
"""
numbers = set([
decimal.Decimal('4E+8'),
decimal.Decimal("5748E+15"),
decimal.Decimal('1.521E+15'),
decimal.Decimal('00000000000000.1E+12'),
])
self._do_test(
Numeric(precision=25, scale=2),
numbers,
numbers
)
@testing.fails_on('sqlite', 'TODO')
@testing.fails_on("firebird", "Precision must be from 1 to 18")
@testing.fails_on("sybase+pysybase", "TODO")
@testing.fails_on('mssql+pymssql', 'FIXME: improve pymssql dec handling')
def test_many_significant_digits(self):
numbers = set([
decimal.Decimal("31943874831932418390.01"),
decimal.Decimal("319438950232418390.273596"),
decimal.Decimal("87673.594069654243"),
])
self._do_test(
Numeric(precision=38, scale=12),
numbers,
numbers
)
@testing.fails_on('oracle+cx_oracle',
"this may be a bug due to the difficulty in handling "
"oracle precision numerics"
)
@testing.fails_on('postgresql+pg8000',
"pg-8000 does native decimal but truncates the decimals.")
def test_numeric_no_decimal(self):
numbers = set([
decimal.Decimal("1.000")
])
self._do_test(
Numeric(precision=5, scale=3),
numbers,
numbers,
check_scale=True
)
class NumericRawSQLTest(fixtures.TestBase):
"""Test what DBAPIs and dialects return without any typing
information supplied at the SQLA level.
"""
def _fixture(self, metadata, type, data):
t = Table('t', metadata,
Column("val", type)
)
metadata.create_all()
t.insert().execute(val=data)
@testing.fails_on('sqlite', "Doesn't provide Decimal results natively")
@testing.provide_metadata
def test_decimal_fp(self):
metadata = self.metadata
t = self._fixture(metadata, Numeric(10, 5), decimal.Decimal("45.5"))
val = testing.db.execute("select val from t").scalar()
assert isinstance(val, decimal.Decimal)
eq_(val, decimal.Decimal("45.5"))
@testing.fails_on('sqlite', "Doesn't provide Decimal results natively")
@testing.provide_metadata
def test_decimal_int(self):
metadata = self.metadata
t = self._fixture(metadata, Numeric(10, 5), decimal.Decimal("45"))
val = testing.db.execute("select val from t").scalar()
assert isinstance(val, decimal.Decimal)
eq_(val, decimal.Decimal("45"))
@testing.provide_metadata
def test_ints(self):
metadata = self.metadata
t = self._fixture(metadata, Integer, 45)
val = testing.db.execute("select val from t").scalar()
assert isinstance(val, (int, long))
eq_(val, 45)
@testing.provide_metadata
def test_float(self):
metadata = self.metadata
t = self._fixture(metadata, Float, 46.583)
val = testing.db.execute("select val from t").scalar()
assert isinstance(val, float)
# some DBAPIs have unusual float handling
if testing.against('oracle+cx_oracle', 'mysql+oursql'):
eq_(round_decimal(val, 3), 46.583)
else:
eq_(val, 46.583)
class IntervalTest(fixtures.TestBase, AssertsExecutionResults):
@classmethod
def setup_class(cls):
global interval_table, metadata
metadata = MetaData(testing.db)
interval_table = Table("intervaltable", metadata,
Column("id", Integer, primary_key=True, test_needs_autoincrement=True),
Column("native_interval", Interval()),
Column("native_interval_args", Interval(day_precision=3, second_precision=6)),
Column("non_native_interval", Interval(native=False)),
)
metadata.create_all()
@engines.close_first
def teardown(self):
interval_table.delete().execute()
@classmethod
def teardown_class(cls):
metadata.drop_all()
def test_non_native_adapt(self):
interval = Interval(native=False)
adapted = interval.dialect_impl(testing.db.dialect)
assert type(adapted) is Interval
assert adapted.native is False
eq_(str(adapted), "DATETIME")
@testing.fails_on("+pg8000", "Not yet known how to pass values of the INTERVAL type")
@testing.fails_on("postgresql+zxjdbc", "Not yet known how to pass values of the INTERVAL type")
@testing.fails_on("oracle+zxjdbc", "Not yet known how to pass values of the INTERVAL type")
def test_roundtrip(self):
small_delta = datetime.timedelta(days=15, seconds=5874)
delta = datetime.timedelta(414)
interval_table.insert().execute(
native_interval=small_delta,
native_interval_args=delta,
non_native_interval=delta
)
row = interval_table.select().execute().first()
eq_(row['native_interval'], small_delta)
eq_(row['native_interval_args'], delta)
eq_(row['non_native_interval'], delta)
@testing.fails_on("oracle+zxjdbc", "Not yet known how to pass values of the INTERVAL type")
def test_null(self):
        interval_table.insert().execute(id=1, native_interval=None, non_native_interval=None)
row = interval_table.select().execute().first()
eq_(row['native_interval'], None)
eq_(row['native_interval_args'], None)
eq_(row['non_native_interval'], None)
class BooleanTest(fixtures.TestBase, AssertsExecutionResults):
@classmethod
def setup_class(cls):
global bool_table
metadata = MetaData(testing.db)
bool_table = Table('booltest', metadata,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('value', Boolean),
Column('unconstrained_value', Boolean(create_constraint=False)),
)
bool_table.create()
@classmethod
def teardown_class(cls):
bool_table.drop()
def teardown(self):
bool_table.delete().execute()
def test_boolean(self):
bool_table.insert().execute(id=1, value=True)
bool_table.insert().execute(id=2, value=False)
bool_table.insert().execute(id=3, value=True)
bool_table.insert().execute(id=4, value=True)
bool_table.insert().execute(id=5, value=True)
bool_table.insert().execute(id=6, value=None)
res = select([bool_table.c.id, bool_table.c.value]).where(
bool_table.c.value == True
).order_by(bool_table.c.id).execute().fetchall()
eq_(res, [(1, True), (3, True), (4, True), (5, True)])
res2 = select([bool_table.c.id, bool_table.c.value]).where(
bool_table.c.value == False).execute().fetchall()
eq_(res2, [(2, False)])
res3 = select([bool_table.c.id, bool_table.c.value]).\
order_by(bool_table.c.id).\
execute().fetchall()
eq_(res3, [(1, True), (2, False),
(3, True), (4, True),
(5, True), (6, None)])
# ensure we're getting True/False, not just ints
assert res3[0][1] is True
assert res3[1][1] is False
@testing.fails_on('mysql',
"The CHECK clause is parsed but ignored by all storage engines.")
@testing.fails_on('mssql',
"FIXME: MS-SQL 2005 doesn't honor CHECK ?!?")
@testing.skip_if(lambda: testing.db.dialect.supports_native_boolean)
def test_constraint(self):
assert_raises((exc.IntegrityError, exc.ProgrammingError),
testing.db.execute,
"insert into booltest (id, value) values(1, 5)")
@testing.skip_if(lambda: testing.db.dialect.supports_native_boolean)
def test_unconstrained(self):
testing.db.execute(
"insert into booltest (id, unconstrained_value) values (1, 5)")
class PickleTest(fixtures.TestBase):
def test_eq_comparison(self):
p1 = PickleType()
for obj in (
{'1':'2'},
pickleable.Bar(5, 6),
pickleable.OldSchool(10, 11)
):
assert p1.compare_values(p1.copy_value(obj), obj)
assert_raises(NotImplementedError,
p1.compare_values,
pickleable.BrokenComparable('foo'),
pickleable.BrokenComparable('foo'))
def test_nonmutable_comparison(self):
p1 = PickleType()
for obj in (
{'1':'2'},
pickleable.Bar(5, 6),
pickleable.OldSchool(10, 11)
):
assert p1.compare_values(p1.copy_value(obj), obj)
class CallableTest(fixtures.TestBase):
@classmethod
def setup_class(cls):
global meta
meta = MetaData(testing.db)
@classmethod
def teardown_class(cls):
meta.drop_all()
def test_callable_as_arg(self):
ucode = util.partial(Unicode)
thing_table = Table('thing', meta,
Column('name', ucode(20))
)
assert isinstance(thing_table.c.name.type, Unicode)
thing_table.create()
def test_callable_as_kwarg(self):
ucode = util.partial(Unicode)
thang_table = Table('thang', meta,
Column('name', type_=ucode(20), primary_key=True)
)
assert isinstance(thang_table.c.name.type, Unicode)
thang_table.create()
| apache-2.0 | 4,155,674,783,077,162,000 | 34.904663 | 100 | 0.532195 | false |
OpenInfoporto/infoporto.odoo.ecommerce | infoporto/odoo/ecommerce/lib/odoo.py | 1 | 5284 | from infoporto.odoo.core.odoo import OdooInstance
class Odoo(object):
# settings
def getCurrency(self):
""" Retrieve currency from Odoo Company settings """
odoo_core = OdooInstance()
# company ID should be dynamic
return odoo_core.read('res.company', 1, ['currency_id'])
# product.category
def getAncestors(self, cid):
""" Retrieve recursively all parents for the given cid """
odoo_core = OdooInstance()
res = []
last_found = cid
while last_found:
category = odoo_core.read('product.category', int(last_found), ['id', 'name', 'parent_id'])
if category['parent_id']:
last_found = category['parent_id'][0]
else:
last_found = False
res.append(dict(id=category['id'], name=category['name']))
return reversed(res)
def getCategory(self, cid):
odoo_core = OdooInstance()
category = odoo_core.read('product.category', [int(cid)], ['id', 'name'])
return category[0]
def getCategories(self, cid=False):
odoo_core = OdooInstance()
if not cid:
args = [('parent_id', '=', False)]
else:
args = [('parent_id', '=', int(cid))]
ids = odoo_core.search('product.category', args)
categories = odoo_core.read('product.category', ids, ['id', 'name'])
return categories
def getProducts(self, cid=False):
odoo_core = OdooInstance()
if not cid:
args = []
else:
args = [('categ_id', '=', int(cid))]
ids = odoo_core.search('product.product', args)
products = odoo_core.read('product.product', ids,
['id', 'name', 'description',
'lst_price', 'image', 'image_medium',
'categ_id', 'taxes_id'])
for product in products:
if product['taxes_id']:
tax = odoo_core.read('account.tax',
int(product['taxes_id'][0]), ['amount'])['amount']
else:
tax = 0.0
product['tax'] = tax
product = self.sanitizeProduct(product)
return products
# product.product
def getProduct(self, pid):
odoo_core = OdooInstance()
product = odoo_core.read('product.product', int(pid),
['id', 'name', 'description',
'lst_price', 'image', 'image_medium',
'categ_id', 'taxes_id'])
if product['taxes_id']:
tax = odoo_core.read('account.tax',
int(product['taxes_id'][0]), ['amount'])['amount']
else:
tax = 0.0
product['tax'] = tax
return self.sanitizeProduct(product)
def getInShowcase(self):
#odoo_core = OdooInstance()
        #TODO: an attribute should be added to Odoo product management
return self.getProducts()
def sanitizeProduct(self, p):
""" Sanitize product for using in templates """
from money import Money
p['price'] = p['lst_price']
p['lst_price'] = Money(amount=p['lst_price'],
currency=self.getCurrency().get('currency_id')[1])
p['price_total'] = Money(amount=p['price'] * (1 + p['tax']),
currency=self.getCurrency().get('currency_id')[1])
p['categ_id'] = p['categ_id'][0]
# Category norm
if p['image']:
p['image'] = ''.join(["data:image/png;base64,", p['image']])
if p['image_medium']:
p['image_medium'] = ''.join(["data:image/png;base64,", p['image_medium']])
return p
def createSalesOrder(self, params, cart):
""" Create a partner if the e-mail weren't found, create a Sales Order
and its Sales Order Line """
odoo_core = OdooInstance()
# check if user exists ...
args = [('email', '=', params['user']['email'])]
ids = odoo_core.search('res.partner', args)
# ... otherwise create it
if not ids:
partner_id = odoo_core.create('res.partner',
dict(name=params['user']['name'],
email=params['user']['email']))
# build sales order
        so = dict(partner_id=ids[0] if ids else partner_id,
state="manual",
amount_total=params['total'] * 1.22,
amount_tax=params['total'] * 1.22 - params['total'],
amount_untaxed=params['total'])
so_id = odoo_core.create('sale.order', so)
for el in cart:
sol = dict(order_id=so_id,
product_uom=1,
price_unit=float(el['price_total']),
product_uom_qty=1,
state='confirmed',
product_id=el['id'],
                       order_partner_id=ids[0] if ids else partner_id,
tax_id=[1])
sol_id = odoo_core.create('sale.order.line', sol)
#FIXME: taxes?!?
return so_id
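# Example usage (sketch, hypothetical data): the shapes below mirror what
# createSalesOrder() reads from its arguments -- params['user']['name'],
# params['user']['email'], params['total'], and cart items carrying 'id'
# and 'price_total'.
#
#   shop = Odoo()
#   cart = [{'id': 42, 'price_total': 12.20}]
#   params = {'user': {'name': 'Jane Doe', 'email': 'jane@example.com'},
#             'total': 10.0}
#   so_id = shop.createSalesOrder(params, cart)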
| gpl-2.0 | 833,721,185,680,812,400 | 32.025 | 103 | 0.488266 | false |
cgeoffroy/son-analyze | son-scikit/tests/unit/son_scikit/hl_prometheus_test.py | 1 | 2358 | # Copyright (c) 2015 SONATA-NFV, Thales Communications & Security
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, Thales Communications & Security
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
# pylint: disable=invalid-name,missing-docstring
import copy
import datetime
import typing # noqa pylint: disable=unused-import
from son_analyze.core import prometheus
import son_scikit.hl_prometheus as hl
def test_build_sonata_df(basic_query_01):
x = prometheus.PrometheusData(basic_query_01)
base_entry = x.raw['data']['result'][0]
new_entry1 = copy.deepcopy(base_entry)
new_entry1['metric']['__name__'] = 'uno'
x.add_entry(new_entry1)
new_entry2 = copy.deepcopy(base_entry)
new_entry2['metric']['__name__'] = 'bis'
new_entry2['values'] = [(i[0], 20+i[1]) for i in new_entry2['values']]
x.add_entry(new_entry2)
new_entry3 = copy.deepcopy(base_entry)
new_entry3['metric']['__name__'] = 'ter'
def trans(t): # pylint: disable=missing-docstring,invalid-name
d = hl.convert_timestamp_to_posix(t[0])
d = d + datetime.timedelta(0, 1)
return (d.timestamp(), 30+t[1])
new_entry3['values'] = [trans(i) for i in new_entry3['values']]
x.add_entry(new_entry3)
tmp = hl.build_sonata_df_by_id(x)
for _, elt in tmp.items():
assert elt.index.freq == 'S'
assert any(elt.notnull())
| apache-2.0 | 4,939,344,521,716,886,000 | 38.3 | 74 | 0.707379 | false |
fnurl/alot | docs/source/generate_commands.py | 1 | 4877 | from __future__ import absolute_import
import sys
import os
HERE = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(HERE, '..', '..'))
from alot.commands import *
from alot.commands import COMMANDS
import alot.buffers
from argparse import HelpFormatter, SUPPRESS, OPTIONAL, ZERO_OR_MORE, ONE_OR_MORE, PARSER, REMAINDER
from alot.utils.argparse import BooleanAction
from gettext import gettext as _
import collections as _collections
import copy as _copy
import os as _os
import re as _re
import sys as _sys
import textwrap as _textwrap
NOTE = ".. CAUTION: THIS FILE IS AUTO-GENERATED!\n\n\n"
class HF(HelpFormatter):
def _metavar_formatter(self, action, default_metavar):
if action.metavar is not None:
result = action.metavar
else:
result = default_metavar
def format(tuple_size):
if isinstance(result, tuple):
return result
else:
return (result, ) * tuple_size
return format
def rstify_parser(parser):
#header = parser.format_usage().strip()
#print '\n\n%s\n' % header + '_' * len(header)
parser.formatter_class = HF
#parser.print_help()
#continue
formatter = parser._get_formatter()
out = ""
# usage
usage = formatter._format_usage(None, parser._actions,
parser._mutually_exclusive_groups,
'').strip()
usage = usage.replace('--','---')
# section header
out += '.. describe:: %s\n\n' % parser.prog
# description
out += ' '*4 + parser.description
out += '\n\n'
if len(parser._positionals._group_actions) == 1:
out += " argument\n"
a = parser._positionals._group_actions[0]
out += ' '*8 + str(parser._positionals._group_actions[0].help)
if a.choices:
out += ". valid choices are: %s." % ','.join(['\`%s\`' % s for s
in a.choices])
if a.default:
out += ". defaults to: '%s'." % a.default
out += '\n\n'
elif len(parser._positionals._group_actions) > 1:
out += " positional arguments\n"
for index, a in enumerate(parser._positionals._group_actions):
out += " %s: %s" % (index, a.help)
if a.choices:
out += ". valid choices are: %s." % ','.join(['\`%s\`' % s for s
in a.choices])
if a.default:
out += ". defaults to: '%s'." % a.default
out += '\n'
out += '\n\n'
if parser._optionals._group_actions:
out += " optional arguments\n"
for a in parser._optionals._group_actions:
switches = [s.replace('--','---') for s in a.option_strings]
out += " :%s: %s" % (', '.join(switches), a.help)
if a.choices and not isinstance(a, BooleanAction):
out += ". Valid choices are: %s" % ','.join(['\`%s\`' % s for s
in a.choices])
if a.default:
out += " (Defaults to: '%s')" % a.default
out += '.\n'
out += '\n'
# epilog
#out += formatter.add_text(parser.epilog)
return out
def get_mode_docs():
docs = {}
b = alot.buffers.Buffer
for entry in alot.buffers.__dict__.values():
if isinstance(entry, type):
if issubclass(entry, b) and not entry == b:
docs[entry.modename] = entry.__doc__.strip()
return docs
if __name__ == "__main__":
modes = []
for mode, modecommands in COMMANDS.items():
modefilename = mode+'.rst'
modefile = open(os.path.join(HERE, 'usage', 'modes', modefilename), 'w')
modefile.write(NOTE)
if mode != 'global':
modes.append(mode)
header = 'Commands in `%s` mode' % mode
modefile.write('%s\n%s\n' % (header, '-' * len(header)))
modefile.write('The following commands are available in %s mode\n\n' % mode)
else:
header = 'Global Commands'
modefile.write('%s\n%s\n' % (header, '-' * len(header)))
modefile.write('The following commands are available globally\n\n')
for cmdstring,struct in modecommands.items():
cls, parser, forced_args = struct
labelline = '.. _cmd.%s.%s:\n\n' % (mode, cmdstring.replace('_',
'-'))
modefile.write(labelline)
modefile.write(rstify_parser(parser))
modefile.close()
| gpl-3.0 | 7,545,414,185,903,129,000 | 36.229008 | 100 | 0.498462 | false |
AmericanResearchInstitute/poweru-server | cmis_storage/amara/flextyper.py | 1 | 5781 | #!/usr/bin/env python
"""
Python compiler from DTLL (ISO DSDL Part 5: Datatypes) to a Python data types script
"""
import os
import re
import sys
import codecs
import optparse
import cStringIO
from xml import sax
from amara import domtools
from Ft.Xml.Domlette import GetAllNs
from Ft.Xml.Domlette import NonvalidatingReader
from xml.dom import EMPTY_NAMESPACE as NULL_NAMESPACE
from xml.dom import EMPTY_PREFIX as NULL_PREFIX
DTLL_NS = "http://www.jenitennison.com/datatypes"
WXSDT_NS = "http://www.w3.org/2001/XMLSchema-datatypes"
#FIXME: Use 4Suite L10N
def _(t): return t
NAMED_PATTERN_PAT = re.compile(r'\(\?\[(\w+)\]')
TOP_SKEL = u'''\
#Warning: this is an auto-generated file. Do not edit unless you're
#sure you know what you're doing
import sys
import re
import codecs
'''
MAIN_SKEL = u'''\
BASE_URI = '%(ns)s'
'''
DT_CLASS_SKEL = u'''\
class %(name)sType:
    _name = '%(name)s'
'''
DT_REGEX_SKEL = u'''\
    regex = re.compile(r"""%(regex)s""")
def __init__(self, value):
m = self.regex.match(value)
if not m:
raise ValueError('Value does not conform to specified regex for data type %%s'%%(self._name))
#Extract named patterns
self.__dict__.update(m.groupdict())
return
'''
DT_NO_REGEX_SKEL = u'''\
def __init__(self, value):
return
'''
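# For illustration only (not part of the original module): given a DTLL
# datatype named "isbn" in namespace "http://example.com/dt" with a single
# regex parse rule, the skeletons above are intended to emit roughly:
#
#   import sys
#   import re
#   import codecs
#
#   BASE_URI = 'http://example.com/dt'
#
#   class isbnType:
#       _name = 'isbn'
#       regex = re.compile(r"""...""")
#       def __init__(self, value):
#           m = self.regex.match(value)
#           ...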
class dtll_processor:
def __init__(self, output_stem):
self.reset()
self.output_stem = output_stem
return
def reset(self):
self.prefixes = {'dtll': DTLL_NS, 'wxs': WXSDT_NS}
#Maps each data type namespace to one module of Python output
self.outputs = {}
return
def execute(self, dtlldoc):
for datatype in domtools.get_elements_by_tag_name_ns(dtlldoc, DTLL_NS, u'datatype'):
self.handle_datatype(datatype)
return
def write_files(self):
module_count = 1
for ns, cstring in self.outputs.items():
fout = open(self.output_stem + str(module_count) + '.py', 'w')
fout.write(cstring.getvalue())
module_count += 1
return
def handle_datatype(self, datatype):
qname = datatype.getAttributeNS(NULL_PREFIX, u'name')
prefix = qname[:qname.find(':') + 1][:-1] or NULL_PREFIX
local = qname[qname.find(':')+1:]
namespace = None
if prefix:
#Specified data type namespace by using a qname
namespace = self.prefixes.get(prefix)
if not namespace:
#Specified data type namespace by in-scope namespaces
namespace = GetAllNs(datatype)[prefix]
output = self.outputs.setdefault(namespace, cStringIO.StringIO())
output.write(TOP_SKEL)
skel_params = {'ns': namespace}
output.write(MAIN_SKEL%skel_params)
skel_params = {'name': local}
output.write(DT_CLASS_SKEL%skel_params)
for parse in domtools.get_elements_by_tag_name_ns(datatype, DTLL_NS, u'parse'):
self.handle_parse(parse, output)
return
def handle_parse(self, parse, output):
regexen = list(domtools.get_elements_by_tag_name_ns(parse, DTLL_NS, u'regex'))
if regexen:
regex = python_regex(domtools.string_value(regexen[0]))
skel_params = {'regex': regex}
output.write(DT_REGEX_SKEL%skel_params)
else:
output.write(DT_NO_REGEX_SKEL)
return
def python_regex(dtllregex):
'''
Convert a DTLL regex to a Python/Perl regex
'''
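    # Example: python_regex(r'(?[year]\d{4})-(?[month]\d{2})') returns
    # r'(?P<year>\d{4})-(?P<month>\d{2})'.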
return NAMED_PATTERN_PAT.subn(lambda m: '(?P<'+m.group(1)+'>', dtllregex)[0]
def run(dtll_doc, output_stem, prep_for_test=0):
#global BOTTOM_SKEL
#if prep_for_test:
# BOTTOM_SKEL = MAIN_SKEL + TEST_SCRIPT_SKEL
#else:
# BOTTOM_SKEL = MAIN_SKEL
proc = dtll_processor(output_stem)
proc.execute(dtll_doc)
proc.write_files()
return
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def command_line(args):
from optparse import OptionParser
usage = "%prog [options] dtll-file"
parser = OptionParser(usage=usage)
parser.add_option("-o", "--dt-module-prefix",
action="store", type="string", dest="dt_modname_stem",
help="file name prefix for data type modules to be generated", metavar="FILE")
parser.add_option("--test",
action="store_true", dest="test_ready", default=0,
help="generate hooks for unit tests in the data type")
#parser.add_option("-q", "--quiet",
# action="store_false", dest="verbose", default=1,
# help="don't print status messages to stdout")
global OPTIONS, ARGS
(OPTIONS, ARGS) = parser.parse_args(args)
return parser
def main(argv=None):
#Ideas borrowed from
# http://www.artima.com/forums/flat.jsp?forum=106&thread=4829
if argv is None:
argv = sys.argv
try:
try:
optparser = command_line(argv)
dtll_fname = ARGS[1]
except KeyboardInterrupt:
pass
except:
raise Usage(optparser.format_help())
enc, dec, inwrap, outwrap = codecs.lookup('utf-8')
output_stem = OPTIONS.dt_modname_stem
if not output_stem:
output_stem = os.path.splitext(dtll_fname)[0] + '-datatypes'
if dtll_fname == '-':
dtllf = sys.stdin
else:
dtllf = open(dtll_fname, 'r')
dtll_doc = NonvalidatingReader.parseStream(dtllf, 'http://example.com')
run(dtll_doc, output_stem, OPTIONS.test_ready)
except Usage, err:
print >>sys.stderr, err.msg
return 2
if __name__ == "__main__":
sys.exit(main(sys.argv))
| bsd-3-clause | 741,136,956,725,452,900 | 28.953368 | 105 | 0.599896 | false |
exhacking/TKinter | jam.py | 1 | 1891 | # file: digital.py
# version: python 2.7
# Digital clock program with Tkinter
# created by Exhacking.net
# update: 02/08/2012 12.13 AM
# import the Tkinter module
from Tkinter import *
# import the time module (to access the current time)
import time
class JamDigital:
""" Kelas Jam Digital"""
def __init__(self, parent, title):
self.parent = parent
self.parent.title(title)
self.parent.protocol("WM_DELETE_WINDOW", self.onTutup)
self.parent.resizable(False, False)
        # create a StringVar to hold the clock text
self.teksJam = StringVar()
self.aturKomponen()
        # start the update loop for the clock display
self.update()
def aturKomponen(self):
mainFrame = Frame(self.parent, bd=10)
mainFrame.pack(fill=BOTH, expand=YES)
        # the clock text is built with a Label widget, so it can be
        # updated at any time.
self.lblJam = Label(mainFrame, textvariable=self.teksJam,
font=('Helvetica', 40))
self.lblJam.pack(expand=YES)
self.lblInfo = Label(mainFrame, text="http://www.exhacking.net",
fg='red')
self.lblInfo.pack(side=TOP, pady=5)
def update(self):
# strftime() converts the local time data
# into the string format we want.
datJam = time.strftime("%H:%M:%S", time.localtime())
# update the clock text to the current time
self.teksJam.set(datJam)
# refresh the clock text every 1 second (1000 ms)
self.timer = self.parent.after(1000, self.update)
def onTutup(self, event=None):
self.parent.destroy()
if __name__ == '__main__':
root = Tk()
app = JamDigital(root, "Jam Digital")
root.mainloop() | gpl-3.0 | -8,135,576,838,944,078,000 | 29.516129 | 72 | 0.594395 | false |
PlanetHunt/satgen | config_step.py | 1 | 3618 | #!/bin/env python
# -*- coding: utf-8 -*-
"""
This class is meant to read step configurations
for the different parameters related to the satellite.
The syntax of the step parameters should be like this:
[Parameter Name]
Start Value = float / int
Step = float / int
End Value = float / int
...
...
...
This configuration will be read and put into a matrix
so different satellites will be created.
"""
from logger import Logger
import ConfigParser
import itertools
import ast
import re
class ConfigStep:
def __init__(self, log_level="ERROR"):
self.logger = Logger(log_level)
self.log = self.logger.get_logger()
self.step_conf = dict()
def set_step_conf(self, conf):
"""
Set the step config
"""
self.step_conf = conf
def get_step_conf(self):
"""
Returns the step conf
"""
return self.step_conf
def get_conf_parser(self):
"""
Generates a ConfigParser instance
"""
return ConfigParser.ConfigParser()
def read_conf(self, address):
"""
Reads the config file contents and
generates a configuration dict
"""
config = self.get_conf_parser()
config.read(address)
sections = config.sections()
for section in sections:
self.get_step_conf()[section] = dict()
for option in config.options(section):
config_value = config.get(section, option, True)
self.get_step_conf()[section][option.title()] = config_value
def add_edge_length(self, a, b):
"""
Add two same-size tuples of edge lengths together.
"""
return tuple(sum(x) for x in zip(a, b))
def convert_to_tuple(self, tuple_str):
"""
converts the given tuple string to a tuple python object
"""
return ast.literal_eval(tuple_str)
def do_steps(self):
"""
Return all the possible values for the different parameters as arrays.
With the help of these results, the combination matrix will be created.
"""
steps = self.get_step_conf()
all_step_config = dict()
for k, v in steps.items():
tmp_list = list()
all_step_config[k] = tmp_list
start = v["Start Value"]
end = v["End Value"]
# special handling of edge length
if(k == "Edge Length"):
start = self.convert_to_tuple(start)
end = self.convert_to_tuple(end)
tmp_list.append(str(start))
while(start != end):
start = self.add_edge_length(
start, self.convert_to_tuple(v["Step"]))
tmp_list.append(str(start))
print start
else:
tmp_list.append(float(start))
while float(start) < float(end):
start = float(start) + float(v["Step"])
tmp_list.append(start)
return all_step_config
def get_combinations(self):
"""
Return all the possible combinations from the given dict;
it uses itertools.product under the hood.
"""
all_steps = self.do_steps()
self.option = [k for k, v in all_steps.items()]
result = itertools.product(*(v for k, v in all_steps.items()))
return result
def get_options(self):
all_steps = self.get_step_conf()
return self.option
# steps = ConfigStep()
# steps.read_conf("steps.cfg")
# print list(steps.get_combinations())
# print steps.get_options()
| mit | 1,390,979,606,951,293,700 | 28.414634 | 77 | 0.563571 | false |
projectweekend/raspberry-pi-io | raspberry_pi_io/io.py | 1 | 1595 | import yaml
from api import DeviceConfig
from gpio import PinManager
from rabbit import AsyncConsumer
class IOService(object):
def __init__(self, config_file):
with open(config_file) as file:
self.config = yaml.safe_load(file)
self.load_device_config()
self.initialize_pin_manager()
self.initialize_consumer()
@staticmethod
def _error(response):
return {'error': 1, 'response': response}
@staticmethod
def _response(response):
return {'error': 0, 'response': response}
def load_device_config(self):
self.device_config = DeviceConfig(
api=self.config['api'],
user_email=self.config['user_email'],
user_key=self.config['user_key'],
device_id=self.config['device_id']).get()
def initialize_pin_manager(self):
self.pin_manager = PinManager(self.device_config['pinConfig'])
def initialize_consumer(self):
def action(instruction):
response = getattr(self.pin_manager, instruction['action'])(int(instruction['pin']))
return {
'response': response
}
self.consumer = AsyncConsumer(
rabbit_url=self.device_config['rabbitURL'],
queue=self.config['device_id'],
exchange='raspberry-pi-io',
exchange_type='direct',
routing_key=self.config['device_id'],
action=action)
def start(self):
try:
self.consumer.run()
except:
self.consumer.stop()
raise
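# A minimal sketch of the YAML config expected by IOService (an assumption
# inferred from the keys read above, not taken from the project docs):
#
# api: https://example.com/api
# user_email: [email protected]
# user_key: your-api-key
# device_id: device-123
#
# The DeviceConfig fetched from that API is expected to carry at least
# 'pinConfig' and 'rabbitURL', which are used in the initializers above.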
| mit | -9,010,794,593,780,724,000 | 27.482143 | 96 | 0.584326 | false |
adamwiggins/cocos2d | test/test_menu_centered.py | 2 | 1158 | #
# Cocos
# http://code.google.com/p/los-cocos/
#
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from pyglet import image
from pyglet.gl import *
from pyglet import font
from cocos.director import *
from cocos.menu import *
from cocos.scene import *
from cocos.layer import *
class MainMenu(Menu):
def __init__( self ):
super( MainMenu, self ).__init__("TITLE" )
self.menu_valign = CENTER
self.menu_halign = CENTER
# then add the items
items = [
( MenuItem('Item 1', self.on_quit ) ),
( MenuItem('Item 2', self.on_quit ) ),
( MenuItem('Item 3', self.on_quit ) ),
( MenuItem('Item 4', self.on_quit ) ),
( MenuItem('Item 5', self.on_quit ) ),
( MenuItem('Item 6', self.on_quit ) ),
( MenuItem('Item 7', self.on_quit ) ),
]
self.create_menu( items, shake(), shake_back() )
def on_quit( self ):
pyglet.app.exit()
if __name__ == "__main__":
pyglet.font.add_directory('.')
director.init( resizable=True)
director.run( Scene( MainMenu() ) )
| bsd-3-clause | -8,547,460,937,734,142,000 | 21.705882 | 65 | 0.556995 | false |
durante987/nonogram_solver | nonogram_solver.py | 1 | 1269 | #!/usr/bin/env python3.8
"""
A program that tries to solve nonograms.
"""
import argparse
import logging
import sys
from nonogram.raster import Raster
from nonogram import solver
def main(args=None):
"""
Read the puzzle from the input file and start solving it.
"""
logging.basicConfig(format='%(message)s',
level=logging.DEBUG if args.debug else logging.WARNING)
with open(args.input_file, 'r') as inp:
raster = Raster.from_file(inp)
solution = solver.solve(raster)
if not solution:
print("Program couldn't find any solution.")
logging.debug(str(raster))
sys.exit(2)
print(str(solution), end='')
if args.bmp_file:
solution.to_bitmap(args.bmp_file)
if __name__ == '__main__':
# pylint: disable=invalid-name
parser = argparse.ArgumentParser(description='Solve nonograms')
parser.add_argument('input_file', help='file specifying the nonogram')
parser.add_argument(
'--bmp', dest='bmp_file', help='write the solution to the specified'
' file in BMP format')
parser.add_argument('--debug', help='enable debug logs',
action='store_true')
main(args=parser.parse_args())
| mit | -9,051,952,016,072,988,000 | 27.840909 | 79 | 0.622537 | false |
Springerle/hovercraft-slides | {{cookiecutter.repo_name}}/setup.py | 1 | 1618 | """A setup shim for 'rituals'"""
import os
import re
import sys
import subprocess
from datetime import datetime
try:
url = subprocess.check_output('git remote get-url origin',
stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError:
url = '{{ cookiecutter.url }}'
else:
url = url.decode('utf-8').strip()
if url.endswith('.git'):
url = url[:-4]
if url.startswith('ssh://'):
url = url[6:]
url = re.sub(r'git@([^:/]+)[:/]', r'https://\1/', url)
try:
now = '{:%Y%m%d-%H%M}'.format(datetime.now())
version = subprocess.check_output("git describe --long --dirty='-{}' --all --always".format(now),
stderr=subprocess.STDOUT, shell=True)
version = version.decode('utf-8').strip().replace('/', '-')
except subprocess.CalledProcessError:
filedate = os.path.getmtime(os.path.join(os.path.dirname(__file__), 'index.rst'))
version = datetime.fromtimestamp(filedate).isoformat('-')[:16].replace(':', '').replace('-', '.')
project = dict(
name=os.path.basename(os.path.dirname(os.path.abspath(__file__))),
version=version,
url=url,
author='{{ cookiecutter.full_name }}',
author_email='{{ cookiecutter.email }}',
license='{{ cookiecutter.license }}',
)
if __name__ == "__main__":
install = True
for arg in sys.argv[1:]:
if arg.startswith('--') and arg.lstrip('-') in project:
print(project.get(arg.lstrip('-')))
install = False
if install:
subprocess.call("pip install -r requirements.txt", shell=True)
| cc0-1.0 | 7,890,750,058,121,851,000 | 33.425532 | 101 | 0.588381 | false |
luskaner/wps-dict | wps_dict/wps_dict/interface/gui/bootstrap.py | 1 | 3244 |
from os.path import dirname, abspath
from .SignalHandler import *
from ...providers.online.downloadable.list import online_downloadable_providers
from ...providers.online.queryable.list import online_queryable_providers
from ...providers.offline.list import offline_providers
from ...tools.list import tools
gi.require_version('Gtk', '3.0')
# noinspection PyPep8,PyUnresolvedReferences
from gi.repository import Gtk
# noinspection PyUnusedLocal
def _set_treeview_row(_, cell, *__):
inconsistent = cell.get_property('inconsistent')
enabled = cell.get_property('active')
cell.set_property('inconsistent', inconsistent)
cell.set_active(enabled)
def _get_column(builder, are_tools=False):
column = Gtk.TreeViewColumn()
name = Gtk.CellRendererText()
enabled = Gtk.CellRendererToggle()
if are_tools:
enabled.connect("toggled", SignalHandler(builder).on_cell_toggled_tools)
else:
enabled.connect("toggled", SignalHandler(builder).on_cell_toggled_providers)
column.pack_start(name, True)
column.pack_start(enabled, True)
column.add_attribute(name, "text", 0)
column.add_attribute(enabled, "active", 1)
if not are_tools:
column.add_attribute(enabled, "inconsistent", 2)
column.set_cell_data_func(enabled, _set_treeview_row)
return column
def generate_provider_tree(builder):
providers_list = builder.get_object("providers_list")
item_offline_providers = providers_list.append(None, ['Offline providers', True, False])
item_online_providers = providers_list.append(None, ['Online providers', True, False])
item_online_downloadable_providers = providers_list.append(item_online_providers,
['Downloadable providers', True, False])
item_online_queryable_providers = providers_list.append(item_online_providers, ['Queryable providers', True, False])
for offline_provider in offline_providers.keys():
providers_list.append(item_offline_providers, [offline_provider, True, False])
for online_provider in online_queryable_providers.keys():
providers_list.append(item_online_queryable_providers, [online_provider, True, False])
for online_downloadable_provider in online_downloadable_providers.keys():
providers_list.append(item_online_downloadable_providers, [online_downloadable_provider, True, False])
builder.get_object("providers_tree_view").get_selection().set_mode(Gtk.SelectionMode.NONE)
builder.get_object("providers_tree_view").append_column(_get_column(builder))
def generate_tool_tree(builder):
tools_list = builder.get_object("tools_list")
for tool in tools.keys():
tools_list.append([tool, True, False])
builder.get_object("tools_tree_view").append_column(_get_column(builder, True))
builder.get_object("tools_tree_view").get_selection().set_mode(Gtk.SelectionMode.NONE)
def init():
builder = Gtk.Builder()
builder.add_from_file(dirname(abspath(__file__)) + "/ui.glade")
builder.connect_signals(SignalHandler(builder))
window = builder.get_object("main_window")
generate_provider_tree(builder)
generate_tool_tree(builder)
window.show_all()
Gtk.main()
| gpl-3.0 | -6,190,776,960,386,111,000 | 38.084337 | 120 | 0.711776 | false |
ykaneko/quantum | quantum/extensions/portbindings.py | 1 | 3587 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from quantum.api import extensions
from quantum.api.v2 import attributes
# The service will return the vif type for the specific port.
VIF_TYPE = 'binding:vif_type'
# In some cases different implementations may be run on different hosts.
# The host on which the port will be allocated.
HOST_ID = 'binding:host_id'
# The profile will be a dictionary that enables the application running
# on the specific host to pass and receive vif port specific information to
# the plugin.
PROFILE = 'binding:profile'
# The capabilities will be a dictionary that enables passing information about
# functionalities Quantum provides. The following value should be provided.
# - port_filter : Boolean value indicating Quantum provides port filtering
# features such as security group and anti MAC/IP spoofing
CAPABILITIES = 'binding:capabilities'
CAP_PORT_FILTER = 'port_filter'
VIF_TYPE_OVS = 'ovs'
VIF_TYPE_BRIDGE = 'bridge'
VIF_TYPE_802_QBG = '802.1qbg'
VIF_TYPE_802_QBH = '802.1qbh'
VIF_TYPE_HYPERV = 'hyperv'
VIF_TYPE_OTHER = 'other'
EXTENDED_ATTRIBUTES_2_0 = {
'ports': {
VIF_TYPE: {'allow_post': False, 'allow_put': False,
'default': attributes.ATTR_NOT_SPECIFIED,
'enforce_policy': True,
'is_visible': True},
HOST_ID: {'allow_post': True, 'allow_put': True,
'default': attributes.ATTR_NOT_SPECIFIED,
'is_visible': True,
'enforce_policy': True},
PROFILE: {'allow_post': True, 'allow_put': True,
'default': attributes.ATTR_NOT_SPECIFIED,
'enforce_policy': True,
'validate': {'type:dict': None},
'is_visible': True},
CAPABILITIES: {'allow_post': False, 'allow_put': False,
'default': attributes.ATTR_NOT_SPECIFIED,
'enforce_policy': True,
'is_visible': True},
}
}
class Portbindings(extensions.ExtensionDescriptor):
"""Extension class supporting port bindings.
This class is used by quantum's extension framework to make
metadata about the port bindings available to external applications.
With admin rights one will be able to update and read the values.
"""
@classmethod
def get_name(cls):
return "Port Binding"
@classmethod
def get_alias(cls):
return "binding"
@classmethod
def get_description(cls):
return "Expose port bindings of a virtual port to external application"
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/binding/api/v1.0"
@classmethod
def get_updated(cls):
return "2012-11-14T10:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
| apache-2.0 | 8,823,158,288,647,582,000 | 34.514851 | 79 | 0.651241 | false |
ntymtsiv/tempest | tempest/services/compute/v3/json/quotas_client.py | 1 | 2980 | # Copyright 2012 NTT Data
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from tempest.common.rest_client import RestClient
from tempest import config
CONF = config.CONF
class QuotasV3ClientJSON(RestClient):
def __init__(self, auth_provider):
super(QuotasV3ClientJSON, self).__init__(auth_provider)
self.service = CONF.compute.catalog_v3_type
def get_quota_set(self, tenant_id):
"""List the quota set for a tenant."""
url = 'os-quota-sets/%s' % str(tenant_id)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['quota_set']
def get_default_quota_set(self, tenant_id):
"""List the default quota set for a tenant."""
url = 'os-quota-sets/%s/defaults' % str(tenant_id)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['quota_set']
def update_quota_set(self, tenant_id, force=None,
metadata_items=None, ram=None, floating_ips=None,
fixed_ips=None, key_pairs=None, instances=None,
security_group_rules=None, cores=None,
security_groups=None):
"""
Updates the tenant's quota limits for one or more resources
"""
post_body = {}
if force is not None:
post_body['force'] = force
if metadata_items is not None:
post_body['metadata_items'] = metadata_items
if ram is not None:
post_body['ram'] = ram
if floating_ips is not None:
post_body['floating_ips'] = floating_ips
if fixed_ips is not None:
post_body['fixed_ips'] = fixed_ips
if key_pairs is not None:
post_body['key_pairs'] = key_pairs
if instances is not None:
post_body['instances'] = instances
if security_group_rules is not None:
post_body['security_group_rules'] = security_group_rules
if cores is not None:
post_body['cores'] = cores
if security_groups is not None:
post_body['security_groups'] = security_groups
post_body = json.dumps({'quota_set': post_body})
resp, body = self.put('os-quota-sets/%s' % str(tenant_id), post_body,
self.headers)
body = json.loads(body)
return resp, body['quota_set']
| apache-2.0 | -6,148,559,382,273,381,000 | 31.747253 | 78 | 0.6 | false |
koss822/misc | Linux/MySettings/myvim/vim/bundle/jedi-vim/pythonx/jedi/test/completion/decorators.py | 1 | 5367 | # -----------------
# normal decorators
# -----------------
def decorator(func):
def wrapper(*args):
return func(1, *args)
return wrapper
@decorator
def decorated(a,b):
return a,b
exe = decorated(set, '')
#? set
exe[1]
#? int()
exe[0]
# more complicated with args/kwargs
def dec(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
@dec
def fu(a, b, c, *args, **kwargs):
return a, b, c, args, kwargs
exe = fu(list, c=set, b=3, d='')
#? list
exe[0]
#? int()
exe[1]
#? set
exe[2]
#? []
exe[3][0].
#? str()
exe[4]['d']
exe = fu(list, set, 3, '', d='')
#? str()
exe[3][0]
# -----------------
# multiple decorators
# -----------------
def dec2(func2):
def wrapper2(first_arg, *args2, **kwargs2):
return func2(first_arg, *args2, **kwargs2)
return wrapper2
@dec2
@dec
def fu2(a, b, c, *args, **kwargs):
return a, b, c, args, kwargs
exe = fu2(list, c=set, b=3, d='str')
#? list
exe[0]
#? int()
exe[1]
#? set
exe[2]
#? []
exe[3][0].
#? str()
exe[4]['d']
# -----------------
# Decorator is a class
# -----------------
def same_func(func):
return func
class Decorator(object):
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
return self.func(1, *args, **kwargs)
@Decorator
def nothing(a,b,c):
return a,b,c
#? int()
nothing("")[0]
#? str()
nothing("")[1]
@same_func
@Decorator
def nothing(a,b,c):
return a,b,c
#? int()
nothing("")[0]
class MethodDecoratorAsClass():
class_var = 3
@Decorator
def func_without_self(arg, arg2):
return arg, arg2
@Decorator
def func_with_self(self, arg):
return self.class_var
#? int()
MethodDecoratorAsClass().func_without_self('')[0]
#? str()
MethodDecoratorAsClass().func_without_self('')[1]
#?
MethodDecoratorAsClass().func_with_self(1)
class SelfVars():
"""Init decorator problem as an instance, #247"""
@Decorator
def __init__(self):
"""
__init__ decorators should be ignored when looking up variables in the
class.
"""
self.c = list
@Decorator
def shouldnt_expose_var(not_self):
"""
Even though in real Python this shouldn't expose the variable, in this
case Jedi exposes the variable, because these kind of decorators are
normally descriptors, which SHOULD be exposed (at least 90%).
"""
not_self.b = 1.0
def other_method(self):
#? float()
self.b
#? list
self.c
# -----------------
# not found decorators (are just ignored)
# -----------------
@not_found_decorator
def just_a_func():
return 1
#? int()
just_a_func()
#? ['__closure__']
just_a_func.__closure__
class JustAClass:
@not_found_decorator2
def a(self):
return 1
#? ['__call__']
JustAClass().a.__call__
#? int()
JustAClass().a()
#? ['__call__']
JustAClass.a.__call__
#? int()
JustAClass.a()
# -----------------
# illegal decorators
# -----------------
class DecoratorWithoutCall():
def __init__(self, func):
self.func = func
@DecoratorWithoutCall
def f():
return 1
# cannot be resolved - should be ignored
@DecoratorWithoutCall(None)
def g():
return 1
#?
f()
#? int()
g()
class X():
@str
def x(self):
pass
def y(self):
#? str()
self.x
#?
self.x()
def decorator_var_args(function, *args):
return function(*args)
@decorator_var_args
def function_var_args(param):
return param
#? int()
function_var_args(1)
# -----------------
# method decorators
# -----------------
def dec(f):
def wrapper(s):
return f(s)
return wrapper
class MethodDecorators():
_class_var = 1
def __init__(self):
self._method_var = ''
@dec
def constant(self):
return 1.0
@dec
def class_var(self):
return self._class_var
@dec
def method_var(self):
return self._method_var
#? float()
MethodDecorators().constant()
#? int()
MethodDecorators().class_var()
#? str()
MethodDecorators().method_var()
class Base():
@not_existing
def __init__(self):
pass
@not_existing
def b(self):
return ''
@dec
def c(self):
return 1
class MethodDecoratorDoesntExist(Base):
"""#272 github: combination of method decorators and super()"""
def a(self):
#?
super().__init__()
#? str()
super().b()
#? int()
super().c()
#? float()
self.d()
@doesnt_exist
def d(self):
return 1.0
# -----------------
# others
# -----------------
def memoize(function):
def wrapper(*args):
if random.choice([0, 1]):
pass
else:
rv = function(*args)
return rv
return wrapper
@memoize
def follow_statement(stmt):
return stmt
# here we had problems with the else clause, because the parent was not right.
#? int()
follow_statement(1)
# -----------------
# class decorators
# -----------------
# class decorators should just be ignored
@should_ignore
class A():
def ret(self):
return 1
#? int()
A().ret()
# -----------------
# On decorator completions
# -----------------
import abc
#? ['abc']
@abc
#? ['abstractmethod']
@abc.abstractmethod
| gpl-3.0 | 978,030,315,969,505,900 | 15.31307 | 78 | 0.533259 | false |
odahoda/noisicaa | noisicaa/music/project_client.py | 1 | 15366 | #!/usr/bin/python3
# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license
import asyncio
from fractions import Fraction
import functools
import getpass
import logging
import random
import socket
from typing import Any, Dict, List, Tuple, Callable, TypeVar
from noisicaa import audioproc
from noisicaa import core
from noisicaa import lv2
from noisicaa import node_db as node_db_lib
from noisicaa import editor_main_pb2
from noisicaa.core import empty_message_pb2
from noisicaa.core import ipc
from noisicaa.core import session_data_pb2
from . import render_pb2
from . import project as project_lib
from . import writer_client
from . import render
from . import player as player_lib
from . import session_value_store
from . import loadtest_generator
logger = logging.getLogger(__name__)
class ProjectClient(object):
def __init__(
self, *,
event_loop: asyncio.AbstractEventLoop,
server: ipc.Server,
manager: ipc.Stub,
tmp_dir: str,
node_db: node_db_lib.NodeDBClient,
urid_mapper: lv2.ProxyURIDMapper
) -> None:
self.__event_loop = event_loop
self.__server = server
self.__tmp_dir = tmp_dir
self.__manager = manager
self.__node_db = node_db
self.__urid_mapper = urid_mapper
self.__pool = None # type: project_lib.Pool
self.__project = None # type: project_lib.BaseProject
self.__writer_client = None # type: writer_client.WriterClient
self.__writer_address = None # type: str
self.__session_values = None # type: session_value_store.SessionValueStore
self.__session_data_listeners = core.CallbackMap[str, Any]()
self.__players = {} # type: Dict[str, player_lib.Player]
self.__cb_endpoint_name = 'project-%016x' % random.getrandbits(63)
self.__cb_endpoint_address = None # type: str
@property
def project(self) -> project_lib.BaseProject:
return self.__project
async def setup(self) -> None:
cb_endpoint = ipc.ServerEndpoint(self.__cb_endpoint_name)
cb_endpoint.add_handler(
'CONTROL_VALUE_CHANGE', self.__handle_control_value_change,
audioproc.ControlValueChange, empty_message_pb2.EmptyMessage)
cb_endpoint.add_handler(
'PLUGIN_STATE_CHANGE', self.__handle_plugin_state_change,
audioproc.PluginStateChange, empty_message_pb2.EmptyMessage)
self.__cb_endpoint_address = await self.__server.add_endpoint(cb_endpoint)
async def cleanup(self) -> None:
players = list(self.__players.values())
self.__players.clear()
for player in players:
await player.cleanup()
if self.__cb_endpoint_address is not None:
await self.__server.remove_endpoint(self.__cb_endpoint_name)
self.__cb_endpoint_address = None
await self.close()
async def __create_writer(self) -> None:
logger.info("Creating writer process...")
create_writer_response = editor_main_pb2.CreateProcessResponse()
await self.__manager.call(
'CREATE_WRITER_PROCESS', None, create_writer_response)
self.__writer_address = create_writer_response.address
logger.info("Connecting to writer process %r...", self.__writer_address)
self.__writer_client = writer_client.WriterClient(
event_loop=self.__event_loop)
await self.__writer_client.setup()
await self.__writer_client.connect(self.__writer_address)
async def __init_session_data(self) -> None:
session_name = '%s.%s' % (getpass.getuser(), socket.getfqdn())
self.__session_values = session_value_store.SessionValueStore(
self.__event_loop, session_name)
await self.__session_values.init(self.__project.data_dir)
for session_value in self.__session_values.values():
self.__session_data_listeners.call(
session_value.name, self.__session_proto_to_py(session_value))
# def get_object(self, obj_id: int) -> model_base.ObjectBase:
# return self.__pool[obj_id]
async def __handle_control_value_change(
self,
request: audioproc.ControlValueChange,
response: empty_message_pb2.EmptyMessage
) -> None:
assert self.__project is not None
logger.info(
"control_value_change(%s, %s, %s, %f, %d)",
request.realm, request.node_id,
request.value.name, request.value.value, request.value.generation)
node = None
for node in self.__project.nodes:
if node.pipeline_node_id == request.node_id:
break
else:
raise ValueError("Invalid node_id '%s'" % request.node_id)
with self.__project.apply_mutations('Change control value "%s"' % request.value.name):
node.set_control_value(
request.value.name, request.value.value, request.value.generation)
async def __handle_plugin_state_change(
self,
request: audioproc.PluginStateChange,
response: empty_message_pb2.EmptyMessage
) -> None:
assert self.__project is not None
node = None
for node in self.__project.nodes:
if node.pipeline_node_id == request.node_id:
break
else:
raise ValueError("Invalid node_id '%s'" % request.node_id)
with self.__project.apply_mutations('Change plugin state'):
node.set_plugin_state(request.state)
async def create(self, path: str) -> None:
assert self.__project is None
await self.__create_writer()
self.__pool = project_lib.Pool(project_cls=project_lib.Project)
self.__project = await project_lib.Project.create_blank(
path=path,
pool=self.__pool,
writer=self.__writer_client,
node_db=self.__node_db)
self.__project.monitor_model_changes()
await self.__init_session_data()
async def create_loadtest(self, path: str, spec: Dict[str, Any]) -> None:
assert self.__project is None
await self.__create_writer()
self.__pool = project_lib.Pool(project_cls=project_lib.Project)
self.__project = await project_lib.Project.create_blank(
path=path,
pool=self.__pool,
writer=self.__writer_client,
node_db=self.__node_db)
self.__project.monitor_model_changes()
with self.__project.apply_mutations('Fill it with junk'):
loadtest_generator.fill_project(self.__project, spec)
await self.__init_session_data()
async def create_inmemory(self) -> None:
assert self.__project is None
self.__pool = project_lib.Pool()
self.__project = self.__pool.create(
project_lib.BaseProject, node_db=self.__node_db)
self.__pool.set_root(self.__project)
self.__project.monitor_model_changes()
await self.__init_session_data()
async def open(self, path: str) -> None:
assert self.__project is None
await self.__create_writer()
self.__pool = project_lib.Pool(project_cls=project_lib.Project)
self.__project = await project_lib.Project.open(
path=path,
pool=self.__pool,
writer=self.__writer_client,
node_db=self.__node_db)
self.__project.monitor_model_changes()
await self.__init_session_data()
async def close(self) -> None:
if self.__project is not None:
await self.__project.close()
self.__project = None
self.__pool = None
if self.__writer_client is not None:
await self.__writer_client.close()
await self.__writer_client.cleanup()
self.__writer_client = None
if self.__writer_address is not None:
await self.__manager.call(
'SHUTDOWN_PROCESS',
editor_main_pb2.ShutdownProcessRequest(
address=self.__writer_address))
self.__writer_address = None
async def create_player(self, *, audioproc_address: str) -> Tuple[str, str]:
assert self.__project is not None
logger.info("Creating audioproc client...")
audioproc_client = audioproc.AudioProcClient(
self.__event_loop, self.__server, self.__urid_mapper)
await audioproc_client.setup()
logger.info("Connecting audioproc client...")
await audioproc_client.connect(audioproc_address)
realm_name = 'project:%s' % self.__project.id
logger.info("Creating realm '%s'...", realm_name)
await audioproc_client.create_realm(
name=realm_name,
parent='root',
enable_player=True,
callback_address=self.__cb_endpoint_address)
player = player_lib.Player(
project=self.__project,
callback_address=self.__cb_endpoint_address,
event_loop=self.__event_loop,
audioproc_client=audioproc_client,
realm=realm_name,
session_values=self.__session_values)
await player.setup()
self.__players[player.id] = player
return (player.id, player.realm)
async def delete_player(self, player_id: str) -> None:
player = self.__players.pop(player_id)
await player.cleanup()
if player.audioproc_client is not None:
if player.realm is not None:
logger.info("Deleting realm '%s'...", player.realm)
await player.audioproc_client.delete_realm(name=player.realm)
await player.audioproc_client.disconnect()
await player.audioproc_client.cleanup()
async def create_plugin_ui(self, player_id: str, node_id: str) -> Tuple[int, Tuple[int, int]]:
player = self.__players[player_id]
return await player.create_plugin_ui(node_id)
async def delete_plugin_ui(self, player_id: str, node_id: str) -> None:
player = self.__players[player_id]
await player.delete_plugin_ui(node_id)
async def update_player_state(self, player_id: str, state: audioproc.PlayerState) -> None:
player = self.__players[player_id]
await player.update_state(state)
async def dump(self) -> None:
raise NotImplementedError
# await self._stub.call('DUMP')
async def render(
self, callback_address: str, render_settings: render_pb2.RenderSettings
) -> None:
assert self.__project is not None
renderer = render.Renderer(
project=self.__project,
tmp_dir=self.__tmp_dir,
server=self.__server,
manager=self.__manager,
event_loop=self.__event_loop,
callback_address=callback_address,
render_settings=render_settings,
urid_mapper=self.__urid_mapper,
)
await renderer.run()
def add_session_data_listener(
self, key: str, func: Callable[[Any], None]) -> core.Listener:
return self.__session_data_listeners.add(key, func)
def __session_proto_to_py(self, session_value: session_data_pb2.SessionValue) -> Any:
value_type = session_value.WhichOneof('type')
if value_type == 'string_value':
return session_value.string_value
elif value_type == 'bytes_value':
return session_value.bytes_value
elif value_type == 'bool_value':
return session_value.bool_value
elif value_type == 'int_value':
return session_value.int_value
elif value_type == 'double_value':
return session_value.double_value
elif value_type == 'fraction_value':
return Fraction(
session_value.fraction_value.numerator,
session_value.fraction_value.denominator)
elif value_type == 'musical_time_value':
return audioproc.MusicalTime.from_proto(session_value.musical_time_value)
elif value_type == 'musical_duration_value':
return audioproc.MusicalDuration.from_proto(session_value.musical_duration_value)
else:
raise ValueError(session_value)
def set_session_value(self, key: str, value: Any) -> None:
self.set_session_values({key: value})
def set_session_values(self, data: Dict[str, Any]) -> None:
session_values = [] # type: List[session_data_pb2.SessionValue]
for key, value in data.items():
session_value = session_data_pb2.SessionValue()
session_value.name = key
if isinstance(value, str):
session_value.string_value = value
elif isinstance(value, bytes):
session_value.bytes_value = value
elif isinstance(value, bool):
session_value.bool_value = value
elif isinstance(value, int):
session_value.int_value = value
elif isinstance(value, float):
session_value.double_value = value
elif isinstance(value, Fraction):
session_value.fraction_value.numerator = value.numerator
session_value.fraction_value.denominator = value.denominator
elif isinstance(value, audioproc.MusicalTime):
session_value.musical_time_value.numerator = value.numerator
session_value.musical_time_value.denominator = value.denominator
elif isinstance(value, audioproc.MusicalDuration):
session_value.musical_duration_value.numerator = value.numerator
session_value.musical_duration_value.denominator = value.denominator
else:
raise ValueError("%s: %s" % (key, type(value)))
session_values.append(session_value)
task = self.__event_loop.create_task(self.__session_values.set_values(session_values))
task.add_done_callback(functools.partial(self.__set_session_values_done, data))
def __set_session_values_done(self, data: Dict[str, Any], task: asyncio.Task) -> None:
for key, value in data.items():
self.__session_data_listeners.call(key, value)
T = TypeVar('T')
def get_session_value(self, key: str, default: T) -> T: # pylint: disable=undefined-variable
try:
session_value = self.__session_values.get_value(key)
except KeyError:
return default
else:
return self.__session_proto_to_py(session_value)
| gpl-2.0 | 6,034,009,819,632,288,000 | 38.099237 | 98 | 0.617988 | false |
pFernbach/hpp-rbprm-corba | src/hpp/corbaserver/rbprm/client.py | 1 | 1639 | #!/usr/bin/env python
#
# Copyright (c) 2014 CNRS
# Author: Steve Tonneau
#
# This file is part of hpp-rbprm-corba.
# hpp-rbprm-corba is free software: you can redistribute it
# and/or modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# hpp-rbprm-corba is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Lesser Public License for more details. You should have
# received a copy of the GNU Lesser General Public License along with
# hpp-manipulation-corba. If not, see
# <http://www.gnu.org/licenses/>.
from hpp.corbaserver.client import Client as _Parent
from hpp_idl.hpp.corbaserver.rbprm import RbprmBuilder
class Client (_Parent):
"""
Connect and create clients for hpp-rbprm library.
"""
defaultClients = {
'rbprmbuilder' : RbprmBuilder,
}
def __init__(self, url = None, context = "corbaserver"):
"""
Initialize CORBA and create default clients.
:param url: URL in the IOR, corbaloc, corbalocs, and corbanames formats.
For a remote corba server, use
url = "corbaloc:iiop:<host>:<port>/NameService"
"""
self._initOrb (url)
self._makeClients ("rbprm", self.defaultClients, context)
# self.rbprmbuilder is created by self._makeClients
# The old code stored the object as self.rbprm
# Make it backward compatible.
self.rbprm = self.rbprmbuilder
| lgpl-3.0 | -2,557,956,103,731,145,700 | 35.422222 | 76 | 0.706528 | false |
kezabelle/clastic | clastic/tests/test_render.py | 1 | 4396 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from nose.tools import eq_, ok_
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse
from clastic import Application
from clastic.render import (JSONRender,
JSONPRender,
render_basic,
BasicRender,
Table,
TabularRender)
from common import (hello_world_str,
hello_world_html,
hello_world_ctx,
complex_context)
import json
_CUR_DIR = os.path.dirname(__file__)
def test_json_render(render_json=None):
if render_json is None:
render_json = JSONRender(dev_mode=True)
app = Application([('/', hello_world_ctx, render_json),
('/<name>/', hello_world_ctx, render_json),
('/beta/<name>/', complex_context, render_json)])
yield ok_, callable(app.routes[0]._execute)
yield ok_, callable(app.routes[0]._render)
c = Client(app, BaseResponse)
resp = c.get('/')
yield eq_, resp.status_code, 200
resp_data = json.loads(resp.data)
yield eq_, resp_data['name'], 'world'
resp = c.get('/Kurt/')
yield eq_, resp.status_code, 200
resp_data = json.loads(resp.data)
yield eq_, resp_data['name'], 'Kurt'
resp = c.get('/beta/Rajkumar/')
yield eq_, resp.status_code, 200
resp_data = json.loads(resp.data)
yield eq_, resp_data['name'], 'Rajkumar'
yield ok_, resp_data['date']
yield ok_, len(resp_data) > 4
def test_jsonp_render(render_json=None):
if render_json is None:
render_json = JSONPRender(qp_name='callback', dev_mode=True)
app = Application([('/', hello_world_ctx, render_json),
('/<name>/', hello_world_ctx, render_json),
('/beta/<name>/', complex_context, render_json)])
c = Client(app, BaseResponse)
resp = c.get('/?callback=test_callback')
yield eq_, resp.status_code, 200
yield ok_, resp.data.startswith('test_callback')
yield ok_, 'world' in resp.data
resp = c.get('/?callback=test_callback')
yield eq_, resp.status_code, 200
yield ok_, resp.data.startswith('test_callback')
yield ok_, 'world' in resp.data
#def test_default_json_render():
# from clastic.render import render_json
# for t in test_json_render(render_json):
# yield t
def test_default_render():
app = Application([('/', hello_world_ctx, render_basic),
('/<name>/', hello_world_ctx, render_basic),
('/text/<name>/', hello_world_str, render_basic),
('/html/<name>/', hello_world_html, render_basic),
('/beta/<name>/', complex_context, render_basic)])
yield ok_, callable(app.routes[0]._execute)
yield ok_, callable(app.routes[0]._render)
c = Client(app, BaseResponse)
resp = c.get('/') # test simple json with endpoint default
yield eq_, resp.status_code, 200
resp_data = json.loads(resp.data)
yield eq_, resp_data['name'], 'world'
resp = c.get('/Kurt/') # test simple json with url param
yield eq_, resp.status_code, 200
resp_data = json.loads(resp.data)
yield eq_, resp_data['name'], 'Kurt'
resp = c.get('/beta/Rajkumar/') # test fancy json
yield eq_, resp.status_code, 200
resp_data = json.loads(resp.data)
yield eq_, resp_data['name'], 'Rajkumar'
yield ok_, resp_data['date']
yield ok_, len(resp_data) > 4
resp = c.get('/text/Noam/') # test text
yield eq_, resp.status_code, 200
yield eq_, resp.data, 'Hello, Noam!'
resp = c.get('/html/Asia/') # test basic html
yield eq_, resp.status_code, 200
yield ok_, 'text/html' in resp.headers['Content-Type']
def test_custom_table_render():
class BoldHTMLTable(Table):
def get_cell_html(self, value):
std_html = super(BoldHTMLTable, self).get_cell_html(value)
return '<b>' + std_html + '</b>'
custom_tr = TabularRender(table_type=BoldHTMLTable)
custom_render = BasicRender(tabular_render=custom_tr)
app = Application([('/', hello_world_ctx, custom_render)])
c = Client(app, BaseResponse)
resp = c.get('/?format=html')
yield eq_, resp.status_code, 200
assert '<b>' in resp.data
| bsd-3-clause | -1,876,823,114,414,563,000 | 32.30303 | 73 | 0.58758 | false |
lwbrooke/slackbot | fangorn/utils.py | 1 | 1541 | import click
def merge_many_dicts(*configs):
config = {}
for c in configs:
config = merge_dicts(config, c)
return config
def merge_dicts(a, b):
if isinstance(a, dict) and isinstance(b, dict):
for k, v in b.items():
if k not in a:
a[k] = v
else:
a[k] = merge_dicts(a[k], v)
return a
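# Example of the merge behaviour (illustrative values):
# merge_dicts({'a': {'x': 1}, 'c': 0}, {'a': {'y': 2}, 'b': 3})
# -> {'a': {'x': 1, 'y': 2}, 'c': 0, 'b': 3}
# Values already present in the first dict win on conflicts; nested dicts are
# merged recursively, and merge_many_dicts folds several configs this way.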
path_type = click.Path(file_okay=False, exists=True, resolve_path=True)
class FloatRange(click.ParamType):
name = 'float range'
def __init__(self, min=None, max=None, clamp=False):
if min is not None and max is not None and min > max:
raise ValueError('min {} must be less than or equal to max {}'.format(min, max))
self._min = min
self._max = max
self._clamp = clamp
def convert(self, value, param, ctx):
try:
value = float(value)
except ValueError:
self.fail('{} is not a valid float'.format(value), param, ctx)
if self._clamp:
value = max(value, self._min) if self._min is not None else value
value = min(value, self._max) if self._max is not None else value
else:
if self._min is not None and value < self._min:
self.fail('{} must be greater than or equal to {}'.format(value, self._min), param, ctx)
if self._max is not None and value > self._max:
self.fail('{} must be less than or equal to {}'.format(value, self._max), param, ctx)
return value
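# Illustrative usage (the option name and bounds are assumptions):
# @click.command()
# @click.option('--ratio', type=FloatRange(min=0.0, max=1.0, clamp=True))
# def cli(ratio):
# click.echo(ratio)
# With clamp=True an out-of-range value is pulled back into [min, max];
# with clamp=False the same input triggers self.fail() and a usage error.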
| apache-2.0 | 5,728,246,418,293,037,000 | 28.634615 | 104 | 0.556781 | false |
ClearcodeHQ/mirakuru | tests/executors/test_http_executor.py | 1 | 6031 | """HTTP Executor tests."""
import sys
import socket
from functools import partial
from http.client import HTTPConnection, OK
from typing import Dict, Any, Union
from unittest.mock import patch
import pytest
from mirakuru import HTTPExecutor, TCPExecutor
from mirakuru import TimeoutExpired, AlreadyRunning
from tests import TEST_SERVER_PATH, HTTP_SERVER_CMD
HOST = "127.0.0.1"
PORT = 7987
HTTP_NORMAL_CMD = f"{HTTP_SERVER_CMD} {PORT}"
HTTP_SLOW_CMD = f"{sys.executable} {TEST_SERVER_PATH} {HOST}:{PORT}"
slow_server_executor = partial( # pylint: disable=invalid-name
HTTPExecutor,
HTTP_SLOW_CMD,
f"http://{HOST}:{PORT}/",
)
def connect_to_server() -> None:
"""Connect to http server and assert 200 response."""
conn = HTTPConnection(HOST, PORT)
conn.request("GET", "/")
assert conn.getresponse().status == OK
conn.close()
def test_executor_starts_and_waits() -> None:
"""Test if process awaits for HEAD request to be completed."""
command = f'bash -c "sleep 3 && {HTTP_NORMAL_CMD}"'
executor = HTTPExecutor(command, f"http://{HOST}:{PORT}/", timeout=20)
executor.start()
assert executor.running() is True
connect_to_server()
executor.stop()
# check proper __str__ and __repr__ rendering:
assert "HTTPExecutor" in repr(executor)
assert command in str(executor)
def test_shell_started_server_stops() -> None:
"""Test if executor terminates properly executor with shell=True."""
executor = HTTPExecutor(
HTTP_NORMAL_CMD, f"http://{HOST}:{PORT}/", timeout=20, shell=True
)
with pytest.raises(socket.error):
connect_to_server()
with executor:
assert executor.running() is True
connect_to_server()
assert executor.running() is False
with pytest.raises(socket.error):
connect_to_server()
@pytest.mark.parametrize("method", ("HEAD", "GET", "POST"))
def test_slow_method_server_starting(method: str) -> None:
"""
Test whether or not executor awaits for slow starting servers.
Simple example. You run Gunicorn and it is working but you have to
wait for worker processes.
"""
http_method_slow_cmd = (
f"{sys.executable} {TEST_SERVER_PATH} {HOST}:{PORT} False {method}"
)
with HTTPExecutor(
http_method_slow_cmd,
f"http://{HOST}:{PORT}/",
method=method,
timeout=30,
) as executor:
assert executor.running() is True
connect_to_server()
def test_slow_post_payload_server_starting() -> None:
"""
Test whether or not executor awaits for slow starting servers.
Simple example. You run Gunicorn and it is working but you have to
wait for worker processes.
"""
http_method_slow_cmd = (
f"{sys.executable} {TEST_SERVER_PATH} {HOST}:{PORT} False Key"
)
with HTTPExecutor(
http_method_slow_cmd,
f"http://{HOST}:{PORT}/",
method="POST",
timeout=30,
payload={"key": "hole"},
) as executor:
assert executor.running() is True
connect_to_server()
@pytest.mark.parametrize("method", ("HEAD", "GET", "POST"))
def test_slow_method_server_timed_out(method: str) -> None:
"""Check if timeout properly expires."""
http_method_slow_cmd = (
f"{sys.executable} {TEST_SERVER_PATH} {HOST}:{PORT} False {method}"
)
executor = HTTPExecutor(
http_method_slow_cmd, f"http://{HOST}:{PORT}/", method=method, timeout=1
)
with pytest.raises(TimeoutExpired) as exc:
executor.start()
assert executor.running() is False
assert "timed out after" in str(exc.value)
def test_fail_if_other_running() -> None:
"""Test raising AlreadyRunning exception when port is blocked."""
executor = HTTPExecutor(
HTTP_NORMAL_CMD,
f"http://{HOST}:{PORT}/",
)
executor2 = HTTPExecutor(
HTTP_NORMAL_CMD,
f"http://{HOST}:{PORT}/",
)
with executor:
assert executor.running() is True
with pytest.raises(AlreadyRunning):
executor2.start()
with pytest.raises(AlreadyRunning) as exc:
with executor2:
pass
assert "seems to be already running" in str(exc.value)
@patch.object(HTTPExecutor, "DEFAULT_PORT", PORT)
def test_default_port() -> None:
"""
Test default port for the base TCP check.
Check if HTTP executor fills in the default port for the TCP check
from the base class if no port is provided in the URL.
"""
executor = HTTPExecutor(HTTP_NORMAL_CMD, f"http://{HOST}/")
assert executor.url.port is None
assert executor.port == PORT
assert TCPExecutor.pre_start_check(executor) is False
executor.start()
assert TCPExecutor.pre_start_check(executor) is True
executor.stop()
@pytest.mark.parametrize(
"accepted_status, expected_timeout",
(
# default behaviour - only 2XX HTTP status codes are accepted
(None, True),
# one explicit integer status code
(200, True),
# one explicit status code as a string
("404", False),
# status codes as a regular expression
(r"(2|4)\d\d", False),
# status codes as a regular expression
("(200|404)", False),
),
)
def test_http_status_codes(
accepted_status: Union[None, int, str], expected_timeout: bool
) -> None:
"""
Test how 'status' argument influences executor start.
:param int|str accepted_status: Executor 'status' value
:param bool expected_timeout: if Executor raises TimeoutExpired or not
"""
kwargs: Dict[str, Any] = {
"command": HTTP_NORMAL_CMD,
"url": f"http://{HOST}:{PORT}/badpath",
"timeout": 2,
}
if accepted_status:
kwargs["status"] = accepted_status
executor = HTTPExecutor(**kwargs)
if not expected_timeout:
executor.start()
executor.stop()
else:
with pytest.raises(TimeoutExpired):
executor.start()
executor.stop()
| lgpl-3.0 | 5,730,340,418,585,702,000 | 26.792627 | 80 | 0.633228 | false |
franklongford/alias | alias/src/tests/test_intrinsic_sampling_method.py | 1 | 1910 | from unittest import TestCase
import numpy as np
from alias.src.intrinsic_surface import xi, dxy_dxi, ddxy_ddxi
class TestISM(TestCase):
def setUp(self):
self.qm = 8
self.qu = 5
self.n_waves = 2 * self.qm + 1
self.coeff = np.ones(self.n_waves ** 2) * 0.5
self.dim = [10., 12.]
self.pos = np.arange(10)
self.u_array = np.arange(-self.qm, self.qm + 1)
def test_xi(self):
xi_array = xi(
self.pos, self.pos, self.coeff,
self.qm, self.qu, self.dim
)
self.assertEqual((10,), xi_array.shape)
for index, x in enumerate(self.pos):
array = xi(x, x, self.coeff, self.qm, self.qu, self.dim)
self.assertTrue(np.allclose(array, xi_array[index]))
def test_dxy_dxi(self):
dx_dxi_array, dy_dxi_array = dxy_dxi(
self.pos, self.pos, self.coeff, self.qm, self.qu, self.dim
)
self.assertEqual((10,), dx_dxi_array.shape)
self.assertEqual((10,), dy_dxi_array.shape)
for index, x in enumerate(self.pos):
dx_dxi, dy_dxi = dxy_dxi(
x, x, self.coeff, self.qm, self.qu, self.dim)
self.assertTrue(np.allclose(dx_dxi, dx_dxi_array[index]))
self.assertTrue(np.allclose(dy_dxi, dy_dxi_array[index]))
def test_ddxy_ddxi(self):
ddx_ddxi_array, ddy_ddxi_array = ddxy_ddxi(
self.pos, self.pos, self.coeff, self.qm, self.qu, self.dim
)
self.assertEqual((10,), ddx_ddxi_array.shape)
self.assertEqual((10,), ddy_ddxi_array.shape)
for index, x in enumerate(self.pos):
ddx_ddxi, ddy_ddxi = ddxy_ddxi(
x, x, self.coeff, self.qm, self.qu, self.dim)
self.assertTrue(np.allclose(ddx_ddxi, ddx_ddxi_array[index]))
self.assertTrue(np.allclose(ddy_ddxi, ddy_ddxi_array[index]))
| bsd-2-clause | 5,422,547,661,543,708,000 | 29.31746 | 73 | 0.568586 | false |
alexmogavero/home-assistant | homeassistant/components/knx.py | 1 | 12903 | """
Support for KNX components.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/knx/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP, CONF_HOST, CONF_PORT)
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['knxip==0.4']
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOST = '0.0.0.0'
DEFAULT_PORT = 3671
DOMAIN = 'knx'
EVENT_KNX_FRAME_RECEIVED = 'knx_frame_received'
KNXTUNNEL = None
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Set up the connection to the KNX IP interface."""
global KNXTUNNEL
from knxip.ip import KNXIPTunnel
from knxip.core import KNXException
host = config[DOMAIN].get(CONF_HOST)
port = config[DOMAIN].get(CONF_PORT)
if host == '0.0.0.0':
_LOGGER.debug("Will try to auto-detect KNX/IP gateway")
KNXTUNNEL = KNXIPTunnel(host, port)
try:
res = KNXTUNNEL.connect()
_LOGGER.debug("Res = %s", res)
if not res:
_LOGGER.error("Could not connect to KNX/IP interface %s", host)
return False
except KNXException as ex:
_LOGGER.exception("Can't connect to KNX/IP interface: %s", ex)
KNXTUNNEL = None
return False
_LOGGER.info("KNX IP tunnel to %s:%i established", host, port)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, close_tunnel)
return True
def close_tunnel(_data):
"""Close the NKX tunnel connection on shutdown."""
global KNXTUNNEL
KNXTUNNEL.disconnect()
KNXTUNNEL = None
class KNXConfig(object):
"""Handle the fetching of configuration from the config file."""
def __init__(self, config):
"""Initialize the configuration."""
from knxip.core import parse_group_address
self.config = config
self.should_poll = config.get('poll', True)
if config.get('address'):
self._address = parse_group_address(config.get('address'))
else:
self._address = None
if self.config.get('state_address'):
self._state_address = parse_group_address(
self.config.get('state_address'))
else:
self._state_address = None
@property
def name(self):
"""Return the name given to the entity."""
return self.config['name']
@property
def address(self):
"""Return the address of the device as an integer value.
3 types of addresses are supported:
integer - 0-65535
2 level - a/b
3 level - a/b/c
"""
return self._address
@property
def state_address(self):
"""Return the group address the device sends its current state to.
Some KNX devices can send the current state to a separate
group address. This makes sense e.g. when an actuator can
be switched but also has a timer functionality.
"""
return self._state_address
class KNXGroupAddress(Entity):
"""Representation of devices connected to a KNX group address."""
def __init__(self, hass, config):
"""Initialize the device."""
self._config = config
self._state = False
self._data = None
_LOGGER.debug(
"Initalizing KNX group address for %s (%s)",
self.name, self.address
)
def handle_knx_message(addr, data):
"""Handle an incoming KNX frame.
Handle an incoming frame and update our status if it contains
information relating to this device.
"""
if (addr == self.state_address) or (addr == self.address):
self._state = data[0]
self.schedule_update_ha_state()
KNXTUNNEL.register_listener(self.address, handle_knx_message)
if self.state_address:
KNXTUNNEL.register_listener(self.state_address, handle_knx_message)
@property
def name(self):
"""Return the entity's display name."""
return self._config.name
@property
def config(self):
"""Return the entity's configuration."""
return self._config
@property
def should_poll(self):
"""Return the state of the polling, if needed."""
return self._config.should_poll
@property
def is_on(self):
"""Return True if the value is not 0 is on, else False."""
return self._state != 0
@property
def address(self):
"""Return the KNX group address."""
return self._config.address
@property
def state_address(self):
"""Return the KNX group address."""
return self._config.state_address
@property
def cache(self):
"""Return the name given to the entity."""
return self._config.config.get('cache', True)
def group_write(self, value):
"""Write to the group address."""
KNXTUNNEL.group_write(self.address, [value])
def update(self):
"""Get the state from KNX bus or cache."""
from knxip.core import KNXException
try:
if self.state_address:
res = KNXTUNNEL.group_read(
self.state_address, use_cache=self.cache)
else:
res = KNXTUNNEL.group_read(self.address, use_cache=self.cache)
if res:
self._state = res[0]
self._data = res
else:
_LOGGER.debug(
"%s: unable to read from KNX address: %s (None)",
self.name, self.address
)
except KNXException:
_LOGGER.exception(
"%s: unable to read from KNX address: %s",
self.name, self.address
)
return False
class KNXMultiAddressDevice(Entity):
"""Representation of devices connected to a multiple KNX group address.
This is needed for devices like dimmers or shutter actuators as they have
to be controlled by multiple group addresses.
"""
def __init__(self, hass, config, required, optional=None):
"""Initialize the device.
The `required` argument lists the required address names. E.g. for a dimming
actuator, the expected settings might look like:
onoff_address: 0/0/1
brightness_address: 0/0/2
"""
from knxip.core import parse_group_address, KNXException
self.names = {}
self.values = {}
self._config = config
self._state = False
self._data = None
_LOGGER.debug(
"%s: initalizing KNX multi address device",
self.name
)
settings = self._config.config
if config.address:
_LOGGER.debug(
"%s: base address: address=%s",
self.name, settings.get('address')
)
self.names[config.address] = 'base'
if config.state_address:
_LOGGER.debug(
"%s, state address: state_address=%s",
self.name, settings.get('state_address')
)
self.names[config.state_address] = 'state'
# parse required addresses
for name in required:
paramname = '{}{}'.format(name, '_address')
addr = settings.get(paramname)
if addr is None:
_LOGGER.error(
"%s: Required KNX group address %s missing",
self.name, paramname
)
raise KNXException(
"%s: Group address for {} missing in "
"configuration for {}".format(
self.name, paramname
)
)
_LOGGER.debug(
"%s: (required parameter) %s=%s",
self.name, paramname, addr
)
addr = parse_group_address(addr)
self.names[addr] = name
# parse optional addresses
for name in optional:
paramname = '{}{}'.format(name, '_address')
addr = settings.get(paramname)
_LOGGER.debug(
"%s: (optional parameter) %s=%s",
self.name, paramname, addr
)
if addr:
try:
addr = parse_group_address(addr)
except KNXException:
_LOGGER.exception(
"%s: cannot parse group address %s",
self.name, addr
)
self.names[addr] = name
@property
def name(self):
"""Return the entity's display name."""
return self._config.name
@property
def config(self):
"""Return the entity's configuration."""
return self._config
@property
def should_poll(self):
"""Return the state of the polling, if needed."""
return self._config.should_poll
@property
def cache(self):
"""Return the name given to the entity."""
return self._config.config.get('cache', True)
def has_attribute(self, name):
"""Check if the attribute with the given name is defined.
This is mostly important for optional addresses.
"""
for attributename in self.names.values():
if attributename == name:
return True
return False
def set_percentage(self, name, percentage):
"""Set a percentage in knx for a given attribute.
DPT_Scaling / DPT 5.001 is a single byte scaled percentage
"""
percentage = abs(percentage) # only accept positive values
scaled_value = percentage * 255 / 100
value = min(255, scaled_value)
return self.set_int_value(name, value)
def get_percentage(self, name):
"""Get a percentage from knx for a given attribute.
DPT_Scaling / DPT 5.001 is a single byte scaled percentage
"""
value = self.get_int_value(name)
percentage = round(value * 100 / 255)
return percentage
def set_int_value(self, name, value, num_bytes=1):
"""Set an integer value for a given attribute."""
# KNX packets are big endian
value = round(value) # only accept integers
b_value = value.to_bytes(num_bytes, byteorder='big')
return self.set_value(name, list(b_value))
def get_int_value(self, name):
"""Get an integer value for a given attribute."""
# KNX packets are big endian
summed_value = 0
raw_value = self.value(name)
try:
# convert raw value in bytes
for val in raw_value:
summed_value *= 256
summed_value += val
except TypeError:
# pknx returns a non-iterable type for unsuccessful reads
pass
return summed_value
def value(self, name):
"""Return the value to a given named attribute."""
from knxip.core import KNXException
addr = None
for attributeaddress, attributename in self.names.items():
if attributename == name:
addr = attributeaddress
if addr is None:
_LOGGER.error("%s: attribute '%s' undefined",
self.name, name)
_LOGGER.debug(
"%s: defined attributes: %s",
self.name, str(self.names)
)
return False
try:
res = KNXTUNNEL.group_read(addr, use_cache=self.cache)
except KNXException:
_LOGGER.exception(
"%s: unable to read from KNX address: %s",
self.name, addr
)
return False
return res
def set_value(self, name, value):
"""Set the value of a given named attribute."""
from knxip.core import KNXException
addr = None
for attributeaddress, attributename in self.names.items():
if attributename == name:
addr = attributeaddress
if addr is None:
_LOGGER.error("%s: attribute '%s' undefined",
self.name, name)
_LOGGER.debug(
"%s: defined attributes: %s",
self.name, str(self.names)
)
return False
try:
KNXTUNNEL.group_write(addr, value)
except KNXException:
_LOGGER.exception(
"%s: unable to write to KNX address: %s",
self.name, addr
)
return False
return True
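# Hedged example (not part of the original component): the DPT 5.001 scaling used
# by set_percentage()/get_percentage() above. A percentage travels as a single
# KNX byte in the 0-255 range; the values below are illustrative only.
def _example_dpt_5001_roundtrip(percentage=40):
    scaled_value = min(255, abs(percentage) * 255 / 100)   # 40 % -> 102 (byte value)
    recovered = round(scaled_value * 100 / 255)             # 102 -> 40 %
    return scaled_value, recovered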
| apache-2.0 | -1,636,903,083,226,797,800 | 29.288732 | 79 | 0.558785 | false |
mrgambal/vulyk | vulyk/models/tasks.py | 1 | 6601 | # -*- coding: utf-8 -*-
"""Module contains all models directly related to the main entity - tasks."""
from collections import namedtuple
from typing import Any, Dict, List
from bson import ObjectId
from flask_mongoengine import Document
from mongoengine import (
BooleanField,
CASCADE,
DateTimeField,
DictField,
IntField,
ListField,
ReferenceField,
StringField
)
from vulyk.models.user import User
from vulyk.signals import on_batch_done
__all__ = [
'AbstractAnswer',
'AbstractTask',
'Batch',
'BatchUpdateResult'
]
BatchUpdateResult = namedtuple('BatchUpdateResult', ['success', 'closed'])
class Batch(Document):
"""
Helper category to group tasks.
"""
id = StringField(max_length=50, primary_key=True)
task_type = StringField(max_length=50, required=True, db_field='taskType')
tasks_count = IntField(default=0, required=True, db_field='tasksCount')
tasks_processed = IntField(default=0, db_field='tasksProcessed')
closed = BooleanField(default=False, required=False)
batch_meta = DictField(db_field='batchMeta')
meta = {
'collection': 'batches',
'allow_inheritance': True,
'indexes': [
'task_type',
'closed'
]
}
@classmethod
def task_done_in(cls, batch_id: str) -> BatchUpdateResult:
"""
        Increment the processed-tasks counter when a task from the batch is done.
        If all tasks are finished, close the batch.
:param batch_id: Batch ID
:type batch_id: str
:return: Aggregate which represents complex effect of the method
:rtype: BatchUpdateResult
"""
num_changed = 0
batch = cls.objects.get(id=batch_id) # type: Batch
processed = batch.tasks_processed + 1
if processed > batch.tasks_count:
return BatchUpdateResult(success=False, closed=False)
closed = processed == batch.tasks_count
update_q = {'inc__tasks_processed': 1}
if closed:
update_q['set__closed'] = closed
num_changed = cls \
.objects(id=batch.id, closed=False) \
.update(**update_q)
if num_changed == 0:
update_q.pop('set__closed', None)
closed = False
num_changed = batch.update(**update_q)
elif closed:
on_batch_done.send(batch)
return BatchUpdateResult(success=num_changed > 0, closed=closed)
def __str__(self) -> str:
return str(self.id)
def __repr__(self) -> str:
return 'Batch [{id}] ({processed}/{count})'.format(
id=self.id,
processed=self.tasks_processed,
count=self.tasks_count)
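# Hedged sketch (not part of the original module): the MongoEngine update that
# task_done_in() issues when the incoming task is the last one. Filtering on
# closed=False makes closing the batch an atomic check-and-set; the batch id
# below is made up.
def _example_closing_update(batch_id='batch-1'):
    update_q = {'inc__tasks_processed': 1, 'set__closed': True}
    return Batch.objects(id=batch_id, closed=False).update(**update_q)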
class AbstractTask(Document):
"""
    This is the AbstractTask model.
    You need to inherit from it in your model.
"""
id = StringField(max_length=200, default='', primary_key=True)
task_type = StringField(max_length=50, required=True, db_field='taskType')
batch = ReferenceField(Batch, reverse_delete_rule=CASCADE)
users_count = IntField(default=0, db_field='usersCount')
users_processed = ListField(ReferenceField(User),
db_field='usersProcessed')
users_skipped = ListField(ReferenceField(User), db_field='usersSkipped')
closed = BooleanField(default=False)
task_data = DictField(required=True)
meta = {
'collection': 'tasks',
'allow_inheritance': True,
'indexes': [
'task_type',
'batch'
]
}
def as_dict(self) -> Dict[str, Any]:
"""
Converts the model-instance into a safe and lightweight dictionary.
:rtype: Dict[str, Any]
"""
return {
'id': self.id,
'closed': self.closed,
'data': self.task_data
}
@classmethod
def ids_in_batch(cls, batch: Batch) -> List[str]:
"""
Collects IDs of all tasks that belong to certain batch.
:param batch: Batch instance
:type batch: Batch
:return: List of IDs
:rtype: List[str]
"""
return cls.objects(batch=batch).distinct('id')
def __str__(self) -> str:
return str(self.id)
def __repr__(self) -> str:
return str(self)
class AbstractAnswer(Document):
"""
    This is the AbstractAnswer model.
    You need to inherit from it in your model.
"""
task = ReferenceField(AbstractTask, reverse_delete_rule=CASCADE)
created_by = ReferenceField(User, reverse_delete_rule=CASCADE,
db_field='createdBy')
created_at = DateTimeField(db_field='createdAt')
task_type = StringField(max_length=50, required=True, db_field='taskType')
# not sure - could be extended
result = DictField()
meta = {
'collection': 'reports',
'allow_inheritance': True,
'indexes': [
'task',
'created_by',
'created_at',
{
'fields': ['created_by', 'task'],
'unique': True
}
]
}
# TODO: decide, if we need it at all
@property
def corrections(self) -> int:
"""
        Returns the total number of actions/corrections given by the user in this
        particular answer.
:return: Count of corrections in this answer
:rtype: int
"""
return 1
@corrections.setter
def corrections(self, value: int) -> None:
pass
@corrections.deleter
def corrections(self) -> None:
pass
@classmethod
def answers_numbers_by_tasks(cls, task_ids: List[str]) -> Dict[ObjectId, int]:
"""
        Groups answers, filtered by the tasks they belong to, by user and counts
        the number of answers for every user.
:param task_ids: List of tasks IDs
:type task_ids: List[str]
:return: Map having user IDs as keys and answers numbers as values
:rtype: Dict[ObjectId, int]
"""
return cls.objects(task__in=task_ids).item_frequencies('created_by')
def as_dict(self) -> Dict[str, Dict]:
"""
        Converts the model instance into a safe dictionary that also includes
        the task and the user.
:rtype: Dict[str, Dict]
"""
return {
'task': self.task.as_dict(),
'answer': self.result,
'user': self.created_by.as_dict()
}
def __str__(self) -> str:
return str(self.pk)
def __repr__(self) -> str:
return 'Report [{} by {}]'.format(self.created_by, self.task)
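# Hedged sketch (not part of the original module): the shape returned by
# AbstractAnswer.answers_numbers_by_tasks() - user ObjectIds mapped to how many
# answers each of them gave for the passed tasks. The ids below are made up.
def _example_answers_numbers_result():
    return {
        ObjectId('5a0000000000000000000001'): 3,
        ObjectId('5a0000000000000000000002'): 1,
    }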
| bsd-3-clause | -6,040,339,416,882,159,000 | 26.961864 | 82 | 0.579936 | false |
Yellowen/Owrang | stock/doctype/stock_entry/stock_entry.py | 1 | 35585 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
import webnotes.defaults
from webnotes.utils import cstr, cint, flt, comma_or, nowdate
from webnotes.model.doc import Document, addchild
from webnotes.model.bean import getlist
from webnotes.model.code import get_obj
from webnotes import msgprint, _
from stock.utils import get_incoming_rate
from stock.stock_ledger import get_previous_sle
from controllers.queries import get_match_cond
import json
sql = webnotes.conn.sql
class NotUpdateStockError(webnotes.ValidationError): pass
class StockOverReturnError(webnotes.ValidationError): pass
class IncorrectValuationRateError(webnotes.ValidationError): pass
class DuplicateEntryForProductionOrderError(webnotes.ValidationError): pass
from controllers.stock_controller import StockController
class DocType(StockController):
def __init__(self, doc, doclist=None):
self.doc = doc
self.doclist = doclist
self.fname = 'mtn_details'
def validate(self):
self.validate_posting_time()
self.validate_purpose()
pro_obj = self.doc.production_order and \
get_obj('Production Order', self.doc.production_order) or None
self.validate_item()
self.validate_uom_is_integer("uom", "qty")
self.validate_uom_is_integer("stock_uom", "transfer_qty")
self.validate_warehouse(pro_obj)
self.validate_production_order(pro_obj)
self.get_stock_and_rate()
self.validate_incoming_rate()
self.validate_bom()
self.validate_finished_goods()
self.validate_return_reference_doc()
self.validate_with_material_request()
self.validate_fiscal_year()
self.set_total_amount()
def on_submit(self):
self.update_stock_ledger()
self.update_serial_no(1)
self.update_production_order(1)
self.make_gl_entries()
def on_cancel(self):
self.update_stock_ledger()
self.update_serial_no(0)
self.update_production_order(0)
self.make_cancel_gl_entries()
def validate_fiscal_year(self):
import accounts.utils
accounts.utils.validate_fiscal_year(self.doc.posting_date, self.doc.fiscal_year,
self.meta.get_label("posting_date"))
def validate_purpose(self):
valid_purposes = ["Material Issue", "Material Receipt", "Material Transfer",
"Manufacture/Repack", "Subcontract", "Sales Return", "Purchase Return"]
if self.doc.purpose not in valid_purposes:
msgprint(_("Purpose must be one of ") + comma_or(valid_purposes),
raise_exception=True)
def validate_item(self):
stock_items = self.get_stock_items()
for item in self.doclist.get({"parentfield": "mtn_details"}):
if item.item_code not in stock_items:
msgprint(_("""Only Stock Items are allowed for Stock Entry"""),
raise_exception=True)
def validate_warehouse(self, pro_obj):
"""perform various (sometimes conditional) validations on warehouse"""
source_mandatory = ["Material Issue", "Material Transfer", "Purchase Return"]
target_mandatory = ["Material Receipt", "Material Transfer", "Sales Return"]
validate_for_manufacture_repack = any([d.bom_no for d in self.doclist.get(
{"parentfield": "mtn_details"})])
if self.doc.purpose in source_mandatory and self.doc.purpose not in target_mandatory:
self.doc.to_warehouse = None
for d in getlist(self.doclist, 'mtn_details'):
d.t_warehouse = None
elif self.doc.purpose in target_mandatory and self.doc.purpose not in source_mandatory:
self.doc.from_warehouse = None
for d in getlist(self.doclist, 'mtn_details'):
d.s_warehouse = None
for d in getlist(self.doclist, 'mtn_details'):
if not d.s_warehouse and not d.t_warehouse:
d.s_warehouse = self.doc.from_warehouse
d.t_warehouse = self.doc.to_warehouse
if not (d.s_warehouse or d.t_warehouse):
msgprint(_("Atleast one warehouse is mandatory"), raise_exception=1)
if self.doc.purpose in source_mandatory and not d.s_warehouse:
msgprint(_("Row # ") + "%s: " % cint(d.idx)
+ _("Source Warehouse") + _(" is mandatory"), raise_exception=1)
if self.doc.purpose in target_mandatory and not d.t_warehouse:
msgprint(_("Row # ") + "%s: " % cint(d.idx)
+ _("Target Warehouse") + _(" is mandatory"), raise_exception=1)
if self.doc.purpose == "Manufacture/Repack":
if validate_for_manufacture_repack:
if d.bom_no:
d.s_warehouse = None
if not d.t_warehouse:
msgprint(_("Row # ") + "%s: " % cint(d.idx)
+ _("Target Warehouse") + _(" is mandatory"), raise_exception=1)
elif pro_obj and cstr(d.t_warehouse) != pro_obj.doc.fg_warehouse:
msgprint(_("Row # ") + "%s: " % cint(d.idx)
+ _("Target Warehouse") + _(" should be same as that in ")
+ _("Production Order"), raise_exception=1)
else:
d.t_warehouse = None
if not d.s_warehouse:
msgprint(_("Row # ") + "%s: " % cint(d.idx)
+ _("Source Warehouse") + _(" is mandatory"), raise_exception=1)
if cstr(d.s_warehouse) == cstr(d.t_warehouse):
msgprint(_("Source and Target Warehouse cannot be same"),
raise_exception=1)
def validate_production_order(self, pro_obj=None):
if not pro_obj:
if self.doc.production_order:
pro_obj = get_obj('Production Order', self.doc.production_order)
else:
return
if self.doc.purpose == "Manufacture/Repack":
# check for double entry
self.check_duplicate_entry_for_production_order()
elif self.doc.purpose != "Material Transfer":
self.doc.production_order = None
def check_duplicate_entry_for_production_order(self):
other_ste = [t[0] for t in webnotes.conn.get_values("Stock Entry", {
"production_order": self.doc.production_order,
"purpose": self.doc.purpose,
"docstatus": ["!=", 2],
"name": ["!=", self.doc.name]
}, "name")]
if other_ste:
production_item, qty = webnotes.conn.get_value("Production Order",
self.doc.production_order, ["production_item", "qty"])
args = other_ste + [production_item]
fg_qty_already_entered = webnotes.conn.sql("""select sum(actual_qty)
from `tabStock Entry Detail`
where parent in (%s)
and item_code = %s
                    and ifnull(s_warehouse,'')='' """ % (", ".join(["%s"] * len(other_ste)), "%s"), args)[0][0]
if fg_qty_already_entered >= qty:
webnotes.throw(_("Stock Entries already created for Production Order ")
+ self.doc.production_order + ":" + ", ".join(other_ste), DuplicateEntryForProductionOrderError)
def set_total_amount(self):
self.doc.total_amount = sum([flt(item.amount) for item in self.doclist.get({"parentfield": "mtn_details"})])
def get_stock_and_rate(self):
"""get stock and incoming rate on posting date"""
for d in getlist(self.doclist, 'mtn_details'):
args = webnotes._dict({
"item_code": d.item_code,
"warehouse": d.s_warehouse or d.t_warehouse,
"posting_date": self.doc.posting_date,
"posting_time": self.doc.posting_time,
"qty": d.s_warehouse and -1*d.transfer_qty or d.transfer_qty,
"serial_no": d.serial_no,
"bom_no": d.bom_no,
})
# get actual stock at source warehouse
d.actual_qty = get_previous_sle(args).get("qty_after_transaction") or 0
# get incoming rate
if not flt(d.incoming_rate):
d.incoming_rate = self.get_incoming_rate(args)
d.amount = flt(d.transfer_qty) * flt(d.incoming_rate)
def get_incoming_rate(self, args):
incoming_rate = 0
if self.doc.purpose == "Sales Return" and \
(self.doc.delivery_note_no or self.doc.sales_invoice_no):
sle = webnotes.conn.sql("""select name, posting_date, posting_time,
actual_qty, stock_value, warehouse from `tabStock Ledger Entry`
where voucher_type = %s and voucher_no = %s and
item_code = %s limit 1""",
((self.doc.delivery_note_no and "Delivery Note" or "Sales Invoice"),
self.doc.delivery_note_no or self.doc.sales_invoice_no, args.item_code), as_dict=1)
if sle:
args.update({
"posting_date": sle[0].posting_date,
"posting_time": sle[0].posting_time,
"sle": sle[0].name,
"warehouse": sle[0].warehouse,
})
previous_sle = get_previous_sle(args)
incoming_rate = (flt(sle[0].stock_value) - flt(previous_sle.get("stock_value"))) / \
flt(sle[0].actual_qty)
else:
incoming_rate = get_incoming_rate(args)
return incoming_rate
def validate_incoming_rate(self):
for d in getlist(self.doclist, 'mtn_details'):
if d.t_warehouse:
self.validate_value("incoming_rate", ">", 0, d, raise_exception=IncorrectValuationRateError)
def validate_bom(self):
for d in getlist(self.doclist, 'mtn_details'):
if d.bom_no and not webnotes.conn.sql("""select name from `tabBOM`
where item = %s and name = %s and docstatus = 1 and is_active = 1""",
(d.item_code, d.bom_no)):
msgprint(_("Item") + " %s: " % cstr(d.item_code)
+ _("does not belong to BOM: ") + cstr(d.bom_no)
+ _(" or the BOM is cancelled or inactive"), raise_exception=1)
def validate_finished_goods(self):
"""validation: finished good quantity should be same as manufacturing quantity"""
for d in getlist(self.doclist, 'mtn_details'):
if d.bom_no and flt(d.transfer_qty) != flt(self.doc.fg_completed_qty):
msgprint(_("Row #") + " %s: " % d.idx
+ _("Quantity should be equal to Manufacturing Quantity. ")
+ _("To fetch items again, click on 'Get Items' button \
or update the Quantity manually."), raise_exception=1)
def validate_return_reference_doc(self):
"""validate item with reference doc"""
ref = get_return_doclist_and_details(self.doc.fields)
if ref.doclist:
# validate docstatus
if ref.doclist[0].docstatus != 1:
webnotes.msgprint(_(ref.doclist[0].doctype) + ' "' + ref.doclist[0].name + '": '
+ _("Status should be Submitted"), raise_exception=webnotes.InvalidStatusError)
# update stock check
if ref.doclist[0].doctype == "Sales Invoice" and cint(ref.doclist[0].update_stock) != 1:
webnotes.msgprint(_(ref.doclist[0].doctype) + ' "' + ref.doclist[0].name + '": '
+ _("Update Stock should be checked."),
raise_exception=NotUpdateStockError)
# posting date check
ref_posting_datetime = "%s %s" % (cstr(ref.doclist[0].posting_date),
cstr(ref.doclist[0].posting_time) or "00:00:00")
this_posting_datetime = "%s %s" % (cstr(self.doc.posting_date),
cstr(self.doc.posting_time))
if this_posting_datetime < ref_posting_datetime:
from webnotes.utils.dateutils import datetime_in_user_format
webnotes.msgprint(_("Posting Date Time cannot be before")
+ ": " + datetime_in_user_format(ref_posting_datetime),
raise_exception=True)
stock_items = get_stock_items_for_return(ref.doclist, ref.parentfields)
already_returned_item_qty = self.get_already_returned_item_qty(ref.fieldname)
for item in self.doclist.get({"parentfield": "mtn_details"}):
# validate if item exists in the ref doclist and that it is a stock item
if item.item_code not in stock_items:
msgprint(_("Item") + ': "' + item.item_code + _("\" does not exist in ") +
ref.doclist[0].doctype + ": " + ref.doclist[0].name,
raise_exception=webnotes.DoesNotExistError)
# validate quantity <= ref item's qty - qty already returned
ref_item = ref.doclist.getone({"item_code": item.item_code})
returnable_qty = ref_item.qty - flt(already_returned_item_qty.get(item.item_code))
self.validate_value("transfer_qty", "<=", returnable_qty, item,
raise_exception=StockOverReturnError)
def get_already_returned_item_qty(self, ref_fieldname):
return dict(webnotes.conn.sql("""select item_code, sum(transfer_qty) as qty
from `tabStock Entry Detail` where parent in (
select name from `tabStock Entry` where `%s`=%s and docstatus=1)
group by item_code""" % (ref_fieldname, "%s"), (self.doc.fields.get(ref_fieldname),)))
def update_serial_no(self, is_submit):
"""Create / Update Serial No"""
from stock.doctype.stock_ledger_entry.stock_ledger_entry import update_serial_nos_after_submit, get_serial_nos
update_serial_nos_after_submit(self, "Stock Entry", "mtn_details")
for d in getlist(self.doclist, 'mtn_details'):
for serial_no in get_serial_nos(d.serial_no):
if self.doc.purpose == 'Purchase Return':
sr = webnotes.bean("Serial No", serial_no)
sr.doc.status = "Purchase Returned" if is_submit else "Available"
sr.save()
if self.doc.purpose == "Sales Return":
sr = webnotes.bean("Serial No", serial_no)
sr.doc.status = "Sales Returned" if is_submit else "Delivered"
sr.save()
def update_stock_ledger(self):
sl_entries = []
for d in getlist(self.doclist, 'mtn_details'):
if cstr(d.s_warehouse) and self.doc.docstatus == 1:
sl_entries.append(self.get_sl_entries(d, {
"warehouse": cstr(d.s_warehouse),
"actual_qty": -flt(d.transfer_qty),
"incoming_rate": 0
}))
if cstr(d.t_warehouse):
sl_entries.append(self.get_sl_entries(d, {
"warehouse": cstr(d.t_warehouse),
"actual_qty": flt(d.transfer_qty),
"incoming_rate": flt(d.incoming_rate)
}))
# On cancellation, make stock ledger entry for
# target warehouse first, to update serial no values properly
if cstr(d.s_warehouse) and self.doc.docstatus == 2:
sl_entries.append(self.get_sl_entries(d, {
"warehouse": cstr(d.s_warehouse),
"actual_qty": -flt(d.transfer_qty),
"incoming_rate": 0
}))
self.make_sl_entries(sl_entries, self.doc.amended_from and 'Yes' or 'No')
def update_production_order(self, is_submit):
if self.doc.production_order:
# first perform some validations
# (they are here coz this fn is also called during on_cancel)
pro_obj = get_obj("Production Order", self.doc.production_order)
if flt(pro_obj.doc.docstatus) != 1:
msgprint("""You cannot do any transaction against
Production Order : %s, as it's not submitted"""
% (pro_obj.doc.name), raise_exception=1)
if pro_obj.doc.status == 'Stopped':
msgprint("""You cannot do any transaction against Production Order : %s,
                as its status is 'Stopped'""" % (pro_obj.doc.name), raise_exception=1)
# update bin
if self.doc.purpose == "Manufacture/Repack":
from stock.utils import update_bin
pro_obj.doc.produced_qty = flt(pro_obj.doc.produced_qty) + \
(is_submit and 1 or -1 ) * flt(self.doc.fg_completed_qty)
args = {
"item_code": pro_obj.doc.production_item,
"warehouse": pro_obj.doc.fg_warehouse,
"posting_date": self.doc.posting_date,
"planned_qty": (is_submit and -1 or 1 ) * flt(self.doc.fg_completed_qty)
}
update_bin(args)
# update production order status
pro_obj.doc.status = (flt(pro_obj.doc.qty)==flt(pro_obj.doc.produced_qty)) \
and 'Completed' or 'In Process'
pro_obj.doc.save()
def get_item_details(self, arg):
arg = json.loads(arg)
item = sql("""select stock_uom, description, item_name from `tabItem`
where name = %s and (ifnull(end_of_life,'')='' or end_of_life ='0000-00-00'
or end_of_life > now())""", (arg.get('item_code')), as_dict = 1)
if not item:
msgprint("Item is not active", raise_exception=1)
ret = {
'uom' : item and item[0]['stock_uom'] or '',
'stock_uom' : item and item[0]['stock_uom'] or '',
'description' : item and item[0]['description'] or '',
'item_name' : item and item[0]['item_name'] or '',
'qty' : 0,
'transfer_qty' : 0,
'conversion_factor' : 1,
'batch_no' : '',
'actual_qty' : 0,
'incoming_rate' : 0
}
stock_and_rate = arg.get('warehouse') and self.get_warehouse_details(json.dumps(arg)) or {}
ret.update(stock_and_rate)
return ret
def get_uom_details(self, arg = ''):
arg, ret = eval(arg), {}
uom = sql("""select conversion_factor from `tabUOM Conversion Detail`
where parent = %s and uom = %s""", (arg['item_code'], arg['uom']), as_dict = 1)
if not uom or not flt(uom[0].conversion_factor):
msgprint("There is no Conversion Factor for UOM '%s' in Item '%s'" % (arg['uom'],
arg['item_code']))
ret = {'uom' : ''}
else:
ret = {
'conversion_factor' : flt(uom[0]['conversion_factor']),
'transfer_qty' : flt(arg['qty']) * flt(uom[0]['conversion_factor']),
}
return ret
def get_warehouse_details(self, args):
args = json.loads(args)
ret = {}
if args.get('warehouse') and args.get('item_code'):
args.update({
"posting_date": self.doc.posting_date,
"posting_time": self.doc.posting_time,
})
args = webnotes._dict(args)
ret = {
"actual_qty" : get_previous_sle(args).get("qty_after_transaction") or 0,
"incoming_rate" : self.get_incoming_rate(args)
}
return ret
def get_items(self):
self.doclist = self.doc.clear_table(self.doclist, 'mtn_details', 1)
pro_obj = None
if self.doc.production_order:
# common validations
pro_obj = get_obj('Production Order', self.doc.production_order)
if pro_obj:
self.validate_production_order(pro_obj)
self.doc.bom_no = pro_obj.doc.bom_no
else:
# invalid production order
self.doc.production_order = None
if self.doc.bom_no:
if self.doc.purpose in ["Material Issue", "Material Transfer", "Manufacture/Repack",
"Subcontract"]:
if self.doc.production_order and self.doc.purpose == "Material Transfer":
item_dict = self.get_pending_raw_materials(pro_obj)
else:
item_dict = self.get_bom_raw_materials(self.doc.fg_completed_qty)
for item in item_dict.values():
if pro_obj:
item["from_warehouse"] = pro_obj.doc.wip_warehouse
item["to_warehouse"] = ""
# add raw materials to Stock Entry Detail table
self.add_to_stock_entry_detail(item_dict)
# add finished good item to Stock Entry Detail table -- along with bom_no
if self.doc.production_order and self.doc.purpose == "Manufacture/Repack":
self.add_to_stock_entry_detail({
cstr(pro_obj.doc.production_item): {
"to_warehouse": pro_obj.doc.fg_warehouse,
"from_warehouse": "",
"qty": self.doc.fg_completed_qty,
"description": pro_obj.doc.description,
"stock_uom": pro_obj.doc.stock_uom
}
}, bom_no=pro_obj.doc.bom_no)
elif self.doc.purpose in ["Material Receipt", "Manufacture/Repack"]:
if self.doc.purpose=="Material Receipt":
self.doc.from_warehouse = ""
item = webnotes.conn.sql("""select item, description, uom from `tabBOM`
where name=%s""", (self.doc.bom_no,), as_dict=1)
self.add_to_stock_entry_detail({
item[0]["item"] : {
"qty": self.doc.fg_completed_qty,
"description": item[0]["description"],
"stock_uom": item[0]["uom"],
"from_warehouse": ""
}
}, bom_no=self.doc.bom_no)
self.get_stock_and_rate()
def get_bom_raw_materials(self, qty):
"""
get all items from flat bom except
child items of sub-contracted and sub assembly items
and sub assembly items itself.
"""
# item dict = { item_code: {qty, description, stock_uom} }
item_dict = {}
def _make_items_dict(items_list):
"""makes dict of unique items with it's qty"""
for item in items_list:
if item_dict.has_key(item.item_code):
item_dict[item.item_code]["qty"] += flt(item.qty)
else:
item_dict[item.item_code] = {
"qty": flt(item.qty),
"description": item.description,
"stock_uom": item.stock_uom,
"from_warehouse": item.default_warehouse
}
if self.doc.use_multi_level_bom:
# get all raw materials with sub assembly childs
fl_bom_sa_child_item = sql("""select
fb.item_code,
ifnull(sum(fb.qty_consumed_per_unit),0)*%s as qty,
fb.description,
fb.stock_uom,
it.default_warehouse
from
`tabBOM Explosion Item` fb,`tabItem` it
where
it.name = fb.item_code
and ifnull(it.is_pro_applicable, 'No') = 'No'
and ifnull(it.is_sub_contracted_item, 'No') = 'No'
and fb.docstatus < 2
and fb.parent=%s group by item_code, stock_uom""",
(qty, self.doc.bom_no), as_dict=1)
if fl_bom_sa_child_item:
_make_items_dict(fl_bom_sa_child_item)
else:
# get only BOM items
fl_bom_sa_items = sql("""select
`tabItem`.item_code,
ifnull(sum(`tabBOM Item`.qty_consumed_per_unit), 0) *%s as qty,
`tabItem`.description,
`tabItem`.stock_uom,
`tabItem`.default_warehouse
from
`tabBOM Item`, `tabItem`
where
`tabBOM Item`.parent = %s and
`tabBOM Item`.item_code = tabItem.name and
`tabBOM Item`.docstatus < 2
group by item_code""", (qty, self.doc.bom_no), as_dict=1)
if fl_bom_sa_items:
_make_items_dict(fl_bom_sa_items)
return item_dict
def get_pending_raw_materials(self, pro_obj):
"""
issue (item quantity) that is pending to issue or desire to transfer,
whichever is less
"""
item_dict = self.get_bom_raw_materials(1)
issued_item_qty = self.get_issued_qty()
max_qty = flt(pro_obj.doc.qty)
only_pending_fetched = []
for item in item_dict:
pending_to_issue = (max_qty * item_dict[item]["qty"]) - issued_item_qty.get(item, 0)
desire_to_transfer = flt(self.doc.fg_completed_qty) * item_dict[item]["qty"]
if desire_to_transfer <= pending_to_issue:
item_dict[item]["qty"] = desire_to_transfer
else:
item_dict[item]["qty"] = pending_to_issue
if pending_to_issue:
only_pending_fetched.append(item)
# delete items with 0 qty
for item in item_dict.keys():
if not item_dict[item]["qty"]:
del item_dict[item]
# show some message
if not len(item_dict):
webnotes.msgprint(_("""All items have already been transferred \
for this Production Order."""))
elif only_pending_fetched:
webnotes.msgprint(_("""Only quantities pending to be transferred \
were fetched for the following items:\n""" + "\n".join(only_pending_fetched)))
return item_dict
def get_issued_qty(self):
issued_item_qty = {}
result = sql("""select t1.item_code, sum(t1.qty)
from `tabStock Entry Detail` t1, `tabStock Entry` t2
where t1.parent = t2.name and t2.production_order = %s and t2.docstatus = 1
and t2.purpose = 'Material Transfer'
group by t1.item_code""", self.doc.production_order)
for t in result:
issued_item_qty[t[0]] = flt(t[1])
return issued_item_qty
def add_to_stock_entry_detail(self, item_dict, bom_no=None):
for d in item_dict:
se_child = addchild(self.doc, 'mtn_details', 'Stock Entry Detail',
self.doclist)
se_child.s_warehouse = item_dict[d].get("from_warehouse", self.doc.from_warehouse)
se_child.t_warehouse = item_dict[d].get("to_warehouse", self.doc.to_warehouse)
se_child.item_code = cstr(d)
se_child.description = item_dict[d]["description"]
se_child.uom = item_dict[d]["stock_uom"]
se_child.stock_uom = item_dict[d]["stock_uom"]
se_child.qty = flt(item_dict[d]["qty"])
# in stock uom
se_child.transfer_qty = flt(item_dict[d]["qty"])
se_child.conversion_factor = 1.00
# to be assigned for finished item
se_child.bom_no = bom_no
def get_cust_values(self):
"""fetches customer details"""
if self.doc.delivery_note_no:
doctype = "Delivery Note"
name = self.doc.delivery_note_no
else:
doctype = "Sales Invoice"
name = self.doc.sales_invoice_no
result = webnotes.conn.sql("""select customer, customer_name,
address_display as customer_address
from `tab%s` where name=%s""" % (doctype, "%s"), (name,), as_dict=1)
return result and result[0] or {}
def get_cust_addr(self):
from utilities.transaction_base import get_default_address, get_address_display
res = sql("select customer_name from `tabCustomer` where name = '%s'"%self.doc.customer)
address_display = None
customer_address = get_default_address("customer", self.doc.customer)
if customer_address:
address_display = get_address_display(customer_address)
ret = {
'customer_name' : res and res[0][0] or '',
'customer_address' : address_display}
return ret
def get_supp_values(self):
result = webnotes.conn.sql("""select supplier, supplier_name,
address_display as supplier_address
from `tabPurchase Receipt` where name=%s""", (self.doc.purchase_receipt_no,),
as_dict=1)
return result and result[0] or {}
def get_supp_addr(self):
from utilities.transaction_base import get_default_address, get_address_display
res = sql("""select supplier_name from `tabSupplier`
where name=%s""", self.doc.supplier)
address_display = None
supplier_address = get_default_address("customer", self.doc.customer)
if supplier_address:
address_display = get_address_display(supplier_address)
ret = {
'supplier_name' : res and res[0][0] or '',
'supplier_address' : address_display }
return ret
def validate_with_material_request(self):
for item in self.doclist.get({"parentfield": "mtn_details"}):
if item.material_request:
mreq_item = webnotes.conn.get_value("Material Request Item",
{"name": item.material_request_item, "parent": item.material_request},
["item_code", "warehouse", "idx"], as_dict=True)
if mreq_item.item_code != item.item_code or mreq_item.warehouse != item.t_warehouse:
msgprint(_("Row #") + (" %d: " % item.idx) + _("does not match")
+ " " + _("Row #") + (" %d %s " % (mreq_item.idx, _("of")))
+ _("Material Request") + (" - %s" % item.material_request),
raise_exception=webnotes.MappingMismatchError)
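# Hedged illustration (not part of the original DocType): the shape of the `args`
# dict that get_stock_and_rate() builds for every row and hands to
# get_previous_sle()/get_incoming_rate(). All field values below are made up.
def _example_incoming_rate_args():
    return webnotes._dict({
        "item_code": "ITEM-0001",
        "warehouse": "Stores - C",
        "posting_date": "2013-01-01",
        "posting_time": "12:00:00",
        "qty": -5,              # negative when issued from the source warehouse
        "serial_no": None,
        "bom_no": None,
    })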
@webnotes.whitelist()
def get_production_order_details(production_order):
result = webnotes.conn.sql("""select bom_no,
ifnull(qty, 0) - ifnull(produced_qty, 0) as fg_completed_qty, use_multi_level_bom
from `tabProduction Order` where name = %s""", production_order, as_dict=1)
return result and result[0] or {}
def query_sales_return_doc(doctype, txt, searchfield, start, page_len, filters):
conditions = ""
if doctype == "Sales Invoice":
conditions = "and update_stock=1"
return webnotes.conn.sql("""select name, customer, customer_name
from `tab%s` where docstatus = 1
and (`%s` like %%(txt)s
or `customer` like %%(txt)s) %s %s
order by name, customer, customer_name
limit %s""" % (doctype, searchfield, conditions,
get_match_cond(doctype, searchfield), "%(start)s, %(page_len)s"),
{"txt": "%%%s%%" % txt, "start": start, "page_len": page_len},
as_list=True)
def query_purchase_return_doc(doctype, txt, searchfield, start, page_len, filters):
return webnotes.conn.sql("""select name, supplier, supplier_name
from `tab%s` where docstatus = 1
and (`%s` like %%(txt)s
or `supplier` like %%(txt)s) %s
order by name, supplier, supplier_name
limit %s""" % (doctype, searchfield, get_match_cond(doctype, searchfield),
"%(start)s, %(page_len)s"), {"txt": "%%%s%%" % txt, "start":
start, "page_len": page_len}, as_list=True)
def query_return_item(doctype, txt, searchfield, start, page_len, filters):
txt = txt.replace("%", "")
ref = get_return_doclist_and_details(filters)
stock_items = get_stock_items_for_return(ref.doclist, ref.parentfields)
result = []
for item in ref.doclist.get({"parentfield": ["in", ref.parentfields]}):
if item.item_code in stock_items:
item.item_name = cstr(item.item_name)
item.description = cstr(item.description)
if (txt in item.item_code) or (txt in item.item_name) or (txt in item.description):
val = [
item.item_code,
(len(item.item_name) > 40) and (item.item_name[:40] + "...") or item.item_name,
(len(item.description) > 40) and (item.description[:40] + "...") or \
item.description
]
if val not in result:
result.append(val)
return result[start:start+page_len]
def get_batch_no(doctype, txt, searchfield, start, page_len, filters):
if not filters.get("posting_date"):
filters["posting_date"] = nowdate()
batch_nos = None
args = {
'item_code': filters['item_code'],
's_warehouse': filters['s_warehouse'],
'posting_date': filters['posting_date'],
'txt': "%%%s%%" % txt,
'mcond':get_match_cond(doctype, searchfield),
"start": start,
"page_len": page_len
}
if filters.get("s_warehouse"):
batch_nos = webnotes.conn.sql("""select batch_no
from `tabStock Ledger Entry` sle
where item_code = '%(item_code)s'
and warehouse = '%(s_warehouse)s'
and batch_no like '%(txt)s'
and exists(select * from `tabBatch`
where name = sle.batch_no
and (ifnull(expiry_date, '2099-12-31') >= %(posting_date)s
or expiry_date = '')
and docstatus != 2)
%(mcond)s
group by batch_no having sum(actual_qty) > 0
order by batch_no desc
limit %(start)s, %(page_len)s """
% args)
if batch_nos:
return batch_nos
else:
return webnotes.conn.sql("""select name from `tabBatch`
where item = '%(item_code)s'
and docstatus < 2
and (ifnull(expiry_date, '2099-12-31') >= %(posting_date)s
or expiry_date = '' or expiry_date = "0000-00-00")
%(mcond)s
order by name desc
limit %(start)s, %(page_len)s
""" % args)
def get_stock_items_for_return(ref_doclist, parentfields):
"""return item codes filtered from doclist, which are stock items"""
if isinstance(parentfields, basestring):
parentfields = [parentfields]
all_items = list(set([d.item_code for d in
ref_doclist.get({"parentfield": ["in", parentfields]})]))
stock_items = webnotes.conn.sql_list("""select name from `tabItem`
where is_stock_item='Yes' and name in (%s)""" % (", ".join(["%s"] * len(all_items))),
tuple(all_items))
return stock_items
def get_return_doclist_and_details(args):
ref = webnotes._dict()
# get ref_doclist
if args["purpose"] in return_map:
for fieldname, val in return_map[args["purpose"]].items():
if args.get(fieldname):
ref.fieldname = fieldname
ref.doclist = webnotes.get_doclist(val[0], args[fieldname])
ref.parentfields = val[1]
break
return ref
return_map = {
"Sales Return": {
# [Ref DocType, [Item tables' parentfields]]
"delivery_note_no": ["Delivery Note", ["delivery_note_details", "packing_details"]],
"sales_invoice_no": ["Sales Invoice", ["entries", "packing_details"]]
},
"Purchase Return": {
"purchase_receipt_no": ["Purchase Receipt", ["purchase_receipt_details"]]
}
}
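# Hedged example (not part of the original module): how get_return_doclist_and_details()
# resolves a reference through return_map. For a Sales Return entered against a
# Delivery Note the lookup yields the reference doctype and its item tables.
def _example_return_map_lookup():
    ref_doctype, parentfields = return_map["Sales Return"]["delivery_note_no"]
    # ref_doctype == "Delivery Note"
    # parentfields == ["delivery_note_details", "packing_details"]
    return ref_doctype, parentfields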
@webnotes.whitelist()
def make_return_jv(stock_entry):
se = webnotes.bean("Stock Entry", stock_entry)
if not se.doc.purpose in ["Sales Return", "Purchase Return"]:
return
ref = get_return_doclist_and_details(se.doc.fields)
if ref.doclist[0].doctype == "Delivery Note":
result = make_return_jv_from_delivery_note(se, ref)
elif ref.doclist[0].doctype == "Sales Invoice":
result = make_return_jv_from_sales_invoice(se, ref)
elif ref.doclist[0].doctype == "Purchase Receipt":
result = make_return_jv_from_purchase_receipt(se, ref)
# create jv doclist and fetch balance for each unique row item
jv_list = [{
"__islocal": 1,
"doctype": "Journal Voucher",
"posting_date": se.doc.posting_date,
"voucher_type": se.doc.purpose == "Sales Return" and "Credit Note" or "Debit Note",
"fiscal_year": se.doc.fiscal_year,
"company": se.doc.company
}]
from accounts.utils import get_balance_on
for r in result:
jv_list.append({
"__islocal": 1,
"doctype": "Journal Voucher Detail",
"parentfield": "entries",
"account": r.get("account"),
"against_invoice": r.get("against_invoice"),
"against_voucher": r.get("against_voucher"),
"balance": get_balance_on(r.get("account"), se.doc.posting_date) \
if r.get("account") else 0
})
return jv_list
def make_return_jv_from_sales_invoice(se, ref):
# customer account entry
parent = {
"account": ref.doclist[0].debit_to,
"against_invoice": ref.doclist[0].name,
}
# income account entries
children = []
for se_item in se.doclist.get({"parentfield": "mtn_details"}):
# find item in ref.doclist
ref_item = ref.doclist.getone({"item_code": se_item.item_code})
account = get_sales_account_from_item(ref.doclist, ref_item)
if account not in children:
children.append(account)
return [parent] + [{"account": account} for account in children]
def get_sales_account_from_item(doclist, ref_item):
account = None
if not ref_item.income_account:
if ref_item.parent_item:
parent_item = doclist.getone({"item_code": ref_item.parent_item})
account = parent_item.income_account
else:
account = ref_item.income_account
return account
def make_return_jv_from_delivery_note(se, ref):
invoices_against_delivery = get_invoice_list("Sales Invoice Item", "delivery_note",
ref.doclist[0].name)
if not invoices_against_delivery:
sales_orders_against_delivery = [d.prevdoc_docname for d in
ref.doclist.get({"prevdoc_doctype": "Sales Order"}) if d.prevdoc_docname]
if sales_orders_against_delivery:
invoices_against_delivery = get_invoice_list("Sales Invoice Item", "sales_order",
sales_orders_against_delivery)
if not invoices_against_delivery:
return []
packing_item_parent_map = dict([[d.item_code, d.parent_item] for d in ref.doclist.get(
{"parentfield": ref.parentfields[1]})])
parent = {}
children = []
for se_item in se.doclist.get({"parentfield": "mtn_details"}):
for sales_invoice in invoices_against_delivery:
si = webnotes.bean("Sales Invoice", sales_invoice)
if se_item.item_code in packing_item_parent_map:
ref_item = si.doclist.get({"item_code": packing_item_parent_map[se_item.item_code]})
else:
ref_item = si.doclist.get({"item_code": se_item.item_code})
if not ref_item:
continue
ref_item = ref_item[0]
account = get_sales_account_from_item(si.doclist, ref_item)
if account not in children:
children.append(account)
if not parent:
parent = {"account": si.doc.debit_to}
break
if len(invoices_against_delivery) == 1:
parent["against_invoice"] = invoices_against_delivery[0]
result = [parent] + [{"account": account} for account in children]
return result
def get_invoice_list(doctype, link_field, value):
if isinstance(value, basestring):
value = [value]
return webnotes.conn.sql_list("""select distinct parent from `tab%s`
where docstatus = 1 and `%s` in (%s)""" % (doctype, link_field,
", ".join(["%s"]*len(value))), tuple(value))
def make_return_jv_from_purchase_receipt(se, ref):
invoice_against_receipt = get_invoice_list("Purchase Invoice Item", "purchase_receipt",
ref.doclist[0].name)
if not invoice_against_receipt:
purchase_orders_against_receipt = [d.prevdoc_docname for d in
ref.doclist.get({"prevdoc_doctype": "Purchase Order"}) if d.prevdoc_docname]
if purchase_orders_against_receipt:
invoice_against_receipt = get_invoice_list("Purchase Invoice Item", "purchase_order",
purchase_orders_against_receipt)
if not invoice_against_receipt:
return []
parent = {}
children = []
for se_item in se.doclist.get({"parentfield": "mtn_details"}):
for purchase_invoice in invoice_against_receipt:
pi = webnotes.bean("Purchase Invoice", purchase_invoice)
ref_item = pi.doclist.get({"item_code": se_item.item_code})
if not ref_item:
continue
ref_item = ref_item[0]
account = ref_item.expense_head
if account not in children:
children.append(account)
if not parent:
parent = {"account": pi.doc.credit_to}
break
if len(invoice_against_receipt) == 1:
parent["against_voucher"] = invoice_against_receipt[0]
result = [parent] + [{"account": account} for account in children]
return result
| agpl-3.0 | -4,433,923,836,272,466,400 | 34.656313 | 112 | 0.658423 | false |
koalalorenzo/greatdiary | main.py | 1 | 15891 | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
import gtk
import webkit
import gobject
import xml.dom.minidom
from xml.dom.minidom import Node
import time
import os
import sys
import crypt, random, string
import libcrypt
from sqlite3 import dbapi2 as sqlite
gobject.threads_init()
def dialog_info(info):
"""
this function show a info dialog.
"""
dialog = gtk.MessageDialog(
None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_INFO,
gtk.BUTTONS_OK,
None)
dialog.set_markup(info)
dialog.show_all()
dialog.run()
dialog.destroy()
def dialog_get_password(motivo="This will be used for <i>identification</i> purposes"):
"""
    This function asks for a password.
"""
dialog = gtk.MessageDialog(
None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION,
gtk.BUTTONS_OK,
None)
dialog.set_markup("Please enter the <b>password</b>")
def responseToDialog(entry, dialog, response):
dialog.response(response)
entry = gtk.Entry()
entry.set_visibility(False)
entry.connect("activate", responseToDialog, dialog, gtk.RESPONSE_OK)
hbox = gtk.HBox()
hbox.pack_start(gtk.Label("Password:"), False, 5, 5)
hbox.pack_end(entry)
dialog.format_secondary_markup(motivo)
dialog.vbox.pack_end(hbox, True, True, 0)
dialog.show_all()
dialog.run()
text = entry.get_text()
dialog.destroy()
return text
def accent2html(astring,reverse=False):
"""
    This 'stupid' function replaces accented characters with their HTML entities.
"""
    values = {
        "à": "&agrave;",
        "è": "&egrave;",
        "ì": "&igrave;",
        "ò": "&ograve;",
        "ù": "&ugrave;",
        "À": "&Agrave;",
        "È": "&Egrave;",
        "Ì": "&Igrave;",
        "Ò": "&Ograve;",
        "Ù": "&Ugrave;",
        " ": "&nbsp;",
        "!": "&#33;"
    }
if not reverse:
astring = astring.replace("&","&")
for lettera in values.keys():
if reverse:
astring = astring.replace(values[lettera],lettera)
else:
astring = astring.replace(lettera,values[lettera])
if reverse:
astring = astring.replace("&","&")
return astring
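# Hedged example (not part of the original script): accent2html() round trip.
def _example_accent2html():
    html = accent2html("è già!")               # -> "&egrave;&nbsp;gi&agrave;&#33;"
    text = accent2html(html, reverse=True)     # -> "è già!"
    return html, text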
def get_salt(chars = string.letters + string.digits): return random.choice(chars) + random.choice(chars)
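# Hedged example (not part of the original script): how the salt and the crypt
# module fit together when the database is first created and when check_passwd()
# later verifies a password against the value kept in the settings table.
def _example_password_check(password="secret"):
    salt = get_salt()                          # two random characters
    stored = crypt.crypt(password, salt)       # value persisted in the database
    return crypt.crypt(password, salt) == stored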
def db2html(database="database.sql", password=None):
"""
    Use this function to export the database pages as HTML files.
"""
if not os.path.exists("%s.dir/" % database):
os.mkdir("%s.dir/" % database)
adatabase = sqlite.connect(database)
os.chdir("%s.dir/" % database)
cursor = adatabase.cursor()
if not password:
password = dialog_get_password()
eget = cursor.execute("SELECT * FROM settings")
for (key, value) in eget:
if key == "salt":
salt = value
elif key == "password":
check = value
if not crypt.crypt(password,salt) == check:
dialog_info("Your password is not correct!")
sys.exit(1)
eget = cursor.execute("SELECT * FROM pages")
for (number, date, text ) in eget:
xs = open("%s-%s.html" % (number, date), "w")
xs.write("<html>\n%s</html>" % libcrypt.decrypt(text,password).decode("base64"))
xs.close()
dialog_info("diary converted in html pages")
class Page(object):
"""
This class is used to easily manage a diary page.
"""
def __init__(self):
self.meta = dict()
self.text = str()
self.time = str()
self.number = int()
def set_page(self, text, date, number):
self.text = text
self.time = date
self.number = number
class PagesManager(object):
"""
    This class manages the pages and the database.
"""
def __init__(self, database_path):
self.pages = dict()
self.settings = dict()
self.__load_database(database_path)
if self.settings["is_crypted"]:
self.tmp_password = ""
def __load_database(self, database_path):
if not database_path:
database_path = "./database.sql"
self.database = sqlite.connect(database_path)
self.cursor = self.database.cursor()
eget = self.cursor.execute("SELECT * FROM settings")
for ( key, value ) in eget:
if value == "True":
self.settings[key] = True
elif value == "False":
self.settings[key] = False
else:
self.settings[key] = value
def get_pages(self):
eget = self.cursor.execute("SELECT * FROM pages")
for (number, date, text ) in eget:
self.pages[number] = Page()
if self.settings["is_crypted"] and self.tmp_password:
text = libcrypt.decrypt(text,self.tmp_password)
try:
self.pages[number].set_page(text.decode("base64"), date, number)
except:
self.pages[number].set_page(text, date, number)
def make_page(self, text, date=None):
if not date:
date = time.strftime("%A %d %B %Y - %H:%M:%S")
self.get_pages()
num = len(self.pages.keys()) + 1
if self.settings["is_crypted"] and self.tmp_password:
text = libcrypt.crypt(text.encode("base64") ,self.tmp_password)
self.cursor.execute( "INSERT INTO pages (number, date, text) VALUES ('%s', '%s', '%s')" % (num, date, text) )
self.database.commit()
self.get_pages()
def check_passwd(self):
if not crypt.crypt(self.tmp_password,self.settings["salt"]) == self.settings["password"]:
return False
return True
def commit(self): self.database.commit()
def close(self):
self.database.commit()
self.database.close()
class Gui(object):
"""
This class manages, builds and destroys the windows.
"""
def __init__(self, database_path="database.sql"):
self.manager = PagesManager(database_path)
if self.manager.settings["is_crypted"]:
self.manager.tmp_password = dialog_get_password()
if not self.manager.check_passwd():
dialog_info("Your password is not correct!")
sys.exit(1)
self.manager.get_pages()
self.__number = len(self.manager.pages.keys()) + 1
self.window = gtk.Window()
self.window.set_title("Gread Diary")
self.__icon = self.window.render_icon(gtk.STOCK_ORIENTATION_PORTRAIT, gtk.ICON_SIZE_MENU)
self.window.set_icon(self.__icon)
self.window.set_size_request(660,500)
self.window.set_resizable(True)
self.window.connect("destroy", self.destroy)
self.new_button = gtk.ToolButton(gtk.STOCK_NEW)
self.new_button.connect("clicked", self.new)
self.save_button = gtk.ToolButton(gtk.STOCK_SAVE)
self.save_button.connect("clicked", self.save)
self.convert_button = gtk.ToolButton(gtk.STOCK_CONVERT)
self.convert_button.connect("clicked", self.__convert)
self.about_button = gtk.ToolButton(gtk.STOCK_ABOUT)
self.about_button.connect("clicked", self.__about)
self.back_button = gtk.ToolButton(gtk.STOCK_GO_BACK)
self.back_button.connect("clicked", self.__go_back)
self.forward_button = gtk.ToolButton(gtk.STOCK_GO_FORWARD)
self.forward_button.connect("clicked", self.__go_forward)
self.space_button_one = gtk.ToolItem()
self.space_button_two = gtk.ToolItem()
self.number_button = gtk.ToolItem()
self.number_entry = gtk.Entry()
self.number_entry.connect("activate", self.__change_page)
self.number_button.add(self.number_entry)
self.number_button.set_expand(False)
self.space_button_one.set_expand(True)
self.space_button_two.set_expand(True)
self.panel_bar = gtk.Toolbar()
self.panel_bar.add(self.back_button)
self.panel_bar.add(self.space_button_one)
self.panel_bar.add(self.new_button)
self.panel_bar.add(self.save_button)
self.panel_bar.add(self.convert_button)
self.panel_bar.add(self.about_button)
self.panel_bar.add(self.space_button_two)
self.panel_bar.add(self.number_button)
self.panel_bar.add(self.forward_button)
self.webkit = webkit.WebView()
#self.webkit.connect("populate-popup", self.__hide_menu)
self.scroll_box = gtk.ScrolledWindow()
self.scroll_box.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
self.scroll_box.add(self.webkit)
self.__vbox = gtk.VBox(False, 0)
self.__vbox.pack_start(self.scroll_box, True)
self.__vbox.pack_start(self.panel_bar, False)
self.window.add(self.__vbox)
self.__disable_input()
self.forward_button.set_sensitive(False)
self.window.show_all()
self.convert_button.hide()
if not len(self.manager.pages.keys()) >= 1:
self.back_button.set_sensitive(False)
self.forward_button.set_sensitive(False)
else:
#This allow to go back and read the page
self.show_page(len(self.manager.pages.keys()))
self.show_intro_page()
def __hide_menu(self, view, menu):
if not self.webkit.get_editable():
menu.destroy()
def __change_page(self, view=None, menu=None): self.show_page(int(self.number_entry.get_text()))
def __go_back(self, view=None, menu=None):
self.show_page(self.__number - 1)
def __about(self, view=None, menu=None):
about = gtk.AboutDialog()
about.set_program_name("Great Diary")
about.set_version("2.0")
about.set_copyright("(c) Lorenzo Setale")
about.set_comments("A symple diary written with python, gtk, webkit and using sqlite as storage!")
about.set_website("http://code.google.com/p/greatdiary/")
about.set_logo(self.window.render_icon(gtk.STOCK_ORIENTATION_PORTRAIT, gtk.ICON_SIZE_DIALOG))
about.run()
about.destroy()
def __convert(self, view=None, menu=None): db2html(password=self.manager.tmp_password)
def __go_forward(self, view=None, menu=None):
self.show_page(self.__number + 1)
def new(self, widget=None, data=None):
self.save_button.set_sensitive(True)
self.webkit.load_string("", "text/html", "iso-8859-15", "new-page")
self.webkit.set_editable(True)
self.number_entry.set_editable(False)
self.number_entry.set_text(str(len(self.manager.pages.keys())+1))
self.__number = len(self.manager.pages.keys())+1
self.back_button.set_sensitive(True)
self.forward_button.set_sensitive(False)
def save(self, widget=None, data=None):
self.webkit.execute_script("document.title=document.documentElement.innerHTML;")
text = accent2html(self.webkit.get_main_frame().get_title())
self.manager.make_page(text)
self.__number = len(self.manager.pages.keys())
self.__disable_input()
self.number_entry.set_editable(True)
def __disable_input(self):
self.webkit.set_editable(False)
self.save_button.set_sensitive(False)
def show_page(self, anumber):
self.__disable_input()
self.manager.get_pages()
if int(anumber) >= len(self.manager.pages.keys()):
anumber = len(self.manager.pages.keys())
self.back_button.set_sensitive(True)
self.forward_button.set_sensitive(False)
elif int(anumber) <= 1:
anumber = 1
self.back_button.set_sensitive(False)
self.forward_button.set_sensitive(True)
else:
self.back_button.set_sensitive(True)
self.forward_button.set_sensitive(True)
self.webkit.load_string("<html>\n%s</html>" % self.manager.pages[anumber].text.replace("&nbsp;"," "), "text/html", "iso-8859-15", "new-page")
self.__number = anumber
self.number_entry.set_text(str(anumber))
def quit(self, widget=None, data=None):
self.destroy()
def destroy(self, widget=None, data=None):
self.manager.close()
gtk.main_quit()
def show_intro_page(self):
HTML = "<html>"
HTML += """<head><style type="text/css">
.core {
clear: none;
min-width: 512px;
margin: 0 15px 10px 15px;
background: #cccccc;
padding: 5px 3px;
-webkit-border-radius: 13px;
-webkit-transition: all 0.1s ease-out;
background-color: #babdb6;
border: 0px solid #000; box-shadow:0px 0px 15px #000;
-webkit-box-shadow: 0px 0px 15px #000;
}
.baloon {
margin: 5px;
border: 1px solid transparent;
}.title {
padding: 5px 0px 0px 5px;
text-align: left;
font: bold 1.1em "Trebuchet MS", Helvetica, Sans-Serif;
background: -webkit-gradient(linear, left top, left bottom, from(#eeeeec), to(#babdb6));
-webkit-border-radius: 7px 7px 0px 0px;
-webkit-transition: all 0.1s ease-out;
}
</style></head>"""
HTML += """<body><br><div class="core"><div class="baloon"><div class="title">"""
HTML += """Welcome to GreatDiary: Your secret diary!</div>"""
HTML += """This is your secret diary, you can write everything you want: your emotions are safe there and are crypted by your password!<br><br> """
HTML += """<b>It's easy to use</b>: like a diary you can browse the pages by pressing the """
HTML += """two button with the arrows. You can write by clicking to the add-button in the bottom-center of this window and then save"""
HTML += """ your page with the save-button.</div></div>"""
HTML += """<div style="position: fixed; margin: auto; width: 100%; top: auto; right: 0; bottom: 0; left: 0; background-color: #3b5998;"""
HTML += """ border: 0px solid #000; box-shadow:0px 0px 15px #000;"""
HTML += """ -webkit-box-shadow: 0px 0px 15px #000; padding: 5px 10px; color: white;"></div></body></html>"""
self.webkit.load_string(HTML, "text/html", "iso-8859-15", "intro")
self.number_entry.set_editable(False)
self.number_entry.set_text(str(len(self.manager.pages.keys())+1))
self.__number = len(self.manager.pages.keys())+1
if __name__ == "__main__":
DEF_DB_PATH = "./database.sql"
if len(sys.argv) > 1:
DEF_DB_PATH = " ".join(sys.argv[1:])
if not os.path.isfile(DEF_DB_PATH):
dialog_info("This is thefirst time that you run Great Diary. Now we are going to generate the database and then we will crypt them by a password.")
print "Generating the database:",
database = sqlite.connect(DEF_DB_PATH)
cursor = database.cursor()
while 1:
password = dialog_get_password(motivo="This will be used to crypt the pages and database.")
if len(password) > 3:
break
else:
dialog_info("The password must be longer than 3 lecters")
salt = get_salt()
cursor.execute("CREATE TABLE pages (number INTEGER NOT NULL PRIMARY KEY, date TEXT NOT NULL, text TEXT NOT NULL)")
cursor.execute("CREATE TABLE settings (key TEXT NOT NULL, value TEXT NOT NULL)")
cursor.execute("INSERT INTO settings (key, value) VALUES ('is_crypted', 'True')")
cursor.execute("INSERT INTO settings (key, value) VALUES ('salt', '%s')" % salt)
cursor.execute("INSERT INTO settings (key, value) VALUES ('password', '%s')" % crypt.crypt(password,salt) )
database.commit()
database.close()
print "done"
dialog_info("Done! Everything is OK! Now you can use GreatDiary")
c = Gui(database_path=DEF_DB_PATH)
gtk.main()
| gpl-3.0 | 8,236,639,972,998,706,000 | 36.992823 | 155 | 0.601914 | false |
Sebastian-ba/DoDoBing | red-scare/src/test_output.py | 1 | 1050 |
from main import *
def test_output_1():
print()
res = output("Testfile", 43, True, 3,2,1, False, False)
res1 = output("Testfile", 43, True, 3,2,1, False, False)
res2 = output("Testfile", 43, True, 3,2,1, False, False)
res3 = output("Testfile", 43, True, 3,2,1, False, False)
res4 = output("Testfile", 43, True, 3,2,1, False, False)
res5 = output("Testfile", 43, True, 3,2,1, False, False)
print(res)
print(res1)
print(res2)
print(res3)
print(res4)
print(res5)
def test_output_2():
print()
res = output("Testfile", 43, True, 3,2,1, False, True)
res1 = output("Testfile", 43, True, 3,2,1, False, True)
res2 = output("Testfile", 43, True, 3,2,1, False, True)
res3 = output("Testfile", 43, True, 3,2,1, False, True)
res4 = output("Testfile", 43, True, 3,2,1, False, True)
res5 = output("Testfile", 43, True, 3,2,1, False, True)
print(res)
print(res1)
print(res2)
print(res3)
print(res4)
print(res5)
| mit | -484,905,052,207,816,100 | 28 | 60 | 0.565714 | false |
egh/spydaap | spydaap/parser/vorbis.py | 1 | 3664 | # Copyright (C) 2008 Erik Hetzner
# This file is part of Spydaap. Spydaap is free software: you can
# redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
# Spydaap is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Spydaap. If not, see <http://www.gnu.org/licenses/>.
import mutagen
import spydaap
import re
import os
from spydaap.daap import do
# * TODO Implement song.songtrackcount, song.disccount
# daap.songbeatsperminute
# daap.songcomment
# daap.songdateadded
# daap.songdatemodified,
# daap.songdisabled,
# daap.songeqpreset
# daap.songformat
# daap.songdescription
# daap.songrelativevolume,
# daap.songsize,
# daap.songstarttime,
# daap.songstoptime,
# daap.songtime,
# daap.songuserrating,
# daap.songdatakind,
# daap.songdataurl
class VorbisParser(spydaap.parser.Parser):
vorbis_string_map = {
'grouping': 'daap.songgrouping',
'title': 'dmap.itemname',
'artist': 'daap.songartist',
'composer': 'daap.songcomposer',
'genre': 'daap.songgenre',
'album': 'daap.songalbum',
'albumartist': 'daap.songalbumartist',
}
vorbis_int_map = {
'bpm': 'daap.songbeatsperminute',
'date': 'daap.songyear',
'year': 'daap.songyear',
'compilation': 'daap.songcompilation',
}
def handle_track(self, flac, d):
tracknumber = None
trackcount = None
if 'tracknumber' in flac.tags:
            t = str(flac.tags['tracknumber'][0]).split('/')
tracknumber = self.my_int(t[0])
if (len(t) == 2):
trackcount = self.my_int(t[1])
if 'tracktotal' in flac.tags:
trackcount = self.my_int(flac.tags['tracktotal'])
if tracknumber:
d.append(do('daap.songtracknumber', tracknumber))
if trackcount:
d.append(do('daap.songtrackcount', trackcount))
def handle_disc(self, flac, d):
discnumber = None
disccount = None
if 'discnumber' in flac.tags:
t = unicode(flac.tags['discnumber'][0]).split('/')
discnumber = self.my_int(t[0])
if (len(t) == 2):
disccount = self.my_int(t[1])
if 'disctotal' in flac.tags:
disccount = self.my_int(flac.tags['disctotal'])
if discnumber:
d.append(do('daap.songdiscnumber', discnumber))
if disccount:
d.append(do('daap.songdisccount', disccount))
file_re = re.compile(".*\\.([fF][lL][aA][cC]|[oO][gG]{2})$")
def understands(self, filename):
return self.file_re.match(filename)
def parse(self, filename):
md = mutagen.File(filename)
d = []
if md.tags is not None:
self.handle_string_tags(self.vorbis_string_map, md, d)
self.handle_int_tags(self.vorbis_int_map, md, d)
self.handle_track(md, d)
self.handle_disc(md, d)
self.add_file_info(filename, d)
d.extend([do('daap.songtime', md.info.length * 1000),
do('daap.songsamplerate', md.info.sample_rate)])
name = self.set_itemname_if_unset(os.path.basename(filename), d)
if hasattr(self, 'parse_extra_vorbis'):
self.parse_extra_vorbis(filename, md, d)
return (d, name)
| gpl-3.0 | -2,897,420,776,476,224,500 | 32.925926 | 72 | 0.623908 | false |
BillClyde/safenetfs | safenet/api/directory.py | 1 | 4211 | import safenet.api
import requests
import json
from StringIO import StringIO
import base64
__author__ = "William Clyde"
__copyright__ = "Copyright 2016, William Clyde"
__license__ = "MIT"
class Directory:
"""Directory management"""
def __init__(self):
"""__init__"""
self.headers = {'content-type': 'application/json',
'authorization':
'Bearer {0}'.format(safenet.api.getToken())}
def create(self, path, meta_data, is_private=True):
"""Create a new directory
Parameters
----------
:param path: string
path of new directory
:param meta_data: string
optional directory information
:param is_private: bool
marks the file as private
Returns
-------
bool
True if successful, False otherwise
"""
privacy = "true" if is_private else "false"
data = """{{ "isPrivate": {privacy}, "metadata": "{meta_data}" }}"""
response = requests.post(safenet.api.DRIVE_DIR_URL + path,
headers=self.headers,
                                 data=data.format(privacy=privacy, meta_data=base64.b64encode(meta_data)))
if response.status_code == 200:
return True
return False
def get(self, path):
"""Get directory at path
Parameters
----------
:param path: string
path to directory
"""
response = requests.get(safenet.api.DRIVE_DIR_URL + path, headers=self.headers)
if response.status_code == 200:
return json.load(StringIO(response.text))
else:
return response.reason
def update(self, path, new_name, meta_data):
"""Update the name of the directory
Parameters
----------
:param path: string
path to directory
:param new_name: string
updated directory name
:param meta_data: string
optional directory information
Returns
-------
bool
True if successful, otherwise False
"""
data = """{{ "name":"{new_name}", "metadata":"{meta_data}" }}"""
response = requests.put(safenet.api.DRIVE_DIR_URL + path,
data=data.format(new_name=new_name,
meta_data=base64.b64encode(meta_data)),
headers=self.headers)
if response.status_code == 200:
return True
return False
def move(self, src_path, dest_path, copy=False):
"""Move directory to new location with optional copy
Parameters
----------
:param src_path: string
current path to directory
:param dest_path: string
new path to directory
:param copy: bool
copy file instead of moving
Returns
-------
bool
True if successful, otherwise False
"""
action = "copy" if copy else "move"
data = """{{ "srcRootPath":"drive",
"srcPath":"{src_path}",
"destRootPath":"drive",
"destPath":"{dest_path}",
"action":"{action}" }} """.format(src_path=src_path,
dest_path=dest_path,
action=action)
response = requests.post(safenet.api.DIR_URL + "/movedir",
data=data,
headers=self.headers)
if response.status_code == 200:
return True
return False
def delete(self, path):
"""delete
Parameters
----------
:param path: string
path of the directory to delete
Returns
-------
bool
True if successful, otherwise False
"""
response = requests.delete(safenet.api.DRIVE_DIR_URL + path, headers=self.headers)
if response.status_code == 200:
return True
return False
| mit | 7,075,495,754,665,540,000 | 28.243056 | 98 | 0.495607 | false |
stephane-martin/salt-debian-packaging | salt-2016.3.3/salt/states/ssh_known_hosts.py | 1 | 7898 | # -*- coding: utf-8 -*-
'''
Control of SSH known_hosts entries
==================================
Manage the information stored in the known_hosts files.
.. code-block:: yaml
github.com:
ssh_known_hosts:
- present
- user: root
- fingerprint: 16:27:ac:a5:76:28:2d:36:63:1b:56:4d:eb:df:a6:48
example.com:
ssh_known_hosts:
- absent
- user: root
'''
from __future__ import absolute_import
# Import python libs
import os
# Import salt libs
import salt.utils
from salt.exceptions import CommandNotFoundError
def present(
name,
user=None,
fingerprint=None,
key=None,
port=None,
enc=None,
config=None,
hash_hostname=True,
hash_known_hosts=True,
timeout=5):
'''
Verifies that the specified host is known by the specified user
On many systems, specifically those running with openssh 4 or older, the
``enc`` option must be set, only openssh 5 and above can detect the key
type.
name
The name of the remote host (e.g. "github.com")
user
The user who owns the ssh authorized keys file to modify
fingerprint
The fingerprint of the key which must be present in the known_hosts
file (optional if key specified)
key
The public key which must be present in the known_hosts file
(optional if fingerprint specified)
port
optional parameter, port which will be used to when requesting the
public key from the remote host, defaults to port 22.
enc
Defines what type of key is being used, can be ed25519, ecdsa ssh-rsa
or ssh-dss
config
The location of the authorized keys file relative to the user's home
directory, defaults to ".ssh/known_hosts". If no user is specified,
defaults to "/etc/ssh/ssh_known_hosts". If present, must be an
absolute path when a user is not specified.
hash_hostname : True
Hash all hostnames and addresses in the known hosts file.
.. deprecated:: Carbon
Please use hash_known_hosts instead.
hash_known_hosts : True
Hash all hostnames and addresses in the known hosts file.
timeout : int
Set the timeout for connection attempts. If ``timeout`` seconds have
elapsed since a connection was initiated to a host or since the last
time anything was read from that host, then the connection is closed
and the host in question considered unavailable. Default is 5 seconds.
.. versionadded:: 2016.3.0
'''
ret = {'name': name,
'changes': {},
'result': None if __opts__['test'] else True,
'comment': ''}
if not user:
config = config or '/etc/ssh/ssh_known_hosts'
else:
config = config or '.ssh/known_hosts'
if not user and not os.path.isabs(config):
comment = 'If not specifying a "user", specify an absolute "config".'
ret['result'] = False
return dict(ret, comment=comment)
if not hash_hostname:
salt.utils.warn_until(
'Carbon',
'The hash_hostname parameter is misleading as ssh-keygen can only '
'hash the whole known hosts file, not entries for individual '
'hosts. Please use hash_known_hosts=False instead.')
hash_known_hosts = hash_hostname
if __opts__['test']:
if key and fingerprint:
comment = 'Specify either "key" or "fingerprint", not both.'
ret['result'] = False
return dict(ret, comment=comment)
elif key and not enc:
comment = 'Required argument "enc" if using "key" argument.'
ret['result'] = False
return dict(ret, comment=comment)
try:
result = __salt__['ssh.check_known_host'](user, name,
key=key,
fingerprint=fingerprint,
config=config,
port=port)
except CommandNotFoundError as err:
ret['result'] = False
ret['comment'] = 'ssh.check_known_host error: {0}'.format(err)
return ret
if result == 'exists':
comment = 'Host {0} is already in {1}'.format(name, config)
ret['result'] = True
return dict(ret, comment=comment)
elif result == 'add':
comment = 'Key for {0} is set to be added to {1}'.format(name,
config)
return dict(ret, comment=comment)
else: # 'update'
comment = 'Key for {0} is set to be updated in {1}'.format(name,
config)
return dict(ret, comment=comment)
result = __salt__['ssh.set_known_host'](user=user, hostname=name,
fingerprint=fingerprint,
key=key,
port=port,
enc=enc,
config=config,
hash_known_hosts=hash_known_hosts,
timeout=timeout)
if result['status'] == 'exists':
return dict(ret,
comment='{0} already exists in {1}'.format(name, config))
elif result['status'] == 'error':
return dict(ret, result=False, comment=result['error'])
else: # 'updated'
if key:
new_key = result['new']['key']
return dict(ret,
changes={'old': result['old'], 'new': result['new']},
comment='{0}\'s key saved to {1} (key: {2})'.format(
name, config, new_key))
else:
fingerprint = result['new']['fingerprint']
return dict(ret,
changes={'old': result['old'], 'new': result['new']},
comment='{0}\'s key saved to {1} (fingerprint: {2})'.format(
name, config, fingerprint))
def absent(name, user=None, config=None):
'''
Verifies that the specified host is not known by the given user
name
The host name
user
The user who owns the ssh authorized keys file to modify
config
The location of the authorized keys file relative to the user's home
directory, defaults to ".ssh/known_hosts". If no user is specified,
defaults to "/etc/ssh/ssh_known_hosts". If present, must be an
absolute path when a user is not specified.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if not user:
config = config or '/etc/ssh/ssh_known_hosts'
else:
config = config or '.ssh/known_hosts'
if not user and not os.path.isabs(config):
comment = 'If not specifying a "user", specify an absolute "config".'
ret['result'] = False
return dict(ret, comment=comment)
known_host = __salt__['ssh.get_known_host'](user=user, hostname=name, config=config)
if not known_host:
return dict(ret, comment='Host is already absent')
if __opts__['test']:
comment = 'Key for {0} is set to be removed from {1}'.format(name,
config)
ret['result'] = None
return dict(ret, comment=comment)
rm_result = __salt__['ssh.rm_known_host'](user=user, hostname=name, config=config)
if rm_result['status'] == 'error':
return dict(ret, result=False, comment=rm_result['error'])
else:
return dict(ret,
changes={'old': known_host, 'new': None},
result=True,
comment=rm_result['comment'])
| apache-2.0 | -8,825,735,002,828,564,000 | 33.640351 | 88 | 0.546341 | false |
codilime/cloudify-system-tests | cosmo_tester/test_suites/test_security/auth_test_base.py | 1 | 22487 | ########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
#
import contextlib
from cStringIO import StringIO
import os
import sys
from sh import ErrorReturnCode
from cloudify_rest_client.client import CloudifyClient
from cloudify_rest_client.exceptions import UserUnauthorizedError
from cosmo_tester.framework import util
from cosmo_tester.test_suites.test_security import security_test_base
RUNNING_EXECUTIONS_MESSAGE = 'There are running executions for this deployment'
class BaseAuthTest(security_test_base.SecurityTestBase):
admin_username = 'alice'
admin_password = 'alice_password'
deployer_username = 'bob'
deployer_password = 'bob_password'
viewer_username = 'clair'
viewer_password = 'clair_password'
no_role_username = 'dave'
no_role_password = 'dave_password'
def _test_authentication_and_authorization(self, assert_token=None):
self._test_authentication(assert_token=assert_token)
self._test_authorization()
def _test_authorization(self):
# setup temp blueprint
self.blueprints_dir = self.copy_mocks_blueprints_dir()
self.blueprint_path = '{0}/empty-blueprint.yaml'\
.format(self.blueprints_dir)
self.blueprint_yaml = self.blueprint_path
# start authorization assertions
self._assert_blueprint_operations()
self._assert_deployment_operations()
self._assert_execution_operations()
def copy_mocks_blueprints_dir(self):
dest_path = '{0}/{1}'.format(self.workdir, 'mocks')
if not os.path.exists(dest_path):
dest_path = self.copy_blueprint('mocks')
return dest_path
def _test_authentication(self, assert_token=None):
self._assert_valid_credentials_authenticate()
self._assert_invalid_credentials_fails()
self._assert_empty_credentials_fails()
self._assert_no_credentials_or_token_fails()
if assert_token:
self._assert_valid_token_authenticates()
self._assert_invalid_token_fails()
self._assert_empty_token_fails()
def _assert_blueprint_operations(self):
blueprint_ids = self._assert_upload_blueprint()
self._assert_list_blueprint(blueprint_ids)
self._assert_get_blueprint(blueprint_ids[0])
self._assert_delete_blueprint(blueprint_ids[0])
# cleanup
self._login_cli(self.admin_username, self.admin_password)
# item 0 has already been deleted in _assert_delete_blueprint
for blueprint_id in blueprint_ids[1:]:
self.cfy.delete_blueprint(blueprint_id)
def _assert_deployment_operations(self):
blueprint_id = 'test_deployment_blueprint1'
# setup
self._login_cli(self.admin_username, self.admin_password)
self.cfy.upload_blueprint(blueprint_id, self.blueprint_path)
# test
deployment_ids = self._assert_create_deployment(blueprint_id)
self._assert_list_deployment(deployment_ids)
self._assert_delete_deployment(deployment_ids)
# cleanup
self._login_cli(self.admin_username, self.admin_password)
# item 0 has already been deleted in _assert_delete_deployment
for deployment_id in deployment_ids[1:]:
self.cfy.delete_deployment(deployment_id)
self.cfy.delete_blueprint(blueprint_id)
def _assert_execution_operations(self):
blueprint_id = 'test_execution_blueprint1'
deployment_ids = ['test_execution_deployment1',
'test_execution_deployment2',
'test_execution_deployment3']
# setup
self._login_cli(self.admin_username, self.admin_password)
self.cfy.upload_blueprint(blueprint_id, self.blueprint_path)
for deployment_id in deployment_ids:
self.cfy.create_deployment(blueprint_id, deployment_id)
self.wait_until_all_deployment_executions_end(
deployment_id=deployment_id,
verify_no_failed_execution=True)
# test
self._assert_start_execution(deployment_ids)
execution_ids = self._get_execution_ids()
self._assert_list_executions(execution_ids)
self._assert_get_execution(execution_ids[0])
self._assert_cancel_executions(execution_id1=execution_ids[0],
execution_id2=execution_ids[1])
# cleanup
self._login_cli(self.admin_username, self.admin_password)
for deployment_id in deployment_ids:
self.wait_until_all_deployment_executions_end(deployment_id)
self.cfy.delete_deployment(deployment_id, ignore_live_nodes=True)
self.cfy.delete_blueprint(blueprint_id)
##############################
# blueprint tests
##############################
def _assert_upload_blueprint(self):
def _upload_and_assert(blueprint_id):
out, err = self._execute_and_get_streams(self.cfy.upload_blueprint,
blueprint_id,
self.blueprint_path)
self._assert_in_output(out, 'Uploaded blueprint')
self.assertEqual('', err)
# admins and deployers should be able to upload blueprints...
blueprint1_id = 'blueprint1_id'
blueprint2_id = 'blueprint2_id'
self._login_cli(self.admin_username, self.admin_password)
_upload_and_assert(blueprint1_id)
self._login_cli(self.deployer_username, self.deployer_password)
_upload_and_assert(blueprint2_id)
# ...but viewers and simple users should not
self._login_cli(self.viewer_username, self.viewer_password)
self._assert_unauthorized(self.cfy.upload_blueprint,
'dummy_bp',
self.blueprint_path)
self._login_cli(self.no_role_username, self.no_role_password)
self._assert_unauthorized(self.cfy.upload_blueprint,
'dummy_bp',
self.blueprint_path)
return blueprint1_id, blueprint2_id
def _assert_list_blueprint(self, blueprint_ids):
def _list_and_assert():
out, err = self._execute_and_get_streams(self.cfy.list_blueprints)
self._assert_in_output(out, *blueprint_ids)
self.assertEqual('', err)
# admins, deployers and viewers should be able to list blueprints...
self._login_cli(self.admin_username, self.admin_password)
_list_and_assert()
self._login_cli(self.deployer_username, self.deployer_password)
_list_and_assert()
self._login_cli(self.viewer_username, self.viewer_password)
_list_and_assert()
# ...but simple users should not
self._login_cli(self.no_role_username, self.no_role_password)
self._assert_unauthorized(self.cfy.list_blueprints)
def _assert_get_blueprint(self, blueprint_id):
def _get_and_assert():
out, err = self._execute_and_get_streams(
self.cfy.get_blueprint, blueprint_id)
self._assert_in_output(out, blueprint_id)
self.assertEqual('', err)
# admins, deployers and viewers should be able to get blueprints...
self._login_cli(self.admin_username, self.admin_password)
_get_and_assert()
self._login_cli(self.deployer_username, self.deployer_password)
_get_and_assert()
self._login_cli(self.viewer_username, self.viewer_password)
_get_and_assert()
# ...but simple users should not
self._login_cli(self.no_role_username, self.no_role_password)
self._assert_unauthorized(self.cfy.get_blueprint, blueprint_id)
def _assert_delete_blueprint(self, blueprint_id):
def _delete_and_assert():
out, err = self._execute_and_get_streams(
self.cfy.delete_blueprint, blueprint_id)
self._assert_in_output(out, 'Deleted blueprint successfully')
self.assertEqual('', err)
# admins should be able to delete blueprints...
        self._login_cli(self.admin_username, self.admin_password)
_delete_and_assert()
# ...but deployers, viewers and simple users should not
self._login_cli(self.deployer_username, self.deployer_password)
self._assert_unauthorized(self.cfy.delete_blueprint, blueprint_id)
self._login_cli(self.viewer_username, self.viewer_password)
self._assert_unauthorized(self.cfy.delete_blueprint, blueprint_id)
self._login_cli(self.no_role_username, self.no_role_password)
self._assert_unauthorized(self.cfy.delete_blueprint, blueprint_id)
##############################
# deployment tests
##############################
def _assert_create_deployment(self, blueprint_id):
def _create_and_assert(deployment_id):
out, err = self._execute_and_get_streams(
self.cfy.create_deployment, blueprint_id, deployment_id)
self._assert_in_output(out, 'Deployment created')
# polling for deployments requires an authorized client
self._login_client(username=self.admin_username,
password=self.admin_password)
self.wait_until_all_deployment_executions_end(deployment_id)
self.assertEqual('', err)
# admins and deployers should be able to create deployments...
deployment1_id = 'deployment1'
deployment2_id = 'deployment2'
self._login_cli(self.admin_username, self.admin_password)
_create_and_assert(deployment1_id)
self._login_cli(self.deployer_username, self.deployer_password)
_create_and_assert(deployment2_id)
# ...but viewers and simple users should not
self._login_cli(self.viewer_username, self.viewer_password)
self._assert_unauthorized(self.cfy.create_deployment,
blueprint_id,
'dummy_dp')
self._login_cli(self.no_role_username, self.no_role_password)
self._assert_unauthorized(self.cfy.create_deployment,
blueprint_id,
'dummy_dp')
return deployment1_id, deployment2_id
def _assert_list_deployment(self, deployment_ids):
def _list_and_assert():
out, err = self._execute_and_get_streams(self.cfy.list_deployments)
self._assert_in_output(out, *deployment_ids)
self.assertEqual('', err)
# admins, deployers and viewers should be able to list deployments...
self._login_cli(self.admin_username, self.admin_password)
_list_and_assert()
self._login_cli(self.deployer_username, self.deployer_password)
_list_and_assert()
self._login_cli(self.viewer_username, self.viewer_password)
_list_and_assert()
# ...but simple users should not
self._login_cli(self.no_role_username, self.no_role_password)
self._assert_unauthorized(self.cfy.list_deployments)
def _assert_delete_deployment(self, deployment_ids):
def _delete_and_assert():
out, err = self._execute_and_get_streams(
self.cfy.delete_deployment, deployment_ids[0])
self._assert_in_output(out, 'Deleted deployment successfully')
self.assertEqual('', err)
# admins should be able to delete deployments...
self._login_cli(self.admin_username, self.admin_password)
_delete_and_assert()
# ...but deployers, viewers and simple users should not
self._login_cli(self.deployer_username, self.deployer_password)
self._assert_unauthorized(self.cfy.delete_deployment,
deployment_ids[1])
self._login_cli(self.viewer_username, self.viewer_password)
self._assert_unauthorized(self.cfy.delete_deployment,
deployment_ids[1])
self._login_cli(self.no_role_username, self.no_role_password)
self._assert_unauthorized(self.cfy.delete_deployment,
deployment_ids[1])
##############################
# execution tests
##############################
def _assert_start_execution(self, deployment_ids):
workflow = 'install'
def _start_and_assert(deployment_id):
out, err = self._execute_and_get_streams(
self.cfy.execute_workflow, workflow, deployment_id)
self._assert_in_output(out, 'Finished executing workflow')
self.assertEqual('', err)
# admins and deployers should be able to start executions...
self._login_cli(self.admin_username, self.admin_password)
_start_and_assert(deployment_ids[0])
self._login_cli(self.deployer_username, self.deployer_password)
_start_and_assert(deployment_ids[1])
# ...but viewers and simple users should not
self._login_cli(self.viewer_username, self.viewer_password)
self._assert_unauthorized(
self.cfy.execute_workflow, workflow, deployment_ids[2])
self._login_cli(self.no_role_username, self.no_role_password)
self._assert_unauthorized(
self.cfy.execute_workflow, workflow, deployment_ids[2])
def _assert_list_executions(self, execution_ids):
def _list_and_assert():
out, err = self._execute_and_get_streams(self.cfy.list_executions)
self._assert_in_output(out, *execution_ids)
self.assertEqual('', err)
# admins, deployers and viewers should be able so list executions...
self._login_cli(self.admin_username, self.admin_password)
_list_and_assert()
self._login_cli(self.deployer_username, self.deployer_password)
_list_and_assert()
self._login_cli(self.viewer_username, self.viewer_password)
_list_and_assert()
# ...but simple users should not
self._login_cli(self.no_role_username, self.no_role_password)
self._assert_unauthorized(self.cfy.list_executions)
def _assert_get_execution(self, execution_id):
def _get_and_assert():
out, err = self._execute_and_get_streams(
self.cfy.get_execution, execution_id)
self._assert_in_output(out, execution_id)
self.assertEqual('', err)
# admins, deployers and viewers should be able to get executions...
self._login_cli(self.admin_username, self.admin_password)
_get_and_assert()
self._login_cli(self.deployer_username, self.deployer_password)
_get_and_assert()
self._login_cli(self.viewer_username, self.viewer_password)
_get_and_assert()
# ...but simple users should not
self._login_cli(self.no_role_username, self.no_role_password)
self._assert_unauthorized(self.cfy.get_execution, execution_id)
def _assert_cancel_executions(self, execution_id1, execution_id2):
def _cancel_and_assert(execution_id):
out, err = self._execute_and_get_streams(
self.cfy.cancel_execution, execution_id)
cancelling_msg = 'A cancel request for execution {0} has been' \
' sent'.format(execution_id)
already_terminated_msg = 'in status terminated'
if cancelling_msg not in out and already_terminated_msg not in out:
self.fail('failed to cancel execution {0}, output: {1}'.
format(execution_id, out))
self.assertEqual('', err)
# admins and deployers should be able to cancel executions...
self._login_cli(self.admin_username, self.admin_password)
_cancel_and_assert(execution_id1)
self._login_cli(self.deployer_username, self.deployer_password)
_cancel_and_assert(execution_id2)
# ...but viewers and simple users should not
self._login_cli(self.viewer_username, self.viewer_password)
self._assert_unauthorized(self.cfy.cancel_execution, execution_id1)
self._login_cli(self.no_role_username, self.no_role_password)
self._assert_unauthorized(self.cfy.cancel_execution, execution_id1)
###############################
# utility methods and wrappers
###############################
@contextlib.contextmanager
def _capture_streams(self):
old_out = sys.stdout
old_err = sys.stderr
try:
out, err = StringIO(), StringIO()
sys.stdout = out
sys.stderr = err
yield out, err
finally:
sys.stdout = old_out
sys.stderr = old_err
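    # _capture_streams() above swaps sys.stdout/sys.stderr for StringIO buffers
    # for the duration of the with-block. A hypothetical illustration (not part
    # of the original suite):
    #
    #   with self._capture_streams() as (out, err):
    #       print('hello')
    #   assert 'hello' in out.getvalue()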
def _execute_and_get_streams(self, method, *args):
with self._capture_streams() as (out, err):
try:
method(*args)
except ErrorReturnCode:
pass
except UserUnauthorizedError as e:
out.write(str(e))
return out.getvalue(), err.getvalue()
def _assert_in_output(self, out, *output_values):
for value in output_values:
self.assertIn(value, out)
def _login_cli(self, username=None, password=None):
self.logger.info('performing login to CLI with username: {0}, '
'password: {1}'.format(username, password))
os.environ['CLOUDIFY_USERNAME'] = username
os.environ['CLOUDIFY_PASSWORD'] = password
def _login_client(self, username=None, password=None, token=None):
self.logger.info('performing login to test client with username: {0}, '
'password: {1}, token: {2}'
.format(username, password, token))
self.client = self._create_client(username=username,
password=password,
token=token)
def _create_client(self, username=None, password=None, token=None):
user_pass_header = util.get_auth_header(username=username,
password=password,
token=token)
return CloudifyClient(host=self.env.management_ip,
headers=user_pass_header)
def _get_execution_ids(self):
alice_client = self._create_client(self.admin_username,
self.admin_password)
return [execution.id for execution in alice_client.executions.list()]
def _assert_valid_credentials_authenticate(self):
self._login_client(username=self.admin_username,
password=self.admin_password)
self._assert_authorized()
def _assert_invalid_credentials_fails(self):
self._login_client(username='wrong_username',
password='wrong_password')
self._assert_unauthorized(self.client.manager.get_status)
def _assert_empty_credentials_fails(self):
self._login_client(username='',
password='')
self._assert_unauthorized(self.client.manager.get_status)
def _assert_valid_token_authenticates(self):
client = self._create_client(self.admin_username, self.admin_password)
token = client.tokens.get().value
self._login_client(token=token)
self._assert_authorized()
def _assert_invalid_token_fails(self):
self._login_client(token='wrong_token')
self._assert_unauthorized(self.client.manager.get_status)
def _assert_empty_token_fails(self):
self._login_client(token='')
self._assert_unauthorized(self.client.manager.get_status)
def _assert_no_credentials_or_token_fails(self):
self.client = CloudifyClient(host=self.env.management_ip)
self._assert_unauthorized(self.client.manager.get_status)
def _assert_authorized(self):
response = self.client.manager.get_status()
if not response['status'] == 'running':
self.fail('Failed to get manager status using username and '
'password')
def _assert_unauthorized(self, method, *args):
out, err = self._execute_and_get_streams(method, *args)
self.assertIn('401: user unauthorized', out)
self.assertEqual('', err)
def get_userstore_users(self):
return [
{
'username': self.admin_username,
'password': self.admin_password,
'groups': [
'cfy_admins'
]
},
{
'username': self.deployer_username,
'password': self.deployer_password,
'groups': [
'cfy_deployers'
]
},
{
'username': self.viewer_username,
'password': self.viewer_password,
'groups': [
'cfy_viewer'
]
},
{
'username': self.no_role_username,
'password': self.no_role_password,
'groups': ['users']
}
]
def get_userstore_groups(self):
return [
{
'name': 'cfy_admins',
'roles': [
'administrator'
]
},
{
'name': 'cfy_deployers',
'roles': [
'deployer'
]
},
{
'name': 'cfy_viewer',
'roles': [
'viewer'
]
}
]
| apache-2.0 | -227,918,180,580,074,000 | 38.659612 | 79 | 0.596256 | false |
lilydjwg/xmpptalk | main.py | 1 | 16436 | #!/usr/bin/env python3
# vim:fileencoding=utf-8
#
# (C) Copyright 2012 lilydjwg <[email protected]>
#
# This file is part of xmpptalk.
#
# xmpptalk is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# xmpptalk is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with xmpptalk. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import logging
import datetime
import base64
import hashlib
from collections import defaultdict
from functools import partial
from xml.etree import ElementTree as ET
import pyxmpp2.exceptions
from pyxmpp2.jid import JID
from pyxmpp2.message import Message
from pyxmpp2.presence import Presence
from pyxmpp2.client import Client
from pyxmpp2.settings import XMPPSettings
from pyxmpp2.roster import RosterReceivedEvent
from pyxmpp2.interfaces import EventHandler, event_handler, QUIT, NO_CHANGE
from pyxmpp2.streamevents import AuthorizedEvent, DisconnectedEvent
from pyxmpp2.interfaces import XMPPFeatureHandler
from pyxmpp2.interfaces import presence_stanza_handler, message_stanza_handler
from pyxmpp2.ext.version import VersionProvider
from pyxmpp2.expdict import ExpiringDictionary
from pyxmpp2.iq import Iq
try:
from xmpp_receipt import ReceiptSender
except ImportError:
ReceiptSender = None
from misc import *
import config
import models
from models import ValidationError
from messages import MessageMixin
from user import UserMixin
if getattr(config, 'conn_lost_interval_minutes', False):
conn_lost_interval = datetime.timedelta(minutes=config.conn_lost_interval_minutes)
else:
conn_lost_interval = None
class ChatBot(MessageMixin, UserMixin, EventHandler, XMPPFeatureHandler):
got_roster = False
message_queue = None
receipt_sender = None
ignore = set()
def __init__(self, jid, settings, botsettings=None):
if 'software_name' not in settings:
settings['software_name'] = self.__class__.__name__
if 'software_version' not in settings:
settings['software_version'] = __version__
version_provider = VersionProvider(settings)
handlers = []
if ReceiptSender:
self.receipt_sender = rs = ReceiptSender()
handlers.append(rs)
handlers.extend([self, version_provider])
self.client = Client(jid, handlers, settings)
self.presence = defaultdict(dict)
self.subscribes = ExpiringDictionary(default_timeout=5)
self.invited = {}
self.avatar_hash = None
self.settings = botsettings
def run(self):
self.client.connect()
self.jid = self.client.jid.bare()
logger.info('self jid: %r', self.jid)
self.update_on_setstatus = set()
if self.receipt_sender:
self.receipt_sender.stream = self.client.stream
self.client.run()
def disconnect(self):
'''Request disconnection and let the main loop run for a 2 more
seconds for graceful disconnection.'''
self.client.disconnect()
while True:
try:
self.client.run(timeout = 2)
except pyxmpp2.exceptions.StreamParseError:
# we raise SystemExit to exit, expat says XML_ERROR_FINISHED
pass
else:
break
def handle_early_message(self):
self.got_roster = True
q = self.message_queue
if q:
self.now = datetime.datetime.utcnow()
for sender, stanza in q:
self.current_jid = sender
self._cached_jid = None
try:
timestamp = stanza.as_xml().find('{urn:xmpp:delay}delay').attrib['stamp']
except AttributeError:
timestamp = None
self.handle_message(stanza.body, timestamp)
self.message_queue = self.__class__.message_queue = None
@event_handler(RosterReceivedEvent)
def roster_received(self, stanze):
self.delayed_call(2, self.handle_early_message)
self.delayed_call(getattr(config, 'reconnect_timeout', 24 * 3600), self.signal_connect)
nick, avatar_type, avatar_file = (getattr(config, x, None) for x in ('nick', 'avatar_type', 'avatar_file'))
if nick or (avatar_type and avatar_file):
self.set_vcard(nick, (avatar_type, avatar_file))
return True
def signal_connect(self):
logging.info('Schedule to re-connecting...')
self.client.disconnect()
@message_stanza_handler()
def message_received(self, stanza):
if stanza.stanza_type != 'chat':
return True
if not stanza.body:
logging.info("%s message: %s", stanza.from_jid, stanza.serialize())
return True
sender = stanza.from_jid
body = stanza.body
self.current_jid = sender
self.now = datetime.datetime.utcnow()
logging.info('[%s] %s', sender, stanza.body)
if '@' not in str(sender.bare()):
logging.info('(server messages ignored)')
return True
if str(sender.bare()) in self.ignore:
logging.info('(The above message is ignored on purpose)')
return True
    if getattr(config, 'ban_russian', False):
if str(sender.bare()).endswith('.ru'):
logging.info('(Russian messager banned)')
return True
elif is_russian(body):
logging.info('(Russian message banned)')
return True
if not self.got_roster:
if not self.message_queue:
self.message_queue = []
self.message_queue.append((sender, stanza))
else:
self.handle_message(body)
logging.info('done with new message')
return True
def send_message(self, receiver, msg):
if isinstance(receiver, str):
receiver = JID(receiver)
m = Message(
stanza_type = 'chat',
from_jid = self.jid,
to_jid = receiver,
body = msg,
)
self.send(m)
def reply(self, msg):
self.send_message(self.current_jid, msg)
def send(self, stanza):
self.client.stream.send(stanza)
def delayed_call(self, seconds, func, *args, **kwargs):
self.client.main_loop.delayed_call(seconds, partial(func, *args, **kwargs))
@event_handler(DisconnectedEvent)
def handle_disconnected(self, event):
return QUIT
@property
def roster(self):
return self.client.roster
def get_online_users(self):
ret = [x.jid for x in self.roster if x.subscription == 'both' and \
str(x.jid) in self.presence]
logging.info('%d online buddies: %r', len(ret), ret)
return ret
def get_xmpp_status(self, jid):
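    # pick the presence data of the resource announcing the highest priority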
return sorted(self.presence[str(jid)].values(), key=lambda x: x['priority'], reverse=True)[0]
def xmpp_setstatus(self, status, to_jid=None):
if isinstance(to_jid, str):
to_jid = JID(to_jid)
presence = Presence(status=status, to_jid=to_jid)
self.send(presence)
def update_roster(self, jid, name=NO_CHANGE, groups=NO_CHANGE):
self.client.roster_client.update_item(jid, name, groups)
def removeInvitation(self):
for ri in self.roster.values():
if ri.ask is not None:
self.client.roster_client.remove_item(ri.jid)
logging.info('%s removed', ri.jid)
def unsubscribe(self, jid, type='unsubscribe'):
presence = Presence(to_jid=jid, stanza_type=type)
self.send(presence)
def subscribe(self, jid):
self.invited[jid] = 2
presence = Presence(to_jid=jid, stanza_type='subscribe')
self.send(presence)
@presence_stanza_handler('subscribe')
def handle_presence_subscribe(self, stanza):
logging.info('%s subscribe', stanza.from_jid)
sender = stanza.from_jid
bare = sender.bare()
# avoid repeated request
invited = False
if bare not in self.subscribes:
invited = self.invited.get(bare, False)
if invited is not False:
if invited == 2:
self.invited[bare] = 1
else:
del self.invited[bare]
return stanza.make_accept_response()
# We won't deny inivted members
self.handle_userjoin_before()
else:
if config.private and str(bare) != config.root:
self.send_message(sender, _('Sorry, this is a private group, and you are not invited.'))
return stanza.make_deny_response()
if not self.handle_userjoin_before():
return stanza.make_deny_response()
self.current_jid = sender
self.now = datetime.datetime.utcnow()
try:
self.handle_userjoin(action=stanza.stanza_type)
except ValidationError:
#The server is subscribing
pass
self.subscribes[bare] = True
if stanza.stanza_type.endswith('ed'):
return stanza.make_accept_response()
if invited is False:
presence = Presence(to_jid=stanza.from_jid.bare(),
stanza_type='subscribe')
return [stanza.make_accept_response(), presence]
@presence_stanza_handler('subscribed')
def handle_presence_subscribed(self, stanza):
# use the same function
logging.info('%s subscribed', stanza.from_jid)
return self.handle_presence_subscribe(stanza)
@presence_stanza_handler('unsubscribe')
def handle_presence_unsubscribe(self, stanza):
logging.info('%s unsubscribe', stanza.from_jid)
sender = stanza.from_jid
self.current_jid = sender
self.now = datetime.datetime.utcnow()
self.handle_userleave(action=stanza.stanza_type)
if stanza.stanza_type.endswith('ed'):
return stanza.make_accept_response()
presence = Presence(to_jid=stanza.from_jid.bare(),
stanza_type='unsubscribe')
return [stanza.make_accept_response(), presence]
@presence_stanza_handler('unsubscribed')
def handle_presence_unsubscribed(self, stanza):
# use the same function
logging.info('%s unsubscribed', stanza.from_jid)
return self.handle_presence_unsubscribe(stanza)
@presence_stanza_handler()
def handle_presence_available(self, stanza):
if stanza.stanza_type not in ('available', None):
return False
jid = stanza.from_jid
plainjid = str(jid.bare())
if plainjid == str(self.jid):
return
self.now = datetime.datetime.utcnow()
if plainjid not in self.presence:
type = 'new'
self.current_jid = jid
self.user_update_presence(plainjid)
if conn_lost_interval and self.current_user and self.current_user.last_seen and \
self.now - self.current_user.last_seen < conn_lost_interval:
type = 'reconnect'
self.send_lost_message()
logging.info('%s[%s] (%s)', jid, stanza.show or 'available', type)
if self.roster and jid.bare() not in self.roster:
presence = Presence(to_jid=jid.bare(), stanza_type='subscribe')
self.send(presence)
presence = Presence(to_jid=jid.bare(), stanza_type='subscribed')
self.send(presence)
else:
if jid.resource not in self.presence[plainjid]:
self.user_update_presence(plainjid)
logging.info('%s[%s]', jid, stanza.show or 'available')
self.presence[plainjid][jid.resource] = {
'show': stanza.show,
'status': stanza.status,
'priority': stanza.priority,
}
if self.get_user_by_jid(plainjid) is None:
try:
self.current_jid = jid
self.handle_userjoin()
except ValidationError:
#The server is subscribing
pass
if config.warnv105 and jid.resource and \
jid.resource.startswith('Talk.') and not jid.resource.startswith('Talk.v104'):
# Got a Talk.v107...
# No need to translate; GTalk only has a v105 for Chinese.
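      # (Rough English gist of the Chinese warning below: "You may be using the
      # unencrypted GTalk v105; others on the network could intercept your
      # messages. Please use the English GTalk v104 or another XMPP client.")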
self.send_message(jid, '警告:你正在使用的可能是不加密的 GTalk v105 版本。网络上的其它人可能会截获您的消息。这样不安全!请使用 GTalk v104 英文版或者其它 XMPP 客户端。\nGTalk 英文版: http://www.google.com/talk/index.html\nPidgin: http://www.pidgin.im/')
return True
@presence_stanza_handler('unavailable')
def handle_presence_unavailable(self, stanza):
jid = stanza.from_jid
plainjid = str(jid.bare())
if plainjid in self.presence and plainjid != str(self.jid):
try:
del self.presence[plainjid][jid.resource]
except KeyError:
pass
if self.presence[plainjid]:
logging.info('%s[unavailable] (partly)', jid)
else:
del self.presence[plainjid]
self.now = datetime.datetime.utcnow()
self.user_disappeared(plainjid)
logging.info('%s[unavailable] (totally)', jid)
return True
@event_handler()
def handle_all(self, event):
'''Log all events.'''
logging.info('-- {0}'.format(event))
def get_name(self, jid):
if isinstance(jid, str):
jid = JID(jid)
else:
jid = jid.bare()
try:
return self.roster[jid].name or hashjid(jid)
except KeyError:
return hashjid(jid)
def get_vcard(self, jid=None, callback=None):
'''callback is used as both result handler and error handler'''
q = Iq(
to_jid = jid and jid.bare(),
stanza_type = 'get',
)
vc = ET.Element("{vcard-temp}vCard")
q.add_payload(vc)
if callback:
self.stanza_processor.set_response_handlers(q, callback, callback)
self.send(q)
def set_vcard(self, nick=None, avatar=None):
self.get_vcard(callback=partial(self._set_vcard, nick, avatar))
def _set_vcard(self, nick=None, avatar=None, stanza=None):
#FIXME: This doesn't seem to work with jabber.org
q = Iq(
from_jid = self.jid,
stanza_type = 'set',
)
vc = ET.Element("{vcard-temp}vCard")
if nick is not None:
n = ET.SubElement(vc, '{vcard-temp}FN')
n.text = nick
if avatar is not None:
type, picfile = avatar
photo = ET.SubElement(vc, '{vcard-temp}PHOTO')
t = ET.SubElement(photo, '{vcard-temp}TYPE')
t.text = type
d = ET.SubElement(photo, '{vcard-temp}BINVAL')
data = open(picfile, 'rb').read()
d.text = base64.b64encode(data).decode('ascii')
self.avatar_hash = hashlib.new('sha1', data).hexdigest()
q.add_payload(vc)
self.stanza_processor.set_response_handlers(
q, self._set_vcard_callback, self._set_vcard_callback)
self.send(q)
def _set_vcard_callback(self, stanza):
if stanza.stanza_type == 'error':
logging.error('failed to set my vCard.')
else:
logging.info('my vCard set.')
self.update_presence()
def update_presence(self):
#TODO: update for individual users
presence = self.settings['presence']
x = ET.Element('{vcard-temp:x:update}x')
if self.avatar_hash:
photo = ET.SubElement(x, '{vcard-temp:x:update}photo')
photo.text = self.avatar_hash
presence.add_payload(x)
self.send(presence)
def runit(settings, mysettings):
bot = ChatBot(JID(config.jid), settings, mysettings)
try:
bot.run()
# Connection resets
raise Exception
except SystemExit as e:
if e.code == CMD_RESTART:
# restart
bot.disconnect()
models.connection.disconnect()
try:
os.close(lock_fd[0])
except:
pass
logging.info('restart...')
os.execv(sys.executable, [sys.executable] + sys.argv)
except KeyboardInterrupt:
pass
finally:
ChatBot.message_queue = bot.message_queue
bot.disconnect()
def main():
gp = models.connection.Group.one()
if gp and gp.status:
st = gp.status
else:
st = None
settings = dict(
# deliver here even if the admin logs in
initial_presence = Presence(priority=30, status=st),
poll_interval = 3,
)
botsettings = {
'presence': settings['initial_presence'],
}
settings.update(config.settings)
settings = XMPPSettings(settings)
if config.trace:
logging.info('enabling trace')
for logger in ('pyxmpp2.IN', 'pyxmpp2.OUT'):
logger = logging.getLogger(logger)
logger.setLevel(logging.DEBUG)
for logger in (
'pyxmpp2.mainloop.base', 'pyxmpp2.expdict',
'pyxmpp2.mainloop.poll', 'pyxmpp2.mainloop.events',
'pyxmpp2.transport', 'pyxmpp2.mainloop.events',
):
logger = logging.getLogger(logger)
logger.setLevel(max((logging.INFO, config.logging_level)))
if config.logging_level > logging.DEBUG:
restart_if_failed(runit, 3, args=(settings, botsettings))
else:
runit(settings, botsettings)
if __name__ == '__main__':
setup_logging()
models.init()
main()
| gpl-3.0 | 8,970,019,177,258,831,000 | 30.441233 | 199 | 0.665278 | false |
macarthur-lab/xbrowse | xbrowse_server/base/management/commands/get_lof_variants.py | 1 | 4504 | from collections import defaultdict
import csv
from django.core.management.base import BaseCommand
import elasticsearch
import elasticsearch_dsl
import json
import settings
from seqr.models import Individual
from seqr.views.utils.orm_to_json_utils import _get_json_for_individuals
from xbrowse_server.base.models import Project as BaseProject
EXCLUDE_PROJECTS = ['ext', '1000 genomes', 'DISABLED', 'project', 'interview', 'non-cmg', 'amel']
PER_PAGE = 5000
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument("--metadata-only", action="store_true", help="Only get the project/ individual metadata.")
parser.add_argument("--use-project-indices-csv", action="store_true", help="Load projects to search from project_indices.csv")
parser.add_argument("--index", nargs='+', help="Individual index to use")
def handle(self, *args, **options):
if options["index"]:
es_indices = options["index"]
elif options["use_project_indices_csv"]:
with open('project_indices.csv') as csvfile:
reader = csv.DictReader(csvfile)
es_indices = {row['index'] for row in reader}
else:
projects_q = BaseProject.objects.filter(genome_version='37')
for exclude_project in EXCLUDE_PROJECTS:
projects_q = projects_q.exclude(project_name__icontains=exclude_project)
indices_for_project = defaultdict(list)
for project in projects_q:
indices_for_project[project.get_elasticsearch_index()].append(project)
indices_for_project.pop(None, None)
seqr_projects = []
with open('project_indices.csv', 'wb') as csvfile:
fieldnames = ['projectGuid', 'index']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for index, projects in indices_for_project.items():
for project in projects:
seqr_projects.append(project.seqr_project)
writer.writerow({'projectGuid': project.seqr_project.guid, 'index': index})
individuals = _get_json_for_individuals(Individual.objects.filter(family__project__in=seqr_projects))
with open('seqr_individuals.csv', 'wb') as csvfile:
fieldnames = ['projectGuid', 'familyGuid', 'individualId', 'paternalId', 'maternalId', 'sex',
'affected']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames, extrasaction='ignore')
writer.writeheader()
for individual in individuals:
writer.writerow(individual)
es_indices = indices_for_project.keys()
if not options["metadata_only"]:
es_client = elasticsearch.Elasticsearch(host=settings.ELASTICSEARCH_SERVICE_HOSTNAME, timeout=10000)
search = elasticsearch_dsl.Search(using=es_client, index='*,'.join(es_indices) + "*")
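            # 'HC' presumably selects high-confidence loss-of-function calls
            # (LOFTEE-style annotation) on the main transcript.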
search = search.query("match", mainTranscript_lof='HC')
search = search.source(['contig', 'pos', 'ref', 'alt', '*num_alt', '*gq', '*ab', '*dp', '*ad'])
print('Searching across {} indices...'.format(len(es_indices)))
result_count_search = search.params(size=0)
total = result_count_search.execute().hits.total
print('Loading {} variants...'.format(total))
with open('lof_variants.csv', 'a') as csvfile:
sample_fields = ['num_alt', 'gq', 'ab', 'dp', 'ad']
fieldnames = ['contig', 'pos', 'ref', 'alt', 'index'] + sample_fields
writer = csv.DictWriter(csvfile, fieldnames=fieldnames, extrasaction='ignore')
if not options["index"]:
writer.writeheader()
for i, hit in enumerate(search.scan()):
result = {key: hit[key] for key in hit}
result['index'] = hit.meta.index
                    for field in sample_fields:
                        # Strip the trailing '_<field>' suffix to recover the sample name;
                        # str.rstrip() removes arbitrary trailing characters, not a suffix,
                        # so slicing is used instead.
                        result[field] = json.dumps({
                            key[:-(len(field) + 1)]: val for key, val in result.items() if key.endswith(field)
                        })
writer.writerow(result)
if i % 10000 == 0:
print('Parsed {} variants'.format(i))
print('Loaded {} variants'.format(i))
print('Done') | agpl-3.0 | -8,824,804,290,275,084,000 | 47.44086 | 134 | 0.583925 | false |
coala-analyzer/coala-quickstart | coala_quickstart/generation/FileGlobs.py | 1 | 2475 | import os
from coalib.parsing.Globbing import glob_escape
from coala_quickstart.generation.Utilities import get_gitignore_glob
from coala_utils.Question import ask_question
from coala_quickstart.Strings import GLOB_HELP
from coalib.collecting.Collectors import collect_files
def get_project_files(log_printer,
printer,
project_dir,
file_path_completer,
non_interactive=False):
"""
Gets the list of files matching files in the user's project directory
after prompting for glob expressions.
:param log_printer:
A ``LogPrinter`` object.
:param printer:
A ``ConsolePrinter`` object.
:param file_path_completer:
A ``file_path_completer`` object.
:param non_interactive
Whether coala-quickstart is in non-interactive mode
:return:
A list of file paths matching the files.
"""
file_globs = ['**']
ignore_globs = None
gitignore_dir_list = []
for dir_name, subdir_name, file_list in os.walk(project_dir):
if os.path.isfile(os.path.join(dir_name, '.gitignore')):
gitignore_dir_list += [dir_name]
if gitignore_dir_list:
printer.print('The contents of your .gitignore file for the project '
'will be automatically loaded as the files to ignore.',
color='green')
ignore_globs = get_gitignore_glob(project_dir, gitignore_dir_list)
if non_interactive and not ignore_globs:
ignore_globs = []
if ignore_globs is None:
printer.print(GLOB_HELP)
file_path_completer.activate(seed_dir=project_dir)
ignore_globs = ask_question(
'Which files do you want coala to ignore inside the '
'project directory?',
printer=printer,
typecast=list)
file_path_completer.deactivate()
printer.print()
ignore_globs = list(ignore_globs)
escaped_project_dir = glob_escape(project_dir)
file_path_globs = [os.path.join(
escaped_project_dir, glob_exp) for glob_exp in file_globs]
ignore_path_globs = [os.path.join(
escaped_project_dir, glob_exp) for glob_exp in ignore_globs]
ignore_path_globs.append(os.path.join(escaped_project_dir, '.git/**'))
file_paths = collect_files(
file_path_globs,
log_printer,
ignored_file_paths=ignore_path_globs)
return file_paths, ignore_globs
| agpl-3.0 | 1,518,963,703,581,851,000 | 33.375 | 77 | 0.634343 | false |
tehasdf/AdventOfCode2016 | p4.py | 1 | 1371 | from collections import Counter
def split(name):
name, _, sector_checksum = name.strip().rpartition('-')
sector, _, checksum = sector_checksum.partition('[')
checksum = checksum[:-1]
return name, int(sector), checksum
def real(name, checksum):
letters = Counter(name.replace('-', ''))
return ''.join(sorted(letters, key=lambda x: (-letters[x], x))[:5]) \
== checksum
def decrypt(name, counter):
def letters():
for letter in name:
if letter == '-':
yield ' '
else:
x = ord(letter) - ord('a')
x = (x + counter) % 26
yield chr(x + ord('a'))
return ''.join(letters())
def p1(inp):
return sum(sector for name, sector, checksum in map(split, inp)
if real(name, checksum))
def p2(inp):
for line in inp:
name, sector, checksum = split(line)
name = decrypt(name, sector)
if 'north' in name:
print sector, name
assert real('aaaaa-bbb-z-y-x', 'abxyz')
assert real('a-b-c-d-e-f-g-h', 'abcde')
assert real('not-a-real-room', 'oarel')
assert not real('totally-real-room', 'decoy')
with open('input_4.txt') as f:
print p1(f)
assert decrypt('q', 343) == 'v'
assert decrypt('qzmt-zixmtkozy-ivhz', 343) == 'very encrypted name'
with open('input_4.txt') as f:
print p2(f)
| mit | 1,116,403,845,823,417,200 | 24.388889 | 73 | 0.565281 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyQt4/QtNetwork/QHttpMultiPart.py | 1 | 1266 | # encoding: utf-8
# module PyQt4.QtNetwork
# from /usr/lib/python3/dist-packages/PyQt4/QtNetwork.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
class QHttpMultiPart(__PyQt4_QtCore.QObject):
"""
QHttpMultiPart(QObject parent=None)
QHttpMultiPart(QHttpMultiPart.ContentType, QObject parent=None)
"""
def append(self, QHttpPart): # real signature unknown; restored from __doc__
""" QHttpMultiPart.append(QHttpPart) """
pass
def boundary(self): # real signature unknown; restored from __doc__
""" QHttpMultiPart.boundary() -> QByteArray """
pass
def setBoundary(self, QByteArray): # real signature unknown; restored from __doc__
""" QHttpMultiPart.setBoundary(QByteArray) """
pass
def setContentType(self, QHttpMultiPart_ContentType): # real signature unknown; restored from __doc__
""" QHttpMultiPart.setContentType(QHttpMultiPart.ContentType) """
pass
def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
pass
AlternativeType = 3
ContentType = None # (!) real value is ''
FormDataType = 2
MixedType = 0
RelatedType = 1
| gpl-2.0 | -5,000,133,939,483,875,000 | 29.878049 | 105 | 0.670616 | false |
JeroenZegers/Nabu-MSSS | nabu/processing/feature_computers/angspec.py | 1 | 1409 | """@file angspec.py
contains the angular spectrum feature computer"""
import numpy as np
import base
import feature_computer
from sigproc import snip
class Angspec(feature_computer.FeatureComputer):
"""the feature computer class to compute angular spectrum feature"""
def comp_feat(self, sig, rate):
"""
compute the features
Args:
sig: the audio signal as a 1-D numpy array
rate: the sampling rate
Returns:
the features as a [seq_length x feature_dim] numpy array
"""
# snip the edges
sig = snip(sig, rate, float(self.conf['winlen']), float(self.conf['winstep']))
if 'scipy' in self.conf and self.conf['scipy'] == 'True':
feat = base.angspec_scipy(sig, rate, self.conf)
else:
feat = base.angspec(sig, rate, self.conf)
if self.conf['include_energy'] == 'True':
if 'scipy' in self.conf and self.conf['scipy'] == 'True':
_, energy = base.fbank_scipy(sig, rate, self.conf)
else:
_, energy = base.fbank(sig, rate, self.conf)
feat = np.append(feat, energy[:, np.newaxis], 1)
return feat
def get_dim(self):
"""the feature dimemsion"""
dim = int(self.conf['nfft'])/2+1
if self.conf['include_energy'] == 'True':
dim += 1
return dim
| mit | 173,415,704,382,370,140 | 27.18 | 86 | 0.568488 | false |
HomeRad/TorCleaner | wc/update.py | 1 | 10807 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2009 Bastian Kleineidam
"""
Routines for updating filter and rating configuration.
"""
import os
import md5
from . import log, LOG_GUI, Name, Version, configuration
#XXXfrom filter.Rating import rating_cache_merge, rating_cache_parse
#
# urlutils.py - Simplified urllib handling
#
# Written by Chris Lawrence <[email protected]>
# (C) 1999-2002 Chris Lawrence
#
# This program is freely distributable per the following license:
#
## Permission to use, copy, modify, and distribute this software and its
## documentation for any purpose and without fee is hereby granted,
## provided that the above copyright notice appears in all copies and that
## both that copyright notice and this permission notice appear in
## supporting documentation.
##
## I DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL I
## BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
## DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
## WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
## ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
## SOFTWARE.
#
# Version 2.19; see changelog for revision history
#
# modified by Bastian Kleineidam <[email protected]> for WebCleaner
import httplib
import urllib
import urllib2
import re
import socket
import zlib
import cStringIO as StringIO
from . import gzip2 as gzip
UA_STR = '%s/%s' % (Name, Version)
def decode(page):
"""
Gunzip or deflate a compressed page.
"""
encoding = page.info().get("Content-Encoding")
# note: some servers send content encoding gzip if file ends with ".gz"
# but we don't want to decompress such files
if encoding in ('gzip', 'x-gzip', 'deflate') and \
not page.geturl().endswith(".gz"):
# cannot seek in socket descriptors, so must get content now
content = page.read()
if encoding == 'deflate':
fp = StringIO.StringIO(zlib.decompress(content))
else:
fp = gzip.GzipFile('', 'rb', 9, StringIO.StringIO(content))
# remove content-encoding header
headers = {}
ceheader = re.compile(r"(?i)content-encoding:")
for h in page.info().keys():
if not ceheader.match(h):
headers[h] = page.info()[h]
newpage = urllib.addinfourl(fp, headers, page.geturl())
if hasattr(page, "code"):
# python 2.4 compatibility
newpage.code = page.code
if hasattr(page, "msg"):
# python 2.4 compatibility
newpage.msg = page.msg
page = newpage
return page
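# Rough illustration of decode() above (hypothetical URL; the gzip-aware
# handlers below call it automatically, so direct use is only needed with
# plain urllib2):
#
#   page = decode(urllib2.urlopen('http://example.com/filter-md5sums.txt'))
#   data = page.read()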
class HttpWithGzipHandler(urllib2.HTTPHandler):
"""
Support gzip encoding.
"""
def http_open(self, req):
"""
Open and gunzip request data.
"""
return decode(urllib2.HTTPHandler.http_open(self, req))
if hasattr(httplib, 'HTTPS'):
class HttpsWithGzipHandler(urllib2.HTTPSHandler):
"""
Support gzip encoding.
"""
def http_open(self, req):
"""
Open and gunzip request data.
"""
return decode(urllib2.HTTPSHandler.http_open(self, req))
class PasswordManager(object):
"""
Simple user/password store.
"""
def __init__(self, user, password):
"""
Store given credentials.
"""
self.user = user
self.password = password
def add_password(self, realm, uri, user, passwd):
"""
Already have the password, ignore parameters.
"""
pass
def find_user_password(self, realm, authuri):
"""
Return stored credentials.
"""
return self.user, self.password
_opener = None
def urlopen(url, proxies=None, data=None):
"""
Return connected request object for given url.
All errors raise exceptions.
"""
global _opener
if proxies is None:
proxies = urllib.getproxies()
headers = {
'User-Agent': UA_STR,
'Accept-Encoding' : 'gzip;q=1.0, deflate;q=0.9, identity;q=0.5',
}
request = urllib2.Request(url, data, headers)
proxy_support = urllib2.ProxyHandler(proxies)
if _opener is None:
# XXX heh, not really protected :)
pwd_manager = PasswordManager("WebCleaner", "imadoofus")
handlers = [proxy_support,
urllib2.UnknownHandler,
HttpWithGzipHandler,
urllib2.HTTPBasicAuthHandler(pwd_manager),
urllib2.ProxyBasicAuthHandler(pwd_manager),
urllib2.HTTPDigestAuthHandler(pwd_manager),
urllib2.ProxyDigestAuthHandler(pwd_manager),
urllib2.HTTPDefaultErrorHandler,
urllib2.HTTPRedirectHandler,
]
if hasattr(httplib, 'HTTPS'):
handlers.append(HttpsWithGzipHandler)
_opener = urllib2.build_opener(*handlers)
# print _opener.handlers
urllib2.install_opener(_opener)
return _opener.open(request)
# Global useful URL opener; throws IOError on error
def open_url(url, proxies=None):
"""
Return connected request object for given url.
@raise: IOError
"""
try:
page = urlopen(url, proxies=proxies)
except urllib2.HTTPError, x:
log.error(LOG_GUI, "could not open url %r", url)
raise IOError(x)
except (socket.gaierror, socket.error, urllib2.URLError), x:
log.error(LOG_GUI, "could not open url %r", url)
raise IOError("no network access available")
except IOError, data:
log.error(LOG_GUI, "could not open url %r", url)
if data and data[0] == 'http error' and data[1] == 404:
raise IOError(data)
else:
raise IOError("no network access available")
except OSError, data:
        raise IOError(data)
return page
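# --- Editor's hedged usage sketch (not part of the original module). ---------
# open_url() returns a file-like object and folds all lower-level network
# failures into IOError.  The URL and proxy mapping are illustrative only.
def _example_open_url_usage():
    """Download a small text resource through an explicit HTTP proxy."""
    proxies = {'http': 'http://localhost:3128/'}
    try:
        page = open_url("http://www.example.com/robots.txt", proxies=proxies)
    except IOError:
        # the only exception type callers have to handle
        return None
    try:
        return page.read()
    finally:
        page.close()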
# ====================== end of urlutils.py =================================
def update_filter(wconfig, dryrun=False, log=None):
"""
Update the given configuration object with .zap files found at baseurl.
If dryrun is True, only print out the changes but do nothing.
@raise: IOError
"""
print >> log, _("updating filters"), "..."
chg = False
baseurl = wconfig['baseurl']+"filter/"
url = baseurl+"filter-md5sums.txt"
try:
page = open_url(url)
except IOError, msg:
print >> log, _("error fetching %s") % url, msg
print >> log, "...", _("done")
return chg
# remember all local config files
filemap = {}
for filename in configuration.filterconf_files(wconfig.filterdir):
filemap[os.path.basename(filename)] = filename
# read md5sums
for line in page.read().splitlines():
if "<" in line:
print >> log, _("error fetching %s") % url
print >> log, "...", _("done")
return chg
if not line:
continue
md5sum, filename = line.split()
assert filename.endswith('.zap')
fullname = os.path.join(wconfig.configdir, filename)
# compare checksums
if filename in filemap:
f = file(fullname)
data = f.read()
digest = list(md5.new(data).digest())
f.close()
digest = "".join([ "%0.2x"%ord(c) for c in digest ])
if digest == md5sum:
print >> log, \
_("filter %s not changed, ignoring") % filename
continue
print >> log, _("updating filter %s") % filename
else:
print >> log, _("adding new filter %s") % filename
# parse new filter
url = baseurl + filename
page = open_url(url)
parserclass = configuration.confparse.ZapperParser
p = parserclass(fullname, compile_data=False)
p.parse(fp=page)
page.close()
# compare version compatibility
if wconfig['configversion'][0] != p.folder.configversion[0]:
print >> log, _("Incompatible folder version %s, must be %s") % \
(wconfig['configversion'], p.folder.configversion)
if wconfig.merge_folder(p.folder, dryrun=dryrun, log=log):
chg = True
url = baseurl + "extern-md5sums.txt"
try:
page = open_url(url)
except IOError, msg:
print >> log, _("error fetching %s:") % url, msg
print >> log, "...", _("done")
return chg
lines = page.read().splitlines()
page.close()
for line in lines:
if "<" in line:
print >> log, _("error fetching %s:") % url, \
_("invalid content")
print >> log, "...", _("done")
return chg
if not line:
continue
md5sum, filename = line.split()
# XXX UNIX-generated md5sum filenames with subdirs are not portable
fullname = os.path.join(wconfig.configdir, filename)
# compare checksums
if os.path.exists(fullname):
f = file(fullname)
data = f.read()
digest = list(md5.new(data).digest())
f.close()
digest = "".join([ "%0.2x"%ord(c) for c in digest ])
if digest == md5sum:
print >> log, \
_("extern filter %s not changed, ignoring")%filename
continue
print >> log, _("updating extern filter %s") % filename
else:
print >> log, _("adding new extern filter %s") % filename
chg = True
if not dryrun:
url = baseurl+filename
try:
page = open_url(url)
except IOError, msg:
print >> log, _("error fetching %s:") % url, msg
continue
data = page.read()
if not data:
print >> log, _("error fetching %s:") % url, \
_("got no data")
continue
f = file(fullname, 'wb')
f.write(data)
f.close()
print >> log, "...", _("done")
return chg
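# --- Editor's illustrative sketch (not part of the original module). ---------
# The checksum test used by update_filter() above, isolated: a local file is
# re-downloaded only when its MD5 hex digest differs from the published
# md5sums entry.  Assumes the md5 module imported earlier in this file; the
# arguments are made-up examples.
def _example_checksum_matches(fullname, md5sum):
    """Return True if the file at fullname still matches the published md5sum."""
    f = file(fullname)
    try:
        data = f.read()
    finally:
        f.close()
    digest = "".join(["%0.2x" % ord(c) for c in md5.new(data).digest()])
    return digest == md5sum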
def update_ratings(wconfig, dryrun=False, log=None):
"""
Update rating database from configured online rating service.
"""
print >> log, _("updating ratings...")
chg = False
baseurl = wconfig['baseurl']+"rating/"
url = baseurl+"rating.txt"
try:
page = open_url(url)
except IOError, msg:
print >> log, _("error fetching %s:") % url, msg
print >> log, "...", _("done")
return chg
# Merge new ratings.
new_ratings = rating.storage.rating_parse(page)
chg = rating.ratings.merge(new_ratings, dryrun=dryrun, log=log)
print >> log, "...", _("done")
return chg
| gpl-2.0 | -5,249,150,327,102,987,000 | 32.04893 | 77 | 0.580272 | false |
pablorecio/Cobaya | src/cobaya/config.py | 1 | 2598 | ###############################################################################
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# Copyright (C) 2010, Lorenzo Gil Sanchez <[email protected]> #
###############################################################################
"""Configuration management.
"""
import StringIO
import ConfigParser
import os
import sys
class ConfigError(Exception):
pass
class Config(object):
default_conf = """
[hamster]
db = ~/.local/share/hamster-applet/hamster.db
log_file = ~/.local/share/hamster-applet/synced-tasks.dat
[remote]
url =
user =
password =
[tasks]
ticket_field = activity
project_field = tags
description_field = description
security_days = 10
"""
def __init__(self):
self.parser = ConfigParser.SafeConfigParser()
self.conf_files = [
os.path.join(os.path.dirname(sys.prefix), 'etc', 'cobaya.conf'),
os.path.join(os.path.expanduser('~'), '.cobayarc'),
]
def load(self, filename=None):
self.parser.readfp(StringIO.StringIO(self.default_conf))
if filename is not None:
self.conf_files.append(filename)
return self.parser.read(self.conf_files)
def get_option(self, option):
parts = option.split('.')
if not parts or len(parts) != 2:
raise ConfigError("Options must be qualified with the section")
section, option = parts
value = self.parser.get(section, option)
if value.startswith('~'):
value = value.replace('~', os.path.expanduser('~'))
return value
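# --- Editor's minimal usage sketch (not part of the original module). --------
# Option names come from default_conf above; the extra config path is an
# assumption for illustration only.
def _example_config_usage():
    """Load defaults plus an optional user file and read a few options."""
    config = Config()
    read_files = config.load('/tmp/cobaya-extra.conf')
    db_path = config.get_option('hamster.db')
    security_days = int(config.get_option('tasks.security_days'))
    return read_files, db_path, security_days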
| gpl-3.0 | 8,995,140,087,937,473,000 | 33.64 | 79 | 0.516166 | false |
drblez/dynamodb-transaction-manager | dynamodb2/constructor.py | 1 | 6099 | from datetime import datetime
import decimal
__author__ = 'drblez'
"""
    Field('f1', 'value1').field('f2', 'value2').field('f3', 42).field('f4', ['a', 'b', 'c']).field('f5', [1, 2, 3]).dict()
    {
        'f1': {'S': 'value1'},
        'f2': {'S': 'value2'},
        'f3': {'N': '42'},
        'f4': {'SS': ['a', 'b', 'c']},
        'f5': {'NS': ['1', '2', '3']}
    }
}
    Update('f3').add(1).also(Update('f4').add(['d'])).also(Update('f5').delete([2, 3])).also(Update('f6').put(0)).
        also(Update('f1').delete()).dict()
    {
        'f3': {'Value': {'N': '1'}, 'Action': 'ADD'},
        'f4': {'Value': {'SS': ['d']}, 'Action': 'ADD'},
        'f5': {'Value': {'NS': ['2', '3']}, 'Action': 'DELETE'},
        'f6': {'Value': {'N': '0'}, 'Action': 'PUT'},
        'f1': {'Action': 'DELETE'}
    }
    Expected('f1', True, 'value1').expected('f3', True, 42).expected('f6', False).dict()
    {
        'f1': {'Value': {'S': 'value1'}, 'Exists': 'true'},
        'f3': {'Value': {'N': '42'}, 'Exists': 'true'},
        'f6': {'Exists': 'false'}
    }
KeyConditions('f3').between(40, 44).also(KeyConditions('f1').eq('value1')).dict()
{
'f3': {'AttributeValueList': [{'N': '40'}, {'N': '44'}], 'ComparisonOperator': 'BETWEEN'},
        'f1': {'AttributeValueList': [{'S': 'value1'}], 'ComparisonOperator': 'EQ'}
}
"""
class EmptyList(Exception):
pass
class BadDynamoDBType(Exception):
pass
class ActionAlreadyExists(Exception):
pass
class ExpectedError(Exception):
pass
def dynamodb_type(value):
if type(value) == str:
return 'S'
elif type(value) == int:
return 'N'
elif type(value) == float:
return 'N'
elif type(value) == decimal.Decimal:
return 'N'
elif type(value) == datetime:
return 'D'
elif type(value) == list:
if len(value) == 0:
raise EmptyList()
return dynamodb_type(value[0]) + 'S'
else:
raise BadDynamoDBType('Bad type {} of value {}'.format(type(value), value))
class Field():
def __init__(self, name, value):
self.name = name
self.type = dynamodb_type(value)
if self.type in ['SS', 'NS']:
t = []
for v in value:
t.append(str(v))
self.value = t
elif self.type == 'D':
self.type = 'S'
self.value = value.isoformat()
elif self.type == 'DS':
self.type = 'SS'
t = []
for v in value:
t.append(v.isoformat())
self.value = t
else:
self.value = str(value)
self.items = [self]
def field(self, name, value):
f = Field(name, value)
self.items.append(f)
return self
def dict(self):
d = {}
for i in self.items:
d[i.name] = {i.type: i.value}
return d
class Update():
def __init__(self, field):
self.field = field
self.action = None
self.value = None
self.items = []
def add(self, value):
if not self.action is None:
raise ActionAlreadyExists('For field {} exists action {}'.format(self.field, self.action))
self.value = Field('Value', value).dict()
self.action = 'ADD'
self.items.append(self)
return self
def put(self, value):
self.value = Field('Value', value).dict()
self.action = 'PUT'
self.items.append(self)
return self
def delete(self, value=None):
if not value is None:
self.value = Field('Value', value).dict()
self.action = 'DELETE'
self.items.append(self)
return self
def also(self, update):
self.items.append(update)
return self
def dict(self):
d = {}
for i in self.items:
if not i.value is None:
t = i.value
else:
t = {}
t['Action'] = i.action
d[i.field] = t
return d
class Expected():
def __init__(self, field, exists, value=None):
self.field = field
self.exists = str(exists).lower()
if exists and (value is None):
            raise ExpectedError('Exists=True requires a non-None Value')
if value is None:
self.value = None
else:
self.value = Field('Value', value).dict()
self.items = [self]
def expected(self, field, exists, value=None):
e = Expected(field, exists, value)
self.items.append(e)
return self
def dict(self):
d = {}
for i in self.items:
if not i.value is None:
t = i.value
else:
t = {}
t['Exists'] = i.exists
d[i.field] = t
return d
class KeyConditions():
def __init__(self, field):
self.field = field
self.items = []
self.operator = None
self.values = []
def between(self, lower, upper):
v1 = Field('Value', lower).dict()['Value']
v2 = Field('Value', upper).dict()['Value']
self.values = [v1, v2]
self.operator = 'BETWEEN'
self.items.append(self)
return self
    def __operator(self, operator, value):
        # Wrap the raw value in a typed attribute dict (e.g. {'S': 'value1'}),
        # matching between() and the module docstring; a bare value would not
        # be a valid DynamoDB AttributeValueList entry.
        self.values = [Field('Value', value).dict()['Value']]
        self.operator = operator
        self.items.append(self)
        return self
def eq(self, value):
return self.__operator('EQ', value)
def le(self, value):
return self.__operator('LE', value)
def lt(self, value):
return self.__operator('LT', value)
def ge(self, value):
return self.__operator('GE', value)
def gt(self, value):
return self.__operator('GT', value)
def begins_with(self, value):
return self.__operator('BEGINS_WITH', value)
def also(self, key_conditions):
self.items.append(key_conditions)
return self
def dict(self):
d = {}
for i in self.items:
d[i.field] = {
'AttributeValueList': i.values,
'ComparisonOperator': i.operator
}
return d
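# --- Editor's hedged usage sketch (not part of the original module). ---------
# Combining the builders above into the pieces of a DynamoDB UpdateItem/Query
# request.  Attribute names and values are invented for illustration.
def _example_constructor_usage():
    """Build item, update, expected and key-condition dicts for one request."""
    item = Field('user_id', 42).field('login', 'drblez') \
        .field('created_at', datetime(2013, 1, 1, 12, 0, 0)).dict()
    update = Update('visits').add(1).also(Update('tags').add(['new'])).dict()
    expected = Expected('login', True, 'drblez').expected('blocked', False).dict()
    key_conditions = KeyConditions('user_id').eq(42) \
        .also(KeyConditions('created_at').begins_with('2013-')).dict()
    return item, update, expected, key_conditions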
| gpl-3.0 | 1,864,183,897,517,733,000 | 24.62605 | 120 | 0.499918 | false |