repo_name (stringlengths 5 to 92) | path (stringlengths 4 to 232) | copies (stringclasses, 19 values) | size (stringlengths 4 to 7) | content (stringlengths 721 to 1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51 to 99.9) | line_max (int64, 15 to 997) | alpha_frac (float64, 0.25 to 0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
hide-tono/python-training | deep-learning-tf-keras/ch04/mnist_defs_tf.py | 1 | 4221 | import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
np.random.seed(0)
tf.set_random_seed(1234)
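
# inference() below stacks one fully connected ReLU layer (with dropout) per
# entry in n_hiddens and finishes with a softmax output layer; weights are
# drawn from a truncated normal, biases start at zero.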
def inference(x, keep_prob, n_in, n_hiddens, n_out):
    def weight_variable(shape):
        initial = tf.truncated_normal(shape, stddev=0.01)
        return tf.Variable(initial)

    def bias_variable(shape):
        initial = tf.zeros(shape)
        return tf.Variable(initial)

    for i, n_hidden in enumerate(n_hiddens):
        if i == 0:
            input = x
            input_dim = n_in
        else:
            input = output
            input_dim = n_hiddens[i - 1]

        W = weight_variable([input_dim, n_hidden])
        b = bias_variable(n_hidden)

        h = tf.nn.relu(tf.matmul(input, W) + b)
        output = tf.nn.dropout(h, keep_prob)

    W_out = weight_variable([n_hiddens[-1], n_out])
    b_out = bias_variable([n_out])
    y = tf.nn.softmax(tf.matmul(output, W_out) + b_out)
    return y
def loss(y, t):
    cross_entropy = \
        tf.reduce_mean(-tf.reduce_sum(
            t * tf.log(tf.clip_by_value(y, 1e-10, 1.0)),
            reduction_indices=[1]
        ))
    return cross_entropy
def training(loss):
    optimizer = tf.train.GradientDescentOptimizer(0.01)
    train_step = optimizer.minimize(loss)
    return train_step
def accuracy(y, t):
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.arg_max(t, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    return accuracy
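
# The script below samples 30,000 MNIST digits, trains the 3x200-unit network
# with mini-batch gradient descent, records validation loss and accuracy per
# epoch, plots both curves and finally reports accuracy on the test split.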
if __name__ == '__main__':
    mnist = datasets.fetch_mldata('MNIST original', data_home='.')

    n = len(mnist.data)
    N = 30000
    N_train = 20000
    N_validation = 4000
    indices = np.random.permutation(range(n))[:N]

    X = mnist.data[indices]
    y = mnist.target[indices]
    Y = np.eye(10)[y.astype(int)]

    X_train, X_test, Y_train, Y_test = \
        train_test_split(X, Y, train_size=N_train)
    X_train, X_validation, Y_train, Y_validation = \
        train_test_split(X_train, Y_train, test_size=N_validation)

    n_in = len(X[0])
    n_hiddens = [200, 200, 200]
    n_out = len(Y[0])
    p_keep = 0.5

    x = tf.placeholder(tf.float32, shape=[None, n_in])
    t = tf.placeholder(tf.float32, shape=[None, n_out])
    keep_prob = tf.placeholder(tf.float32)

    y = inference(x, keep_prob, n_in=n_in, n_hiddens=n_hiddens, n_out=n_out)
    loss = loss(y, t)
    train_step = training(loss)
    accuracy = accuracy(y, t)

    history = {
        'val_loss': [],
        'val_acc': []
    }

    epochs = 50
    batch_size = 200

    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    n_batches = N_train // batch_size

    for epoch in range(epochs):
        X_, Y_ = shuffle(X_train, Y_train)

        for i in range(n_batches):
            start = i * batch_size
            end = start + batch_size

            sess.run(train_step, feed_dict={
                x: X_[start:end],
                t: Y_[start:end],
                keep_prob: p_keep
            })

        val_loss = loss.eval(session=sess, feed_dict={
            x: X_validation,
            t: Y_validation,
            keep_prob: 1.0
        })
        val_acc = accuracy.eval(session=sess, feed_dict={
            x: X_validation,
            t: Y_validation,
            keep_prob: 1.0
        })

        history['val_loss'].append(val_loss)
        history['val_acc'].append(val_acc)
        print('epoch:', epoch, ' validation loss:', val_loss, ' validation accuracy:', val_acc)

    plt.rc('font', family='serif')
    fig = plt.figure()
    ax_acc = fig.add_subplot(111)
    ax_acc.plot(range(epochs), history['val_acc'],
                label='acc', color='black')
    ax_loss = ax_acc.twinx()
    ax_loss.plot(range(epochs), history['val_loss'],
                 label='loss', color='gray')
    plt.xlabel('epochs')
    plt.savefig('mnist_tensorflow.eps')

    accuracy_rate = accuracy.eval(session=sess, feed_dict={
        x: X_test,
        t: Y_test,
        keep_prob: 1.0
    })
    print('accuracy: ', accuracy_rate)
| apache-2.0 | 9,006,583,419,889,451,000 | 26.232258 | 95 | 0.560531 | false |
agdsn/pycroft | tests/frontend/test_fields.py | 1 | 1257 | # Copyright (c) 2015 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
import string
import pytest
from wtforms_widgets.fields.core import DateField
__author__ = 'shreyder'
def test_date_field_format_strings():
    for directive, replacement in DateField.supported_directives.items():
        assert DateField.convert_format_string("%" + directive) == replacement
        assert DateField.convert_format_string("%%" + directive) == "%" + directive

    for directive in DateField.unsupported_directives:
        with pytest.raises(ValueError):
            DateField.convert_format_string("%" + directive)
        assert DateField.convert_format_string("%%" + directive) == "%" + directive

    unknown_directives = set(string.ascii_letters).difference(
        set(DateField.supported_directives.keys()),
        set(DateField.unsupported_directives)
    )
    for directive in unknown_directives:
        with pytest.raises(ValueError):
            DateField.convert_format_string("%" + directive)

    assert DateField.convert_format_string("%%") == "%"
    assert DateField.convert_format_string("%%%%") == "%%"
| apache-2.0 | 4,335,578,972,140,562,000 | 38.28125 | 83 | 0.692124 | false |
testmana2/test | Helpviewer/VirusTotal/VirusTotalApi.py | 1 | 15389 | # -*- coding: utf-8 -*-
# Copyright (c) 2011 - 2015 Detlev Offenbach <[email protected]>
#
"""
Module implementing the <a href="http://www.virustotal.com">VirusTotal</a>
API class.
"""
from __future__ import unicode_literals
try:
str = unicode
except NameError:
pass
import json
from PyQt5.QtCore import QObject, QUrl, QByteArray, pyqtSignal, qVersion
from PyQt5.QtNetwork import QNetworkRequest, QNetworkReply
from E5Gui import E5MessageBox
import Preferences
class VirusTotalAPI(QObject):
"""
Class implementing the <a href="http://www.virustotal.com">VirusTotal</a>
API.
@signal checkServiceKeyFinished(bool, str) emitted after the service key
check has been performed. It gives a flag indicating validity
(boolean) and an error message in case of a network error (string).
@signal submitUrlError(str) emitted with the error string, if the URL scan
submission returned an error.
@signal urlScanReport(str) emitted with the URL of the URL scan report page
@signal fileScanReport(str) emitted with the URL of the file scan report
page
"""
checkServiceKeyFinished = pyqtSignal(bool, str)
submitUrlError = pyqtSignal(str)
urlScanReport = pyqtSignal(str)
fileScanReport = pyqtSignal(str)
TestServiceKeyScanID = \
"4feed2c2e352f105f6188efd1d5a558f24aee6971bdf96d5fdb19c197d6d3fad"
ServiceResult_ItemQueued = -2
ServiceResult_ItemNotPresent = 0
ServiceResult_ItemPresent = 1
# HTTP Status Codes
ServiceCode_InvalidKey = 202
ServiceCode_RateLimitExceeded = 204
ServiceCode_InvalidPrivilege = 403
GetFileReportPattern = "{0}://www.virustotal.com/vtapi/v2/file/report"
ScanUrlPattern = "{0}://www.virustotal.com/vtapi/v2/url/scan"
GetUrlReportPattern = "{0}://www.virustotal.com/vtapi/v2/url/report"
GetIpAddressReportPattern = \
"{0}://www.virustotal.com/vtapi/v2/ip-address/report"
GetDomainReportPattern = "{0}://www.virustotal.com/vtapi/v2/domain/report"
def __init__(self, parent=None):
"""
Constructor
@param parent reference to the parent object (QObject)
"""
super(VirusTotalAPI, self).__init__(parent)
self.__replies = []
self.__loadSettings()
self.__lastIP = ""
self.__lastDomain = ""
self.__ipReportDlg = None
self.__domainReportDlg = None
def __loadSettings(self):
"""
Private method to load the settings.
"""
if Preferences.getHelp("VirusTotalSecure"):
protocol = "https"
else:
protocol = "http"
self.GetFileReportUrl = self.GetFileReportPattern.format(protocol)
self.ScanUrlUrl = self.ScanUrlPattern.format(protocol)
self.GetUrlReportUrl = self.GetUrlReportPattern.format(protocol)
self.GetIpAddressReportUrl = self.GetIpAddressReportPattern.format(
protocol)
self.GetDomainReportUrl = self.GetDomainReportPattern.format(protocol)
self.errorMessages = {
204: self.tr("Request limit has been reached."),
0: self.tr("Requested item is not present."),
-2: self.tr("Requested item is still queued."),
}
def preferencesChanged(self):
"""
Public slot to handle a change of preferences.
"""
self.__loadSettings()
def checkServiceKeyValidity(self, key, protocol=""):
"""
Public method to check the validity of the given service key.
@param key service key (string)
@param protocol protocol used to access VirusTotal (string)
"""
if protocol == "":
urlStr = self.GetFileReportUrl
else:
urlStr = self.GetFileReportPattern.format(protocol)
request = QNetworkRequest(QUrl(urlStr))
request.setHeader(QNetworkRequest.ContentTypeHeader,
"application/x-www-form-urlencoded")
params = QByteArray("apikey={0}&resource={1}".format(
key, self.TestServiceKeyScanID).encode("utf-8"))
import Helpviewer.HelpWindow
nam = Helpviewer.HelpWindow.HelpWindow.networkAccessManager()
reply = nam.post(request, params)
reply.finished.connect(self.__checkServiceKeyValidityFinished)
self.__replies.append(reply)
def __checkServiceKeyValidityFinished(self):
"""
Private slot to determine the result of the service key validity check.
"""
res = False
msg = ""
reply = self.sender()
if reply.error() == QNetworkReply.NoError:
res = True
elif reply.error() == self.ServiceCode_InvalidKey:
res = False
else:
msg = reply.errorString()
self.__replies.remove(reply)
reply.deleteLater()
self.checkServiceKeyFinished.emit(res, msg)
def submitUrl(self, url):
"""
Public method to submit an URL to be scanned.
@param url url to be scanned (QUrl)
"""
request = QNetworkRequest(QUrl(self.ScanUrlUrl))
request.setHeader(QNetworkRequest.ContentTypeHeader,
"application/x-www-form-urlencoded")
params = QByteArray("apikey={0}&url=".format(
Preferences.getHelp("VirusTotalServiceKey")).encode("utf-8"))\
.append(QUrl.toPercentEncoding(url.toString()))
import Helpviewer.HelpWindow
nam = Helpviewer.HelpWindow.HelpWindow.networkAccessManager()
reply = nam.post(request, params)
reply.finished.connect(self.__submitUrlFinished)
self.__replies.append(reply)
def __submitUrlFinished(self):
"""
Private slot to determine the result of the URL scan submission.
"""
reply = self.sender()
if reply.error() == QNetworkReply.NoError:
result = json.loads(str(reply.readAll(), "utf-8"))
if result["response_code"] == self.ServiceResult_ItemPresent:
self.urlScanReport.emit(result["permalink"])
self.__getUrlScanReportUrl(result["scan_id"])
else:
if result["response_code"] in self.errorMessages:
msg = self.errorMessages[result["response_code"]]
else:
msg = result["verbose_msg"]
self.submitUrlError.emit(msg)
elif reply.error() == self.ServiceCode_RateLimitExceeded:
self.submitUrlError.emit(
self.errorMessages[result[self.ServiceCode_RateLimitExceeded]])
else:
self.submitUrlError.emit(reply.errorString())
self.__replies.remove(reply)
reply.deleteLater()
def __getUrlScanReportUrl(self, scanId):
"""
Private method to get the report URL for a URL scan.
@param scanId ID of the scan to get the report URL for (string)
"""
request = QNetworkRequest(QUrl(self.GetUrlReportUrl))
request.setHeader(QNetworkRequest.ContentTypeHeader,
"application/x-www-form-urlencoded")
params = QByteArray("apikey={0}&resource={1}".format(
Preferences.getHelp("VirusTotalServiceKey"), scanId)
.encode("utf-8"))
import Helpviewer.HelpWindow
nam = Helpviewer.HelpWindow.HelpWindow.networkAccessManager()
reply = nam.post(request, params)
reply.finished.connect(self.__getUrlScanReportUrlFinished)
self.__replies.append(reply)
def __getUrlScanReportUrlFinished(self):
"""
Private slot to determine the result of the URL scan report URL
request.
"""
reply = self.sender()
if reply.error() == QNetworkReply.NoError:
result = json.loads(str(reply.readAll(), "utf-8"))
if "filescan_id" in result and result["filescan_id"] is not None:
self.__getFileScanReportUrl(result["filescan_id"])
self.__replies.remove(reply)
reply.deleteLater()
def __getFileScanReportUrl(self, scanId):
"""
Private method to get the report URL for a file scan.
@param scanId ID of the scan to get the report URL for (string)
"""
request = QNetworkRequest(QUrl(self.GetFileReportUrl))
request.setHeader(QNetworkRequest.ContentTypeHeader,
"application/x-www-form-urlencoded")
params = QByteArray("apikey={0}&resource={1}".format(
Preferences.getHelp("VirusTotalServiceKey"), scanId)
.encode("utf-8"))
import Helpviewer.HelpWindow
nam = Helpviewer.HelpWindow.HelpWindow.networkAccessManager()
reply = nam.post(request, params)
reply.finished.connect(self.__getFileScanReportUrlFinished)
self.__replies.append(reply)
def __getFileScanReportUrlFinished(self):
"""
Private slot to determine the result of the file scan report URL
request.
"""
reply = self.sender()
if reply.error() == QNetworkReply.NoError:
result = json.loads(str(reply.readAll(), "utf-8"))
self.fileScanReport.emit(result["permalink"])
self.__replies.remove(reply)
reply.deleteLater()
def getIpAddressReport(self, ipAddress):
"""
Public method to retrieve a report for an IP address.
@param ipAddress valid IPv4 address in dotted quad notation
@type str
"""
self.__lastIP = ipAddress
queryItems = [
("apikey", Preferences.getHelp("VirusTotalServiceKey")),
("ip", ipAddress),
]
url = QUrl(self.GetIpAddressReportUrl)
if qVersion() >= "5.0.0":
from PyQt5.QtCore import QUrlQuery
query = QUrlQuery()
query.setQueryItems(queryItems)
url.setQuery(query)
else:
url.setQueryItems(queryItems)
request = QNetworkRequest(url)
import Helpviewer.HelpWindow
nam = Helpviewer.HelpWindow.HelpWindow.networkAccessManager()
reply = nam.get(request)
reply.finished.connect(self.__getIpAddressReportFinished)
self.__replies.append(reply)
def __getIpAddressReportFinished(self):
"""
Private slot to process the IP address report data.
"""
reply = self.sender()
if reply.error() == QNetworkReply.NoError:
result = json.loads(str(reply.readAll(), "utf-8"))
if result["response_code"] == 0:
E5MessageBox.information(
None,
self.tr("VirusTotal IP Address Report"),
self.tr("""VirusTotal does not have any information for"""
""" the given IP address."""))
elif result["response_code"] == -1:
E5MessageBox.information(
None,
self.tr("VirusTotal IP Address Report"),
self.tr("""The submitted IP address is invalid."""))
else:
owner = result["as_owner"]
resolutions = result["resolutions"]
try:
urls = result["detected_urls"]
except KeyError:
urls = []
from .VirusTotalIpReportDialog import VirusTotalIpReportDialog
self.__ipReportDlg = VirusTotalIpReportDialog(
self.__lastIP, owner, resolutions, urls)
self.__ipReportDlg.show()
self.__replies.remove(reply)
reply.deleteLater()
def getDomainReport(self, domain):
"""
Public method to retrieve a report for a domain.
@param domain domain name
@type str
"""
self.__lastDomain = domain
queryItems = [
("apikey", Preferences.getHelp("VirusTotalServiceKey")),
("domain", domain),
]
url = QUrl(self.GetDomainReportUrl)
if qVersion() >= "5.0.0":
from PyQt5.QtCore import QUrlQuery
query = QUrlQuery()
query.setQueryItems(queryItems)
url.setQuery(query)
else:
url.setQueryItems(queryItems)
request = QNetworkRequest(url)
import Helpviewer.HelpWindow
nam = Helpviewer.HelpWindow.HelpWindow.networkAccessManager()
reply = nam.get(request)
reply.finished.connect(self.__getDomainReportFinished)
self.__replies.append(reply)
def __getDomainReportFinished(self):
"""
Private slot to process the domain report data.
"""
reply = self.sender()
if reply.error() == QNetworkReply.NoError:
result = json.loads(str(reply.readAll(), "utf-8"))
if result["response_code"] == 0:
E5MessageBox.information(
None,
self.tr("VirusTotal Domain Report"),
self.tr("""VirusTotal does not have any information for"""
""" the given domain."""))
elif result["response_code"] == -1:
E5MessageBox.information(
None,
self.tr("VirusTotal Domain Report"),
self.tr("""The submitted domain address is invalid."""))
else:
resolutions = result["resolutions"]
try:
urls = result["detected_urls"]
except KeyError:
urls = []
try:
subdomains = result["subdomains"]
except KeyError:
subdomains = []
try:
bdCategory = result["BitDefender category"]
except KeyError:
bdCategory = self.tr("not available")
try:
tmCategory = result["TrendMicro category"]
except KeyError:
tmCategory = self.tr("not available")
try:
wtsCategory = result["Websense ThreatSeeker category"]
except KeyError:
wtsCategory = self.tr("not available")
try:
whois = result["whois"]
except KeyError:
whois = ""
from .VirusTotalDomainReportDialog import \
VirusTotalDomainReportDialog
self.__domainReportDlg = VirusTotalDomainReportDialog(
self.__lastDomain, resolutions, urls, subdomains,
bdCategory, tmCategory, wtsCategory, whois)
self.__domainReportDlg.show()
self.__replies.remove(reply)
reply.deleteLater()
def close(self):
"""
Public slot to close the API.
"""
for reply in self.__replies:
reply.abort()
self.__ipReportDlg and self.__ipReportDlg.close()
self.__domainReportDlg and self.__domainReportDlg.close()
| gpl-3.0 | -7,896,726,135,286,818,000 | 36.534146 | 79 | 0.576841 | false |
chenzilin/git-repo | subcmds/help.py | 1 | 4832 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import re
import sys
from formatter import AbstractFormatter, DumbWriter
from color import Coloring
from command import PagedCommand, MirrorSafeCommand, GitcAvailableCommand, GitcClientCommand
import gitc_utils
class Help(PagedCommand, MirrorSafeCommand):
common = False
helpSummary = "Display detailed help on a command"
helpUsage = """
%prog [--all|command]
"""
helpDescription = """
Displays detailed usage information about a command.
"""
def _PrintAllCommands(self):
print('usage: repo COMMAND [ARGS]')
print('The complete list of recognized repo commands are:')
commandNames = list(sorted(self.commands))
maxlen = 0
for name in commandNames:
maxlen = max(maxlen, len(name))
fmt = ' %%-%ds %%s' % maxlen
for name in commandNames:
command = self.commands[name]
try:
summary = command.helpSummary.strip()
except AttributeError:
summary = ''
print(fmt % (name, summary))
print("See 'repo help <command>' for more information on a "
'specific command.')
def _PrintCommonCommands(self):
print('usage: repo COMMAND [ARGS]')
print('The most commonly used repo commands are:')
def gitc_supported(cmd):
if not isinstance(cmd, GitcAvailableCommand) and not isinstance(cmd, GitcClientCommand):
return True
if self.manifest.isGitcClient:
return True
if isinstance(cmd, GitcClientCommand):
return False
if gitc_utils.get_gitc_manifest_dir():
return True
return False
commandNames = list(sorted([name
for name, command in self.commands.items()
if command.common and gitc_supported(command)]))
maxlen = 0
for name in commandNames:
maxlen = max(maxlen, len(name))
fmt = ' %%-%ds %%s' % maxlen
for name in commandNames:
command = self.commands[name]
try:
summary = command.helpSummary.strip()
except AttributeError:
summary = ''
print(fmt % (name, summary))
print(
"See 'repo help <command>' for more information on a specific command.\n"
"See 'repo help --all' for a complete list of recognized commands.")
def _PrintCommandHelp(self, cmd):
class _Out(Coloring):
def __init__(self, gc):
Coloring.__init__(self, gc, 'help')
self.heading = self.printer('heading', attr='bold')
self.wrap = AbstractFormatter(DumbWriter())
def _PrintSection(self, heading, bodyAttr):
try:
body = getattr(cmd, bodyAttr)
except AttributeError:
return
if body == '' or body is None:
return
self.nl()
self.heading('%s', heading)
self.nl()
self.nl()
me = 'repo %s' % cmd.NAME
body = body.strip()
body = body.replace('%prog', me)
asciidoc_hdr = re.compile(r'^\n?#+ (.+)$')
for para in body.split("\n\n"):
if para.startswith(' '):
self.write('%s', para)
self.nl()
self.nl()
continue
m = asciidoc_hdr.match(para)
if m:
self.heading(m.group(1))
self.nl()
self.nl()
continue
self.wrap.add_flowing_data(para)
self.wrap.end_paragraph(1)
self.wrap.end_paragraph(0)
out = _Out(self.manifest.globalConfig)
out._PrintSection('Summary', 'helpSummary')
cmd.OptionParser.print_help()
out._PrintSection('Description', 'helpDescription')
def _Options(self, p):
p.add_option('-a', '--all',
dest='show_all', action='store_true',
help='show the complete list of commands')
def Execute(self, opt, args):
if len(args) == 0:
if opt.show_all:
self._PrintAllCommands()
else:
self._PrintCommonCommands()
elif len(args) == 1:
name = args[0]
try:
cmd = self.commands[name]
except KeyError:
print("repo: '%s' is not a repo command." % name, file=sys.stderr)
sys.exit(1)
cmd.manifest = self.manifest
self._PrintCommandHelp(cmd)
else:
self._PrintCommandHelp(self)
| apache-2.0 | -7,711,002,196,199,770,000 | 28.284848 | 94 | 0.617343 | false |
fga-gpp-mds/2017.2-Receituario-Medico | medical_prescription/user/test/test_model_send_invitation.py | 1 | 1112 | # Standard library
import hashlib
import random
import datetime
# Django imports
from django.test import TestCase
# Local django imports
from user.models import SendInvitationProfile, Patient
class TestSendInvitationProfile(TestCase):
    def setUp(self):
        self.send_invitation_profile = SendInvitationProfile()
        self.patient = Patient.objects.create_user(email='[email protected]')
        self.salt = hashlib.sha1(str(random.random()).encode('utf-8')).hexdigest()[:5]
        self.activation_key = hashlib.sha1(str(self.salt+self.patient.email).encode('utf-8')).hexdigest()
        self.key_expires = datetime.datetime.today() + datetime.timedelta(2)
        self.send_invitation_profile = SendInvitationProfile.objects.create(activation_key=self.activation_key,
                                                                            patient=self.patient,
                                                                            key_expires=self.key_expires)

    def test_user_str(self):
        self.assertEquals(str(self.send_invitation_profile), '[email protected]',)
| mit | 954,918,931,982,965,900 | 40.037037 | 111 | 0.636282 | false |
google/nitroml | examples/metalearning_benchmark.py | 1 | 7286 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Lint as: python3
# pyformat: disable
"""Demos a metalearning pipeline as a NitroML benchmark on 'OpenMLCC18' datasets.
To run in open-source:
python -m examples.metalearning_benchmark.py
""" # pylint: disable=line-too-long
# pyformat: enable
# pylint: disable=g-import-not-at-top
import os
import sys
# Required since Python binaries ignore relative paths when importing:
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import nitroml
from nitroml.automl import autodata as ad
from nitroml.automl.metalearning import metalearning as mtl
from nitroml.automl.metalearning.tuner import component as tuner_component
from nitroml.benchmark.suites import openml_cc18
from examples import config
from tfx import components as tfx
from tfx.components.trainer import executor as trainer_executor
from tfx.dsl.components.base import executor_spec
from tfx.proto import trainer_pb2
from google.protobuf import text_format
class MetaLearningBenchmark(nitroml.Benchmark):
r"""Benchmarks a metalearning pipeline on OpenML-CC18 classification tasks."""
def benchmark(self,
algorithm: str = None,
mock_data: bool = False,
data_dir: str = None):
# TODO(nikhilmehta): Extend this to multiple test datasets using subbenchmarks.
metatrain_task_names = frozenset([
'OpenML.connect4', 'OpenML.creditapproval', 'OpenML.creditg',
'OpenML.cylinderbands', 'OpenML.diabetes'
])
metatest_task_names = frozenset(['OpenML.dressessales'])
train_steps = 1000
if mock_data:
metatrain_task_names = {'OpenML.mockdata_1'}
metatest_task_names = {'OpenML.mockdata_2'}
train_steps = 10
metatrain_tasks = []
metatest_tasks = []
for task in openml_cc18.OpenMLCC18(data_dir, mock_data=mock_data):
if task.name in metatrain_task_names:
metatrain_tasks.append(task)
if task.name in metatest_task_names:
metatest_tasks.append(task)
meta_train_data = {}
train_autodata_list = []
for task in metatrain_tasks:
# Register running the Task's data preparation components.
self.add(task.components)
# Create the autodata instance for this task, which creates Transform,
# StatisticsGen and SchemaGen component.
autodata = self.add(
ad.AutoData(
task.problem_statement,
examples=task.train_and_eval_examples,
preprocessor=ad.BasicPreprocessor(),
instance_name=f'train.{task.name}'))
# Add a tuner component for each metatrain dataset that finds the optimum
# HParams.
tuner = self.add(
tuner_component.AugmentedTuner(
tuner_fn='nitroml.automl.autotrainer.lib.auto_trainer.tuner_fn',
examples=autodata.outputs.transformed_examples,
transform_graph=autodata.outputs.transform_graph,
train_args=trainer_pb2.TrainArgs(num_steps=train_steps),
eval_args=trainer_pb2.EvalArgs(num_steps=1),
custom_config={
# Pass the problem statement proto as a text proto. Required
# since custom_config must be JSON-serializable.
'problem_statement':
text_format.MessageToString(
message=task.problem_statement, as_utf8=True),
},
instance_name=f'train.{task.name}'))
train_autodata_list.append(autodata)
key = f'hparams_train_{len(train_autodata_list)}'
meta_train_data[key] = tuner.outputs.best_hyperparameters
# Construct the MetaLearning subpipeline.
metalearning = self.add(
mtl.MetaLearning(
train_autodata_list=train_autodata_list,
meta_train_data=meta_train_data,
algorithm=algorithm))
for task in metatest_tasks:
with self.sub_benchmark(task.name):
# Register running the Task's data preparation components.
self.add(task.components)
# Create the autodata instance for the test task.
autodata = self.add(
ad.AutoData(
task.problem_statement,
examples=task.train_and_eval_examples,
preprocessor=ad.BasicPreprocessor()))
test_meta_components, best_hparams = metalearning.create_test_components(
autodata, tuner_steps=train_steps)
self.add(test_meta_components)
# Create a trainer component that utilizes the recommended HParams
# from the metalearning subpipeline.
trainer = self.add(
tfx.Trainer(
run_fn='nitroml.automl.autotrainer.lib.auto_trainer.run_fn',
custom_executor_spec=(executor_spec.ExecutorClassSpec(
trainer_executor.GenericExecutor)),
transformed_examples=autodata.outputs.transformed_examples,
transform_graph=autodata.outputs.transform_graph,
schema=autodata.outputs.schema,
train_args=trainer_pb2.TrainArgs(num_steps=train_steps),
eval_args=trainer_pb2.EvalArgs(num_steps=1),
hyperparameters=best_hparams,
custom_config={
# Pass the problem statement proto as a text proto. Required
# since custom_config must be JSON-serializable.
'problem_statement':
text_format.MessageToString(
message=task.problem_statement, as_utf8=True),
}))
# Finally, call evaluate() on the workflow DAG outputs. This will
# automatically append Evaluators to compute metrics from the given
# SavedModel and 'eval' TF Examples.
self.evaluate(task=task, model=trainer.outputs.model)
if __name__ == '__main__':
metalearning_algorithm = 'nearest_neighbor'
run_config = dict(
pipeline_name=f'metalearning_{metalearning_algorithm}',
data_dir=config.OTHER_DOWNLOAD_DIR,
algorithm=metalearning_algorithm)
if config.USE_KUBEFLOW:
# We need the string "KubeflowDagRunner" in this file to appease the
# validator used in `tfx create pipeline`.
# Validator: https://github.com/tensorflow/tfx/blob/v0.22.0/tfx/tools/cli/handler/base_handler.py#L105
nitroml.main(
pipeline_root=os.path.join(config.PIPELINE_ROOT,
run_config['pipeline_name']),
tfx_runner=nitroml.get_default_kubeflow_dag_runner(),
**run_config)
else:
# This example has not been tested with engines other than Kubeflow.
nitroml.main(**run_config)
| apache-2.0 | 3,418,842,400,522,188,300 | 39.477778 | 106 | 0.650425 | false |
TiagoDGomes/py-ad-ldap | ad_ldap/errors.py | 1 | 2402 | #!/usr/bin/python
"""A module containing the exception classes for the adldap module.
Copyright 2010 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

class Error(Exception):
    """Generic Error class for adldap."""

class ObjectPropertyNotFoundError(Error):
    """An attempt was made to access a property that was not found."""

class UserNotDisabledError(Error):
    """An attempt was made to enable a user that was not disabled."""

class UserNotEnabledError(Error):
    """An attempt was made to disable a user that was not enabled."""

class UserNotLockedOutError(Error):
    """An attempt was made to unlock a user that is not locked out."""

class NoComputerPasswordResetError(Error):
    """Resetting the password for computer objects is not supported."""

class LDAPConnectionFailedError(Error):
    """The LDAP server could not be contacted."""

class InvalidCredentialsError(Error):
    """The credentials supplied were invalid."""

class QueryTimeoutError(Error):
    """The ldap query timed out waiting for results."""

class InvalidPropertyFormatError(Error):
    """The properties requested for the object are in an invalid format."""

class ADObjectClassOnlyError(Error):
    """These results can only be retrieved when using the ADObject class type."""

class ADObjectNotFoundError(Error):
    """The search returned zero results."""

class ADDomainNotConnectedError(Error):
    """You must call Connect() on the Domain object before this operation."""

class ADPasswordSetFailedError(Error):
    """The attempt to update the password failed."""

class MemberAlreadyError(Error):
    """The group member already exists."""

class NotAMemberError(Error):
    """The object is not a member of the group."""

class NonListParameterError(Error):
    """The parameter must be a list or other iterable."""

class ServerRefusedOperationError(Error):
    """The server refused to perform the operation requested."""
| apache-2.0 | -282,757,042,359,981,980 | 26.295455 | 79 | 0.754788 | false |
hzlf/openbroadcast | website/apps/abcast/views/schedulerviews.py | 1 | 11454 | from django.views.generic import DetailView, ListView, FormView, UpdateView
from django.views.generic.detail import SingleObjectTemplateResponseMixin
from django.shortcuts import get_object_or_404, render_to_response
from django.db.models import Avg
from django import http
from django.http import HttpResponse, HttpResponseForbidden, Http404, HttpResponseRedirect
from django.utils import simplejson as json
from django.conf import settings
from django.shortcuts import redirect
from django.core import serializers
from django.utils.translation import ugettext as _
import json
from django.template import RequestContext
from abcast.models import Emission, Channel
from alibrary.models import Playlist
#from abcast.filters import EmissionFilter
from tagging.models import Tag, TaggedItem
from tagging.utils import calculate_cloud
import datetime
from jsonview.decorators import json_view
import jsonview
from easy_thumbnails.files import get_thumbnailer
from django.db.models import Q
from lib.util import tagging_extra
# logging
import logging
logger = logging.getLogger(__name__)
SCHEDULER_GRID_WIDTH = getattr(settings, 'SCHEDULER_GRID_WIDTH', 830)
SCHEDULER_GRID_OFFSET = getattr(settings, 'SCHEDULER_GRID_OFFSET', 60)
SCHEDULER_PPH = getattr(settings, 'SCHEDULER_PPH', 42)
SCHEDULER_PPD = getattr(settings, 'SCHEDULER_PPD', 110) # actually should be calculated
# how long ahead should the schedule be locked
SCHEDULER_LOCK_AHEAD = getattr(settings, 'SCHEDULER_LOCK_AHEAD', 60) # 1 minute, to allow caching of files
SCHEDULER_NUM_DAYS = 7
# hours to offset the schedule
# 6: day starts at 6:00 and goes until 6:00
SCHEDULER_OFFSET = getattr(settings, 'SCHEDULER_OFFSET', 6)
SCHEDULER_DEFAULT_CHANNEL_ID = getattr(settings, 'SCHEDULER_DEFAULT_CHANNEL_ID', 1)
def schedule(request):
log = logging.getLogger('abcast.schedulerviews.schedule')
data = {}
# pet all available channels
data['channels'] = Channel.objects.filter(has_scheduler=True)
data['list_style'] = request.GET.get('list_style', 's')
data['days_offset'] = request.GET.get('days_offset', 0)
data['get'] = request.GET
num_days = request.GET.get('num_days', SCHEDULER_NUM_DAYS)
data['num_days'] = int(num_days)
days = []
today = datetime.datetime.now()
today = datetime.datetime(today.year, today.month, today.day)
offset = datetime.timedelta(days=-today.weekday() + int(data['days_offset']))
for day in range(int(num_days)):
date = today + offset
#date = date.strftime("%a, %d %b %Y %H:%M:%S +0000")
days.append( date )
offset += datetime.timedelta(days=1)
data['today'] = today
data['days'] = days
data['pph'] = SCHEDULER_PPH
data['ppd'] = (SCHEDULER_GRID_WIDTH - SCHEDULER_GRID_OFFSET) / int(num_days)
data['offset'] = SCHEDULER_OFFSET
# build a range-filter string for the API
range_start = days[0] + datetime.timedelta(hours=SCHEDULER_OFFSET)
range_end = days[-1] + datetime.timedelta(hours=SCHEDULER_OFFSET + 24)
range_start = range_start.strftime("%Y-%m-%dT%H:%M:%S")
range_end = range_end.strftime("%Y-%m-%dT%H:%M:%S")
data['range_filter'] = '&time_start__gte=%s&time_end__lte=%s&' % (range_start, range_end)
channel_id = request.GET.get('channel_id', SCHEDULER_DEFAULT_CHANNEL_ID)
channel_id = int(channel_id)
channel = Channel.objects.get(pk=channel_id)
dayparts = channel.get_dayparts(days[0])
data['dayparts'] = dayparts
data['channel'] = channel
print dayparts
for dp in dayparts:
print dp.duration
log.debug('grid pph: %s' % data['pph'])
log.debug('grid ppd: %s' % data['ppd'])
data['station_time'] = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
# look for a selected playlist in session
playlist_id = request.session.get('scheduler_selected_playlist_id', None)
if playlist_id:
data['selected_playlist'] = Playlist.objects.get(pk=playlist_id)
log.debug('schedule offset: %s' % offset)
log.debug('schedule today: %s' % today)
log.debug('schedule playlist_id: %s' % playlist_id)
return render_to_response('abcast/schedule.html', data, context_instance=RequestContext(request))
class EmissionListView(ListView):
model = Emission
extra_context = {}
def get_context_data(self, **kwargs):
context = super(EmissionListView, self).get_context_data(**kwargs)
self.extra_context['list_style'] = self.request.GET.get('list_style', 's')
self.extra_context['get'] = self.request.GET
days = []
today = datetime.datetime.now()
offset = datetime.timedelta(days=-today.weekday())
for day in range(7):
date = today + offset
#date = date.strftime("%a, %d %b %Y %H:%M:%S +0000")
days.append( date )
offset += datetime.timedelta(days=1)
self.extra_context['today'] = today
self.extra_context['days'] = days
context.update(self.extra_context)
return context
def get_queryset(self, **kwargs):
# return render_to_response('my_app/template.html', {'filter': f})
kwargs = {}
self.tagcloud = None
q = self.request.GET.get('q', None)
if q:
qs = Emission.objects.filter(Q(name__istartswith=q))\
.distinct()
else:
qs = Emission.objects.all()
return qs
class EmissionDetailView(DetailView):
# context_object_name = "emission"
model = Emission
extra_context = {}
def render_to_response(self, context):
return super(EmissionDetailView, self).render_to_response(context, mimetype="text/html")
def get_context_data(self, **kwargs):
obj = kwargs.get('object', None)
context = super(EmissionDetailView, self).get_context_data(**kwargs)
context.update(self.extra_context)
return context
"""
views for playlist / emission selection
"""
#@json_view
def select_playlist(request):
log = logging.getLogger('abcast.schedulerviews.select_playlist')
playlist_id = request.GET.get('playlist_id', None)
next = request.GET.get('next', None)
if not playlist_id:
request.session['scheduler_selected_playlist_id'] = None
try:
playlist = Playlist.objects.get(pk=playlist_id)
except Playlist.DoesNotExist:
log.warning('playlist does not exists. (id: %s)' % playlist_id)
raise Http404
request.session['scheduler_selected_playlist_id'] = playlist.pk
log.debug('next: %s' % next)
log.debug('playlist_id: %s' % playlist_id)
if next:
return redirect(next)
data = {
'status': True,
'playlist_id': playlist.id
}
#return data
data = json.dumps(data)
return HttpResponse(data, mimetype='application/json')
"""
put object to schedule
"""
@json_view
def schedule_object(request):
log = logging.getLogger('abcast.schedulerviews.schedule_object')
ct = request.POST.get('ct', None)
obj_id = request.POST.get('obj_id', None)
top = request.POST.get('top', None)
left = request.POST.get('left', None)
range_start = request.POST.get('range_start', None)
range_end = request.POST.get('range_end', None)
num_days = request.POST.get('num_days', SCHEDULER_NUM_DAYS)
log.debug('content type: %s' % ct)
if ct == 'playlist':
obj = Playlist.objects.get(pk=int(obj_id))
log.debug('object to schedule: %s' % obj.name)
pph = SCHEDULER_PPH
# ppd = SCHEDULER_PPD
ppd = (SCHEDULER_GRID_WIDTH - SCHEDULER_GRID_OFFSET) / int(num_days)
top = float(top) / pph * 60
offset_min = int(15 * round(float(top)/15))
left = float(left) / ppd
offset_d = int(round(float(left)))
log.debug('minutes (offset): %s' % offset_min)
log.debug('days (offset): %s' % offset_d)
# calculate actual date/time for position
schedule_start = datetime.datetime.strptime('%s 00:00' % range_start, '%Y-%m-%d %H:%M')
# add offsets
time_start = schedule_start + datetime.timedelta(minutes=offset_min)
time_start = time_start + datetime.timedelta(days=offset_d)
time_start = time_start + datetime.timedelta(hours=SCHEDULER_OFFSET)
# time_end = time_start + datetime.timedelta(milliseconds=obj.get_duration())
# for duration calculation we use the 'target duration' (to avoid blocked slots)
time_end = time_start + datetime.timedelta(seconds=(obj.target_duration))
log.debug('time_start: %s' % time_start)
log.debug('time_end: %s' % time_end)
# check if in past
now = datetime.datetime.now()
lock_end = now + datetime.timedelta(seconds=SCHEDULER_LOCK_AHEAD)
if lock_end > time_start:
return { 'message': _('You cannot schedule things in the past!') }
# check if slot is free
# hm just allow some seconds of tolerance (in case of mini-overlaps)
es = Emission.objects.filter(time_end__gt=time_start + datetime.timedelta(seconds=2), time_start__lt=time_end)
if es.count() > 0:
for em in es:
print 'Blocking emission: %s' % em.id
print em.time_start
print em.time_end
return { 'message': _('Sorry, but the desired time does not seem to be available.') }
# if no errors so far -> create emission and attach object
e = Emission(content_object=obj, time_start=time_start, user=request.user)
e.save()
data = {
'status': True,
'obj_id': obj_id
}
return data
#data = json.dumps(data)
#return HttpResponse(data, mimetype='application/json')
"""
copy a day to another
"""
@json_view
def copy_paste_day(request):
log = logging.getLogger('abcast.schedulerviews.copy_day')
source = request.POST.get('source', None)
target = request.POST.get('target', None)
channel_id = request.POST.get('channel_id', SCHEDULER_DEFAULT_CHANNEL_ID)
channel = Channel.objects.get(pk=channel_id)
log.debug('copy from: %s to %s' % (source, target))
if source and target:
source = datetime.datetime.strptime(source, '%Y-%m-%d')
target = datetime.datetime.strptime(target, '%Y-%m-%d')
offset = (target - source)
source_start = source + datetime.timedelta(hours=SCHEDULER_OFFSET)
source_end = source_start + datetime.timedelta(hours=24)
log.debug('source: %s to %s' % (source_start, source_end))
log.debug('offset: %s' % (offset))
# get emissions
es = Emission.objects.filter(time_start__gte=source_start, time_end__lte=source_end)
for e in es:
print e
e.pk = None
e.uuid = None
e.locked = False
e.time_start = e.time_start + offset
e.save()
#ne = Emission()
now = datetime.datetime.now()
data = {
'status': True,
}
return data
| gpl-3.0 | 4,984,770,444,386,933,000 | 26.868613 | 114 | 0.620918 | false |
jonhadfield/acli | lib/acli/output/vpc.py | 1 | 4347 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, print_function, unicode_literals)
from acli.output import (output_ascii_table, output_ascii_table_list, dash_if_none)
from colorclass import Color, Windows
Windows.enable(auto_colors=True, reset_atexit=True)
def get_tag(name=None, tags=None):
    if tags:
        for tag in tags:
            if tag.get('Key') == name:
                return tag.get('Value')
def output_vpc_list(vpcs=None):
"""
@type vpcs: dict
"""
td = list()
table_header = [Color('{autoblue}vpc id{/autoblue}'), Color('{autoblue}name{/autoblue}'),
Color('{autoblue}CIDR block{/autoblue}'), Color('{autoblue}tenancy{/autoblue}'),
Color('{autoblue}state{/autoblue}'), Color('{autoblue}DHCP options{/autoblue}'),
Color('{autoblue}default vpc{/autoblue}')]
for vpc in vpcs.get('Vpcs'):
vpcid = vpc.get('VpcId')
cidr_block = vpc.get('CidrBlock')
tenancy = vpc.get('InstanceTenancy')
state = vpc.get('State')
dhcpoptions = vpc.get('DhcpOptionsId')
default = str(vpc.get('IsDefault'))
td.append([vpcid,
dash_if_none(get_tag(name='Name', tags=vpc.get('Tags'))),
dash_if_none(cidr_block),
dash_if_none(tenancy),
dash_if_none(state),
dash_if_none(dhcpoptions),
default])
output_ascii_table_list(table_title=Color('{autowhite}VPCs{/autowhite}'),
table_data=td,
table_header=table_header,
inner_heading_row_border=True)
exit(0)
def output_vpc_info(vpc=None, subnets=None):
"""
@type vpc: ec2.Vpc
@type subnets: dict
"""
if vpc:
td = list()
td.append([Color('{autoblue}vpc id{/autoblue}'), vpc.get('VpcId')])
td.append([Color('{autoblue}CIDR block{/autoblue}'), vpc.get('CidrBlock')])
td.append([Color('{autoblue}default{/autoblue}'), str(vpc.get('IsDefault'))])
td.append([Color('{autoblue}tenancy{/autoblue}'), vpc.get('InstanceTenancy')])
td.append([Color('{autoblue}state{/autoblue}'), dash_if_none(vpc.get('State'))])
td.append([Color('{autoblue}tags{/autoblue}'), " "])
if vpc.get('Tags'):
for vpc_tag in vpc.get('Tags'):
td.append([Color('{autoblue}' + "{0}".format(vpc_tag.get('Key'))+'{/autoblue}'),
" {0}".format(vpc_tag.get('Value'))])
if subnets:
td.append(["{0}".format('-' * 30), "{0}".format('-' * 30)])
td.append([Color('{autowhite}SUBNETS{/autowhite}'), " "])
for subnet in subnets.get('Subnets'):
td.append(["{0}".format('-' * 30),
"{0}".format('-' * 30)])
td.append([Color('{autoblue}subnet id{/autoblue}'),
subnet.get('SubnetId')])
td.append([Color('{autoblue}az{/autoblue}'),
subnet.get('AvailabilityZone')])
td.append([Color('{autoblue}state{/autoblue}'),
subnet.get('State')])
td.append([Color('{autoblue}available IPs{/autoblue}'),
str(subnet.get('AvailableIpAddressCount'))])
td.append([Color('{autoblue}CIDR block{/autoblue}'),
subnet.get('CidrBlock')])
td.append([Color('{autoblue}default for az{/autoblue}'),
str(subnet.get('DefaultForAz'))])
td.append([Color('{autoblue}map public ip on launch{/autoblue}'),
str(subnet.get('MapPublicIpOnLaunch'))])
if subnet.get('Tags'):
td.append([Color('{autoblue}tags{/autoblue}'), "-"])
for tag in subnet.get('Tags'):
tag_key, tag_value = dash_if_none(tag.get('Key')), dash_if_none(tag.get('Value'))
td.append([Color('{autoblue}'+" {}".format(tag_key)+'{/autoblue}'), "{}".format(tag_value)])
output_ascii_table(table_title=Color('{autowhite}vpc info{/autowhite}'),
table_data=td)
else:
exit('VPC does not exist.')
exit(0)
| mit | 8,650,758,224,400,534,000 | 46.769231 | 116 | 0.517138 | false |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/model/migrate/versions/0059_sample_dataset_file_path.py | 1 | 1937 | """
Migration script to modify the 'file_path' field type in 'sample_dataset' table
to 'TEXT' so that it can support large file paths exceeding 255 characters
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
from sqlalchemy.exc import *
from galaxy.model.custom_types import *
from galaxy.util.json import loads, dumps
import datetime
now = datetime.datetime.utcnow
import logging
log = logging.getLogger( __name__ )
metadata = MetaData()
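
# upgrade() changes the column type by copying the existing file_path values,
# dropping the column, re-creating it as TEXT and then writing the saved
# values back row by row.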
def upgrade(migrate_engine):
    metadata.bind = migrate_engine
    print __doc__
    metadata.reflect()
    try:
        SampleDataset_table = Table( "sample_dataset", metadata, autoload=True )
    except NoSuchTableError, e:
        SampleDataset_table = None
        log.debug( "Failed loading table 'sample_dataset'" )
    if SampleDataset_table is not None:
        cmd = "SELECT id, file_path FROM sample_dataset"
        result = migrate_engine.execute( cmd )
        filepath_dict = {}
        for r in result:
            id = int(r[0])
            filepath_dict[id] = r[1]
        # remove the 'file_path' column
        try:
            SampleDataset_table.c.file_path.drop()
        except Exception, e:
            log.debug( "Deleting column 'file_path' from the 'sample_dataset' table failed: %s" % ( str( e ) ) )
        # create the column again
        try:
            col = Column( "file_path", TEXT )
            col.create( SampleDataset_table )
            assert col is SampleDataset_table.c.file_path
        except Exception, e:
            log.debug( "Creating column 'file_path' in the 'sample_dataset' table failed: %s" % ( str( e ) ) )
        for id, file_path in filepath_dict.items():
            cmd = "update sample_dataset set file_path='%s' where id=%i" % (file_path, id)
            migrate_engine.execute( cmd )
def downgrade(migrate_engine):
    metadata.bind = migrate_engine
    pass
| gpl-3.0 | 8,046,237,435,248,791,000 | 31.830508 | 112 | 0.636551 | false |
damoxc/vsmtpd | vsmtpd/tests/plugins/test_connection_time.py | 1 | 1076 | #
# vsmtpd/tests/plugins/test_connection_time.py
#
# Copyright (C) 2011 Damien Churchill <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
import time
from vsmtpd.tests.common import PluginTestCase, create_connection
class ConnectionTimeTestCase(PluginTestCase):
    plugin_name = 'connection_time'

    def test_short_connection(self):
        plugin = self.plugin(None)
        connection = create_connection()

        plugin.pre_connection(connection)
        time.sleep(0.1)
        plugin.post_connection(connection)
| gpl-3.0 | -8,641,858,165,633,547,000 | 30.647059 | 70 | 0.727695 | false |
vanadium23/catalog-project | catalog/app/models.py | 1 | 2366 | from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
from app.database import Base
class User(Base):
"""This is an ORM model for logging users"""
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String(50), nullable=False)
email = Column(String(100), nullable=False, unique=True)
picture = Column(String(250))
class Category(Base):
"""This is an ORM model for our categories"""
__tablename__ = 'categories'
id = Column(Integer, primary_key=True)
name = Column(String(80), nullable=False, unique=True)
description = Column(String(250), nullable=True)
items = relationship("Item",
backref="Category",
cascade="all, delete-orphan")
author_id = Column(Integer, ForeignKey('users.id'))
author = relationship('User')
def __init__(self, name, description, author_id):
self.name = name
self.description = description
self.author_id = author_id
@property
def to_json(self):
category = {"name": self.name,
"description": self.description,
"id": self.id
}
category['items'] = [i.to_json for i in self.items]
return category
class Item(Base):
"""This is an ORM model for our items"""
__tablename__ = 'items'
id = Column(Integer, primary_key=True)
name = Column(String(80), nullable=False, unique=True)
description = Column(String(250), nullable=True)
image_name = Column(String(250),
nullable=False,
default='no-image-large.png')
category_id = Column(Integer, ForeignKey('categories.id'))
category = relationship('Category')
author_id = Column(Integer, ForeignKey('users.id'))
author = relationship('User')
def __init__(self, name, description, category_id, author_id):
self.name = name
self.description = description
self.category_id = category_id
self.author_id = author_id
@property
def to_json(self):
return {"id": self.id,
"name": self.name,
"description": self.description,
"image_name": self.image_name,
"category_id": self.category_id
}
| mit | -6,516,774,220,803,841,000 | 31.861111 | 66 | 0.592139 | false |
py-in-the-sky/challenges | mine_allocations.py | 1 | 8134 | """
Greedy Algorithm
https://community.topcoder.com/stat?c=problem_statement&pm=1957&rd=4650
Discussion: https://www.topcoder.com/community/data-science/data-science-tutorials/greedy-is-good/
"[T]he [expected] profit of allocating an extra worker to a mine is always higher or equal
with the [expected] profit of allocating the next extra worker to that mine." That is,
for each mine, the profit from allocating an additional miner is a non-increasing
sequence; the profit you'll get from adding this miner is greater than or
equal to the profit from allocating the next.
Because of this structure, we can devise a greedy algorithm that finds the
globally maximum profit.
"""
from collections import deque
def P_at_least_n(n, probabilities):
    return sum(probabilities[n:])
def get_marginal_profits(mine):
    probabilities = map(lambda f: f / 100, map(float, mine.split(', ')))
    mine_len = len(probabilities)
    marginal_probabilities = [P_at_least_n(n, probabilities) for n in xrange(mine_len)]
    mp = marginal_probabilities + [0]
    p = probabilities
    marginal_profits = (mp[i+1] * 60 + p[i] * (50 - 10*(i-1)) + (1 - mp[i]) * -20 for i in xrange(mine_len))
    marginal_profits = deque(marginal_profits)
    marginal_profits.popleft()  # remove p_0, which is always 1.0 and not needed for allocation decisions
    return marginal_profits
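
# Greedy allocation: each of the `miners` workers is assigned to the mine whose
# next remaining marginal expected profit is largest, with at most six workers
# per mine. The non-increasing marginal profits described in the module
# docstring are what make this greedy choice globally optimal.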
def get_allocation(mines, miners):
    marginal_profits = map(get_marginal_profits, mines)
    allocation = [0] * len(mines)

    for _ in xrange(miners):
        available_mines = (i for i,_ in enumerate(marginal_profits) if allocation[i] < 6)
        i = max(available_mines, key=lambda i: marginal_profits[i][0])
        mine = marginal_profits[i]
        mine.popleft()  # remove marginal profit from used allocation
        allocation[i] += 1

    return allocation
def tests():
    miners = 4
    mines = [
        "000, 030, 030, 040, 000, 000, 000",
        "020, 020, 020, 010, 010, 010, 010"
    ]
    assert get_allocation(mines, miners) == [2, 2]
    print 'one'

    miners = 8
    mines = [
        "100, 000, 000, 000, 000, 000, 000",
        "100, 000, 000, 000, 000, 000, 000",
        "100, 000, 000, 000, 000, 000, 000",
        "100, 000, 000, 000, 000, 000, 000",
        "100, 000, 000, 000, 000, 000, 000"
    ]
    assert get_allocation(mines, miners) == [6, 2, 0, 0, 0]
    print 'two'

    miners = 30
    mines = [
        "050, 000, 000, 000, 000, 050, 000",
        "050, 000, 000, 000, 000, 050, 000",
        "050, 000, 000, 000, 000, 050, 000",
        "050, 000, 000, 000, 000, 050, 000",
        "050, 000, 000, 000, 000, 050, 000",
        "050, 000, 000, 000, 000, 050, 000",
        "050, 000, 000, 000, 000, 050, 000",
        "050, 000, 000, 000, 000, 050, 000",
        "050, 000, 000, 000, 000, 050, 000",
        "050, 000, 000, 000, 000, 050, 000"
    ]
    assert get_allocation(mines, miners) == [4, 4, 4, 4, 4, 4, 4, 2, 0, 0]
    print 'three'

    miners = 56
    mines = [
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004",
        "026, 012, 005, 013, 038, 002, 004"
    ]
    assert get_allocation(mines, miners) == [2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    print 'four'

    miners = 150
    mines = [
        "100, 000, 000, 000, 000, 000, 000",
        "090, 010, 000, 000, 000, 000, 000",
        "080, 020, 000, 000, 000, 000, 000",
        "075, 025, 000, 000, 000, 000, 000",
        "050, 050, 000, 000, 000, 000, 000",
        "025, 075, 000, 000, 000, 000, 000",
        "020, 080, 000, 000, 000, 000, 000",
        "010, 090, 000, 000, 000, 000, 000",
        "000, 100, 000, 000, 000, 000, 000",
        "000, 090, 010, 000, 000, 000, 000",
        "000, 080, 020, 000, 000, 000, 000",
        "000, 075, 025, 000, 000, 000, 000",
        "000, 050, 050, 000, 000, 000, 000",
        "000, 025, 075, 000, 000, 000, 000",
        "000, 020, 080, 000, 000, 000, 000",
        "000, 010, 090, 000, 000, 000, 000",
        "000, 000, 100, 000, 000, 000, 000",
        "000, 000, 090, 010, 000, 000, 000",
        "000, 000, 080, 020, 000, 000, 000",
        "000, 000, 075, 025, 000, 000, 000",
        "000, 000, 050, 050, 000, 000, 000",
        "000, 000, 025, 075, 000, 000, 000",
        "000, 000, 020, 080, 000, 000, 000",
        "000, 000, 010, 090, 000, 000, 000",
        "000, 000, 000, 100, 000, 000, 000",
        "000, 000, 000, 100, 000, 000, 000",
        "000, 000, 000, 090, 010, 000, 000",
        "000, 000, 000, 080, 020, 000, 000",
        "000, 000, 000, 075, 025, 000, 000",
        "000, 000, 000, 050, 050, 000, 000",
        "000, 000, 000, 025, 075, 000, 000",
        "000, 000, 000, 020, 080, 000, 000",
        "000, 000, 000, 010, 090, 000, 000",
        "000, 000, 000, 000, 100, 000, 000",
        "000, 000, 000, 000, 090, 010, 000",
        "000, 000, 000, 000, 080, 020, 000",
        "000, 000, 000, 000, 075, 025, 000",
        "000, 000, 000, 000, 050, 050, 000",
        "000, 000, 000, 000, 025, 075, 000",
        "000, 000, 000, 000, 020, 080, 000",
        "000, 000, 000, 000, 010, 090, 000",
        "000, 000, 000, 000, 000, 100, 000",
        "000, 000, 000, 000, 000, 090, 010",
        "000, 000, 000, 000, 000, 080, 020",
        "000, 000, 000, 000, 000, 075, 025",
        "000, 000, 000, 000, 000, 050, 050",
        "000, 000, 000, 000, 000, 025, 075",
        "000, 000, 000, 000, 000, 020, 080",
        "000, 000, 000, 000, 000, 010, 090",
        "000, 000, 000, 000, 000, 000, 100"
    ]
    assert get_allocation(mines, miners) == [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6]
    print 'five'

    print 'tests pass!'


if __name__ == '__main__':
    tests()
| mit | -5,176,688,691,923,415,000 | 38.678049 | 243 | 0.512294 | false |
Nic30/hwtHls | hwtHls/examples/pid.py | 1 | 1640 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.code import Add
from hwt.synthesizer.param import Param
from hwtHls.platform.virtual import VirtualHlsPlatform
from hwtHls.hls import Hls
from hwtLib.logic.pid import PidController
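
# Variant of PidController that builds the arithmetic of the u update inside a
# Hls context, so hwtHls can schedule the multipliers/adders for the configured
# clock frequency (CLK_FREQ).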
class PidControllerHls(PidController):
def _config(self):
super(PidControllerHls, self)._config()
self.CLK_FREQ = Param(int(100e6))
def _impl(self):
# register of current output value
u = self._reg("u", dtype=self.output._dtype, def_val=0)
# create y-pipeline registers (y -> y_reg[0]-> y_reg[1])
y = [self.input, ]
for i in range(2):
_y = self._reg("y_reg%d" % i, dtype=self.input._dtype, def_val=0)
# feed data from last register
_y(y[-1])
y.append(_y)
# trim signal to width of output
def trim(signal):
return signal._reinterpret_cast(self.output._dtype)
# create arith. expressions between inputs and regs
with Hls(self, freq=self.CLK_FREQ) as hls:
io = hls.io
err = io(self.input) - io(self.target)
a = [io(c) for c in self.coefs]
y = [io(_y) for _y in y]
_u = Add(io(u), a[0] * err, a[1] * y[0],
a[2] * y[1], a[3] * y[2], key=trim)
hls.io(u)(_u)
# propagate output value register to output
self.output(u)
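        # The datapath built above implements the incremental update
        #   u[k+1] = u[k] + a0*err[k] + a1*y[k] + a2*y[k-1] + a3*y[k-2]
        # computed by the HLS pipeline and trimmed back to the output width.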
if __name__ == "__main__":
from hwt.synthesizer.utils import to_rtl_str
u = PidController()
print(to_rtl_str(u))
u = PidControllerHls()
print(to_rtl_str(u, target_platform=VirtualHlsPlatform()))
| mit | -3,685,152,865,492,009,500 | 29.943396 | 77 | 0.570732 | false |
fbmnds/FSharp-Computational-Fluid-Dynamics | FSharp-Computational-Fluid-Dynamics/test/lesson-11.py | 1 | 9123 | from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
###
def print_u (f, label1, u, ny, nx, label2):
f.write(label1)
for j in range(ny):
f.write("[")
for i in range(nx):
f.write(str(u[j,i]))
if i < nx-1:
f.write(", ")
if j < ny-1:
f.write("],\n")
else:
f.write("]\n")
f.write(label2)
def print_v (f, label1, v, n, label2):
f.write(label1)
for i in range(n):
f.write(str(v[i]))
if i < n-1:
f.write(", ")
f.write(label2)
def print_params(f,nx,dx,ny,dy,nt,dt,rho,nu,nit):
f.write("{ \"nx\" : ")
f.write(str(nx))
f.write(", \"dx\" : ")
f.write(str(dx))
f.write(", \"ny\" : ")
f.write(str(ny))
f.write(", \"dy\" : ")
f.write(str(dy))
f.write(", \"nt\" : ")
f.write(str(nt))
f.write(", \"dt\" : ")
f.write(str(dt))
f.write(", \"rho\" : ")
f.write(str(rho))
f.write(", \"nu\" : ")
f.write(str(nu))
f.write(", \"nit\" : ")
f.write(str(nit))
def results(f,nx,dx,ny,dy,dt,u,v,p,b0):
print_u (f,",\n \"u_nt\" : [", u, nx, ny, "]")
print_u (f,",\n \"v_nt\" : [", v, nx, ny, "]")
print_u (f, ",\n \"b0\" : [", b0, nx, ny, "]")
b_nt = buildUpB(b0, rho, dt, u, v, dx, dy)
print_u (f,",\n \"b_nt\" : [", b_nt, nx, ny, "]")
print_u (f,",\n \"p_nt\" : [", p, nx, ny, "]")
p_py = presPoisson(p, dx, dy, b_nt)
print_u (f,",\n \"p_py\" : [", p_py, nx, ny, "]")
f.write( "}" )
###
def buildUpB(b, rho, dt, u, v, dx, dy):
b[1:-1,1:-1]=rho*(1/dt*((u[2:,1:-1]-u[0:-2,1:-1])/(2*dx)+(v[1:-1,2:]-v[1:-1,0:-2])/(2*dy))-\
((u[2:,1:-1]-u[0:-2,1:-1])/(2*dx))**2-\
2*((u[1:-1,2:]-u[1:-1,0:-2])/(2*dy)*(v[2:,1:-1]-v[0:-2,1:-1])/(2*dx))-\
((v[1:-1,2:]-v[1:-1,0:-2])/(2*dy))**2)
return b
nit=50    ## implicit parameter of presPoisson
def presPoisson(p, dx, dy, b):
pn = np.empty_like(p)
pn[:] = p[:]
for q in range(nit):
pn[:] = p[:]
p[1:-1,1:-1] = ((pn[2:,1:-1]+pn[0:-2,1:-1])*dy**2+(pn[1:-1,2:]+pn[1:-1,0:-2])*dx**2)/\
(2*(dx**2+dy**2)) -\
dx**2*dy**2/(2*(dx**2+dy**2))*b[1:-1,1:-1]
p[-1,:] =p[-2,:] ##dp/dy = 0 at y = 2
p[0,:] = p[1,:] ##dp/dy = 0 at y = 0
p[:,0]=p[:,1] ##dp/dx = 0 at x = 0
p[:,-1]=0 ##p = 0 at x = 2
return p
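# presPoisson above performs nit Jacobi-style relaxation sweeps of the
# discretized pressure-Poisson equation
#   p[i,j] = ((p[i+1,j] + p[i-1,j])*dy^2 + (p[i,j+1] + p[i,j-1])*dx^2
#             - b[i,j]*dx^2*dy^2) / (2*(dx^2 + dy^2))
# with dp/dn = 0 on three boundaries and p = 0 on the boundary at x = 2.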
def cavityFlow(nt, u, v, dt, dx, dy, p, rho, nu):
un = np.empty_like(u)
vn = np.empty_like(v)
b = np.zeros((ny, nx))
for n in range(nt):
un[:] = u[:]
vn[:] = v[:]
b = buildUpB(b, rho, dt, u, v, dx, dy)
p = presPoisson(p, dx, dy, b)
u[1:-1,1:-1] = un[1:-1,1:-1]-\
un[1:-1,1:-1]*dt/dx*(un[1:-1,1:-1]-un[0:-2,1:-1])-\
vn[1:-1,1:-1]*dt/dy*(un[1:-1,1:-1]-un[1:-1,0:-2])-\
dt/(2*rho*dx)*(p[2:,1:-1]-p[0:-2,1:-1])+\
nu*(dt/dx**2*(un[2:,1:-1]-2*un[1:-1,1:-1]+un[0:-2,1:-1])+\
dt/dy**2*(un[1:-1,2:]-2*un[1:-1,1:-1]+un[1:-1,0:-2]))
v[1:-1,1:-1] = vn[1:-1,1:-1]-\
un[1:-1,1:-1]*dt/dx*(vn[1:-1,1:-1]-vn[0:-2,1:-1])-\
vn[1:-1,1:-1]*dt/dy*(vn[1:-1,1:-1]-vn[1:-1,0:-2])-\
dt/(2*rho*dy)*(p[1:-1,2:]-p[1:-1,0:-2])+\
nu*(dt/dx**2*(vn[2:,1:-1]-2*vn[1:-1,1:-1]+vn[0:-2,1:-1])+\
(dt/dy**2*(vn[1:-1,2:]-2*vn[1:-1,1:-1]+vn[1:-1,0:-2])))
u[0,:] = 0
u[:,0] = 0
u[:,-1] = 1 ## in last line overwritten below
v[0,:] = 0
v[-1,:]=0
v[:,0] = 0
v[:,-1] = 0
u[-1,:] = 0
return u, v, p, b
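# Each cavityFlow iteration advances the lid-driven cavity solution one time
# step: build the source term b, relax the pressure with presPoisson, update
# u and v from the discretized momentum equations, then re-impose the cavity
# boundary conditions (unit tangential velocity on the driven wall, no-slip
# elsewhere).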
nx = 41
ny = 41
nt = 500 ## subsequently overwritten
## nit=50 ## implicit parameter of presPoisson
c = 1 ## unused
dx = 2.0/(nx-1)
dy = 2.0/(ny-1)
x = np.linspace(0,2,nx)
y = np.linspace(0,2,ny)
Y,X = np.meshgrid(y,x)
rho = 1.
nu = .1
dt = .001
# u = np.zeros((ny, nx))
# v = np.zeros((ny, nx))
# p = np.zeros((ny, nx))
# b = np.zeros((ny, nx))
u = np.zeros((ny, nx))
v = np.zeros((ny, nx))
p = np.zeros((ny, nx))
b = np.zeros((ny, nx))
b0 = np.zeros((ny, nx))
nt = 200 ## variable test case parameter
p_py = np.zeros((ny, nx)) ## for testing presPoisson()
### first test case
f = open(''.join(['test-11-', str(nt), '.json']),'w')
print_params(f,nx,dx,ny,dy,nt,dt,rho,nu,nit)
print_u (f, ",\n \"p0\" : [", p, nx, ny, "]")
print_u (f, ",\n \"u0\" : [", u, nx, ny, "]")
print_u (f, ",\n \"v0\" : [", v, nx, ny, "]")
u, v, p, b0 = cavityFlow(nt, u, v, dt, dx, dy, p, rho, nu)
results(f,nx,dx,ny,dy,dt,u,v,p,b0)
f.close()
fig = plt.figure(figsize=(11,7), dpi=100)
plt.contourf(X,Y,p,alpha=0.5) ###plotting the pressure field as a contour
plt.colorbar()
plt.contour(X,Y,p) ###plotting the pressure field outlines
plt.quiver(X[::2,::2],Y[::2,::2],u[::2,::2],v[::2,::2]) ##plotting velocity
plt.xlabel('X')
plt.ylabel('Y')
fig.show()
#input(" press ENTER to continue ")
### second test case
### reinitialization
u = np.zeros((ny, nx))
v = np.zeros((ny, nx))
p = np.zeros((ny, nx))
b = np.zeros((ny, nx))
nt = 700 ## variable test case parameter
f = open(''.join(['test-11-', str(nt), '.json']),'w')
print_params(f,nx,dx,ny,dy,nt,dt,rho,nu,nit)
print_u (f, ",\n \"p0\" : [", p, nx, ny, "]")
print_u (f, ",\n \"u0\" : [", u, nx, ny, "]")
print_u (f, ",\n \"v0\" : [", v, nx, ny, "]")
u, v, p, b0 = cavityFlow(nt, u, v, dt, dx, dy, p, rho, nu)
results(f,nx,dx,ny,dy,dt,u,v,p,b0)
f.close()
fig = plt.figure(figsize=(11,7), dpi=100)
plt.contourf(X,Y,p,alpha=0.5)
plt.colorbar()
plt.contour(X,Y,p)
plt.quiver(X[::2,::2],Y[::2,::2],u[::2,::2],v[::2,::2])
plt.xlabel('X')
plt.ylabel('Y')
fig.show()
#input(" press ENTER to continue ")
### third test case
### reinitialization
nx = 21
ny = 21
dx = 2.0/(nx-1)
dy = 2.0/(ny-1)
x = np.linspace(0,2,nx)
y = np.linspace(0,2,ny)
Y,X = np.meshgrid(y,x)
u = np.zeros((ny, nx))
v = np.zeros((ny, nx))
p = np.zeros((ny, nx))
b = np.zeros((ny, nx))
nt = 3 ## variable test case parameter
f = open(''.join(['test-11-', str(nt), '.json']),'w')
print_params(f,nx,dx,ny,dy,nt,dt,rho,nu,nit)
print_u (f, ",\n \"p0\" : [", p, nx, ny, "]")
print_u (f, ",\n \"u0\" : [", u, nx, ny, "]")
print_u (f, ",\n \"v0\" : [", v, nx, ny, "]")
u, v, p, b0 = cavityFlow(nt, u, v, dt, dx, dy, p, rho, nu)
u2 = np.zeros((ny, nx))
u2 = u**2
print_u (f, ",\n \"u2\" : [", u2, nx, ny, "]")
uu = np.zeros((ny, nx))
uu = u*u
print_u (f, ",\n \"uu\" : [", uu, nx, ny, "]")
results(f,nx,dx,ny,dy,dt,u,v,p,b0)
f.close()
fig = plt.figure(figsize=(11,7), dpi=100)
plt.contourf(X,Y,p,alpha=0.5)
plt.colorbar()
plt.contour(X,Y,p)
plt.quiver(X[::2,::2],Y[::2,::2],u[::2,::2],v[::2,::2])
plt.xlabel('X')
plt.ylabel('Y')
fig.show()
#input(" press ENTER to continue ")
### fourth test case
def test_cavityFlow(nt, u, v, dt, dx, dy, p, rho, nu):
un = np.empty_like(u)
vn = np.empty_like(v)
## b = np.zeros((ny, nx))
#for n in range(nt):
un[:] = u[:]
vn[:] = v[:]
## b = buildUpB(b, rho, dt, u, v, dx, dy)
## p = presPoisson(p, dx, dy, b)
u[1:-1,1:-1] = un[1:-1,1:-1]-\
un[1:-1,1:-1]*dt/dx*(un[1:-1,1:-1]-un[0:-2,1:-1])-\
vn[1:-1,1:-1]*dt/dy*(un[1:-1,1:-1]-un[1:-1,0:-2])-\
dt/(2*rho*dx)*(p[2:,1:-1]-p[0:-2,1:-1])+\
nu*(dt/dx**2*(un[2:,1:-1]-2*un[1:-1,1:-1]+un[0:-2,1:-1])+\
dt/dy**2*(un[1:-1,2:]-2*un[1:-1,1:-1]+un[1:-1,0:-2]))
v[1:-1,1:-1] = vn[1:-1,1:-1]-\
un[1:-1,1:-1]*dt/dx*(vn[1:-1,1:-1]-vn[0:-2,1:-1])-\
vn[1:-1,1:-1]*dt/dy*(vn[1:-1,1:-1]-vn[1:-1,0:-2])-\
dt/(2*rho*dy)*(p[1:-1,2:]-p[1:-1,0:-2])+\
nu*(dt/dx**2*(vn[2:,1:-1]-2*vn[1:-1,1:-1]+vn[0:-2,1:-1])+\
(dt/dy**2*(vn[1:-1,2:]-2*vn[1:-1,1:-1]+vn[1:-1,0:-2])))
u[0,:] = 0
u[:,0] = 0
u[:,-1] = 1
v[0,:] = 0
v[-1,:]=0
v[:,0] = 0
v[:,-1] = 0
u[-1,:] = 0
return u, v
### reinitialization
nx = 21
ny = 21
dx = 2.0/(nx-1)
dy = 2.0/(ny-1)
x = np.linspace(0,2,nx)
y = np.linspace(0,2,ny)
Y,X = np.meshgrid(y,x)
### take u,v,p from third test case
## u = np.zeros((ny, nx))
## v = np.zeros((ny, nx))
## p = np.zeros((ny, nx))
nt = 1 ## variable test case parameter / pseudo in this case
f = open(''.join(['test-11-', str(nt), '.json']),'w')
print_params(f,nx,dx,ny,dy,nt,dt,rho,nu,nit)
print_u (f, ",\n \"u0\" : [", u, nx, ny, "]")
print_u (f, ",\n \"v0\" : [", v, nx, ny, "]")
print_u (f, ",\n \"p0\" : [", p, nx, ny, "]")
### in: u0,v0,p0
u, v = test_cavityFlow(nt, u, v, dt, dx, dy, p, rho, nu)
### out: u_nt,v_nt
print_u (f,",\n \"u_nt\" : [", u, nx, ny, "]")
print_u (f,",\n \"v_nt\" : [", v, nx, ny, "]")
f.write( "}" )
f.close()
## fig = plt.figure(figsize=(11,7), dpi=100)
## plt.contourf(X,Y,p,alpha=0.5)
## plt.colorbar()
## plt.contour(X,Y,p)
## plt.quiver(X[::2,::2],Y[::2,::2],u[::2,::2],v[::2,::2])
## plt.xlabel('X')
## plt.ylabel('Y')
## fig.show()
## input(" press ENTER to continue ")
| apache-2.0 | 465,601,827,552,391,360 | 22.634715 | 102 | 0.446892 | false |
TinkerMill/mms-server | mmsServer/__init__.py | 1 | 5213 | #!/usr/bin/env python
# __init__.py
### IMPORTS ###
import os
import sys
import os.path
import time
import json
from datetime import datetime
from flask import Flask, g, render_template, request
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
### GLOBALS ###
# Have to setup the template directory when this is a package
# http://stackoverflow.com/questions/8478404/flask-doesnt-locate-template-directory-when-running-with-twisted
templateDirectory = os.path.join( os.path.dirname( os.path.abspath(__file__)), 'templates')
app = Flask( 'mmsServer', template_folder = templateDirectory)
#app.config.update(dict(DATABASE="tinkermill.db"))
#app.config.from_envvar('FLASKR_SETTINGS', silent=True)
#SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join( os.path.dirname( __file__), 'mms.db')
SQLALCHEMY_DATABASE_URI = 'mysql://root:strangehat@localhost/mms_server'
app.config[ 'SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
db = SQLAlchemy( app)
### VIEWS ###
# This should be used in the modules to import the models for use
from mmsServer.models import Member
from mmsServer.models import Logaccessdevice
from mmsServer.models import Accessdevice
# create everything
db.create_all()
db.session.commit()
### FUNCTIONS ###
### ROUTING ###
def log(deviceId, memberId, logMessage):
"""log access to the API. will add a timestamp to the logs
Args:
deviceId (int): The ID of the device
memberId (int): The ID of the member
        logMessage (string): message describing what happened
Returns:
nothing
"""
l = Logaccessdevice(device_id = deviceId, member_id = memberId, message=logMessage, timestamp=datetime.now() )
db.session.add(l)
db.session.commit()
@app.route("/checkAccess/<deviceId>/<serialNumber>")
def checkAccess(deviceId=None, serialNumber=None):
"""Return if serialNumber has access to current device
Given a number off the RFID badge, lookup the user that is associated
with that number, and then check if that user has access to that deviceid
# test with :
# http://localhost:5000/checkAccess/0/a2f49dk3 <- YAY
# http://localhost:5000/checkAccess/0/a2f49dk33 <- FAIL
Args:
deviceId (int): The ID of the device
serialNumber (string) : the Serial number on the badge
Returns:
JSON The return code::
{status: true, message: "Success" } -- Success!
{status: false, message: "fail reason" } -- No good.
"""
    # look up the member first so the access attempt can be logged against it
    # (assumes Member exposes an integer primary key `id`)
    m = Member.query.filter(Member.serial == serialNumber).first()
    memberId = m.id if m else None
    log(deviceId, memberId, "Requesting Access for serial:" + serialNumber)
    if m and m.account_disabled == False:
        log(deviceId, memberId, "Granted Access")
        return json.dumps({'status': True, 'message': "Success"})
    else:
        log(deviceId, memberId, "Denied Access : Access has been revoked")
        return json.dumps({'status': False, 'message': "Access has been revoked"})
@app.route("/log/usageLog")
def showLogusageLog():
"""
Show the usage log, which shows what memberId's are trying
to accomplish.
http://localhost:5000/log/usageLog
"""
logData = ""
return render_template('usageLog.html', logData=logData)
@app.route("/list/members")
def listMembers():
mlist=""
return render_template('memberList.html', mlist=mlist)
@app.route("/processData" , methods=['POST'])
def processData():
"""take a cmd structure to update a table. see the testData.html
in the static folder to see how to send valid data to this endpoint.
This will be used to create new records and update existing records
base on the cmd (update/new)
Args:
cmd : is this new or an update
table: the table to modify
Returns:
JSON The return code::
{status: true, message: "Success" } -- Success!
{status: false, message: "fail reason" } -- No good.
"""
dataStruct = request.form
cmd = dataStruct['cmd']
table = dataStruct['table']
response = '{status: false, message: "no valid path" }'
# if acl check does not return true then fail to make the update
if not checkAcl(dataStruct['username'], dataStruct['passwordHash'] , cmd, table, get_db()):
print cmd
return '{status: false, message: "ACL Fail Check" }'
# when creating functions to handle data, pass in the dataStruct, and get_db()
# which will give it access to the database
# see the response format above to see what kind of string/JSON to return to the client
# so it knows what happened.
if cmd == "new" and table == "member":
response = newMember(dataStruct, get_db() )
if cmd == "update" and table == "member":
response = updateMember(dataStruct, get_db() )
return response
@app.route("/")
def index():
"""main landing page
"""
return render_template('index.html')
### MAIN ###
def main():
app.run()
if __name__ == '__main__':
main()
| apache-2.0 | -2,763,381,210,062,170,000 | 28.48538 | 114 | 0.647995 | false |
ColumbiaCMB/kid_readout | apps/data_taking_scripts/old_scripts/highq_power_sweep_140423_0813f4.py | 1 | 5959 | import matplotlib
from kid_readout.roach import baseband
matplotlib.use('agg')
import numpy as np
import time
import sys
from kid_readout.utils import data_file,sweeps
from kid_readout.analysis.resonator import fit_best_resonator
ri = baseband.RoachBasebandWide()
ri.initialize()
#ri.set_fft_gain(6)
#f0s = np.load('/home/gjones/workspace/apps/f8_fit_resonances.npy')
#f0s = np.load('/home/gjones/workspace/apps/first_pass_sc3x3_0813f9.npy')
#f0s = np.load('/home/gjones/workspace/apps/sc5x4_0813f10_first_pass.npy')#[:4]
#f0s = np.load('/home/gjones/workspace/readout/apps/sc3x3_0813f9_2014-02-11.npy')
#f0s = np.load('/home/gjones/workspace/readout/apps/sc3x3_0813f5_2014-02-27.npy')
f0s = np.load('/home/gjones/workspace/readout/apps/sc3x3_140423_0813f4.npy')
f0s.sort()
#f0s = f0s*(0.9995)
suffix = "led"
nf = len(f0s)
atonce = 4
if nf % atonce > 0:
print "extending list of resonators to make a multiple of ",atonce
f0s = np.concatenate((f0s,np.arange(1,1+atonce-(nf%atonce))+f0s.max()))
offsets = np.linspace(-4882.8125,4638.671875,20)#[5:15]
offsets = offsets
#offsets = np.concatenate(([-40e3,-20e3],offsets,[20e3,40e3]))/1e6
offsets = np.concatenate(([-40e3],offsets,[40e3]))/1e6
#offsets = offsets*4
nsamp = 2**18
step = 1
nstep = 80
f0binned = np.round(f0s*nsamp/512.0)*512.0/nsamp
offset_bins = np.arange(-(nstep+1),(nstep+1))*step
offsets = offset_bins*512.0/nsamp
offsets = np.concatenate(([offsets.min()-20e-3,],offsets,[offsets.max()+20e-3]))
print f0s
print offsets*1e6
print len(f0s)
if False:
from kid_readout.utils.parse_srs import get_all_temperature_data
while True:
temp = get_all_temperature_data()[1][-1]
print "mk stage at", temp
if temp > 0.348:
break
time.sleep(300)
time.sleep(600)
start = time.time()
use_fmin = True
attenlist = np.linspace(33,45,5)-9
#attenlist = [44.0]
#attenlist = attenlist[:4]
for atten in attenlist:
print "setting attenuator to",atten
ri.set_dac_attenuator(atten)
measured_freqs = sweeps.prepare_sweep(ri,f0binned,offsets,nsamp=nsamp)
print "loaded waveforms in", (time.time()-start),"seconds"
sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=8)
orig_sweep_data = sweep_data
meas_cfs = []
idxs = []
delays = []
for m in range(len(f0s)):
fr,s21,errors = sweep_data.select_by_freq(f0s[m])
thiscf = f0s[m]
res = fit_best_resonator(fr[1:-1],s21[1:-1],errors=errors[1:-1]) #Resonator(fr,s21,errors=errors)
delay = res.delay
delays.append(delay)
s21 = s21*np.exp(2j*np.pi*res.delay*fr)
res = fit_best_resonator(fr,s21,errors=errors)
fmin = fr[np.abs(s21).argmin()]
print "s21 fmin", fmin, "original guess",thiscf,"this fit", res.f_0, "delay",delay,"resid delay",res.delay
if use_fmin:
meas_cfs.append(fmin)
else:
if abs(res.f_0 - thiscf) > 0.1:
if abs(fmin - thiscf) > 0.1:
print "using original guess"
meas_cfs.append(thiscf)
else:
print "using fmin"
meas_cfs.append(fmin)
else:
print "using this fit"
meas_cfs.append(res.f_0)
idx = np.unravel_index(abs(measured_freqs - meas_cfs[-1]).argmin(),measured_freqs.shape)
idxs.append(idx)
delay = np.median(delays)
print "median delay is ",delay
nsamp = 2**20
step = 1
f0binned = np.round(f0s*nsamp/512.0)*512.0/nsamp
offset_bins = np.array([-8,-4,-2,-1,0,1,2,4])#np.arange(-4,4)*step
offset_bins = np.concatenate(([-40,-20],offset_bins,[20,40]))
offsets = offset_bins*512.0/nsamp
meas_cfs = np.array(meas_cfs)
f0binned = np.round(meas_cfs*nsamp/512.0)*512.0/nsamp
f0s = f0binned
measured_freqs = sweeps.prepare_sweep(ri,f0binned,offsets,nsamp=nsamp)
print "loaded updated waveforms in", (time.time()-start),"seconds"
sys.stdout.flush()
time.sleep(1)
df = data_file.DataFile(suffix=suffix)
df.log_hw_state(ri)
sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=8, sweep_data=orig_sweep_data)
df.add_sweep(sweep_data)
meas_cfs = []
idxs = []
for m in range(len(f0s)):
fr,s21,errors = sweep_data.select_by_freq(f0s[m])
thiscf = f0s[m]
s21 = s21*np.exp(2j*np.pi*delay*fr)
res = fit_best_resonator(fr,s21,errors=errors) #Resonator(fr,s21,errors=errors)
fmin = fr[np.abs(s21).argmin()]
print "s21 fmin", fmin, "original guess",thiscf,"this fit", res.f_0
if use_fmin:
meas_cfs.append(fmin)
else:
if abs(res.f_0 - thiscf) > 0.1:
if abs(fmin - thiscf) > 0.1:
print "using original guess"
meas_cfs.append(thiscf)
else:
print "using fmin"
meas_cfs.append(fmin)
else:
print "using this fit"
meas_cfs.append(res.f_0)
idx = np.unravel_index(abs(measured_freqs - meas_cfs[-1]).argmin(),measured_freqs.shape)
idxs.append(idx)
print meas_cfs
ri.add_tone_freqs(np.array(meas_cfs))
ri.select_bank(ri.tone_bins.shape[0]-1)
ri._sync()
time.sleep(0.5)
raw_input("turn on LED take data")
df.log_hw_state(ri)
nsets = len(meas_cfs)/atonce
tsg = None
for iset in range(nsets):
selection = range(len(meas_cfs))[iset::nsets]
ri.select_fft_bins(selection)
ri._sync()
time.sleep(0.2)
t0 = time.time()
dmod,addr = ri.get_data_seconds(30,demod=True)
print nsets,iset,tsg
tsg = df.add_timestream_data(dmod, ri, t0, tsg=tsg)
df.sync()
df.nc.close()
print "completed in",((time.time()-start)/60.0),"minutes"
| bsd-2-clause | -5,829,926,809,568,526,000 | 32.105556 | 114 | 0.606813 | false |
ebertti/nospam | configuracao.py | 1 | 1030 | # coding: utf-8
import os
PROJECT_DIR = os.path.dirname(__file__)
DATASET_COMPLETO = os.path.join(PROJECT_DIR, 'dataset/completo/youtube_comments_20120117.csv')
DATASET_TREINO = os.path.join(PROJECT_DIR, 'dataset/treino/')
DATASET_PREPARADO = os.path.join(PROJECT_DIR, 'dataset/preparado/')
HOST_MONITOR = 'localhost'
HOST_PORTA = 8124
LOGGING = {
'version': 1,
'disable_existing_loggers': False, # this fixes the problem
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(message)s'
},
},
'handlers': {
'default': {
'level': 'NOTSET',
'formatter': 'standard',
'class': 'logging.StreamHandler',
},
'arquivo': {
'level': 'INFO',
'formatter': 'standard',
'class': 'logging.FileHandler',
'filename': "logfile.log",
},
},
'root': {
'handlers': ['default', 'arquivo'],
'level': 'NOTSET',
'propagate': True
}
} | mit | -8,956,428,627,889,489,000 | 24.775 | 94 | 0.538835 | false |
fengalin/gstation-edit | gstation_edit/midi/port.py | 1 | 1045 | """
gstation-edit MidiPort definition
"""
# this file is part of gstation-edit
# Copyright (C) F LAIGNEL 2009-2017 <[email protected]>
#
# gstation-edit is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gstation-edit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
class MidiPort:
def __init__(self, client, port, port_name):
self.client = client
self.port = port
self.port_name = port_name
def __str__(self):
return '%s %d:%d'%(self.port_name,
self.client, self.port)
| lgpl-3.0 | 1,040,199,894,642,888,700 | 36.321429 | 74 | 0.69378 | false |
audy/banana | banana.py | 1 | 4205 | #!/usr/bin/env python
# YAM SPLIT - Austin G. Davis-Richardson
# Splits barcoded, 3-paired illumina files based on a .yaml config file
import sys
import os
from glob import glob
import string
try:
import yaml
except ImportError:
print >> sys.stderr, "could not import yaml\ntry:\n sudo easy_install pyyaml"
quit(1)
# PARSE ARGUMENTS
try:
config_file = sys.argv[1]
reads_directory = sys.argv[2]
output_directory = sys.argv[3]
except IndexError:
    print >> sys.stderr, "usage: %s <config.yaml> <reads_directory/> <output_directory/>" %\
        sys.argv[0]
    quit(1)
# Parse YAML file
config = yaml.load(open(config_file))
# Make Output Directories
try:
os.mkdir(output_directory)
except OSError:
print >> sys.stderr, "%s exists! Delete or move." % output_directory
quit()
for lane in config['lanes']:
for experiment in config['lanes'][lane]:
try:
os.mkdir('%s/%s' % (output_directory, experiment))
except OSError:
continue
# DEFINE HOW FILES LOOK
FILENAME = "s_%(lane)s_%(mate)s_%(number)s_qseq.txt"
RANGE = range(1, 121) # File numbers go from 1 to 120
# For reverse complementing the barcode sequence
COMPLEMENT = string.maketrans('GATCRYgatcry', 'CTAGYRctagyr')
# Test reverse complement
assert 'GATCRYgatcry'.translate(COMPLEMENT) == 'CTAGYRctagyr'
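# Barcodes are read from mate 2 of each qseq triplet (9th tab-separated field)
# and reverse-complemented before matching against the configured barcode sets.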
# Load Barcodes
for key in config['barcodes']:
print "%s => %s barcodes" % (key, len(config['barcodes'][key]))
# FOR LANE IN LANES
for lane in config['lanes']:
print 'Lane: %s' % lane
# FOR EXP IN LANE.EXPERIMENTS
for experiment in config['lanes'][lane]:
# Load BARCODES
barcode_type = config['lanes'][lane][experiment]['barcodes']
barcode_range = config['lanes'][lane][experiment]['range']
# check if range or individual barcodes specified
if '-' in barcode_range:
start, stop = config['lanes'][lane][experiment]['range'].split('-')
start, stop = int(start), int(stop) + 1
barcode_range = range(start, stop)
print '\t%s (%s, %s-%s)' % (experiment, barcode_type, start, stop),
else:
barcode_range = barcode_range.split()
print '\t%s (%s, %s)' % (experiment, barcode_type, ','.join(barcode_range))
to_keep = dict( (v, k) for k, v in config['barcodes'][barcode_type].items() if k in barcode_range )
# Get which lines to keep:
kept, thrown_away = 0, 0
for file_no in RANGE:
line_to_barcode = {}
filename = '%s/%s' % (reads_directory, FILENAME % {
'lane': lane,
'mate': 2,
'number': '%04d' % file_no })
with open(filename) as handle:
print filename
for n, line in enumerate(handle):
barcode = line.split('\t')[8][::-1].translate(COMPLEMENT)
if barcode in to_keep.keys():
line_to_barcode[n] = to_keep[barcode]
kept += 1
else:
thrown_away += 1
print len(line_to_barcode)
# Output reads.
for mate in [1, 3]:
# MAKE HANDLES:
handles = dict(
(barcode,
open('%s/%s/%s' % (output_directory, experiment,
'IL5_L_%s_B_%03d_%s.txt' % (
lane, barcode, mate
)), 'a')) for barcode in to_keep.values()
)
# Read Raw Reads, splitting
infilename = '%s/%s' % (reads_directory, FILENAME % {
'lane': lane,
'mate': mate,
'number': '%04d' % file_no })
with open(infilename) as handle:
for n, line in enumerate(handle):
if n in line_to_barcode:
barcode = line_to_barcode[n]
print >> handles[barcode], line.strip()
del handles # Garbage Collector can't keep up
| mit | -1,384,405,166,370,904,000 | 31.099237 | 107 | 0.524614 | false |
barrachri/epcon | assopy/views.py | 1 | 23911 | # -*- coding: UTF-8 -*-
from django import forms
from django import http
from django.conf import settings as dsettings
from django.contrib import auth
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.admin.util import unquote
from django.core.urlresolvers import reverse
from django.db import transaction
from django.shortcuts import get_object_or_404, redirect, render_to_response
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
from assopy import forms as aforms
from assopy import janrain
from assopy import models
from assopy import settings
from assopy import utils as autils
if settings.GENRO_BACKEND:
from assopy.clients import genro
from email_template import utils
import json
import logging
import urllib
from datetime import datetime
log = logging.getLogger('assopy.views')
class HttpResponseRedirectSeeOther(http.HttpResponseRedirect):
status_code = 303
def __init__(self, url):
if not url.startswith('http'):
url = dsettings.DEFAULT_URL_PREFIX + url
super(HttpResponseRedirectSeeOther, self).__init__(url)
# see: http://www.djangosnippets.org/snippets/821/
def render_to(template):
"""
Decorator for Django views that sends returned dict to render_to_response function
with given template and RequestContext as context instance.
If view doesn't return dict then decorator simply returns output.
Additionally view can return two-tuple, which must contain dict as first
element and string with template name as second. This string will
override template name, given as parameter
Parameters:
- template: template name to use
"""
def renderer(func):
def wrapper(request, *args, **kw):
output = func(request, *args, **kw)
if isinstance(output, (list, tuple)):
return render_to_response(output[1], output[0], RequestContext(request))
elif isinstance(output, dict):
return render_to_response(template, output, RequestContext(request))
return output
return wrapper
return renderer
def render_to_json(f):
from conference.views import json_dumps
if dsettings.DEBUG:
ct = 'text/plain'
j = lambda d: json_dumps(d, indent=2)
else:
ct = 'application/json'
j = json_dumps
def wrapper(*args, **kw):
try:
result = f(*args, **kw)
except Exception, e:
result = j(str(e))
status = 500
else:
if isinstance(result, http.HttpResponse):
return result
else:
from django.forms.util import ErrorDict
status = 200 if not isinstance(result, ErrorDict) else 400
result = j(result)
return http.HttpResponse(content=result, content_type=ct, status=status)
return wrapper
@login_required
@render_to('assopy/profile.html')
def profile(request):
user = request.user.assopy_user
if request.method == 'POST':
form = aforms.Profile(data=request.POST, files=request.FILES, instance=user)
if form.is_valid():
form.save()
messages.info(request, 'Profile updated')
return HttpResponseRedirectSeeOther('.')
else:
form = aforms.Profile(instance=user)
return {
'user': user,
'form': form,
}
@login_required
def profile_identities(request):
if request.method == 'POST':
try:
x = request.user.assopy_user.identities.get(identifier=request.POST['identifier'])
except:
return http.HttpResponseBadRequest()
log.info(
'Removed the identity "%s" from the user "%s" "%s"',
x.identifier,
x.user.name(),
x.user.user.email)
x.delete()
if request.is_ajax():
return http.HttpResponse('')
else:
return HttpResponseRedirectSeeOther(reverse('assopy-profile'))
@login_required
@render_to('assopy/billing.html')
def billing(request, order_id=None):
user = request.user.assopy_user
if request.method == 'POST':
form = aforms.BillingData(data=request.POST, files=request.FILES, instance=user)
if form.is_valid():
form.save()
return HttpResponseRedirectSeeOther('.')
else:
form = aforms.BillingData(instance=user)
return {
'user': user,
'form': form,
}
@render_to('assopy/new_account.html')
def new_account(request):
if request.user.is_authenticated():
return redirect('assopy-profile')
if request.method == 'GET':
form = aforms.NewAccountForm()
else:
form = aforms.NewAccountForm(data=request.POST)
if form.is_valid():
data = form.cleaned_data
user = models.User.objects.create_user(
email=data['email'],
first_name=data['first_name'],
last_name=data['last_name'],
password=data['password1'],
)
request.session['new-account-user'] = user.pk
return HttpResponseRedirectSeeOther(reverse('assopy-new-account-feedback'))
return {
'form': form,
'next': request.GET.get('next', '/'),
}
@render_to('assopy/new_account_feedback.html')
def new_account_feedback(request):
try:
user = models.User.objects.get(pk=request.session['new-account-user'])
except KeyError:
return redirect('/')
except models.User.DoesNotExist:
user = None
return {
'u': user,
}
def OTCHandler_V(request, token):
auth.logout(request)
user = token.user
user.is_active = True
user.save()
user = auth.authenticate(uid=user.id)
auth.login(request, user)
return redirect('assopy-profile')
def OTCHandler_J(request, token):
payload = json.loads(token.payload)
email = payload['email']
profile = payload['profile']
log.info('"%s" verified; link to "%s"', email, profile['identifier'])
identity = _linkProfileToEmail(email, profile)
duser = auth.authenticate(identifier=identity.identifier)
auth.login(request, duser)
return redirect('assopy-profile')
def otc_code(request, token):
t = models.Token.objects.retrieve(token)
if t is None:
raise http.Http404()
from assopy.utils import dotted_import
try:
path = settings.OTC_CODE_HANDLERS[t.ctype]
except KeyError:
return http.HttpResponseBadRequest()
return dotted_import(path)(request, t)
def _linkProfileToEmail(email, profile):
try:
current = autils.get_user_account_from_email(email)
except auth.models.User.DoesNotExist:
current = auth.models.User.objects.create_user(janrain.suggest_username(profile), email)
try:
current.first_name = profile['name']['givenName']
except KeyError:
pass
try:
current.last_name = profile['name']['familyName']
except KeyError:
pass
current.is_active = True
current.save()
log.debug('new (active) django user created "%s"', current)
else:
log.debug('django user found "%s"', current)
try:
        # if `current` was found among the local users, the assopy
        # counterpart may already exist as well
user = current.assopy_user
except models.User.DoesNotExist:
log.debug('the current user "%s" will become an assopy user', current)
user = models.User(user=current)
user.save()
log.debug('a new identity (for "%s") will be linked to "%s"', profile['identifier'], current)
identity = models.UserIdentity.objects.create_from_profile(user, profile)
return identity
@csrf_exempt
def janrain_token(request):
if request.method != 'POST':
return http.HttpResponseNotAllowed(('POST',))
redirect_to = request.session.get('jr_next', reverse('assopy-profile'))
try:
token = request.POST['token']
except KeyError:
return http.HttpResponseBadRequest()
try:
profile = janrain.auth_info(settings.JANRAIN['secret'], token)
except Exception, e:
log.warn('exception during janrain auth info: "%s"', str(e))
return HttpResponseRedirectSeeOther(dsettings.LOGIN_URL)
log.info('janrain profile from %s: %s', profile['providerName'], profile['identifier'])
current = request.user
duser = auth.authenticate(identifier=profile['identifier'])
if duser is None:
log.info('%s is a new identity', profile['identifier'])
        # this is the first time this user logs in with this provider
if not current.is_anonymous():
verifiedEmail = current.email
else:
            # we have to create everything: the django user, the assopy user and the identity
if not 'verifiedEmail' in profile:
                # the chosen provider does not give us a verified email; to
                # avoid account theft we cannot activate the account. We have
                # to ask the user for a valid email address and send a
                # confirmation link to that address.
log.info('janrain profile without a verified email')
request.session['incomplete-profile'] = profile
return HttpResponseRedirectSeeOther(reverse('assopy-janrain-incomplete-profile'))
else:
verifiedEmail = profile['verifiedEmail']
log.info('janrain profile with a verified email "%s"', verifiedEmail)
identity = _linkProfileToEmail(verifiedEmail, profile)
duser = auth.authenticate(identifier=identity.identifier)
auth.login(request, duser)
else:
        # this is a known user; we only need to verify that the user linked
        # to the identity and the one currently logged in are the same
        # person
if current.is_anonymous():
            # nothing else to do but log in as the user linked to the
            # identity
auth.login(request, duser)
elif current != duser:
            # the current user and the one linked to the identity do not
            # match; show an error message
return HttpResponseRedirectSeeOther(reverse('assopy-janrain-login_mismatch'))
else:
            # nothing to do, the user is already logged in
pass
return HttpResponseRedirectSeeOther(redirect_to)
@render_to('assopy/janrain_incomplete_profile.html')
def janrain_incomplete_profile(request):
p = request.session['incomplete-profile']
try:
name = p['displayName']
except KeyError:
name = '%s %s' % (p['name'].get('givenName', ''), p['name'].get('familyName', ''))
class Form(forms.Form):
email = forms.EmailField()
if request.method == 'POST':
form = Form(data=request.POST)
if form.is_valid():
email = form.cleaned_data['email']
payload = {
'email': email,
'profile': p,
}
token = models.Token.objects.create(ctype='j', payload=json.dumps(payload))
current = autils.get_user_account_from_email(email, default=None)
utils.email(
'janrain-incomplete',
ctx={
'name': name,
'provider': p['providerName'],
'token': token,
'current': current,
},
to=[email]
).send()
del request.session['incomplete-profile']
return HttpResponseRedirectSeeOther(reverse('assopy-janrain-incomplete-profile-feedback'))
else:
form = Form()
return {
'provider': p['providerName'],
'name': name,
'form': form,
}
@render_to('assopy/janrain_incomplete_profile_feedback.html')
def janrain_incomplete_profile_feedback(request):
return {}
@render_to('assopy/janrain_login_mismatch.html')
def janrain_login_mismatch(request):
return {}
@render_to('assopy/checkout.html')
def checkout(request):
if request.method == 'POST':
if not request.user.is_authenticated():
return http.HttpResponseBadRequest('unauthorized')
form = aforms.FormTickets(data=request.POST)
if form.is_valid():
data = form.cleaned_data
o = models.Order.objects.create(user=request.user.assopy_user, payment=data['payment'], items=data['tickets'])
if o.payment_url:
return HttpResponseRedirectSeeOther(o.payment_url)
else:
return HttpResponseRedirectSeeOther(reverse('assopy-tickets'))
else:
form = aforms.FormTickets()
return {
'form': form,
}
@login_required
@render_to('assopy/tickets.html')
def tickets(request):
if settings.TICKET_PAGE:
return redirect(settings.TICKET_PAGE)
return {}
@login_required
@render_to_json
def geocode(request):
address = request.GET.get('address', '').strip()
region = request.GET.get('region')
if not address:
return ''
from assopy.utils import geocode as g
return g(address, region=region)
def paypal_billing(request, code):
    # this view performs the redirect to paypal
o = get_object_or_404(models.Order, code=code.replace('-', '/'))
if o.total() == 0:
o.confirm_order(datetime.now())
return HttpResponseRedirectSeeOther(reverse('assopy-paypal-feedback-ok', kwargs={'code': code}))
form = aforms.PayPalForm(o)
return HttpResponseRedirectSeeOther("%s?%s" % (form.paypal_url(), form.as_url_args()))
def paypal_cc_billing(request, code):
    # this view performs the redirect to paypal and adds the extra info
    # needed for credit card billing
o = get_object_or_404(models.Order, code=code.replace('-', '/'))
if o.total() == 0:
o.confirm_order(datetime.now())
return HttpResponseRedirectSeeOther(reverse('assopy-paypal-feedback-ok', kwargs={'code': code}))
form = aforms.PayPalForm(o)
cc_data = {
"address_override" : 0,
"no_shipping" : 1,
"email": o.user.user.email,
"first_name" : o.card_name,
"last_name": "",
"address1": o.address,
#"zip": o.zip_code,
#"state": o.state,
"country": o.country,
"address_name": o.card_name,
}
qparms = urllib.urlencode([ (k,x.encode('utf-8') if isinstance(x, unicode) else x) for k,x in cc_data.items() ])
return HttpResponseRedirectSeeOther(
"%s?%s&%s" % (
form.paypal_url(),
form.as_url_args(),
qparms
)
)
@render_to('assopy/paypal_cancel.html')
def paypal_cancel(request, code):
o = get_object_or_404(models.Order, code=code.replace('-', '/'))
form = aforms.PayPalForm(o)
return {'form': form }
# it seems that the paypal redirect sometimes ends with a POST from the
# browser (HttpResponseRedirectSeeOther, anyone?); since nothing dangerous is
# executed here we skip the csrf check
@csrf_exempt
@render_to('assopy/paypal_feedback_ok.html')
def paypal_feedback_ok(request, code):
o = get_object_or_404(models.Order, code=code.replace('-', '/'))
if o.user.user != request.user or o.method not in ('paypal', 'cc'):
raise http.Http404()
    # wait a little to give Paypal time to send us the IPN notification
from time import sleep
sleep(0.4)
return {
'order': o,
}
@login_required
@render_to('assopy/bank_feedback_ok.html')
def bank_feedback_ok(request, code):
o = get_object_or_404(models.Order, code=code.replace('-', '/'))
if o.user.user != request.user or o.method != 'bank':
raise http.Http404()
return {
'order': o,
}
@login_required
def invoice(request, order_code, code, mode='html'):
if not request.user.is_staff:
userfilter = {
'order__user__user': request.user,
}
else:
userfilter = {}
invoice = get_object_or_404(
models.Invoice,
code=unquote(code),
order__code=unquote(order_code),
**userfilter
)
if mode == 'html':
order = invoice.order
address = '%s, %s' % (order.address, unicode(order.country))
ctx = {
'document': ('Fattura N.', 'Invoice N.'),
'title': unicode(invoice),
'code': invoice.code,
'emit_date': invoice.emit_date,
'order': {
'card_name': order.card_name,
'address': address,
'billing_notes': order.billing_notes,
'cf_code': order.cf_code,
'vat_number': order.vat_number,
},
'items': invoice.invoice_items(),
'note': invoice.note,
'price': {
'net': invoice.net_price(),
'vat': invoice.vat_value(),
'total': invoice.price,
},
'vat': invoice.vat,
'real': settings.IS_REAL_INVOICE(invoice.code),
}
return render_to_response('assopy/invoice.html', ctx, RequestContext(request))
else:
if settings.GENRO_BACKEND:
assopy_id = invoice.assopy_id
data = genro.invoice(assopy_id)
if data.get('credit_note'):
order = get_object_or_404(models.Order, invoices__credit_notes__assopy_id=assopy_id)
else:
order = get_object_or_404(models.Order, assopy_id=data['order_id'])
raw = urllib.urlopen(genro.invoice_url(assopy_id))
else:
hurl = reverse('assopy-invoice-html', args=(order_code, code))
if not settings.WKHTMLTOPDF_PATH:
return HttpResponseRedirectSeeOther(hurl)
raw = _pdf(request, hurl)
order = invoice.order
from conference.models import Conference
try:
conf = Conference.objects\
.get(conference_start__year=order.created.year).code
except Conference.DoesNotExist:
conf = order.created.year
fname = '[%s invoice] %s.pdf' % (conf, invoice.code.replace('/', '-'))
response = http.HttpResponse(raw, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="%s"' % fname
return response
def _pdf(request, url):
import subprocess
command_args = [
settings.WKHTMLTOPDF_PATH,
'--cookie',
dsettings.SESSION_COOKIE_NAME,
request.COOKIES.get(dsettings.SESSION_COOKIE_NAME),
'--zoom',
'1.3',
"%s%s" % (dsettings.DEFAULT_URL_PREFIX, url),
'-'
]
#print command_args
popen = subprocess.Popen(
command_args,
bufsize=4096,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
raw, _ = popen.communicate()
#print raw
return raw
@login_required
def credit_note(request, order_code, code, mode='html'):
if not request.user.is_staff:
userfilter = { 'invoice__order__user__user': request.user, }
else:
userfilter = {}
try:
cnote = models.CreditNote.objects\
.select_related('invoice__order')\
.get(code=unquote(code), invoice__order__code=unquote(order_code), **userfilter)
except models.CreditNote.DoesNotExist:
raise http.Http404()
order = cnote.invoice.order
if mode == 'html':
address = '%s, %s' % (order.address, unicode(order.country))
items = cnote.note_items()
for x in items:
x['price'] = x['price'] * -1
invoice = cnote.invoice
rif = invoice.code
if invoice.payment_date:
rif = '%s - %s' % (rif, invoice.payment_date.strftime('%d %b %Y'))
note = 'Nota di credito / Credit Note <b>Rif: %s</b>' % rif
ctx = {
'document': ('Nota di credito', 'Credit note'),
'title': unicode(cnote),
'code': cnote.code,
'emit_date': cnote.emit_date,
'order': {
'card_name': order.card_name,
'address': address,
'billing_notes': order.billing_notes,
'cf_code': order.cf_code,
'vat_number': order.vat_number,
},
'items': items,
'note': note,
'price': {
'net': cnote.net_price() * -1,
'vat': cnote.vat_value() * -1,
'total': cnote.price * -1,
},
'vat': cnote.invoice.vat,
'real': True,
}
return render_to_response('assopy/invoice.html', ctx, RequestContext(request))
else:
hurl = reverse('assopy-credit_note-html', args=(order_code, code))
if not settings.WKHTMLTOPDF_PATH:
print "NO WKHTMLTOPDF_PATH SET"
return HttpResponseRedirectSeeOther(hurl)
raw = _pdf(request, hurl)
from conference.models import Conference
try:
conf = Conference.objects\
.get(conference_start__year=order.created.year).code
except Conference.DoesNotExist:
conf = order.created.year
fname = '[%s credit note] %s.pdf' % (conf, cnote.code.replace('/', '-'))
response = http.HttpResponse(raw, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="%s"' % fname
return response
@login_required
@render_to('assopy/voucher.html')
def voucher(request, order_id, item_id):
item = get_object_or_404(models.OrderItem, order=order_id, id=item_id)
if (not item.ticket or item.ticket.fare.payment_type != 'v' or item.order.user.user != request.user) and not request.user.is_superuser:
raise http.Http404()
return {
'item': item,
}
@csrf_exempt
def order_complete(request, assopy_id):
if request.method != 'POST':
return http.HttpResponseNotAllowed(('POST',))
order = get_object_or_404(models.Order, assopy_id=assopy_id)
r = order.complete()
log.info('remote notice! order "%s" (%s) complete! result=%s', order.code, order.assopy_id, r)
return http.HttpResponse('')
@login_required
@render_to_json
def refund(request, order_id, item_id):
try:
item = models.OrderItem.objects\
.select_related('order')\
.get(order=order_id, id=item_id)
except models.OrderItem.DoesNotExist:
raise http.Http404()
try:
r = models.RefundOrderItem.objects.select_related('refund').get(orderitem=item_id)
if r.refund.status == 'rejected':
r = None
except models.RefundOrderItem.DoesNotExist:
r = None
if request.method == 'POST':
if r:
return http.HttpResponseBadRequest()
try:
d = request.session['doppelganger']
except KeyError:
user = request.user
else:
from django.contrib.auth.models import User
user = User.objects.get(id=d[0])
if not settings.ORDERITEM_CAN_BE_REFUNDED(user, item):
return http.HttpResponseBadRequest()
form = aforms.RefundItemForm(item, data=request.POST)
if not form.is_valid():
return form.errors
data = form.cleaned_data
note = ''
if data['paypal'] or data['bank']:
if data['paypal']:
note += 'paypal: %s\n' % data['paypal']
if data['bank']:
note += 'bank routing: %s\n' % data['bank']
note += '----------------------------------------\n'
r = models.Refund.objects.create_from_orderitem(
item, reason=data['reason'], internal_note=note)
if not r:
return None
return {
'status': r.status,
}
| bsd-2-clause | -1,515,016,967,348,325,000 | 34.253687 | 139 | 0.602251 | false |
ahjulstad/mathdom-python3 | mathml/utils/sax_pmathml.py | 1 | 1682 | from mathml.pmathml.element import *
from mathml.pmathml.mtoken import MToken
import xml.sax.handler
class MathMLHandler(xml.sax.handler.ContentHandler):
class Elem(object):
__slots__ = ('parent', 'name', 'attributes', 'text', 'children')
def __init__(self, plotter):
self.plotter = plotter
self.current = self.Elem()
self.current.children = []
def characters(self, content):
self.current.text += content
	def startElementNS(self, ns_name, qname, attrs):
		(ns, name) = ns_name
elem = self.Elem()
elem.parent = self.current
elem.parent.children.append(elem)
elem.text = ''
elem.attributes = {}
for key, value in list(attrs.items()):
elem.attributes[key] = value
elem.children = []
elem.name = name
self.current = elem
	def endElementNS(self, ns_name, qname):
		(ns, name) = ns_name
self.current = self.current.parent
def __buildTreeRecursive(self, node):
klass = xml_mapping[node.name]
if issubclass(klass, MToken):
element = klass(self.plotter, node.text.strip())
else:
children = list(map(self.__buildTreeRecursive, node.children))
element = klass(self.plotter, children)
for name, value in list(node.attributes.items()):
element.setAttribute(name, value)
return element
def buildTree(self):
assert len(self.current.children) == 1
elem = self.__buildTreeRecursive(self.current.children[0])
del self.current
return elem
def buildFromPMathml(etree, plotter):
handler = MathMLHandler(plotter)
etree.saxify(handler)
return handler.buildTree()
def buildFromMathDOM(mathdom, plotter):
return buildFromPMathml(mathdom.to_pmathml(), plotter)
| mit | 2,438,675,304,666,861,600 | 28 | 67 | 0.709275 | false |
MJL85/mnet | natlas/network.py | 1 | 19257 | #!/usr/bin/python
'''
natlas
network.py
Michael Laforest
[email protected]
Copyright (C) 2015-2018 Michael Laforest
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
from timeit import default_timer as timer
from .config import natlas_config
from .util import *
from .node import *
DCODE_ROOT = 0x01
DCODE_ERR_SNMP = 0x02
DCODE_DISCOVERED = 0x04
DCODE_STEP_INTO = 0x08
DCODE_CDP = 0x10
DCODE_LLDP = 0x20
DCODE_INCLUDE = 0x40
DCODE_LEAF = 0x80
DCODE_ROOT_STR = '[root]'
DCODE_ERR_SNMP_STR = '!'
DCODE_DISCOVERED_STR = '+'
DCODE_STEP_INTO_STR = '>'
DCODE_CDP_STR = '[ cdp]'
DCODE_LLDP_STR = '[lldp]'
DCODE_INCLUDE_STR = 'i'
DCODE_LEAF_STR = 'L'
NODE_KNOWN = 0
NODE_NEW = 1
NODE_NEWIP = 2
class natlas_network:
def __init__(self, conf):
self.root_node = None
self.nodes = []
self.max_depth = 0
self.config = conf
self.verbose = 1
def __str__(self):
return ('<root_node="%s", num_nodes=%i>' % (self.root_node.name, len(self.nodes)))
def __repr__(self):
return self.__str__()
def set_max_depth(self, depth):
self.max_depth = depth
def reset_discovered(self):
for n in self.nodes:
n.discovered = 0
def set_verbose(self, level):
'''
Set the verbose output level for discovery output.
Args:
Level 0 = no output
1 = normal output
'''
self.verbose = level
def discover(self, ip):
'''
Discover the network starting at the defined root node IP.
Recursively enumerate the network tree up to self.depth.
Populates self.nodes[] as a list of discovered nodes in the
network with self.root_node being the root.
This function will discover the network with minimal information.
It is enough to define the structure of the network but will not
include much data on each node. Call discover_details() after this
to update the self.nodes[] array with more info.
'''
if (self.verbose > 0):
print('Discovery codes:\n' \
' . depth %s connection error\n' \
' %s discovering node %s numerating adjacencies\n' \
' %s include node %s leaf node\n' %
(DCODE_ERR_SNMP_STR,
DCODE_DISCOVERED_STR, DCODE_STEP_INTO_STR,
DCODE_INCLUDE_STR, DCODE_LEAF_STR)
)
print('Discovering network...')
# Start the process of querying this node and recursing adjacencies.
node, new_node = self.__query_node(ip, 'UNKNOWN')
self.root_node = node
if (node != None):
self.nodes.append(node)
self.__print_step(node.ip[0], node.name, 0, DCODE_ROOT|DCODE_DISCOVERED)
self.__discover_node(node, 0)
else:
return
# we may have missed chassis info
for n in self.nodes:
if ((n.serial == None) | (n.plat == None) | (n.ios == None)):
n.opts.get_chassis_info = True
if (n.serial == None):
n.opts.get_serial = True
if (n.ios == None):
n.opts.get_ios = True
if (n.plat == None):
n.opts.get_plat = True
n.query_node()
def discover_details(self):
'''
Enumerate the discovered nodes from discover() and update the
nodes in the array with additional info.
'''
if (self.root_node == None):
return
if (self.verbose > 0):
print('\nCollecting node details...')
ni = 0
for n in self.nodes:
ni = ni + 1
indicator = '+'
if (n.snmpobj.success == 0):
indicator = '!'
if (self.verbose > 0):
sys.stdout.write('[%i/%i]%s %s (%s)' % (ni, len(self.nodes), indicator, n.name, n.snmpobj._ip))
sys.stdout.flush()
# set what details to discover for this node
n.opts.get_router = True
n.opts.get_ospf_id = True
n.opts.get_bgp_las = True
n.opts.get_hsrp_pri = True
n.opts.get_hsrp_vip = True
n.opts.get_serial = True
n.opts.get_stack = True
n.opts.get_stack_details = self.config.diagram.get_stack_members
n.opts.get_vss = True
n.opts.get_vss_details = self.config.diagram.get_vss_members
n.opts.get_svi = True
n.opts.get_lo = True
n.opts.get_vpc = True
n.opts.get_ios = True
n.opts.get_plat = True
start = timer()
n.query_node()
end = timer()
if (self.verbose > 0):
print(' %.2f sec' % (end - start))
# There is some back fill information we can populate now that
# we know all there is to know.
if (self.verbose > 0):
print('\nBack filling node details...')
for n in self.nodes:
# Find and link VPC nodes together for easy reference later
if ((n.vpc_domain != None) & (n.vpc_peerlink_node == None)):
for link in n.links:
if ((link.local_port == n.vpc_peerlink_if) | (link.local_lag == n.vpc_peerlink_if)):
n.vpc_peerlink_node = link.node
link.node.vpc_peerlink_node = n
break
def __print_step(self, ip, name, depth, dcodes):
if (self.verbose == 0):
return
if (dcodes & DCODE_DISCOVERED):
sys.stdout.write('%-3i' % len(self.nodes))
else:
sys.stdout.write(' ')
if (dcodes & DCODE_INCLUDE):
# flip this off cause we didn't even try
dcodes = dcodes & ~DCODE_ERR_SNMP
if (dcodes & DCODE_ROOT): sys.stdout.write( DCODE_ROOT_STR )
elif (dcodes & DCODE_CDP): sys.stdout.write( DCODE_CDP_STR )
elif (dcodes & DCODE_LLDP): sys.stdout.write( DCODE_LLDP_STR )
else: sys.stdout.write(' ')
status = ''
if (dcodes & DCODE_ERR_SNMP): status += DCODE_ERR_SNMP_STR
if (dcodes & DCODE_LEAF): status += DCODE_LEAF_STR
elif (dcodes & DCODE_INCLUDE): status += DCODE_INCLUDE_STR
if (dcodes & DCODE_DISCOVERED): status += DCODE_DISCOVERED_STR
elif (dcodes & DCODE_STEP_INTO): status += DCODE_STEP_INTO_STR
sys.stdout.write('%3s' % status)
for i in range(0, depth):
sys.stdout.write('.')
name = util.shorten_host_name(name, self.config.host_domains)
if (self.verbose > 0):
print('%s (%s)' % (name, ip))
def __query_node(self, ip, host):
'''
Query this node.
Return node details and if we already knew about it or if this is a new node.
Don't save the node to the known list, just return info about it.
Args:
ip: IP Address of the node.
host: Hostname of this known (if known from CDP/LLDP)
Returns:
natlas_node: Node of this object
int: NODE_NEW = Newly discovered node
NODE_NEWIP = Already knew about this node but not by this IP
NODE_KNOWN = Already knew about this node
'''
host = util.shorten_host_name(host, self.config.host_domains)
node, node_updated = self.__get_known_node(ip, host)
if (node == None):
# new node
node = natlas_node()
node.name = host
node.ip = [ip]
state = NODE_NEW
else:
# existing node
if (node.snmpobj.success == 1):
# we already queried this node successfully - return it
return (node, NODE_KNOWN)
# existing node but we couldn't connect before
if (node_updated == 1):
state = NODE_NEWIP
else:
state = NODE_KNOWN
node.name = host
if (ip == 'UNKNOWN'):
return (node, state)
# vmware ESX reports the IP as 0.0.0.0
# LLDP can return an empty string for IPs.
if ((ip == '0.0.0.0') | (ip == '')):
return (node, state)
# find valid credentials for this node
if (node.try_snmp_creds(self.config.snmp_creds) == 0):
return (node, state)
node.name = node.get_system_name(self.config.host_domains)
if (node.name != host):
# the hostname changed (cdp/lldp vs snmp)!
# double check we don't already know about this node
if (state == NODE_NEW):
node2, node_updated2 = self.__get_known_node(ip, host)
if ((node2 != None) & (node_updated2 == 0)):
return (node, NODE_KNOWN)
if (node_updated2 == 1):
state = NODE_NEWIP
# Finally, if we still don't have a name, use the IP.
# e.g. Maybe CDP/LLDP was empty and we dont have good credentials
# for this device. A blank name can break Dot.
if ((node.name == None) | (node.name == '')):
node.name = node.get_ipaddr()
node.opts.get_serial = True # CDP/LLDP does not report, need for extended ACL
node.query_node()
return (node, state)
def __get_known_node(self, ip, host):
'''
Look for known nodes by IP and HOST.
If found by HOST, add the IP if not already known.
Return:
node: Node, if found. Otherwise None.
updated: 1=updated, 0=not updated
'''
# already known by IP ?
for ex in self.nodes:
for exip in ex.ip:
if (exip == '0.0.0.0'):
continue
if (exip == ip):
return (ex, 0)
# already known by HOST ?
node = self.__get_known_node_by_host(host)
if (node != None):
# node already known
if (ip not in node.ip):
node.ip.append(ip)
return (node, 1)
return (node, 0)
return (None, 0)
def __discover_node(self, node, depth):
'''
Given a node, recursively enumerate its adjacencies
until we reach the specified depth (>0).
Args:
node: natlas_node object to enumerate.
depth: The depth left that we can go further away from the root.
'''
if (node == None):
return
if (depth >= self.max_depth):
return
if (node.discovered > 0):
return
node.discovered = 1
# vmware ESX can report IP as 0.0.0.0
# If we are allowing 0.0.0.0/32 in the config,
# then we added it as a leaf, but don't discover it
if (node.ip[0] == '0.0.0.0'):
return
# may be a leaf we couldn't connect to previously
if (node.snmpobj.success == 0):
return
# print some info to stdout
dcodes = DCODE_STEP_INTO
if (depth == 0):
dcodes |= DCODE_ROOT
self.__print_step(node.ip[0], node.name, depth, dcodes)
# get the cached snmp credentials
snmpobj = node.snmpobj
# list of valid neighbors to discover next
valid_neighbors = []
# get list of neighbors
cdp_neighbors = node.get_cdp_neighbors()
lldp_neighbors = node.get_lldp_neighbors()
neighbors = cdp_neighbors + lldp_neighbors
if (len(neighbors) == 0):
return
for n in neighbors:
# some neighbors may not advertise IP addresses - default them to 0.0.0.0
if (n.remote_ip == None):
n.remote_ip = '0.0.0.0'
# check the ACL
acl_action = self.__match_node_acl(n.remote_ip, n.remote_name)
if (acl_action == 'deny'):
# deny inclusion of this node
continue
dcodes = DCODE_DISCOVERED
child = None
if (acl_action == 'include'):
# include this node but do not discover it
child = natlas_node()
child.ip = [n.remote_ip]
dcodes |= DCODE_INCLUDE
else:
# discover this node
child, query_result = self.__query_node(n.remote_ip, n.remote_name)
# if we couldn't pull info from SNMP fill in what we know
if (child.snmpobj.success == 0):
child.name = util.shorten_host_name(n.remote_name, self.config.host_domains)
dcodes |= DCODE_ERR_SNMP
# need to check the ACL again for extended ops (we have more info)
acl_action = self.__match_node_acl(n.remote_ip, n.remote_name, n.remote_plat, n.remote_ios, child.serial)
if (acl_action == 'deny'):
continue
if (query_result == NODE_NEW):
self.nodes.append(child)
if (acl_action == 'leaf'): dcodes |= DCODE_LEAF
if (n.discovered_proto == 'cdp'): dcodes |= DCODE_CDP
if (n.discovered_proto == 'lldp'): dcodes |= DCODE_LLDP
self.__print_step(n.remote_ip, n.remote_name, depth+1, dcodes)
# CDP/LLDP advertises the platform
child.plat = n.remote_plat
child.ios = n.remote_ios
# add the discovered node to the link object and link to the parent
n.node = child
self.__add_link(node, n)
# if we need to discover this node then add it to the list
if ((query_result == NODE_NEW) & (acl_action != 'leaf') & (acl_action != 'include')):
valid_neighbors.append(child)
# discover the valid neighbors
for n in valid_neighbors:
self.__discover_node(n, depth+1)
def __match_node_acl(self, ip, host, platform=None, software=None, serial=None):
for acl in self.config.discover_acl:
if (acl.type == 'ip'):
if (self.__match_ip(ip, acl.str)):
return acl.action
elif (acl.type == 'host'):
if (self.__match_strpattern(host, acl.str)):
return acl.action
elif (acl.type == 'platform'):
if ((platform != None) and self.__match_strpattern(platform, acl.str)):
return acl.action
elif (acl.type == 'software'):
if ((software != None) and self.__match_strpattern(software, acl.str)):
return acl.action
elif (acl.type == 'serial'):
if ((serial != None) and self.__match_strpattern(serial, acl.str)):
return acl.action
return 'deny'
def __match_ip(self, ip, cidr):
if (cidr == 'any'):
return 1
validate = re.match(r'^([0-2]?[0-9]?[0-9]\.){3}[0-2]?[0-9]?[0-9]$', ip)
if (validate == None):
return 0
if (USE_NETADDR):
if (ip in IPNetwork(cidr)):
return 1
else:
if (util.is_ipv4_in_cidr(ip, cidr)):
return 1
return 0
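# Illustrative note (not in the original source): __match_ip treats 'any' as a
# wildcard and otherwise expects a dotted-quad string, e.g. (values hypothetical):
#   self.__match_ip('10.1.2.3', 'any')          -> 1
#   self.__match_ip('10.1.2.3', '10.1.0.0/16')  -> 1 via IPNetwork or the util fallback
#   self.__match_ip('bad-host', '10.1.0.0/16')  -> 0, the regex rejects non-IP input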
def __match_strpattern(self, str, pattern):
if (str == '*'):
return 1
if (re.search(pattern, str)):
return 1
return 0
#
# Add or update a link.
# Return
# 0 - Found an existing link and updated it
# 1 - Added as a new link
#
def __add_link(self, node, link):
if (link.node.discovered == 1):
# both nodes have been discovered,
# so try to update existing reverse link info
# instead of adding a new link
for n in self.nodes:
# find the child, which was the original parent
if (n.name == link.node.name):
# find the existing link
for ex_link in n.links:
if ((ex_link.node.name == node.name) & (ex_link.local_port == link.remote_port)):
if ((link.local_if_ip != 'UNKNOWN') & (ex_link.remote_if_ip == None)):
ex_link.remote_if_ip = link.local_if_ip
if ((link.local_lag != 'UNKNOWN') & (ex_link.remote_lag == None)):
ex_link.remote_lag = link.local_lag
if ((len(link.local_lag_ips) != 0) & (len(ex_link.remote_lag_ips) == 0)):
ex_link.remote_lag_ips = link.local_lag_ips
if ((link.local_native_vlan != None) & (ex_link.remote_native_vlan == None)):
ex_link.remote_native_vlan = link.local_native_vlan
if ((link.local_allowed_vlans != None) & (ex_link.remote_allowed_vlans == None)):
ex_link.remote_allowed_vlans = link.local_allowed_vlans
return 0
else:
for ex_link in node.links:
if ((ex_link.node.name == link.node.name) & (ex_link.local_port == link.local_port)):
# haven't discovered yet but somehow we have this link twice.
# maybe from different discovery processes?
return 0
node.add_link(link)
return 1
def __get_known_node_by_host(self, hostname):
'''
Determine if the node is already known by hostname.
If it is, return it.
'''
for n in self.nodes:
if (n.name == hostname):
return n
return None
| gpl-2.0 | -6,000,111,495,897,117,000 | 35.333962 | 117 | 0.503505 | false |
dataflow/DataStage | test/FileShare/tests/TestWebDAVAccess.py | 1 | 9093 | # ---------------------------------------------------------------------
#
# Copyright (c) 2012 University of Oxford
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# ---------------------------------------------------------------------
# $Id: TestWebDAVAccess.py 1047 2009-01-15 14:48:58Z graham $
#
# Unit testing for FileAccess module
#
import os
# Make sure python-kerberos package is installed
import kerberos
import sys
import httplib
import urllib2
import urllib2_kerberos
import re
import base64
import unittest
from urlparse import urlparse
sys.path.append("../../..")
from MiscLib import TestUtils
readmetext="This directory is the root of the DATASTAGE shared file system.\n"
mountpoint="mountdatastagewebdav"
readmefile="DATASTAGE.README"
theurl="http://dataflow-vm1.oerc.ox.ac.uk/webdav/TestLeader"
class TestWebDAVAccess(unittest.TestCase):
def setUp(self):
# mount WebDAV share here
status=os.system('mount '+mountpoint)
self.assertEqual(status, 0, 'Mount failure')
return
def tearDown(self):
os.system('umount '+mountpoint)
return
# Test cases
def testNull(self):
assert (True), "True expected"
return
def testReadMe(self):
# Test assumes DATASTAGE shared file system is mounted at mountpoint
# Open README file
f = open(mountpoint+'/'+readmefile)
assert (f), "README file open failed"
# Read first line
l = f.readline()
# Close file
f.close()
# Check first line
self.assertEqual(l, readmetext, 'Unexpected README content')
return
def testCreateFile(self):
f = open(mountpoint+'/testCreateWebDAVFile.tmp','w+')
assert (f), "File creation failed"
f.write('Test creation of file\n')
f.close()
f = open(mountpoint+'/testCreateWebDAVFile.tmp','r')
l = f.readline()
f.close()
self.assertEqual(l, 'Test creation of file\n', 'Unexpected file content')
return
def testUpdateFile(self):
filename = mountpoint+'/testUpdateWebDAVFile.tmp'
f = open(filename,'w+')
assert (f), "File creation failed"
f.write('Test creation of file\n')
f.close()
f = open(filename,'a+')
f.write('Test update of file\n')
f.close()
f = open(filename,'r')
l1 = f.readline()
l2 = f.readline()
f.close()
self.assertEqual(l1, 'Test creation of file\n', 'Unexpected file content: l1')
self.assertEqual(l2, 'Test update of file\n', 'Unexpected file content: l2')
return
def testRewriteFile(self):
filename = mountpoint+'/testRewriteWebDAVFile.tmp'
f = open(filename,'w+')
assert (f), "File creation failed"
f.write('Test creation of file\n')
f.close()
f = open(filename,'w+')
f.write('Test rewrite of file\n')
f.close()
f = open(filename,'r')
l = f.readline()
f.close()
self.assertEqual(l, 'Test rewrite of file\n', 'Unexpected file content')
return
def testDeleteFile(self):
filename1 = mountpoint+'/testCreateWebDAVFile.tmp'
filename2 = mountpoint+'/testRewriteWebDAVFile.tmp'
filename3 = mountpoint+'/testUpdateWebDAVFile.tmp'
# Test and delete first file
try:
s = os.stat(filename1)
except:
assert (False), "File "+filename1+" not found or other stat error"
os.remove(filename1)
try:
s = os.stat(filename1)
assert (False), "File "+filename1+" not deleted"
except:
pass
# Test and delete second file
try:
s = os.stat(filename2)
except:
assert (False), "File "+filename2+" not found or other stat error"
os.remove(filename2)
try:
s = os.stat(filename2)
assert (False), "File "+filename2+" not deleted"
except:
pass
# Test and delete third file
try:
s = os.stat(filename3)
except:
assert (False), "File "+filename3+" not found or other stat error"
os.remove(filename3)
try:
s = os.stat(filename3)
assert (False), "File "+filename3+" not deleted"
except:
pass
return
def testWebDAVFile(self):
h1 = httplib.HTTPConnection('zakynthos.zoo.ox.ac.uk')
h1.request('GET','/webdav')
res=h1.getresponse()
authreq = str(res.status) + ' ' + res.reason
print authreq
self.assertEqual(authreq, '401 Authorization Required', 'Unexpected response')
return
def testWebDAVFileUrlLib(self):
#_ignore = kerberos.GSS_C_DELEG_FLAG
#from kerberos import GSS_C_DELEG_FLAG,GSS_C_MUTUAL_FLAG,GSS_C_SEQUENCE_FLAG
#_ignore, ctx = kerberos.authGSSClientInit('krbtgt/[email protected]', gssflags=GSS_C_DELEG_FLAG|GSS_C_MUTUAL_FLAG|GSS_C_SEQUENCE_FLAG)
_ignore, ctx = kerberos.authGSSClientInit('[email protected]')
_ignore = kerberos.authGSSClientStep(ctx, '')
tgt = kerberos.authGSSClientResponse(ctx)
opener = urllib2.build_opener()
opener.add_handler(urllib2_kerberos.HTTPKerberosAuthHandler())
resp = opener.open(theurl)
print resp
return
req = urllib2.Request(theurl)
try:
handle = urllib2.urlopen(req)
except IOError, e:
pass
else:
assert (False), theurl + " isn't protected by authentication."
if not hasattr(e, 'code') or e.code != 401:
# we got an error - but not a 401 error
assert (False), theurl + " Error: " + e
authline = e.headers['www-authenticate']
# this gets the www-authenticate line from the headers
# which has the authentication scheme and realm in it
authobj = re.compile(
r'''(?:\s*www-authenticate\s*:)?\s*(\w*)\s+realm=['"]([^'"]+)['"]''',
re.IGNORECASE)
# this regular expression is used to extract scheme and realm
matchobj = authobj.match(authline)
if not matchobj:
# if the authline isn't matched by the regular expression
# then something is wrong
assert (False), "Malformed authentication header: " + authline
scheme = matchobj.group(1)
realm = matchobj.group(2)
# here we've extracted the scheme
# and the realm from the header
print scheme
print realm
return
# Sentinel/placeholder tests
def testUnits(self):
assert (True)
def testComponents(self):
assert (True)
def testIntegration(self):
assert (True)
def testPending(self):
assert (False), "No pending test"
# Assemble test suite
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ "testUnits"
, "testNull"
],
"component":
[ "testComponents"
, "testReadMe"
, "testCreateFile"
, "testRewriteFile"
, "testUpdateFile"
, "testDeleteFile"
],
"integration":
[ "testIntegration"
],
"pending":
[ "testPending"
, "testWebDAVFile"
, "testWebDAVFileUrlLib"
]
}
return TestUtils.getTestSuite(TestWebDAVAccess, testdict, select=select)
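# Usage sketch (illustrative, not part of the original file): getTestSuite can
# also be called directly to build a suite for a plain unittest runner, e.g.:
#   suite = getTestSuite("component")           # unit + component tests
#   unittest.TextTestRunner(verbosity=2).run(suite)
# TestUtils.runTests below wires the same selection logic to command-line arguments.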
# Run unit tests directly from command line
if __name__ == "__main__":
TestUtils.runTests("TestFileAccess", getTestSuite, sys.argv)
# End.
| mit | 1,142,535,707,683,466,100 | 31.244681 | 143 | 0.598152 | false |
linii/ling229-final | metrics/noun_cluster_plot.py | 1 | 2306 | #!/usr/bin/python
import os
import sys
# hack to make this able to import topic_modeling. must be run from final_project/ dir
lib_path = os.path.abspath(os.path.join('.'))
sys.path.append(lib_path)
import numpy as np
import pylab
import pickle
from collections import Counter
from topic_modeling.topic_model import preprocess_docs
from pca_plot import reduce_dim
def doc_to_noun_bow(doc_nouns, top_nouns):
top_nouns_set = set(top_nouns)
matching_words = filter(lambda word: word in top_nouns_set, doc_nouns)
counts = Counter(matching_words)
return np.array([counts[noun]/float(len(doc_nouns) + 1) for noun in top_nouns])
def get_most_freq_words(docs, n=100):
all_words = reduce(lambda x, y: x + y, docs)
word_counts = Counter(all_words)
return [word for word, count in word_counts.most_common(n)]
def get_and_save_word_counts(docs, vocab, outfile="metrics/word_counts.csv"):
word_counts_by_doc = np.vstack([doc_to_noun_bow(doc, vocab) for doc in docs])
np.savetxt(outfile, word_counts_by_doc, delimiter=",")
return word_counts_by_doc
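# Illustrative sketch (not part of the original script): the helpers above build
# a document-by-term frequency matrix, roughly:
#   vocab = get_most_freq_words(doc_nouns, 100)          # 100 most common nouns
#   counts = get_and_save_word_counts(doc_nouns, vocab)  # shape (n_docs, 100)
# Each row is the normalized bag-of-words vector from doc_to_noun_bow, and the
# CSV written here is what the "-csv" branch in __main__ reloads later.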
def plot_pca_noun_data(noun_counts_by_doc, labels, outfile):
colors = ["green" if label else "red" for label in labels]
reduced_data = reduce_dim(noun_counts_by_doc, 2)
pylab.scatter(reduced_data[:, 0], reduced_data[:, 1], c=colors)
# pylab.ylim(-10, 10)
# pylab.xlim(-10, 10)
pylab.ylabel("Count Data Principal Component 2")
pylab.xlabel("Count Data Principal Component 1")
pylab.title("Word Count Data Plotted By PCA: Nonromantic Lexicon Words")
pylab.savefig(outfile)
pylab.show()
if __name__ == '__main__':
label_file = sys.argv[3]
png_out_file = sys.argv[4]
if sys.argv[1] == "-pos":
postags_file = sys.argv[2]
doc_nouns = preprocess_docs(doc_texts=None, postags_file=postags_file)
noun_counts = get_and_save_word_counts(doc_nouns, get_most_freq_words(doc_nouns, 100))
elif sys.argv[1] == "-csv":
csv_file = sys.argv[2]
noun_counts = np.loadtxt(csv_file, dtype=int, delimiter=",")
labels = np.loadtxt(label_file, dtype=int, delimiter=",")
labels = labels[:np.shape(noun_counts)[0]]
plot_pca_noun_data(noun_counts, labels, png_out_file) | gpl-3.0 | 5,005,873,616,967,561,000 | 32.447761 | 94 | 0.656548 | false |
zeldin/libsigrokdecode | decoders/lpc/pd.py | 1 | 13089 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012-2013 Uwe Hermann <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
# ...
fields = {
# START field (indicates start or stop of a transaction)
'START': {
0b0000: 'Start of cycle for a target',
0b0001: 'Reserved',
0b0010: 'Grant for bus master 0',
0b0011: 'Grant for bus master 1',
0b0100: 'Reserved',
0b0101: 'Reserved',
0b0110: 'Reserved',
0b0111: 'Reserved',
0b1000: 'Reserved',
0b1001: 'Reserved',
0b1010: 'Reserved',
0b1011: 'Reserved',
0b1100: 'Reserved',
0b1101: 'Start of cycle for a Firmware Memory Read cycle',
0b1110: 'Start of cycle for a Firmware Memory Write cycle',
0b1111: 'Stop/abort (end of a cycle for a target)',
},
# Cycle type / direction field
# Bit 0 (LAD[0]) is unused, should always be 0.
# Neither host nor peripheral are allowed to drive 0b11x0.
'CT_DR': {
0b0000: 'I/O read',
0b0010: 'I/O write',
0b0100: 'Memory read',
0b0110: 'Memory write',
0b1000: 'DMA read',
0b1010: 'DMA write',
0b1100: 'Reserved / not allowed',
0b1110: 'Reserved / not allowed',
},
# SIZE field (determines how many bytes are to be transferred)
# Bits[3:2] are reserved, must be driven to 0b00.
# Neither host nor peripheral are allowed to drive 0b0010.
'SIZE': {
0b0000: '8 bits (1 byte)',
0b0001: '16 bits (2 bytes)',
0b0010: 'Reserved / not allowed',
0b0011: '32 bits (4 bytes)',
},
# CHANNEL field (bits[2:0] contain the DMA channel number)
'CHANNEL': {
0b0000: '0',
0b0001: '1',
0b0010: '2',
0b0011: '3',
0b0100: '4',
0b0101: '5',
0b0110: '6',
0b0111: '7',
},
# SYNC field (used to add wait states)
'SYNC': {
0b0000: 'Ready',
0b0001: 'Reserved',
0b0010: 'Reserved',
0b0011: 'Reserved',
0b0100: 'Reserved',
0b0101: 'Short wait',
0b0110: 'Long wait',
0b0111: 'Reserved',
0b1000: 'Reserved',
0b1001: 'Ready more (DMA only)',
0b1010: 'Error',
0b1011: 'Reserved',
0b1100: 'Reserved',
0b1101: 'Reserved',
0b1110: 'Reserved',
0b1111: 'Reserved',
},
}
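# Illustrative example (not part of the original decoder): the tables above map a
# 4-bit LAD nibble to its meaning, so decoding a field is a plain dict lookup, e.g.:
#   lad = (lad3 << 3) | (lad2 << 2) | (lad1 << 1) | lad0   # assemble the nibble
#   fields['START'][0b0000]   -> 'Start of cycle for a target'
#   fields['CT_DR'][0b0010]   -> 'I/O write'
#   fields['SYNC'][0b0101]    -> 'Short wait'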
class Decoder(srd.Decoder):
api_version = 2
id = 'lpc'
name = 'LPC'
longname = 'Low-Pin-Count'
desc = 'Protocol for low-bandwidth devices on PC mainboards.'
license = 'gplv2+'
inputs = ['logic']
outputs = ['lpc']
channels = (
{'id': 'lframe', 'name': 'LFRAME#', 'desc': 'Frame'},
{'id': 'lclk', 'name': 'LCLK', 'desc': 'Clock'},
{'id': 'lad0', 'name': 'LAD[0]', 'desc': 'Addr/control/data 0'},
{'id': 'lad1', 'name': 'LAD[1]', 'desc': 'Addr/control/data 1'},
{'id': 'lad2', 'name': 'LAD[2]', 'desc': 'Addr/control/data 2'},
{'id': 'lad3', 'name': 'LAD[3]', 'desc': 'Addr/control/data 3'},
)
optional_channels = (
{'id': 'lreset', 'name': 'LRESET#', 'desc': 'Reset'},
{'id': 'ldrq', 'name': 'LDRQ#', 'desc': 'Encoded DMA / bus master request'},
{'id': 'serirq', 'name': 'SERIRQ', 'desc': 'Serialized IRQ'},
{'id': 'clkrun', 'name': 'CLKRUN#', 'desc': 'Clock run'},
{'id': 'lpme', 'name': 'LPME#', 'desc': 'LPC power management event'},
{'id': 'lpcpd', 'name': 'LPCPD#', 'desc': 'Power down'},
{'id': 'lsmi', 'name': 'LSMI#', 'desc': 'System Management Interrupt'},
)
annotations = (
('warnings', 'Warnings'),
('start', 'Start'),
('cycle-type', 'Cycle-type/direction'),
('addr', 'Address'),
('tar1', 'Turn-around cycle 1'),
('sync', 'Sync'),
('data', 'Data'),
('tar2', 'Turn-around cycle 2'),
)
annotation_rows = (
('data', 'Data', (1, 2, 3, 4, 5, 6, 7)),
('warnings', 'Warnings', (0,)),
)
def __init__(self):
self.state = 'IDLE'
self.oldlclk = -1
self.samplenum = 0
self.clocknum = 0
self.lad = -1
self.addr = 0
self.cur_nibble = 0
self.cycle_type = -1
self.databyte = 0
self.tarcount = 0
self.synccount = 0
self.oldpins = None
self.ss_block = self.es_block = None
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
def putb(self, data):
self.put(self.ss_block, self.es_block, self.out_ann, data)
def handle_get_start(self, lad, lad_bits, lframe):
# LAD[3:0]: START field (1 clock cycle).
# The last value of LAD[3:0] before LFRAME# gets de-asserted is what
# the peripherals must use. However, the host can keep LFRAME# asserted
# multiple clocks, and we output all START fields that occur, even
# though the peripherals are supposed to ignore all but the last one.
self.es_block = self.samplenum
self.putb([1, [fields['START'][lad], 'START', 'St', 'S']])
self.ss_block = self.samplenum
# Output a warning if LAD[3:0] changes while LFRAME# is low.
# TODO
if (self.lad != -1 and self.lad != lad):
self.putb([0, ['LAD[3:0] changed while LFRAME# was asserted']])
# LFRAME# is asserted (low). Wait until it gets de-asserted again
# (the host is allowed to keep it asserted multiple clocks).
if lframe != 1:
return
self.start_field = self.lad
self.state = 'GET CT/DR'
def handle_get_ct_dr(self, lad, lad_bits):
# LAD[3:0]: Cycle type / direction field (1 clock cycle).
self.cycle_type = fields['CT_DR'][lad]
# TODO: Warning/error on invalid cycle types.
if self.cycle_type == 'Reserved':
self.putb([0, ['Invalid cycle type (%s)' % lad_bits]])
self.es_block = self.samplenum
self.putb([2, ['Cycle type: %s' % self.cycle_type]])
self.ss_block = self.samplenum
self.state = 'GET ADDR'
self.addr = 0
self.cur_nibble = 0
def handle_get_addr(self, lad, lad_bits):
# LAD[3:0]: ADDR field (4/8/0 clock cycles).
# I/O cycles: 4 ADDR clocks. Memory cycles: 8 ADDR clocks.
# DMA cycles: no ADDR clocks at all.
if self.cycle_type in ('I/O read', 'I/O write'):
addr_nibbles = 4 # Address is 16bits.
elif self.cycle_type in ('Memory read', 'Memory write'):
addr_nibbles = 8 # Address is 32bits.
else:
addr_nibbles = 0 # TODO: How to handle later on?
# Addresses are driven MSN-first.
offset = ((addr_nibbles - 1) - self.cur_nibble) * 4
self.addr |= (lad << offset)
# Continue if we haven't seen all ADDR cycles, yet.
if (self.cur_nibble < addr_nibbles - 1):
self.cur_nibble += 1
return
self.es_block = self.samplenum
s = 'Address: 0x%%0%dx' % addr_nibbles
self.putb([3, [s % self.addr]])
self.ss_block = self.samplenum
self.state = 'GET TAR'
self.tar_count = 0
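# Illustrative example (not in the original source): addresses arrive most
# significant nibble first, so for an I/O cycle (4 nibbles) the values
# 0x1, 0x2, 0x3, 0x4 are shifted into place as
#   (0x1 << 12) | (0x2 << 8) | (0x3 << 4) | (0x4 << 0)  ==  0x1234
# which is exactly what the offset arithmetic above computes for cur_nibble 0..3.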
def handle_get_tar(self, lad, lad_bits):
# LAD[3:0]: First TAR (turn-around) field (2 clock cycles).
self.es_block = self.samplenum
self.putb([4, ['TAR, cycle %d: %s' % (self.tarcount, lad_bits)]])
self.ss_block = self.samplenum
# On the first TAR clock cycle LAD[3:0] is driven to 1111 by
# either the host or peripheral. On the second clock cycle,
# the host or peripheral tri-states LAD[3:0], but its value
# should still be 1111, due to pull-ups on the LAD lines.
if lad_bits != '1111':
self.putb([0, ['TAR, cycle %d: %s (expected 1111)' % \
(self.tarcount, lad_bits)]])
if (self.tarcount != 1):
self.tarcount += 1
return
self.tarcount = 0
self.state = 'GET SYNC'
def handle_get_sync(self, lad, lad_bits):
# LAD[3:0]: SYNC field (1-n clock cycles).
self.sync_val = lad_bits
self.cycle_type = fields['SYNC'][lad]
# TODO: Warnings if reserved value are seen?
if self.cycle_type == 'Reserved':
self.putb([0, ['SYNC, cycle %d: %s (reserved value)' % \
(self.synccount, self.sync_val)]])
self.es_block = self.samplenum
self.putb([5, ['SYNC, cycle %d: %s' % (self.synccount, self.sync_val)]])
self.ss_block = self.samplenum
# TODO
self.cycle_count = 0
self.state = 'GET DATA'
def handle_get_data(self, lad, lad_bits):
# LAD[3:0]: DATA field (2 clock cycles).
# Data is driven LSN-first.
if (self.cycle_count == 0):
self.databyte = lad
elif (self.cycle_count == 1):
self.databyte |= (lad << 4)
else:
raise Exception('Invalid cycle_count: %d' % self.cycle_count)
if (self.cycle_count != 1):
self.cycle_count += 1
return
self.es_block = self.samplenum
self.putb([6, ['DATA: 0x%02x' % self.databyte]])
self.ss_block = self.samplenum
self.cycle_count = 0
self.state = 'GET TAR2'
def handle_get_tar2(self, lad, lad_bits):
# LAD[3:0]: Second TAR field (2 clock cycles).
self.es_block = self.samplenum
self.putb([7, ['TAR, cycle %d: %s' % (self.tarcount, lad_bits)]])
self.ss_block = self.samplenum
# On the first TAR clock cycle LAD[3:0] is driven to 1111 by
# either the host or peripheral. On the second clock cycle,
# the host or peripheral tri-states LAD[3:0], but its value
# should still be 1111, due to pull-ups on the LAD lines.
if lad_bits != '1111':
self.putb([0, ['Warning: TAR, cycle %d: %s (expected 1111)'
% (self.tarcount, lad_bits)]])
if (self.tarcount != 1):
self.tarcount += 1
return
self.tarcount = 0
self.state = 'IDLE'
def decode(self, ss, es, data):
for (self.samplenum, pins) in data:
# If none of the pins changed, there's nothing to do.
if self.oldpins == pins:
continue
# Store current pin values for the next round.
self.oldpins = pins
# Get individual pin values into local variables.
(lframe, lclk, lad0, lad1, lad2, lad3) = pins[:6]
(lreset, ldrq, serirq, clkrun, lpme, lpcpd, lsmi) = pins[6:]
# Only look at the signals upon rising LCLK edges. The LPC clock
# is the same as the PCI clock (which is sampled at rising edges).
if not (self.oldlclk == 0 and lclk == 1):
self.oldlclk = lclk
continue
# Store LAD[3:0] bit values (one nibble) in local variables.
# Most (but not all) states need this.
if self.state != 'IDLE':
lad = (lad3 << 3) | (lad2 << 2) | (lad1 << 1) | lad0
lad_bits = bin(lad)[2:].zfill(4)
# self.putb([0, ['LAD: %s' % lad_bits]])
# TODO: Only memory read/write is currently supported/tested.
# State machine
if self.state == 'IDLE':
# A valid LPC cycle starts with LFRAME# being asserted (low).
if lframe != 0:
continue
self.ss_block = self.samplenum
self.state = 'GET START'
self.lad = -1
# self.clocknum = 0
elif self.state == 'GET START':
self.handle_get_start(lad, lad_bits, lframe)
elif self.state == 'GET CT/DR':
self.handle_get_ct_dr(lad, lad_bits)
elif self.state == 'GET ADDR':
self.handle_get_addr(lad, lad_bits)
elif self.state == 'GET TAR':
self.handle_get_tar(lad, lad_bits)
elif self.state == 'GET SYNC':
self.handle_get_sync(lad, lad_bits)
elif self.state == 'GET DATA':
self.handle_get_data(lad, lad_bits)
elif self.state == 'GET TAR2':
self.handle_get_tar2(lad, lad_bits)
| gpl-3.0 | 4,092,020,724,565,551,000 | 34.762295 | 88 | 0.53969 | false |
Glottotopia/aagd | moin/local/moin/contrib/googleimport/driver.py | 1 | 7520 | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
"""
MoinMoin wiki project -> Google Project Hosting converter
Full of evil antipatterns, incl. Exception exceptions.
@copyright: 2007,2010 MoinMoin:AlexanderSchremmer
@license: GNU GPL, see COPYING for details.
"""
import sys
import re
import urllib2
from urllib import quote
import xmlrpclib
import csv
from MoinMoin.web.contexts import ScriptContext
from MoinMoin.Page import Page
# monkeypatch the formatter to avoid line_anchors:
from MoinMoin.formatter import text_html
text_html.line_anchors = False
request = ScriptContext(None, None)
class DataNotFoundException(Exception): pass
class Task(object):
def __init__(self, summary, desc, label, hours, mentors, difficulty, types):
self.summary = summary
self.label = label
self.hours = hours
self.mentors = mentors
self.difficulty = difficulty
self.types = types
page = Page(request, "")
page.set_raw_body(desc)
desc = request.redirectedOutput(page.send_page, content_only=1)
for s, r in [
('\n', ' '),
(' class="line862"', ''),
(' class="line867"', ''),
(' class="line874"', ''),
(' class="line891"', ''),
]:
desc = desc.replace(s, r)
self.desc = desc
def __repr__(self):
return (u"<Task summary=%r label=%r hours=%i mentors=%r difficulty=%r types=%r desc='''%s'''>" % (
self.summary, self.label, self.hours, self.mentors, self.difficulty,
self.types, self.desc[:100])).encode("utf-8")
def find_dict_entry(name, text):
m = re.search(r"^ %s:: (.*)$" % (name, ), text, re.M | re.U)
if not m:
raise DataNotFoundException("%s not found" % (name, ))
return m.groups()[0]
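# Illustrative note (not part of the original converter): find_dict_entry expects
# MoinMoin definition-list markup: one leading space, the term, '::', the value.
# With hypothetical page text containing " Title:: Add drag and drop upload",
#   find_dict_entry("Title", page_contents)  ->  "Add drag and drop upload"
# and a missing term raises DataNotFoundException, which the gatherers catch.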
desc_pattern = r"""= Description =
([\s\S]*?)
= Discussion ="""
bugpage_pattern = r"""= Description =
([\s\S]*?)
="""
already_pushed_pages = set([x.strip() for x in """
""".split("\n")])
already_pushed_bugs = set([x.strip() for x in """
""".split("\n")])
gatherers = []
def first(s):
""" return first word or '' """
splitted = s.strip().split()
if splitted:
return splitted[0]
else:
return ''
class Collector(object):
def is_gatherer(function):
gatherers.append(function)
return function
def __init__(self, url):
self.url = url
self.server = xmlrpclib.ServerProxy(url + "?action=xmlrpc2")
def collect_tasks(self):
tasks = []
for gatherer in gatherers:
new = list(gatherer(self))
tasks.extend(new)
return tasks
@is_gatherer
def easytodo_pages(self):
pages = self.server.getAllPagesEx(dict(prefix="EasyToDo/"))
for page in pages:
if page in already_pushed_pages:
continue
page_contents = self.server.getPage(page)
try:
summary = find_dict_entry("Title", page_contents)
count = int(first(find_dict_entry("Count", page_contents)))
label = find_dict_entry("Tags", page_contents)
hours = int(first(find_dict_entry("Duration", page_contents)))
mentors = find_dict_entry("Mentors", page_contents)
difficulty = find_dict_entry("Difficulty", page_contents)
try:
types = find_dict_entry("Types", page_contents)
except DataNotFoundException:
# old tasks use "Type"
types = find_dict_entry("Type", page_contents)
except (DataNotFoundException, ValueError), e:
print >>sys.stderr, "Could not import %r because of %r" % (page, e)
continue
desc_m = re.search(desc_pattern, page_contents)
if not desc_m:
raise Exception("Could not import %r because Desc not found" % page)
desc = desc_m.groups()[0]
for i in range(1, count + 1):
text = desc
new_summary = summary
text += "\n\nYou can discuss this issue in the !MoinMoin wiki: %s" % (self.url + quote(page.encode("utf-8")), )
if count > 1:
text += "\n\nThis issue is available multiple times. This one is %i of %i." % (i, count)
new_summary += " %i/%i" % (i, count)
yield Task(new_summary, text, label, hours, mentors, difficulty, types)
#@is_gatherer
def moin_bugs(self):
pages = [pagename for pagename, contents in self.server.searchPages(r"t:MoinMoinBugs/ r:CategoryEasy\b")]
for page in pages:
bug_name = page.replace("MoinMoinBugs/", "")
if bug_name in already_pushed_bugs:
continue
page_contents = self.server.getPage(page)
m = re.search(bugpage_pattern, page_contents)
if not m:
raise Exception("Could not import %r because of bug desc not found" % page)
desc = m.groups()[0]
desc = "A user filed a bug report at the MoinMoin site. Here is a short description about the issue. A more detailed description is available at the MoinMoin wiki: %s\n\n" % (self.url + quote(page.encode("utf-8")), ) + desc
yield Task(bug_name, desc, "Code")
#@is_gatherer
def translation_items(self):
#languages = self.server.getPage(u"EasyToDoTranslation/Languages").strip().splitlines()
#languages = ["Lithuanian (lt)"]
languages = []
for language in languages:
page = u"EasyToDoTranslation"
page_contents = self.server.getPage(page)
page_contents = page_contents.replace("LANG", language)
summary = find_dict_entry("Summary", page_contents)
count = int(first(find_dict_entry("Count", page_contents)))
desc_m = re.search(desc_pattern, page_contents)
if not desc_m:
raise Exception("Could not import %r because Desc not found" % page)
desc = desc_m.groups()[0]
for i in range(1, count + 1):
text = desc
new_summary = summary
text += "\n\nA more detailed description of this task is available at the MoinMoin wiki: %s" % (self.url + quote(page.encode("utf-8")), )
if count > 1:
text += "\n\nThis task is available multiple times. This one is %i of %i." % (i, count)
new_summary += " %i/%i" % (i, count)
yield Task(new_summary, text, "Translation")
def pull_and_gencsv():
print >> sys.stderr, "Collecting tasks ..."
tasks = Collector("http://moinmo.in/").collect_tasks()
print >> sys.stderr, "Importing %i tasks ..." % (len(tasks), )
print >> sys.stderr, "\n".join(repr(task) for task in tasks)
summary_prefix = '' # "[TEST] " # EMPTY FOR PRODUCTION IMPORT!
tmin, tmax = 0, None
csvwriter = csv.writer(sys.stdout, delimiter=",", doublequote=True)
for task in tasks[tmin:tmax]:
csvwriter.writerow([summary_prefix + task.summary, task.desc, task.hours, task.mentors, task.difficulty, task.types, task.label])
if __name__ == "__main__":
pull_and_gencsv()
| mit | 5,125,018,128,756,069,000 | 35.788945 | 235 | 0.557181 | false |
dims/heat | heat/tests/test_translation_rule.py | 1 | 26801 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from heat.common import exception
from heat.engine.cfn import functions as cfn_funcs
from heat.engine.hot import functions as hot_funcs
from heat.engine import parameters
from heat.engine import properties
from heat.engine import translation
from heat.tests import common
class TestTranslationRule(common.HeatTestCase):
def test_translation_rule(self):
for r in translation.TranslationRule.RULE_KEYS:
props = properties.Properties({}, {})
rule = translation.TranslationRule(
props,
r,
['any'],
['value'] if r == 'Add' else 'value',
'value_name' if r == 'Replace' else None,
'client_plugin' if r == 'Resolve' else None,
'finder' if r == 'Resolve' else None)
self.assertEqual(rule.properties, props)
self.assertEqual(rule.rule, r)
if r == 'Add':
self.assertEqual(['value'], rule.value)
else:
self.assertEqual('value', rule.value)
if r == 'Replace':
self.assertEqual('value_name', rule.value_name)
else:
self.assertIsNone(rule.value_name)
def test_invalid_translation_rule(self):
props = properties.Properties({}, {})
exc = self.assertRaises(ValueError,
translation.TranslationRule,
'proppy', mock.ANY,
mock.ANY)
self.assertEqual('Properties must be Properties type. '
'Found %s.' % str, six.text_type(exc))
exc = self.assertRaises(ValueError,
translation.TranslationRule,
props,
'EatTheCookie',
mock.ANY,
mock.ANY)
self.assertEqual('There is no rule EatTheCookie. List of allowed '
'rules is: Add, Replace, Delete, Resolve.',
six.text_type(exc))
exc = self.assertRaises(ValueError,
translation.TranslationRule,
props,
translation.TranslationRule.ADD,
'networks.network',
'value')
self.assertEqual('source_path should be a list with path instead of '
'%s.' % str, six.text_type(exc))
exc = self.assertRaises(ValueError,
translation.TranslationRule,
props,
translation.TranslationRule.ADD,
[],
mock.ANY)
self.assertEqual('source_path must be non-empty list with path.',
six.text_type(exc))
exc = self.assertRaises(ValueError,
translation.TranslationRule,
props,
translation.TranslationRule.ADD,
['any'],
mock.ANY,
'value_name')
self.assertEqual('Use value_name only for replacing list elements.',
six.text_type(exc))
exc = self.assertRaises(ValueError,
translation.TranslationRule,
props,
translation.TranslationRule.ADD,
['any'],
'value')
self.assertEqual('value must be list type when rule is Add.',
six.text_type(exc))
def test_add_rule_exist(self):
schema = {
'far': properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
)
),
'bar': properties.Schema(
properties.Schema.STRING
)}
data = {
'far': [
{'red': 'blue'}
],
'bar': 'dak'
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.ADD,
['far'],
[{'red': props.get('bar')}])
rule.execute_rule()
self.assertIn({'red': 'dak'}, props.get('far'))
def test_add_rule_dont_exist(self):
schema = {
'far': properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
)
),
'bar': properties.Schema(
properties.Schema.STRING
)}
data = {
'bar': 'dak'
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.ADD,
['far'],
[{'red': props.get('bar')}])
rule.execute_rule()
self.assertEqual([{'red': 'dak'}], props.get('far'))
def test_add_rule_invalid(self):
schema = {
'far': properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
),
'bar': properties.Schema(
properties.Schema.STRING
)}
data = {
'far': 'tran',
'bar': 'dak'
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.ADD,
['far'],
[props.get('bar')])
exc = self.assertRaises(ValueError, rule.execute_rule)
self.assertEqual('Add rule must be used only for lists.',
six.text_type(exc))
def test_replace_rule_map_exist(self):
schema = {
'far': properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
),
'bar': properties.Schema(
properties.Schema.STRING
)}
data = {
'far': {'red': 'tran'},
'bar': 'dak'
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['far', 'red'],
props.get('bar'))
rule.execute_rule()
self.assertEqual({'red': 'dak'}, props.get('far'))
def test_replace_rule_map_dont_exist(self):
schema = {
'far': properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
),
'bar': properties.Schema(
properties.Schema.STRING
)}
data = {
'bar': 'dak'
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['far', 'red'],
props.get('bar'))
rule.execute_rule()
self.assertEqual({'red': 'dak'}, props.get('far'))
def test_replace_rule_list_different(self):
schema = {
'far': properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
)
),
'bar': properties.Schema(
properties.Schema.STRING
)}
data = {
'far': [{'red': 'blue'},
{'red': 'roses'}],
'bar': 'dak'
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['far', 'red'],
props.get('bar'))
rule.execute_rule()
self.assertEqual([{'red': 'dak'}, {'red': 'dak'}], props.get('far'))
def test_replace_rule_list_same(self):
schema = {
'far': properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
),
'blue': properties.Schema(
properties.Schema.STRING
)
}
)
)}
data = {
'far': [{'blue': 'white'},
{'red': 'roses'}]
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['far', 'red'],
None,
'blue')
rule.execute_rule()
self.assertEqual([{'red': 'white', 'blue': None},
{'blue': None, 'red': 'roses'}],
props.get('far'))
def test_replace_rule_str(self):
schema = {
'far': properties.Schema(properties.Schema.STRING),
'bar': properties.Schema(properties.Schema.STRING)
}
data = {'far': 'one', 'bar': 'two'}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['bar'],
props.get('far'))
rule.execute_rule()
self.assertEqual('one', props.get('bar'))
self.assertEqual('one', props.get('far'))
def test_replace_rule_str_value_path_error(self):
schema = {
'far': properties.Schema(properties.Schema.STRING),
'bar': properties.Schema(properties.Schema.STRING)
}
data = {'far': 'one', 'bar': 'two'}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['bar'],
value_path=['far'])
ex = self.assertRaises(ValueError, rule.execute_rule)
self.assertEqual('Cannot use bar and far at the same time.',
six.text_type(ex))
def test_replace_rule_str_value_path(self):
schema = {
'far': properties.Schema(properties.Schema.STRING),
'bar': properties.Schema(properties.Schema.STRING)
}
data = {'far': 'one'}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['bar'],
value_path=['far'])
rule.execute_rule()
self.assertEqual('one', props.get('bar'))
self.assertIsNone(props.get('far'))
def test_replace_rule_str_invalid(self):
schema = {
'far': properties.Schema(properties.Schema.STRING),
'bar': properties.Schema(properties.Schema.INTEGER)
}
data = {'far': 'one', 'bar': 2}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['bar'],
props.get('far'))
rule.execute_rule()
exc = self.assertRaises(exception.StackValidationFailed,
props.validate)
self.assertEqual("Property error: bar: Value 'one' is not an integer",
six.text_type(exc))
def test_delete_rule_list(self):
schema = {
'far': properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
)
)}
data = {
'far': [{'red': 'blue'},
{'red': 'roses'}],
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.DELETE,
['far', 'red'])
rule.execute_rule()
self.assertEqual([{'red': None}, {'red': None}], props.get('far'))
def test_delete_rule_other(self):
schema = {
'far': properties.Schema(properties.Schema.STRING)
}
data = {'far': 'one'}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.DELETE,
['far'])
rule.execute_rule()
self.assertIsNone(props.get('far'))
def _test_resolve_rule(self, is_list=False):
class FakeClientPlugin(object):
def find_name_id(self, entity=None,
src_value='far'):
if entity == 'rose':
return 'pink'
return 'yellow'
if is_list:
schema = {
'far': properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
)
)}
else:
schema = {
'far': properties.Schema(properties.Schema.STRING)
}
return FakeClientPlugin(), schema
def test_resolve_rule_list_populated(self):
client_plugin, schema = self._test_resolve_rule(is_list=True)
data = {
'far': [{'red': 'blue'},
{'red': 'roses'}],
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far', 'red'],
client_plugin=client_plugin,
finder='find_name_id'
)
rule.execute_rule()
self.assertEqual([{'red': 'yellow'}, {'red': 'yellow'}],
props.get('far'))
def test_resolve_rule_list_with_function(self):
client_plugin, schema = self._test_resolve_rule(is_list=True)
join_func = cfn_funcs.Join(None,
'Fn::Join', ['.', ['bar', 'baz']])
data = {
'far': [{'red': 'blue'},
{'red': join_func}],
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far', 'red'],
client_plugin=client_plugin,
finder='find_name_id'
)
rule.execute_rule()
self.assertEqual([{'red': 'yellow'}, {'red': 'yellow'}],
props.get('far'))
def test_resolve_rule_list_with_ref(self):
client_plugin, schema = self._test_resolve_rule(is_list=True)
class rsrc(object):
action = INIT = "INIT"
class DummyStack(dict):
pass
stack = DummyStack(another_res=rsrc())
ref = cfn_funcs.ResourceRef(stack, 'get_resource',
'another_res')
data = {
'far': [{'red': ref}],
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far', 'red'],
client_plugin=client_plugin,
finder='find_name_id'
)
rule.execute_rule()
self.assertEqual(data, props.data)
def test_resolve_rule_list_empty(self):
client_plugin, schema = self._test_resolve_rule(is_list=True)
data = {
'far': [],
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far', 'red'],
client_plugin=client_plugin,
finder='find_name_id'
)
rule.execute_rule()
self.assertEqual([], props.get('far'))
def test_resolve_rule_other(self):
client_plugin, schema = self._test_resolve_rule()
data = {'far': 'one'}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far'],
client_plugin=client_plugin,
finder='find_name_id')
rule.execute_rule()
self.assertEqual('yellow', props.get('far'))
def test_resolve_rule_other_with_ref(self):
client_plugin, schema = self._test_resolve_rule()
class rsrc(object):
action = INIT = "INIT"
class DummyStack(dict):
pass
stack = DummyStack(another_res=rsrc())
ref = cfn_funcs.ResourceRef(stack, 'get_resource',
'another_res')
data = {'far': ref}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far'],
client_plugin=client_plugin,
finder='find_name_id')
rule.execute_rule()
self.assertEqual(data, props.data)
def test_resolve_rule_other_with_function(self):
client_plugin, schema = self._test_resolve_rule()
join_func = cfn_funcs.Join(None,
'Fn::Join', ['.', ['bar', 'baz']])
data = {'far': join_func}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far'],
client_plugin=client_plugin,
finder='find_name_id')
rule.execute_rule()
self.assertEqual(data, props.data)
def test_resolve_rule_other_with_entity(self):
client_plugin, schema = self._test_resolve_rule()
data = {'far': 'one'}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far'],
client_plugin=client_plugin,
finder='find_name_id',
entity='rose')
rule.execute_rule()
self.assertEqual('pink', props.get('far'))
def test_property_json_param_correct_translation(self):
"""Test case when property with sub-schema takes json param."""
schema = {
'far': properties.Schema(properties.Schema.MAP,
schema={
'bar': properties.Schema(
properties.Schema.STRING,
),
'dar': properties.Schema(
properties.Schema.STRING
)
})
}
class DummyStack(dict):
@property
def parameters(self):
return mock.Mock()
param = hot_funcs.GetParam(DummyStack(json_far='json_far'),
'get_param',
'json_far')
param.parameters = {
'json_far': parameters.JsonParam(
'json_far',
{'Type': 'Json'},
'{"dar": "rad"}').value()}
data = {'far': param}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['far', 'bar'],
value_path=['far', 'dar'])
rule.execute_rule()
self.assertEqual('rad', props.get('far').get('bar'))
def test_property_json_param_to_list_correct_translation(self):
"""Test case when list property with sub-schema takes json param."""
schema = {
'far': properties.Schema(properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'bar': properties.Schema(
properties.Schema.STRING,
),
'dar': properties.Schema(
properties.Schema.STRING
)
}
))
}
class DummyStack(dict):
@property
def parameters(self):
return mock.Mock()
param = hot_funcs.GetParam(DummyStack(json_far='json_far'),
'get_param',
'json_far')
param.parameters = {
'json_far': parameters.JsonParam(
'json_far',
{'Type': 'Json'},
'{"dar": "rad"}').value()}
data = {'far': [param]}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['far', 'bar'],
value_name='dar')
rule.execute_rule()
self.assertEqual([{'dar': None, 'bar': 'rad'}], props.get('far'))
def test_property_commadelimitedlist_param_correct_translation(self):
"""Test when property with sub-schema takes comma_delimited_list."""
schema = {
'far': properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.STRING,
)
),
'boo': properties.Schema(
properties.Schema.STRING
)}
class DummyStack(dict):
@property
def parameters(self):
return mock.Mock()
param = hot_funcs.GetParam(DummyStack(list_far='list_far'),
'get_param',
'list_far')
param.parameters = {
'list_far': parameters.CommaDelimitedListParam(
'list_far',
{'Type': 'CommaDelimitedList'},
"white,roses").value()}
data = {'far': param, 'boo': 'chrysanthemums'}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.ADD,
['far'],
[props.get('boo')])
rule.execute_rule()
self.assertEqual(['white', 'roses', 'chrysanthemums'],
props.get('far'))
def test_property_no_translation_removed_function(self):
"""Test case when list property with sub-schema takes json param."""
schema = {
'far': properties.Schema(properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'bar': properties.Schema(
properties.Schema.STRING,
),
'dar': properties.Schema(
properties.Schema.STRING
)
}
))
}
class DummyStack(dict):
@property
def parameters(self):
return mock.Mock()
param = hot_funcs.Removed(DummyStack(json_far='json_far'),
'Ref',
'json_far')
param.parameters = {
'json_far': parameters.JsonParam(
'json_far',
{'Type': 'Json'},
'{"dar": "rad"}').value()}
data = {'far': [param]}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['far', 'bar'],
value_name='dar')
rule.execute_rule()
self.assertEqual([param], props.data.get('far'))
| apache-2.0 | 5,504,096,451,595,284,000 | 32.543179 | 78 | 0.461065 | false |
kpreid/shinysdr | setup.py | 1 | 4716 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013, 2014, 2015, 2016, 2019 Kevin Reid and the ShinySDR contributors
#
# This file is part of ShinySDR.
#
# ShinySDR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ShinySDR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ShinySDR. If not, see <http://www.gnu.org/licenses/>.
import os.path
import subprocess
import urllib
from setuptools import find_packages, setup, Command
from setuptools.command.build_py import build_py
ASSETS = {
'http://requirejs.org/docs/release/2.1.22/comments/require.js': 'shinysdr/deps/require.js',
'https://raw.githubusercontent.com/requirejs/text/646db27aaf2236cea92ac4107f32cbe5ae7a8d3a/text.js': 'shinysdr/deps/text.js'
}
class DownloadAssets(Command):
description = 'Download web app assets from external sites.'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
for source_url, destination_path in ASSETS.items():
if os.path.exists(destination_path):
print('skipping downloading {}, already exists'.format(destination_path))
else:
print('downloading {} to {}'.format(source_url, destination_path))
urllib.urlretrieve(source_url, destination_path)
class InitGitSubModules(Command):
description = 'Initialize Git submodules for dependencies.'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
print('Initializing submodules...')
subprocess.call(['git', 'submodule', 'update', '--init'])
class FetchDeps(Command):
"""fetch dependencies command"""
description = 'gathers external dependencies from various sources'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self.run_command('git_init')
self.run_command('retrieve_assets')
class BuildPyCommand(build_py):
"""Customized build command to ensure deps are fetched before build."""
def run(self):
self.run_command('fetch_deps')
build_py.run(self)
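# Usage sketch (illustrative, not part of the original setup script): the command
# classes above chain so that an ordinary build fetches dependencies first, e.g.:
#   python setup.py build_py    # -> fetch_deps -> git_init + retrieve_assets
#   python setup.py fetch_deps  # run the submodule/download steps on their own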
setup(
name='ShinySDR',
# version='...', # No versioning is defined yet
description='Software-defined radio receiver application built on GNU Radio with a web-based UI and plugins.',
url='https://github.com/kpreid/shinysdr/',
author='Kevin Reid',
author_email='[email protected]',
classifiers=[
# TODO: review/improve; this list was made by browsing <https://pypi.python.org/pypi?%3Aaction=list_classifiers>; can we add new items?
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Twisted',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Natural Language :: English',
'Operating System :: OS Independent', # will probably fail on notPOSIX due to lack of portability work, not fundamentally
'Topic :: Communications :: Ham Radio', # non-exclusively ham
],
license='GPLv3+',
packages=find_packages(),
include_package_data=True,
install_requires=[
# 'gnuradio', # Not PyPI
# 'osmosdr', # Not PyPI
'twisted',
'txws',
'ephem',
'six',
'pyserial', # undeclared dependency of twisted.internet.serialport
# Without the service_identity module, Twisted can perform only rudimentary TLS client hostname verification
'service_identity',
'pyasn1>=0.4.1,<0.5.0', # required to pin pyans1 support for pyasn1-modules
'pyasn1-modules', # required for service_identity
],
dependency_links=[],
# zip_safe: TODO: Investigate. I suspect unsafe due to serving web resources relative to __file__.
zip_safe=False,
entry_points={
'console_scripts': {
'shinysdr = shinysdr.main:main',
'shinysdr-import = shinysdr.db_import.tool:import_main'
}
},
cmdclass={
'git_init': InitGitSubModules,
'retrieve_assets': DownloadAssets,
'fetch_deps': FetchDeps,
'build_py': BuildPyCommand,
},
)
| gpl-3.0 | 5,016,024,157,505,470,000 | 32.446809 | 143 | 0.658185 | false |
tobegit3hub/cinder_docker | cinder/tests/unit/objects/test_snapshot.py | 1 | 13201 | # Copyright 2015 SimpliVity Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_log import log as logging
from cinder import exception
from cinder import objects
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import objects as test_objects
LOG = logging.getLogger(__name__)
fake_db_snapshot = fake_snapshot.fake_db_snapshot(
cgsnapshot_id='fake_cgsnap_id')
del fake_db_snapshot['metadata']
del fake_db_snapshot['volume']
# NOTE(andrey-mp): make Snapshot object here to check object algorithms
fake_snapshot_obj = {
'id': '1',
'volume_id': 'fake_id',
'status': "creating",
'progress': '0%',
'volume_size': 1,
'display_name': 'fake_name',
'display_description': 'fake_description',
'metadata': {},
}
class TestSnapshot(test_objects.BaseObjectsTestCase):
@staticmethod
def _compare(test, expected, actual):
for field, value in expected.items():
test.assertEqual(expected[field], actual[field],
"Field '%s' is not equal" % field)
@mock.patch('cinder.db.snapshot_get', return_value=fake_db_snapshot)
def test_get_by_id(self, snapshot_get):
snapshot = objects.Snapshot.get_by_id(self.context, 1)
self._compare(self, fake_snapshot_obj, snapshot)
def test_reset_changes(self):
snapshot = objects.Snapshot()
snapshot.metadata = {'key1': 'value1'}
self.assertEqual({}, snapshot._orig_metadata)
snapshot.obj_reset_changes(['metadata'])
self.assertEqual({'key1': 'value1'}, snapshot._orig_metadata)
@mock.patch('cinder.db.snapshot_create', return_value=fake_db_snapshot)
def test_create(self, snapshot_create):
snapshot = objects.Snapshot(context=self.context)
snapshot.create()
self.assertEqual(fake_snapshot_obj['id'], snapshot.id)
self.assertEqual(fake_snapshot_obj['volume_id'], snapshot.volume_id)
@mock.patch('cinder.db.snapshot_create')
def test_create_with_provider_id(self, snapshot_create):
snapshot_create.return_value = copy.deepcopy(fake_db_snapshot)
snapshot_create.return_value['provider_id'] = '1111-aaaa'
snapshot = objects.Snapshot(context=self.context)
snapshot.create()
self.assertEqual('1111-aaaa', snapshot.provider_id)
@mock.patch('cinder.db.snapshot_update')
def test_save(self, snapshot_update):
snapshot = objects.Snapshot._from_db_object(
self.context, objects.Snapshot(), fake_db_snapshot)
snapshot.display_name = 'foobar'
snapshot.save()
snapshot_update.assert_called_once_with(self.context, snapshot.id,
{'display_name': 'foobar'})
@mock.patch('cinder.db.snapshot_metadata_update',
return_value={'key1': 'value1'})
@mock.patch('cinder.db.snapshot_update')
def test_save_with_metadata(self, snapshot_update,
snapshot_metadata_update):
snapshot = objects.Snapshot._from_db_object(
self.context, objects.Snapshot(), fake_db_snapshot)
snapshot.display_name = 'foobar'
snapshot.metadata = {'key1': 'value1'}
self.assertEqual({'display_name': 'foobar',
'metadata': {'key1': 'value1'}},
snapshot.obj_get_changes())
snapshot.save()
snapshot_update.assert_called_once_with(self.context, snapshot.id,
{'display_name': 'foobar'})
snapshot_metadata_update.assert_called_once_with(self.context, '1',
{'key1': 'value1'},
True)
@mock.patch('cinder.db.snapshot_destroy')
def test_destroy(self, snapshot_destroy):
snapshot = objects.Snapshot(context=self.context, id=1)
snapshot.destroy()
snapshot_destroy.assert_called_once_with(self.context, '1')
@mock.patch('cinder.db.snapshot_metadata_delete')
def test_delete_metadata_key(self, snapshot_metadata_delete):
snapshot = objects.Snapshot(self.context, id=1)
snapshot.metadata = {'key1': 'value1', 'key2': 'value2'}
self.assertEqual({}, snapshot._orig_metadata)
snapshot.delete_metadata_key(self.context, 'key2')
self.assertEqual({'key1': 'value1'}, snapshot.metadata)
snapshot_metadata_delete.assert_called_once_with(self.context, '1',
'key2')
def test_obj_fields(self):
volume = objects.Volume(context=self.context, id=2, _name_id=2)
snapshot = objects.Snapshot(context=self.context, id=1,
volume=volume)
self.assertEqual(['name', 'volume_name'], snapshot.obj_extra_fields)
self.assertEqual('snapshot-1', snapshot.name)
self.assertEqual('volume-2', snapshot.volume_name)
@mock.patch('cinder.objects.volume.Volume.get_by_id')
@mock.patch('cinder.objects.cgsnapshot.CGSnapshot.get_by_id')
def test_obj_load_attr(self, cgsnapshot_get_by_id, volume_get_by_id):
snapshot = objects.Snapshot._from_db_object(
self.context, objects.Snapshot(), fake_db_snapshot)
# Test volume lazy-loaded field
volume = objects.Volume(context=self.context, id=2)
volume_get_by_id.return_value = volume
self.assertEqual(volume, snapshot.volume)
volume_get_by_id.assert_called_once_with(self.context,
snapshot.volume_id)
# Test cgsnapshot lazy-loaded field
cgsnapshot = objects.CGSnapshot(context=self.context, id=2)
cgsnapshot_get_by_id.return_value = cgsnapshot
self.assertEqual(cgsnapshot, snapshot.cgsnapshot)
cgsnapshot_get_by_id.assert_called_once_with(self.context,
snapshot.cgsnapshot_id)
@mock.patch('cinder.db.snapshot_data_get_for_project')
def test_snapshot_data_get_for_project(self, snapshot_data_get):
snapshot = objects.Snapshot._from_db_object(
self.context, objects.Snapshot(), fake_db_snapshot)
volume_type_id = mock.sentinel.volume_type_id
snapshot.snapshot_data_get_for_project(self.context,
self.project_id,
volume_type_id)
snapshot_data_get.assert_called_once_with(self.context,
self.project_id,
volume_type_id)
class TestSnapshotList(test_objects.BaseObjectsTestCase):
@mock.patch('cinder.objects.volume.Volume.get_by_id')
@mock.patch('cinder.db.snapshot_get_all', return_value=[fake_db_snapshot])
def test_get_all(self, snapshot_get_all, volume_get_by_id):
fake_volume_obj = fake_volume.fake_volume_obj(self.context)
volume_get_by_id.return_value = fake_volume_obj
search_opts = mock.sentinel.search_opts
snapshots = objects.SnapshotList.get_all(
self.context, search_opts)
self.assertEqual(1, len(snapshots))
TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0])
snapshot_get_all.assert_called_once_with(self.context, search_opts,
None, None, None, None, None)
@mock.patch('cinder.objects.Volume.get_by_id')
@mock.patch('cinder.db.snapshot_get_by_host',
return_value=[fake_db_snapshot])
def test_get_by_host(self, get_by_host, volume_get_by_id):
fake_volume_obj = fake_volume.fake_volume_obj(self.context)
volume_get_by_id.return_value = fake_volume_obj
snapshots = objects.SnapshotList.get_by_host(
self.context, 'fake-host')
self.assertEqual(1, len(snapshots))
TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0])
@mock.patch('cinder.objects.volume.Volume.get_by_id')
@mock.patch('cinder.db.snapshot_get_all_by_project',
return_value=[fake_db_snapshot])
def test_get_all_by_project(self, get_all_by_project, volume_get_by_id):
fake_volume_obj = fake_volume.fake_volume_obj(self.context)
volume_get_by_id.return_value = fake_volume_obj
search_opts = mock.sentinel.search_opts
snapshots = objects.SnapshotList.get_all_by_project(
self.context, self.project_id, search_opts)
self.assertEqual(1, len(snapshots))
TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0])
get_all_by_project.assert_called_once_with(self.context,
self.project_id,
search_opts, None, None,
None, None, None)
@mock.patch('cinder.objects.volume.Volume.get_by_id')
@mock.patch('cinder.db.snapshot_get_all_for_volume',
return_value=[fake_db_snapshot])
def test_get_all_for_volume(self, get_all_for_volume, volume_get_by_id):
fake_volume_obj = fake_volume.fake_volume_obj(self.context)
volume_get_by_id.return_value = fake_volume_obj
snapshots = objects.SnapshotList.get_all_for_volume(
self.context, fake_volume_obj.id)
self.assertEqual(1, len(snapshots))
TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0])
@mock.patch('cinder.objects.volume.Volume.get_by_id')
@mock.patch('cinder.db.snapshot_get_active_by_window',
return_value=[fake_db_snapshot])
def test_get_active_by_window(self, get_active_by_window,
volume_get_by_id):
fake_volume_obj = fake_volume.fake_volume_obj(self.context)
volume_get_by_id.return_value = fake_volume_obj
snapshots = objects.SnapshotList.get_active_by_window(
self.context, mock.sentinel.begin, mock.sentinel.end)
self.assertEqual(1, len(snapshots))
TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0])
@mock.patch('cinder.objects.volume.Volume.get_by_id')
@mock.patch('cinder.db.snapshot_get_all_for_cgsnapshot',
return_value=[fake_db_snapshot])
def test_get_all_for_cgsnapshot(self, get_all_for_cgsnapshot,
volume_get_by_id):
fake_volume_obj = fake_volume.fake_volume_obj(self.context)
volume_get_by_id.return_value = fake_volume_obj
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
self.context, mock.sentinel.cgsnapshot_id)
self.assertEqual(1, len(snapshots))
TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0])
@mock.patch('cinder.objects.volume.Volume.get_by_id')
@mock.patch('cinder.db.snapshot_get_all')
def test_get_all_without_metadata(self, snapshot_get_all,
volume_get_by_id):
fake_volume_obj = fake_volume.fake_volume_obj(self.context)
volume_get_by_id.return_value = fake_volume_obj
snapshot = copy.deepcopy(fake_db_snapshot)
del snapshot['snapshot_metadata']
snapshot_get_all.return_value = [snapshot]
search_opts = mock.sentinel.search_opts
self.assertRaises(exception.MetadataAbsent,
objects.SnapshotList.get_all,
self.context, search_opts)
@mock.patch('cinder.objects.volume.Volume.get_by_id')
@mock.patch('cinder.db.snapshot_get_all')
def test_get_all_with_metadata(self, snapshot_get_all, volume_get_by_id):
fake_volume_obj = fake_volume.fake_volume_obj(self.context)
volume_get_by_id.return_value = fake_volume_obj
db_snapshot = copy.deepcopy(fake_db_snapshot)
db_snapshot['snapshot_metadata'] = [{'key': 'fake_key',
'value': 'fake_value'}]
snapshot_get_all.return_value = [db_snapshot]
search_opts = mock.sentinel.search_opts
snapshots = objects.SnapshotList.get_all(
self.context, search_opts)
self.assertEqual(1, len(snapshots))
snapshot_obj = copy.deepcopy(fake_snapshot_obj)
snapshot_obj['metadata'] = {'fake_key': 'fake_value'}
TestSnapshot._compare(self, snapshot_obj, snapshots[0])
snapshot_get_all.assert_called_once_with(self.context, search_opts,
None, None, None, None, None)
| apache-2.0 | 4,037,696,467,265,548,300 | 45.319298 | 78 | 0.61715 | false |
valmynd/MediaFetcher | src/plugins/youtube_dl/youtube_dl/extractor/twitch.py | 1 | 20771 | # coding: utf-8
from __future__ import unicode_literals
import itertools
import re
import random
import json
from .common import InfoExtractor
from ..compat import (
compat_kwargs,
compat_parse_qs,
compat_str,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
)
from ..utils import (
clean_html,
ExtractorError,
float_or_none,
int_or_none,
orderedSet,
parse_duration,
parse_iso8601,
qualities,
try_get,
unified_timestamp,
update_url_query,
url_or_none,
urljoin,
)
class TwitchBaseIE(InfoExtractor):
_VALID_URL_BASE = r'https?://(?:(?:www|go|m)\.)?twitch\.tv'
_API_BASE = 'https://api.twitch.tv'
_USHER_BASE = 'https://usher.ttvnw.net'
_LOGIN_FORM_URL = 'https://www.twitch.tv/login'
_LOGIN_POST_URL = 'https://passport.twitch.tv/login'
_CLIENT_ID = 'kimne78kx3ncx6brgo4mv6wki5h1ko'
_NETRC_MACHINE = 'twitch'
def _handle_error(self, response):
if not isinstance(response, dict):
return
error = response.get('error')
if error:
raise ExtractorError(
'%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')),
expected=True)
def _call_api(self, path, item_id, *args, **kwargs):
headers = kwargs.get('headers', {}).copy()
headers['Client-ID'] = self._CLIENT_ID
kwargs['headers'] = headers
response = self._download_json(
'%s/%s' % (self._API_BASE, path), item_id,
*args, **compat_kwargs(kwargs))
self._handle_error(response)
return response
def _real_initialize(self):
self._login()
def _login(self):
username, password = self._get_login_info()
if username is None:
return
def fail(message):
raise ExtractorError(
'Unable to login. Twitch said: %s' % message, expected=True)
def login_step(page, urlh, note, data):
form = self._hidden_inputs(page)
form.update(data)
page_url = urlh.geturl()
post_url = self._search_regex(
r'<form[^>]+action=(["\'])(?P<url>.+?)\1', page,
'post url', default=self._LOGIN_POST_URL, group='url')
post_url = urljoin(page_url, post_url)
headers = {
'Referer': page_url,
'Origin': page_url,
'Content-Type': 'text/plain;charset=UTF-8',
}
response = self._download_json(
post_url, None, note, data=json.dumps(form).encode(),
headers=headers, expected_status=400)
error = response.get('error_description') or response.get('error_code')
if error:
fail(error)
if 'Authenticated successfully' in response.get('message', ''):
return None, None
redirect_url = urljoin(
post_url,
response.get('redirect') or response['redirect_path'])
return self._download_webpage_handle(
redirect_url, None, 'Downloading login redirect page',
headers=headers)
login_page, handle = self._download_webpage_handle(
self._LOGIN_FORM_URL, None, 'Downloading login page')
# Some TOR nodes and public proxies are blocked completely
if 'blacklist_message' in login_page:
fail(clean_html(login_page))
redirect_page, handle = login_step(
login_page, handle, 'Logging in', {
'username': username,
'password': password,
'client_id': self._CLIENT_ID,
})
# Successful login
if not redirect_page:
return
if re.search(r'(?i)<form[^>]+id="two-factor-submit"', redirect_page) is not None:
# TODO: Add mechanism to request an SMS or phone call
tfa_token = self._get_tfa_info('two-factor authentication token')
login_step(redirect_page, handle, 'Submitting TFA token', {
'authy_token': tfa_token,
'remember_2fa': 'true',
})
def _prefer_source(self, formats):
try:
source = next(f for f in formats if f['format_id'] == 'Source')
source['preference'] = 10
except StopIteration:
pass # No Source stream present
self._sort_formats(formats)
class TwitchItemBaseIE(TwitchBaseIE):
def _download_info(self, item, item_id):
return self._extract_info(self._call_api(
'kraken/videos/%s%s' % (item, item_id), item_id,
'Downloading %s info JSON' % self._ITEM_TYPE))
def _extract_media(self, item_id):
info = self._download_info(self._ITEM_SHORTCUT, item_id)
response = self._call_api(
'api/videos/%s%s' % (self._ITEM_SHORTCUT, item_id), item_id,
'Downloading %s playlist JSON' % self._ITEM_TYPE)
entries = []
chunks = response['chunks']
qualities = list(chunks.keys())
for num, fragment in enumerate(zip(*chunks.values()), start=1):
formats = []
for fmt_num, fragment_fmt in enumerate(fragment):
format_id = qualities[fmt_num]
fmt = {
'url': fragment_fmt['url'],
'format_id': format_id,
'quality': 1 if format_id == 'live' else 0,
}
m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
if m:
fmt['height'] = int(m.group('height'))
formats.append(fmt)
self._sort_formats(formats)
entry = dict(info)
entry['id'] = '%s_%d' % (entry['id'], num)
entry['title'] = '%s part %d' % (entry['title'], num)
entry['formats'] = formats
entries.append(entry)
return self.playlist_result(entries, info['id'], info['title'])
def _extract_info(self, info):
status = info.get('status')
if status == 'recording':
is_live = True
elif status == 'recorded':
is_live = False
else:
is_live = None
return {
'id': info['_id'],
'title': info.get('title') or 'Untitled Broadcast',
'description': info.get('description'),
'duration': int_or_none(info.get('length')),
'thumbnail': info.get('preview'),
'uploader': info.get('channel', {}).get('display_name'),
'uploader_id': info.get('channel', {}).get('name'),
'timestamp': parse_iso8601(info.get('recorded_at')),
'view_count': int_or_none(info.get('views')),
'is_live': is_live,
}
def _real_extract(self, url):
return self._extract_media(self._match_id(url))
class TwitchVideoIE(TwitchItemBaseIE):
IE_NAME = 'twitch:video'
_VALID_URL = r'%s/[^/]+/b/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
_ITEM_TYPE = 'video'
_ITEM_SHORTCUT = 'a'
_TEST = {
'url': 'http://www.twitch.tv/riotgames/b/577357806',
'info_dict': {
'id': 'a577357806',
'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
},
'playlist_mincount': 12,
'skip': 'HTTP Error 404: Not Found',
}
class TwitchChapterIE(TwitchItemBaseIE):
IE_NAME = 'twitch:chapter'
_VALID_URL = r'%s/[^/]+/c/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
_ITEM_TYPE = 'chapter'
_ITEM_SHORTCUT = 'c'
_TESTS = [{
'url': 'http://www.twitch.tv/acracingleague/c/5285812',
'info_dict': {
'id': 'c5285812',
'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
},
'playlist_mincount': 3,
'skip': 'HTTP Error 404: Not Found',
}, {
'url': 'http://www.twitch.tv/tsm_theoddone/c/2349361',
'only_matching': True,
}]
class TwitchVodIE(TwitchItemBaseIE):
IE_NAME = 'twitch:vod'
_VALID_URL = r'''(?x)
https?://
(?:
(?:(?:www|go|m)\.)?twitch\.tv/(?:[^/]+/v(?:ideo)?|videos)/|
player\.twitch\.tv/\?.*?\bvideo=v
)
(?P<id>\d+)
'''
_ITEM_TYPE = 'vod'
_ITEM_SHORTCUT = 'v'
_TESTS = [{
'url': 'http://www.twitch.tv/riotgames/v/6528877?t=5m10s',
'info_dict': {
'id': 'v6528877',
'ext': 'mp4',
'title': 'LCK Summer Split - Week 6 Day 1',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 17208,
'timestamp': 1435131709,
'upload_date': '20150624',
'uploader': 'Riot Games',
'uploader_id': 'riotgames',
'view_count': int,
'start_time': 310,
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
# Untitled broadcast (title is None)
'url': 'http://www.twitch.tv/belkao_o/v/11230755',
'info_dict': {
'id': 'v11230755',
'ext': 'mp4',
'title': 'Untitled Broadcast',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 1638,
'timestamp': 1439746708,
'upload_date': '20150816',
'uploader': 'BelkAO_o',
'uploader_id': 'belkao_o',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
},
'skip': 'HTTP Error 404: Not Found',
}, {
'url': 'http://player.twitch.tv/?t=5m10s&video=v6528877',
'only_matching': True,
}, {
'url': 'https://www.twitch.tv/videos/6528877',
'only_matching': True,
}, {
'url': 'https://m.twitch.tv/beagsandjam/v/247478721',
'only_matching': True,
}, {
'url': 'https://www.twitch.tv/northernlion/video/291940395',
'only_matching': True,
}]
def _real_extract(self, url):
item_id = self._match_id(url)
info = self._download_info(self._ITEM_SHORTCUT, item_id)
access_token = self._call_api(
'api/vods/%s/access_token' % item_id, item_id,
'Downloading %s access token' % self._ITEM_TYPE)
formats = self._extract_m3u8_formats(
'%s/vod/%s?%s' % (
self._USHER_BASE, item_id,
compat_urllib_parse_urlencode({
'allow_source': 'true',
'allow_audio_only': 'true',
'allow_spectre': 'true',
'player': 'twitchweb',
'nauth': access_token['token'],
'nauthsig': access_token['sig'],
})),
item_id, 'mp4', entry_protocol='m3u8_native')
self._prefer_source(formats)
info['formats'] = formats
parsed_url = compat_urllib_parse_urlparse(url)
query = compat_parse_qs(parsed_url.query)
if 't' in query:
info['start_time'] = parse_duration(query['t'][0])
if info.get('timestamp') is not None:
info['subtitles'] = {
'rechat': [{
'url': update_url_query(
'https://rechat.twitch.tv/rechat-messages', {
'video_id': 'v%s' % item_id,
'start': info['timestamp'],
}),
'ext': 'json',
}],
}
return info
class TwitchPlaylistBaseIE(TwitchBaseIE):
_PLAYLIST_PATH = 'kraken/channels/%s/videos/?offset=%d&limit=%d'
_PAGE_LIMIT = 100
def _extract_playlist(self, channel_id):
info = self._call_api(
'kraken/channels/%s' % channel_id,
channel_id, 'Downloading channel info JSON')
channel_name = info.get('display_name') or info.get('name')
entries = []
offset = 0
limit = self._PAGE_LIMIT
broken_paging_detected = False
counter_override = None
for counter in itertools.count(1):
response = self._call_api(
self._PLAYLIST_PATH % (channel_id, offset, limit),
channel_id,
'Downloading %s JSON page %s'
% (self._PLAYLIST_TYPE, counter_override or counter))
page_entries = self._extract_playlist_page(response)
if not page_entries:
break
total = int_or_none(response.get('_total'))
# Since the beginning of March 2016 twitch's paging mechanism
# is completely broken on the twitch side. It simply ignores
# a limit and returns the whole offset number of videos.
# Working around by just requesting all videos at once.
# Upd: pagination bug was fixed by twitch on 15.03.2016.
if not broken_paging_detected and total and len(page_entries) > limit:
self.report_warning(
'Twitch pagination is broken on twitch side, requesting all videos at once',
channel_id)
broken_paging_detected = True
offset = total
counter_override = '(all at once)'
continue
entries.extend(page_entries)
if broken_paging_detected or total and len(page_entries) >= total:
break
offset += limit
return self.playlist_result(
[self._make_url_result(entry) for entry in orderedSet(entries)],
channel_id, channel_name)
def _make_url_result(self, url):
try:
video_id = 'v%s' % TwitchVodIE._match_id(url)
return self.url_result(url, TwitchVodIE.ie_key(), video_id=video_id)
except AssertionError:
return self.url_result(url)
def _extract_playlist_page(self, response):
videos = response.get('videos')
return [video['url'] for video in videos] if videos else []
def _real_extract(self, url):
return self._extract_playlist(self._match_id(url))
class TwitchProfileIE(TwitchPlaylistBaseIE):
IE_NAME = 'twitch:profile'
_VALID_URL = r'%s/(?P<id>[^/]+)/profile/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
_PLAYLIST_TYPE = 'profile'
_TESTS = [{
'url': 'http://www.twitch.tv/vanillatv/profile',
'info_dict': {
'id': 'vanillatv',
'title': 'VanillaTV',
},
'playlist_mincount': 412,
}, {
'url': 'http://m.twitch.tv/vanillatv/profile',
'only_matching': True,
}]
class TwitchVideosBaseIE(TwitchPlaylistBaseIE):
_VALID_URL_VIDEOS_BASE = r'%s/(?P<id>[^/]+)/videos' % TwitchBaseIE._VALID_URL_BASE
_PLAYLIST_PATH = TwitchPlaylistBaseIE._PLAYLIST_PATH + '&broadcast_type='
class TwitchAllVideosIE(TwitchVideosBaseIE):
IE_NAME = 'twitch:videos:all'
_VALID_URL = r'%s/all' % TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE
_PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'archive,upload,highlight'
_PLAYLIST_TYPE = 'all videos'
_TESTS = [{
'url': 'https://www.twitch.tv/spamfish/videos/all',
'info_dict': {
'id': 'spamfish',
'title': 'Spamfish',
},
'playlist_mincount': 869,
}, {
'url': 'https://m.twitch.tv/spamfish/videos/all',
'only_matching': True,
}]
class TwitchUploadsIE(TwitchVideosBaseIE):
IE_NAME = 'twitch:videos:uploads'
_VALID_URL = r'%s/uploads' % TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE
_PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'upload'
_PLAYLIST_TYPE = 'uploads'
_TESTS = [{
'url': 'https://www.twitch.tv/spamfish/videos/uploads',
'info_dict': {
'id': 'spamfish',
'title': 'Spamfish',
},
'playlist_mincount': 0,
}, {
'url': 'https://m.twitch.tv/spamfish/videos/uploads',
'only_matching': True,
}]
class TwitchPastBroadcastsIE(TwitchVideosBaseIE):
IE_NAME = 'twitch:videos:past-broadcasts'
_VALID_URL = r'%s/past-broadcasts' % TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE
_PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'archive'
_PLAYLIST_TYPE = 'past broadcasts'
_TESTS = [{
'url': 'https://www.twitch.tv/spamfish/videos/past-broadcasts',
'info_dict': {
'id': 'spamfish',
'title': 'Spamfish',
},
'playlist_mincount': 0,
}, {
'url': 'https://m.twitch.tv/spamfish/videos/past-broadcasts',
'only_matching': True,
}]
class TwitchHighlightsIE(TwitchVideosBaseIE):
IE_NAME = 'twitch:videos:highlights'
_VALID_URL = r'%s/highlights' % TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE
_PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'highlight'
_PLAYLIST_TYPE = 'highlights'
_TESTS = [{
'url': 'https://www.twitch.tv/spamfish/videos/highlights',
'info_dict': {
'id': 'spamfish',
'title': 'Spamfish',
},
'playlist_mincount': 805,
}, {
'url': 'https://m.twitch.tv/spamfish/videos/highlights',
'only_matching': True,
}]
class TwitchStreamIE(TwitchBaseIE):
IE_NAME = 'twitch:stream'
_VALID_URL = r'''(?x)
https?://
(?:
(?:(?:www|go|m)\.)?twitch\.tv/|
player\.twitch\.tv/\?.*?\bchannel=
)
(?P<id>[^/#?]+)
'''
_TESTS = [{
'url': 'http://www.twitch.tv/shroomztv',
'info_dict': {
'id': '12772022048',
'display_id': 'shroomztv',
'ext': 'mp4',
'title': 're:^ShroomzTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'H1Z1 - lonewolfing with ShroomzTV | A3 Battle Royale later - @ShroomzTV',
'is_live': True,
'timestamp': 1421928037,
'upload_date': '20150122',
'uploader': 'ShroomzTV',
'uploader_id': 'shroomztv',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://www.twitch.tv/miracle_doto#profile-0',
'only_matching': True,
}, {
'url': 'https://player.twitch.tv/?channel=lotsofs',
'only_matching': True,
}, {
'url': 'https://go.twitch.tv/food',
'only_matching': True,
}, {
'url': 'https://m.twitch.tv/food',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return (False
if any(ie.suitable(url) for ie in (
TwitchVideoIE,
TwitchChapterIE,
TwitchVodIE,
TwitchProfileIE,
TwitchAllVideosIE,
TwitchUploadsIE,
TwitchPastBroadcastsIE,
TwitchHighlightsIE,
TwitchClipsIE))
else super(TwitchStreamIE, cls).suitable(url))
def _real_extract(self, url):
channel_id = self._match_id(url)
stream = self._call_api(
'kraken/streams/%s?stream_type=all' % channel_id, channel_id,
'Downloading stream JSON').get('stream')
if not stream:
raise ExtractorError('%s is offline' % channel_id, expected=True)
        # The channel name may have been typed with different capitalization than
        # the original channel name (e.g. http://www.twitch.tv/TWITCHPLAYSPOKEMON),
        # which would lead to constructing an invalid m3u8 URL. Work around this by
        # using the original channel name from the stream JSON, falling back to
        # lowercase if it is not available.
channel_id = stream.get('channel', {}).get('name') or channel_id.lower()
access_token = self._call_api(
'api/channels/%s/access_token' % channel_id, channel_id,
'Downloading channel access token')
query = {
'allow_source': 'true',
'allow_audio_only': 'true',
'allow_spectre': 'true',
'p': random.randint(1000000, 10000000),
'player': 'twitchweb',
'segment_preference': '4',
'sig': access_token['sig'].encode('utf-8'),
'token': access_token['token'].encode('utf-8'),
}
formats = self._extract_m3u8_formats(
'%s/api/channel/hls/%s.m3u8?%s'
% (self._USHER_BASE, channel_id, compat_urllib_parse_urlencode(query)),
channel_id, 'mp4')
self._prefer_source(formats)
view_count = stream.get('viewers')
timestamp = parse_iso8601(stream.get('created_at'))
channel = stream['channel']
title = self._live_title(channel.get('display_name') or channel.get('name'))
description = channel.get('status')
thumbnails = []
for thumbnail_key, thumbnail_url in stream['preview'].items():
m = re.search(r'(?P<width>\d+)x(?P<height>\d+)\.jpg$', thumbnail_key)
if not m:
continue
thumbnails.append({
'url': thumbnail_url,
'width': int(m.group('width')),
'height': int(m.group('height')),
})
return {
'id': compat_str(stream['_id']),
'display_id': channel_id,
'title': title,
'description': description,
'thumbnails': thumbnails,
'uploader': channel.get('display_name'),
'uploader_id': channel.get('name'),
'timestamp': timestamp,
'view_count': view_count,
'formats': formats,
'is_live': True,
}
class TwitchClipsIE(TwitchBaseIE):
IE_NAME = 'twitch:clips'
_VALID_URL = r'https?://(?:clips\.twitch\.tv/(?:[^/]+/)*|(?:www\.)?twitch\.tv/[^/]+/clip/)(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://clips.twitch.tv/FaintLightGullWholeWheat',
'md5': '761769e1eafce0ffebfb4089cb3847cd',
'info_dict': {
'id': '42850523',
'ext': 'mp4',
'title': 'EA Play 2016 Live from the Novo Theatre',
'thumbnail': r're:^https?://.*\.jpg',
'timestamp': 1465767393,
'upload_date': '20160612',
'creator': 'EA',
'uploader': 'stereotype_',
'uploader_id': '43566419',
},
}, {
# multiple formats
'url': 'https://clips.twitch.tv/rflegendary/UninterestedBeeDAESuppy',
'only_matching': True,
}, {
'url': 'https://www.twitch.tv/sergeynixon/clip/StormyThankfulSproutFutureMan',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
status = self._download_json(
'https://clips.twitch.tv/api/v2/clips/%s/status' % video_id,
video_id)
formats = []
for option in status['quality_options']:
if not isinstance(option, dict):
continue
source = url_or_none(option.get('source'))
if not source:
continue
formats.append({
'url': source,
'format_id': option.get('quality'),
'height': int_or_none(option.get('quality')),
'fps': int_or_none(option.get('frame_rate')),
})
self._sort_formats(formats)
info = {
'formats': formats,
}
clip = self._call_api(
'kraken/clips/%s' % video_id, video_id, fatal=False, headers={
'Accept': 'application/vnd.twitchtv.v5+json',
})
if clip:
quality_key = qualities(('tiny', 'small', 'medium'))
thumbnails = []
thumbnails_dict = clip.get('thumbnails')
if isinstance(thumbnails_dict, dict):
for thumbnail_id, thumbnail_url in thumbnails_dict.items():
thumbnails.append({
'id': thumbnail_id,
'url': thumbnail_url,
'preference': quality_key(thumbnail_id),
})
info.update({
'id': clip.get('tracking_id') or video_id,
'title': clip.get('title') or video_id,
'duration': float_or_none(clip.get('duration')),
'views': int_or_none(clip.get('views')),
'timestamp': unified_timestamp(clip.get('created_at')),
'thumbnails': thumbnails,
'creator': try_get(clip, lambda x: x['broadcaster']['display_name'], compat_str),
'uploader': try_get(clip, lambda x: x['curator']['display_name'], compat_str),
'uploader_id': try_get(clip, lambda x: x['curator']['id'], compat_str),
})
else:
info.update({
'title': video_id,
'id': video_id,
})
return info
| gpl-3.0 | -2,672,165,837,656,433,700 | 27.610193 | 108 | 0.628135 | false |
bogobog/hierarchy_config_parser | bin/config_variable_processor/CallableConfigParser.py | 1 | 4615 |
import ConfigParser, re
try:
import cStringIO as StringIO
except ImportError:
import StringIO
class CallableConfigParser( ConfigParser.ConfigParser ):
file_cache = {}
FUNC_PATTERN = re.compile(r"\|\( ?([^ ]*) (.*)\)")
def __init__(self, funcs = {}, *args, **kwargs ):
ConfigParser.ConfigParser.__init__( self, *args, **kwargs )
self.funcs = funcs
def get(self, section, option, raw=False, vars=None):
all_vars = {}
if vars:
all_vars.update( vars )
for i in range( ConfigParser.MAX_INTERPOLATION_DEPTH ):
all_vars.update( dict( ConfigParser.ConfigParser.items( self, section, raw, all_vars ) ) )
return all_vars[ option ]
def items(self, section, raw=False, vars=None):
all_vars = {}
if vars:
all_vars.update( vars )
for i in range( ConfigParser.MAX_INTERPOLATION_DEPTH ):
all_vars.update( dict( ConfigParser.ConfigParser.items( self, section, raw, all_vars ) ) )
return all_vars.items()
def _interpolate(self, section, option, rawval, vars):
#print vars
#print '%s %s %s' % ( section, option, rawval )
try:
value = ConfigParser.ConfigParser._interpolate( self, section, option, rawval, vars )
except ( ConfigParser.InterpolationMissingOptionError, ConfigParser.InterpolationDepthError ):
value = rawval
#print 'option %s' % option
#print 'rawval %s' % rawval
#print 'value %s' % value
if re.search( r"\|\( ?[^ ]* +.*\|\(.*\).*\)", value ):
value = rawval
#print 'value2 %s' % value
def func_sub( match ):
func_name = match.group(1)
func_args = match.group(2)
if func_name is None:
return match.group()
#print 'func_name %s' % func_name
#print 'func_args %s' % func_args
error_string = '|( %s %s)' % ( func_name, func_args )
if '%(' in func_args or '|(' in func_args or not func_name in self.funcs:
return error_string
#print 'call func'
processed_args = list( arg.strip('"') for arg in func_args.split() )
try:
value = self.funcs[ func_name ]( self, *processed_args )
except:
return error_string
return str( value )
func_value = value
value = self.FUNC_PATTERN.sub( func_sub, func_value )
#print 'value3 %s' % value
#print
return value
def _read( self, fp, filename ):
#print filename
processed_fp = fp
if not filename in self.__class__.file_cache:
#print 'not cached'
new_fp = StringIO.StringIO()
new_fp.writelines( processed_fp.readlines() )
self.__class__.file_cache[ filename ] = new_fp
processed_fp.seek( 0 )
else:
#print 'cached'
processed_fp = self.__class__.file_cache[ filename ]
processed_fp.seek(0)
ConfigParser.ConfigParser._read( self, processed_fp, filename )
def optionxform(self, optionstr):
return optionstr
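# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal example of
# how this parser is presumably meant to be used. The ``upper`` helper, the
# section/option names and the expected output are illustrative assumptions
# inferred from the code above, not documented behaviour.
if __name__ == '__main__':
    def upper(parser, value):
        # registered callables receive the parser instance first, followed by
        # the whitespace-separated arguments from the ``|( name args )`` call
        return value.upper()

    sample = StringIO.StringIO(
        "[main]\n"
        "name = world\n"
        "shout = |( upper %(name)s )\n"
    )
    config = CallableConfigParser(funcs={'upper': upper})
    config.readfp(sample)
    # ``%(name)s`` is interpolated first, then ``|( upper world )`` is invoked;
    # this is expected to print WORLD.
    print config.get('main', 'shout')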
| gpl-2.0 | 6,231,090,226,505,312,000 | 40.576577 | 271 | 0.384832 | false |
ronen/Halide | python_bindings/tutorial/lesson_12_using_the_gpu.py | 1 | 11813 | #!/usr/bin/python3
# Halide tutorial lesson 12.
# This lesson demonstrates how to use Halide to run code on a GPU.
# This lesson can be built by invoking the command:
# make tutorial_lesson_12_using_the_gpu
# in a shell with the current directory at the top of the halide source tree.
# Otherwise, see the platform-specific compiler invocations below.
# On linux, you can compile and run it like so:
# g++ lesson_12*.cpp -g -std=c++11 -I ../include -L ../bin -lHalide `libpng-config --cflags --ldflags` -lpthread -ldl -o lesson_12
# LD_LIBRARY_PATH=../bin ./lesson_12
# On os x:
# g++ lesson_12*.cpp -g -std=c++11 -I ../include -L ../bin -lHalide `libpng-config --cflags --ldflags` -o lesson_12
# DYLD_LIBRARY_PATH=../bin ./lesson_12
#include "Halide.h"
#include <stdio.h>
#using namespace Halide
from halide import *
# Include some support code for loading pngs.
#include "image_io.h"
from scipy.misc import imread
import os.path
# Include a clock to do performance testing.
#include "clock.h"
from datetime import datetime
# Define some Vars to use.
x, y, c, i = Var("x"), Var("y"), Var("c"), Var("i")
# We're going to want to schedule a pipeline in several ways, so we
# define the pipeline in a class so that we can recreate it several
# times with different schedules.
class MyPipeline:
def __init__(self, input):
assert type(input) == Buffer_uint8
self.lut = Func("lut")
self.padded = Func("padded")
self.padded16 = Func("padded16")
self.sharpen = Func("sharpen")
self.curved = Func("curved")
self.input = input
# For this lesson, we'll use a two-stage pipeline that sharpens
# and then applies a look-up-table (LUT).
# First we'll define the LUT. It will be a gamma curve.
self.lut[i] = cast(UInt(8), clamp(pow(i / 255.0, 1.2) * 255.0, 0, 255))
# Augment the input with a boundary condition.
self.padded[x, y, c] = input[clamp(x, 0, input.width()-1),
clamp(y, 0, input.height()-1), c]
# Cast it to 16-bit to do the math.
self.padded16[x, y, c] = cast(UInt(16), self.padded[x, y, c])
# Next we sharpen it with a five-tap filter.
self.sharpen[x, y, c] = (self.padded16[x, y, c] * 2-
(self.padded16[x - 1, y, c] +
self.padded16[x, y - 1, c] +
self.padded16[x + 1, y, c] +
self.padded16[x, y + 1, c]) / 4)
# Then apply the LUT.
self.curved[x, y, c] = self.lut[self.sharpen[x, y, c]]
# Now we define methods that give our pipeline several different
# schedules.
def schedule_for_cpu(self):
# Compute the look-up-table ahead of time.
self.lut.compute_root()
# Compute color channels innermost. Promise that there will
# be three of them and unroll across them.
self.curved.reorder(c, x, y) \
.bound(c, 0, 3) \
.unroll(c)
# Look-up-tables don't vectorize well, so just parallelize
# curved in slices of 16 scanlines.
yo, yi = Var("yo"), Var("yi")
self.curved.split(y, yo, yi, 16) \
.parallel(yo)
# Compute sharpen as needed per scanline of curved, reusing
# previous values computed within the same strip of 16
# scanlines.
self.sharpen.store_at(self.curved, yo) \
.compute_at(self.curved, yi)
# Vectorize the sharpen. It's 16-bit so we'll vectorize it 8-wide.
self.sharpen.vectorize(x, 8)
# Compute the padded input at the same granularity as the
# sharpen. We'll leave the cast to 16-bit inlined into
# sharpen.
self.padded.store_at(self.curved, yo) \
.compute_at(self.curved, yi)
# Also vectorize the padding. It's 8-bit, so we'll vectorize
# 16-wide.
self.padded.vectorize(x, 16)
# JIT-compile the pipeline for the CPU.
self.curved.compile_jit()
return
# Now a schedule that uses CUDA or OpenCL.
def schedule_for_gpu(self):
# We make the decision about whether to use the GPU for each
# Func independently. If you have one Func computed on the
# CPU, and the next computed on the GPU, Halide will do the
# copy-to-gpu under the hood. For this pipeline, there's no
# reason to use the CPU for any of the stages. Halide will
# copy the input image to the GPU the first time we run the
# pipeline, and leave it there to reuse on subsequent runs.
# As before, we'll compute the LUT once at the start of the
# pipeline.
self.lut.compute_root()
# Let's compute the look-up-table using the GPU in 16-wide
# one-dimensional thread blocks. First we split the index
# into blocks of size 16:
block, thread = Var("block"), Var("thread")
self.lut.split(i, block, thread, 16)
# Then we tell cuda that our Vars 'block' and 'thread'
# correspond to CUDA's notions of blocks and threads, or
# OpenCL's notions of thread groups and threads.
self.lut.gpu_blocks(block) \
.gpu_threads(thread)
# This is a very common scheduling pattern on the GPU, so
# there's a shorthand for it:
# lut.gpu_tile(i, 16)
# Func::gpu_tile method is similar to Func::tile, except that
# it also specifies that the tile coordinates correspond to
# GPU blocks, and the coordinates within each tile correspond
# to GPU threads.
# Compute color channels innermost. Promise that there will
# be three of them and unroll across them.
self.curved.reorder(c, x, y) \
.bound(c, 0, 3) \
.unroll(c)
# Compute curved in 2D 8x8 tiles using the GPU.
self.curved.gpu_tile(x, y, 8, 8)
# This is equivalent to:
# curved.tile(x, y, xo, yo, xi, yi, 8, 8)
# .gpu_blocks(xo, yo)
# .gpu_threads(xi, yi)
# We'll leave sharpen as inlined into curved.
# Compute the padded input as needed per GPU block, storing the
# intermediate result in shared memory. Var::gpu_blocks, and
# Var::gpu_threads exist to help you schedule producers within
# GPU threads and blocks.
self.padded.compute_at(self.curved, Var.gpu_blocks())
# Use the GPU threads for the x and y coordinates of the
# padded input.
self.padded.gpu_threads(x, y)
# JIT-compile the pipeline for the GPU. CUDA or OpenCL are
# not enabled by default. We have to construct a Target
# object, enable one of them, and then pass that target
# object to compile_jit. Otherwise your CPU will very slowly
# pretend it's a GPU, and use one thread per output pixel.
# Start with a target suitable for the machine you're running
# this on.
target = get_host_target()
# Then enable OpenCL or CUDA.
#use_opencl = False
use_opencl = True
if use_opencl:
# We'll enable OpenCL here, because it tends to give better
# performance than CUDA, even with NVidia's drivers, because
# NVidia's open source LLVM backend doesn't seem to do all
# the same optimizations their proprietary compiler does.
target.set_feature(TargetFeature.OpenCL)
print("(Using OpenCL)")
else:
# Uncomment the next line and comment out the line above to
# try CUDA instead.
target.set_feature(TargetFeature.CUDA)
print("(Using CUDA)")
# If you want to see all of the OpenCL or CUDA API calls done
# by the pipeline, you can also enable the Debug
# flag. This is helpful for figuring out which stages are
# slow, or when CPU -> GPU copies happen. It hurts
# performance though, so we'll leave it commented out.
# target.set_feature(TargetFeature.Debug)
self.curved.compile_jit(target)
def test_performance(self):
# Test the performance of the scheduled MyPipeline.
output = Buffer(UInt(8),
self.input.width(),
self.input.height(),
self.input.channels())
# Run the filter once to initialize any GPU runtime state.
self.curved.realize(output)
# Now take the best of 3 runs for timing.
best_time = float("inf")
for i in range(3):
t1 = datetime.now()
# Run the filter 100 times.
for j in range(100):
self.curved.realize(output)
# Force any GPU code to finish by copying the buffer back to the CPU.
output.copy_to_host()
t2 = datetime.now()
elapsed = (t2 - t1).total_seconds()
if elapsed < best_time:
best_time = elapsed
# end of "best of three times"
print("%1.4f milliseconds" % (best_time * 1000))
def test_correctness(self, reference_output):
assert type(reference_output) == Buffer_uint8
output = self.curved.realize(self.input.width(),
self.input.height(),
self.input.channels())
assert type(output) == Buffer_uint8
# Check against the reference output.
for c in range(self.input.channels()):
for y in range(self.input.height()):
for x in range(self.input.width()):
if output(x, y, c) != reference_output(x, y, c):
print(
"Mismatch between output (%d) and "
"reference output (%d) at %d, %d, %d" % (
output(x, y, c),
reference_output(x, y, c),
x, y, c))
return
print("CPU and GPU outputs are consistent.")
def main():
# Load an input image.
image_path = os.path.join(os.path.dirname(__file__), "../../tutorial/images/rgb.png")
input_data = imread(image_path)
input = Buffer(input_data)
# Allocated an image that will store the correct output
reference_output = Buffer(UInt(8), input.width(), input.height(), input.channels())
print("Testing performance on CPU:")
p1 = MyPipeline(input)
p1.schedule_for_cpu()
p1.test_performance()
p1.curved.realize(reference_output)
if have_opencl():
print("Testing performance on GPU:")
p2 = MyPipeline(input)
p2.schedule_for_gpu()
p2.test_performance()
p2.test_correctness(reference_output)
else:
print("Not testing performance on GPU, "
"because I can't find the opencl library")
return 0
def have_opencl():
"""
A helper function to check if OpenCL seems to exist on this machine.
:return: bool
"""
import ctypes
import platform
try:
if platform.system() == "Windows":
            ret = ctypes.windll.LoadLibrary("OpenCL.dll") is not None
        elif platform.system() == "Darwin": # apple
            ret = ctypes.cdll.LoadLibrary("/System/Library/Frameworks/OpenCL.framework/Versions/Current/OpenCL") is not None
        elif platform.system() == "Linux":
            ret = ctypes.cdll.LoadLibrary("libOpenCL.so") is not None
else:
raise Exception("Cannot check for opencl presence "
"on unknown system '%s'" % platform.system())
except OSError:
ret = False
return ret
if __name__ == "__main__":
main()
| mit | 7,020,487,303,896,136,000 | 34.79697 | 130 | 0.584018 | false |
pathscale/ninja | platform_helper.py | 1 | 2126 | #!/usr/bin/env python
# Copyright 2011 Google Inc.
# Copyright 2013 Patrick von Reth <[email protected]>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
def platforms():
return ['linux', 'darwin', 'freebsd', 'openbsd', 'solaris', 'sunos5',
'mingw', 'msvc']
class Platform( object ):
def __init__( self, platform):
self._platform = platform
        if self._platform is not None:
return
self._platform = sys.platform
if self._platform.startswith('linux'):
self._platform = 'linux'
elif self._platform.startswith('freebsd'):
self._platform = 'freebsd'
elif self._platform.startswith('openbsd'):
self._platform = 'openbsd'
elif self._platform.startswith('solaris'):
self._platform = 'solaris'
elif self._platform.startswith('mingw'):
self._platform = 'mingw'
elif self._platform.startswith('win'):
self._platform = 'msvc'
def platform(self):
return self._platform
def is_linux(self):
return self._platform == 'linux'
def is_mingw(self):
return self._platform == 'mingw'
def is_msvc(self):
return self._platform == 'msvc'
def is_windows(self):
return self.is_mingw() or self.is_msvc()
def is_solaris(self):
return self._platform == 'solaris'
def is_freebsd(self):
return self._platform == 'freebsd'
def is_openbsd(self):
return self._platform == 'openbsd'
def is_sunos5(self):
return self._platform == 'sunos5'
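# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): configure-style scripts
# presumably instantiate Platform with an optional override and branch on the
# is_*() helpers; the command-line handling below is illustrative only.
if __name__ == '__main__':
    override = sys.argv[1] if len(sys.argv) > 1 else None
    host = Platform(override)  # None means autodetect from sys.platform
    print('host platform: %s' % host.platform())
    print('windows toolchain: %r' % host.is_windows())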
| apache-2.0 | 9,217,081,902,934,273,000 | 29.811594 | 74 | 0.629351 | false |
OpenHydrology/OH-Auto-Statistical-REST-API | application.py | 1 | 2633 | import flask
import flask_restful
import flask.ext.cors
from celery import Celery
from resources.analysis import AnalysisRes, AnalysisStatusRes
from resources.catchment import CatchmentListRes, CatchmentRes
from resources.dataimport import DataImportRes
import floodestimation
import floodestimation.loaders
import floodestimation.fehdata
from sqlalchemy import create_engine
from sqlalchemy.schema import MetaData
from sqlalchemy.orm import sessionmaker
class Application(object):
def __init__(self, settings):
self.flask_app = flask.Flask(__name__)
self.flask_app.config.from_object(settings)
flask.ext.cors.CORS(self.flask_app, resources=r'/api/*', allow_headers=['Content-Type', 'Authorization'],
expose_headers=['Location'])
self.rest_api = flask_restful.Api(self.flask_app)
self.db = floodestimation.db
self.db.engine = create_engine(self.flask_app.config['DATABASE_URL'])
self.db.metadata = MetaData(bind=self.db.engine, reflect=True)
self.db.Session = sessionmaker(bind=self.db.engine)
self._set_db_session()
self._set_routes()
def _set_routes(self):
self.rest_api.add_resource(AnalysisRes, '/api/v0/analyses/', endpoint='post_analysis')
self.rest_api.add_resource(AnalysisRes, '/api/v0/analyses/<task_id>', endpoint='get_analysis')
self.rest_api.add_resource(AnalysisStatusRes, '/api/v0/analysis-tasks/<task_id>', endpoint='analysis_status')
self.rest_api.add_resource(CatchmentListRes, '/api/v0/catchments/')
self.rest_api.add_resource(CatchmentRes, '/api/v0/catchments/<int:catchment_id>')
self.rest_api.add_resource(DataImportRes, '/api/v0/data-imports/')
def _set_db_session(self):
@self.flask_app.before_request
def before_request():
flask.g.db_session = self.db.Session()
@self.flask_app.teardown_request
def teardown_request(exception):
db_session = getattr(flask.g, 'db_session', None)
if db_session is not None:
db_session.close()
def celery(self):
app = self.flask_app
celery = Celery(app.import_name)
celery.conf.update(app.config)
TaskBase = celery.Task
class ContextTask(TaskBase):
abstract = True
def __call__(self, *args, **kwargs):
with app.app_context():
return TaskBase.__call__(self, *args, **kwargs)
celery.Task = ContextTask
return celery
def start_app(self):
self.flask_app.run()
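# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): shows how this class
# is presumably wired up by an entry-point script. The settings stand-in and
# the in-memory SQLite URL are assumptions for illustration; a real deployment
# would supply its own settings object and run a Celery worker against the app
# returned by ``celery()``.
if __name__ == '__main__':
    class DevSettings(object):
        DATABASE_URL = 'sqlite://'  # assumed value; __init__ reads this key
        DEBUG = True

    application = Application(DevSettings)
    # celery_app = application.celery()  # what a worker process would import
    application.start_app()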
| gpl-3.0 | -304,302,401,893,663,360 | 35.569444 | 117 | 0.651348 | false |
stormi/tsunami | src/primaires/objet/types/veste.py | 1 | 1791 | # -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le type veste."""
from .vetement import Vetement
class Veste(Vetement):
nom_type = "veste"
def __init__(self, cle=""):
Vetement.__init__(self, cle)
self.emplacement = "corps"
self.positions = (1, 2) | bsd-3-clause | -1,354,158,303,473,432,800 | 43.8 | 79 | 0.765494 | false |
eduardoklosowski/ergo-notes | ergonotes/admin.py | 1 | 1098 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Eduardo Augusto Klosowski
#
# This file is part of Ergo Notes.
#
# Ergo Notes is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ergo Notes is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Ergo Notes. If not, see <http://www.gnu.org/licenses/>.
#
from django.contrib import admin
from . import models
@admin.register(models.Note)
class NoteAdmin(admin.ModelAdmin):
list_display = ('user', 'priority', 'title', 'show_on_home', 'create_on', 'modify_on', 'markup')
list_display_links = ('title',)
list_filter = ('priority', 'markup')
search_fields = ('=user', 'title')
| agpl-3.0 | -931,401,604,839,511,800 | 34.419355 | 100 | 0.720401 | false |
datawire/quark | quarkc/test/ffi/expected/py/org_example_foo/docs/conf.py | 1 | 1045 | # -*- coding: utf-8 -*-
#
# org_example_foo documentation build configuration file, created by Quark
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'org_example_foo'
copyright = u'2015, org_example_foo authors'
author = u'org_example_foo authors'
version = '0.1.0'
release = '0.1.0'
language = None
exclude_patterns = ['_build']
pygments_style = 'sphinx'
todo_include_todos = False
html_theme = 'alabaster'
html_static_path = ['_static']
htmlhelp_basename = 'org_example_foodoc'
latex_elements = {}
latex_documents = [
(master_doc, 'org_example_foo.tex', u'org_example_foo Documentation',
u'org_example_foo authors', 'manual'),
]
man_pages = [
(master_doc, 'org_example_foo', u'org_example_foo Documentation',
[author], 1)
]
texinfo_documents = [
(master_doc, 'org_example_foo', u'org_example_foo Documentation',
author, 'org_example_foo', 'One line description of org_example_foo.',
'Miscellaneous'),
]
| apache-2.0 | 8,611,617,387,620,865,000 | 28.027778 | 75 | 0.679426 | false |
yephper/django | django/utils/feedgenerator.py | 1 | 18261 | """
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title="Poynter E-Media Tidbits",
... link="http://www.poynter.org/column.asp?id=31",
... description="A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language="en",
... )
>>> feed.add_item(
... title="Hello",
... link="http://www.holovaty.com/test/",
... description="Testing."
... )
>>> with open('test.rss', 'w') as fp:
... feed.write(fp, 'utf-8')
For definitions of the different versions of RSS, see:
http://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
from __future__ import unicode_literals
import datetime
import warnings
from django.utils import datetime_safe, six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text, iri_to_uri
from django.utils.six import StringIO
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.xmlutils import SimplerXMLGenerator
def rfc2822_date(date):
# We can't use strftime() because it produces locale-dependent results, so
# we have to map english month and day names manually
months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',)
days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
# Support datetime objects older than 1900
date = datetime_safe.new_datetime(date)
# We do this ourselves to be timezone aware, email.Utils is not tz aware.
dow = days[date.weekday()]
month = months[date.month - 1]
time_str = date.strftime('%s, %%d %s %%Y %%H:%%M:%%S ' % (dow, month))
if six.PY2: # strftime returns a byte string in Python 2
time_str = time_str.decode('utf-8')
offset = date.utcoffset()
# Historically, this function assumes that naive datetimes are in UTC.
if offset is None:
return time_str + '-0000'
else:
timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
hour, minute = divmod(timezone, 60)
return time_str + '%+03d%02d' % (hour, minute)
def rfc3339_date(date):
# Support datetime objects older than 1900
date = datetime_safe.new_datetime(date)
time_str = date.strftime('%Y-%m-%dT%H:%M:%S')
if six.PY2: # strftime returns a byte string in Python 2
time_str = time_str.decode('utf-8')
offset = date.utcoffset()
# Historically, this function assumes that naive datetimes are in UTC.
if offset is None:
return time_str + 'Z'
else:
timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
hour, minute = divmod(timezone, 60)
return time_str + '%+03d:%02d' % (hour, minute)
def get_tag_uri(url, date):
"""
Creates a TagURI.
See http://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id
"""
bits = urlparse(url)
d = ''
if date is not None:
d = ',%s' % datetime_safe.new_datetime(date).strftime('%Y-%m-%d')
return 'tag:%s%s:%s/%s' % (bits.hostname, d, bits.path, bits.fragment)
class SyndicationFeed(object):
"Base class for all syndication feeds. Subclasses should provide write()"
def __init__(self, title, link, description, language=None, author_email=None,
author_name=None, author_link=None, subtitle=None, categories=None,
feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
def to_unicode(s):
return force_text(s, strings_only=True)
if categories:
categories = [force_text(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
self.feed = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'language': to_unicode(language),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'subtitle': to_unicode(subtitle),
'categories': categories or (),
'feed_url': iri_to_uri(feed_url),
'feed_copyright': to_unicode(feed_copyright),
'id': feed_guid or link,
'ttl': ttl,
}
self.feed.update(kwargs)
self.items = []
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, unique_id_is_permalink=None, enclosure=None,
categories=(), item_copyright=None, ttl=None, updateddate=None,
enclosures=None, **kwargs):
"""
Adds an item to the feed. All args are expected to be Python Unicode
objects except pubdate and updateddate, which are datetime.datetime
objects, and enclosures, which is an iterable of instances of the
Enclosure class.
"""
def to_unicode(s):
return force_text(s, strings_only=True)
if categories:
categories = [to_unicode(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
if enclosure is None:
enclosures = [] if enclosures is None else enclosures
else:
warnings.warn(
"The enclosure keyword argument is deprecated, "
"use enclosures instead.",
RemovedInDjango20Warning,
stacklevel=2,
)
enclosures = [enclosure]
item = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'pubdate': pubdate,
'updateddate': updateddate,
'comments': to_unicode(comments),
'unique_id': to_unicode(unique_id),
'unique_id_is_permalink': unique_id_is_permalink,
'enclosures': enclosures,
'categories': categories or (),
'item_copyright': to_unicode(item_copyright),
'ttl': ttl,
}
item.update(kwargs)
self.items.append(item)
def num_items(self):
return len(self.items)
def root_attributes(self):
"""
Return extra attributes to place on the root (i.e. feed/channel) element.
Called from write().
"""
return {}
def add_root_elements(self, handler):
"""
Add elements in the root (i.e. feed/channel) element. Called
from write().
"""
pass
def item_attributes(self, item):
"""
Return extra attributes to place on each item (i.e. item/entry) element.
"""
return {}
def add_item_elements(self, handler, item):
"""
Add elements on each item (i.e. item/entry) element.
"""
pass
def write(self, outfile, encoding):
"""
Outputs the feed in the given encoding to outfile, which is a file-like
object. Subclasses should override this.
"""
raise NotImplementedError('subclasses of SyndicationFeed must provide a write() method')
def writeString(self, encoding):
"""
Returns the feed in the given encoding as a string.
"""
s = StringIO()
self.write(s, encoding)
return s.getvalue()
def latest_post_date(self):
"""
Returns the latest item's pubdate or updateddate. If no items
have either of these attributes this returns the current date/time.
"""
latest_date = None
date_keys = ('updateddate', 'pubdate')
for item in self.items:
for date_key in date_keys:
item_date = item.get(date_key)
if item_date:
if latest_date is None or item_date > latest_date:
latest_date = item_date
return latest_date or datetime.datetime.now()
class Enclosure(object):
"Represents an RSS enclosure"
def __init__(self, url, length, mime_type):
"All args are expected to be Python Unicode objects"
self.length, self.mime_type = length, mime_type
self.url = iri_to_uri(url)
class RssFeed(SyndicationFeed):
content_type = 'application/rss+xml; charset=utf-8'
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement("rss", self.rss_attributes())
handler.startElement("channel", self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement("rss")
def rss_attributes(self):
return {"version": self._version,
"xmlns:atom": "http://www.w3.org/2005/Atom"}
def write_items(self, handler):
for item in self.items:
handler.startElement('item', self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("item")
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", self.feed['link'])
handler.addQuickElement("description", self.feed['description'])
if self.feed['feed_url'] is not None:
handler.addQuickElement("atom:link", None,
{"rel": "self", "href": self.feed['feed_url']})
if self.feed['language'] is not None:
handler.addQuickElement("language", self.feed['language'])
for cat in self.feed['categories']:
handler.addQuickElement("category", cat)
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("copyright", self.feed['feed_copyright'])
handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()))
if self.feed['ttl'] is not None:
handler.addQuickElement("ttl", self.feed['ttl'])
def endChannelElement(self, handler):
handler.endElement("channel")
@property
def mime_type(self):
warnings.warn(
'The mime_type attribute of RssFeed is deprecated. '
'Use content_type instead.',
RemovedInDjango20Warning, stacklevel=2
)
return self.content_type
class RssUserland091Feed(RssFeed):
_version = "0.91"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
class Rss201rev2Feed(RssFeed):
# Spec: http://blogs.law.harvard.edu/tech/rss
_version = "2.0"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
# Author information.
if item["author_name"] and item["author_email"]:
handler.addQuickElement("author", "%s (%s)" %
(item['author_email'], item['author_name']))
elif item["author_email"]:
handler.addQuickElement("author", item["author_email"])
elif item["author_name"]:
handler.addQuickElement("dc:creator", item["author_name"],
{"xmlns:dc": "http://purl.org/dc/elements/1.1/"})
if item['pubdate'] is not None:
handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']))
if item['comments'] is not None:
handler.addQuickElement("comments", item['comments'])
if item['unique_id'] is not None:
guid_attrs = {}
if isinstance(item.get('unique_id_is_permalink'), bool):
guid_attrs['isPermaLink'] = str(
item['unique_id_is_permalink']).lower()
handler.addQuickElement("guid", item['unique_id'], guid_attrs)
if item['ttl'] is not None:
handler.addQuickElement("ttl", item['ttl'])
# Enclosure.
if item['enclosures']:
enclosures = list(item['enclosures'])
if len(enclosures) > 1:
raise ValueError(
"RSS feed items may only have one enclosure, see "
"http://www.rssboard.org/rss-profile#element-channel-item-enclosure"
)
enclosure = enclosures[0]
handler.addQuickElement('enclosure', '', {
'url': enclosure.url,
'length': enclosure.length,
'type': enclosure.mime_type,
})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", cat)
class Atom1Feed(SyndicationFeed):
# Spec: https://tools.ietf.org/html/rfc4287
content_type = 'application/atom+xml; charset=utf-8'
ns = "http://www.w3.org/2005/Atom"
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement('feed', self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
handler.endElement("feed")
def root_attributes(self):
if self.feed['language'] is not None:
return {"xmlns": self.ns, "xml:lang": self.feed['language']}
else:
return {"xmlns": self.ns}
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']})
if self.feed['feed_url'] is not None:
handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']})
handler.addQuickElement("id", self.feed['id'])
handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()))
if self.feed['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", self.feed['author_name'])
if self.feed['author_email'] is not None:
handler.addQuickElement("email", self.feed['author_email'])
if self.feed['author_link'] is not None:
handler.addQuickElement("uri", self.feed['author_link'])
handler.endElement("author")
if self.feed['subtitle'] is not None:
handler.addQuickElement("subtitle", self.feed['subtitle'])
for cat in self.feed['categories']:
handler.addQuickElement("category", "", {"term": cat})
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("rights", self.feed['feed_copyright'])
def write_items(self, handler):
for item in self.items:
handler.startElement("entry", self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("entry")
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"})
if item['pubdate'] is not None:
handler.addQuickElement('published', rfc3339_date(item['pubdate']))
if item['updateddate'] is not None:
handler.addQuickElement('updated', rfc3339_date(item['updateddate']))
# Author information.
if item['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", item['author_name'])
if item['author_email'] is not None:
handler.addQuickElement("email", item['author_email'])
if item['author_link'] is not None:
handler.addQuickElement("uri", item['author_link'])
handler.endElement("author")
# Unique ID.
if item['unique_id'] is not None:
unique_id = item['unique_id']
else:
unique_id = get_tag_uri(item['link'], item['pubdate'])
handler.addQuickElement("id", unique_id)
# Summary.
if item['description'] is not None:
handler.addQuickElement("summary", item['description'], {"type": "html"})
# Enclosures.
for enclosure in item['enclosures']:
handler.addQuickElement('link', '', {
'rel': 'enclosure',
'href': enclosure.url,
'length': enclosure.length,
'type': enclosure.mime_type,
})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", "", {"term": cat})
# Rights.
if item['item_copyright'] is not None:
handler.addQuickElement("rights", item['item_copyright'])
@property
def mime_type(self):
warnings.warn(
'The mime_type attribute of Atom1Feed is deprecated. '
'Use content_type instead.',
RemovedInDjango20Warning, stacklevel=2
)
return self.content_type
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
| bsd-3-clause | -5,429,916,121,263,096,000 | 37.784314 | 107 | 0.579048 | false |
zork9/pygame-pyMM | maproomdungeon.py | 1 | 4377 |
# Copyright (c) 2013 Johan Ceuppens.
# All rights reserved.
# Redistribution and use in source and binary forms are permitted
# provided that the above copyright notice and this paragraph are
# duplicated in all such forms and that any documentation,
# advertising materials, and other materials related to such
# distribution and use acknowledge that the software was developed
# by the Johan Ceuppens. The name of the
# Johan Ceuppens may not be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
# Copyright (C) Johan Ceuppens 2010
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pygame
from pygame.locals import *
from time import *
from maproom import *
from wall import *
class MaproomDungeon(MaproomBase):
"Room with a (big) map"
def __init__(self,x,y):
MaproomBase.__init__(self,x,y)
self.northwalls = []
self.southwalls = []
self.westwalls = []
self.eastwalls= []
self.gameobjects = []
self.tileboxes = []
self.pits = []
self.ropes = []
self.ladders = []
self.bullets = []
def addnorthwall(self, x,y,w,h,imagefilename):
self.northwalls.append(Wall(x,y,w,h,imagefilename))
def addsouthwall(self, x,y,w,h,imagefilename):
self.southwalls.append(Wall(x,y,w,h,imagefilename))
def addwestwall(self, x,y,w,h,imagefilename):
self.westwalls.append(Wall(x,y,w,h,imagefilename))
def addeastwall(self, x,y,w,h,imagefilename):
self.eastwalls.append(Wall(x,y,w,h,imagefilename))
def draw(self,screen):
##print "x=%d" % self.relativex
screen.blit(self.background, (0+self.relativex, 0+self.relativey))
for w in self.northwalls:
w.draw(screen,self)
for w in self.southwalls:
w.draw(screen,self)
for w in self.westwalls:
w.draw(screen,self)
for w in self.eastwalls:
w.draw(screen,self)
def collidewithladders(self, player):
for i in self.ladders:
if i != None and i.collidewithladder(self, player):
return 2
return 0
def collidewithladdersdown(self, player):
for i in self.ladders:
if i != None and i.collidewithladderdown(self, player):
return 2
return 0
# NOTE player can be enemy
def collide(self, player,hploss):
for i in self.gameobjects:
#print "go> %s" % i
if i != None and i.collide(self,player,hploss): ### NOTE hp loss of hploss
return 2 # 1 kills game
for i in self.northwalls:
if i != None and i.collide(self,player,hploss):
return 2
for i in self.southwalls:
if i != None and i.collide(self,player,hploss):
return 2
for i in self.westwalls:
if i != None and i.collide(self,player,hploss):
return 2
for i in self.eastwalls:
if i != None and i.collide(self,player,hploss):
return 2
# for i in self.tileboxes:
# if i != None and i.collide(self,player,hploss):
# #self.undomove()
# # FIXME self.undomove()
# return 2
# for i in self.pits:
# if i != None and i.collide(self,player,hploss):
# return 2
return 0
def collidewithenemy(self, enemy):
for t in self.tileboxes:
if t != None and t.collidewithenemy(self,enemy):
enemy.undomove()
return 2 # 1 kills game
return 0
def fall(self, player):
self.moveup()
for i in self.gameobjects:
if i != None and i.fallcollide(self, player):
self.movedown()
return 2 # 1 kills game
return 0
| gpl-2.0 | 2,741,953,282,796,637,700 | 31.422222 | 79 | 0.664839 | false |
gofed/gofed-ng | services/deps/service.py | 1 | 4379 | #!/bin/python
# -*- coding: utf-8 -*-
# ####################################################################
# gofed-ng - Golang system
# Copyright (C) 2016 Fridolin Pokorny, [email protected]
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# ####################################################################
import os
import shutil
import sys
from common.service.computationalService import ComputationalService
from common.service.serviceEnvelope import ServiceEnvelope
from common.service.action import action
from common.service.serviceResult import ServiceResult
from common.system.extractedRpmFile import ExtractedRpmFile
from common.system.extractedSrpmFile import ExtractedSrpmFile
from common.system.extractedTarballFile import ExtractedTarballFile
import gofedlib.gosymbolsextractor as gofedlib
class DepsService(ComputationalService):
''' Dependencies checks '''
def signal_process(self):
self.tmpfile_path = None
self.extracted1_path = None
self.extracted2_path = None
def signal_processed(self, was_error):
if self.tmpfile_path is not None:
os.remove(self.tmpfile_path)
if self.extracted1_path is not None:
shutil.rmtree(self.extracted1_path)
if self.extracted2_path is not None:
shutil.rmtree(self.extracted2_path)
@action
def deps_analysis(self, file_id, opts=None):
'''
Get deps of a file
@param file_id: file to be analysed
@param opts: additional analysis opts
@return: list of dependencies
'''
ret = ServiceResult()
default_opts = {'language': 'detect', 'tool': 'default'}
if opts is None:
opts = default_opts
else:
default_opts.update(opts)
opts = default_opts
self.tmpfile_path = self.get_tmp_filename()
with self.get_system() as system:
f = system.download(file_id, self.tmpfile_path)
self.extracted1_path = self.get_tmp_dirname()
d = f.unpack(self.extracted1_path)
if isinstance(d, ExtractedRpmFile):
src_path = d.get_content_path()
elif isinstance(d, ExtractedTarballFile):
src_path = d.get_path()
elif isinstance(d, ExtractedSrpmFile):
# we have to unpack tarball first
t = d.get_tarball()
self.extracted2_path = self.get_tmp_dirname()
            d = t.unpack(self.extracted2_path)
src_path = d.get_path()
else:
raise ValueError("Filetype %s cannot be processed" % (d.get_type(),))
# TODO: handle opts
try:
ret.result = gofedlib.project_packages(src_path)
except:
exc_info = sys.exc_info()
ret.meta['error'] = [ str(exc_info[0]), str(exc_info[1]), str(exc_info[2])]
finally:
ret.meta['language'] = 'golang'
ret.meta['tool'] = 'gofedlib'
return ret
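    # Illustrative opts payload (keys mirror default_opts above; the exact call
    # style through the service layer is an assumption):
    #   deps_analysis(file_id, opts={'language': 'golang', 'tool': 'gofedlib'})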
@action
def deps_diff(self, deps1, deps2, opts=None):
'''
Make a diff of dependencies
@param deps1: the first dependency list
@param deps2: the second dependency list
@param opts: additional analysis opts
@return: list of dependency differences
'''
default_opts = {'language': 'detect', 'tool': 'default'}
ret = ServiceResult()
if opts is None:
opts = default_opts
else:
default_opts.update(opts)
opts = default_opts
# TODO: implement deps difference
raise NotImplementedError("Currently not implemented")
return ret
if __name__ == "__main__":
ServiceEnvelope.serve(DepsService)
| gpl-3.0 | 6,155,984,285,590,203,000 | 33.480315 | 87 | 0.622745 | false |
lcy-seso/models | fluid/ocr_recognition/_ce.py | 1 | 1463 | # this file is only used for continuous evaluation test!
import os
import sys
sys.path.append(os.environ['ceroot'])
from kpi import CostKpi, DurationKpi, AccKpi
# NOTE kpi.py should be shared in models in some way!!!!
train_cost_kpi = CostKpi('train_cost', 0.05, 0, actived=True)
test_acc_kpi = AccKpi('test_acc', 0.005, 0, actived=True)
train_duration_kpi = DurationKpi('train_duration', 0.06, 0, actived=True)
train_acc_kpi = AccKpi('train_acc', 0.005, 0, actived=True)
tracking_kpis = [
train_acc_kpi,
train_cost_kpi,
test_acc_kpi,
train_duration_kpi,
]
def parse_log(log):
'''
This method should be implemented by model developers.
The suggestion:
each line in the log should be key, value, for example:
"
train_cost\t1.0
test_cost\t1.0
train_cost\t1.0
train_cost\t1.0
train_acc\t1.2
"
'''
for line in log.split('\n'):
fs = line.strip().split('\t')
print(fs)
if len(fs) == 3 and fs[0] == 'kpis':
kpi_name = fs[1]
kpi_value = float(fs[2])
yield kpi_name, kpi_value
def log_to_ce(log):
kpi_tracker = {}
for kpi in tracking_kpis:
kpi_tracker[kpi.name] = kpi
for (kpi_name, kpi_value) in parse_log(log):
print(kpi_name, kpi_value)
kpi_tracker[kpi_name].add_record(kpi_value)
kpi_tracker[kpi_name].persist()
if __name__ == '__main__':
log = sys.stdin.read()
log_to_ce(log)
| apache-2.0 | -8,328,208,520,477,808,000 | 22.983607 | 73 | 0.608339 | false |
asphalt-framework/asphalt-mailer | tests/test_api.py | 1 | 4504 | from email.headerregistry import Address
from email.message import EmailMessage
from typing import Union, Iterable
import pytest
from asphalt.mailer.api import Mailer
class DummyMailer(Mailer):
def __init__(self, **message_defaults):
super().__init__(message_defaults)
self.messages = []
async def deliver(self, messages: Union[EmailMessage, Iterable[EmailMessage]]):
messages = [messages] if isinstance(messages, EmailMessage) else messages
self.messages.extend(messages)
@pytest.fixture
def kwargs():
return {
'charset': 'iso-8859-1',
'plain_body': 'Hello åäö',
'html_body': '<html><body>Hello åäö</body></html>',
'to': [Address('Test Recipient', 'test', 'domain.country'), '[email protected]'],
'cc': [Address('Test CC', 'testcc', 'domain.country'), '[email protected]'],
'bcc': [Address('Test BCC', 'testbcc', 'domain.country'), '[email protected]']
}
@pytest.fixture
def mailer():
return DummyMailer()
@pytest.mark.parametrize('plain_body, html_body', [
(True, True), (True, False), (False, True)
], ids=['both', 'plain', 'html'])
def test_create_message(mailer, kwargs, plain_body, html_body):
if not plain_body:
del kwargs['plain_body']
if not html_body:
del kwargs['html_body']
msg = mailer.create_message(subject='test subject', sender='[email protected]', **kwargs)
assert msg['From'] == '[email protected]'
assert msg['Subject'] == 'test subject'
assert msg['To'] == 'Test Recipient <[email protected]>, [email protected]'
assert msg['Cc'] == 'Test CC <[email protected]>, [email protected]'
assert msg['Bcc'] == 'Test BCC <[email protected]>, [email protected]'
if plain_body and html_body:
assert msg['Content-Type'] == 'multipart/alternative'
plain_part, html_part = msg.iter_parts()
elif plain_body:
plain_part, html_part = msg, None
else:
plain_part, html_part = None, msg
if plain_part:
assert plain_part['Content-Type'] == 'text/plain; charset="iso-8859-1"'
assert plain_part.get_content() == 'Hello åäö\n'
if html_part:
assert html_part['Content-Type'] == 'text/html; charset="iso-8859-1"'
assert html_part.get_content() == '<html><body>Hello åäö</body></html>\n'
def test_message_defaults():
"""
Test that message defaults are applied when the corresponding arguments have been omitted.
"""
mailer = DummyMailer(subject='default_subject', sender='[email protected]',
to='[email protected]', cc='[email protected]',
bcc='[email protected]', charset='utf-16')
msg = mailer.create_message(plain_body='Hello åäö')
assert msg['Subject'] == 'default_subject'
assert msg['From'] == '[email protected]'
assert msg['To'] == '[email protected]'
assert msg['Cc'] == '[email protected]'
assert msg['Bcc'] == '[email protected]'
assert msg.get_charsets() == ['utf-16']
def test_add_attachment(mailer):
msg = mailer.create_message(subject='foo')
mailer.add_attachment(msg, b'binary content', filename='test')
attachments = list(msg.iter_attachments())
assert len(attachments) == 1
assert attachments[0]['Content-Type'] == 'application/octet-stream'
assert attachments[0]['Content-Disposition'] == 'attachment; filename="test"'
@pytest.mark.asyncio
async def test_add_file_attachment(mailer):
msg = mailer.create_message(subject='foo')
await mailer.add_file_attachment(msg, __file__)
attachments = list(msg.iter_attachments())
assert len(attachments) == 1
assert attachments[0]['Content-Type'] == 'text/x-python'
assert attachments[0]['Content-Disposition'] == 'attachment; filename="test_api.py"'
def test_add_attachment_bad_mime_type(mailer):
msg = mailer.create_message(subject='foo')
exc = pytest.raises(ValueError, mailer.add_attachment, msg, b'abc', 'file.dat', '/badtype')
assert str(exc.value) == 'mimetype must be a string in the "maintype/subtype" format'
@pytest.mark.asyncio
async def test_create_and_deliver(mailer, kwargs):
await mailer.create_and_deliver(subject='test subject', sender='[email protected]', **kwargs)
assert len(mailer.messages) == 1
assert isinstance(mailer.messages[0], EmailMessage)
assert mailer.messages[0]['From'] == '[email protected]'
| apache-2.0 | -8,442,663,657,538,835,000 | 37.698276 | 95 | 0.658721 | false |
dansan/spring-replay-site | spring_replay_site/settings.py | 1 | 5436 | from os.path import abspath, dirname, join as path_join
from django.conf import global_settings
BASE_DIR = dirname(dirname(abspath(__file__)))
SRS_FILE_ROOT = path_join(BASE_DIR, "srs")
IMG_PATH = path_join(SRS_FILE_ROOT, "static/img")
MAPS_PATH = path_join(SRS_FILE_ROOT, "static/maps")
REPLAYS_PATH = path_join(SRS_FILE_ROOT, "static/replays")
FONTS_PATH = path_join(SRS_FILE_ROOT, "static/fonts")
TS_HISTORY_GRAPHS_PATH = path_join(SRS_FILE_ROOT, "ts_graphs")
THUMBNAIL_SIZES = {"home": (150, 100), "replay": (340, 1000)}
LOGIN_URL = "/login/"
LOGOUT_URL = "/logout/"
ACCOUNT_ACTIVATION_DAYS = 4
REGISTRATION_OPEN = True
EMAIL_HOST = "localhost"
DEFAULT_FROM_EMAIL = "[email protected]"
LOGIN_REDIRECT_URL = "/"
DATE_FORMAT = "d.m.Y"
DATETIME_FORMAT = DATE_FORMAT
SHORT_DATE_FORMAT = "d.m.Y"
# SHORT_DATETIME_FORMAT = 'd.m.Y H:i:s (T)'
SHORT_DATETIME_FORMAT = SHORT_DATE_FORMAT
AUTHENTICATION_BACKENDS = ["lobbyauth.lobbybackend.LobbyBackend"] + list(
global_settings.AUTHENTICATION_BACKENDS
)
XMLRPC_METHODS = (("srs.upload.xmlrpc_upload", "xmlrpc_upload"),)
INDEX_REPLAY_RANGE = 12
AUTH_PROFILE_MODULE = "lobbyauth.UserProfile"
DATA_UPLOAD_MAX_MEMORY_SIZE = 31457280
TIME_ZONE = "Europe/Berlin"
LANGUAGE_CODE = "en-us"
SITE_ID = 1
USE_I18N = True
USE_L10N = False
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
MEDIA_ROOT = path_join(SRS_FILE_ROOT, "static/media")
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = path_join(SRS_FILE_ROOT, "static")
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sitemaps",
"srs",
"infolog_upload",
"lobbyauth",
"django_comments",
"django_xmlrpc",
"eztables",
"django_extensions",
"jsonrpc",
"background_task",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
# 'django.middleware.http.ConditionalGetMiddleware',
# 'django.middleware.cache.CacheMiddleware',
]
ROOT_URLCONF = "spring_replay_site.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [path_join(BASE_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "spring_replay_site.wsgi.application"
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = "/static/"
##########################################################################################
# SECURE_CONTENT_TYPE_NOSNIFF = True
# SECURE_BROWSER_XSS_FILTER = True
# X_FRAME_OPTIONS = "DENY"
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
}
},
"loggers": {
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
},
},
}
# import site specific settings, defaults first
from .local_settings_ import *
# now overwrite default settings
try:
from .local_settings import *
except ImportError:
print(
"ERROR: Please copy local_settings_.py to local_settings.py, and overwrite\n default settings there."
)
exit(1)
| gpl-3.0 | -5,946,639,781,393,324,000 | 30.604651 | 115 | 0.673105 | false |
openenglishbible/USFM-Tools | transform/support/asciiRenderer.py | 1 | 5744 | # -*- coding: utf-8 -*-
#
import codecs
import io
import os
import textwrap
import abstractRenderer
#
# Simplest renderer. Ignores everything except ascii text.
#
class Renderer(abstractRenderer.AbstractRenderer):
def __init__(self, inputDir, outputDir, outputName, config):
self.identity = 'ascii renderer'
self.outputDescription = os.path.join(outputDir, outputName + '.txt')
abstractRenderer.AbstractRenderer.__init__(self, inputDir, outputDir, outputName, config)
# Unset
self.f = None # output file stream
# IO
self.inputDir = inputDir
self.outputFilename = os.path.join(outputDir, outputName + '.txt')
# Flags
self.d = False
self.narrower = False
self.inX = False
self.inND = False
def render(self):
self.f = io.StringIO()
self.loadUSFM(self.inputDir)
self.run()
v = self.f.getvalue()
self.f.close()
encoding=self.config.get('Plain Text','encoding')
if encoding == 'ascii':
self.logger.debug('Converting to ascii')
v = self.clean(v)
if self.config.get('Plain Text','wrapping'):
self.logger.debug('Wrapping')
v = self.wrap(v)
o = open(self.outputFilename, 'w', encoding=encoding)
o.write(v)
o.close()
self.logger.debug('Saved as ' + encoding)
# Support
def wrap(self, t):
nl = ''
for i in t.split('\n'):
nl = nl + textwrap.fill(i, width=80) + '\n'
return nl
def clean(self, text):
t = text.replace('‘', "'")
t = t.replace('’', "'")
t = t.replace('“', '"')
t = t.replace('”', '"')
t = t.replace('—', '--') # mdash
t = t.replace('\u2013', '--') # ndash
t = t.replace('\u2026', '...') # ellipsis
return t
def startNarrower(self, n):
s = '\n'
if not self.narrower: s = s + '\n'
self.narrower = True
return s + ' ' * n
def stopNarrower(self):
self.narrower = False
return ''
def startD(self):
self.d = True
return ''
def stopD(self):
self.d = False
return ''
def escape(self, text):
t = text
if self.inX:
return ''
t = t.upper() if self.inND else t
return t
def box(self, text):
t = (80 * '#') + '\n'
t = t + '#' + (78 * ' ') + '#\n'
t = t + '#' + text.center(78) + '#\n'
t = t + '#' + (78 * ' ') + '#\n'
t = t + (80 * '#') + '\n'
return t
def center(self, text):
return text.center(80)
# Tokens
def render_h(self, token): self.f.write('\n\n\n' + self.box(token.value) + '\n\n')
def render_mt1(self, token): self.f.write(self.center(token.value.upper()) + '\n')
def render_mt2(self, token): self.f.write(self.center(token.value.upper()) + '\n')
def render_mt3(self, token): self.f.write(self.center(token.value.upper()) + '\n')
def render_ms1(self, token): self.f.write('\n\n' + self.center('[' + token.value + ']') + '\n\n')
def render_ms2(self, token): self.f.write('\n\n' + self.center('[' + token.value + ']') + '\n\n')
def render_m(self, token): self.f.write(self.stopD() + self.stopNarrower() + '\n')
def render_p(self, token): self.f.write(self.stopD() + self.stopNarrower() + '\n ')
# Ignore indenting
def render_pi(self, token): self.f.write(self.stopD() + self.stopNarrower() + '\n ')
def render_b(self, token): self.f.write(self.stopD() + self.stopNarrower() + '\n ')
def render_s1(self, token): self.f.write(self.stopD() + self.stopNarrower() + '\n\n*' + token.value + '*\n ')
def render_s2(self, token): self.f.write(self.stopD() + self.stopNarrower() + '\n\n*' + token.value + '*\n ')
def render_c(self, token): self.f.write(' ' )
def render_v(self, token): self.f.write(' ' )
def render_text(self, token): self.f.write(self.escape(token.value))
def render_q(self, token): self.f.write(self.stopD() + self.startNarrower(1))
def render_q1(self, token): self.f.write(self.stopD() + self.startNarrower(1))
def render_q2(self, token): self.f.write(self.stopD() + self.startNarrower(2))
def render_q3(self, token): self.f.write(self.stopD() + self.startNarrower(3))
def render_nb(self, token): self.f.write(self.stopD() + self.stopNarrower() + "\n\n")
def render_li(self, token): self.f.write(' ')
def render_d(self, token): self.f.write(self.startD())
def render_sp(self, token): self.f.write(self.startD())
def render_pbr(self, token): self.f.write('\n')
def render_nd_s(self, token): self.inND = True
def render_nd_e(self, token): self.inND = False
# Ignore...
def render_x_s(self,token): self.inX = True
def render_x_e(self,token): self.inX = False
# Irrelevant
def render_pb(self,token): pass
def render_wj_s(self,token): pass
def render_wj_e(self,token): pass
def render_qs_s(self, token): pass
def render_qs_e(self, token): pass
def render_em_s(self, token): pass
def render_em_e(self, token): pass
def render_f_s(self,token): self.f.write('{ ')
def render_f_e(self,token): self.f.write(' }')
def render_fr(self, token): self.f.write('(' + self.escape(token.value) + ') ')
def render_ft(self, token): pass
def render_periph(self, token): pass
| mit | -775,057,629,401,138,200 | 34.8375 | 121 | 0.537147 | false |
bsanders/kazoo | kazoo/tests/test_counter.py | 1 | 1240 | import uuid
from nose.tools import eq_
from kazoo.testing import KazooTestCase
class KazooCounterTests(KazooTestCase):
def _makeOne(self, **kw):
path = "/" + uuid.uuid4().hex
return self.client.Counter(path, **kw)
def test_int_counter(self):
counter = self._makeOne()
eq_(counter.value, 0)
counter += 2
counter + 1
eq_(counter.value, 3)
counter -= 3
counter - 1
eq_(counter.value, -1)
def test_float_counter(self):
counter = self._makeOne(default=0.0)
eq_(counter.value, 0.0)
counter += 2.1
eq_(counter.value, 2.1)
counter -= 3.1
eq_(counter.value, -1.0)
def test_errors(self):
counter = self._makeOne()
self.assertRaises(TypeError, counter.__add__, 2.1)
self.assertRaises(TypeError, counter.__add__, b"a")
def test_pre_post_values(self):
counter = self._makeOne()
eq_(counter.value, 0)
eq_(counter.pre_value, None)
eq_(counter.post_value, None)
counter += 2
eq_(counter.pre_value, 0)
eq_(counter.post_value, 2)
counter -= 3
eq_(counter.pre_value, 2)
eq_(counter.post_value, -1)
| apache-2.0 | 4,873,811,123,374,780,000 | 25.382979 | 59 | 0.558871 | false |
ophiry/dvc | dvc/logger.py | 1 | 1532 | import sys
import logging
import colorama
colorama.init()
class Logger(object):
DEFAULT_LEVEL = logging.INFO
LEVEL_MAP = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warn': logging.WARNING,
'error': logging.ERROR
}
COLOR_MAP = {
'debug': colorama.Fore.BLUE,
'warn': colorama.Fore.YELLOW,
'error': colorama.Fore.RED
}
logging.basicConfig(stream=sys.stdout, format='%(message)s', level=DEFAULT_LEVEL)
_logger = logging.getLogger('dvc')
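    # Illustrative usage (messages are made up):
    #   Logger.set_level('debug')
    #   Logger.info('stage completed')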
@staticmethod
def set_level(level):
Logger._logger.setLevel(Logger.LEVEL_MAP.get(level.lower(), logging.DEBUG))
@staticmethod
def be_quiet():
Logger._logger.setLevel(logging.CRITICAL)
@staticmethod
def be_verbose():
Logger._logger.setLevel(logging.DEBUG)
@staticmethod
def colorize(msg, typ):
header = ''
footer = ''
if sys.stdout.isatty():
header = Logger.COLOR_MAP.get(typ.lower(), '')
footer = colorama.Style.RESET_ALL
return u'{}{}{}'.format(header, msg, footer)
@staticmethod
def error(msg):
return Logger._logger.error(Logger.colorize(msg, 'error'))
@staticmethod
def warn(msg):
return Logger._logger.warn(Logger.colorize(msg, 'warn'))
@staticmethod
def debug(msg):
return Logger._logger.debug(Logger.colorize(msg, 'debug'))
@staticmethod
def info(msg):
return Logger._logger.info(Logger.colorize(msg, 'info'))
| apache-2.0 | -5,935,070,204,051,028,000 | 22.569231 | 85 | 0.609008 | false |
fzimmermann89/pyload | module/plugins/internal/Captcha.py | 1 | 4078 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import time
from module.plugins.internal.Plugin import Plugin
from module.plugins.internal.utils import encode
class Captcha(Plugin):
__name__ = "Captcha"
__type__ = "captcha"
__version__ = "0.47"
__status__ = "stable"
__description__ = """Base anti-captcha plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "[email protected]")]
    def __init__(self, plugin): #@TODO: Pass pyfile instead of plugin, so store plugin's html in its associated pyfile as data
self._init(plugin.pyload)
self.plugin = plugin
self.task = None #: captchaManager task
self.init()
def _log(self, level, plugintype, pluginname, messages):
messages = (self.__name__,) + messages
return self.plugin._log(level, plugintype, self.plugin.__name__, messages)
def recognize(self, image):
"""
Extend to build your custom anti-captcha ocr
"""
pass
def decrypt(self, url, get={}, post={}, ref=False, cookies=True, decode=False, req=None,
input_type='jpg', output_type='textual', ocr=True, timeout=120):
img = self.load(url, get=get, post=post, ref=ref, cookies=cookies, decode=decode, req=req or self.plugin.req)
return self.decrypt_image(img, input_type, output_type, ocr, timeout)
def decrypt_image(self, data, input_type='jpg', output_type='textual', ocr=False, timeout=120):
"""
Loads a captcha and decrypts it with ocr, plugin, user input
:param data: image raw data
        :param input_type: file type of the captcha image (e.g. 'jpg')
        :param output_type: 'textual' if text is written on the captcha\
                            or 'positional' for captchas where the user has to click\
                            on a specific region of the captcha
        :param ocr: if True (or the name of an OCR plugin), try to solve the captcha via OCR
:return: result of decrypting
"""
result = ""
time_ref = ("%.2f" % time.time())[-6:].replace(".", "")
with open(os.path.join("tmp", "captcha_image_%s_%s.%s" % (self.plugin.__name__, time_ref, input_type)), "wb") as tmp_img:
tmp_img.write(encode(data))
if ocr:
if isinstance(ocr, basestring):
OCR = self.pyload.pluginManager.loadClass("captcha", ocr) #: Rename `captcha` to `ocr` in 0.4.10
result = OCR(self.plugin).recognize(tmp_img.name)
else:
result = self.recognize(tmp_img.name)
if not result:
captchaManager = self.pyload.captchaManager
try:
self.task = captchaManager.newTask(data, input_type, tmp_img.name, output_type)
captchaManager.handleCaptcha(self.task)
self.task.setWaiting(max(timeout, 50)) #@TODO: Move to `CaptchaManager` in 0.4.10
while self.task.isWaiting():
self.plugin.check_status()
time.sleep(1)
finally:
captchaManager.removeTask(self.task)
if self.task.error:
self.fail(self.task.error)
elif not self.task.result:
self.plugin.retry_captcha(msg=_("No captcha result obtained in appropriate time"))
result = self.task.result
if not self.pyload.debug:
try:
os.remove(tmp_img.name)
except OSError, e:
self.log_warning(_("Error removing `%s`") % tmp_img.name, e)
# self.log_info(_("Captcha result: ") + result) #@TODO: Remove from here?
return result
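    # Illustrative call from a hoster plugin (the URL and options are placeholders,
    # and the plugin is assumed to expose a Captcha instance, e.g. as self.captcha):
    #   code = self.captcha.decrypt("http://example.com/captcha.jpg",
    #                               input_type='jpg', output_type='textual', ocr=True)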
def invalid(self):
if not self.task:
return
self.log_warning(_("Invalid captcha"))
self.task.invalid()
def correct(self):
if not self.task:
return
self.log_info(_("Correct captcha"))
self.task.correct()
| gpl-3.0 | 4,668,200,575,721,839,000 | 30.859375 | 129 | 0.576018 | false |
Karaage-Cluster/karaage-debian | karaage/plugins/kgapplications/templatetags/applications.py | 1 | 3655 | # Copyright 2015 VPAC
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
""" Application specific tags. """
import django_tables2 as tables
from django import template
from karaage.people.tables import PersonTable
from ..views.base import get_state_machine
register = template.Library()
@register.simple_tag(takes_context=True)
def application_state(context, application):
""" Render current state of application, verbose. """
new_context = {
'roles': context['roles'],
'org_name': context['org_name'],
'application': application,
}
nodelist = template.loader.get_template(
'kgapplications/%s_common_state.html' % application.type)
output = nodelist.render(new_context)
return output
@register.simple_tag(takes_context=True)
def application_request(context, application):
""" Render current detail of application, verbose. """
new_context = {
'roles': context['roles'],
'org_name': context['org_name'],
'application': application,
}
nodelist = template.loader.get_template(
'kgapplications/%s_common_request.html' % application.type)
output = nodelist.render(new_context)
return output
@register.simple_tag(takes_context=True)
def application_simple_state(context, application):
""" Render current state of application, verbose. """
state_machine = get_state_machine(application)
state = state_machine.get_state(application)
return state.name
@register.inclusion_tag(
'kgapplications/common_actions.html', takes_context=True)
def application_actions(context):
""" Render actions available. """
return {
'roles': context['roles'],
'actions': context['actions'],
'extra': "",
}
@register.tag(name="application_actions_plus")
def do_application_actions_plus(parser, token):
""" Render actions available with extra text. """
nodelist = parser.parse(('end_application_actions',))
parser.delete_first_token()
return ApplicationActionsPlus(nodelist)
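# Illustrative template usage of the paired tags registered here (hypothetical snippet):
#   {% application_actions_plus %} extra HTML shown with the actions {% end_application_actions %}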
class ApplicationActionsPlus(template.Node):
""" Node for rendering actions available with extra text. """
def __init__(self, nodelist):
super(ApplicationActionsPlus, self).__init__()
self.nodelist = nodelist
def render(self, context):
extra = self.nodelist.render(context)
nodelist = template.loader.get_template(
'kgapplications/common_actions.html')
new_context = {
'roles': context['roles'],
'extra': extra,
'actions': context['actions'],
}
output = nodelist.render(new_context)
return output
@register.assignment_tag(takes_context=True)
def get_similar_people_table(context, applicant):
queryset = applicant.similar_people()
table = PersonTable(
queryset,
empty_text="(No potential duplicates found, please check manually)")
config = tables.RequestConfig(context['request'], paginate={"per_page": 5})
config.configure(table)
return table
| gpl-3.0 | 2,375,639,057,141,617,700 | 31.061404 | 79 | 0.685636 | false |
usc-isi/extra-specs | nova/tests/api/openstack/compute/contrib/test_quotas.py | 1 | 8680 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import webob
from nova.api.openstack.compute.contrib import quotas
from nova.api.openstack import wsgi
from nova import test
from nova.tests.api.openstack import fakes
def quota_set(id):
return {'quota_set': {'id': id, 'metadata_items': 128, 'volumes': 10,
'gigabytes': 1000, 'ram': 51200, 'floating_ips': 10,
'instances': 10, 'injected_files': 5, 'cores': 20,
'injected_file_content_bytes': 10240,
'security_groups': 10, 'security_group_rules': 20,
'key_pairs': 100}}
class QuotaSetsTest(test.TestCase):
def setUp(self):
super(QuotaSetsTest, self).setUp()
self.controller = quotas.QuotaSetsController()
def test_format_quota_set(self):
raw_quota_set = {
'instances': 10,
'cores': 20,
'ram': 51200,
'volumes': 10,
'floating_ips': 10,
'metadata_items': 128,
'gigabytes': 1000,
'injected_files': 5,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
'key_pairs': 100,
}
quota_set = self.controller._format_quota_set('1234', raw_quota_set)
qs = quota_set['quota_set']
self.assertEqual(qs['id'], '1234')
self.assertEqual(qs['instances'], 10)
self.assertEqual(qs['cores'], 20)
self.assertEqual(qs['ram'], 51200)
self.assertEqual(qs['volumes'], 10)
self.assertEqual(qs['gigabytes'], 1000)
self.assertEqual(qs['floating_ips'], 10)
self.assertEqual(qs['metadata_items'], 128)
self.assertEqual(qs['injected_files'], 5)
self.assertEqual(qs['injected_file_content_bytes'], 10240)
self.assertEqual(qs['security_groups'], 10)
self.assertEqual(qs['security_group_rules'], 20)
self.assertEqual(qs['key_pairs'], 100)
def test_quotas_defaults(self):
uri = '/v2/fake_tenant/os-quota-sets/fake_tenant/defaults'
req = fakes.HTTPRequest.blank(uri)
res_dict = self.controller.defaults(req, 'fake_tenant')
expected = {'quota_set': {
'id': 'fake_tenant',
'instances': 10,
'cores': 20,
'ram': 51200,
'volumes': 10,
'gigabytes': 1000,
'floating_ips': 10,
'metadata_items': 128,
'injected_files': 5,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
'key_pairs': 100,
}}
self.assertEqual(res_dict, expected)
def test_quotas_show_as_admin(self):
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234',
use_admin_context=True)
res_dict = self.controller.show(req, 1234)
self.assertEqual(res_dict, quota_set('1234'))
def test_quotas_show_as_unauthorized_user(self):
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
req, 1234)
def test_quotas_update_as_admin(self):
body = {'quota_set': {'instances': 50, 'cores': 50,
'ram': 51200, 'volumes': 10,
'gigabytes': 1000, 'floating_ips': 10,
'metadata_items': 128, 'injected_files': 5,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
'key_pairs': 100}}
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
use_admin_context=True)
res_dict = self.controller.update(req, 'update_me', body)
self.assertEqual(res_dict, body)
def test_quotas_update_as_user(self):
body = {'quota_set': {'instances': 50, 'cores': 50,
'ram': 51200, 'volumes': 10,
'gigabytes': 1000, 'floating_ips': 10,
'metadata_items': 128, 'injected_files': 5,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
'key_pairs': 100}}
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me')
self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
req, 'update_me', body)
def test_quotas_update_invalid_limit(self):
body = {'quota_set': {'instances': -2, 'cores': -2,
'ram': -2, 'volumes': -2,
'gigabytes': -2, 'floating_ips': -2,
'metadata_items': -2, 'injected_files': -2,
'injected_file_content_bytes': -2}}
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 'update_me', body)
class QuotaXMLSerializerTest(test.TestCase):
def setUp(self):
super(QuotaXMLSerializerTest, self).setUp()
self.serializer = quotas.QuotaTemplate()
self.deserializer = wsgi.XMLDeserializer()
def test_serializer(self):
exemplar = dict(quota_set=dict(
id='project_id',
metadata_items=10,
injected_file_content_bytes=20,
volumes=30,
gigabytes=40,
ram=50,
floating_ips=60,
instances=70,
injected_files=80,
security_groups=10,
security_group_rules=20,
key_pairs=100,
cores=90))
text = self.serializer.serialize(exemplar)
print text
tree = etree.fromstring(text)
self.assertEqual('quota_set', tree.tag)
self.assertEqual('project_id', tree.get('id'))
self.assertEqual(len(exemplar['quota_set']) - 1, len(tree))
for child in tree:
self.assertTrue(child.tag in exemplar['quota_set'])
self.assertEqual(int(child.text), exemplar['quota_set'][child.tag])
def test_deserializer(self):
exemplar = dict(quota_set=dict(
metadata_items='10',
injected_file_content_bytes='20',
volumes='30',
gigabytes='40',
ram='50',
floating_ips='60',
instances='70',
injected_files='80',
security_groups='10',
security_group_rules='20',
key_pairs='100',
cores='90'))
intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<quota_set>'
'<metadata_items>10</metadata_items>'
'<injected_file_content_bytes>20'
'</injected_file_content_bytes>'
'<volumes>30</volumes>'
'<gigabytes>40</gigabytes>'
'<ram>50</ram>'
'<floating_ips>60</floating_ips>'
'<instances>70</instances>'
'<injected_files>80</injected_files>'
'<security_groups>10</security_groups>'
'<security_group_rules>20</security_group_rules>'
'<key_pairs>100</key_pairs>'
'<cores>90</cores>'
'</quota_set>')
result = self.deserializer.deserialize(intext)['body']
self.assertEqual(result, exemplar)
| apache-2.0 | 7,071,073,384,831,385,000 | 38.634703 | 79 | 0.521429 | false |
googleads/google-ads-python | examples/shopping_ads/add_shopping_product_listing_group_tree.py | 1 | 15958 | #!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds a shopping listing group tree to a shopping ad group.
The example will clear an existing listing group tree and rebuild it to include
the following tree structure:
ProductCanonicalCondition NEW $0.20
ProductCanonicalCondition USED $0.10
ProductCanonicalCondition null (everything else)
    ProductBrand CoolBrand $0.90
    ProductBrand CheapBrand $0.01
    ProductBrand null (everything else) $0.50
"""
import argparse
import sys
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
last_criterion_id = 0
def _next_id():
"""Returns a decreasing negative number for temporary ad group criteria IDs.
The ad group criteria will get real IDs when created on the server.
Returns -1, -2, -3, etc. on subsequent calls.
Returns:
The string representation of a negative integer.
"""
global last_criterion_id
last_criterion_id -= 1
return str(last_criterion_id)
# [START add_shopping_product_listing_group_tree]
def main(client, customer_id, ad_group_id, replace_existing_tree):
"""Adds a shopping listing group tree to a shopping ad group.
Args:
client: An initialized Google Ads client.
customer_id: The Google Ads customer ID.
ad_group_id: The ad group ID to which the node will be added.
replace_existing_tree: Boolean, whether to replace the existing listing
group tree on the ad group. Defaults to false.
"""
# Get the AdGroupCriterionService client.
ad_group_criterion_service = client.get_service("AdGroupCriterionService")
# Optional: Remove the existing listing group tree, if it already exists
# on the ad group. The example will throw a LISTING_GROUP_ALREADY_EXISTS
# error if a listing group tree already exists and this option is not
# set to true.
if replace_existing_tree:
_remove_listing_group_tree(client, customer_id, ad_group_id)
# Create a list of ad group criteria operations.
operations = []
# Construct the listing group tree "root" node.
# Subdivision node: (Root node)
ad_group_criterion_root_operation = _create_listing_group_subdivision(
client, customer_id, ad_group_id
)
# Get the resource name that will be used for the root node.
# This resource has not been created yet and will include the temporary
# ID as part of the criterion ID.
ad_group_criterion_root_resource_name = (
ad_group_criterion_root_operation.create.resource_name
)
operations.append(ad_group_criterion_root_operation)
# Construct the listing group unit nodes for NEW, USED, and other.
product_condition_enum = client.enums.ProductConditionEnum
condition_dimension_info = client.get_type("ListingDimensionInfo")
# Biddable Unit node: (Condition NEW node)
# * Product Condition: NEW
# * CPC bid: $0.20
condition_dimension_info.product_condition.condition = (
product_condition_enum.NEW
)
operations.append(
_create_listing_group_unit_biddable(
client,
customer_id,
ad_group_id,
ad_group_criterion_root_resource_name,
condition_dimension_info,
200_000,
)
)
# Biddable Unit node: (Condition USED node)
# * Product Condition: USED
# * CPC bid: $0.10
condition_dimension_info.product_condition.condition = (
product_condition_enum.USED
)
operations.append(
_create_listing_group_unit_biddable(
client,
customer_id,
ad_group_id,
ad_group_criterion_root_resource_name,
condition_dimension_info,
100_000,
)
)
# Sub-division node: (Condition "other" node)
# * Product Condition: (not specified)
# Note that all sibling nodes must have the same dimension type, even if
# they don't contain a bid.
client.copy_from(
condition_dimension_info.product_condition,
client.get_type("ProductConditionInfo"),
)
ad_group_criterion_other_operation = _create_listing_group_subdivision(
client,
customer_id,
ad_group_id,
ad_group_criterion_root_resource_name,
condition_dimension_info,
)
# Get the resource name that will be used for the condition other node.
# This resource has not been created yet and will include the temporary
# ID as part of the criterion ID.
ad_group_criterion_other_resource_name = (
ad_group_criterion_other_operation.create.resource_name
)
operations.append(ad_group_criterion_other_operation)
# Build the listing group nodes for CoolBrand, CheapBrand, and other.
brand_dimension_info = client.get_type("ListingDimensionInfo")
# Biddable Unit node: (Brand CoolBrand node)
# * Brand: CoolBrand
# * CPC bid: $0.90
brand_dimension_info.product_brand.value = "CoolBrand"
operations.append(
_create_listing_group_unit_biddable(
client,
customer_id,
ad_group_id,
ad_group_criterion_other_resource_name,
brand_dimension_info,
900_000,
)
)
# Biddable Unit node: (Brand CheapBrand node)
# * Brand: CheapBrand
# * CPC bid: $0.01
brand_dimension_info.product_brand.value = "CheapBrand"
operations.append(
_create_listing_group_unit_biddable(
client,
customer_id,
ad_group_id,
ad_group_criterion_other_resource_name,
brand_dimension_info,
10_000,
)
)
# Biddable Unit node: (Brand other node)
# * CPC bid: $0.05
client.copy_from(
brand_dimension_info.product_brand,
client.get_type("ProductBrandInfo"),
)
operations.append(
_create_listing_group_unit_biddable(
client,
customer_id,
ad_group_id,
ad_group_criterion_other_resource_name,
brand_dimension_info,
50_000,
)
)
# Add the ad group criteria.
mutate_ad_group_criteria_response = (
ad_group_criterion_service.mutate_ad_group_criteria(
customer_id=customer_id, operations=operations
)
)
# Print the results of the successful mutates.
print(
"Added ad group criteria for the listing group tree with the "
"following resource names:"
)
for result in mutate_ad_group_criteria_response.results:
print(f"\t{result.resource_name}")
print(f"{len(mutate_ad_group_criteria_response.results)} criteria added.")
# [END add_shopping_product_listing_group_tree]
def _remove_listing_group_tree(client, customer_id, ad_group_id):
"""Removes ad group criteria for an ad group's existing listing group tree.
Args:
client: An initialized Google Ads client.
customer_id: The Google Ads customer ID.
ad_group_id: The ad group ID from which to remove the listing group
tree.
"""
# Get the GoogleAdsService client.
googleads_service = client.get_service("GoogleAdsService")
print("Removing existing listing group tree...")
# Create a search Google Ads request that will retrieve all listing groups
# where the parent ad group criterion is NULL (and hence the root node in
# the tree) for a given ad group id.
query = f"""
SELECT ad_group_criterion.resource_name
FROM ad_group_criterion
WHERE
ad_group_criterion.type = LISTING_GROUP
AND ad_group_criterion.listing_group.parent_ad_group_criterion IS NULL
AND ad_group.id = {ad_group_id}"""
results = googleads_service.search(customer_id=customer_id, query=query)
ad_group_criterion_operations = []
# Iterate over all rows to find the ad group criteria to remove.
for row in results:
criterion = row.ad_group_criterion
print(
"Found an ad group criterion with resource name: "
f"'{criterion.resource_name}'."
)
ad_group_criterion_operation = client.get_type(
"AdGroupCriterionOperation"
)
ad_group_criterion_operation.remove = criterion.resource_name
ad_group_criterion_operations.append(ad_group_criterion_operation)
if ad_group_criterion_operations:
# Remove the ad group criteria that define the listing group tree.
ad_group_criterion_service = client.get_service(
"AdGroupCriterionService"
)
response = ad_group_criterion_service.mutate_ad_group_criteria(
customer_id=customer_id, operations=ad_group_criterion_operations
)
print(f"Removed {len(response.results)} ad group criteria.")
def _create_listing_group_subdivision(
client,
customer_id,
ad_group_id,
parent_ad_group_criterion_resource_name=None,
listing_dimension_info=None,
):
"""Creates a new criterion containing a subdivision listing group node.
    If the parent ad group criterion resource name or listing dimension info is
    not specified, this method creates a root node.
Args:
client: An initialized Google Ads client.
customer_id: The Google Ads customer ID.
ad_group_id: The ad group ID to which the node will be added.
parent_ad_group_criterion_resource_name: The string resource name of the
parent node to which this listing will be attached.
listing_dimension_info: A ListingDimensionInfo object containing details
for this listing.
Returns:
An AdGroupCriterionOperation containing a populated ad group criterion.
"""
# Create an ad group criterion operation and populate the criterion.
operation = client.get_type("AdGroupCriterionOperation")
ad_group_criterion = operation.create
# The resource name the criterion will be created with. This will define
# the ID for the ad group criterion.
ad_group_criterion.resource_name = client.get_service(
"AdGroupCriterionService"
).ad_group_criterion_path(customer_id, ad_group_id, _next_id())
ad_group_criterion.status = client.enums.AdGroupCriterionStatusEnum.ENABLED
listing_group_info = ad_group_criterion.listing_group
# Set the type as a SUBDIVISION, which will allow the node to be the
# parent of another sub-tree.
listing_group_info.type_ = client.enums.ListingGroupTypeEnum.SUBDIVISION
# If parent_ad_group_criterion_resource_name and listing_dimension_info
# are not null, create a non-root division by setting its parent and case
# value.
if (
parent_ad_group_criterion_resource_name
and listing_dimension_info != None
):
# Set the ad group criterion resource name for the parent listing group.
# This can include a temporary ID if the parent criterion is not yet
# created.
listing_group_info.parent_ad_group_criterion = (
parent_ad_group_criterion_resource_name
)
# Case values contain the listing dimension used for the node.
client.copy_from(listing_group_info.case_value, listing_dimension_info)
return operation
def _create_listing_group_unit_biddable(
client,
customer_id,
ad_group_id,
parent_ad_group_criterion_resource_name,
listing_dimension_info,
cpc_bid_micros=None,
):
"""Creates a new criterion containing a biddable unit listing group node.
Args:
client: An initialized Google Ads client.
customer_id: The Google Ads customer ID.
ad_group_id: The ad group ID to which the node will be added.
parent_ad_group_criterion_resource_name: The string resource name of the
parent node to which this listing will be attached.
listing_dimension_info: A ListingDimensionInfo object containing details
for this listing.
cpc_bid_micros: The cost-per-click bid for this listing in micros.
Returns:
An AdGroupCriterionOperation with a populated create field.
"""
# Note: There are two approaches for creating new unit nodes:
# (1) Set the ad group resource name on the criterion (no temporary ID
# required).
# (2) Use a temporary ID to construct the criterion resource name and set
# it to the 'resourceName' attribute.
# In both cases you must set the parent ad group criterion's resource name
# on the listing group for non-root nodes.
# This example demonstrates method (1).
operation = client.get_type("AdGroupCriterionOperation")
criterion = operation.create
criterion.ad_group = client.get_service("AdGroupService").ad_group_path(
customer_id, ad_group_id
)
criterion.status = client.enums.AdGroupCriterionStatusEnum.ENABLED
# Set the bid for this listing group unit.
# This will be used as the CPC bid for items that are included in this
# listing group.
if cpc_bid_micros:
criterion.cpc_bid_micros = cpc_bid_micros
listing_group = criterion.listing_group
# Set the type as a UNIT, which will allow the group to be biddable.
listing_group.type_ = client.enums.ListingGroupTypeEnum.UNIT
# Set the ad group criterion resource name for the parent listing group.
# This can have a temporary ID if the parent criterion is not yet created.
listing_group.parent_ad_group_criterion = (
parent_ad_group_criterion_resource_name
)
# Case values contain the listing dimension used for the node.
if listing_dimension_info != None:
client.copy_from(listing_group.case_value, listing_dimension_info)
return operation
if __name__ == "__main__":
googleads_client = GoogleAdsClient.load_from_storage(version="v8")
parser = argparse.ArgumentParser(
description="Add shopping product listing group tree to a shopping ad "
"group."
)
# The following argument(s) should be provided to run the example.
parser.add_argument(
"-c",
"--customer_id",
type=str,
required=True,
help="The Google Ads customer ID.",
)
parser.add_argument(
"-a",
"--ad_group_id",
type=str,
required=True,
help="The ID of the ad group that will receive the listing group tree.",
)
parser.add_argument(
"-r",
"--replace_existing_tree",
action="store_true",
required=False,
default=False,
help="Optional, whether to replace the existing listing group tree on "
"the ad group if one already exists. Defaults to false.",
)
args = parser.parse_args()
try:
main(
googleads_client,
args.customer_id,
args.ad_group_id,
args.replace_existing_tree,
)
except GoogleAdsException as ex:
print(
f"Request with ID '{ex.request_id}' failed with status "
f"'{ex.error.code().name}' and includes the following errors:"
)
for error in ex.failure.errors:
print(f"\tError with message '{error.message}'.")
if error.location:
for field_path_element in error.location.field_path_elements:
print(f"\t\tOn field: {field_path_element.field_name}")
sys.exit(1)
| apache-2.0 | 7,351,844,020,958,273,000 | 35.43379 | 80 | 0.665058 | false |
appium/appium | sample-code/python/test/test_ios_selectors.py | 1 | 1831 | import pytest
import os
import copy
from appium import webdriver
from helpers import report_to_sauce, take_screenshot_and_syslog, IOS_BASE_CAPS, EXECUTOR
class TestIOSSelectors():
@pytest.fixture(scope='function')
def driver(self, request, device_logger):
calling_request = request._pyfuncitem.name
caps = copy.copy(IOS_BASE_CAPS)
caps['name'] = calling_request
driver = webdriver.Remote(
command_executor=EXECUTOR,
desired_capabilities=caps
)
def fin():
report_to_sauce(driver.session_id)
take_screenshot_and_syslog(driver, device_logger, calling_request)
driver.quit()
request.addfinalizer(fin)
driver.implicitly_wait(10)
return driver
def test_should_find_elements_by_accessibility_id(self, driver):
search_parameters_element = driver.find_elements_by_accessibility_id('ComputeSumButton')
assert 1 == len(search_parameters_element)
def test_should_find_elements_by_class_name(self, driver):
window_elements = driver.find_elements_by_class_name('XCUIElementTypeWindow')
assert 2 == len(window_elements)
def test_should_find_elements_by_nspredicate(self, driver):
all_visible_elements = driver.find_elements_by_ios_predicate('visible = 1')
assert 24 <= len(all_visible_elements)
def test_should_find_elements_by_class_chain(self, driver):
window_element = driver.find_elements_by_ios_class_chain('XCUIElementTypeWindow[1]/*')
assert 1 == len(window_element)
def test_should_find_elements_by_xpath(self, driver):
action_bar_container_elements = driver.find_elements_by_xpath('//XCUIElementTypeWindow//XCUIElementTypeButton')
assert 7 <= len(action_bar_container_elements) <= 8
| apache-2.0 | -5,515,809,657,299,300,000 | 34.901961 | 119 | 0.680502 | false |
devincornell/networkxtimeseries | NetTS.py | 1 | 13522 |
# system imports
import multiprocessing
import pickle
import sys
from itertools import *
# anaconda imports
import networkx as nx
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class NetTS:
''' Network Time Series '''
### member vars ###
# self.nts - list of networks representing timeseries
# self.N - number of graphs in the timeseries
# self.ts is a timeseries list
def __init__(self, ts, nodes=None, edges=None, type='static_nodes', GraphType=nx.Graph):
ts = list(ts) # ts is a timeseries list
if nodes is not None: nodes = list(nodes) # nodes is a list of node names
if edges is not None: edges = list(edges) # edges is a list of edges
# set timeseries type
if type == 'static_nodes' or type == 'static_structure':
self.type = type
elif type == 'dynamic':
print('Error - choose at least a set of nodes in NetTS init.')
            print('Dynamic nodes are not yet supported.')
exit()
else:
print('network type not recognized in NetTs init.')
exit()
# make networks
self.ts = ts
self.N = len(ts)
self.nts = []
for i in range(self.N):
self.nts.append(GraphType(name=ts[i]))
# set nodes
if nodes is not None:
self.nodes = nodes
self.add_nodes(nodes)
else:
self.nodes = list()
# set edges
self.edges = edges
if edges is not None:
for t in self.ts:
for e in edges:
                    self[t].add_edge(*e)  # each edge is a (u, v) tuple
else:
self.edges = list()
self.data = {} # for user data (similar to nx.Graph.graph)
def __str__(self):
return '<NetTs:type=%s,numnodes=%d,numedges=%d>' % (
self.type,
len(self.nodes) if self.nodes is not None else -1,
len(self.edges) if self.edges is not None else -1
)
def __getitem__(self,key):
i = self.ts.index(key)
return self.nts[i]
def save_nts(self,ntsfile):
with open(ntsfile,mode='wb') as f:
data = pickle.dump(self,f)
return
def save_xgmml(self, filename):
ndf = self.getNodeAttr()
edf = self.getEdgeAttr()
with open(filename,'w') as f:
build_xgmml_file(f,ndf,edf)
return
def rm_graph(self, key):
i = self.ts.index(key)
self.nts.pop(i)
self.ts.pop(i)
self.N = len(self.ts)
def add_nodes(self, nodes, t=None):
        ''' Add the given nodes to every graph in the timeseries, or only to the graph at time t if t is given.'''
if t is None:
for t in self.ts:
for n in nodes:
self[t].add_node(n)
else:
#raise(Exception("This functionality hasn't been implemented yet."))
for n in nodes:
self[t].add_node(n)
return
##### Get/Set Graph, Node, and Edge Attributes #####
def get_node_attr(self,t=None,parallel=False):
''' Measure all node attributes across time.
'''
        ndf = self.time_measure(meas_node_attr, meas_obj='nodes', workers=multiprocessing.cpu_count() if parallel else 1)
ndf.sort_index(axis='columns',inplace=True)
return ndf
def get_edge_attr(self,t=None,parallel=False):
''' Measure all edge attributes across time.
'''
        edf = self.time_measure(meas_edge_attr, meas_obj='edges', workers=multiprocessing.cpu_count() if parallel else 1)
edf.sort_index(axis='columns',inplace=True)
return edf
def set_graph_attr(self, t, attrName, gdata):
        ''' Adds an attribute to every graph in the timeseries.
        gdata is a list of attribute values, one per time step.
        '''
        for i, t in enumerate(self.ts):
            self[t].graph[attrName] = gdata[i]
return
def set_node_attr(self, t, attrName, ndata):
        ''' Adds an attribute to every node in the network
        at time t. Name specified by attrName and data given
        in ndata, a dictionary of node->value pairs.
        '''
        for key, val in ndata.items():
            self[t].node[key][attrName] = val
return
def set_edge_attr(self, t, attrName, edata):
''' Adds an attribute to every edge in the network
at time t. Name specified by attrName and data given
in edata, a dictionary of edge(tuple)->value pairs.
'''
for i,j in edata.keys():
try:
self[t].edge[i][j]
except:
self[t].add_edge(i,j)
self.edges.append((i,j))
self[t].edge[i][j][attrName] = edata[(i,j)]
return
def set_node_attrdf(self, df):
        ''' Adds node data assuming that df is a pandas
        dataframe formatted with multiindexed columns
        (n,attr) and indexed rows with time.
        '''
for n in mdf(df.columns,()):
for attr in mdf(df.columns,(n,)):
for t in df.index:
try: self[t].node[n]
except KeyError: self[t].add_node(n)
self[t].node[n][attr] = df.loc[t,(n,attr)]
def set_edge_attrdf(self, df):
        ''' Adds edge data assuming that df is a pandas
        dataframe formatted with multiindexed columns
        (u,v,attr) and indexed rows with time.
        '''
for u in mdf(df.columns,()):
for v in mdf(df.columns,(u,)):
for attr in mdf(df.columns,(u,v)):
for t in df.index:
try:
self[t].edge[u][v]
except KeyError:
self[t].add_edge(u,v)
self[t].edge[u][v][attr] = df.loc[t,(u,v,attr)]
##### Modify the Graphs and Return NetTS #####
def modify_graphs(self, modFunc):
''' Returns a NetTs object where each graph has
been run through modFunc. modFunc
should take a graph and return a modified graph.
'''
outNet = NetTs(self.ts,nodes=self.nodes,edges=self.edges)
        for i, t in enumerate(self.ts):
            outNet.nts[i] = modFunc(self[t])  # NetTS defines no __setitem__, so assign into the underlying list
return outNet
##### Measure Properties of Graphs Over Time #####
def time_measure(self, measFunc, meas_obj='graph', addtnlArgs=list(), workers=1, verb=False):
''' Returns a multiindex dataframe of measurements for all nodes at each
point in time. measFunc should expect a graph object and return a
dictionary with (node,attr) as keys. Output: The index will be a timeseries,
columns will be multi-indexed - first by node name then by attribute.
'''
# error checking
if verb: print('error checking first graph at', meas_obj, 'level.')
        if not (meas_obj == 'graph' or meas_obj == 'nodes' or meas_obj == 'edges'):
            raise ValueError("meas_obj must be 'graph', 'nodes' or 'edges'")
trymeas = measFunc(self.nts[0], *addtnlArgs)
try: dict(trymeas)
except TypeError: print('Error in measure(): measFunc should return a dict'); exit()
if meas_obj == 'nodes' or meas_obj == 'edges':
try: [list(m) for m in trymeas];
except TypeError: print('Error in measure(): measFunc keys should follow (node,attr).'); exit()
if len(trymeas) == 0: # return empty dataframe
return pd.DataFrame()
if meas_obj == 'graph':
cols = list(trymeas.keys())
if verb: print('measuring graphs.')
elif meas_obj == 'nodes':
cols = pd.MultiIndex.from_tuples(trymeas.keys(),names=['node','attr'])
if verb: print('measuring nodes.')
elif meas_obj == 'edges':
cols = pd.MultiIndex.from_tuples(trymeas.keys(),names=['from','to','attr'])
if verb: print('measuring edges.')
df = pd.DataFrame(index=self.ts,columns=cols, dtype=np.float64)
tdata = [(self[t],t,measFunc,addtnlArgs,meas_obj,cols) for t in self.ts]
if workers <= 1:
if verb: print('measuring in one thread.')
meas = map(self._thread_time_measure, tdata)
else:
if verb: print('measuring with', workers, 'cores.')
with multiprocessing.Pool(processes=workers) as p:
meas = p.map(self._thread_time_measure, tdata)
for t,mdf in meas:
df.loc[[t],:] = mdf
df = df.sort_index(axis=1)
return df
def _thread_time_measure(self, dat):
''' This is a thread function that will call measFunc on each
network in the timeseries. measFunc is responsible for returning
a dictionary with (node,attr) keys.
'''
G,t,measFunc,addtnlArgs,meas_obj,cols = dat
meas = measFunc(G, *addtnlArgs)
return t,pd.DataFrame([meas,],index=[t,],columns=cols)
def time_plot(self, *arg, **narg):
meas = self.time_measure(*arg, **narg)
ts = range(len(self.ts))
for col in meas.columns:
plt.plot(ts, meas[col], label=col)
plt.xticks(ts, self.ts)
plt.legend()
def mdf(mi,match):
''' Returns the list of children of the ordered match
set given by match. Specifically for dataframe looping.
'''
matchfilt = filter(lambda x: x[:len(match)] == match,mi)
return set([x[len(match)] for x in matchfilt])
def from_nts(ntsfilepath):
nts = None
with open(ntsfilepath,'rb') as f:
nts = pickle.load(f)
return nts
##### Standalone Measurement Functions #####
''' These functions are used in the class but not explicitly class
members.
'''
def meas_node_attr(G):
meas = dict()
attrnames = G.nodes(data=True)[0][1].keys() # attr dict from first node
for attrname in attrnames:
attr = nx.get_node_attributes(G,attrname)
meas.update({(n,attrname):attr[n] for n in G.nodes()})
return meas
def meas_edge_attr(G):
meas = dict()
e0 = G.edges()[0]
attrnames = G.get_edge_data(e0[0],e0[1]).keys()
for attrname in attrnames:
attr = nx.get_edge_attributes(G,attrname)
meas.update({(e[0],e[1],attrname):attr[e] for e in G.edges()})
return meas
##### Change Detection Functions #####
def get_value_changes(ds):
''' Takes a data series and outputs (start,val) pairs -
one for each change in the value of the data series.
'''
changes = [(ds.index[0],ds[ds.index[0]])]
for ind in ds.index[1:]:
if ds[ind] != changes[-1][1]:
changes.append((ind,ds[ind]))
return changes
##### XGMML File Output Functions #####
def build_xgmml_file(f,ndf,edf):
''' This function builds the xml file given the file object f,
a graph df, node df, and edge df. First it will look at when
attributes change, and then use that to decide when to add an
attribute tag.
'''
t0 = edf.index[0]
tf = edf.index[-1]
f.write(header_str)
f.write(graph_start_str.format(label='mygraph'))
for n in list(set([x[0] for x in ndf.columns])):
values = {'label':str(n),'id':str(n),'start':t0,'end':tf}
f.write(node_start_str.format(**values))
for attr in [x[1] for x in filter(lambda x:x[0]==n,ndf.columns)]:
changes = get_value_changes(ndf.loc[:,(n,attr)])
write_attr(f,attr,changes,tf)
f.write(node_end_str)
for u,v in list(set([x[:2] for x in edf.columns])):
values = {'label':'(%s,%s)'%(str(u),str(v)),'source':str(u),'target':str(v),'start':t0,'end':tf}
f.write(edge_start_str.format(**values))
for attr in [x[2] for x in filter(lambda x:x[:2] == (u,v),edf.columns)]:
changes = get_value_changes(edf.loc[:,(u,v,attr)])
write_attr(f,attr,changes,tf)
f.write(edge_end_str)
f.write(graph_end_str)
return
def write_attr(f,attr,changes,tf):
if type(changes[0][1]) is str:
typ = 'string'
changes = list(map(lambda x: (x[0],str(x[1])), changes))
elif type(changes[0][1]) is int or type(changes[0][1]) is float or type(changes[0][1]) is np.int64 or type(changes[0][1]) is np.float64:
typ = 'real'
changes = list(map(lambda x: (x[0],'{:.9f}'.format(float(x[1]))), changes))
else:
print('There was an error with the attribute type of the network timeseries:', type(changes[0][1]))
raise
for i in range(len(changes[:-1])):
        if changes[i][1] != 'nan' and changes[i][1] != 'None':
values = {'name':attr,'type':typ,'value':changes[i][1],'start':changes[i][0],'end':changes[i+1][0]}
f.write(attr_str.format(**values))
    if len(changes) == 1 and changes[0][1] != 'None' and changes[0][1] != 'nan':
values = {'name':attr,'type':typ,'value':changes[0][1],'start':changes[0][0],'end':tf}
f.write(attr_str.format(**values))
##### File Output Strings #####
header_str = '''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<!-- Created using the networkxtimeseries library for python. -->\n\n'''
graph_start_str = '<graph label="{label}" directed="0">\n'
graph_end_str = '</graph>\n'
node_start_str = '\t<node label="{label}" id="{id}" start="{start}" end="{end}">\n'
node_end_str = '\t</node>\n'
edge_start_str = '\t<edge label="{label}" source="{source}" target="{target}" start="{start}" end="{end}">\n'
edge_end_str = '\t</edge>\n'
attr_str = '\t\t<att name="{name}" type="{type}" value="{value}" start="{start}" end="{end}"/>\n'
| mit | -2,272,877,648,505,575,000 | 33.402036 | 140 | 0.564867 | false |
tensorflow/benchmarks | scripts/tf_cnn_benchmarks/cnn_util.py | 1 | 8496 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for CNN benchmarks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import threading
import numpy as np
import tensorflow.compat.v1 as tf
def tensorflow_version_tuple():
v = tf.__version__
major, minor, patch = v.split('.')
return (int(major), int(minor), patch)
def tensorflow_version():
vt = tensorflow_version_tuple()
return vt[0] * 1000 + vt[1]
def log_fn(log):
print(log)
def roll_numpy_batches(array, batch_size, shift_ratio):
"""Moves a proportion of batches from start to the end of the array.
This function moves a proportion of batches, specified by `shift_ratio`, from
  the start of the array to the end. The number of batches moved is rounded
down to the nearest integer. For example,
```
roll_numpy_batches([1, 2, 3, 4, 5, 6], 2, 0.34) == [3, 4, 5, 6, 1, 2]
```
Args:
array: A Numpy array whose first dimension is the batch dimension.
batch_size: The batch size.
shift_ratio: Proportion of batches to move from the start of the array to
the end of the array.
Returns:
A new Numpy array, with a proportion of the batches at the start of `array`
moved to the end.
"""
num_items = array.shape[0]
assert num_items % batch_size == 0
num_batches = num_items // batch_size
starting_batch = int(num_batches * shift_ratio)
starting_item = starting_batch * batch_size
return np.roll(array, -starting_item, axis=0)
# For Python 2.7 compatibility, we do not use threading.Barrier.
class Barrier(object):
"""Implements a lightweight Barrier.
Useful for synchronizing a fixed number of threads at known synchronization
points. Threads block on 'wait()' and simultaneously return once they have
all made that call.
# Implementation adopted from boost/thread/barrier.hpp
"""
def __init__(self, parties):
"""Create a barrier, initialised to 'parties' threads."""
self.cond = threading.Condition(threading.Lock())
self.parties = parties
# Indicates the number of waiting parties.
self.waiting = 0
# generation is needed to deal with spurious wakeups. If self.cond.wait()
# wakes up for other reasons, generation will force it go back to wait().
self.generation = 0
self.broken = False
def wait(self):
"""Wait for the barrier."""
with self.cond:
# Check if the barrier has been disabled or not.
if self.broken:
return
gen = self.generation
self.waiting += 1
if self.waiting == self.parties:
self.waiting = 0
self.generation += 1
self.cond.notify_all()
# loop because of spurious wakeups
while gen == self.generation:
self.cond.wait()
# TODO(huangyp): Remove this method once we find a way to know which step
# is the last barrier.
def abort(self):
"""Clear existing barrier and disable this barrier."""
with self.cond:
if self.waiting > 0:
self.generation += 1
self.cond.notify_all()
self.broken = True
class ImageProducer(object):
"""An image producer that puts images into a staging area periodically.
This class is useful for periodically running a set of ops, `put_ops` on a
different thread every `batch_group_size` steps.
The notify_image_consumption() method is used to increment an internal counter
so that every `batch_group_size` times it is called, `put_ops` is executed. A
barrier is placed so that notify_image_consumption() will block until
the previous call to `put_ops` has been executed.
The start() method is used to start the thread that runs `put_ops`.
The done() method waits until the last put_ops is executed and stops the
thread.
The purpose of this class is to fill an image input pipeline every
`batch_group_size` steps. Suppose `put_ops` supplies `batch_group_size` images
to the input pipeline when run, and that every step, 1 batch of images is
consumed. Then, by calling notify_image_consumption() every step, images are
supplied to the input pipeline at the same amount they are consumed.
Example usage:
```
put_ops = ... # Enqueues `batch_group_size` batches to a StagingArea
get_op = ... # Dequeues 1 batch, and does some operations on it
batch_group_size = 4
with tf.Session() as sess:
image_producer = cnn_util.ImageProducer(sess, put_op, batch_group_size)
image_producer.start()
for _ in range(100):
sess.run(get_op)
image_producer.notify_image_consumption()
```
"""
def __init__(self, sess, put_ops, batch_group_size, use_python32_barrier):
self.sess = sess
self.num_gets = 0
self.put_ops = put_ops
self.batch_group_size = batch_group_size
self.done_event = threading.Event()
if (use_python32_barrier and
sys.version_info[0] == 3 and sys.version_info[1] >= 2):
self.put_barrier = threading.Barrier(2)
else:
self.put_barrier = Barrier(2)
def _should_put(self):
return (self.num_gets + 1) % self.batch_group_size == 0
def done(self):
"""Stop the image producer."""
self.done_event.set()
self.put_barrier.abort()
self.thread.join()
def start(self):
"""Start the image producer."""
self.sess.run([self.put_ops])
self.thread = threading.Thread(target=self._loop_producer)
# Set daemon to true to allow Ctrl + C to terminate all threads.
self.thread.daemon = True
self.thread.start()
def notify_image_consumption(self):
"""Increment the counter of image_producer by 1.
This should only be called by the main thread that consumes images and runs
the model computation. One batch of images should be consumed between
calling start() and the first call to this method. Then, one batch of images
should be consumed between any two successive calls to this method.
"""
if self._should_put():
self.put_barrier.wait()
self.num_gets += 1
def _loop_producer(self):
while not self.done_event.isSet():
self.sess.run([self.put_ops])
self.put_barrier.wait()
class BaseClusterManager(object):
"""The manager for the cluster of servers running the benchmark."""
def __init__(self, params):
worker_hosts = params.worker_hosts.split(',')
ps_hosts = params.ps_hosts.split(',') if params.ps_hosts else []
cluster = {'worker': worker_hosts}
if ps_hosts:
cluster['ps'] = ps_hosts
self._cluster_spec = tf.train.ClusterSpec(cluster)
def get_target(self):
"""Returns a target to be passed to tf.Session()."""
raise NotImplementedError('get_target must be implemented by subclass')
def join_server(self):
raise NotImplementedError('join must be implemented by subclass')
def get_cluster_spec(self):
return self._cluster_spec
def num_workers(self):
return len(self._cluster_spec.job_tasks('worker'))
def num_ps(self):
if 'ps' in self._cluster_spec.jobs:
return len(self._cluster_spec.job_tasks('ps'))
else:
return 0
class GrpcClusterManager(BaseClusterManager):
"""A cluster manager for a cluster networked with gRPC."""
def __init__(self, params, config_proto):
super(GrpcClusterManager, self).__init__(params)
if params.job_name == 'controller':
self._target = 'grpc://%s' % self._cluster_spec.job_tasks('worker')[0]
else:
self._server = tf.train.Server(self._cluster_spec,
job_name=params.job_name,
task_index=params.task_index,
config=config_proto,
protocol=params.server_protocol)
self._target = self._server.target
def get_target(self):
return self._target
def join_server(self):
return self._server.join()
| apache-2.0 | 2,613,338,284,972,577,300 | 32.581028 | 80 | 0.668432 | false |
cartertech/odoo-hr-ng | hr_transfer/__init__.py | 1 | 1064 | #-*- coding:utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 One Click Software (http://oneclick.solutions)
# and Copyright (C) 2013 Michael Telahun Makonnen <[email protected]>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_transfer
| agpl-3.0 | -1,945,479,425,682,072,300 | 45.26087 | 80 | 0.614662 | false |
simodalla/newage | newage/views.py | 1 | 2723 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.core.exceptions import ObjectDoesNotExist
from django.views.generic import ListView, DetailView
from django.utils.translation import ugettext_lazy as _
from .models import RdesktopSession, RdesktopUser
class DeployRdesktopTerminalServerList(ListView):
content_type = 'text/plain'
http_method_names = ['get']
template_name = 'newage/deploy/terminalserver_list.txt'
def render_to_response(self, context, **response_kwargs):
response = super(DeployRdesktopTerminalServerList,
self).render_to_response(context, **response_kwargs)
response['Content-Disposition'] = (
'attachment; filename="terminal_servers.txt"')
return response
def get_queryset(self):
user = get_object_or_404(RdesktopUser,
username__iexact=self.kwargs['username'])
queryset = RdesktopSession.objects.filter(user=user).order_by(
'server__fqdn')
format = self.request.GET.get('format', 'plain')
if format == 'url':
return [self.request.build_absolute_uri(session.get_absolute_url())
for session in queryset]
return [session.server.fqdn.lower() for session in queryset]
def get_context_data(self, **kwargs):
context = super(DeployRdesktopTerminalServerList,
self).get_context_data(**kwargs)
return context
class DeployRdesktopSessionDetail(DetailView):
model = RdesktopSession
content_type = 'text/plain'
http_method_names = ['get']
template_name = 'newage/deploy/rdesktopsession_detail.txt'
def render_to_response(self, context, **response_kwargs):
response = super(DeployRdesktopSessionDetail,
self).render_to_response(context, **response_kwargs)
response['Content-Disposition'] = (
'attachment; filename="redsktop_{}.desktop"'.format(
self.kwargs.get('fqdn')))
return response
def get_object(self, queryset=None):
if queryset is None:
queryset = self.get_queryset()
username = self.kwargs.get('username')
fqdn = self.kwargs.get('fqdn')
try:
obj = queryset.filter(
user__username__iexact=username,
server__fqdn__iexact=fqdn).get()
except ObjectDoesNotExist:
raise Http404(_("No %(verbose_name)s found matching the query") %
{'verbose_name': queryset.model._meta.verbose_name})
return obj
| bsd-3-clause | 3,315,744,594,874,511,000 | 37.352113 | 79 | 0.634594 | false |
kansanmuisti/kamu | Attic/eduskunta/find-mp-twitter.py | 1 | 2082 | #!/usr/bin/env python
import sys
import pickle
from twython import Twython
from django.core.management import setup_environ
sys.path.append('.')
import settings
setup_environ(settings)
from parliament.models import Member, MemberSocialFeed
PICKLE_FILE="mp-twitter.pickle"
twitter = Twython()
def read_twitter_lists():
twitter_lists = ((24404831, 6970755), (17680567, 3656966))
mps = {}
for tw_li in twitter_lists:
args = dict(list_id=tw_li[1], owner_id=tw_li[0], username=tw_li[0],
skip_status=True)
while True:
results = twitter.getListMembers(**args)
users = results['users']
for user in users:
if user['id'] not in mps:
mps[user['id']] = user
print("%s:%s" % (user['name'], user['id']))
cursor = results['next_cursor']
if not cursor:
break
args['cursor'] = cursor
return mps
try:
f = open(PICKLE_FILE, 'r')
tw_mps = pickle.load(f)
except IOError:
tw_mps = read_twitter_lists()
f = open(PICKLE_FILE, 'w')
pickle.dump(tw_mps, f)
f.close()
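# Map lowercased Twitter display names to the print names used for MPs in the database.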
MP_TRANSFORM = {
"veltto virtanen": "Pertti Virtanen",
"n. johanna sumuvuori": "Johanna Sumuvuori",
"eeva-johanna elorant": "Eeva-Johanna Eloranta",
"outi alanko-kahiluot": "Outi Alanko-Kahiluoto",
}
print("%d Twitter feeds found" % len(list(tw_mps.keys())))
mp_list = list(Member.objects.all())
for (tw_id, tw_info) in list(tw_mps.items()):
for mp in mp_list:
name = tw_info['name'].lower()
if name in MP_TRANSFORM:
name = MP_TRANSFORM[name].lower()
if mp.get_print_name().lower() == name.lower():
break
else:
print("%s: no match" % tw_info['name'])
continue
try:
feed = MemberSocialFeed.objects.get(member=mp, type='TW', origin_id=tw_id)
except MemberSocialFeed.DoesNotExist:
feed = MemberSocialFeed(member=mp, type='TW', origin_id=tw_id)
feed.account_name = tw_info['screen_name']
feed.save()
| agpl-3.0 | -8,833,156,142,159,256,000 | 26.76 | 82 | 0.59318 | false |
JustinTulloss/harmonize.fm | fileprocess/fileprocess/configuration.py | 1 | 2469 | # A configuration file for the fileprocess. We could do a .ini, but everybody
# knows python here
import logging
import os
from logging import handlers
config = {
'port': 48260,
'S3.accesskey': '17G635SNK33G1Y7NZ2R2',
'S3.secret': 'PHDzFig4NYRJoKKW/FerfhojljL+sbNyYB9bEpHs',
'S3.music_bucket': 'music.rubiconmusicplayer.com',
'S3.upload': True,
'sqlalchemy.default.convert_unicode': True,
'upload_dir': '../masterapp/tmp',
'media_dir': '../masterapp/media',
'pyfacebook.callbackpath': None,
'pyfacebook.apikey': 'cec673d0ef3fbc12395d0d3500cd72f9',
'pyfacebook.secret': 'a08f822bf3d7f80ee25c47414fe98be1',
'pyfacebook.appid': '2364724122',
'musicdns.key': 'ffa7339e1b6bb1d26593776b4257fce1',
'maxkbps': 192000,
'sqlalchemy.default.url': 'sqlite:///../masterapp/music.db',
'cache_dir': '../masterapp/cache'
}
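# Environment-specific overrides below; they are meant to be layered onto the base config via update_config().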
dev_config = {
'S3.upload': False,
'tagshelf': '../masterapp/tags.archive'
}
test_config = {
'sqlalchemy.default.url': 'sqlite:///:memory:',
'sqlalchemy.reflect.url': 'sqlite:///../../masterapp/music.db',
'upload_dir': './test/testuploaddir',
'media_dir': './test/teststagingdir',
'tagshelf': './test/tagshelf'
}
production_config = {
'S3.upload': True,
'sqlalchemy.default.url': \
'mysql://webappuser:gravelbits@localhost:3306/rubicon',
'sqlalchemy.default.pool_recycle': 3600,
'upload_dir': '/var/opt/stage_uploads',
'media_dir': os.environ.get('MEDIA'),
'tagshelf': '/var/opt/tagshelf.archive',
'cache_dir': '/tmp/stage_cache'
}
live_config = {
'port': 48262,
'upload_dir': '/var/opt/uploads',
'sqlalchemy.default.url': \
'mysql://webappuser:gravelbits@localhost:3306/harmonize',
'cache_dir': '/tmp/live_cache'
}
base_logging = {
'level': logging.INFO,
'format':'%(asctime)s,%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s',
'datefmt': '%H:%M:%S',
'handler': logging.StreamHandler,
'handler_args': ()
}
dev_logging = {
'level': logging.DEBUG
}
production_logging = {
'level': logging.INFO,
'handler': handlers.TimedRotatingFileHandler,
'handler_args': ('/var/log/rubicon/filepipe', 'midnight', 0, 7)
}
live_logging = {
'handler_args': ('/var/log/harmonize/filepipe', 'midnight', 0, 7)
}
def update_config(nconfig):
global config
config.update(nconfig)
def lupdate_config(nconfig):
global base_logging
    base_logging.update(nconfig)
| mit | -6,707,404,347,512,413,000 | 27.056818 | 80 | 0.649656 | false |
wger-project/wger | wger/weight/api/views.py | 1 | 1461 | # -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Workout Manager. If not, see <http://www.gnu.org/licenses/>.
# Third Party
from rest_framework import viewsets
# wger
from wger.weight.api.serializers import WeightEntrySerializer
from wger.weight.models import WeightEntry
class WeightEntryViewSet(viewsets.ModelViewSet):
"""
    API endpoint for weight entry objects
"""
serializer_class = WeightEntrySerializer
is_private = True
ordering_fields = '__all__'
filterset_fields = ('date', 'weight')
def get_queryset(self):
"""
Only allow access to appropriate objects
"""
return WeightEntry.objects.filter(user=self.request.user)
def perform_create(self, serializer):
"""
Set the owner
"""
serializer.save(user=self.request.user)
| agpl-3.0 | 63,408,866,045,461,896 | 30.76087 | 78 | 0.711841 | false |
swingr/meeseeks | meeseeks.py | 1 | 1298 | import pebble as p
import audio
class Meeseeks():
def __init__(self, id="464F", name="Mr. Meeseeks"):
self.id = id
self.name = name
self.pebble = None
self.score = []
def connect(self):
self.pebble = p.Pebble(self.id)
def send(self, msg):
self.pebble.notification_sms(self.name, msg)
def start(self):
self.send("Are you ready to take two strokes off your game! Ohhhhh yeah!")
audio.start()
def shoulders(self):
self.send("Remember to square your shoulders!")
audio.shoulders()
def choke(self):
self.send("Choke up on the club!")
audio.choke()
def existence(self):
self.send("Existence is pain!")
audio.existence()
def frustrating(self):
self.send("Arrgghhhhhhh!")
audio.frustrating()
def head(self):
self.send("Keep your head down!")
audio.head()
def follow(self):
self.send("You gotta follow through!")
audio.follow()
def nice(self):
self.send("NIIIICCCCCEEEE!")
audio.nice()
def short(self):
self.send("What about your short game")
audio.short()
if __name__ == "__main__":
meeseeks = Meeseeks()
meeseeks.connect()
meeseeks.choke()
| mit | 8,157,238,295,061,237,000 | 22.178571 | 82 | 0.572419 | false |
operasoftware/dragonfly-build-tools | df2/codegen/msgdefs.py | 1 | 3763 | import os
import sys
import time
import protoparser
import protoobjects
import utils
INDENT = " "
CSS_CLASSES = {
protoobjects.NUMBER: "number",
protoobjects.BUFFER: "string",
protoobjects.BOOLEAN: "boolean",
}
def indent(count): return count * INDENT
def print_doc(file, field, depth):
if field.doc:
file.write("%s%s" % (indent(depth), "<span class=\"comment\">/**\n"))
for line in field.doc_lines:
file.write("%s%s%s\n" % (indent(depth), " * ", line.replace("&", "&").replace("<", "<")))
file.write(indent(depth) + " */</span>\n")
def print_enum(file, enum, depth=0):
file.write("%s{\n" % indent(depth))
depth += 1
for f in enum.fields:
print_doc(file, f, depth)
args = indent(depth), f.name, f.key
file.write("%s<span class=\"enum\">%s</span> = %s;\n" % args)
depth -= 1
file.write("%s}\n" % (indent(depth)))
def print_message(file, msg, include_message_name=True, depth=0, recurse_list=[]):
if include_message_name:
file.write("%smessage <span class=\"message\">%s</span>\n" % (indent(depth), msg.name))
file.write("%s{\n" % indent(depth))
depth += 1
for field in msg.fields:
f_type = field.type
print_doc(file, field, depth)
if f_type.sup_type in CSS_CLASSES:
args = indent(depth), field.q, CSS_CLASSES[f_type.sup_type], field.full_type_name, field.name, field.key
file.write("%s%s <span class=\"%s\">%s</span> %s = %s" % args)
else:
args = indent(depth), field.q, field.full_type_name, field.name, field.key
file.write("%s%s %s %s = %s" % args)
if hasattr(field.options, "default"):
file.write(" [default = %s]" % field.options.default.value)
file.write(";\n")
if f_type.sup_type == protoobjects.MESSAGE:
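            # Guard against infinite recursion on self-referential message types.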
if not f_type in recurse_list:
print_message(file, f_type, False, depth, recurse_list[:] + [field.type])
if field.type.sup_type == protoobjects.ENUM:
print_enum(file, field.type, depth)
depth -= 1
file.write("%s}\n" % (indent(depth)))
def print_msg_def(dest, service, type, command_or_event, message):
service_name = service.name
version = service.options.version.value.strip("\"")
file_name = "%s.%s.%s.%s.def" % (service_name, version, type, command_or_event.name)
with open(os.path.join(dest, file_name), "wb") as file:
print_message(file, message)
def print_msg_defs(proto_path, dest):
with open(proto_path, "rb") as proto_file:
global_scope = protoparser.parse(proto_file.read())
for c in global_scope.service.commands:
print_msg_def(dest, global_scope.service, "commands", c, c.request_arg)
print_msg_def(dest, global_scope.service, "responses", c, c.response_arg)
for e in global_scope.service.events:
print_msg_def(dest, global_scope.service, "events", e, e.response_arg)
def msg_defs(args):
if not os.path.exists(args.dest): os.mkdir(args.dest)
if os.path.isfile(args.src):
        print_msg_defs(args.src, args.dest)
elif os.path.isdir(args.src):
for path in utils.get_proto_files(args.src):
print_msg_defs(path, args.dest)
def setup_subparser(subparsers, config):
subp = subparsers.add_parser("msg-defs", help="Create html documentation.")
subp.add_argument("src", nargs="?", default=".", help="""proto file or directory (default: %(default)s)).""")
subp.add_argument("dest", nargs="?", default="msg-defs", help="the destination directory (default: %(default)s)).")
subp.set_defaults(func=msg_defs)
| apache-2.0 | 5,219,118,924,902,811,000 | 40.280899 | 120 | 0.594738 | false |
mudragada/util-scripts | PyProblems/CodeSignal/uberShortestDistance.py | 1 | 3276 |
#Consider a city where the streets are perfectly laid out to form an infinite square grid.
#In this city finding the shortest path between two given points (an origin and a destination) is much easier than in other more complex cities.
#As a new Uber developer, you are tasked to create an algorithm that does this calculation.
#
#Given user's departure and destination coordinates, each of them located on some street,
# find the length of the shortest route between them assuming that cars can only move along the streets.
# Each street can be represented as a straight line defined by the x = n or y = n formula, where n is an integer.
#
#Example
#
#For departure = [0.4, 1] and destination = [0.9, 3], the output should be
#perfectCity(departure, destination) = 2.7.
#
#0.6 + 2 + 0.1 = 2.7, which is the answer.
#
#Input/Output
#
#[execution time limit] 4 seconds (py3)
#
#[input] array.float departure
#
#An array [x, y] of x and y coordinates. It is guaranteed that at least one coordinate is integer.
#
#Guaranteed constraints:
#0.0 ≤ departure[i] ≤ 10.0.
#
#[input] array.float destination
#
#An array [x, y] of x and y coordinates. It is guaranteed that at least one coordinate is integer.
#
#Guaranteed constraints:
#0.0 ≤ destination[i] ≤ 10.0.
#
#[output] float
#
#The shortest distance between two points along the streets.
import math
def main():
departure = [0.4, 1]
destination = [0.9, 3]
print(perfectCity(departure, destination))
departure = [2.4, 1]
destination = [5, 7.3]
print(perfectCity(departure, destination))
departure = [0, 0.2]
destination = [7, 0.5]
print(perfectCity(departure, destination))
departure = [0.9, 6]
destination = [1.1, 5]
print(perfectCity(departure, destination))
departure = [0, 0.4]
destination = [1, 0.6]
print(perfectCity(departure, destination))
def perfectCity(departure, destination):
print(departure, destination)
x1 = departure[0]
x2 = destination[0]
y1 = departure[1]
y2 = destination[1]
xDist = 0
yDist = 0
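    # Compute the horizontal and vertical legs of the route separately and sum them.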
if(int(x1) > int(x2)):
xDist = x1 - math.floor(x1) + math.ceil(x2) - x2
elif(int(x1) < int(x2)):
xDist = math.ceil(x1) - x1 + x2 - math.floor(x2)
elif(int(x1) == int(x2) and (x1+x2-int(x1)-int(x2)) <=1):
xDist = x1-math.floor(x1) + x2-math.floor(x2)
else:
xDist = math.ceil(x1)-x1 + math.ceil(x2)-x2
print("X Distance = " + str(xDist))
if(int(y1) > int(y2)):
if(isinstance(y1, int)):
y1x = y1
else:
y1x = y1 - math.floor(y1)
if(isinstance(y2, int)):
y2x = -y2
else:
y2x = math.ceil(y2) - y2
yDist = y1x + y2x
elif(int(y1) < int(y2)):
if(isinstance(y1, int)):
y1x = -y1
else:
y1x = math.ceil(y1) - y1
if(isinstance(y2, int)):
y2x = y2
else:
y2x = y2 - math.floor(y2)
yDist = y1x + y2x
elif(int(x1) == int(x2) and (x1+x2-int(x1)-int(x2)) <=1):
yDist = y1-math.floor(y1) + y2-math.floor(y2)
else:
yDist = math.ceil(y1)-y1 + math.ceil(y2)-y2
print("Y Distance = " + str(yDist))
return xDist + yDist
if __name__ == '__main__':
main()
| mit | 2,286,087,225,133,462,500 | 27.920354 | 144 | 0.612607 | false |
kbase/auth_service | python-libs/oauth.py | 1 | 8778 | import logging
import httplib2
import json
import os
import hashlib
# This module performs authentication based on the tokens
# issued by Globus Online's Nexus service, see this URL for
# details:
# http://globusonline.github.com/nexus-docs/api.html
#
# Import the Globus Online client libraries, originally
# sourced from:
# https://github.com/globusonline/python-nexus-client
from nexus import Client
from django.contrib.auth.models import AnonymousUser,User
from django.contrib.auth import login,authenticate
from django.contrib.auth.middleware import AuthenticationMiddleware
from django.conf import settings
from django.http import HttpResponse
from pprint import pformat
"""
This is the 2-legged OAuth authentication code from tastypie
heavily modified into a django authentication middleware.
We base this on RemoteUserMiddleware so that we can get access to the
request object to have access to the request headers, and then we
simply re-use the existing remote user backend code
https://docs.djangoproject.com/en/1.4/howto/auth-remote-user/
You configure it the same way using the normal instructions, except
that you use this module oauth.TwoLeggedOAuthMiddleware instead of
django.contrib.auth.middleware.RemoteUserMiddleware
The django.contrib.auth.backends.RemoteUserBackend module is also
used with this module, add it into the AUTHENTICATION_BACKENDS
declaration in settings.py
To set the authentiction service to be used, set AUTHSVC in your
settings.py file. Here is an example:
AUTHSVC = 'https://graph.api.go.sandbox.globuscs.info/'
Django modules can check the request.META['KBASEsessid'] for the
session ID that will be used within the KBase session management
infrastructure
To test this, bind the sample handler into urls.py like this:
...
from oauth import AuthStatus
...
urlpatterns = patterns( ...
...
url(r'^authstatus/?$', AuthStatus),
...
)
Then visit the authstatus URL to see the auth state.
If you have the perl Bio::KBase::AuthToken libraries installed,
you can test it like this:
token=`perl -MBio::KBase::AuthToken -e 'print Bio::KBase::AuthToken->new( user_id => "papa", password => "papa")->token,"\n";'`
curl -H "Authorization: Bearer $token" http://127.0.0.1:8000/authstatus/
Steve Chan
[email protected]
9/6/2012
Previous documentation follows:
This is a simple 2-legged OAuth authentication model for tastypie.
Copied nearly verbatim from gregbayer's piston example
- https://github.com/gregbayer/django-piston-two-legged-oauth
Dependencies:
- python-oauth2: https://github.com/simplegeo/python-oauth2
Adapted from example:
- http://philipsoutham.com/post/2172924723/two-legged-oauth-in-python
"""
class OAuth2Middleware(AuthenticationMiddleware):
"""
Two Legged OAuth authenticator.
This Authentication method checks for a provided HTTP_AUTHORIZATION
and looks up to see if this is a valid OAuth Consumer
"""
# Authentication server
# Create a Python Globus client
client = Client(config_file=os.path.join(os.path.dirname(__file__), 'nexus/nexus.yml'))
try:
authsvc = "https://%s/" % client.config['server']
except:
authsvc = 'https://nexus.api.globusonline.org/'
# Set the salt used for computing a session hash from the signature hash
salt = "(African || European)?"
def __init__(self, realm='API'):
self.realm = realm
self.user = None
self.http = httplib2.Http(disable_ssl_certificate_validation=True)
# The shortcut option will bypass token validation if we already have a django session
self.shortcut = False
def process_request(self, request):
"""
Verify 2-legged oauth request. Parameters accepted as
values in "Authorization" header, or as a GET request
or in a POST body.
"""
# AuthenticationMiddleware is required so that request.user exists.
if not hasattr(request, 'user'):
raise ImproperlyConfigured(
"The Django remote user auth middleware requires the"
" authentication middleware to be installed. Edit your"
" MIDDLEWARE_CLASSES setting to insert"
" 'django.contrib.auth.middleware.AuthenticationMiddleware'"
" before the RemoteUserMiddleware class.")
try:
if 'HTTP_AUTHORIZATION' in request.META:
auth_header = request.META.get('HTTP_AUTHORIZATION')
else:
logging.debug("No authorization header found.")
return None
# Extract the token based on whether it is an OAuth or Bearer
# token
if auth_header[:6] == 'OAuth ':
token = auth_header[6:]
elif auth_header[:7] == 'Bearer ':
token = auth_header[7:]
else:
logging.info("Authorization header did not contain OAuth or Bearer type token")
return None
# Push the token into the META for future reference
request.META['KBASEtoken'] = token
if (request.user.is_authenticated() and self.shortcut):
return
user_id = OAuth2Middleware.client.authenticate_user( token)
if not user_id:
logging.error("Authentication token failed validation")
return None
else:
logging.info("Validated as user " + user_id)
token_map = {}
for entry in token.split('|'):
key, value = entry.split('=')
token_map[key] = value
profile = self.get_profile(token)
if (profile == None):
logging.error("Token validated, but could not retrieve user profile")
return None
# For now, compute a sessionid based on hashing the
# the signature with the salt
request.META['KBASEsessid'] = hashlib.sha256(token_map['sig']+OAuth2Middleware.salt).hexdigest()
# Add in some useful details that came in from the token validation
request.META['KBASEprofile'] = profile
# See if the username is already associated with any currently logged
# in user, if so just pass over the rest
# Raises exception if it doesn't pass
user = authenticate(remote_user=profile['username'])
if user:
request.user = user
# For now, compute a sessionid based on hashing the
# the signature with the salt
request.META['KBASEsessid'] = hashlib.sha256(token_map['sig']+OAuth2Middleware.salt).hexdigest()
print pformat( request.META['KBASEsessid'])
# Add in some useful details that came in from the token validation
request.META['KBASEprofile'] = profile
login(request,user)
else:
logging.error( "Failed to return user from call to authenticate() with username " + profile['username'])
except KeyError, e:
logging.exception("KeyError in TwoLeggedOAuthMiddleware: %s" % e)
request.user = AnonymousUser()
except Exception, e:
logging.exception("Error in TwoLeggedOAuthMiddleware: %s" % e)
def get_profile(self,token):
try:
token_map = {}
for entry in token.split('|'):
key, value = entry.split('=')
token_map[key] = value
keyurl = self.__class__.authsvc + "/users/" + token_map['un'] + "?custom_fields=*"
res,body = self.http.request(keyurl,"GET",
headers={ 'Authorization': 'Globus-Goauthtoken ' + token })
if (200 <= int(res.status)) and ( int(res.status) < 300):
profile = json.loads( body)
return profile
logging.error( body)
raise Exception("HTTP", res)
except Exception, e:
logging.exception("Error in get_profile.")
return None
def AuthStatus(request):
res = "request.user.is_authenticated = %s \n" % request.user.is_authenticated()
if request.user.is_authenticated():
res = res + "request.user.username = %s\n" % request.user.username
if 'KBASEsessid' in request.META:
res = res + "Your KBase SessionID is %s\n" % request.META['KBASEsessid']
if 'KBASEprofile' in request.META:
res = res + "Your profile record is:\n%s\n" % pformat( request.META['KBASEprofile'])
if 'KBASEtoken' in request.META:
res = res + "Your OAuth token is:\n%s\n" % pformat( request.META['KBASEtoken'])
return HttpResponse(res)
| mit | -9,132,790,155,269,310,000 | 39.451613 | 127 | 0.640009 | false |
SecuredByTHEM/ndr-server | ndr_server/recorder.py | 1 | 5041 | #!/usr/bin/python3
# Copyright (C) 2017 Secured By THEM
# Original Author: Michael Casadevall <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''Repesentation of an recorder'''
import datetime
import time
import ndr
import ndr_server
class Recorder(object):
    '''Recorders are systems running the NDR package, and represent sources of data'''
def __init__(self, config):
self.config = config
self.pg_id = None
self.site_id = None
self.human_name = None
self.hostname = None
self.enlisted_at = None
self.last_seen = None
self.image_build_date = None
self.image_type = None
def __eq__(self, other):
# Recorders equal each other if the pg_id matches each other
# since its the same record in the database
if self.pg_id is None:
return False
return self.pg_id == other.pg_id
@classmethod
def create(cls, config, site, human_name, hostname, db_conn=None):
'''Creates the recorder within the database'''
recorder = cls(config)
recorder.human_name = human_name
recorder.hostname = hostname
recorder.site_id = site.pg_id
recorder.enlisted_at = time.time()
recorder.last_seen = recorder.enlisted_at
recorder.pg_id = config.database.run_procedure_fetchone(
"admin.insert_recorder", [site.pg_id, human_name, hostname],
existing_db_conn=db_conn)[0]
return recorder
def from_dict(self, recorder_dict):
'''Deserializes an recorder from a dictionary'''
self.human_name = recorder_dict['human_name']
self.hostname = recorder_dict['hostname']
self.site_id = recorder_dict['site_id']
self.pg_id = recorder_dict['id']
self.image_build_date = recorder_dict['image_build_date']
self.image_type = recorder_dict['image_type']
return self
def get_site(self, db_conn=None):
'''Gets the site object for this recorder'''
return ndr_server.Site.read_by_id(self.config, self.site_id, db_conn)
def set_recorder_sw_revision(self, image_build_date, image_type, db_conn):
'''Sets the recorder's software revision, and image type and updates the database
with that information'''
# Make sure we have an integer coming in
image_build_date = int(image_build_date)
self.config.database.run_procedure("admin.set_recorder_sw_revision",
[self.pg_id, image_build_date, image_type],
existing_db_conn=db_conn)
self.image_build_date = image_build_date
self.image_type = image_type
def get_message_ids_recieved_in_time_period(self,
message_type: ndr.IngestMessageTypes,
start_period: datetime.datetime,
end_period: datetime.datetime,
db_conn):
        '''Retrieves message IDs received in a given period. Returns None
        if no ids are found'''
message_ids = self.config.database.run_procedure_fetchone(
"admin.get_recorder_message_ids_recieved_in_period",
[self.pg_id,
message_type.value,
start_period,
end_period],
existing_db_conn=db_conn)[0]
return message_ids
@classmethod
def read_by_id(cls, config, recorder_id, db_conn=None):
'''Loads an recorder by ID number'''
rec = cls(config)
return rec.from_dict(config.database.run_procedure_fetchone(
"ingest.select_recorder_by_id", [recorder_id], existing_db_conn=db_conn))
@classmethod
def read_by_hostname(cls, config, hostname, db_conn=None):
'''Loads a recorder based of it's hostname in the database'''
rec = cls(config)
return rec.from_dict(config.database.run_procedure_fetchone(
"ingest.select_recorder_by_hostname", [hostname], existing_db_conn=db_conn))
@staticmethod
def get_all_recorder_names(config, db_conn=None):
'''Returns a list of all recorder names in the database'''
return config.database.run_procedure(
"admin.get_all_recorder_names", [], existing_db_conn=db_conn)
| agpl-3.0 | 4,874,409,016,993,405,000 | 38.077519 | 89 | 0.620115 | false |
mvaled/sentry | tests/sentry/api/endpoints/test_organization_user_issues.py | 1 | 3349 | from __future__ import absolute_import
import six
from datetime import timedelta
from django.core.urlresolvers import reverse
from django.utils import timezone
from sentry import tagstore
from sentry.models import EventUser, OrganizationMemberTeam
from sentry.testutils import APITestCase
class OrganizationUserIssuesTest(APITestCase):
def setUp(self):
super(OrganizationUserIssuesTest, self).setUp()
self.org = self.create_organization()
self.org.flags.allow_joinleave = False
self.org.save()
self.team1 = self.create_team(organization=self.org)
self.team2 = self.create_team(organization=self.org)
self.project1 = self.create_project(teams=[self.team1])
self.project2 = self.create_project(teams=[self.team2])
self.group1 = self.create_group(
project=self.project1, last_seen=timezone.now() - timedelta(minutes=1)
)
self.group2 = self.create_group(project=self.project2)
self.euser1 = EventUser.objects.create(email="[email protected]", project_id=self.project1.id)
self.euser2 = EventUser.objects.create(email="[email protected]", project_id=self.project1.id)
self.euser3 = EventUser.objects.create(email="[email protected]", project_id=self.project2.id)
tagstore.create_group_tag_value(
key="sentry:user",
value=self.euser1.tag_value,
group_id=self.group1.id,
project_id=self.project1.id,
environment_id=None,
)
tagstore.create_group_tag_value(
key="sentry:user",
value=self.euser2.tag_value,
group_id=self.group1.id,
project_id=self.project1.id,
environment_id=None,
)
tagstore.create_group_tag_value(
key="sentry:user",
value=self.euser3.tag_value,
group_id=self.group2.id,
project_id=self.project2.id,
environment_id=None,
)
self.path = reverse(
"sentry-api-0-organization-user-issues", args=[self.org.slug, self.euser1.id]
)
def test_no_team_access(self):
user = self.create_user()
self.create_member(user=user, organization=self.org)
self.login_as(user=user)
response = self.client.get(self.path)
assert response.status_code == 200
assert len(response.data) == 0
def test_has_access(self):
user = self.create_user()
member = self.create_member(user=user, organization=self.org, teams=[self.team1])
self.login_as(user=user)
response = self.client.get(self.path)
# result shouldn't include results from team2/project2 or [email protected]
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]["id"] == six.text_type(self.group1.id)
OrganizationMemberTeam.objects.create(
team=self.team2, organizationmember=member, is_active=True
)
response = self.client.get(self.path)
# now result should include results from team2/project2
assert response.status_code == 200
assert len(response.data) == 2
assert response.data[0]["id"] == six.text_type(self.group2.id)
assert response.data[1]["id"] == six.text_type(self.group1.id)
| bsd-3-clause | 5,731,906,424,005,365,000 | 36.211111 | 100 | 0.640191 | false |
phildini/django-invitations | invitations/models.py | 1 | 2846 | import datetime
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.utils.crypto import get_random_string
from django.utils.encoding import python_2_unicode_compatible
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from allauth.account.adapter import DefaultAccountAdapter
from allauth.account.adapter import get_adapter
from .managers import InvitationManager
from .app_settings import app_settings
from . import signals
@python_2_unicode_compatible
class Invitation(models.Model):
email = models.EmailField(unique=True, verbose_name=_('e-mail address'))
accepted = models.BooleanField(verbose_name=_('accepted'), default=False)
created = models.DateTimeField(verbose_name=_('created'),
default=timezone.now)
key = models.CharField(verbose_name=_('key'), max_length=64, unique=True)
sent = models.DateTimeField(verbose_name=_('sent'), null=True)
objects = InvitationManager()
@classmethod
def create(cls, email):
key = get_random_string(64).lower()
instance = cls._default_manager.create(
email=email,
key=key)
return instance
def key_expired(self):
expiration_date = (
self.sent + datetime.timedelta(
days=app_settings.INVITATION_EXPIRY))
return expiration_date <= timezone.now()
def send_invitation(self, request, **kwargs):
current_site = (kwargs['site'] if 'site' in kwargs
else Site.objects.get_current())
invite_url = reverse('invitations:accept-invite',
args=[self.key])
invite_url = request.build_absolute_uri(invite_url)
ctx = {
'invite_url': invite_url,
'site_name': current_site.name,
'email': self.email,
'key': self.key,
}
email_template = 'invitations/email/email_invite'
get_adapter().send_mail(
email_template,
self.email,
ctx)
self.sent = timezone.now()
self.save()
signals.invite_url_sent.send(
sender=self.__class__,
instance=self,
invite_url_sent=invite_url,
inviter=request.user)
def __str__(self):
return "Invite: {0}".format(self.email)
class InvitationsAdapter(DefaultAccountAdapter):
def is_open_for_signup(self, request):
if hasattr(request, 'session') and request.session.get('account_verified_email'):
return True
elif app_settings.INVITATION_ONLY is True:
# Site is ONLY open for invites
return False
else:
# Site is open to signup
return True
| gpl-3.0 | -2,722,871,065,632,220,700 | 31.340909 | 89 | 0.625439 | false |
massimo-nocentini/competitive-programming | UVa/1062.py | 1 | 2541 |
#_________________________________________________________________________
import fileinput
from contextlib import contextmanager
@contextmanager
def line_bind(line, *ctors, splitter=lambda l: l.split(' '), do=None):
'''
Split `line` argument producing an iterable of mapped elements, in the sense of `ctors`.
    Keyword argument `splitter` splits the given `line` on the space (' ')
    character by default; alternative behavior can be provided via a custom
    lambda expression of one parameter, which is applied to `line`.
The iterable produced by `splitter` should match argument `ctors` in length;
if this holds, an iterable of mapped elements is produced, composed of elements
built by application of each function in `ctors` to element in the split, pairwise.
On the other hand, mapping happens according to the rules of `zip` if lengths differ.
Keyword argument `do` is an higher order operator, defaults to `None`: if
given, it should be a function that receive the generator, which is returned, otherwise.
Moreover, the returned iterable object is a generator, so a linear scan of the line
    *is not* performed, hence there is no need to supply a higher order operator to
    be applied during the scan; this provides good performance at the same time.
'''
g = (c(v) for c, v in zip(ctors, splitter(line)))
yield do(g) if do else g
@contextmanager
def stdin_input(getter=lambda: fileinput.input(), raw_iter=False):
'''
Produces a way to fetch lines by a source.
Keyword argument `getter` should be a thunk that produces an iterable, call it `i`;
by default, it produces the iterator which reads from standard input.
Keyword argument `raw_iter` is a boolean. If it is `True`, that iterator `i` is
returned as it is; otherwise, a thunk is returned which wraps the application `next(i)`.
'''
iterable = getter()
yield iterable if raw_iter else (lambda: next(iterable))
#________________________________________________________________________
with stdin_input() as next_line:
from itertools import count
for i in count(1):
containers = next_line()
if containers == 'end\n': break
stacks = []
for c in containers:
for s in stacks:
if c <= s[-1]:
s.append(c)
break
else:
stacks.append([c])
print("Case {}: {}".format(i, len(stacks)))
| mit | -4,782,286,211,673,975,000 | 34.291667 | 92 | 0.623377 | false |
the-duck/launcher | duck_launcher/defaultConfig.py | 1 | 1158 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#########
#Copyright (C) 2014 Mark Spurgeon <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########
Dict={
"r":255,
"g":92,
"b":36,
"r2":40,
"g2":40,
"b2":40,
"alpha":200,
"font":"Droid Sans",
"font-r":255,
"font-g":255,
"font-b":255,
"animation-speed":1.5,
"size":40,
"dock-apps":["Firefox Web Browser"],
"icon-size":95,
"blocks":"(lp0\n(dp1\nS'apps'\np2\n(lp3\nS'Firefox Web Browser'\np4\nasS'name'\np5\nS'Example'\np6\nsa.",
"init-manager":"systemd"
}
| gpl-2.0 | 5,718,008,555,111,466,000 | 28.692308 | 106 | 0.666667 | false |
markvdw/mltools | mltools/optimise_scg.py | 1 | 5939 | # Copyright I. Nabney, N.Lawrence and James Hensman (1996 - 2014)
# Scaled Conjugate Gradients, originally in Matlab as part of the Netlab toolbox by I. Nabney, converted to Python by N. Lawrence and given a pythonic interface by James Hensman
# Modified from GPy SCG optimisation
from __future__ import print_function
import numpy as np
import sys
def print_out(len_maxiters, fnow, current_grad, beta, iteration):
print('\r', end=' ')
print('{0:>0{mi}g} {1:> 12e} {2:< 12.6e} {3:> 12e}'.format(iteration, float(fnow), float(beta), float(current_grad), mi=len_maxiters), end=' ') # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r',
sys.stdout.flush()
def exponents(fnow, current_grad):
exps = [np.abs(np.float(fnow)), current_grad]
return np.sign(exps) * np.log10(exps).astype(int)
def SCG(f, gradf, x, optargs=(), callback=None, maxiter=500, max_f_eval=np.inf, display=True, xtol=None, ftol=None, gtol=None):
"""
Optimisation through Scaled Conjugate Gradients (SCG)
f: the objective function
gradf : the gradient function (should return a 1D np.ndarray)
x : the initial condition
Returns
    x : the optimal value for x
    status : string describing convergence status
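
    Example (an illustrative sketch, not from the original docs; assumes a
    simple quadratic objective):

        f = lambda w: 0.5 * np.dot(w, w)
        gradf = lambda w: w
        w_opt, status = SCG(f, gradf, np.ones(5), display=False)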
"""
if xtol is None:
xtol = 1e-6
if ftol is None:
ftol = 1e-6
if gtol is None:
gtol = 1e-5
sigma0 = 1.0e-7
fold = f(x, *optargs) # Initial function value.
function_eval = 1
fnow = fold
gradnew = gradf(x, *optargs) # Initial gradient.
function_eval += 1
#if any(np.isnan(gradnew)):
# raise UnexpectedInfOrNan, "Gradient contribution resulted in a NaN value"
current_grad = np.dot(gradnew, gradnew)
gradold = gradnew.copy()
d = -gradnew # Initial search direction.
success = True # Force calculation of directional derivs.
nsuccess = 0 # nsuccess counts number of successes.
beta = 1.0 # Initial scale parameter.
betamin = 1.0e-15 # Lower bound on scale.
betamax = 1.0e15 # Upper bound on scale.
status = "Not converged"
iteration = 0
len_maxiters = len(str(maxiter))
if display:
print(' {0:{mi}s} {1:11s} {2:11s} {3:11s}'.format("I", "F", "Scale", "|g|", mi=len_maxiters))
exps = exponents(fnow, current_grad)
p_iter = iteration
# Main optimization loop.
while iteration < maxiter:
# Calculate first and second directional derivatives.
if success:
mu = np.dot(d, gradnew)
if mu >= 0:
d = -gradnew
mu = np.dot(d, gradnew)
kappa = np.dot(d, d)
sigma = sigma0 / np.sqrt(kappa)
xplus = x + sigma * d
gplus = gradf(xplus, *optargs)
function_eval += 1
theta = np.dot(d, (gplus - gradnew)) / sigma
# Increase effective curvature and evaluate step size alpha.
delta = theta + beta * kappa
if delta <= 0:
delta = beta * kappa
beta = beta - theta / kappa
alpha = -mu / delta
# Calculate the comparison ratio.
xnew = x + alpha * d
fnew = f(xnew, *optargs)
function_eval += 1
if function_eval >= max_f_eval:
status = "maximum number of function evaluations exceeded"
break
Delta = 2.*(fnew - fold) / (alpha * mu)
if Delta >= 0.:
success = True
nsuccess += 1
x = xnew
fnow = fnew
else:
success = False
fnow = fold
# Store relevant variables
if callback is not None:
callback(x, fval=fnow, gval=gradnew)
iteration += 1
if display:
print_out(len_maxiters, fnow, current_grad, beta, iteration)
n_exps = exponents(fnow, current_grad)
if iteration - p_iter >= 20 * np.random.rand():
a = iteration >= p_iter * 2.78
b = np.any(n_exps < exps)
if a or b:
p_iter = iteration
print('')
if b:
exps = n_exps
if success:
# Test for termination
if (np.abs(fnew - fold) < ftol):
status = 'converged - relative reduction in objective'
break
elif (np.max(np.abs(alpha * d)) < xtol):
status = 'converged - relative stepsize'
break
else:
# Update variables for new position
gradold = gradnew
gradnew = gradf(x, *optargs)
function_eval += 1
current_grad = np.dot(gradnew, gradnew)
fold = fnew
# If the gradient is zero then we are done.
if current_grad <= gtol:
status = 'converged - relative reduction in gradient'
break
# Adjust beta according to comparison ratio.
if Delta < 0.25:
beta = min(4.0 * beta, betamax)
if Delta > 0.75:
beta = max(0.25 * beta, betamin)
# Update search direction using Polak-Ribiere formula, or re-start
# in direction of negative gradient after nparams steps.
if nsuccess == x.size:
d = -gradnew
beta = 1. # This is not in the original paper
nsuccess = 0
elif success:
Gamma = np.dot(gradold - gradnew, gradnew) / (mu)
d = Gamma * d - gradnew
else:
# If we get here, then we haven't terminated in the given number of
# iterations.
status = "maxiter exceeded"
if display:
print_out(len_maxiters, fnow, current_grad, beta, iteration)
print("")
print(status)
return x, status
| mit | -192,725,720,858,400,350 | 32.937143 | 228 | 0.555144 | false |
davy39/eric | eric6_re.py | 1 | 1832 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2004 - 2014 Detlev Offenbach <[email protected]>
#
"""
Eric6 Re.
This is the main Python script that performs the necessary initialization
of the PyRegExp wizard module and starts the Qt event loop. This is a
standalone version of the integrated PyRegExp wizard.
"""
from __future__ import unicode_literals
import Toolbox.PyQt4ImportHook # __IGNORE_WARNING__
try: # Only for Py2
import Utilities.compatibility_fixes # __IGNORE_WARNING__
except (ImportError):
pass
import sys
for arg in sys.argv:
if arg.startswith("--config="):
import Globals
configDir = arg.replace("--config=", "")
Globals.setConfigDir(configDir)
sys.argv.remove(arg)
break
from Globals import AppInfo
from Toolbox import Startup
def createMainWidget(argv):
"""
Function to create the main widget.
@param argv list of commandline parameters (list of strings)
@return reference to the main widget (QWidget)
"""
from Plugins.WizardPlugins.PyRegExpWizard.PyRegExpWizardDialog import \
PyRegExpWizardWindow
return PyRegExpWizardWindow()
def main():
"""
Main entry point into the application.
"""
options = [
("--config=configDir",
"use the given directory as the one containing the config files"),
]
appinfo = AppInfo.makeAppInfo(sys.argv,
"Eric6 RE",
"",
"Regexp editor for the Python re module",
options)
res = Startup.simpleAppStartup(sys.argv,
appinfo,
createMainWidget)
sys.exit(res)
if __name__ == '__main__':
main()
| gpl-3.0 | 46,288,976,475,146,880 | 25.171429 | 75 | 0.601528 | false |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/resources/types/keyword_plan_campaign_keyword.py | 1 | 2588 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.enums.types import keyword_match_type
__protobuf__ = proto.module(
package='google.ads.googleads.v6.resources',
marshal='google.ads.googleads.v6',
manifest={
'KeywordPlanCampaignKeyword',
},
)
class KeywordPlanCampaignKeyword(proto.Message):
r"""A Keyword Plan Campaign keyword.
Only negative keywords are supported for Campaign Keyword.
Attributes:
resource_name (str):
Immutable. The resource name of the Keyword Plan Campaign
keyword. KeywordPlanCampaignKeyword resource names have the
form:
``customers/{customer_id}/keywordPlanCampaignKeywords/{kp_campaign_keyword_id}``
keyword_plan_campaign (str):
The Keyword Plan campaign to which this
negative keyword belongs.
id (int):
Output only. The ID of the Keyword Plan
negative keyword.
text (str):
The keyword text.
match_type (google.ads.googleads.v6.enums.types.KeywordMatchTypeEnum.KeywordMatchType):
The keyword match type.
negative (bool):
Immutable. If true, the keyword is negative.
Must be set to true. Only negative campaign
keywords are supported.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
keyword_plan_campaign = proto.Field(
proto.STRING,
number=8,
optional=True,
)
id = proto.Field(
proto.INT64,
number=9,
optional=True,
)
text = proto.Field(
proto.STRING,
number=10,
optional=True,
)
match_type = proto.Field(
proto.ENUM,
number=5,
enum=keyword_match_type.KeywordMatchTypeEnum.KeywordMatchType,
)
negative = proto.Field(
proto.BOOL,
number=11,
optional=True,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 3,933,835,017,101,767,700 | 28.409091 | 95 | 0.63949 | false |
ayrokid/yowsup | send.py | 1 | 1165 | from yowsup.demos import sendclient
#import logging #tampilan log khusus centos os
import MySQLdb
import MySQLdb.cursors
db = MySQLdb.connect(host="localhost", # your host, usually localhost
user="root", # your username
passwd="root", # your password
db="push",
cursorclass=MySQLdb.cursors.DictCursor) # name of the data base
credentials = ['6281390688955', 'Ga3lQvVTvzt10PlCGC/W5MAJfuE=']
data = []
try:
cur = db.cursor()
cur.execute("select id,content from messages where status='1' limit 1")
msg = cur.fetchone()
#print "Message : %s " % msg['content']
cur.execute("select nomor from msisdn")
results = cur.fetchall()
i = 0;
for row in results:
data.append([ row['nomor'], msg['content'] ])
i += 1
#stack = sendclient.YowsupSendStack(credentials, [(['6285725523023', 'pesan dari ubuntu'])])
#stack = sendclient.YowsupSendStack(credentials, data)
#stack.start()
cur.execute("""update messages set status=0 where id=%s """, (msg['id']) )
db.commit()
print('\nKirim Sukses..')
except KeyboardInterrupt:
db.rollback()
print('\nYowsdown')
#disconnect from server
db.close()
| gpl-3.0 | -5,198,567,450,660,308,000 | 28.125 | 96 | 0.670386 | false |
trou/gdb-x86-sysutils | intel_sys_structs.py | 1 | 6572 | # Copyright 2015 - Raphaël Rigo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cstruct
class mem_map:
"""
Stores virt->phy map
Structure is a dict of :
addr => (start, end, physical address of start, physical end)
each referenced page should be added like this :
start => (start, end)
end => (start, end)
Adjacent pages are merged in add_page if physical
addresses are adjacent too
"""
def __init__(self):
self.p = {}
self.np = {}
def add_page(self, m, new_page_addr, size, new_page_phy=None):
new_page_end = new_page_addr+size
if not new_page_phy:
new_page_phy = new_page_addr
new_phy_end = new_page_phy+size
# Default case
new_r = (new_page_addr, new_page_end, new_page_phy, new_phy_end)
# Check if adjacent :
if new_page_addr in m:
# print "Found start in m " +repr(m[new_page_addr])
# addr equals previous page end
# => coallesce with previous
if new_page_addr == m[new_page_addr][1]:
# check if physical is also adjacent with previous end
if new_page_phy == m[new_page_addr][3] :
new_r = (m[new_page_addr][0], new_page_end, m[new_page_addr][2], new_phy_end)
del m[new_page_addr] # delete old "end"
else:
raise Exception("Page already present !")
elif new_page_end in m:
# print "Found end in m :"+repr(m[new_page_end])
# End of new page is equal to present page addr
# merge with next
# check if physical is also adjacent :
if new_page_end == m[new_page_end][2]:
new_r = (new_page_addr, m[new_page_end][1], new_page_phy, m[new_page_end][3])
del m[new_page_end] # delete start of old page
# Add new entry
m[new_r[0]] = new_r
m[new_r[1]] = new_r
def add_page_present(self, addr, size, phy=None):
self.add_page(self.p, addr, size, phy)
def add_page_4k_not_present(self, addr):
self.add_page(self.np, addr, 4096)
def prt(self):
s = set(self.p.values())
for r in sorted(list(s), key = lambda m: m[0]):
print("%08x-%08x => (%08x-%08x)" % r)
class segment_desc:
def __init__(self, val):
self.desc = val
self.base = (self.desc >> 16)&0xFFFFFF
self.base |= ((self.desc >> (32+24))&0xFFF)<<24;
self.limit = self.desc & 0xFFFF;
self.limit |= ((self.desc >> 32+16)&0xF)<<16;
self.type = (self.desc >> (32+8))&0xF;
self.s = (self.desc >> (32+12))&1;
self.dpl = (self.desc >> (32+13))&3;
self.p = (self.desc >> (32+15))&1;
self.avl = (self.desc >> (32+20))&1;
self.l = (self.desc >> (32+21))&1;
self.db = (self.desc >> (32+22))&1;
self.g = (self.desc >> (32+23))&1;
self.limit *= 4096 if self.g else 1
def is_tss(self):
if self.s == 0 and (self.type == 9 or self.type == 11):
return True
else:
return False
def type_str(self):
if (self.type>>3)&1:
# code
s = "C" if (self.type>>2)&1 else "c"
s += "R" if (self.type>>1)&1 else "r"
s += "A" if self.type&1 else "a"
return ("CODE", s)
else:
# data
s = "E" if (self.type>>2)&1 else "e"
s += "W" if (self.type>>1)&1 else "w"
s += "A" if self.type&1 else "a"
return ("DATA", s)
def __str__(self):
if self.p == 0:
return "Not Present !"
if self.s == 1:
# CODE/DATA
s = "DPL : %d Base : %08x Limit : %08x " % (self.dpl, self.base, self.limit)
if self.l == 1 and self.db == 0 :
s += "D/B: 64b "
else:
s += "D/B: %db " % (16,32)[self.db]
s += "Type: %s" % ",".join(self.type_str())
else:
# System
s = "DPL : %d Base : %08x Limit : %08x " % (self.dpl, self.base, self.limit)
s += "AVL : %d " % self.avl
s += "Type: %s" % ("Reserved", "16b TSS (A)", "LDT", "16b TSS (B)", "16b Call G", "Task Gate", "16b Int G", "16b Trap G", "Reserved", "32b TSS (A)", "Reserved", "32b TSS (B)", "32b Call G", "Reserved", "32b Int G", "32b Trap G")[self.type]
return s
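
# Illustrative usage (a sketch, not part of the original module): decoding a
# flat ring-0 32-bit code segment descriptor.
#   d = segment_desc(0x00cf9a000000ffff)
#   print(d)   # base 0, 4KiB-granular limit, DPL 0, 32-bit CODE segment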
class tss_data(cstruct.CStruct):
_fields = [ ("ptl", "u16"),
("_","u16"),
("esp0","u32"),
("ss0","u16"),
("_","u16"),
("esp1","u32"),
("ss1","u16"),
("_","u16"),
("esp2","u32"),
("ss2","u16"),
("_","u16"),
("cr3","u32"),
("eip","u32"),
("eflags","u32"),
("eax","u32"),
("ecx","u32"),
("edx","u32"),
("ebx","u32"),
("esp","u32"),
("ebp","u32"),
("esi","u32"),
("edi","u32"),
("es","u16"),
("_","u16"),
("cs","u16"),
("_","u16"),
("ss","u16"),
("_","u16"),
("ds","u16"),
("_","u16"),
("fs","u16"),
("_","u16"),
("gs","u16"),
("_","u16"),
("ldtss","u16"),
("t","u16"),
("iomap","u16")]
def __str__(self):
s = "Prev : %04x CR3 : %08x, CS:EIP: %04x:%08x " % (self.ptl, self.cr3, self.cs, self.eip)
s += "ds : %04x " % (self.ds)
s += "es : %04x " % (self.es)
s += "fs : %04x " % (self.fs)
s += "gs : %04x " % (self.gs)
s += "ss : %04x " % (self.ss)
return s
| gpl-2.0 | 5,077,701,676,599,962,000 | 34.907104 | 251 | 0.455943 | false |
goerz/LPBS | LPBS/Config.py | 1 | 9634 | # -*- coding: utf-8 -*-
############################################################################
# Copyright (C) 2015 by Michael Goerz #
# http://michaelgoerz.net #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the #
# Free Software Foundation, Inc., #
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. #
############################################################################
""" Manage Configuration """
from ConfigParser import SafeConfigParser, ParsingError
from ConfigParser import Error as ConfigParserError
import os
import sys
import re
from StringIO import StringIO
DEFAULTS = StringIO("""\
[Server]
# Full hostname of submission server (hostname.domain). Will be made available
# to running job through the environment variable PBS_SERVER. Job IDs will end
# in the server hostname
hostname: localhost
domain: local
[Node]
# Full hostname of the execution node (hostname.domain). Will be made available
# to running job through the environment variable PBS_O_HOST. Since LPBS is
# designed to execute jobs locally, the settings here should in general be
# identical to those in the [Server] section
hostname: localhost
domain: local
[LPBS]
# Setting for job execution.
# If 'username_in_jobid' is enabled, the job IDs will have the form
# 'seqnr.user.hostname.domain' where 'user' is the username of the user
# submitting the job.
# The file given in 'sequence_file' is used for keeping track of the 'seqnr'
# appearing in the job ID.
# The file given in 'logfile' is used for logging all LPBS events. Both
# 'sequence_file' and 'logfile' are relative to $LPBS_HOME.
username_in_jobid: 0
sequence_file: sequence
logfile: lpbs.log
[Scratch]
# Settings for the scratch space provided to jobs. 'scratch_root' defines a
# location where jobs should write temporary data. If given as a relative path,
# it is relative to $LPBS_HOME. Environment variables will be expanded at the
# time of the job submission.
# If the value of # 'create_jobid_folder' is set to 1, a folder with the name of
# the full job ID is created inside scratch_root. This folder is automatically
# deleted when the job ends, unless 'keep_scratch' is set to 1. If the job
# failed, the scratch will not be deleted, unless 'delete_failed_scratch' is set
# to 1.
scratch_root: $SCRATCH_ROOT
create_jobid_folder: 0
keep_scratch: 0
delete_failed_scratch: 0
[Notification]
# Settings on how the user should be be notified about events such as the start
# and end of a job. If sent_mail is set to 1, emails will be sent for
# notifications depending on the value of the '-m' option to lqsub. If
# 'send_growl' is set to 1, Growl (http://growl.info) is used for notification
# on MacOS X. Notifications via Growl do not take into account the '-m' options
# during job submission.
send_mail: 0
send_growl: 0
[Mail]
# SMTP settings for email notifications. Notification emails will be sent from
# the address given by the 'from' option. The SMTP server given in 'smtp' is
# used for sending the emails, if 'authenticate' is set to 1, authentication is
# done with the given 'username' and 'password'. If 'tls' is 1, TLS encryption
# will be used.
from: [email protected]
smtp: smtp.example.com:587
username: user
password: secret
authenticate: 0
tls: 1
[Growl]
# Settings for Growl notifications. Notifications are sent to either localhost
# or a remote host via the GNTP protocol. The 'hostname' setting gives the
# address and port of the Growl server, the given 'password' is used for
# authentication (note that if sending to localhost, no authentication is
# necessary). If 'sticky' is set to 1, the Growl notifications will be sticky.
# It is possible to send notifications to more than one host. In this case, both
# 'hostname' and 'password' should be a comma-separated list of values, with
# each item corresponding to one host.
hostname: localhost:23053
password:
sticky: 0
[Log]
# 'logfile' gives the name of the central log file, relative to $LPBS_HOME.
logfile: lpbs.log
""")
def verify_config(config):
""" Verify that a config data structure conains all valid entries. For those
entries that are not valid, print an error message and reset them to a
default
"""
try:
for (section, key) in [('LPBS', 'username_in_jobid'),
('Scratch', 'create_jobid_folder'), ('Scratch', 'keep_scratch'),
('Scratch', 'delete_failed_scratch'), ('Notification', 'send_mail'),
('Notification', 'send_growl'), ('Mail', 'authenticate'),
('Mail', 'tls'), ('Growl', 'sticky')]:
try:
config.getboolean(section, key)
except ValueError, error:
config.set(section, key, 'false')
print >> sys.stderr, "Illegal value for %s in Section %s." \
% (section, key)
print >> sys.stderr, str(error)
print >> sys.stderr, "Set %s to False" % key
hostname = config.get('Server', 'hostname')
if not re.match(r'^[A-Za-z0-9\-]+$', hostname):
print >> sys.stderr, "Server hostname was %s, " % hostname,
print >> sys.stderr, "must match '^[A-Za-z0-9\\-]+$'. " \
"Set to 'localhost'."
config.set('Server', 'hostname', 'localhost')
domain = config.get('Server', 'domain')
if not re.match(r'^[A-Za-z0-9\-\.]+$', domain):
print >> sys.stderr, "Server domain was %s, " % hostname,
print >> sys.stderr, "must match '^[A-Za-z0-9\\-\\.]+$'. " \
"Set to 'local'."
config.set('Server', 'domain', 'local')
hostname = config.get('Node', 'hostname')
if not re.match(r'^[A-Za-z0-9\-]+$', hostname):
print >> sys.stderr, "Node hostname was %s, " % hostname,
print >> sys.stderr, "must match '^[A-Za-z0-9\\-]+$'. " \
"Set to 'localhost'."
config.set('Node', 'hostname', 'localhost')
domain = config.get('Node', 'domain')
if not re.match(r'^[A-Za-z0-9\-\.]+$', domain):
print >> sys.stderr, "Node domain was %s, " % hostname,
print >> sys.stderr, "must match '^[A-Za-z0-9\\-\\.]+$'. " \
"Set to 'local'."
config.set('Node', 'domain', 'local')
except ConfigParserError, error:
print >> sys.stderr, "Unrecoverable error in config data:"
print >> sys.stderr, str(error)
sys.exit(1)
def get_config(config_file):
""" Return an instance of ConfigParser.SafeConfigParser, loaded with the
data in
a) $LPBS_HOME/lpbs.cfg
b) $HOME/.lpbs.cfg
c) the specified config_file
"""
config = SafeConfigParser()
# Defaults
config.readfp(DEFAULTS)
config_files = []
# $LPBS_HOME/lpbs.cfg
if os.environ.has_key('LPBS_HOME'):
global_config_file = os.path.join(os.environ['LPBS_HOME'], 'lpbs.cfg')
if os.path.isfile(global_config_file):
config_files.append(global_config_file)
# $HOME/.lpbs.cfg
if os.environ.has_key('HOME'):
user_config_file = os.path.join(os.environ['HOME'], '.lpbs.cfg')
if os.path.isfile(user_config_file):
config_files.append(user_config_file)
# Specified Config File
try:
if os.path.isfile(config_file):
config_files.append(config_file)
except TypeError:
pass
try:
config.read(config_files)
except ParsingError, error:
print >> sys.stderr, str(error)
verify_config(config)
return config
def verify_lpbs_home():
""" Verify existence and writability of LPBS_HOME. Try to create files as
necessary
"""
if not os.environ.has_key('LPBS_HOME'):
print >> sys.stderr, "LPBS_HOME must be defined"
return 1
if not os.path.isdir(os.environ['LPBS_HOME']):
print >> sys.stderr, "LPBS_HOME must be a directory"
return 1
if not os.access(os.environ['LPBS_HOME'], os.W_OK):
print >> sys.stderr, "LPBS_HOME must be writable"
return 1
configfile = os.path.join(os.environ['LPBS_HOME'], 'lpbs.cfg')
if not os.path.isfile(configfile):
configfile_fh = open(configfile, 'w')
configfile_fh.write(DEFAULTS.getvalue())
configfile_fh.close()
return 0
def full_expand(path_string):
""" Combination of os.path.expanduser and os.path.expandvars """
return os.path.expanduser(os.path.expandvars(path_string))
| gpl-3.0 | 7,518,117,689,724,686,000 | 37.536 | 80 | 0.605148 | false |
lupyuen/RaspberryPiImage | usr/share/pyshared/ajenti/plugins/fm/backend.py | 1 | 6269 | import grp
import logging
import os
import pwd
import re
import subprocess
import stat
import shutil
from datetime import datetime
from ajenti.api import *
from ajenti.util import str_fsize
from ajenti.plugins import manager
from ajenti.plugins.tasks.manager import TaskManager
from ajenti.plugins.tasks.tasks import CopyFilesTask, MoveFilesTask, DeleteFilesTask
class Item (object):
stat_bits = [
stat.S_IRUSR,
stat.S_IWUSR,
stat.S_IXUSR,
stat.S_IRGRP,
stat.S_IWGRP,
stat.S_IXGRP,
stat.S_IROTH,
stat.S_IWOTH,
stat.S_IXOTH,
]
def __init__(self, path):
self.checked = False
self.path, self.name = os.path.split(path)
self.fullpath = path
self.isdir = os.path.isdir(path)
self.icon = 'folder-close' if self.isdir else 'file'
try:
self.size = 0 if self.isdir else os.path.getsize(path)
except OSError:
self.size = 0
self.sizestr = '' if self.isdir else str_fsize(self.size)
self.mtime = datetime.utcfromtimestamp(os.stat(path).st_mtime)
self.atime = datetime.utcfromtimestamp(os.stat(path).st_atime)
self.ctime = datetime.utcfromtimestamp(os.stat(path).st_ctime)
def read(self):
stat = os.stat(self.fullpath)
try:
self.owner = pwd.getpwuid(stat.st_uid)[0]
except KeyError:
self.owner = str(stat.st_uid)
try:
self.group = grp.getgrgid(stat.st_gid)[0]
except KeyError:
self.group = str(stat.st_gid)
self.mod_ur, self.mod_uw, self.mod_ux, \
self.mod_gr, self.mod_gw, self.mod_gx, \
self.mod_ar, self.mod_aw, self.mod_ax = [
(stat.st_mode & Item.stat_bits[i] != 0)
for i in range(0, 9)
]
@property
def mode(self):
mods = [
self.mod_ur, self.mod_uw, self.mod_ux,
self.mod_gr, self.mod_gw, self.mod_gx,
self.mod_ar, self.mod_aw, self.mod_ax
]
return sum(
Item.stat_bits[i] * (1 if mods[i] else 0)
for i in range(0, 9)
)
def write(self):
newpath = os.path.join(self.path, self.name)
if self.fullpath != newpath:
logging.info('[fm] renaming %s -> %s' % (self.fullpath, newpath))
os.rename(self.fullpath, newpath)
self.fullpath = os.path.join(self.path, self.name)
os.chmod(self.fullpath, self.mode)
err = None
try:
uid = int(self.owner or -1)
except:
try:
uid = pwd.getpwnam(self.owner)[2]
except KeyError:
uid = -1
err = Exception('Invalid owner')
try:
gid = int(self.group or -1)
except:
try:
gid = grp.getgrnam(self.group)[2]
except KeyError:
gid = -1
err = Exception('Invalid group')
os.chown(self.fullpath, uid, gid)
if err:
raise err
@plugin
class FMBackend (BasePlugin):
FG_OPERATION_LIMIT = 1024 * 1024 * 50
def _escape(self, i):
if hasattr(i, 'fullpath'):
i = i.fullpath
return '\'%s\' ' % i.replace("'", "\\'")
def _total_size(self, items):
return sum(_.size for _ in items)
def _has_dirs(self, items):
return any(_.isdir for _ in items)
def init(self):
self.task_manager = TaskManager.get()
def remove(self, items, cb=lambda t: None):
logging.info('[fm] removing %s' % ', '.join(x.fullpath for x in items))
if self._total_size(items) > self.FG_OPERATION_LIMIT or self._has_dirs(items):
paths = [x.fullpath for x in items]
task = DeleteFilesTask.new(source=paths)
task.callback = cb
self.task_manager.run(task=task)
else:
for i in items:
if os.path.isdir(i.fullpath):
shutil.rmtree(i.fullpath)
else:
os.unlink(i.fullpath)
cb(None)
def move(self, items, dest, cb=lambda t: None):
logging.info('[fm] moving %s to %s' % (', '.join(x.fullpath for x in items), dest))
if self._total_size(items) > self.FG_OPERATION_LIMIT or self._has_dirs(items):
paths = [x.fullpath for x in items]
task = MoveFilesTask.new(source=paths, destination=dest)
task.callback = cb
self.task_manager.run(task=task)
else:
for i in items:
shutil.move(i.fullpath, dest)
cb(None)
def copy(self, items, dest, cb=lambda t: None):
logging.info('[fm] copying %s to %s' % (', '.join(x.fullpath for x in items), dest))
if self._total_size(items) > self.FG_OPERATION_LIMIT or self._has_dirs(items):
paths = [x.fullpath for x in items]
task = CopyFilesTask.new(source=paths, destination=dest)
task.callback = cb
self.task_manager.run(task=task)
else:
for i in items:
if os.path.isdir(i.fullpath):
shutil.copytree(i.fullpath, os.path.join(dest, i.name))
else:
shutil.copy(i.fullpath, os.path.join(dest, i.name))
cb(None)
@interface
class Unpacker (BasePlugin):
ext = None
command = None
@staticmethod
def find(fn):
for u in Unpacker.get_all():
if u.match(fn):
return u
def match(self, fn):
return any(re.match(x, fn) for x in self.ext)
def unpack(self, fn, cb=lambda: None):
self.context.launch('terminal',
command='cd "{0}"; {2} "{1}"'.format(
*(os.path.split(fn) + (self.command,))
), callback=cb)
@plugin
class TarUnpacker (Unpacker):
ext = [r'.+\.tar.gz', r'.+\.tgz', r'.+\.tar', r'.+\.tar.bz2',
r'.+\.tbz2']
command = 'tar xvf'
@plugin
class ZipUnpacker (Unpacker):
ext = [r'.+\.zip']
command = 'unzip'
@plugin
class RarUnpacker(Unpacker):
ext = [r'.+\.rar']
command = 'unrar x'
| apache-2.0 | -8,778,958,459,631,277,000 | 29.285024 | 92 | 0.535971 | false |
vrutkovs/beehive | beehive4cmd0/failing_steps.py | 1 | 1081 | # -*- coding: utf-8 -*-
"""
Generic failing steps.
Often needed in examples.
EXAMPLES:
Given a step fails
When another step fails
Then a step fails
Given ...
When ...
Then it should fail because "the person is unknown".
"""
from beehive import step, then
# -----------------------------------------------------------------------------
# STEPS FOR: failing
# -----------------------------------------------------------------------------
@step('{word:w} step fails')
def step_fails(context, word):
"""
Step that always fails, mostly needed in examples.
"""
assert False, "EXPECT: Failing step"
@then(u'it should fail because "{reason}"')
def then_it_should_fail_because(context, reason):
"""
Self documenting step that indicates why this step should fail.
"""
assert False, "FAILED: %s" % reason
# @step(u'an error should fail because "{reason}"')
# def then_it_should_fail_because(context, reason):
# """
# Self documenting step that indicates why this step should fail.
# """
# assert False, reason
| bsd-2-clause | 2,706,280,226,738,522,000 | 24.139535 | 79 | 0.552266 | false |
layzerar/gospel | gospel/scripts/gossc.py | 1 | 8923 | # -*- coding: utf-8 -*-
import io
import operator
import os
import re
import sys
import signal
import tempfile
import subprocess
import argparse
try:
import psutil
except ImportError:
psutil = None
def _log_info(msg, **kwds):
if kwds:
msg = msg.format(**kwds)
sys.stdout.write(msg)
sys.stdout.write('\n')
def _log_error(msg, **kwds):
if kwds:
msg = msg.format(**kwds)
sys.stderr.write(msg)
sys.stderr.write('\n')
def _parse_cli_arguments():
parser = argparse.ArgumentParser(prog='gossc',
description='high-level screen manager')
subparsers = parser.add_subparsers(title='action', dest='action')
# create the parser for the "init" command
parser_init = subparsers.add_parser('init',
help='init screen')
parser_init.add_argument('screen_name',
help='screen name')
parser_init.add_argument('--lines',
dest='lines',
type=int,
default=10000,
help='output buffer lines')
# create the parser for the "exec" command
parser_exec = subparsers.add_parser('exec',
help='execute commands in screen')
parser_exec.add_argument('screen_name',
help='screen name')
parser_exec.add_argument('script_name',
nargs='?',
default=None,
help='script name')
# create the parser for the "plist" command
parser_plist = subparsers.add_parser('plist',
help='list all processes in screen')
parser_plist.add_argument('screen_name',
help='screen name')
# create the parser for the "psck" command
parser_psck = subparsers.add_parser('psck',
help='check processes in screen')
parser_psck.add_argument('screen_name',
help='screen name')
parser_psck.add_argument('patterns',
nargs='?',
default=None,
help='patterns of entry')
# create the parser for the "plist" command
parser_pkill = subparsers.add_parser('pkill',
help='kill all processes in screen')
parser_pkill.add_argument('screen_name',
help='screen name')
parser_pkill.add_argument('--force',
dest='force',
action='store_true',
default=False,
help='force kill')
return parser.parse_args(sys.argv[1:])
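
# Illustrative command-line usage (a sketch based on the subcommands defined in
# _parse_cli_arguments above; the "gossc" entry-point name is an assumption):
#   gossc init build-screen --lines 20000
#   echo "make -j4" | gossc exec build-screen
#   gossc plist build-screen
#   gossc pkill build-screen --force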
def _find_screens(screen_name):
command = ['screen', '-ls', screen_name]
process = subprocess.Popen(command, stdout=subprocess.PIPE)
output, unused_err = process.communicate()
unused_retcode = process.poll() # `screen -ls` always return 1
screens = []
screen_suffix = "." + screen_name
for raw_line in io.BytesIO(output):
if not raw_line.startswith("\t"):
continue
screen_sockname = raw_line.strip().partition("\t")[0]
if screen_sockname.endswith(screen_suffix):
screen_pid = int(screen_sockname.partition(".")[0])
screens.append(screen_pid)
return screens
def init_screen(namespace):
screen_name = namespace.screen_name
screens = _find_screens(screen_name)
if not screens:
_log_info("create screen [{screen_name}]", screen_name=screen_name)
command = ['screen', '-dmS', screen_name,
'-h', str(namespace.lines)]
subprocess.call(command)
else:
command = ['screen', '-x', str(screens[0]),
'-p', '0', '-X', 'eval', 'stuff ^U']
subprocess.call(command)
def exec_jobs(namespace):
screen_name = namespace.screen_name
script_name = namespace.script_name
screens = _find_screens(screen_name)
if not screens:
_log_error("screen not exists [{screen_name}]",
screen_name=screen_name)
return
if script_name is not None:
try:
stream = open(script_name, 'r')
except IOError:
_log_error("script not exists [{script_name}]",
script_name=script_name)
return
else:
stream = sys.stdin
script_key = 'x'
screen_pid = screens[0]
script_fd, script_path = tempfile.mkstemp(prefix='gospel-')
os.write(script_fd, '\n') # add an additional '\n' ahead of the script
for line in stream:
os.write(script_fd, line.rstrip('\r\n') + '\n')
os.close(script_fd)
command = ['screen', '-x', str(screen_pid),
'-X', 'readreg', script_key, script_path]
subprocess.call(command)
command = ['screen', '-x', str(screen_pid),
'-p', '0', '-X', 'paste', script_key]
subprocess.call(command)
os.remove(script_path)
def _get_processes_in_screen(screen_pid, with_cmdline=False):
if psutil is None:
_log_error("No module named 'psutil'")
return
screen_proc = psutil.Process(screen_pid)
if psutil.version_info[0] >= 2:
# psutil >= 2.0
get_name = operator.methodcaller('name')
get_cmdline = operator.methodcaller('cmdline')
get_children = operator.methodcaller('children')
else:
get_name = operator.attrgetter('name')
get_cmdline = operator.attrgetter('cmdline')
get_children = operator.methodcaller('get_children')
for level3_proc in get_children(screen_proc):
if get_name(level3_proc) == 'login':
# pstree: screen -- login -- sh
level2_proc_list = get_children(level3_proc)
else:
# pstree: screen -- sh
level2_proc_list = [level3_proc]
for level2_proc in level2_proc_list:
for level1_proc in get_children(level2_proc):
if with_cmdline:
yield level1_proc.pid, get_cmdline(level1_proc)
else:
yield level1_proc.pid
def plist_jobs(namespace):
screen_name = namespace.screen_name
screens = _find_screens(screen_name)
if not screens:
_log_error("screen not exists [{screen_name}]",
screen_name=screen_name)
return
for child_pid in _get_processes_in_screen(screens[0]):
_log_info("{child_pid}", child_pid=child_pid)
def psck_jobs(namespace):
screen_name = namespace.screen_name
screens = _find_screens(screen_name)
if not screens:
_log_error("screen not exists [{screen_name}]",
screen_name=screen_name)
return
patterns = namespace.patterns
if patterns is None:
stream = sys.stdin
else:
stream = patterns.splitlines()
entries = []
for line in stream:
line = line.strip()
if not line:
continue
patterns = []
for regex in line.split('&&'):
regex = regex.strip()
if not regex:
continue
patterns.append(re.compile(regex))
if patterns:
entries.append((line, tuple(patterns)))
if not entries:
return
mismatched = 0
processes = dict(_get_processes_in_screen(screens[0], with_cmdline=True))
for line, patterns in entries:
matched_pid = None
for child_pid, arguments in processes.iteritems():
if all(any(pattern.search(arg)
for arg in arguments)
for pattern in patterns):
matched_pid = child_pid
break
if matched_pid is None:
mismatched += 1
_log_error('{pid}\t{entry}', pid='NIL', entry=line)
else:
processes.pop(matched_pid, None)
_log_info('{pid}\t{entry}', pid=matched_pid, entry=line)
if mismatched == len(entries):
exit(code=255)
else:
exit(code=mismatched)
def pkill_jobs(namespace):
screen_name = namespace.screen_name
screens = _find_screens(screen_name)
if not screens:
_log_error("screen not exists [{screen_name}]",
screen_name=screen_name)
return
if namespace.force:
sig = signal.SIGKILL
else:
sig = signal.SIGINT
for child_pid in _get_processes_in_screen(screens[0]):
os.kill(child_pid, sig)
def main():
namespace = _parse_cli_arguments()
{
'init': init_screen,
'exec': exec_jobs,
'plist': plist_jobs,
'psck': psck_jobs,
'pkill': pkill_jobs,
}[namespace.action](namespace)
if __name__ == '__main__':
main()
| mit | 7,736,834,664,173,986,000 | 30.754448 | 77 | 0.545781 | false |
kevinseelbach/generic_utils | src/generic_utils/json_utils.py | 1 | 7686 | """
Functions which support creating dicts (mappings) with interesting structures and simplify getting or setting
values which may be nested deeply within the object.
"""
from __future__ import absolute_import
# stdlib
import collections
def query_json_struct_from_path(json_struct, path):
"""
Query the json structure given the path expression
:param json_struct: A json structure / dictionary
:param path: The path to use to locate the data value being requested
:return:
"""
if json_struct is None:
return None
assert isinstance(json_struct, collections.Mapping)
if path is None or not (isinstance(path, str) or isinstance(path, str)):
return None
else:
return path_query(json_struct, path.split('.'))
def path_query(json_struct, path_elts, default_val=None):
"""
    Query the json structure given an array of path elements
:param json_struct: A json structure / dictionary
:param path_elts: The elements in the path to follow
    :param default_val: The value to return if there is no value at the given path
:return:
"""
if not json_struct or not isinstance(json_struct, collections.Mapping):
return default_val
elif not path_elts:
return default_val
elif len(path_elts) == 1:
return json_struct.get(path_elts[0], default_val)
else: # len(path_elts) > 1
next_step = json_struct.get(path_elts[0], None)
        return path_query(next_step, path_elts[1:], default_val)
def increment_json_value_from_path(json_struct, path, value):
"""
Increment the numeric value for the path, creating the path if necessary
:param json_struct: The json object to increment
:param path: The path to the selected numeric value
    :param value: The value to add; use negative values to subtract
:return:
"""
if json_struct is None:
json_struct = {}
assert isinstance(json_struct, collections.Mapping)
default_val = 0
path_elts = path.split('.')
previous_val = path_query(json_struct, path_elts, default_val=default_val)
new_val = previous_val + value
return update_json_struct_add(json_struct, path_elts, new_val)
def multi_update_json_struct(json_struct, new_attr_vals, delete_data=False):
"""
This function will do multiple calls to update_json_struct_from_path on the same json record
:param json_struct: The input json record
:param new_attr_vals: A dictionary containing, for each update, an entry with the key as a path
expression to the field being updated and the value being the new value for the field
:param delete_data: True if you want to remove the information from the json as opposed to adding it
:return: The updated json record
"""
if new_attr_vals:
try:
for key, val in new_attr_vals.items():
json_struct = update_json_struct_from_path(json_struct, key, val, delete_data=delete_data)
except AttributeError:
pass
return json_struct
def update_json_struct_from_path(json_struct, path, value, delete_data=False):
"""
Update the json struct element at path, as directed
:param json_struct: The json struct to update
:param path: The path to the element, as a path string, e.g. 'a.b.c'
:param value: The value you want to add/delete
:param delete_data: True if you want to delete the value, False if you want to add it
:return:
"""
if json_struct is None:
json_struct = {}
assert isinstance(json_struct, collections.Mapping)
if path is None:
# No place to update this value, so ignore
return json_struct
path_elts = path.split('.')
if not delete_data:
return update_json_struct_add(json_struct, path_elts, value) if path else json_struct
else:
return update_json_struct_delete(json_struct, path_elts, value) if path else json_struct
def make_json_struct(path_elts, value):
"""
Make a new json structure with a single path, with its endpoint set to value
:param path_elts: The elements of the path to traverse in the json struct
to reach the value
    :param value: The value to set at the end of the path
:return: The created json struct
"""
new_struct = dict()
if not path_elts or len(path_elts) == 0:
new_struct = None
elif len(path_elts) == 1:
new_struct[path_elts[0]] = value
else:
new_struct[path_elts[0]] = make_json_struct(path_elts[1:], value)
return new_struct
def update_json_struct_add(json_struct, path_elts, value):
"""
Update the json struct element at path, as directed
:param json_struct: The json struct to update
:param path_elts: The path to the element, as a path string, e.g. 'a.b.c'
:param value: The value you want to add/delete
:return:
"""
if json_struct is None:
json_struct = {}
assert isinstance(json_struct, collections.Mapping)
if not path_elts or len(path_elts) == 0:
updated = json_struct
elif json_struct == {}:
updated = make_json_struct(path_elts, value)
else:
key = path_elts[0]
val = json_struct.get(key, None)
updated = dict(json_struct)
if len(path_elts) == 1:
            # if both the value to be updated and the new value are lists, then extend the existing list.
if key in updated and isinstance(value, list) and isinstance(updated[key], list):
updated[key].extend(value)
# Need to remove duplicates
updated[key] = list(set(updated[key]))
else:
updated[key] = value
else:
rest_of_path = path_elts[1:]
if not val or not isinstance(val, collections.Mapping):
updated[key] = make_json_struct(rest_of_path, value)
else:
updated[key] = update_json_struct_add(val, rest_of_path, value)
return updated
def update_json_struct_delete(json_struct, path_elts, value):
"""
Update the json struct element at path, as directed
:param json_struct: The json struct to update
:param path_elts: The path to the element, as a path string, e.g. 'a.b.c'
:param value: The value you want to add/delete
:return:
"""
if json_struct is None or json_struct == {}:
return json_struct
if not path_elts or len(path_elts) == 0:
return json_struct
else:
key = path_elts[0]
val = json_struct.get(key, None)
updated = dict(json_struct)
if len(path_elts) == 1:
# if both the value to be updated, and the input value are lists,
# then remove the input elements from the existing list.
            original = updated.get(key)
if not value or original == value:
# Just clear out the field
updated.pop(key, None)
if updated == {}:
updated = None # Need to be able to clear out keys all the way up the path
elif key in updated and isinstance(value, list) and isinstance(updated[key], list):
# Remove the items from the input list from the json struct
updated[key] = [x for x in original if x not in value]
else:
rest_of_path = path_elts[1:]
if val and isinstance(val, collections.Mapping):
new_k = update_json_struct_delete(val, rest_of_path, value)
if new_k:
updated[key] = update_json_struct_delete(val, rest_of_path, value)
else:
updated.pop(key, None)
return updated
| bsd-3-clause | -6,572,456,200,221,115,000 | 36.31068 | 109 | 0.63362 | false |
WalrusCow/euler | Solutions/problem12.py | 1 | 1428 | # Project Euler Problem 12
# Created on: 2012-06-15
# Created by: William McDonald
import math
# Short list of prime numbers under 20
primeList = [2, 3, 5, 7, 11, 13, 17, 19]
last = 21
# Returns True if n is prime, otherwise False
def isPrime(n):
prime = True
for i in primeList:
if n % i == 0:
prime = False
break
if i > math.floor(math.sqrt(n)):
break
return prime
# Return the powers of the divisors of n in a list
def divisors(n):
global last
while last <= n:
if isPrime(last):
primeList.append(last)
last += 2
lst = []
for i in primeList:
c = 0
while n % i == 0:
n /= i
c += 1
lst.append(c)
return lst
# Returns the number of divisors of two numbers
# represented by lists of the exponents of their
# prime factorization
def numDivisors(l1, l2):
lst = []
while len(l1) < len(l2):
l1.append(0)
while len(l2) < len(l1):
l2.append(0)
for i in range(len(l1)):
lst.append(l1[i] + l2[i] + 1)
lst[0] -= 1
return reduce(lambda x, y: x * y, lst, 1)
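
# Illustrative check (not part of the original solution): divisors(2) and
# divisors(3) give exponent lists starting [1, 0, ...] and [0, 1, ...];
# numDivisors combines them as (1+0+1)*(0+1+1) and then lowers the exponent of
# 2 by one to account for dividing n*m by 2, giving 1*2 = 2 divisors -- which
# matches the triangle number 2*3/2 = 3 having divisors {1, 3}.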
def findAns():
n = 1
m = n + 1
d = 0
dn = divisors(n)
while d <= 500:
dm = divisors(m)
d = numDivisors(dn, dm)
if d > 500:
return (n * m) / 2
n, m = m, m + 1
dn = dm
ans = findAns()
print(ans)
| mit | -6,129,487,564,086,719,000 | 20.636364 | 50 | 0.52381 | false |
sgenoud/scikit-learn | sklearn/lda.py | 1 | 9466 | """
The :mod:`sklearn.lda` module implements Linear Discriminant Analysis (LDA).
"""
# Authors: Matthieu Perrot
# Mathieu Blondel
import warnings
import numpy as np
from scipy import linalg, ndimage
from .base import BaseEstimator, ClassifierMixin, TransformerMixin
from .utils.extmath import logsumexp
class LDA(BaseEstimator, ClassifierMixin, TransformerMixin):
"""
Linear Discriminant Analysis (LDA)
A classifier with a linear decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that
all classes share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality
of the input, by projecting it to the most discriminative
directions.
Parameters
----------
n_components: int
Number of components (< n_classes - 1) for dimensionality reduction
priors : array, optional, shape = [n_classes]
Priors on classes
Attributes
----------
`means_` : array-like, shape = [n_classes, n_features]
Class means
`xbar_` : float, shape = [n_features]
Over all mean
`priors_` : array-like, shape = [n_classes]
Class priors (sum to 1)
`covariance_` : array-like, shape = [n_features, n_features]
Covariance matrix (shared by all classes)
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
"""
def __init__(self, n_components=None, priors=None):
self.n_components = n_components
self.priors = np.asarray(priors) if priors is not None else None
if self.priors is not None:
if (self.priors < 0).any():
raise ValueError('priors must be non-negative')
if self.priors.sum() != 1:
print 'warning: the priors do not sum to 1. Renormalizing'
self.priors = self.priors / self.priors.sum()
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""
Fit the LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariance : boolean
If True the covariance matrix (shared by all classes) is computed
and stored in `self.covariance_` attribute.
"""
X = np.asarray(X)
y = np.asarray(y)
if y.dtype.char.lower() not in ('b', 'h', 'i'):
# We need integer values to be able to use
# ndimage.measurements and np.bincount on numpy >= 2.0.
# We currently support (u)int8, (u)int16 and (u)int32.
# Note that versions of scipy >= 0.8 can also accept
# (u)int64. We however don't support it for backwards
# compatibility.
y = y.astype(np.int32)
if X.ndim != 2:
raise ValueError('X must be a 2D array')
if X.shape[0] != y.shape[0]:
raise ValueError(
'Incompatible shapes: X has %s samples, while y '
'has %s' % (X.shape[0], y.shape[0]))
n_samples = X.shape[0]
n_features = X.shape[1]
classes = np.unique(y)
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
classes_indices = [(y == c).ravel() for c in classes]
if self.priors is None:
counts = np.array(ndimage.measurements.sum(
np.ones(n_samples, dtype=y.dtype), y, index=classes))
self.priors_ = counts / float(n_samples)
else:
self.priors_ = self.priors
# Group means n_classes*n_features matrix
means = []
Xc = []
cov = None
if store_covariance:
cov = np.zeros((n_features, n_features))
for group_indices in classes_indices:
Xg = X[group_indices, :]
meang = Xg.mean(0)
means.append(meang)
# centered group data
Xgc = Xg - meang
Xc.append(Xgc)
if store_covariance:
cov += np.dot(Xgc.T, Xgc)
if store_covariance:
cov /= (n_samples - n_classes)
self.covariance_ = cov
self.means_ = np.asarray(means)
Xc = np.concatenate(Xc, 0)
# ----------------------------
# 1) within (univariate) scaling by with classes std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = float(1) / (n_samples - n_classes)
# ----------------------------
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
# Scaling of within covariance is: V' 1/S
scaling = (V[:rank] / std).T / S[:rank]
## ----------------------------
## 3) Between variance scaling
# Overall mean
xbar = np.dot(self.priors_, self.means_)
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(means - xbar).T).T, scaling)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use svd to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
# compose the scalings
self.scaling = np.dot(scaling, V.T[:, :rank])
self.xbar_ = xbar
# weight vectors / centroids
self.coef_ = np.dot(self.means_ - self.xbar_, self.scaling)
self.intercept_ = -0.5 * np.sum(self.coef_ ** 2, axis=1) + \
np.log(self.priors_)
self.classes = classes
return self
def decision_function(self, X):
"""
This function return the decision function values related to each
class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes]
"""
X = np.asarray(X)
# center and scale data
X = np.dot(X - self.xbar_, self.scaling)
return np.dot(X, self.coef_.T) + self.intercept_
def transform(self, X):
"""
Project the data so as to maximize class separation (large separation
between projected class means and small variance within each class).
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
"""
X = np.asarray(X)
# center and scale data
X = np.dot(X - self.xbar_, self.scaling)
n_comp = X.shape[1] if self.n_components is None else self.n_components
return np.dot(X, self.coef_[:n_comp].T)
def predict(self, X):
"""
This function does classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self.decision_function(X)
y_pred = self.classes[d.argmax(1)]
return y_pred
def predict_proba(self, X):
"""
This function return posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes]
"""
values = self.decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""
This function return posterior log-probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes]
"""
values = self.decision_function(X)
loglikelihood = (values - values.max(axis=1)[:, np.newaxis])
normalization = logsumexp(loglikelihood, axis=1)
return loglikelihood - normalization[:, np.newaxis]
| bsd-3-clause | -2,355,859,689,835,530,000 | 32.807143 | 79 | 0.553243 | false |
abossi/42projects | api/app.py | 1 | 1356 | from flask import Flask, send_from_directory, jsonify, request
from flask_socketio import SocketIO, emit
import settings
import os
app = Flask(__name__, static_url_path='')
socketio = SocketIO(app)
@app.route('/', methods=['GET'])
def home():
return send_from_directory('../front', 'index.html')
@app.route('/front/<path:path>', methods=['GET'])
def get_extern(path):
return send_from_directory('../front', path)
@app.route('/requestFileOpen', methods=['GET'])
def requestFileOpen():
if not request.args or not 'file' in request.args:
return jsonify({
"status": "error",
"message": "error in json"
}), 400
try:
with open(os.path.join(settings.PROJECT_PATH, request.args['file'])) as f:
lines = f.read()
    except FileNotFoundError:
        return jsonify({
            "status": "error",
            "message": "file not found"
        }), 404
    except PermissionError:
        return jsonify({
            "status": "error",
            "message": "permission denied"
        }), 403
return jsonify({
"file": request.args['file'],
"content": lines
})
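
# Illustrative request (a sketch; assumes the app is served on localhost:5000,
# which is not configured anywhere in this file):
#   curl 'http://localhost:5000/requestFileOpen?file=notes.txt'
#   -> {"file": "notes.txt", "content": "..."}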
@socketio.on('connect', namespace='/chat')
def test_connect():
emit('my response', {'data': 'Connected'})
@socketio.on('my event')
def handle_my_custom_event(json):
print('received json: ' + str(json)) | mit | -6,426,062,721,653,113,000 | 27.270833 | 82 | 0.584808 | false |
gotostack/neutron-lbaas | neutron_lbaas/drivers/haproxy/synchronous_namespace_driver.py | 1 | 26298 | # Copyright 2014-2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import socket
import uuid
import netaddr
from neutron.agent.common import config
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.common import exceptions
from neutron import context as ncontext
from neutron.extensions import portbindings
from neutron.i18n import _LE, _LW
from neutron.openstack.common import service
from neutron.plugins.common import constants
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from neutron_lbaas.drivers import driver_base
from neutron_lbaas.extensions import loadbalancerv2
from neutron_lbaas.services.loadbalancer.agent import agent as lb_agent
from neutron_lbaas.services.loadbalancer import constants as lb_const
from neutron_lbaas.services.loadbalancer.drivers.haproxy import jinja_cfg
from neutron_lbaas.services.loadbalancer.drivers.haproxy \
import namespace_driver
LOG = logging.getLogger(__name__)
NS_PREFIX = 'nlbaas-'
STATS_TYPE_BACKEND_REQUEST = 2
STATS_TYPE_BACKEND_RESPONSE = '1'
STATS_TYPE_SERVER_REQUEST = 4
STATS_TYPE_SERVER_RESPONSE = '2'
# Do not want v1 instances to be in same directory as v2
STATE_PATH_V2_APPEND = 'v2'
DEFAULT_INTERFACE_DRIVER = 'neutron.agent.linux.interface.OVSInterfaceDriver'
cfg.CONF.register_opts(namespace_driver.OPTS, 'haproxy')
cfg.CONF.register_opts(lb_agent.OPTS, 'haproxy')
cfg.CONF.register_opts(interface.OPTS)
cfg.CONF.register_opts(config.INTERFACE_DRIVER_OPTS, 'haproxy')
def get_ns_name(namespace_id):
return NS_PREFIX + namespace_id
class SimpleHaproxyStatsService(service.Service):
def __init__(self, driver):
super(SimpleHaproxyStatsService, self).__init__()
self.driver = driver
def start(self):
super(SimpleHaproxyStatsService, self).start()
self.tg.add_timer(self.driver.conf.haproxy.periodic_interval,
self.driver.periodic_tasks,
None,
None)
class HaproxyNSDriver(driver_base.LoadBalancerBaseDriver):
def __init__(self, plugin):
super(HaproxyNSDriver, self).__init__(plugin)
self.conf = cfg.CONF
self.state_path = os.path.join(
self.conf.haproxy.loadbalancer_state_path, STATE_PATH_V2_APPEND)
if not self.conf.haproxy.interface_driver:
self.conf.haproxy.interface_driver = DEFAULT_INTERFACE_DRIVER
try:
vif_driver = importutils.import_object(
self.conf.haproxy.interface_driver, self.conf)
except ImportError:
with excutils.save_and_reraise_exception():
msg = (_LE('Error importing interface driver: %s')
% self.conf.haproxy.interface_driver)
LOG.exception(msg)
self.vif_driver = vif_driver
# instantiate managers here
self.load_balancer = LoadBalancerManager(self)
self.listener = ListenerManager(self)
self.pool = PoolManager(self)
self.member = MemberManager(self)
self.health_monitor = HealthMonitorManager(self)
self.admin_ctx = ncontext.get_admin_context()
self.deployed_loadbalancer_ids = set()
self._deploy_existing_instances()
SimpleHaproxyStatsService(self).start()
def _deploy_existing_instances(self):
dirs = self._retrieve_deployed_instance_dirs()
loadbalancers = self._retrieve_db_loadbalancers_from_dirs(dirs)
loadbalancer_ids = [loadbalancer.id for loadbalancer in loadbalancers]
self.deployed_loadbalancer_ids.update(loadbalancer_ids)
for loadbalancer in loadbalancers:
try:
self.update_instance(loadbalancer)
except RuntimeError:
# do not stop anything this is a minor error
LOG.warn(_LW("Existing load balancer %s could not be deployed"
" on the system.") % loadbalancer.id)
def _retrieve_deployed_instance_dirs(self):
if not os.path.exists(self.state_path):
os.makedirs(self.state_path)
return [dir for dir in os.listdir(self.state_path)
if os.path.isdir(os.path.join(self.state_path, dir))]
def _retrieve_db_loadbalancers_from_dirs(self, dirs):
loadbalancers = []
for dir in dirs:
try:
db_lb = self.plugin.db.get_loadbalancer(self.admin_ctx, dir)
loadbalancers.append(db_lb)
except loadbalancerv2.EntityNotFound:
# Doesn't exist in database so clean up
self._delete_instance_from_system(dir)
continue
return loadbalancers
def _plug_vip_port(self, context, port):
port_dict = self.plugin.db._core_plugin.get_port(context, port.id)
port_dict.update(self._build_port_dict())
self.plugin.db._core_plugin.update_port(
context,
port.id,
{'port': port_dict}
)
def _build_port_dict(self):
return {'admin_state_up': True,
'device_owner': 'neutron:{0}'.format(
constants.LOADBALANCER),
'device_id': str(uuid.uuid5(uuid.NAMESPACE_DNS,
str(self.conf.host))),
portbindings.HOST_ID: self.conf.host}
def _get_state_file_path(self, loadbalancer_id, kind,
ensure_state_dir=True):
"""Returns the file name for a given kind of config file."""
confs_dir = os.path.abspath(os.path.normpath(self.state_path))
conf_dir = os.path.join(confs_dir, loadbalancer_id)
if ensure_state_dir:
if not os.path.isdir(conf_dir):
os.makedirs(conf_dir, 0o755)
return os.path.join(conf_dir, kind)
def _populate_subnets(self, context, port):
for fixed_ip in port.fixed_ips:
fixed_ip.subnet = self.plugin.db._core_plugin.get_subnet(
context, fixed_ip.subnet_id)
def _plug(self, context, namespace, port, reuse_existing=True):
self._plug_vip_port(context, port)
interface_name = self.vif_driver.get_device_name(port)
if ip_lib.device_exists(interface_name, namespace):
if not reuse_existing:
raise exceptions.PreexistingDeviceFailure(
dev_name=interface_name
)
else:
self.vif_driver.plug(
port.network_id,
port.id,
interface_name,
port.mac_address,
namespace=namespace
)
self._populate_subnets(context, port)
cidrs = [
'%s/%s' % (ip.ip_address,
netaddr.IPNetwork(ip.subnet['cidr']).prefixlen)
for ip in port.fixed_ips
]
self.vif_driver.init_l3(interface_name, cidrs,
namespace=namespace)
gw_ip = port.fixed_ips[0].subnet.get('gateway_ip')
if not gw_ip:
host_routes = port.fixed_ips[0].subnet.get('host_routes', [])
for host_route in host_routes:
if host_route['destination'] == "0.0.0.0/0":
gw_ip = host_route['nexthop']
break
if gw_ip:
cmd = ['route', 'add', 'default', 'gw', gw_ip]
ip_wrapper = ip_lib.IPWrapper(namespace=namespace)
ip_wrapper.netns.execute(cmd, check_exit_code=False)
# When delete and re-add the same vip, we need to
# send gratuitous ARP to flush the ARP cache in the Router.
gratuitous_arp = self.conf.haproxy.send_gratuitous_arp
if gratuitous_arp > 0:
for ip in port.fixed_ips:
cmd_arping = ['arping', '-U',
'-I', interface_name,
'-c', gratuitous_arp,
ip.ip_address]
ip_wrapper.netns.execute(cmd_arping, check_exit_code=False)
def _unplug(self, namespace, port_id):
port_stub = {'id': port_id}
interface_name = self.vif_driver.get_device_name(
namespace_driver.Wrap(port_stub))
self.vif_driver.unplug(interface_name, namespace=namespace)
def _spawn(self, loadbalancer, extra_cmd_args=()):
namespace = get_ns_name(loadbalancer.id)
conf_path = self._get_state_file_path(loadbalancer.id, 'haproxy.conf')
pid_path = self._get_state_file_path(loadbalancer.id,
'haproxy.pid')
sock_path = self._get_state_file_path(loadbalancer.id,
'haproxy_stats.sock')
user_group = self.conf.haproxy.user_group
state_path = self._get_state_file_path(loadbalancer.id, '')
jinja_cfg.save_config(conf_path, loadbalancer, sock_path, user_group,
state_path)
cmd = ['haproxy', '-f', conf_path, '-p', pid_path]
cmd.extend(extra_cmd_args)
ns = ip_lib.IPWrapper(namespace=namespace)
ns.netns.execute(cmd)
# remember deployed loadbalancer id
self.deployed_loadbalancer_ids.add(loadbalancer.id)
def _get_backend_stats(self, parsed_stats):
for stats in parsed_stats:
if stats.get('type') == STATS_TYPE_BACKEND_RESPONSE:
unified_stats = dict((k, stats.get(v, ''))
for k, v in jinja_cfg.STATS_MAP.items())
return unified_stats
return {}
def _get_servers_stats(self, parsed_stats):
res = {}
for stats in parsed_stats:
if stats.get('type') == STATS_TYPE_SERVER_RESPONSE:
res[stats['svname']] = {
lb_const.STATS_STATUS: (constants.INACTIVE
if stats['status'] == 'DOWN'
else constants.ACTIVE),
lb_const.STATS_HEALTH: stats['check_status'],
lb_const.STATS_FAILED_CHECKS: stats['chkfail']
}
return res
def _get_stats_from_socket(self, socket_path, entity_type):
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(socket_path)
s.send('show stat -1 %s -1\n' % entity_type)
raw_stats = ''
chunk_size = 1024
while True:
chunk = s.recv(chunk_size)
raw_stats += chunk
if len(chunk) < chunk_size:
break
return self._parse_stats(raw_stats)
except socket.error as e:
LOG.warn(_LW('Error while connecting to stats socket: %s'), e)
return {}
def _parse_stats(self, raw_stats):
stat_lines = raw_stats.splitlines()
if len(stat_lines) < 2:
return []
stat_names = [name.strip('# ') for name in stat_lines[0].split(',')]
res_stats = []
for raw_values in stat_lines[1:]:
if not raw_values:
continue
stat_values = [value.strip() for value in raw_values.split(',')]
res_stats.append(dict(zip(stat_names, stat_values)))
return res_stats
def _collect_and_store_stats(self):
for loadbalancer_id in self.deployed_loadbalancer_ids:
loadbalancer = self.plugin.db.get_loadbalancer(self.admin_ctx,
loadbalancer_id)
stats = self.get_stats(loadbalancer)
self.plugin.db.update_loadbalancer_stats(
self.admin_ctx, loadbalancer.id, stats)
if 'members' in stats:
self._set_member_status(self.admin_ctx, loadbalancer,
stats['members'])
def _get_members(self, loadbalancer):
for listener in loadbalancer.listeners:
if listener.default_pool:
for member in listener.default_pool.members:
yield member
def _set_member_status(self, context, loadbalancer, members_stats):
for member in self._get_members(loadbalancer):
if member.id in members_stats:
status = members_stats[member.id].get('status')
if status and status == constants.ACTIVE:
self.plugin.db.update_status(
context, self.member.model_class, member.id,
operating_status=lb_const.ONLINE)
else:
self.plugin.db.update_status(
context, self.member.model_class, member.id,
operating_status=lb_const.OFFLINE)
def _remove_config_directory(self, loadbalancer_id):
conf_dir = os.path.dirname(
self._get_state_file_path(loadbalancer_id, ''))
if os.path.isdir(conf_dir):
shutil.rmtree(conf_dir)
if loadbalancer_id in self.deployed_loadbalancer_ids:
            # If it is not tracked, there was nothing to remove in the first place
self.deployed_loadbalancer_ids.remove(loadbalancer_id)
def _cleanup_namespace(self, loadbalancer_id):
namespace = get_ns_name(loadbalancer_id)
ns = ip_lib.IPWrapper(namespace=namespace)
try:
for device in ns.get_devices(exclude_loopback=True):
if ip_lib.device_exists(device.name):
self.vif_driver.unplug(device.name, namespace=namespace)
except RuntimeError as re:
            LOG.warn(_LW('An error happened on namespace cleanup: '
'%s') % re.message)
ns.garbage_collect_namespace()
def _kill_processes(self, loadbalancer_id):
pid_path = self._get_state_file_path(loadbalancer_id, 'haproxy.pid')
# kill the process
namespace_driver.kill_pids_in_file(pid_path)
def _unplug_vip_port(self, loadbalancer):
namespace = get_ns_name(loadbalancer.id)
if loadbalancer.vip_port_id:
self._unplug(namespace, loadbalancer.vip_port_id)
def _delete_instance_from_system(self, loadbalancer_id):
self._kill_processes(loadbalancer_id)
self._cleanup_namespace(loadbalancer_id)
self._remove_config_directory(loadbalancer_id)
@log_helpers.log_method_call
def periodic_tasks(self, *args):
try:
self._collect_and_store_stats()
except Exception:
LOG.exception(_LE("Periodic task failed."))
def create_instance(self, context, loadbalancer):
namespace = get_ns_name(loadbalancer.id)
self._plug(context, namespace, loadbalancer.vip_port)
self._spawn(loadbalancer)
def update_instance(self, loadbalancer):
pid_path = self._get_state_file_path(loadbalancer.id,
'haproxy.pid')
extra_args = ['-sf']
extra_args.extend(p.strip() for p in open(pid_path, 'r'))
self._spawn(loadbalancer, extra_args)
def delete_instance(self, loadbalancer, cleanup_namespace=False):
self._kill_processes(loadbalancer.id)
# unplug the ports
self._unplug_vip_port(loadbalancer)
# delete all devices from namespace;
# used when deleting orphans and vip_port_id is not known for
# loadbalancer_id
if cleanup_namespace:
self._cleanup_namespace(loadbalancer.id)
self._remove_config_directory(loadbalancer.id)
def exists(self, loadbalancer):
namespace = get_ns_name(loadbalancer.id)
root_ns = ip_lib.IPWrapper()
socket_path = self._get_state_file_path(
loadbalancer.id, 'haproxy_stats.sock', False)
if root_ns.netns.exists(namespace) and os.path.exists(socket_path):
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(socket_path)
return True
except socket.error:
pass
return False
def get_stats(self, loadbalancer):
socket_path = self._get_state_file_path(loadbalancer.id,
'haproxy_stats.sock',
False)
if os.path.exists(socket_path):
parsed_stats = self._get_stats_from_socket(
socket_path,
entity_type=(STATS_TYPE_BACKEND_REQUEST |
STATS_TYPE_SERVER_REQUEST))
lb_stats = self._get_backend_stats(parsed_stats)
lb_stats['members'] = self._get_servers_stats(parsed_stats)
return lb_stats
else:
LOG.warn(_LW('Stats socket not found for load balancer %s'),
loadbalancer.id)
return {}
class LoadBalancerManager(driver_base.BaseLoadBalancerManager):
def refresh(self, context, loadbalancer):
super(LoadBalancerManager, self).refresh(context, loadbalancer)
if not self.deployable(loadbalancer):
#TODO(brandon-logan): Ensure there is a way to sync the change
#later. Periodic task perhaps.
return
if self.driver.exists(loadbalancer):
self.driver.update_instance(loadbalancer)
else:
self.driver.create_instance(context, loadbalancer)
def delete(self, context, loadbalancer):
super(LoadBalancerManager, self).delete(context, loadbalancer)
try:
self.driver.delete_instance(loadbalancer)
self.successful_completion(context, loadbalancer, delete=True)
except Exception as e:
self.failed_completion(context, loadbalancer)
raise e
def create(self, context, loadbalancer):
super(LoadBalancerManager, self).create(context, loadbalancer)
        # If the load balancer has no listeners, do nothing because haproxy
        # will not start without a tcp port. Consider this successful anyway.
if not loadbalancer.listeners:
self.successful_completion(context, loadbalancer)
return
try:
self.refresh(context, loadbalancer)
except Exception as e:
self.failed_completion(context, loadbalancer)
raise e
self.successful_completion(context, loadbalancer)
def stats(self, context, loadbalancer):
super(LoadBalancerManager, self).stats(context, loadbalancer)
return self.driver.get_stats(loadbalancer)
def update(self, context, old_loadbalancer, loadbalancer):
super(LoadBalancerManager, self).update(context, old_loadbalancer,
loadbalancer)
try:
self.refresh(context, loadbalancer)
except Exception as e:
self.failed_completion(context, loadbalancer)
raise e
self.successful_completion(context, loadbalancer)
def deployable(self, loadbalancer):
if not loadbalancer:
return False
acceptable_listeners = [
listener for listener in loadbalancer.listeners
if (listener.provisioning_status != constants.PENDING_DELETE and
listener.admin_state_up)]
return (bool(acceptable_listeners) and loadbalancer.admin_state_up and
loadbalancer.provisioning_status != constants.PENDING_DELETE)
class ListenerManager(driver_base.BaseListenerManager):
def _remove_listener(self, loadbalancer, listener_id):
index_to_remove = None
for index, listener in enumerate(loadbalancer.listeners):
if listener.id == listener_id:
index_to_remove = index
loadbalancer.listeners.pop(index_to_remove)
def update(self, context, old_listener, new_listener):
super(ListenerManager, self).update(context, old_listener,
new_listener)
try:
self.driver.load_balancer.refresh(context,
new_listener.loadbalancer)
except Exception as e:
self.failed_completion(context, new_listener)
raise e
self.successful_completion(context, new_listener)
def create(self, context, listener):
super(ListenerManager, self).create(context, listener)
try:
self.driver.load_balancer.refresh(context, listener.loadbalancer)
except Exception as e:
self.failed_completion(context, listener)
raise e
self.successful_completion(context, listener)
def delete(self, context, listener):
super(ListenerManager, self).delete(context, listener)
loadbalancer = listener.loadbalancer
self._remove_listener(loadbalancer, listener.id)
try:
if len(loadbalancer.listeners) > 0:
self.driver.load_balancer.refresh(context, loadbalancer)
else:
# delete instance because haproxy will throw error if port is
# missing in frontend
self.driver.delete_instance(loadbalancer)
except Exception as e:
self.failed_completion(context, listener)
raise e
self.successful_completion(context, listener, delete=True)
class PoolManager(driver_base.BasePoolManager):
def update(self, context, old_pool, new_pool):
super(PoolManager, self).update(context, old_pool, new_pool)
try:
self.driver.load_balancer.refresh(context,
new_pool.listener.loadbalancer)
except Exception as e:
self.failed_completion(context, new_pool)
raise e
self.successful_completion(context, new_pool)
def create(self, context, pool):
        super(PoolManager, self).create(context, pool)
try:
self.driver.load_balancer.refresh(context,
pool.listener.loadbalancer)
except Exception as e:
self.failed_completion(context, pool)
raise e
self.successful_completion(context, pool)
def delete(self, context, pool):
super(PoolManager, self).delete(context, pool)
loadbalancer = pool.listener.loadbalancer
pool.listener.default_pool = None
try:
# just refresh because haproxy is fine if only frontend is listed
self.driver.load_balancer.refresh(context, loadbalancer)
except Exception as e:
self.failed_completion(context, pool)
raise e
self.successful_completion(context, pool, delete=True)
class MemberManager(driver_base.BaseMemberManager):
def _remove_member(self, pool, member_id):
index_to_remove = None
for index, member in enumerate(pool.members):
if member.id == member_id:
index_to_remove = index
pool.members.pop(index_to_remove)
def update(self, context, old_member, new_member):
super(MemberManager, self).update(context, old_member, new_member)
try:
self.driver.load_balancer.refresh(
context, new_member.pool.listener.loadbalancer)
except Exception as e:
self.failed_completion(context, new_member)
raise e
self.successful_completion(context, new_member)
def create(self, context, member):
super(MemberManager, self).create(context, member)
try:
self.driver.load_balancer.refresh(
context, member.pool.listener.loadbalancer)
except Exception as e:
self.failed_completion(context, member)
raise e
self.successful_completion(context, member)
def delete(self, context, member):
super(MemberManager, self).delete(context, member)
self._remove_member(member.pool, member.id)
try:
self.driver.load_balancer.refresh(
context, member.pool.listener.loadbalancer)
except Exception as e:
self.failed_completion(context, member)
raise e
self.successful_completion(context, member, delete=True)
class HealthMonitorManager(driver_base.BaseHealthMonitorManager):
def update(self, context, old_hm, new_hm):
super(HealthMonitorManager, self).update(context, old_hm, new_hm)
try:
self.driver.load_balancer.refresh(
context, new_hm.pool.listener.loadbalancer)
except Exception as e:
self.failed_completion(context, new_hm)
raise e
self.successful_completion(context, new_hm)
def create(self, context, hm):
super(HealthMonitorManager, self).create(context, hm)
try:
self.driver.load_balancer.refresh(
context, hm.pool.listener.loadbalancer)
except Exception as e:
self.failed_completion(context, hm)
raise e
self.successful_completion(context, hm)
def delete(self, context, hm):
super(HealthMonitorManager, self).delete(context, hm)
hm.pool.healthmonitor = None
try:
self.driver.load_balancer.refresh(context,
hm.pool.listener.loadbalancer)
except Exception as e:
self.failed_completion(context, hm)
raise e
self.successful_completion(context, hm, delete=True)
| apache-2.0 | 5,295,248,444,146,570,000 | 38.427286 | 79 | 0.599589 | false |
messagebird/python-rest-api | tests/test_number.py | 1 | 3373 | import unittest
from unittest.mock import Mock
from messagebird import Client, ErrorException
class TestNumber(unittest.TestCase):
def test_available_numbers_list(self):
http_client = Mock()
http_client.request.return_value = '{"items":[{"number":"3197010260188","country":"NL","region":"","locality":"","features":["sms","voice"],"type":"mobile"}],"limit":20,"count":1}'
numbers = Client('', http_client).available_numbers_list('NL', {'number': 319})
http_client.request.assert_called_once_with('available-phone-numbers/NL', 'GET', {'number': 319, 'limit': 20, 'offset': 0})
self.assertEqual(1, numbers.count)
self.assertEqual(1, len(numbers.items))
self.assertEqual('3197010260188', numbers.items[0].number)
def test_purchase_number(self):
http_client = Mock()
http_client.request.return_value = '{"number":"31971234567","country":"NL","region":"Haarlem","locality":"Haarlem","features":["sms","voice"],"tags":[],"type":"landline_or_mobile","status":"active","createdAt":"2019-04-25T14:04:04Z","renewalAt":"2019-05-25T00:00:00Z"}'
number = Client('', http_client).purchase_number('31971234567', 'NL', 1)
http_client.request.assert_called_once_with(
'phone-numbers', 'POST',
{
"number": "31971234567",
"countryCode": "NL",
"billingIntervalMonths": 1
}
)
self.assertEqual('Haarlem', number.region)
self.assertEqual(["sms", "voice"], number.features)
def test_delete_number(self):
http_client = Mock()
http_client.request.return_value = '{}'
Client('', http_client).delete_number('31971234567')
http_client.request.assert_called_once_with('phone-numbers/31971234567', 'DELETE', None)
def test_delete_number_invalid(self):
http_client = Mock()
http_client.request.return_value = '{"errors": [{"code": 20, "description": "number not found", "parameter": null}]}'
with self.assertRaises(ErrorException):
Client('', http_client).delete_number('non-existent-number')
http_client.request.assert_called_once_with('phone-numbers/non-existent-number', 'DELETE', None)
def test_purchased_number(self):
http_client = Mock()
http_client.request.return_value = '{"number":"31612345670","country":"NL","region":"Texel","locality":"Texel","features":["sms","voice"],"tags":[],"type":"mobile","status":"active"}'
number = Client('', http_client).purchased_number('31612345670')
http_client.request.assert_called_once_with('phone-numbers/31612345670', 'GET', None)
self.assertEqual('Texel', number.locality)
def test_purchased_numbers_list(self):
http_client = Mock()
http_client.request.return_value = '{"items":[{"number":"3197010260188","country":"NL","region":"","locality":"","features":["sms","voice"],"type":"mobile"}],"limit":20,"count":1}'
numbers = Client('', http_client).purchased_numbers_list({'number': 319}, 40, 2)
http_client.request.assert_called_once_with('phone-numbers', 'GET', {'number': 319, 'limit': 40, 'offset': 2})
self.assertEqual(1, numbers.count)
self.assertEqual(1, len(numbers.items))
self.assertEqual('3197010260188', numbers.items[0].number)
| bsd-2-clause | -8,275,679,311,754,160,000 | 43.973333 | 277 | 0.628817 | false |
hanteng/pyCHNadm1 | pyCHNadm1/02_converting_csv_pkl.py | 1 | 2853 | # -*- coding: utf-8 -*-
# Discrimination knows no bounds; turning back is the shore. Keys rise and fall; feelings real, feelings illusory.
import ConfigParser
Config = ConfigParser.ConfigParser()
Config.read("config.ini")
dir_src = Config.get("Directory",'source')
dir_out = Config.get("Directory",'outcome')
fn_suffix = Config.get("Filename",'suffix')
fn_datasrc= Config.get("Filename",'datasource')
fn_mapping =Config.get("Filename",'mapping')
dir_db = Config.get("Directory",'database')
fn_db = Config.get("Filename",'database')
fn_meta = Config.get("Filename",'meta')
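# Illustrative sketch of the expected config.ini layout: the section and option
# names mirror the Config.get() calls above, but every value below is a made-up
# example rather than something taken from the original project.
#   [Directory]
#   source   = ./source
#   outcome  = ./outcome
#   database = ./database
#   [Filename]
#   suffix     = pkl
#   datasource = stats.gov.cn_*.csv
#   mapping    = CHN_adm1_mapping.csv
#   database   = CHNadm1.pkl
#   meta       = CHNadm1_meta.pkl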
import os
import glob
list_src= glob.glob(os.path.join(dir_src,fn_datasrc))
list_indicators=[x.split("stats.gov.cn_")[1].split(".")[0] for x in list_src]
#>>> list_indicators
#['broadband_accounts', 'broadband_ports', 'domainnames', 'GDP', 'IPop', 'LP', 'webpages', 'websites']
import pandas as pd
dfCN=pd.read_pickle(os.path.join(dir_db,'.'.join([fn_mapping.split('.')[0], fn_suffix])))
dict_zhs_iso=dfCN.set_index("name_zhs")['ISO']
from StringIO import StringIO
import codecs
ldf={}
mdf={}
for i,ind in enumerate(list_indicators):
#i=0
#ind=list_indicators[i]
fn=list_src[i]
s = StringIO()
meta={}
with codecs.open(fn, 'r', encoding="gb2312") as f:
read_data = f.readline()
meta['db']=read_data.strip().split(u":")[1]
read_data = f.readline()
meta['indicator']=read_data.strip().split(u":")[1]
read_rest = f.readlines()
for n,line in enumerate(read_rest[:32]):
s.write(line.encode("utf8"))
meta['note'] =u"".join(read_rest[32:]).replace("\r\n", " ")
mdf[ind]=meta
s.seek(0)
df = pd.read_csv(s, encoding="utf8", sep=",", na_values='',keep_default_na=False)
s.close()
## Cleaning the data and translating the names
columns=[x.replace(u"\u5e74","") for x in df.columns] #u"\u5e74" == u"年"
columns=["geocode",]+[int(x) for x in columns[1:]]
df.columns=columns
df['geocode']=[dict_zhs_iso.get(x,None) for x in df.geocode]
df=df.set_index('geocode')
## Saving to Pickle
fn_output=os.path.join(dir_out,'.'.join([ind, fn_suffix]))
df.to_pickle(fn_output)
## Storing together
ldf[ind]=df
print fn_output,
CNp =pd.Panel(ldf)
CNmeta=pd.DataFrame(mdf).transpose()
## Saving to Pickle database
fn_output=os.path.join(dir_db,fn_db)
CNp.to_pickle(fn_output)
fn_output=os.path.join(dir_db,fn_meta)
CNmeta.to_pickle(fn_output)
'''
>>> CNmeta.index
Index([u'GDP', u'IPop', u'LP', u'broadband_accounts', u'broadband_ports', u'domainnames', u'webpages', u'websites'], dtype='object')
>>> CNmeta.columns
Index([u'db', u'indicator', u'note'], dtype='object')
>>> CNp.items
Index([u'GDP', u'IPop', u'LP', u'broadband_accounts', u'broadband_ports', u'domainnames', u'webpages', u'websites'], dtype='object')
>>> CNp['GDP',:,:]
>>> CNp['IPop',:,:2013]
'''
| gpl-3.0 | -2,508,967,665,729,506,300 | 27.938144 | 132 | 0.633773 | false |
UCSBarchlab/PyRTL | tests/test_memblock.py | 1 | 15471 | import unittest
import pyrtl
from random import randint
# -------------------------------------------------------------------
class RTLMemBlockDesignBase(unittest.TestCase):
def setUp(self):
pyrtl.reset_working_block()
self.bitwidth = 3
self.addrwidth = 5
self.output1 = pyrtl.Output(self.bitwidth, "output1")
self.output2 = pyrtl.Output(self.bitwidth, "output2")
self.mem_read_address1 = pyrtl.Input(self.addrwidth, name='mem_read_address1')
self.mem_read_address2 = pyrtl.Input(self.addrwidth, name='mem_read_address2')
self.mem_write_address = pyrtl.Input(self.addrwidth, name='mem_write_address')
self.mem_write_data = pyrtl.Input(self.bitwidth, name='mem_write_data')
self.memory = pyrtl.MemBlock(bitwidth=self.bitwidth, addrwidth=self.addrwidth,
name='self.memory', max_read_ports=None)
def tearDown(self):
pyrtl.reset_working_block()
def test_memblock_simple(self):
self.output1 <<= self.memory[self.mem_read_address1]
self.output2 <<= self.memory[self.mem_read_address2]
self.memory[self.mem_write_address] <<= self.mem_write_data
pyrtl.working_block().sanity_check()
def test_memblock_assign_with_extention(self):
big_output = pyrtl.Output(self.bitwidth + 1, "big_output")
big_output <<= self.memory[self.mem_read_address1]
self.output1 <<= 1
self.output2 <<= 2
self.memory[self.mem_write_address] <<= self.mem_write_data
pyrtl.working_block().sanity_check()
def test_memblock_with_write_enable_with_equalsign(self):
we = pyrtl.Const(1, bitwidth=1)
self.output1 <<= self.memory[self.mem_read_address1]
self.output2 <<= self.memory[self.mem_read_address2]
self.memory[self.mem_write_address] <<= \
pyrtl.MemBlock.EnabledWrite(self.mem_write_data, enable=we)
pyrtl.working_block().sanity_check()
def test_memblock_direct_assignment_error(self):
with self.assertRaises(pyrtl.PyrtlError):
self.memory[self.mem_write_address] = self.mem_write_data
def test_memblock_connection_with_ints(self):
self.memory[self.mem_write_address] <<= 5
# test does not check functionality, just that it will generate hardware
def test_memblock_to_memblock_direct_operation(self):
temp = (self.memory[self.mem_read_address1] == self.memory[self.mem_read_address2])
temp = (self.memory[self.mem_read_address1] != self.memory[self.mem_read_address2])
temp = (self.memory[self.mem_read_address1] & self.memory[self.mem_read_address2])
temp = (self.memory[self.mem_read_address1] | self.memory[self.mem_read_address2])
temp = (self.memory[self.mem_read_address1] + self.memory[self.mem_read_address2])
temp = (self.memory[self.mem_read_address1] - self.memory[self.mem_read_address2])
temp2 = (self.memory[self.mem_read_address1] * self.memory[self.mem_read_address2])
self.output1 <<= temp
self.output2 <<= temp2
pyrtl.working_block().sanity_check()
def test_2read_1write(self):
small_memory = pyrtl.MemBlock(bitwidth=self.bitwidth, addrwidth=self.addrwidth,
name='small_memory', max_read_ports=2, max_write_ports=1)
temp = small_memory[self.mem_read_address1] # read
temp2 = small_memory[self.mem_read_address2] # read
self.output1 <<= temp
self.output2 <<= temp2
small_memory[self.mem_write_address] <<= pyrtl.Const(6) # write
pyrtl.working_block().sanity_check()
def test_over_max_read_ports(self):
lim_memory = pyrtl.MemBlock(bitwidth=self.bitwidth, addrwidth=self.addrwidth,
name='lim_memory', max_read_ports=8)
for i in range(lim_memory.max_read_ports):
self.output1 <<= lim_memory[self.mem_read_address1]
with self.assertRaises(pyrtl.PyrtlError):
self.output2 <<= lim_memory[self.mem_read_address2]
def test_over_max_write_ports(self):
lim_memory = pyrtl.MemBlock(bitwidth=self.bitwidth, addrwidth=self.addrwidth,
name='lim_memory', max_write_ports=4)
for i in range(lim_memory.max_write_ports):
lim_memory[self.mem_write_address] <<= pyrtl.Const(6)
with self.assertRaises(pyrtl.PyrtlError):
lim_memory[self.mem_write_address] <<= pyrtl.Const(6)
def test_memblock_added_user_named(self):
mem_name = 'small_memory'
small_memory = pyrtl.MemBlock(bitwidth=self.bitwidth, addrwidth=self.addrwidth,
name=mem_name, max_read_ports=2, max_write_ports=1)
self.assertIs(pyrtl.working_block().get_memblock_by_name(mem_name), small_memory)
def test_memblock_added_default_named(self):
mem = pyrtl.MemBlock(32, 8)
self.assertIs(pyrtl.working_block().get_memblock_by_name(mem.name), mem)
class MemIndexedTests(unittest.TestCase):
def setUp(self):
pyrtl.reset_working_block()
def test_memindexed_name(self):
self.mem = pyrtl.MemBlock(8, 8)
x = self.mem[2]
x.name = 'test_name'
self.assertEqual(x.name, 'test_name')
self.assertEqual(x.wire.name, 'test_name')
def test_read_memindexed_ilshift(self):
self.mem = pyrtl.MemBlock(8, 8)
self.mem_val_map = {self.mem: {0: 5, 1: 4, 2: 3, 3: 2, 4: 1, 5: 0}}
a = pyrtl.Input(3)
x = self.mem[a]
y = pyrtl.Output(8, 'y')
z = pyrtl.Output(8, 'z')
y <<= x
z <<= x
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace, memory_value_map=self.mem_val_map)
for i in range(5):
sim.step({
a: i
})
self.assertEqual(sim.inspect(y), 5 - i)
self.assertEqual(sim.inspect(z), 5 - i)
self.assertEqual(self.mem.num_read_ports, 1)
def test_write_memindexed_ilshift(self):
self.mem1 = pyrtl.MemBlock(8, 8)
self.mem2 = pyrtl.MemBlock(8, 8, asynchronous=True)
self.mem_val_map = {self.mem1: {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5}}
addr1 = pyrtl.Input(3)
addr2 = pyrtl.Input(3) # will be one behind addr1
inp = pyrtl.Input(3)
x = self.mem1[addr1] # value follows addr1
self.mem2[x] <<= inp
out = pyrtl.Output(9, name='out')
out <<= self.mem2[addr2] # one behind addr1, so one behind x
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace, memory_value_map=self.mem_val_map)
for i in range(5):
sim.step({
addr1: i,
addr2: 0 if i == 0 else i - 1, # one behind addr1
inp: 5 - i
})
self.assertEqual(sim.inspect(out), 0 if i == 0 else 5 - (i - 1))
self.assertEqual(self.mem1.num_read_ports, 1) # 2 b/c of the output read
self.assertEqual(self.mem2.num_write_ports, 1)
def test_read_memindexed_ior(self):
self.mem = pyrtl.MemBlock(8, 8)
self.mem_val_map = {self.mem: {0: 5, 1: 4, 2: 3, 3: 2, 4: 1, 5: 0}}
decide = pyrtl.Input(1)
ind = pyrtl.Input(3)
x = self.mem[ind]
y = pyrtl.Output(8, 'y')
z = pyrtl.Output(8, 'z')
w = pyrtl.Output(8, 'w')
with pyrtl.conditional_assignment:
with decide:
y |= x
z |= x
with pyrtl.otherwise:
w |= x
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace, memory_value_map=self.mem_val_map)
for i in range(5):
sim.step({
decide: i % 2,
ind: i
})
if i == 0:
y_exp, z_exp, w_exp = 0, 0, 5
elif i == 1:
y_exp, z_exp, w_exp = 4, 4, 0
elif i == 2:
y_exp, z_exp, w_exp = 0, 0, 3
elif i == 3:
y_exp, z_exp, w_exp = 2, 2, 0
else:
y_exp, z_exp, w_exp = 0, 0, 1
self.assertEqual(sim.inspect(y), y_exp)
self.assertEqual(sim.inspect(z), z_exp)
self.assertEqual(sim.inspect(w), w_exp)
self.assertEqual(self.mem.num_read_ports, 1)
def test_write_memindexed_ior(self):
self.mem1 = pyrtl.MemBlock(8, 8)
self.mem2 = pyrtl.MemBlock(8, 8, asynchronous=True)
self.mem_val_map = {self.mem1: {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5}}
decide = pyrtl.Input(1)
inp = pyrtl.Input(3)
addr1 = pyrtl.Input(3)
addr2 = pyrtl.Input(3) # will be one behind addr1
zero = pyrtl.Const(0, 3)
x = self.mem1[addr1]
x.name = 'x'
out = pyrtl.Output(8, name='out')
with pyrtl.conditional_assignment:
with decide:
self.mem2[x] |= inp
with pyrtl.otherwise:
self.mem2[x] |= zero
out <<= self.mem2[addr2] # one behind addr1, so one behind x
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace, memory_value_map=self.mem_val_map)
for i in range(5):
sim.step({
decide: i % 2,
addr1: i,
addr2: 0 if i == 0 else i - 1, # one behind addr1
inp: 5 - i
})
if (i == 0) | (i == 1) | (i == 3):
out_exp = 0
elif i == 2:
out_exp = 4
else:
out_exp = 2
self.assertEqual(sim.inspect(out), out_exp)
self.assertEqual(self.mem1.num_read_ports, 1)
self.assertEqual(self.mem2.num_write_ports, 1)
class RTLRomBlockWiring(unittest.TestCase):
data = list(range(2**5))
def setUp(self):
pyrtl.reset_working_block()
self.bitwidth = 3
self.addrwidth = 5
self.output1 = pyrtl.Output(self.bitwidth, "output1")
self.in1 = pyrtl.Input(self.addrwidth, name='mem_write_address')
self.in2 = pyrtl.Input(self.addrwidth, name='mem_write_address')
self.memory = pyrtl.RomBlock(bitwidth=self.bitwidth, addrwidth=self.addrwidth,
name='self.memory', romdata=self.data, max_read_ports=None)
def tearDown(self):
pyrtl.reset_working_block()
def test_read(self):
self.output1 <<= self.memory[self.in1]
def test_direct_assignment_error(self):
with self.assertRaises(pyrtl.PyrtlError):
self.memory[self.in1] = self.in2
def test_int_index_error(self):
with self.assertRaises(pyrtl.PyrtlError):
x = self.memory[3]
def test_other_non_wire_index_error(self):
with self.assertRaises(pyrtl.PyrtlError):
y = self.memory[()]
with self.assertRaises(pyrtl.PyrtlError):
y = self.memory["test"]
with self.assertRaises(pyrtl.PyrtlError):
y = self.memory["15"]
with self.assertRaises(pyrtl.PyrtlError):
y = self.memory[False]
def test_write(self):
with self.assertRaises(pyrtl.PyrtlError):
self.memory[self.in1] <<= 5
# test does not check functionality, just that it will generate hardware
def test_rom_to_rom_direct_operation(self):
temp = (self.memory[self.in1] == self.memory[self.in2])
temp = (self.memory[self.in1] != self.memory[self.in2]) # != creates two nets
temp = (self.memory[self.in1] & self.memory[self.in2])
temp = (self.memory[self.in1] | self.memory[self.in2])
temp = (self.memory[self.in1] + self.memory[self.in2])
temp = (self.memory[self.in1] - self.memory[self.in2])
temp = (self.memory[self.in1] * self.memory[self.in2])
block = pyrtl.working_block()
self.assertEqual(len(block.logic), 22)
self.output1 <<= temp
class RTLRomGetReadData(unittest.TestCase):
def setUp(self):
pyrtl.reset_working_block()
@staticmethod
def sample_roms():
def rom_func(address):
return (2 * address + 1) % 8
rom = pyrtl.RomBlock(3, 3, [2, 4, 7, 1])
romf = pyrtl.RomBlock(3, 3, rom_func)
return rom, romf
def invalid_rom_read(self, rom, address):
with self.assertRaises(pyrtl.PyrtlError):
rom._get_read_data(address)
def test_invalid_address(self):
for rom in self.sample_roms():
self.invalid_rom_read(rom, -1)
self.invalid_rom_read(rom, 8)
self.invalid_rom_read(rom, 5809)
def test_invalid_address_types(self):
for rom in self.sample_roms():
self.invalid_rom_read(rom, 'test')
self.invalid_rom_read(rom, pyrtl.Const(10))
self.invalid_rom_read(rom, [])
self.invalid_rom_read(rom, slice(1, 3))
# self.invalid_rom_read(rom, False) # should this be valid?
def test_invalid_value_function(self):
def bad_func(address):
return str(address)
def bad_func_2(address):
return pyrtl.Const(address)
rom1 = pyrtl.RomBlock(5, 5, ['test', ()])
rom2 = pyrtl.RomBlock(5, 5, [pyrtl.Const(0), bad_func])
romf1 = pyrtl.RomBlock(5, 5, bad_func)
romf2 = pyrtl.RomBlock(5, 5, bad_func_2)
for rom in (rom1, rom2, romf1, romf2):
self.invalid_rom_read(rom, 0)
self.invalid_rom_read(rom, 1)
def test_value_out_of_range(self):
def rom_func(address):
return 2 * (8 - address) + 1
rom1 = pyrtl.RomBlock(3, 3, [15, 8, 7, 1])
romf1 = pyrtl.RomBlock(3, 3, rom_func)
for rom in (rom1, romf1):
self.invalid_rom_read(rom, 0)
self.invalid_rom_read(rom, 1)
def test_out_of_range(self):
for rom in self.sample_roms():
self.invalid_rom_read(rom, -1)
self.invalid_rom_read(rom, 8)
self.invalid_rom_read(rom, 5809)
def test_over_max_read_ports(self):
width = 6
rom = pyrtl.RomBlock(width, width, [2, 4, 7, 1])
for i in range(rom.max_read_ports):
rom_read_address = pyrtl.Input(width)
rom_out = pyrtl.Output(width)
rom_out <<= rom[rom_read_address]
rom_read_address = pyrtl.Input(width)
rom_out = pyrtl.Output(width)
with self.assertRaises(pyrtl.PyrtlError):
rom_out <<= rom[rom_read_address]
def test_valid_get_read(self):
rom, romf = self.sample_roms()
for address, expected in enumerate((2, 4, 7, 1)):
self.assertEqual(rom._get_read_data(address), expected)
for address, expected in enumerate((1, 3, 5, 7, 1)):
self.assertEqual(romf._get_read_data(address), expected)
def test_build_new_roms(self):
width = 6
rom = pyrtl.RomBlock(6, 6, [2, 4, 7, 1], build_new_roms=True)
for i in range(width):
rom_read_address = pyrtl.Input(width)
rom_out = pyrtl.Output(width)
rom_out <<= rom[rom_read_address]
roms = set()
for romNet in pyrtl.working_block().logic_subset('m'):
curr_rom = romNet.op_param[1]
roms.add(curr_rom)
self.assertEqual(len(roms), 3)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -3,061,218,392,473,956,000 | 39.289063 | 96 | 0.572103 | false |
dchabot/ophyd | ophyd/mca.py | 2 | 5086 |
import logging
from collections import OrderedDict
from .signal import (EpicsSignal, EpicsSignalRO)
from .device import Device
from .device import Component as C, DynamicDeviceComponent as DDC
from .areadetector import EpicsSignalWithRBV as SignalWithRBV
logger = logging.getLogger(__name__)
class ROI(Device):
# 'name' is not an allowed attribute
label = C(EpicsSignal, 'NM', lazy=True)
count = C(EpicsSignalRO, '', lazy=True)
net_count = C(EpicsSignalRO, 'N', lazy=True)
preset_count = C(EpicsSignal, 'P', lazy=True)
is_preset = C(EpicsSignal, 'IP', lazy=True)
bkgnd_chans = C(EpicsSignal, 'BG', lazy=True)
hi_chan = C(EpicsSignal, 'HI', lazy=True)
lo_chan = C(EpicsSignal, 'LO', lazy=True)
def __init__(self, prefix, *, read_attrs=None, configuration_attrs=None,
name=None, parent=None, **kwargs):
super().__init__(prefix, read_attrs=read_attrs,
configuration_attrs=configuration_attrs,
name=name, parent=parent, **kwargs)
def add_rois(range_, **kwargs):
'''Add one or more ROIs to an MCA instance
Parameters:
-----------
range_ : sequence of ints
            Must be in the set [0,31]
        By default, an EpicsMCA is initialized with all 32 rois.
        These provide the following Components as EpicsSignals (N=[0,31]):
        EpicsMCA.rois.roiN.(label, count, net_count, preset_count, is_preset,
bkgnd_chans, hi_chan, lo_chan)
'''
defn = OrderedDict()
for roi in range_:
if not (0 <= roi < 32):
raise ValueError('roi must be in the set [0,31]')
attr = 'roi{}'.format(roi)
defn[attr] = (ROI, '.R{}'.format(roi), kwargs)
return defn
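# Illustrative usage sketch (not part of the original module): once an
# EpicsMCARecord (defined below) is instantiated, the ROI components built by
# add_rois() are reachable as attributes. The PV prefix here is a made-up
# example.
#   mca = EpicsMCARecord('MY-IOC:mca1', name='mca')
#   mca.rois.roi0.label.get()       # ROI 0 label   -> PV 'MY-IOC:mca1.R0NM'
#   mca.rois.roi0.count.get()       # ROI 0 counts  -> PV 'MY-IOC:mca1.R0'
#   mca.rois.roi0.lo_chan.put(100)  # ROI 0 low channel -> PV 'MY-IOC:mca1.R0LO'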
class EpicsMCARecord(Device):
'''SynApps MCA Record interface'''
stop_signal = C(EpicsSignal, '.STOP')
preset_real_time = C(EpicsSignal, '.PRTM')
preset_live_time = C(EpicsSignal, '.PLTM')
elapsed_real_time = C(EpicsSignalRO, '.ERTM')
elapsed_live_time = C(EpicsSignalRO, '.ELTM')
spectrum = C(EpicsSignalRO, '.VAL')
background = C(EpicsSignalRO, '.BG')
mode = C(EpicsSignal, '.MODE', string=True)
rois = DDC(add_rois(range(0, 32)))
def __init__(self, prefix, *, read_attrs=None, configuration_attrs=None,
name=None, parent=None, **kwargs):
default_read_attrs = ['spectrum', 'preset_real_time',
'elapsed_real_time']
default_configuration_attrs = ['preset_real_time']
if read_attrs is None:
read_attrs = default_read_attrs
if configuration_attrs is None:
configuration_attrs = default_configuration_attrs
super().__init__(prefix, read_attrs=read_attrs,
configuration_attrs=configuration_attrs,
name=name, parent=parent, **kwargs)
# could arguably be made a configuration_attr instead...
self.stage_sigs.update([(self.mode, 'PHA')])
def stop(self):
self.stop_signal.put(1)
class EpicsMCA(EpicsMCARecord):
start = C(EpicsSignal, 'Start')
erase_start = C(EpicsSignal, 'EraseStart', trigger_value=1)
class EpicsDXP(Device):
preset_mode = C(EpicsSignal, 'PresetMode', string=True)
# NOTE: all SignalWithRBV are "lazy=True"
# Trigger Filter PVs
trigger_peaking_time = C(SignalWithRBV, 'TriggerPeakingTime')
trigger_threshold = C(SignalWithRBV, 'TriggerThreshold')
trigger_gap_time = C(SignalWithRBV, 'TriggerGapTime')
max_width = C(SignalWithRBV, 'MaxWidth')
# Energy Filter PVs
peaking_time = C(SignalWithRBV, 'PeakingTime')
energy_threshold = C(SignalWithRBV, 'EnergyThreshold')
gap_time = C(SignalWithRBV, 'GapTime')
# Baseline PVs
baseline_cut_percent = C(SignalWithRBV, 'BaselineCutPercent')
baseline_cut_enable = C(SignalWithRBV, 'BaselineCutEnable')
baseline_filter_length = C(SignalWithRBV, 'BaselineFilterLength')
baseline_threshold = C(SignalWithRBV, 'BaselineThreshold')
# Misc PVs
preamp_gain = C(SignalWithRBV, 'PreampGain')
detector_polarity = C(SignalWithRBV, 'DetectorPolarity')
reset_delay = C(SignalWithRBV, 'ResetDelay')
decay_time = C(SignalWithRBV, 'DecayTime')
max_energy = C(SignalWithRBV, 'MaxEnergy')
adc_percent_rule = C(SignalWithRBV, 'ADCPercentRule')
# read-only diagnostics
triggers = C(EpicsSignalRO, 'Triggers', lazy=True)
events = C(EpicsSignalRO, 'Events', lazy=True)
overflows = C(EpicsSignalRO, 'Overflows', lazy=True)
underflows = C(EpicsSignalRO, 'Underflows', lazy=True)
input_count_rate = C(EpicsSignalRO, 'InputCountRate', lazy=True)
output_count_rate = C(EpicsSignalRO, 'OutputCountRate', lazy=True)
def __init__(self, prefix, *, read_attrs=None, configuration_attrs=None,
name=None, parent=None, **kwargs):
super().__init__(prefix, read_attrs=read_attrs,
configuration_attrs=configuration_attrs,
name=name, parent=parent, **kwargs)
| bsd-3-clause | -7,716,721,573,256,273,000 | 33.835616 | 76 | 0.638223 | false |
awsteiner/bamr | bamr_ex2.py | 1 | 3952 | # -------------------------------------------------------------------
#
# Copyright (C) 2020, Andrew W. Steiner and Sarah Wellence
#
# This file is part of bamr.
#
# bamr is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# bamr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with bamr. If not, see <http://www.gnu.org/licenses/>.
#
# -------------------------------------------------------------------
# An example of the use of the python bamr interface
import bamr
bp=bamr.bamr_py(b'tews_threep_ligo',3)
bp.settings(inc_baryon_mass=True,addl_quants=True,verbose=2,
norm_max=False,crust_from_L=False,
compute_cthick=True,apply_intsc=True,
cached_intsc=True,prior_eta=True)
bp.add_data_alt("6304",
"data/shb18/6304_H_nopl_syst_wilm.o2",
"data/shb18/6304_He_nopl_syst_wilm.o2",
"like",0.7,"rescaled")
bp.add_data_alt("6397",
"data/shb18/6397_H_syst_wilm.o2",
"data/shb18/6397_He_syst_wilm3.o2",
"like",0.7,"rescaled")
bp.add_data_alt("M13",
"data/shs18/M13_H_rs.o2",
"data/shs18/M13_He_rs.o2",
"like",0.7,"rescaled_0")
bp.add_data_alt("M28",
"data/shb18/M28_H_syst_wilm.o2",
"data/shb18/M28_He_syst_wilm.o2",
"like",0.7,"rescaled")
bp.add_data_alt("M30",
"data/shb18/M30_H_syst_wilm.o2",
"data/shb18/M30_He_syst_wilm.o2",
"like",0.7,"rescaled")
bp.add_data_alt("wCen",
"data/shb18/wCen_H_syst_wilm.o2",
"data/shb18/wCen_H_syst_wilm.o2",
"like",0.7,"rescaled")
bp.add_data_alt("X7",
"data/shb18/X7_H_syst_wilm.o2",
"data/shb18/X7_He_syst_wilm.o2",
"like",0.7,"rescaled")
bp.add_data_alt("1810b",
"data/nks15/1810.o2",
"data/nks15/1810.o2",
"weights",0.7,"mcarlo")
bp.add_data_alt("1724b",
"data/nks15/1724.o2",
"data/nks15/1724.o2",
"weights",0.7,"mcarlo")
bp.add_data_alt("1702",
"data/nat17/1702_D_X_int.o2",
"data/nat17/1702_D_X_int.o2",
"avgs",0.7,"hist2_table")
bp.add_data_alt("0030",
"data/nicer/0030_st_pst.o2",
"data/nicer/0030_st_pst.o2",
"prob",0.7,"table3d")
(iret,npar,names,units,low,high)=bp.bamr_init()
print('init return value (0 is success):',iret)
print('number of parameters:',npar)
print('parameter names:',names)
print('parameter units:',units)
print('parameter lower limits:',low)
print('parameter upper limits:',high)
lw=bp.compute_point([1.322748e+01,4.884697e-01,3.257073e+01,
4.455746e+01,4.381964e-01,3.203634e+00,
5.517590e+00,6.987111e+00,1.182554e+00,
1.197660e+00,2.462268e-01,5.957640e-01,
5.420301e-01,4.968797e-01,6.401870e-01,
5.838393e-01,7.508562e-01,8.855128e-01,
9.716234e-01,5.129025e-01,8.293522e-01,
8.446897e-01,4.968797e-01,-1.816410e+00,
-1.635335e+00,-1.841114e+00,-4.185872e-01,
-1.178018e+00,-1.149620e+00,-1.801794e-01,
-4.783507e-01,-8.689520e-01,-8.779179e-01,
-1.635335e+00],verbose=3)
print('log weight',lw)
bp.summarize_tables()
| gpl-3.0 | 5,815,714,334,031,064,000 | 39.326531 | 71 | 0.541498 | false |
CLVsol/oehealth | oehealth_professional/oehealth_tag.py | 1 | 1876 | # -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from openerp.osv import orm, fields
class oehealth_tag(orm.Model):
_inherit = 'oehealth.tag'
_columns = {
'professional_ids': fields.many2many('oehealth.professional',
'oehealth_professional_tag_rel',
'tag_id',
'professional_id',
'Professionals'),
}
oehealth_tag()
| agpl-3.0 | 1,110,120,343,766,397,000 | 55.848485 | 80 | 0.405117 | false |
GoWebyCMS/goweby-core-dev | blog/views.py | 1 | 2842 | from django.shortcuts import render, get_object_or_404
from django.views.generic import ListView
from itertools import chain
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Count
# NOTE: assumed import -- SearchQuerySet used in post_search below appears to
# come from django-haystack; the original file did not include this import.
from haystack.query import SearchQuerySet
from .forms import SearchForm
from .models import Post, Category, Tag
# Create your views here.
"""
class PostListView(ListView):
queryset = []
posts = Post.published.all()
categories = Category.objects.all()
tags = Tag.objects.all()
queryset = list(chain(posts, categories, tags))
context_object_name = 'posts'
paginate_by = 3
template_name = 'blog/post/list.html'
"""
def post_list(request, tag_slug=None):
    # grab the post list as an object list for pagination
object_list = Post.published.all()
# Tags: let user list all posts tagged with a specific tag
tag = None
if tag_slug:
tag = get_object_or_404(Tag, slug=tag_slug)
object_list = object_list.filter(tags__in=[tag])
    # Pagination
paginator = Paginator(object_list, 3) # 3 posts in each page
page = request.GET.get('page')
try:
posts = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer deliver the first page
posts = paginator.page(1)
except EmptyPage:
# If page is out of range deliver last page of results
posts = paginator.page(paginator.num_pages)
    # TODO: fetch limited latest projects for the related sidebar widget
return render(request, 'blog/post/list.html', {
'posts': posts,
'page' : page,
'tag' : tag,
})
def post_detail(request, slug):
post = get_object_or_404(Post, slug=slug)
posts = Post.objects.all()
# retrieve a list of similar posts filtering by tag
post_tags_ids = post.tags.values_list('id', flat=True)
similar_posts = Post.published.filter(tags__in=post_tags_ids).exclude(id=post.id)
similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags','-publish')[:4]
return render(request, 'blog/post/detail.html',
{
'post': post,
'similar_posts': similar_posts,
})
def post_search(request):
form = SearchForm()
if 'query' in request.GET:
form = SearchForm(request.GET)
if form.is_valid():
cd = form.cleaned_data
results = SearchQuerySet().models(Post).filter(content=cd['query']).load_all()
# count total results
total_results = results.count()
return render(request,
'blog/post/search.html',
{'form': form,
#'cd': cd,
#'results': results,
#'total_results': total_results
})
| mit | -833,982,272,543,700,500 | 31.666667 | 105 | 0.607671 | false |
jamespcole/home-assistant | homeassistant/components/generic/camera.py | 1 | 5982 | """
Support for IP Cameras.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/camera.generic/
"""
import asyncio
import logging
import aiohttp
import async_timeout
import requests
from requests.auth import HTTPDigestAuth
import voluptuous as vol
from homeassistant.const import (
CONF_NAME, CONF_USERNAME, CONF_PASSWORD, CONF_AUTHENTICATION,
HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION, CONF_VERIFY_SSL)
from homeassistant.exceptions import TemplateError
from homeassistant.components.camera import (
PLATFORM_SCHEMA, DEFAULT_CONTENT_TYPE, SUPPORT_STREAM, Camera)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers import config_validation as cv
from homeassistant.util.async_ import run_coroutine_threadsafe
_LOGGER = logging.getLogger(__name__)
CONF_CONTENT_TYPE = 'content_type'
CONF_LIMIT_REFETCH_TO_URL_CHANGE = 'limit_refetch_to_url_change'
CONF_STILL_IMAGE_URL = 'still_image_url'
CONF_STREAM_SOURCE = 'stream_source'
CONF_FRAMERATE = 'framerate'
DEFAULT_NAME = 'Generic Camera'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_STILL_IMAGE_URL): cv.template,
vol.Optional(CONF_STREAM_SOURCE, default=None): vol.Any(None, cv.string),
vol.Optional(CONF_AUTHENTICATION, default=HTTP_BASIC_AUTHENTICATION):
vol.In([HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]),
vol.Optional(CONF_LIMIT_REFETCH_TO_URL_CHANGE, default=False): cv.boolean,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_CONTENT_TYPE, default=DEFAULT_CONTENT_TYPE): cv.string,
vol.Optional(CONF_FRAMERATE, default=2): cv.positive_int,
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
})
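# Illustrative configuration sketch (not part of this module): the keys mirror
# PLATFORM_SCHEMA above; the name, URLs and credentials are made-up examples.
#   camera:
#     - platform: generic
#       name: Front Yard
#       still_image_url: http://192.168.1.10/snapshot.jpg
#       stream_source: rtsp://192.168.1.10:554/stream
#       username: admin
#       password: example-password
#       authentication: basic
#       framerate: 2
#       verify_ssl: false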
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up a generic IP Camera."""
async_add_entities([GenericCamera(hass, config)])
class GenericCamera(Camera):
"""A generic implementation of an IP camera."""
def __init__(self, hass, device_info):
"""Initialize a generic camera."""
super().__init__()
self.hass = hass
self._authentication = device_info.get(CONF_AUTHENTICATION)
self._name = device_info.get(CONF_NAME)
self._still_image_url = device_info[CONF_STILL_IMAGE_URL]
self._stream_source = device_info[CONF_STREAM_SOURCE]
self._still_image_url.hass = hass
self._limit_refetch = device_info[CONF_LIMIT_REFETCH_TO_URL_CHANGE]
self._frame_interval = 1 / device_info[CONF_FRAMERATE]
self._supported_features = SUPPORT_STREAM if self._stream_source else 0
self.content_type = device_info[CONF_CONTENT_TYPE]
self.verify_ssl = device_info[CONF_VERIFY_SSL]
username = device_info.get(CONF_USERNAME)
password = device_info.get(CONF_PASSWORD)
if username and password:
if self._authentication == HTTP_DIGEST_AUTHENTICATION:
self._auth = HTTPDigestAuth(username, password)
else:
self._auth = aiohttp.BasicAuth(username, password=password)
else:
self._auth = None
self._last_url = None
self._last_image = None
@property
def supported_features(self):
"""Return supported features for this camera."""
return self._supported_features
@property
def frame_interval(self):
"""Return the interval between frames of the mjpeg stream."""
return self._frame_interval
def camera_image(self):
"""Return bytes of camera image."""
return run_coroutine_threadsafe(
self.async_camera_image(), self.hass.loop).result()
async def async_camera_image(self):
"""Return a still image response from the camera."""
try:
url = self._still_image_url.async_render()
except TemplateError as err:
_LOGGER.error(
"Error parsing template %s: %s", self._still_image_url, err)
return self._last_image
if url == self._last_url and self._limit_refetch:
return self._last_image
        # aiohttp doesn't support DigestAuth yet
if self._authentication == HTTP_DIGEST_AUTHENTICATION:
def fetch():
"""Read image from a URL."""
try:
response = requests.get(url, timeout=10, auth=self._auth,
verify=self.verify_ssl)
return response.content
except requests.exceptions.RequestException as error:
_LOGGER.error("Error getting camera image: %s", error)
return self._last_image
self._last_image = await self.hass.async_add_job(
fetch)
# async
else:
try:
websession = async_get_clientsession(
self.hass, verify_ssl=self.verify_ssl)
with async_timeout.timeout(10, loop=self.hass.loop):
response = await websession.get(
url, auth=self._auth)
self._last_image = await response.read()
except asyncio.TimeoutError:
_LOGGER.error("Timeout getting image from: %s", self._name)
return self._last_image
except aiohttp.ClientError as err:
_LOGGER.error("Error getting new camera image: %s", err)
return self._last_image
self._last_url = url
return self._last_image
@property
def name(self):
"""Return the name of this device."""
return self._name
@property
def stream_source(self):
"""Return the source of the stream."""
return self._stream_source
| apache-2.0 | 7,431,058,157,576,323,000 | 37.101911 | 79 | 0.636242 | false |
3dfxsoftware/cbss-addons | report_aeroo/wizard/report_print_by_action.py | 1 | 2630 | ##############################################################################
#
# Copyright (c) 2008-2012 Alistek Ltd (http://www.alistek.com) All Rights Reserved.
# General contacts <[email protected]>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This module is GPLv3 or newer and incompatible
# with OpenERP SA "AGPL + Private Use License"!
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from openerp.osv import osv
from openerp.osv import fields
class report_print_by_action(osv.osv_memory):
_name = 'aeroo.print_by_action'
def to_print(self, cr, uid, ids, context=None):
this = self.browse(cr, uid, ids[0], context=context)
report_xml = self.pool.get(context['active_model']).browse(cr, uid, context['active_id'], context=context)
print_ids = eval("[%s]" % this.object_ids, {})
data = {'model':report_xml.model, 'ids':print_ids, 'id':print_ids[0], 'report_type': 'aeroo'}
return {
'type': 'ir.actions.report.xml',
'report_name': report_xml.report_name,
'datas': data,
'context':context
}
_columns = {
'name':fields.text('Object Model', readonly=True),
'object_ids':fields.char('Object IDs', size=250, required=True, help="Comma separated records ID"),
}
def _get_model(self, cr, uid, context):
return self.pool.get(context['active_model']).read(cr, uid, context['active_id'], ['model'], context=context)['model']
_defaults = {
'name': _get_model,
}
| gpl-2.0 | -1,483,548,834,182,775,600 | 40.746032 | 126 | 0.637262 | false |
whiteear/cloudbase-init | cloudbaseinit/tests/plugins/windows/test_winrmcertificateauth.py | 1 | 6477 | # Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit import exception
from cloudbaseinit.plugins.common import base
from cloudbaseinit.plugins.common import constants
class ConfigWinRMCertificateAuthPluginTests(unittest.TestCase):
def setUp(self):
self._ctypes_mock = mock.MagicMock()
self._win32com_mock = mock.MagicMock()
self._pywintypes_mock = mock.MagicMock()
self._moves_mock = mock.MagicMock()
self._module_patcher = mock.patch.dict(
'sys.modules',
{'ctypes': self._ctypes_mock,
'win32com': self._win32com_mock,
'pywintypes': self._pywintypes_mock,
'six.moves': self._moves_mock})
self._module_patcher.start()
self._winreg_mock = self._moves_mock.winreg
self.winrmcert = importlib.import_module(
'cloudbaseinit.plugins.windows.winrmcertificateauth')
self._certif_auth = self.winrmcert.ConfigWinRMCertificateAuthPlugin()
def tearDown(self):
self._module_patcher.stop()
def _test_get_credentials(self, fake_user, fake_password):
mock_shared_data = mock.MagicMock()
mock_shared_data.get.side_effect = [fake_user, fake_password]
if fake_user is None or fake_password is None:
self.assertRaises(exception.CloudbaseInitException,
self._certif_auth._get_credentials,
mock_shared_data)
else:
response = self._certif_auth._get_credentials(mock_shared_data)
expected = [mock.call(constants.SHARED_DATA_USERNAME),
mock.call(constants.SHARED_DATA_PASSWORD)]
self.assertEqual(expected, mock_shared_data.get.call_args_list)
mock_shared_data.__setitem__.assert_called_once_with(
'admin_password', None)
self.assertEqual((fake_user, fake_password), response)
def test_test_get_credentials(self):
self._test_get_credentials(fake_user='fake user',
fake_password='fake password')
def test_test_get_credentials_no_user(self):
self._test_get_credentials(fake_user=None,
fake_password='fake password')
def test_test_get_credentials_no_password(self):
self._test_get_credentials(fake_user='fake user',
fake_password=None)
@mock.patch('cloudbaseinit.plugins.windows.winrmcertificateauth'
'.ConfigWinRMCertificateAuthPlugin._get_credentials')
@mock.patch('cloudbaseinit.utils.windows.winrmconfig.WinRMConfig')
@mock.patch('cloudbaseinit.utils.windows.x509.CryptoAPICertManager.'
'import_cert')
@mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
@mock.patch('cloudbaseinit.utils.windows.security.WindowsSecurityUtils'
'.set_uac_remote_restrictions')
@mock.patch('cloudbaseinit.utils.windows.security.WindowsSecurityUtils'
'.get_uac_remote_restrictions')
def _test_execute(self, get_uac_rs, set_uac_rs, mock_get_os_utils,
mock_import_cert, mock_WinRMConfig,
mock_get_credentials, cert_data, cert_upn):
mock_osutils = mock.MagicMock()
mock_service = mock.MagicMock()
mock_cert_thumprint = mock.MagicMock()
fake_credentials = ('fake user', 'fake password')
mock_get_credentials.return_value = fake_credentials
mock_import_cert.return_value = (mock_cert_thumprint, cert_upn)
mock_WinRMConfig.get_cert_mapping.return_value = True
mock_service.get_client_auth_certs.return_value = [cert_data]
mock_get_os_utils.return_value = mock_osutils
expected_set_token_calls = [mock.call(enable=False),
mock.call(enable=True)]
mock_osutils.check_os_version.side_effect = [True, False]
get_uac_rs.return_value = True
expected_check_version_calls = [mock.call(6, 0), mock.call(6, 2)]
response = self._certif_auth.execute(mock_service,
shared_data='fake data')
if not cert_data:
self.assertEqual((base.PLUGIN_EXECUTION_DONE, False), response)
else:
mock_service.get_client_auth_certs.assert_called_once_with()
self.assertEqual(expected_check_version_calls,
mock_osutils.check_os_version.call_args_list)
mock_get_os_utils.assert_called_once_with()
self.assertEqual(expected_set_token_calls,
set_uac_rs.call_args_list)
mock_get_credentials.assert_called_once_with('fake data')
mock_import_cert.assert_called_once_with(
cert_data, store_name=self.winrmcert.x509.STORE_NAME_ROOT)
mock_WinRMConfig().set_auth_config.assert_called_once_with(
certificate=True)
mock_WinRMConfig().get_cert_mapping.assert_called_once_with(
mock_cert_thumprint, cert_upn)
mock_WinRMConfig().delete_cert_mapping.assert_called_once_with(
mock_cert_thumprint, cert_upn)
mock_WinRMConfig().create_cert_mapping.assert_called_once_with(
mock_cert_thumprint, cert_upn, 'fake user',
'fake password')
self.assertEqual((base.PLUGIN_EXECUTION_DONE, False), response)
def test_execute(self):
cert_data = 'fake cert data'
cert_upn = mock.MagicMock()
self._test_execute(cert_data=cert_data, cert_upn=cert_upn)
def test_execute_no_cert_data(self):
cert_upn = mock.MagicMock()
self._test_execute(cert_data=None, cert_upn=cert_upn)
| apache-2.0 | 8,448,140,944,114,927,000 | 41.611842 | 78 | 0.630076 | false |
SoFolichon/ISN-Twitter | src/lib/mttkinter.py | 1 | 9704 | """Thread-safe version of tkinter.
Copyright (c) 2014, Andrew Barnert
Based on mtTkinter (for Python 2.x), copyright (c) 2009, Allen B. Taylor
This module is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser Public License for more details.
You should have received a copy of the GNU Lesser Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Usage:
import mttkinter as tkinter
# Use "t." as usual.
or
from mttkinter import *
# Use tkinter module definitions as usual.
This module modifies the original tkinter module in memory, making all
functionality thread-safe. It does this by wrapping the Tk class' tk
instance with an object that diverts calls through an event queue when
the call is issued from a thread other than the thread in which the Tk
instance was created. The events are processed in the creation thread
via an 'after' event.
The modified Tk class accepts two additional keyword parameters on its
__init__ method:
mtDebug:
0 = No debug output (default)
1 = Minimal debug output
...
9 = Full debug output
mtCheckPeriod:
Amount of time in milliseconds (default 10) between checks for
out-of-thread events when things are otherwise idle. Decreasing
this value can improve GUI responsiveness, but at the expense of
consuming more CPU cycles.
Note that, because it modifies the original tkinter module (in memory),
other modules that use tkinter (e.g., Pmw) reap the benefits automagically
as long as mttkinter is imported at some point before extra threads are
created.
Author: Allen B. Taylor, [email protected]
Note : this version is from https://github.com/abarnert/mttkinter/issues/1 by https://github.com/Smurf-IV
"""
import queue
import threading
from tkinter import *
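# A minimal usage sketch of the pattern described in the module docstring,
# assuming this module has been imported in the main (GUI) thread so that
# Tk.__init__ is already patched. The function is only defined, never called;
# the names _usage_sketch, root, label and worker are hypothetical.
def _usage_sketch():
    root = Tk()
    label = Label(root, text="waiting...")
    label.pack()
    def worker():
        # Safe from a non-GUI thread: the call is diverted through the event queue.
        label.config(text="updated from a worker thread")
    threading.Thread(target=worker).start()
    root.mainloop()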
class _Tk(object):
"""
Wrapper for underlying attribute tk of class Tk.
"""
def __init__(self, tk, mtDebug=0, mtCheckPeriod=10):
self._tk = tk
# Create the incoming event queue.
self._eventQueue = queue.Queue(1)
# Identify the thread from which this object is being created so we can
# tell later whether an event is coming from another thread.
self._creationThread = threading.currentThread()
# Store remaining values.
self._debug = mtDebug
self._checkPeriod = mtCheckPeriod
def __getattr__(self, name):
# Divert attribute accesses to a wrapper around the underlying tk
# object.
return _TkAttr(self, getattr(self._tk, name))
class _TkAttr(object):
"""
Thread-safe callable attribute wrapper.
"""
def __init__(self, tk, attr):
self._tk = tk
self._attr = attr
def __call__(self, *args, **kwargs):
"""
Thread-safe method invocation.
Diverts out-of-thread calls through the event queue.
Forwards all other method calls to the underlying tk object directly.
"""
# Check if we're in the creation thread.
# noinspection PyProtectedMember
# if threading.currentThread() == self._tk._creationThread:
# fix from https://stackoverflow.com/questions/14073463/mttkinter-doesnt-terminate-threads
if (threading.currentThread() == self._tk._creationThread) \
or isinstance(threading.currentThread(), threading._DummyThread):
# We're in the creation thread; just call the event directly.
# noinspection PyProtectedMember
if self._tk._debug >= 8 or self._tk._debug >= 3 \
and self._attr.__name__ == 'call' \
and len(args) >= 1 \
and args[0] == 'after':
print('Calling event directly: {} {} {}'.format(
self._attr.__name__, args, kwargs))
return self._attr(*args, **kwargs)
else:
# We're in a different thread than the creation thread; enqueue
# the event, and then wait for the response.
responseQueue = queue.Queue(1)
# noinspection PyProtectedMember
if self._tk._debug >= 1:
print('Marshalling event: {} {} {}'.format(
self._attr.__name__, args, kwargs))
# noinspection PyProtectedMember
self._tk._eventQueue.put((self._attr, args, kwargs, responseQueue))
isException, response = responseQueue.get()
# Handle the response, whether it's a normal return value or
# an exception.
if isException:
exType, exValue, exTb = response
raise exType(exValue).with_traceback(exTb)
else:
return response
# Define a hook for class Tk's __init__ method.
def _Tk__init__(self, *args, **kwargs):
# We support some new keyword arguments that the original __init__ method
# doesn't expect, so separate those out before doing anything else.
new_kwnames = ('mtCheckPeriod', 'mtDebug')
new_kwargs = {}
for name, value in kwargs.items():
if name in new_kwnames:
new_kwargs[name] = value
# Handle the modification of kwargs whilst iterating from above for loop
for name in new_kwnames:
kwargs.pop(name, None)
# Call the original __init__ method, creating the internal tk member.
self.__original__init__mttkinter(*args, **kwargs)
# Replace the internal tk member with a wrapper that handles calls from
# other threads.
self.tk = _Tk(self.tk, **new_kwargs)
# Set up the first event to check for out-of-thread events.
self.after_idle(_CheckEvents, self)
# Replace Tk's original __init__ with the hook.
Tk.__original__init__mttkinter = Tk.__init__
Tk.__init__ = _Tk__init__
def _CheckEvents(tk):
"""Event checker event."""
used = False
try:
# Process all enqueued events, then exit.
while True:
try:
# Get an event request from the queue.
# noinspection PyProtectedMember
method, args, kwargs, responseQueue = \
tk.tk._eventQueue.get_nowait()
except:
# noinspection PyProtectedMember
if tk.tk._debug >= 2:
print('Event queue empty')
# No more events to process.
break
else:
# Call the event with the given arguments, and then return
# the result back to the caller via the response queue.
used = True
# noinspection PyProtectedMember
if tk.tk._debug >= 2:
print('Calling event from main thread: {} {} {}'
.format(method.__name__, args, kwargs))
try:
responseQueue.put((False, method(*args, **kwargs)))
except SystemExit as ex:
raise SystemExit(ex)
except Exception:
# Calling the event caused an exception; return the
# exception back to the caller so that it can be raised
# in the caller's thread.
from sys import exc_info
exType, exValue, exTb = exc_info()
responseQueue.put((True, (exType, exValue, exTb)))
finally:
# Schedule to check again. If we just processed an event, check
# immediately; if we didn't, check later.
if used:
tk.after_idle(_CheckEvents, tk)
else:
# noinspection PyProtectedMember
tk.after(tk.tk._checkPeriod, _CheckEvents, tk)
# Test thread entry point.
def _testThread(root):
text = "This is Tcl/Tk version %s" % TclVersion
if TclVersion >= 8.1:
try:
text += "\nThis should be a cedilla: \347"
except NameError:
pass # no unicode support
try:
if root.globalgetvar('tcl_platform(threaded)'):
text += "\nTcl is built with thread support"
else:
raise RuntimeError
except:
text += "\nTcl is NOT built with thread support"
text += "\nmttkinter works with or without Tcl thread support"
label = Label(root, text=text)
label.pack()
button = Button(root, text="Click me!",
command=lambda root=root: root.button.configure(
text="[%s]" % root.button['text']))
button.pack()
root.button = button
quitBtn = Button(root, text="QUIT", command=root.destroy)
quitBtn.pack()
# The following three commands are needed so the window pops
# up on top on Windows...
root.iconify()
root.update()
root.deiconify()
# Simulate button presses...
button.invoke()
root.after(1000, _pressOk, root, button)
# Test button continuous press event.
def _pressOk(root, button):
button.invoke()
try:
root.after(1000, _pressOk, root, button)
except:
pass # Likely we're exiting
# Test. Mostly borrowed from the tkinter module, but the important bits moved
# into a separate thread.
if __name__ == '__main__':
root = Tk(mtDebug=1)
thread = threading.Thread(target=_testThread, args=(root,))
thread.start()
root.mainloop()
thread.join()
| gpl-3.0 | 1,259,108,486,377,189,400 | 34.940741 | 105 | 0.613355 | false |
DonHilborn/DataGenerator | faker/providers/lt_LT/person.py | 1 | 3340 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ..person import Provider as PersonProvider
class Provider(PersonProvider):
formats = (
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{last_name}}, {{first_name}}'
)
first_names = (
'Tomas', 'Lukas', 'Mantas', 'Deividas', 'Arnas', 'Artūras',
'Karolis', 'Dovydas', 'Dominykas', 'Darius', 'Edvinas', 'Jonas',
'Martynas', 'Kajus', 'Donatas', 'Andrius', 'Matas', 'Rokas',
'Augustas', 'Danielius', 'Mindaugas', 'Paulius', 'Marius',
'Armandas', 'Edgaras', 'Jokūbas', 'Nedas', 'Tadas', 'Nerijus',
'Simonas', 'Vytautas', 'Artūras', 'Robertas', 'Eimantas', 'Arijus',
'Nojus', 'Egidijus', 'Aurimas', 'Emilis', 'Laurynas', 'Edvardas',
'Joris', 'Pijus', 'Erikas', 'Domas', 'Vilius', 'Evaldas', 'Justinas',
'Aleksandras', 'Kristupas', 'Gabrielius', 'Benas', 'Gytis', 'Arminas',
'Vakris', 'Tautvydas', 'Domantas', 'Justas', 'Markas', 'Antanas',
'Arūnas', 'Ernestas', 'Aronas', 'Vaidas', 'Ąžuolas', 'Titas', 'Giedrius',
'Ignas', 'Povilas', 'Saulius', 'Julius', 'Arvydas', 'Kęstutis', 'Rytis',
'Aistis', 'Gediminas', 'Algirdas', 'Naglis', 'Irmantas', 'Rolandas',
'Aivaras', 'Simas', 'Faustas', 'Ramūnas', 'Šarūnas', 'Gustas', 'Tajus',
'Dainius', 'Arnoldas', 'Linas', 'Rojus', 'Adomas', 'Žygimantas',
'Ričardas', 'Orestas', 'Kipras', 'Juozas', 'Audrius', 'Romualdas',
'Petras', 'Eleonora', 'Raminta', 'Dovilė', 'Sandra', 'Dominyka', 'Ana',
'Erika', 'Kristina', 'Gintarė', 'Rūta', 'Edita', 'Karina', 'Živilė',
'Jolanta', 'Radvilė', 'Ramunė', 'Svetlana', 'Ugnė', 'Eglė', 'Viktorija',
'Justina', 'Brigita', 'Rasa', 'Marija', 'Giedrė', 'Iveta', 'Sonata',
'Vitalija', 'Adrija', 'Goda', 'Paulina', 'Kornelija', 'Liepa', 'Vakarė',
'Milda', 'Meda', 'Vaida', 'Izabelė', 'Jovita', 'Irma', 'Žemyna', 'Leila',
'Rimantė', 'Mantė', 'Rytė', 'Perla', 'Greta', 'Monika', 'Ieva', 'Indrė',
'Ema', 'Aurelija', 'Smiltė', 'Ingrida', 'Simona', 'Amelija', 'Sigita',
'Olivija', 'Laurita', 'Jorūnė', 'Leticija', 'Vigilija', 'Medėja', 'Laura',
'Agnė', 'Evelina', 'Kotryna', 'Lėja', 'Aušra', 'Neringa', 'Gerda',
'Jurgita', 'Rusnė', 'Aušrinė', 'Rita', 'Elena', 'Ineta', 'Ligita',
'Vasarė', 'Vėjūnė', 'Ignė', 'Gytė', 'Ariana', 'Arielė', 'Vytė', 'Eidvilė',
'Karolina', 'Miglė', 'Viltė', 'Jolanta', 'Enrika', 'Aurėja', 'Vanesa',
'Darija', 'Reda', 'Milana', 'Rugilė', 'Diana'
)
last_names = (
'Kazlauskas', 'Jankauskas', 'Petrauskas', 'Pocius', 'Stankevičius',
'Vsiliauskas', 'Žukauskas', 'Butkus', 'Paulauskas', 'Urbonas',
'Kavaliauskas', 'Sakalauskas', 'Žukauskas', 'Akelis' ,'Ambrasas',
'Kairys', 'Kalvaitis', 'Kalvelis', 'Kalvėnas', 'Kaupas', 'Kiška',
'Gagys', 'Gailius', 'Gailys', 'Gaižauskas', 'Gaičiūnas', 'Galdikas',
'Gintalas', 'Ginzburgas', 'Grinius', 'Gronskis', 'Nagys', 'Naujokas',
'Narušis', 'Nausėda', 'Poška', 'Povilonis'
)
| mit | 6,121,079,894,068,943,000 | 59.685185 | 86 | 0.529753 | false |
JaneliaSciComp/osgpyplusplus | examples/rough_translated1/osgcubemap.py | 1 | 5601 | #!/bin/env python
# Automatically translated python version of
# OpenSceneGraph example program "osgcubemap"
# !!! This program will need manual tuning before it will work. !!!
import sys
from osgpypp import osg
from osgpypp import osgDB
from osgpypp import osgGA
from osgpypp import osgUtil
from osgpypp import osgViewer
# Translated from file 'osgcubemap.cpp'
# OpenSceneGraph example, osgcubemap.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#
#include <osg/Group>
#include <osg/StateSet>
#include <osg/TextureCubeMap>
#include <osg/TexGen>
#include <osg/TexEnvCombine>
#include <osgUtil/ReflectionMapGenerator>
#include <osgUtil/HighlightMapGenerator>
#include <osgUtil/HalfWayMapGenerator>
#include <osgUtil/Optimizer>
#include <osgDB/ReadFile>
#include <osgDB/Registry>
#include <osgGA/TrackballManipulator>
#include <osgGA/FlightManipulator>
#include <osgGA/DriveManipulator>
#include <osgViewer/Viewer>
#include <iostream>
#include <string>
#include <vector>
def create_specular_highlights(node):
ss = node.getOrCreateStateSet()
# create and setup the texture object
tcm = osg.TextureCubeMap()
tcm.setWrap(osg.Texture.WRAP_S, osg.Texture.CLAMP)
tcm.setWrap(osg.Texture.WRAP_T, osg.Texture.CLAMP)
tcm.setWrap(osg.Texture.WRAP_R, osg.Texture.CLAMP)
tcm.setFilter(osg.Texture.MIN_FILTER, osg.Texture.LINEAR_MIPMAP_LINEAR)
tcm.setFilter(osg.Texture.MAG_FILTER, osg.Texture.LINEAR)
# generate the six highlight map images (light direction = [1, 1, -1])
mapgen = osgUtil.HighlightMapGenerator(
osg.Vec3(1, 1, -1), # light direction
osg.Vec4(1, 0.9, 0.8, 1), # light color
8) # specular exponent
mapgen.generateMap()
# assign the six images to the texture object
tcm.setImage(osg.TextureCubeMap.POSITIVE_X, mapgen.getImage(osg.TextureCubeMap.POSITIVE_X))
tcm.setImage(osg.TextureCubeMap.NEGATIVE_X, mapgen.getImage(osg.TextureCubeMap.NEGATIVE_X))
tcm.setImage(osg.TextureCubeMap.POSITIVE_Y, mapgen.getImage(osg.TextureCubeMap.POSITIVE_Y))
tcm.setImage(osg.TextureCubeMap.NEGATIVE_Y, mapgen.getImage(osg.TextureCubeMap.NEGATIVE_Y))
tcm.setImage(osg.TextureCubeMap.POSITIVE_Z, mapgen.getImage(osg.TextureCubeMap.POSITIVE_Z))
tcm.setImage(osg.TextureCubeMap.NEGATIVE_Z, mapgen.getImage(osg.TextureCubeMap.NEGATIVE_Z))
# enable texturing, replacing any textures in the subgraphs
ss.setTextureAttributeAndModes(0, tcm, osg.StateAttribute.OVERRIDE | osg.StateAttribute.ON)
# texture coordinate generation
tg = osg.TexGen()
tg.setMode(osg.TexGen.REFLECTION_MAP)
ss.setTextureAttributeAndModes(0, tg, osg.StateAttribute.OVERRIDE | osg.StateAttribute.ON)
# use TexEnvCombine to add the highlights to the original lighting
te = osg.TexEnvCombine()
te.setCombine_RGB(osg.TexEnvCombine.ADD)
te.setSource0_RGB(osg.TexEnvCombine.TEXTURE)
te.setOperand0_RGB(osg.TexEnvCombine.SRC_COLOR)
te.setSource1_RGB(osg.TexEnvCombine.PRIMARY_COLOR)
te.setOperand1_RGB(osg.TexEnvCombine.SRC_COLOR)
ss.setTextureAttributeAndModes(0, te, osg.StateAttribute.OVERRIDE | osg.StateAttribute.ON)
def main(argv):
# use an ArgumentParser object to manage the program arguments.
arguments = osg.ArgumentParser(argv)
# construct the viewer.
viewer = osgViewer.Viewer()
# load the nodes from the commandline arguments.
rootnode = osgDB.readNodeFiles(arguments)
# if not loaded assume no arguments passed in, try use default mode instead.
if not rootnode : rootnode = osgDB.readNodeFile("cessna.osgt")
if not rootnode :
osg.notify(osg.NOTICE), "Please specify a model filename on the command line."
return 1
# create specular highlights
create_specular_highlights(rootnode)
# run optimization over the scene graph
optimzer = osgUtil.Optimizer()
optimzer.optimize(rootnode)
# add a viewport to the viewer and attach the scene graph.
viewer.setSceneData(rootnode)
# create the windows and run the threads.
viewer.realize()
# now check to see if texture cube map is supported.
    for contextID in range(osg.DisplaySettings.instance().getMaxNumberOfGraphicsContexts()):
        tcmExt = osg.TextureCubeMap.getExtensions(contextID, False)
        if tcmExt:
            if not tcmExt.isCubeMapSupported():
                print("Warning: texture_cube_map not supported by OpenGL drivers, unable to run application.")
                return 1
return viewer.run()
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause | -8,895,560,723,117,979,000 | 36.34 | 109 | 0.725763 | false |
msiemens/PyGitUp | PyGitUp/tests/test_rebase_arguments.py | 1 | 1524 | # System imports
import os
from os.path import join
import pytest
from git import *
from PyGitUp.git_wrapper import RebaseError
from PyGitUp.tests import basepath, write_file, init_master, update_file, testfile_name
test_name = 'rebase-arguments'
repo_path = join(basepath, test_name + os.sep)
def _read_file(path):
with open(path) as f:
return f.read()
def setup():
master_path, master = init_master(test_name)
# Prepare master repo
master.git.checkout(b=test_name)
# Clone to test repo
path = join(basepath, test_name)
master.clone(path, b=test_name)
repo = Repo(path, odbt=GitCmdObjectDB)
assert repo.working_dir == path
# Modify file in master
master_file = update_file(master, test_name)
# Modify file in our repo
contents = _read_file(master_file)
contents = contents.replace('line 1', 'line x')
repo_file = join(path, testfile_name)
write_file(repo_file, contents)
repo.index.add([repo_file])
repo.index.commit(test_name)
# Set git-up.rebase.arguments to '--abort', what results in an
# invalid cmd and thus git returning an error, that we look for.
repo.git.config('git-up.rebase.arguments', '--abort')
def test_rebase_arguments():
""" Run 'git up' with rebasing.arguments """
os.chdir(repo_path)
from PyGitUp.gitup import GitUp
gitup = GitUp(testing=True)
with pytest.raises(RebaseError):
gitup.run()
assert len(gitup.states) == 1
assert gitup.states[0] == 'rebasing'
| mit | -9,036,158,272,451,131,000 | 23.983607 | 87 | 0.674541 | false |
woutersmet/Zeosummer | lib/zeobuilder/gui/visual/camera.py | 2 | 10839 | # Zeobuilder is an extensible GUI-toolkit for molecular model construction.
# Copyright (C) 2007 - 2009 Toon Verstraelen <[email protected]>, Center
# for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all rights
# reserved unless otherwise stated.
#
# This file is part of Zeobuilder.
#
# Zeobuilder is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "ZEOBUILDER: a GUI toolkit for the construction of complex molecules on the
# nanoscale with building blocks", Toon Verstraelen, Veronique Van Speybroeck
# and Michel Waroquier, Journal of Chemical Information and Modeling, Vol. 48
# (7), 1530-1541, 2008
# DOI:10.1021/ci8000748
#
# Zeobuilder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
"""
To understand this module properly, it is essential to be aware of the different
coordinate systems that are used in Zeobuilder
1) Screen coordinates
These are just the pixel coordinates in the drawing area. x goes from 0 to
width-1 (from left to right) and y goes from 0 to height-1 (from top to
bottom).
2) Camera coordinates
These are also two dimensional coordinates and they are just a simple
transformation of the screen coordinates. x remains horizontal and y remains
vertical. x = -0.5..0.5, y= -0.5..0.5
3) Eye coordinates
These are three dimensional coordinates. The center of this coordinate frame
is defined by the variable self.eye. The rotation of this frame is
defined by the matrix self.rotation. The negative z-axis of this frame, is
the direction in which the camera looks, while the two other axes define the
tilt of the camera.
4) Model coordinates. These coordinates are used as 'fixed' coordinates in
Zeobuilder. In the case of a periodic model, the origin corresponds to one
of the corners of the periodic box.
All the visualization related coordinates are defined in one of the four
coordinate frames above. Some of the relevant variables for the 3D visualization
are explained below:
* self.rotation_center, defined in model coordinates + center of unit cell
This is the point in space defines the center of rotation when the user
rotates the whole model. (i.e. when nothing is selected)
* self.opening_angle
This variable is used to determine the (minimal) opening angle of the camera,
when the perspective projection is used. When this variable is set to zero,
the orthogonal projection is used.
* self.znear
The distance from the eye to the frontal clipping plane. In the case of
orthogonal projection, this is always zero.
* self.window_size
The interpretation of this variable depends on the projection type.
- Orthogonal: The distance in model coordinates or observer coordinates
(these are the same) of the min(width, height) of the screen.
- Perspective: The distance in model coordinates or observer coordinates
(these are the same) of the min(width, height) of the screen, in the plane
orthogonal to the viewing direction at a distance of self.znear from the
eye.
* self.window_depth
The distance in model or eye coordinates between the frontal and the
back clipping plane.
* self.rotation.r
This is a 3x3 orthonormal rotation matrix. it rotates a vector in model
coordinates into eye coordinates. The full transformation then becomes...
FIXME
* self.eye FIXME
"""
from zeobuilder import context
from zeobuilder.nodes.glmixin import GLTransformationMixin
from molmod.units import angstrom
from molmod.transformations import Translation, Rotation
import numpy
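# A minimal numeric sketch of the model -> eye chain outlined in the module
# docstring, assuming that Translation.vector_apply_inverse subtracts the
# translation vector t and that Rotation.vector_apply_inverse applies the
# transposed rotation matrix r, mirroring Camera.model_to_eye below. The
# function and argument names are hypothetical; nothing in this module calls it.
def _model_to_eye_sketch(vector_m, model_center_t, rotation_center_t,
                         rotation_r, eye_t, znear):
    tmp = numpy.asarray(vector_m, float) - model_center_t  # undo the model-center shift
    tmp = tmp - rotation_center_t                           # undo the rotation-center shift
    tmp = numpy.dot(rotation_r.transpose(), tmp)            # rotate model axes into eye axes
    tmp[2] -= znear                                         # shift along the viewing direction by znear
    return tmp - eye_t                                      # express relative to the eye position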
class Camera(object):
def __init__(self):
# register configuration settings: default camera
from zeobuilder.gui import fields
from zeobuilder.gui.fields_dialogs import DialogFieldInfo
config = context.application.configuration
config.register_setting(
"viewer_distance",
100.0*angstrom,
DialogFieldInfo("Default Viewer", (1, 0), fields.faulty.Length(
label_text="Distance from origin",
attribute_name="viewer_distance",
low=0.0,
low_inclusive=True,
)),
)
config.register_setting(
"opening_angle",
0.0,
DialogFieldInfo("Default Viewer", (1, 1), fields.faulty.MeasureEntry(
measure="Angle",
label_text="Camera opening angle",
attribute_name="opening_angle",
low=0.0,
low_inclusive=True,
high=0.5*numpy.pi,
high_inclusive=False,
show_popup=False,
)),
)
config.register_setting(
"window_size",
25*angstrom,
DialogFieldInfo("Default Viewer", (1, 2), fields.faulty.Length(
label_text="Window size",
attribute_name="window_size",
low=0.0,
low_inclusive=False,
)),
)
config.register_setting(
"window_depth",
200.0*angstrom,
DialogFieldInfo("Default Viewer", (1, 3), fields.faulty.Length(
label_text="Window depth",
attribute_name="window_depth",
low=0.0,
low_inclusive=False,
)),
)
self.reset()
def reset(self):
config = context.application.configuration
self.rotation_center = Translation()
self.rotation = Rotation()
self.eye = Translation()
self.eye.t[2] = config.viewer_distance
self.opening_angle = config.opening_angle
self.window_size = config.window_size
self.window_depth = config.window_depth
def get_znear(self):
if self.opening_angle > 0.0:
return 0.5*self.window_size/numpy.tan(0.5*self.opening_angle)
else:
return 0.0
znear = property(get_znear)
# coordinate transformations
def eye_to_camera(self, vector_e):
tmp = numpy.ones(2, float)
znear = self.znear
if znear > 0:
return -vector_e[:2]/vector_e[2]/self.window_size*znear
else:
return vector_e[:2]/self.window_size
def camera_window_to_eye(self, vector_c):
tmp = numpy.zeros(3, float)
tmp[:2] = vector_c*self.window_size
znear = self.znear
if znear > 0:
tmp[2] = -self.znear
else:
tmp[2] = -self.window_size/3.0
return tmp
def model_to_eye(self, vector_m):
scene = context.application.scene
tmp = scene.model_center.vector_apply_inverse(vector_m)
tmp = self.rotation_center.vector_apply_inverse(tmp)
tmp = self.rotation.vector_apply_inverse(tmp)
tmp[2] -= self.znear
tmp = self.eye.vector_apply_inverse(tmp)
return tmp
def eye_to_model(self, vector_e):
scene = context.application.scene
tmp = self.eye.vector_apply(vector_e)
tmp[2] += self.znear
tmp = self.rotation.vector_apply(tmp)
tmp = self.rotation_center.vector_apply(tmp)
tmp = scene.model_center.vector_apply(tmp)
return tmp
def model_to_camera(self, vector_m):
return self.eye_to_camera(self.model_to_eye(vector_m))
def camera_window_to_model(self, vector_c):
return self.eye_to_model(self.camera_window_to_eye(vector_c))
def object_to_depth(self, gl_object):
result = -self.model_to_eye(gl_object.get_absolute_frame().t)[2]
return result
def object_to_camera(self, gl_object):
return self.eye_to_camera(self.model_to_eye(gl_object.get_absolute_frame().t))
def object_to_eye(self, gl_object):
return self.model_to_eye(gl_object.get_absolute_frame().t)
def object_eye_rotation(self, gl_object):
"""
Returns a matrix that consists of the x, y and z axes of the
eye frame in the coordinates of the parent frame of the given
object.
"""
if hasattr(gl_object, "parent") and \
isinstance(gl_object.parent, GLTransformationMixin):
parent_matrix = gl_object.parent.get_absolute_frame().r
else:
parent_matrix = numpy.identity(3, float)
result = numpy.dot(self.rotation.r.transpose(), parent_matrix).transpose()
return result
def depth_to_scale(self, depth):
""" transforms a depth into a scale (au/camcoords)"""
znear = self.znear
if znear > 0:
return depth/znear*self.window_size
else:
return self.window_size
def vector_in_plane(self, r, p_m):
"""Returns a vector at camera position r in a plane (through p, orthogonal to viewing direction)
Arguments
r -- a two-dimensional vector in camera coordinates
p_m -- a three-dimensional vector in model coordinates
Returns
rp -- a three-dimensional vector in model coordinates that lies
at the intersection of a plane and a line. The plane is
orthogonal to the viewing direction and goes through the
point p. The line connects the eye (eye_m below) with the
point r (r_m below) in the camera window.
"""
eye_m = self.eye_to_model(numpy.zeros(3, float))
r_m = self.camera_window_to_model(r)
center_m = self.camera_window_to_model(numpy.zeros(2, float))
normal = (eye_m - center_m)
normal /= numpy.linalg.norm(normal)
if self.znear > 0:
# the line is defined as r = eye_m + d*t, where t = -infinity ... infinity
d = eye_m - r_m
# t at the intersection:
t = -numpy.dot(eye_m - p_m, normal)/numpy.dot(d, normal)
return eye_m + d*t
else:
# the line is defined as r = r_m + d*t, where t = -infinity ... infinity
d = normal
# t at the intersection:
t = -numpy.dot(r_m - p_m, normal)/numpy.dot(d, normal)
return r_m + d*t
| gpl-3.0 | 2,054,813,364,544,159,200 | 34.890728 | 104 | 0.640834 | false |
Crystal-SDS/dashboard | crystal_dashboard/dashboards/crystal/projects/groups/tables.py | 1 | 2270 | from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from django.core.urlresolvers import reverse
from horizon import tables
from horizon import exceptions
from crystal_dashboard.api import projects as api
class MyFilterAction(tables.FilterAction):
name = "myfilter"
class CreateGroup(tables.LinkAction):
name = "create_group"
verbose_name = _("Create Group")
url = "horizon:crystal:projects:groups:create"
classes = ("ajax-modal",)
icon = "plus"
class UpdateGroup(tables.LinkAction):
name = "update_group"
verbose_name = _("Edit")
url = "horizon:crystal:projects:groups:update"
classes = ("ajax-modal",)
icon = "plus"
class DeleteGroup(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Group",
u"Delete Groups",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Group",
u"Deleted Groups",
count
)
name = "delete_group"
success_url = "horizon:crystal:projects:index"
def delete(self, request, obj_id):
try:
response = api.delete_project_group(request, obj_id)
if 200 <= response.status_code < 300:
pass
# messages.success(request, _("Successfully deleted controller: %s") % obj_id)
else:
raise ValueError(response.text)
except Exception as ex:
redirect = reverse("horizon:crystal:projects:index")
error_message = "Unable to remove group.\t %s" % ex.message
exceptions.handle(request, _(error_message), redirect=redirect)
class DeleteMultipleGroups(DeleteGroup):
name = "delete_multiple_groups"
class GroupsTable(tables.DataTable):
id = tables.Column('id', verbose_name=_("ID"))
name = tables.Column('name', verbose_name=_("Name"))
tenants = tables.Column('projects', verbose_name=_("Projects"))
class Meta:
name = "groups"
verbose_name = _("Groups")
table_actions = (MyFilterAction, CreateGroup, DeleteMultipleGroups)
row_actions = (UpdateGroup, DeleteGroup)
| gpl-3.0 | 4,537,338,068,862,969,300 | 28.102564 | 94 | 0.634361 | false |
enriquefynn/dwm-scripts | status_bar.py | 1 | 3351 | #!/usr/bin/env python
import json
import time
import urllib2
import re
from subprocess import call,check_output
redfg = '\x1b[38;5;196m'
bluefg = '\x1b[38;5;21m'
darkgreenfg = '\x1b[38;5;78m'
darkbluefg = '\x1b[38;5;74m'
winefg = '\x1b[38;5;118m'
yellowfg = '\x1b[38;5;226m'
redbg = '\x1b[48;5;196m'
greenbg = '\x1b[48;5;47m'
yellowbg = '\x1b[48;5;226m'
blackbg = '\x1b[48;5;16m'
reset = '\x1b[0m'
proxy = urllib2.ProxyHandler()
opener = urllib2.build_opener(proxy)
#BTC
def getCoinFromBtce(coin):
try:
ccoin = round(json.loads(opener.open("https://btc-e.com/api/2/{}_usd/ticker"\
.format(coin)).read())['ticker']['last'], 2)
except:
return None
return ccoin
#Weather
city_name = 'Lugano'
def getWeatherInfo(cityName):
w = {}
try:
weather = json.loads(opener.open("http://api.openweathermap.org/data/2.5/weather?q={}&units=metric".format(cityName)).read())
w['temp_min'] = weather['main']['temp_min']
w['temp_act'] = weather['main']['temp']
w['temp_max'] = weather['main']['temp_max']
w['sunrise'] = weather['sys']['sunrise']
w['sunset'] = weather['sys']['sunset']
except:
return None
return w
weather = getWeatherInfo(city_name)
weather_bar = []
if weather != None:
sunrise = time.strftime('%H:%M', time.localtime(weather['sunrise']))
sunset = time.strftime('%H:%M', time.localtime(weather['sunset']))
weather_bar = " {}{}C{}-{}{}C{}-{}{}C{} {}{}{}-{}{}{} | ".format(bluefg, weather['temp_min'], reset,
winefg, weather['temp_act'], reset,
redfg, weather['temp_max'], reset,
yellowfg, sunrise, reset,
redfg, sunset, reset)
#Battery
battery = check_output(['acpiconf','-i', '0'])
battery_state = re.search(r'State:\t*(\w*)', battery).group(1)
battery_percentage = int(re.search(r'Remaining capacity:\t*(\d*)', battery).group(1))
battery_bar = [bluefg, 'On AC', reset]
if battery_state != 'high':
if battery_percentage >= 60:
bg = greenbg
elif 60 > battery_percentage > 30:
bg = yellowbg
elif battery_percentage <= 3:
call(['shutdown', '-p', 'now'])
else:
bg = redbg
battery_bar = ['Batt: ', bg, ' '*(battery_percentage/10), \
blackbg, ' '*(10-(battery_percentage/10)), reset, ' ', str(battery_percentage) + '%']
if battery_state == 'charging':
battery_bar.append('c')
#Wireless
try:
wlan = check_output(['ifconfig', 'wlan0'])
ssid = re.search(r'ssid \t*\"*(.+?)\"* channel', wlan).group(1)
except:
ssid = None
#Sound
sound = re.search(r'hw.snd.default_unit: (.*)', check_output(['sysctl', 'hw.snd.default_unit'])).group(1)
if sound == '0':
sound = 'Speaker'
elif sound == '1':
sound = 'Headset'
else:
sound = 'HDMI'
#Date
date = [check_output(['date', '+%d/%m/%Y %H:%M']).strip()]
attr_list = []
attr_list.extend(sound)
attr_list += weather_bar
if ssid != None:
attr_list += ['wlan: ', winefg, ssid, reset, ' | ']
btc_ticker = getCoinFromBtce('btc')
if btc_ticker != None:
    attr_list.extend(['{}BTC: {} {}'.format(darkgreenfg, btc_ticker, darkbluefg)])
attr_list += '| '
attr_list.extend(battery_bar)
attr_list += ' | '
attr_list.extend(date)
call(['xsetroot','-name',''.join(
attr_list
)], shell=False)
| bsd-3-clause | -1,261,675,721,522,088,000 | 27.887931 | 133 | 0.587586 | false |
looooo/paraBEM | examples/vtk/vtk_cylinder_linear_dirichlet.py | 1 | 1537 | import numpy as np
import paraBEM
from paraBEM.pan2d import DirichletDoublet1Case2 as Case
from paraBEM.utils import check_path
from paraBEM.vtk_export import VtkWriter
# geometry
numpoints = 100
phi = np.linspace(0, 2 * np.pi, numpoints + 1)
x = np.cos(phi)[:-1]
y = np.sin(phi)[:-1]
xy = np.transpose(np.array([x, y]))
# mapping the geometry
vector = [paraBEM.PanelVector2(*i) for i in xy]
vector += [vector[0]] # important for calculating the gradients
panels = [paraBEM.Panel2([vec, vector[i+1]]) for i, vec in enumerate(vector[:-1])]
vector[0].wake_vertex = True
# setting up the case
case = Case(panels)
case.v_inf = paraBEM.Vector2(1, 0.5)
case.run()
nx = 100
ny = 100
space_x = np.linspace(-2, 2, nx)
space_y = np.linspace(-2, 2, ny)
grid = [paraBEM.Vector2(x, y) for y in space_y for x in space_x]
velocity = list(map(case.off_body_velocity, grid))
pot = list(map(case.off_body_potential, grid))
with open(check_path("results/cylinder_2d_linear/field.vtk"), "w") as _file:
writer = VtkWriter()
writer.structed_grid(_file, "airfoil", [nx, ny, 1])
writer.points(_file, grid)
writer.data(_file, velocity, name="velocity", _type="VECTORS", data_type="POINT_DATA")
writer.data(_file, pot, name="pot", _type="SCALARS", data_type="POINT_DATA")
with open(check_path("results/cylinder_2d_linear/shape.vtk"), "w") as _file:
writer = VtkWriter()
writer.unstructed_grid(_file, "airfoil")
writer.points(_file, [[i[0], i[1], 0]for i in vector])
writer.lines(_file, [range(len(vector))]) | gpl-3.0 | -7,619,371,730,664,298,000 | 33.954545 | 90 | 0.676643 | false |