repo_name (string, lengths 6–61) | path (string, lengths 4–230) | copies (string, lengths 1–3) | size (string, lengths 4–6) | text (string, lengths 1.01k–850k) | license (string, 15 classes) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6–96.6) | line_max (int64, 32–939) | alpha_frac (float64, 0.26–0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62–6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
julio-vaz/iolanda | main.py | 1 | 1453 |
import subprocess
import os
import shutil
from urllib.parse import urlparse
import feedparser
import requests
import newspaper
from ffmpy import FFmpeg
feed = feedparser.parse('http://feeds.feedburner.com/iolandachannel')
chapters = []
for entry in feed['entries']:
    title = entry['title']
    print(f'Parsing: {title}')
    news_link = entry['link']
    article = newspaper.Article(news_link)
    article.download()
    article.parse()
    media = article.top_img
    if not media:
        continue
    images = []
    response = requests.get(media, stream=True)
    filename = os.path.basename(urlparse(media).path)
    filename = f'images/{filename}'
    with open(filename, 'wb') as out_file:
        shutil.copyfileobj(response.raw, out_file)
    images.append(filename)
    from gtts import gTTS
    text = article.text
    tts = gTTS(text=text, lang='pt', slow=False)
    tts.save("article.mp3")
    inputs = {
        'article.mp3': None,
    }
    for image in images:
        inputs[image] = '-loop 1 -r 1'
    ff = FFmpeg(inputs=inputs, outputs={'article.avi': '-y -acodec copy -shortest -qscale 5'})
    print(ff.cmd)
    ff.run()
    command = f'youtube-upload article.avi --title "{title}"'
    command += f' --description "{text}\n\n'
    command += f'LINK PARA A NOTÍCIA ORIGINAL: {news_link}"'
    subprocess.call(command, shell=True)
    print(f'Article parsed: {title}')
    import sys; sys.exit()
print('That\'s all folks!')
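# Note on assumptions in the script above: besides the pip-installable modules it
# imports, it shells out to two external tools that must be available on PATH for
# the pipeline to finish, ffmpeg (driven through ffmpy) and the youtube-upload
# command line client, and it writes downloaded images into an images/ directory
# that is assumed to already exist before the download step runs.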
| apache-2.0 | 2,849,347,827,935,572,000 | 28.04 | 94 | 0.653581 | false | 3.36891 | false | false | false |
djphan/c410-Repo | c410-A4CanvasAJAX/server.py | 1 | 3497 |
#!/usr/bin/env python
# coding: utf-8
# Copyright 2013 Abram Hindle
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You can start this by executing it in python:
# python server.py
#
# remember to:
# pip install flask
import flask
from flask import Flask, request, send_from_directory, url_for, redirect, jsonify
import json
app = Flask(__name__)
app.debug = True
# An example world
# {
# 'a':{'x':1, 'y':2},
# 'b':{'x':2, 'y':3}
# }
class World:
def __init__(self):
self.clear()
def update(self, entity, key, value):
entry = self.space.get(entity,dict())
entry[key] = value
self.space[entity] = entry
def set(self, entity, data):
self.space[entity] = data
def clear(self):
self.space = dict()
def get(self, entity):
return self.space.get(entity,dict())
def world(self):
return self.space
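# A minimal usage sketch for the World class above; the entity name and values
# are invented for illustration, and the helper is never called, so module
# behaviour is unchanged.
def _example_world_usage():
    w = World()
    w.set('a', {'x': 1, 'y': 2})   # replace entity 'a' wholesale
    w.update('a', 'x', 5)          # merge a single key into entity 'a'
    assert w.get('a') == {'x': 5, 'y': 2}
    assert w.world() == {'a': {'x': 5, 'y': 2}}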
# you can test your webservice from the commandline
# curl -v -H "Content-Type: appication/json" -X PUT http://127.0.0.1:5000/entity/X -d '{"x":1,"y":1}'
myWorld = World()
# I give this to you, this is how you get the raw body/data portion of a post in flask
# this should come with flask but whatever, it's not my project.
def flask_post_json(request):
'''Ah the joys of frameworks! They do so much work for you
that they get in the way of sane operation!'''
if (request.json != None):
return request.json
elif (request.data != None and request.data != ''):
return json.loads(request.data)
else:
return json.loads(request.form.keys()[0])
@app.route("/")
def hello():
'''Return something coherent here.. perhaps redirect to /static/index.html '''
return send_from_directory('static', 'index.html')
@app.route("/entity/<entity>", methods=['POST','PUT'])
def update(entity):
'''update the entities via this interface'''
#Fixed to use Hindle's JSON functions
myData = flask_post_json(request)
if request.method == "POST":
myWorld.set(entity, myData)
elif request.method == "PUT":
for myKey, myValue in myData.iteritems():
myWorld.update(entity, myKey, myValue)
# Return the world not redirect to page
return jsonify(myWorld.get(entity))
@app.route("/world", methods=['POST','GET'])
def world():
'''you should probably return the world here'''
if request.method == "POST":
aWorld = flask_post_json(request)
print(aWorld)
return jsonify(myWorld.world())
elif request.method == "GET":
return jsonify(myWorld.world())
@app.route("/entity/<entity>")
def get_entity(entity):
'''This is the GET version of the entity interface, return a representation of the entity'''
return jsonify(** myWorld.get(entity))
@app.route("/clear", methods=['POST','GET'])
def clear():
# Call built in function
myWorld.clear()
return jsonify(myWorld.world())
if __name__ == "__main__":
app.run()
| gpl-3.0 | -8,542,675,591,337,391,000 | 28.635593 | 104 | 0.645696 | false | 3.627593 | false | false | false |
redhat-openstack/rdo-infra | ci-scripts/dlrnapi_promoter/qcow_client.py | 1 | 10253 |
"""
This file contains classes and functions to interact with qcow image servers
"""
import copy
import logging
import os
import paramiko
from common import PromotionError
class QcowConnectionClient(object):
"""
Proxy class for client connection
"""
_log = logging.getLogger("promoter")
def __init__(self, server_conf):
self._host = server_conf['host']
self._user = server_conf['user']
self._client_type = server_conf['client']
self._keypath = server_conf['keypath']
self._client = os
if self._client_type == "sftp":
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy)
keypath = os.path.expanduser(self._keypath)
self.key = paramiko.rsakey.RSAKey(filename=keypath)
self.kwargs = {}
if self._user is not None:
self.kwargs['username'] = self._user
else:
self.kwargs['username'] = os.environ.get("USER")
self._log.debug("Connecting to %s as user %s", self._host,
self._user)
self.ssh_client = client
def connect(self):
if hasattr(self, 'ssh_client'):
self.ssh_client.connect(self._host, pkey=self.key, **self.kwargs)
self._client = self.ssh_client.open_sftp()
def __getattr__(self, item):
return getattr(self._client, item)
def close(self):
if self._client_type == "sftp":
self._client.close()
class QcowClient(object):
"""
This class interacts with qcow images servers
"""
log = logging.getLogger("promoter")
def __init__(self, config):
self.config = config
self.git_root = self.config.git_root
self.promote_script = os.path.join(self.git_root,
'ci-scripts', 'promote-images.sh')
self.distro_name = self.config.distro_name
self.distro_version = self.config.distro_version
self.rollback_links = {}
server_conf = self.config.overcloud_images.get('qcow_servers')
self.user = server_conf['local']['user']
self.root = server_conf['local']['root']
self.host = server_conf['local']['host']
self.client = QcowConnectionClient(server_conf['local'])
self.images_dir = os.path.join(self.root, config.distro,
config.release, "rdo_trunk")
def validate_qcows(self, dlrn_hash, name=None, assume_valid=False):
"""
Check we have the images dir in the server
if name is specified, verify that name points to the hash
- maybe qcow ran and failed
Check at which point of qcow promotion we stopped
1) did we create a new symlink ?
2) did we create the previous symlink ?
3) are all the images uploaded correctly ?
:param dlrn_hash: The hash to check
:param name: The promotion name
:param assume_valid: report everything worked unconditionally
:return: A dict with result of the validation
"""
try:
self.client.listdir(self.images_dir)
self.client.chdir(self.images_dir)
except EnvironmentError as ex:
self.log.error("Qcow-client: Image root dir %s does not exist "
"in the server, or is not accessible")
self.log.exception(ex)
raise
results = {
"hash_valid": False,
"promotion_valid": False,
"qcow_valid": False,
"missing_qcows": copy.copy(
self.config.overcloud_images['qcow_images']),
"present_qcows": [],
}
stat = None
images = None
images_path = os.path.join(self.images_dir, dlrn_hash.full_hash)
try:
stat = self.client.stat(images_path)
images = sorted(self.client.listdir(images_path))
except EnvironmentError:
self.log.error("Images path for hash %s not present or "
"accessible", dlrn_hash)
if not images:
self.log.error("No images found")
if stat and images:
results['hash_valid'] = True
results['present_qcows'] = images
results['missing_qcows'] = \
list(set(self.config.overcloud_images[
'qcow_images']).difference(
images))
if images == self.config.overcloud_images['qcow_images']:
results['qcow_valid'] = True
if name is not None:
try:
link = self.client.readlink(name)
if link == dlrn_hash.full_hash:
results['promotion_valid'] = True
except EnvironmentError:
self.log.error("%s was not promoted to %s",
dlrn_hash.full_hash, name)
return results
def rollback(self):
"""
Rolls back the link to the initial status
Rollback is guaranteed to work only for caught exceptions, and it may
not be really useful. We have a rollback only if a remove or a symlink
fails.
- If a remove fails, it means that we don't need to rollback
- If a symlink fails, then it will probably fail on rollback too.
:return: None
"""
for name, target in self.rollback_links.items():
self.client.remove(name)
self.client.symlink(target, name)
self.rollback_links = {}
def promote(self, candidate_hash, target_label, candidate_label=None,
create_previous=True, validation=True):
"""
Effective promotion of the images. This method will handle symbolic
links to the dir containing images from the candidate hash,
optionally saving the current link as previous
:param candidate_hash: The dlrn hash to promote
:param target_label: The name of the link to create
:param candidate_label: Currently unused
:param create_previous: A bool to determine if previous link is created
:param validation: A bool to determine if qcow validation should be done
:return: None
"""
self.client.connect()
if validation:
self.validate_qcows(candidate_hash)
self.client.chdir(self.images_dir)
log_header = "Qcow promote '{}' to {}:".format(candidate_hash,
target_label)
self.log.info("%s Attempting promotion", log_header)
# Check if candidate_hash dir is present
try:
self.client.stat(candidate_hash.full_hash)
except EnvironmentError as ex:
self.log.error("%s images dir for hash %s not present or not "
"accessible", log_header, candidate_hash)
self.log.exception(ex)
self.client.close()
raise PromotionError("{} No images dir for hash {}"
"".format(log_header, candidate_hash))
# Check if the target label exists and points to a hash dir
current_hash = None
try:
current_hash = self.client.readlink(target_label)
except EnvironmentError:
self.log.debug("%s No link named %s exists", log_header,
target_label)
# If this exists Check if we can remove the symlink
if current_hash:
self.rollback_links['target_label'] = current_hash
try:
self.client.remove(target_label)
except EnvironmentError as ex:
self.log.debug("Unable to remove the target_label: %s",
target_label)
self.log.exception(ex)
self.client.close()
raise
# Check if a previous link exists and points to an hash-dir
previous_label = "previous-{}".format(target_label)
previous_hash = None
try:
previous_hash = self.client.readlink(previous_label)
except EnvironmentError:
self.log.debug("%s No previous-link named %s exists",
log_header,
previous_label)
self.log.debug("Previous hash %s", previous_hash)
# If it exists and we are handling it, check if we can remove and
# reassign it
if current_hash and previous_hash and create_previous:
self.rollback_links[previous_label] = previous_hash
try:
self.client.remove(previous_label)
except EnvironmentError as ex:
self.log.debug("Unable to remove the target_label: %s",
target_label)
self.log.exception(ex)
self.client.close()
# Rollback is not tested, we enable it later, when tests are
# easier to add
# self.rollback()
raise
try:
self.client.symlink(current_hash, previous_label)
except EnvironmentError as ex:
self.log.error("%s failed to link %s to %s", log_header,
previous_label, current_hash)
self.log.exception(ex)
# Rollback is not tested, we enable it later, when tests are
# easier to add
# self.rollback()
self.client.close()
raise
# Finally the effective promotion
try:
self.client.symlink(candidate_hash.full_hash, target_label)
except EnvironmentError as ex:
self.log.error("%s failed to link %s to %s", log_header,
target_label, candidate_hash.full_hash)
self.log.exception(ex)
# Rollback is not tested, we enable it later, when tests are
# easier to add
# self.rollback()
finally:
self.client.close()
self.log.info("%s Successful promotion", log_header)
| apache-2.0 | 5,680,711,239,510,294,000 | 37.115242 | 80 | 0.558276 | false | 4.436608 | true | false | false |
execunix/vinos | xsrc/external/mit/MesaLib7/dist/src/glsl/builtins/tools/generate_builtins.py | 4 | 8415 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import re
import sys
from glob import glob
from os import path
from subprocess import Popen, PIPE
from sys import argv
# Local module: generator for texture lookup builtins
from texture_builtins import generate_texture_functions
builtins_dir = path.join(path.dirname(path.abspath(__file__)), "..")
# Get the path to the standalone GLSL compiler
if len(argv) != 2:
print "Usage:", argv[0], "<path to compiler>"
sys.exit(1)
compiler = argv[1]
# Read the files in builtins/ir/*...add them to the supplied dictionary.
def read_ir_files(fs):
for filename in glob(path.join(path.join(builtins_dir, 'ir'), '*')):
with open(filename) as f:
fs[path.basename(filename)] = f.read()
# Return a dictionary containing all builtin definitions (even generated)
def get_builtin_definitions():
fs = {}
generate_texture_functions(fs)
read_ir_files(fs)
return fs
def stringify(s):
# Work around MSVC's 65535 byte limit by outputting an array of characters
# rather than actual string literals.
if len(s) >= 65535:
#t = "/* Warning: length " + repr(len(s)) + " too large */\n"
t = ""
for c in re.sub('\s\s+', ' ', s):
if c == '\n':
t += '\n'
else:
t += "'" + c + "',"
return '{' + t[:-1] + '}'
t = s.replace('\\', '\\\\').replace('"', '\\"').replace('\n', '\\n"\n "')
return ' "' + t + '"\n'
def write_function_definitions():
fs = get_builtin_definitions()
for k, v in sorted(fs.iteritems()):
print 'static const char builtin_' + k + '[] ='
print stringify(v), ';'
def run_compiler(args):
command = [compiler, '--dump-lir'] + args
p = Popen(command, 1, stdout=PIPE, shell=False)
output = p.communicate()[0]
# Clean up output a bit by killing whitespace before a closing paren.
kill_paren_whitespace = re.compile(r'[ \n]*\)', re.MULTILINE)
output = kill_paren_whitespace.sub(')', output)
# Also toss any duplicate newlines
output = output.replace('\n\n', '\n')
return (output, p.returncode)
def write_profile(filename, profile):
(proto_ir, returncode) = run_compiler([filename])
if returncode != 0:
print '#error builtins profile', profile, 'failed to compile'
return
# Kill any global variable declarations. We don't want them.
kill_globals = re.compile(r'^\(declare.*\n', re.MULTILINE)
proto_ir = kill_globals.sub('', proto_ir)
print 'static const char prototypes_for_' + profile + '[] ='
print stringify(proto_ir), ';'
# Print a table of all the functions (not signatures) referenced.
# This is done so we can avoid bothering with a hash table in the C++ code.
function_names = set()
for func in re.finditer(r'\(function (.+)\n', proto_ir):
function_names.add(func.group(1))
print 'static const char *functions_for_' + profile + ' [] = {'
for func in sorted(function_names):
print ' builtin_' + func + ','
print '};'
def write_profiles():
profiles = get_profile_list()
for (filename, profile) in profiles:
write_profile(filename, profile)
def get_profile_list():
profiles = []
for pfile in sorted(glob(path.join(path.join(builtins_dir, 'profiles'), '*'))):
profiles.append((pfile, path.basename(pfile).replace('.', '_')))
return profiles
if __name__ == "__main__":
print """/* DO NOT MODIFY - automatically generated by generate_builtins.py */
/*
* Copyright © 2010 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <stdio.h>
#include "main/core.h" /* for struct gl_shader */
#include "glsl_parser_extras.h"
#include "ir_reader.h"
#include "program.h"
#include "ast.h"
extern "C" struct gl_shader *
_mesa_new_shader(struct gl_context *ctx, GLuint name, GLenum type);
gl_shader *
read_builtins(GLenum target, const char *protos, const char **functions, unsigned count)
{
struct gl_context fakeCtx;
fakeCtx.API = API_OPENGL;
fakeCtx.Const.GLSLVersion = 130;
fakeCtx.Extensions.ARB_ES2_compatibility = true;
gl_shader *sh = _mesa_new_shader(NULL, 0, target);
struct _mesa_glsl_parse_state *st =
new(sh) _mesa_glsl_parse_state(&fakeCtx, target, sh);
st->language_version = 130;
st->symbols->language_version = 130;
st->ARB_texture_rectangle_enable = true;
st->EXT_texture_array_enable = true;
_mesa_glsl_initialize_types(st);
sh->ir = new(sh) exec_list;
sh->symbols = st->symbols;
/* Read the IR containing the prototypes */
_mesa_glsl_read_ir(st, sh->ir, protos, true);
/* Read ALL the function bodies, telling the IR reader not to scan for
* prototypes (we've already created them). The IR reader will skip any
* signature that does not already exist as a prototype.
*/
for (unsigned i = 0; i < count; i++) {
_mesa_glsl_read_ir(st, sh->ir, functions[i], false);
if (st->error) {
printf("error reading builtin: %.35s ...\\n", functions[i]);
printf("Info log:\\n%s\\n", st->info_log);
ralloc_free(sh);
return NULL;
}
}
reparent_ir(sh->ir, sh);
delete st;
return sh;
}
"""
write_function_definitions()
write_profiles()
profiles = get_profile_list()
print 'static gl_shader *builtin_profiles[%d];' % len(profiles)
print """
void *builtin_mem_ctx = NULL;
void
_mesa_glsl_release_functions(void)
{
ralloc_free(builtin_mem_ctx);
builtin_mem_ctx = NULL;
memset(builtin_profiles, 0, sizeof(builtin_profiles));
}
static void
_mesa_read_profile(struct _mesa_glsl_parse_state *state,
int profile_index,
const char *prototypes,
const char **functions,
int count)
{
gl_shader *sh = builtin_profiles[profile_index];
if (sh == NULL) {
sh = read_builtins(GL_VERTEX_SHADER, prototypes, functions, count);
ralloc_steal(builtin_mem_ctx, sh);
builtin_profiles[profile_index] = sh;
}
state->builtins_to_link[state->num_builtins_to_link] = sh;
state->num_builtins_to_link++;
}
void
_mesa_glsl_initialize_functions(struct _mesa_glsl_parse_state *state)
{
if (builtin_mem_ctx == NULL) {
builtin_mem_ctx = ralloc_context(NULL); // "GLSL built-in functions"
memset(&builtin_profiles, 0, sizeof(builtin_profiles));
}
state->num_builtins_to_link = 0;
"""
i = 0
for (filename, profile) in profiles:
if profile.endswith('_vert'):
check = 'state->target == vertex_shader && '
elif profile.endswith('_frag'):
check = 'state->target == fragment_shader && '
version = re.sub(r'_(vert|frag)$', '', profile)
if version.isdigit():
check += 'state->language_version == ' + version
else: # an extension name
check += 'state->' + version + '_enable'
print ' if (' + check + ') {'
print ' _mesa_read_profile(state, %d,' % i
print ' prototypes_for_' + profile + ','
print ' functions_for_' + profile + ','
print ' Elements(functions_for_' + profile + '));'
print ' }'
print
i = i + 1
print '}'
| apache-2.0 | 990,475,953,581,760,400 | 31.237548 | 88 | 0.624673 | false | 3.598802 | false | false | false |
joshuamorton/coursesat.tech | app.py | 1 | 1337 |
from flask import Flask, jsonify
import pymongo
import json
app = Flask(__name__)
client = pymongo.MongoClient()
db = client.grouch
courses = db.courses
@app.route('/')
def index():
return 'test'
@app.route('/spring2016/')
def year():
schools = courses.distinct('school')
response = jsonify({'schools': schools})
response.headers['Access-Control-Allow-Origin'] = "*"
return response
@app.route('/spring2016/<school>/')
def for_school(school):
aggregationPipeline = [
{
'$match': {
'school': school
},
},
{
'$group': {
'_id': None,
'classes': {
'$push': '$number'
}
}
}
]
result = list(courses.aggregate(aggregationPipeline))
classes = result[0].get('classes') if len(result) > 0 else None
response = jsonify({'numbers': classes})
response.headers['Access-Control-Allow-Origin'] = "*"
return response
@app.route('/spring2016/<school>/<number>')
def single_course(school, number):
course = courses.find_one({'school':school, 'number':number}, {'_id': 0})
response = jsonify(course)
response.headers['Access-Control-Allow-Origin'] = "*"
return response
if __name__ == '__main__':
app.run()
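# Illustrative requests against the routes defined above, assuming the Flask
# development server on its default host/port; "CS" and "1331" are made-up
# sample values, not data known to be in the MongoDB collection:
#   GET http://127.0.0.1:5000/spring2016/          -> {"schools": [...]}
#   GET http://127.0.0.1:5000/spring2016/CS/       -> {"numbers": [...]}
#   GET http://127.0.0.1:5000/spring2016/CS/1331   -> the matching course document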
| mit | -256,736,483,378,657,400 | 21.283333 | 77 | 0.559461 | false | 3.967359 | false | false | false |
usajusaj/sga_utils | sga/toolbox/table_norm.py | 1 | 3016 |
'''
MIT License
Copyright (c) 2017 Matej Usaj
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Created on Apr 10, 2017
@author: Matej Usaj
'''
import logging
import numpy as np
import pandas as p
from . import USE_C_OPT
logger = logging.getLogger(__name__)
def _c_normalize(data3_tableix, t1, data3_nn, cpu=1):
from . import c_impl
c_impl.table_norm(data3_nn, t1, data3_tableix)
def _normalize(index, t1, data3_nn):
for x in t1:
index += data3_nn > x
index[index == 0] = 1
def normalize(table, t1, t2):
data = table.values.flatten()
result = np.full_like(data, np.nan)
data_values = ~np.isnan(data)
data = data[data_values]
data_index = np.zeros_like(data, dtype=np.int64)
if USE_C_OPT:
_c_normalize(data_index, t1, data)
else:
_normalize(data_index, t1, data)
data_index -= 1 # leftover from matlab code conversion
result[data_values] = t2[data_index]
return p.DataFrame(result.reshape(table.shape), index=table.index, columns=table.columns)
def _quantile_normalize(data, refdist):
percentiles = np.linspace(100. / data.shape[0], 100, num=data.shape[0])
ref_quantiles = np.percentile(refdist, percentiles, interpolation='midpoint') # interpolation used in matlab
sort_ind = np.argsort(data, kind='mergesort') # sorting alg used in matlab
result = np.zeros_like(data)
result[sort_ind] = ref_quantiles
return result
def table_normalize(data1, data2, data3):
data1 = data1.values.flatten()
data2 = data2.values.flatten()
nn = ~np.isnan(data1) & ~np.isnan(data2) # extract cells with values in both arrays
data2_norm = np.full_like(data2, np.nan)
data2_norm[nn] = _quantile_normalize(data2[nn], data1[nn]);
table = p.DataFrame({'data2': data2[nn], 'data2_norm': data2_norm[nn]})
table = table.sort_values('data2', kind='mergesort')
table = table.groupby('data2').median().reset_index()
return normalize(data3, table.data2, table.data2_norm)
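# A small sketch of the intended call pattern for table_normalize() above;
# shapes and distributions are invented, and the helper is never called, so
# importing the module is unaffected. data1 supplies the reference distribution,
# data2 is quantile-normalized against it, and data3 is mapped through the
# resulting lookup table.
def _example_table_normalize():
    rng = np.random.RandomState(0)
    data1 = p.DataFrame(rng.normal(0.0, 1.0, (20, 5)))   # reference distribution
    data2 = p.DataFrame(rng.normal(5.0, 2.0, (20, 5)))   # distribution to match
    data3 = p.DataFrame(rng.normal(5.0, 2.0, (20, 5)))   # values mapped via the lookup
    return table_normalize(data1, data2, data3)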
| mit | -7,065,601,722,802,952,000 | 32.88764 | 112 | 0.706233 | false | 3.482679 | false | false | false |
relayr/python-sdk | demos/noise.py | 1 | 3222 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Example script accessing data from a WunderBar microphone via MQTT.
This will connect to a microphone, read its noise level and send
an email notification to some receiver if that noise level exceeds
a certain threshold.
"""
import sys
import json
import time
import getpass
import smtplib
from email.mime.text import MIMEText
from relayr import Client
from relayr.resources import Device
from relayr.dataconnection import MqttStream
# Replace with your own values!
ACCESS_TOKEN = '...'
MICROPHONE_ID = '...'
# EMAIL/SMTP settings, please provide your own!
RECEIVER = '...'
SMTP_SERVER = '...'
SMTP_USERNAME = '...'
SMTP_PASSWORD = '' # will be requested at run time if left empty
SMTP_SENDER = 'WunderBar <[email protected]>'
SMTP_USE_SSL = False
try:
settings = [ACCESS_TOKEN, MICROPHONE_ID, RECEIVER, SMTP_SERVER, SMTP_USERNAME]
assert not any(map(lambda x: x=='...', settings))
except AssertionError:
print('Please provide meaningful settings in the code first!')
sys.exit(1)
class Callbacks(object):
"A class providing callbacks for incoming data from some device."
def __init__(self, device):
"An initializer to capture the device for later use."
self.device = device
def send_email(self, text):
"Send an email notification."
sender = SMTP_SENDER
subject = 'WunderBar Notification from Device: %s' % self.device.name
msg = MIMEText(text)
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = RECEIVER
if SMTP_USE_SSL == True:
s = smtplib.SMTP_SSL(SMTP_SERVER)
else:
s = smtplib.SMTP(SMTP_SERVER)
prompt = "SMTP user password for user '%s'? " % SMTP_USERNAME
global SMTP_PASSWORD
SMTP_PASSWORD = SMTP_PASSWORD or getpass.getpass(prompt)
s.login(SMTP_USERNAME, SMTP_PASSWORD)
s.sendmail(sender, [RECEIVER], msg.as_string())
s.quit()
print("Email notification sent to '%s'" % RECEIVER)
def microphone(self, topic, message):
"Callback displaying incoming noise level data and email if desired."
readings = json.loads(message)['readings']
level = [r for r in readings if r['meaning']=='noiseLevel'][0]['value']
print(level)
threshold = 75
if level > threshold:
dname, did = self.device.name, self.device.id
text = "Notification from '%s' (%s):\n" % (dname, did)
text += ("The noise level now is %d (> %d)! " % (level, threshold))
text += "Put on a sound protection helmet before you get deaf!"
self.send_email(text)
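# Illustrative shape of an incoming MQTT message for the microphone callback
# above; the numbers are invented:
#   {"readings": [{"meaning": "noiseLevel", "value": 82}, ...]}
# A noiseLevel value above the threshold of 75 triggers the email notification path.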
def connect():
"Connect to a device and read data for some time."
c = Client(token=ACCESS_TOKEN)
mic = Device(id=MICROPHONE_ID, client=c).get_info()
callbacks = Callbacks(mic)
print("Monitoring '%s' (%s) for 60 seconds..." % (mic.name, mic.id))
stream = MqttStream(callbacks.microphone, [mic], transport='mqtt')
stream.start()
try:
time.sleep(60)
except KeyboardInterrupt:
print('')
stream.stop()
print("Stopped")
if __name__ == "__main__":
connect()
| mit | -8,249,296,945,999,933,000 | 29.685714 | 82 | 0.639044 | false | 3.768421 | false | false | false |
EricMuller/mywebmarks-backend | requirements/twisted/Twisted-17.1.0/docs/core/howto/tutorial/listings/finger/finger22.py | 2 | 8392 |
# Do everything properly, and componentize
from twisted.application import internet, service, strports
from twisted.internet import protocol, reactor, defer, endpoints
from twisted.words.protocols import irc
from twisted.protocols import basic
from twisted.python import components
from twisted.web import resource, server, static, xmlrpc
from twisted.spread import pb
from zope.interface import Interface, implementer
from OpenSSL import SSL
import cgi
class IFingerService(Interface):
def getUser(user):
"""
Return a deferred returning a string.
"""
def getUsers():
"""
Return a deferred returning a list of strings.
"""
class IFingerSetterService(Interface):
def setUser(user, status):
"""
Set the user's status to something.
"""
def catchError(err):
return "Internal error in server"
class FingerProtocol(basic.LineReceiver):
def lineReceived(self, user):
d = self.factory.getUser(user)
d.addErrback(catchError)
def writeValue(value):
self.transport.write(value+'\r\n')
self.transport.loseConnection()
d.addCallback(writeValue)
class IFingerFactory(Interface):
def getUser(user):
"""
Return a deferred returning a string.
"""
def buildProtocol(addr):
"""
Return a protocol returning a string.
"""
@implementer(IFingerFactory)
class FingerFactoryFromService(protocol.ServerFactory):
protocol = FingerProtocol
def __init__(self, service):
self.service = service
def getUser(self, user):
return self.service.getUser(user)
components.registerAdapter(FingerFactoryFromService,
IFingerService,
IFingerFactory)
class FingerSetterProtocol(basic.LineReceiver):
def connectionMade(self):
self.lines = []
def lineReceived(self, line):
self.lines.append(line)
def connectionLost(self, reason):
if len(self.lines) == 2:
self.factory.setUser(*self.lines)
class IFingerSetterFactory(Interface):
def setUser(user, status):
"""
Return a deferred returning a string.
"""
def buildProtocol(addr):
"""
Return a protocol returning a string.
"""
@implementer(IFingerSetterFactory)
class FingerSetterFactoryFromService(protocol.ServerFactory):
protocol = FingerSetterProtocol
def __init__(self, service):
self.service = service
def setUser(self, user, status):
self.service.setUser(user, status)
components.registerAdapter(FingerSetterFactoryFromService,
IFingerSetterService,
IFingerSetterFactory)
class IRCReplyBot(irc.IRCClient):
def connectionMade(self):
self.nickname = self.factory.nickname
irc.IRCClient.connectionMade(self)
def privmsg(self, user, channel, msg):
user = user.split('!')[0]
if self.nickname.lower() == channel.lower():
d = self.factory.getUser(msg)
d.addErrback(catchError)
d.addCallback(lambda m: "Status of %s: %s" % (msg, m))
d.addCallback(lambda m: self.msg(user, m))
class IIRCClientFactory(Interface):
"""
@ivar nickname
"""
def getUser(user):
"""
Return a deferred returning a string.
"""
def buildProtocol(addr):
"""
Return a protocol.
"""
@implementer(IIRCClientFactory)
class IRCClientFactoryFromService(protocol.ClientFactory):
protocol = IRCReplyBot
nickname = None
def __init__(self, service):
self.service = service
def getUser(self, user):
return self.service.getUser(user)
components.registerAdapter(IRCClientFactoryFromService,
IFingerService,
IIRCClientFactory)
class UserStatusTree(resource.Resource):
def __init__(self, service):
resource.Resource.__init__(self)
self.service=service
# add a specific child for the path "RPC2"
self.putChild("RPC2", UserStatusXR(self.service))
# need to do this for resources at the root of the site
self.putChild("", self)
def _cb_render_GET(self, users, request):
userOutput = ''.join(["<li><a href=\"%s\">%s</a></li>" % (user, user)
for user in users])
request.write("""
<html><head><title>Users</title></head><body>
<h1>Users</h1>
<ul>
%s
</ul></body></html>""" % userOutput)
request.finish()
def render_GET(self, request):
d = self.service.getUsers()
d.addCallback(self._cb_render_GET, request)
# signal that the rendering is not complete
return server.NOT_DONE_YET
def getChild(self, path, request):
return UserStatus(user=path, service=self.service)
components.registerAdapter(UserStatusTree, IFingerService, resource.IResource)
class UserStatus(resource.Resource):
def __init__(self, user, service):
resource.Resource.__init__(self)
self.user = user
self.service = service
def _cb_render_GET(self, status, request):
request.write("""<html><head><title>%s</title></head>
<body><h1>%s</h1>
<p>%s</p>
</body></html>""" % (self.user, self.user, status))
request.finish()
def render_GET(self, request):
d = self.service.getUser(self.user)
d.addCallback(self._cb_render_GET, request)
# signal that the rendering is not complete
return server.NOT_DONE_YET
class UserStatusXR(xmlrpc.XMLRPC):
def __init__(self, service):
xmlrpc.XMLRPC.__init__(self)
self.service = service
def xmlrpc_getUser(self, user):
return self.service.getUser(user)
def xmlrpc_getUsers(self):
return self.service.getUsers()
class IPerspectiveFinger(Interface):
def remote_getUser(username):
"""
Return a user's status.
"""
def remote_getUsers():
"""
Return a user's status.
"""
@implementer(IPerspectiveFinger)
class PerspectiveFingerFromService(pb.Root):
def __init__(self, service):
self.service = service
def remote_getUser(self, username):
return self.service.getUser(username)
def remote_getUsers(self):
return self.service.getUsers()
components.registerAdapter(PerspectiveFingerFromService,
IFingerService,
IPerspectiveFinger)
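# With the registerAdapter() calls above, wrapping the service in an interface
# performs the component lookup, as the bottom of this file does: for a
# FingerService instance f, IFingerFactory(f) returns a FingerFactoryFromService
# and resource.IResource(f) returns a UserStatusTree built around the same service.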
@implementer(IFingerService)
class FingerService(service.Service):
def __init__(self, filename):
self.filename = filename
self.users = {}
def _read(self):
self.users.clear()
with open(self.filename) as f:
for line in f:
user, status = line.split(':', 1)
user = user.strip()
status = status.strip()
self.users[user] = status
self.call = reactor.callLater(30, self._read)
def getUser(self, user):
return defer.succeed(self.users.get(user, "No such user"))
def getUsers(self):
return defer.succeed(self.users.keys())
def startService(self):
self._read()
service.Service.startService(self)
def stopService(self):
service.Service.stopService(self)
self.call.cancel()
application = service.Application('finger', uid=1, gid=1)
f = FingerService('/etc/users')
serviceCollection = service.IServiceCollection(application)
f.setServiceParent(serviceCollection)
strports.service("tcp:79", IFingerFactory(f)
).setServiceParent(serviceCollection)
site = server.Site(resource.IResource(f))
strports.service("tcp:8000", site,
).setServiceParent(serviceCollection)
strports.service("ssl:port=443:certKey=cert.pem:privateKey=key.pem", site
).setServiceParent(serviceCollection)
i = IIRCClientFactory(f)
i.nickname = 'fingerbot'
internet.ClientService(
endpoints.clientFromString(reactor, "tcp:irc.freenode.org:6667"),
i).setServiceParent(serviceCollection)
strports.service("tcp:8889", pb.PBServerFactory(IPerspectiveFinger(f))
).setServiceParent(serviceCollection)
| mit | -8,842,067,437,010,118,000 | 25.225 | 78 | 0.626549 | false | 4.075765 | false | false | false |
eayunstack/neutron | neutron/services/logapi/drivers/manager.py | 1 | 4600 |
# Copyright (c) 2017 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from oslo_log import log as logging
from neutron.common import exceptions
from neutron.services.logapi.common import constants as log_const
from neutron.services.logapi.common import db_api
from neutron.services.logapi.common import exceptions as log_exc
from neutron.services.logapi.rpc import server as server_rpc
LOG = logging.getLogger(__name__)
def _get_param(args, kwargs, name, index):
try:
return kwargs[name]
except KeyError:
try:
return args[index]
except IndexError:
msg = "Missing parameter %s" % name
raise log_exc.LogapiDriverException(exception_msg=msg)
@registry.has_registry_receivers
class LoggingServiceDriverManager(object):
def __init__(self):
self._drivers = set()
self.rpc_required = False
registry.publish(log_const.LOGGING_PLUGIN, events.AFTER_INIT, self)
if self.rpc_required:
self._start_rpc_listeners()
self.logging_rpc = server_rpc.LoggingApiNotification()
@property
def drivers(self):
return self._drivers
def register_driver(self, driver):
"""Register driver with logging plugin.
This method is called from drivers on INIT event.
"""
self._drivers.add(driver)
self.rpc_required |= driver.requires_rpc
def _start_rpc_listeners(self):
self._skeleton = server_rpc.LoggingApiSkeleton()
return self._skeleton.conn.consume_in_threads()
@property
def supported_logging_types(self):
if not self._drivers:
return set()
log_types = set()
for driver in self._drivers:
log_types |= set(driver.supported_logging_types)
LOG.debug("Supported logging types (logging types supported "
"by at least one loaded log_driver): %s", log_types)
return log_types
def call(self, method_name, *args, **kwargs):
"""Helper method for calling a method across all extension drivers."""
exc_list = []
for driver in self._drivers:
try:
getattr(driver, method_name)(*args, **kwargs)
except Exception as exc:
exception_msg = ("Extension driver '%(name)s' failed in "
"%(method)s")
exception_data = {'name': driver.name, 'method': method_name}
LOG.exception(exception_msg, exception_data)
exc_list.append(exc)
if exc_list:
raise exceptions.DriverCallError(exc_list=exc_list)
if self.rpc_required:
context = _get_param(args, kwargs, 'context', index=0)
log_obj = _get_param(args, kwargs, 'log_obj', index=1)
try:
rpc_method = getattr(self.logging_rpc, method_name)
except AttributeError:
LOG.error("Method %s is not implemented in logging RPC",
method_name)
return
rpc_method(context, log_obj)
@registry.receives(resources.SECURITY_GROUP_RULE,
[events.AFTER_CREATE, events.AFTER_DELETE])
def _handle_sg_rule_callback(self, resource, event, trigger, **kwargs):
"""Handle sg_rule create/delete events
This method handles sg_rule events, if sg_rule bound by log_resources,
it should tell to agent to update log_drivers.
"""
context = kwargs['context']
sg_rules = kwargs.get('security_group_rule')
if sg_rules:
sg_id = sg_rules.get('security_group_id')
else:
sg_id = kwargs.get('security_group_id')
log_resources = db_api.get_logs_bound_sg(context, sg_id)
if log_resources:
self.call(
log_const.RESOURCE_UPDATE, context, log_resources)
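# A minimal sketch of how a log driver ends up in the manager's _drivers set; the
# driver class below is hypothetical and only illustrates the attributes the
# manager relies on (name, requires_rpc, supported_logging_types).
def _example_register(manager):
    class _FakeDriver(object):
        name = 'fake_driver'
        requires_rpc = False
        supported_logging_types = ('security_group',)
    manager.register_driver(_FakeDriver())
    return manager.supported_logging_types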
| apache-2.0 | -6,518,660,458,877,669,000 | 34.658915 | 78 | 0.626087 | false | 4.189435 | false | false | false |
fujy/ROS-Project | src/rbx2/rbx2_diagnostics/nodes/monitor_dynamixels.py | 1 | 5378 |
#!/usr/bin/env python
""" monitor_dynamixels.py - Version 0.1 2013-07-07
Monitor the /diagnostics topic for Dynamixel messages and disable a servo if we
receive an ERROR status message (e.g. overheating).
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2014 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
    http://www.gnu.org/licenses/gpl.html
"""
import rospy
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus
from arbotix_msgs.srv import Relax, Enable
class MonitorDynamixels:
def __init__(self):
# Initialize the node
rospy.init_node("monitor_dynamixels")
# The arbotix controller uses the /arbotix namespace
namespace = '/arbotix'
# Get the list of joints (servos)
self.joints = rospy.get_param(namespace + '/joints', '')
# Minimum time to rest servos that are hot
self.minimum_rest_interval = rospy.get_param('~minimum_rest_interval', 60)
# Initialize the rest timer
self.rest_timer = 0
# Are we already resting a servo?
self.resting = False
# Are the servos enabled?
self.servos_enabled = False
# Have we displayed a warning recently?
self.warned = False
# Connect to the servo services
self.connect_servos()
rospy.Subscriber('diagnostics', DiagnosticArray, self.get_diagnostics)
def get_diagnostics(self, msg):
if self.rest_timer != 0:
if rospy.Time.now() - self.rest_timer < rospy.Duration(self.minimum_rest_interval):
return
else:
self.resting = False
                self.rest_timer = 0
# Track if we have issued a warning on this pass
warn = False
for k in range(len(msg.status)):
# Check for the Dynamixel identifying string in the name field
if not '_joint' in msg.status[k].name:
# Skip other diagnostic messages
continue
# Check the DiagnosticStatus level for this servo
if msg.status[k].level == DiagnosticStatus.ERROR:
# If the servo is overheating and not already resting, then disable all servos
if not self.resting:
rospy.loginfo("DANGER: Overheating servo: " + str(msg.status[k].name))
rospy.loginfo("Disabling servos for a minimum of " + str(self.minimum_rest_interval) + " seconds...")
self.disable_servos()
self.servos_enabled = False
self.rest_timer = rospy.Time.now()
self.resting = True
break
elif msg.status[k].level == DiagnosticStatus.WARN:
# If the servo is starting to get toasty, display a warning but do not disable
rospy.loginfo("WARNING: Servo " + str(msg.status[k].name) + " getting hot...")
self.warned = True
warn = True
# No servo is overheated so re-enable all servos
if not self.resting and not self.servos_enabled:
rospy.loginfo("Dynamixel temperatures OK so enabling")
self.enable_servos()
self.servos_enabled = True
self.resting = False
# Check if a prior warning is no longer necessary
if self.warned and not warn:
rospy.loginfo("All servos back to a safe temperature")
self.warned = False
def connect_servos(self):
# Create a dictionary to hold the torque and enable services
self.relax = dict()
self.enable = dict()
# Connect to the set_speed services and define a position publisher for each servo
rospy.loginfo("Waiting for joint controllers services...")
for joint in sorted(self.joints):
# A service to relax a servo
relax = '/' + joint + '/relax'
rospy.wait_for_service(relax)
self.relax[joint] = rospy.ServiceProxy(relax, Relax)
# A service to enable/disable a servo
enable_service = '/' + joint + '/enable'
rospy.wait_for_service(enable_service)
self.enable[joint] = rospy.ServiceProxy(enable_service, Enable)
rospy.loginfo("Connected to servos.")
def disable_servos(self):
for joint in sorted(self.joints):
self.relax[joint]()
self.enable[joint](False)
def enable_servos(self):
for joint in sorted(self.joints):
self.enable[joint](True)
if __name__ == '__main__':
MonitorDynamixels()
rospy.spin()
| mit | 8,523,361,733,486,667,000 | 37.421429 | 121 | 0.587207 | false | 4.344103 | false | false | false |
liweitianux/97dev | 97suifangqa/apps/info/models.py | 1 | 4844 |
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib import admin
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
# Create your models here.
class KeyWord(models.Model):
content = models.CharField(u"内容", max_length=200)
description = models.TextField(u"描述", blank=True)
    categoryid = models.IntegerField(u"分类编号", null=True, blank=True)  # TODO: work out exactly what this field means
standard_judge = models.BooleanField(u"是否为标准关键词", default=False)
created_at = models.DateTimeField(auto_now_add=True, verbose_name = u"创建时间")
user = models.ForeignKey(User, verbose_name=u"用户", related_name="keywords", null=True, blank=True)
    content_type = models.ForeignKey(ContentType, null=True, blank=True)  # attach KeyWord to other objects via a generic foreign key (GFK)
object_id = models.PositiveIntegerField(null=True, blank=True)
content_object = generic.GenericForeignKey("content_type", "object_id")
class Meta:
verbose_name_plural = u"关键词"
def __unicode__(self):
return "%s" % self.content
# Query {{{
class Query(models.Model):
content = models.CharField(u"内容", max_length=500)
level = models.PositiveIntegerField(u"级数",default=1)
    categoryid = models.IntegerField(u"分类编号", null=True, blank=True, default=1)  # TODO: work out exactly what this field means
created_at = models.DateTimeField(auto_now_add=True, verbose_name = u"创建时间")
standard_judge = models.BooleanField(u"是否为标准问题", default=False)
user = models.ForeignKey(User, verbose_name=u"用户", related_name="querys")
    content_type = models.ForeignKey(ContentType, null=True, blank=True)  # attach Query to other objects via a generic foreign key (GFK)
object_id = models.PositiveIntegerField(null=True, blank=True)
content_object = generic.GenericForeignKey("content_type", "object_id")
class Meta:
verbose_name_plural = u"问题"
def __unicode__(self):
return "< Query: %s >" % self.content
def show(self):
"""
used in 'search/search.html'
to show search result
"""
return self.__unicode__()
# }}}
class WordWordRelation(models.Model):
value = models.FloatField(u"关联度")
word1 = models.ForeignKey("KeyWord", verbose_name=u"关键词1", related_name="relations_with_other_words_as_primary", null=True, blank=True)
word2 = models.ForeignKey("KeyWord", verbose_name=u"关键词2", related_name="relations_with_other_words_as_deputy", null=True, blank=True)
class Meta:
verbose_name_plural = u"关键词与关键词的关系"
def __unicode__(self):
return "< WordWordRelation: (%s, %s) >" % (self.word1.content, self.word2.content)
class QueryQueryRelation(models.Model):
value = models.FloatField(u"关联度")
query1 = models.ForeignKey("Query", verbose_name=u"问题1", related_name="relations_with_other_querys_as_primary", null=True, blank=True)
query2 = models.ForeignKey("Query", verbose_name=u"问题2", related_name="relations_with_other_querys_as_deputy", null=True, blank=True)
class Meta:
verbose_name_plural = u"问题与问题的关系"
def __unicode__(self):
return "< QueryQueryRelation: (%s, %s) >" % (self.query1.content, self.query2.content)
class WordQueryRelation(models.Model):
value = models.FloatField(u"关联度")
word = models.ForeignKey("KeyWord", verbose_name=u"关键词", related_name="relations_with_querys", null=True, blank=True)
query2 = models.ForeignKey("Query", verbose_name=u"问题", related_name="relations_with_words", null=True, blank=True)
class Meta:
verbose_name_plural = u"关键词与问题的关系"
def __unicode__(self):
return "< WordQueryRelation: (%s, %s) >" % (self.word.content, self.query.content)
class BlogQueryRelation(models.Model):
value = models.FloatField(u"关联度")
blog = models.ForeignKey("sciblog.SciBlog", verbose_name=u"文章", related_name="relations_with_querys", null=True, blank=True)
query = models.ForeignKey("Query", verbose_name=u"问题", related_name="relations_with_blogs", null=True, blank=True)
class Meta:
verbose_name_plural = u"文章与问题的关系"
def __unicode__(self):
return "< BlogRelation: (%s, %s) >" % (self.blog.title, self.query.content)
admin.site.register([
KeyWord,
Query,
WordWordRelation,
WordQueryRelation,
BlogQueryRelation,
])
| bsd-2-clause | 472,562,872,915,401,660 | 37.386555 | 139 | 0.644046 | false | 3.14384 | false | false | false |
ShadowApex/Tuxemon | tuxemon/core/states/persistance/save_menu.py | 2 | 3498 |
from __future__ import division
import logging
import os
import pygame
from core import prepare
from core.tools import open_dialog
from core.components import save
from core.components.menu import PopUpMenu
from core.components.menu.interface import MenuItem
from core.components.ui import text
# Create a logger for optional handling of debug messages.
logger = logging.getLogger(__name__)
logger.debug("%s successfully imported" % __name__)
class SaveMenuState(PopUpMenu):
number_of_slots = 3
shrink_to_items = True
def initialize_items(self):
empty_image = None
rect = self.game.screen.get_rect()
slot_rect = pygame.Rect(0, 0, rect.width * 0.80, rect.height // 6)
for i in range(self.number_of_slots):
# Check to see if a save exists for the current slot
if os.path.exists(prepare.SAVE_PATH + str(i + 1) + ".save"):
image = self.render_slot(slot_rect, i + 1)
yield MenuItem(image, "SAVE", None, None)
else:
if not empty_image:
empty_image = self.render_empty_slot(slot_rect)
yield MenuItem(empty_image, "SAVE", None, None)
def render_empty_slot(self, rect):
slot_image = pygame.Surface(rect.size, pygame.SRCALPHA)
rect = rect.move(0, rect.height // 2 - 10)
text.draw_text(slot_image, "Empty Slot", rect, font=self.font)
return slot_image
def render_slot(self, rect, slot_num):
slot_image = pygame.Surface(rect.size, pygame.SRCALPHA)
# TODO: catch missing file
thumb_image = pygame.image.load(prepare.SAVE_PATH + str(slot_num) + ".png").convert()
thumb_rect = thumb_image.get_rect().fit(rect)
thumb_image = pygame.transform.smoothscale(thumb_image, thumb_rect.size)
# Draw the screenshot
slot_image.blit(thumb_image, (rect.width * .20, 0))
# Draw the slot text
rect = rect.move(0, rect.height // 2 - 10)
text.draw_text(slot_image, "Slot " + str(slot_num), rect, font=self.font)
# Try and load the save game and draw details about the save
try:
save_data = save.load(slot_num)
except Exception as e:
logger.error(e)
save_data = dict()
save_data["error"] = "Save file corrupted"
logger.error("Failed loading save file.")
raise
if "error" not in save_data:
x = int(rect.width * .5)
text.draw_text(slot_image, save_data['player_name'], (x, 0, 500, 500), font=self.font)
text.draw_text(slot_image, save_data['time'], (x, 50, 500, 500), font=self.font)
return slot_image
def on_menu_selection(self, menuitem):
logger.info("Saving!")
try:
save.save(self.game.player1,
self.capture_screenshot(),
self.selected_index + 1,
self.game)
except Exception as e:
logger.error("Unable to save game!!")
logger.error(e)
open_dialog(self.game, ["There was a problem saving!"])
self.game.pop_state(self)
else:
open_dialog(self.game, ["Saved!"])
self.game.pop_state(self)
def capture_screenshot(self):
screenshot = pygame.Surface(self.game.screen.get_size())
world = self.game.get_state_name("WorldState")
world.draw(screenshot)
return screenshot
| gpl-3.0 | -4,179,556,928,142,119,400 | 35.4375 | 98 | 0.595769 | false | 3.76129 | false | false | false |
xiaoyongaa/ALL | ATM/src/user.py | 1 | 15988 |
import os
import json
import datetime
import logging
import sys
d2=os.getcwd()
d2=d2.replace("bin","")
d2=d2.replace("\\","/")
sys.path.append(d2)
d2=d2+"db/admin/"
##################
d3=os.getcwd()
d3=d3.replace("bin","")
d3=d3.replace("\\","/")
sys.path.append(d3)
d3=d3+"db/user/"
time=datetime.date.today()
time=str(time)
time=time.replace("-","_")
user_stat={"kahao":"","user":"","pass":"","edu":"","benyueedu":"","createdata":"","status":"","saving":""}
def main():
    falg=True
    msg=["1.取款","2.存款","3.转账","4.还款"]
    while falg:
        for i in msg:
            print(i)
        chosse=input("请输入你要选择操作的编号:")
        chosse=chosse.strip()
        if chosse=="1":
            print("你选择了取款")
            qukuan()
        elif chosse=="2":
            print("你选择了存款")
            chunkuan()
        elif chosse=="3":
            print("你选择了转账")
            zhuanzhang()
        elif chosse=="4":
            print("你选择了还款")
            huankuan()
        elif chosse=="q":
            break
        else:
            print("你选择的操作编号不正确,请重新输入")
# Withdrawal module
def qukuan():
    falg=True
    kahao=user_stat.get("kahao")
    saving=user_stat.get("saving")
    user=user_stat.get("user")
    edu=user_stat.get("edu")
    saving=int(saving)
    edu=int(edu)
    while falg:
        choose=input("请选择取款金额:")
        choose=choose.strip()
        if choose=="q":
            exit("你选择了退出")
        elif choose=="b":
            flag=False
            break
        if choose.isdigit():
            print("你输入的金额格式正确")
            choose=int(choose)
            if choose<=saving:
                print("你的储存卡的余额足够,可以提现,提现成功!!!!")
                saving=saving-choose
                # Update the user's stored data
                os.chdir(d3)
                os.chdir(kahao)
                basic_infor=json.load(open("basic_infor","r"))
                basic_infor["saving"]=saving
                json.dump(basic_infor,open("basic_infor","w"))
                # Finished updating the user's stored data
                message="卡号{kahao},用户{user},取款{choose}成功!!!".format(kahao=kahao,user=user,choose=choose)
                log_suer("普通用户取款成功",kahao,message)
                user_stat["saving"]=saving
                ##### Write the transaction to the statement
                zhangdan_user("提现记录",kahao,message)
                break
            elif choose>saving:
                print("你的储存卡的余额不够,需要从信用卡提现")
                kamax=edu*0.7  # maximum amount that can be advanced from the credit card
                tixian=choose-saving  # part drawn from the credit card after using up the savings balance
                if tixian<=kamax:
                    print("可以提现,提现成功!!!!")
                    # Update the user's stored data
                    os.chdir(d3)
                    os.chdir(kahao)
                    basic_infor=json.load(open("basic_infor","r"))
                    basic_infor["saving"]="0"
                    edu=edu-tixian-tixian*0.05
                    #basic_infor["edu"]=edu
                    basic_infor["benyueedu"]=edu
                    json.dump(basic_infor,open("basic_infor","w"))
                    # Finished updating the user's stored data
                    message="卡号{kahao},用户{user},取款{choose}成功!!!".format(kahao=kahao,user=user,choose=choose)
                    log_suer("普通用户取款成功",kahao,message)
                    user_stat["saving"]="0"
                    user_stat["benyueedu"]=edu
                    ##### Write the transaction to the statement
                    zhangdan_user("提现记录",kahao,message)
                    break
                elif tixian>kamax:
                    print("不可以提现,你要提现的金额超出范围")
                    message="不可以提现,你要提现的金额超出范围"
                    log_suer("不可以提现,你要提现的金额超出范围",kahao,message)
        else:
            print("你输入的金额格式错误,请重新输入")
            message="你输入的金额格式错误,请重新输入"
            log_suer("你输入的金额格式错误,请重新输入",kahao,message)
# End of withdrawal module
# Deposit module
def chunkuan():
    falg=True
    kahao=user_stat.get("kahao")
    saving=user_stat.get("saving")
    user=user_stat.get("user")
    while falg:
        chosse=input("请选择要存款的金额: ")
        chosse=chosse.strip()
        if chosse=="q":
            exit("你选择了退出")
        elif chosse=="b":
            falg=False
            break
        if chosse.isdigit():
            print("你输入的金额正确")
            chosse=int(chosse)
            saving=int(saving)
            saving=chosse+saving
            ## Update the user's stored data
            os.chdir(d3)
            os.chdir(kahao)
            basic_infor=json.load(open("basic_infor","r"))
            basic_infor["saving"]=saving
            json.dump(basic_infor,open("basic_infor","w"))
            ## Finished updating the user's stored data
            message="卡号{kahao}用户{user}存款{chosse}".format(kahao=kahao,user=user,chosse=chosse)
            log_suer("用户存款成功!!!",kahao,message)
            user_stat["saving"]=saving
            ##### Write the transaction to the statement
            zhangdan_user("存款记录",kahao,message)
            break
        else:
            print("你输入的金额不正确")
            message="你输入的金额不正确"
            log_suer("你输入的金额不正确!!!",kahao,message)
# End of deposit module
# Transfer module
def zhuanzhang():
    flag=True
    kahao=user_stat.get("kahao")
    saving=user_stat.get("saving")
    user=user_stat.get("user")
    while flag:
        choose_user=input("请选择转账用户卡号:")
        if choose_user=="q":
            exit("你选择了退出")
        elif choose_user=="b":
            flag=False
            break
        choose_cash=input("请选择转账金额: ")
        choose_user=choose_user.strip()
        choose_cash=choose_cash.strip()
        os.chdir(d3)
        if os.path.exists(choose_user):
            print("你要转账的用户卡号在系统中,可以转账")
            if choose_cash.isdigit():
                print("你输入的金额格式正确")
                choose_cash=int(choose_cash)
                saving=int(saving)
                # Check whether there is enough money to transfer
                if saving>=choose_cash:
                    print("你账户里面有足够的钱可以转账")
                    ## Deduct the amount from this account first
                    os.chdir(kahao)
                    basic_infor=json.load(open("basic_infor","r"))
                    saving=saving-choose_cash
                    basic_infor["saving"]=saving
                    json.dump(basic_infor,open("basic_infor","w"))
                    user_stat["saving"]=saving
                    ## Deduction done
                    ## Credit the target user's account
                    os.chdir(d3)
                    os.chdir(choose_user)
                    basic_infor=json.load(open("basic_infor","r"))
                    old=basic_infor.get("saving")
                    old=int(old)  # previous balance of the target account
                    new=old+choose_cash
                    basic_infor["saving"]=new
                    json.dump(basic_infor,open("basic_infor","w"))
                    print("转账成功!!!!!!!!!!!!")
                    # Transfer done
                    message="卡号{kahao}用户{user}转入给{choose_user}转账金额{choose_cash}元".format(kahao=kahao,user=user,choose_user=choose_user,choose_cash=choose_cash)
                    log_suer("用户转账成功!!!",kahao,message)
                    ##### Write the transaction to the statement
                    zhangdan_user("转账记录",kahao,message)
                    break
                else:
                    print("你的账户余额不足,不能转账,重新输入金额")
                    message="你的账户余额不足,不能转账"
                    log_suer("账户余额不足",kahao,message)
            else:
                print("你输入的金额格式不正确,请重新输入")
                message="你输入的金额格式不正确,请重新输入"
                log_suer("输入的金额格式不正确",kahao,message)
        else:
            print("你要转账的用户卡号不在系统中,请重新输入")
            message="你要转账的用户卡号不在系统中,请重新输入"
            log_suer("转账的用户卡号不在系统中",kahao,message)
# End of transfer module
# Repayment module
def huankuan():
    flag=True
    kahao=user_stat.get("kahao")
    edu=user_stat.get("edu")  # the card's credit limit
    benyueedu=user_stat.get("benyueedu")  # the limit still available this month
    saving=user_stat.get("saving")  # balance of the savings account
    edu=int(edu)
    benyueedu=int(benyueedu)
    saving=int(saving)
    while flag:
        if edu>benyueedu:
            print("你需要还款")
            cash=input("请输入你要还款的金额:")
            cash=cash.strip()
            if cash=="q":
                exit("你选择了退出")
            elif cash=="b":
                flag=False
                break
            if cash.isdigit():
                print("你输入的金额合法")
                cash=int(cash)
                if cash<=saving:
                    print("你的余额足够,开始还款")
                    os.chdir(d3)
                    os.chdir(kahao)
                    basic_infor=json.load(open("basic_infor","r"))
                    saving=saving-cash
                    benyueedu=benyueedu+cash
                    benyueedu=int(benyueedu)
                    if edu==benyueedu:
                        basic_infor["saving"]=saving
                        basic_infor["benyueedu"]=benyueedu
                        ###### Update the stored data
                        json.dump(basic_infor,open("basic_infor","w"))
                        user_stat["saving"]=saving
                        user_stat["benyueedu"]=benyueedu
                        ###### Update done
                        print("你已经全部还完额度")
                        message="你已经全部还完额度"
                        log_suer("你已经全部还完额度",kahao,message)
                        ##### Write the transaction to the statement
                        zhangdan_user("还款记录",kahao,message)
                    elif edu>benyueedu:
                        basic_infor["saving"]=saving
                        basic_infor["benyueedu"]=benyueedu
                        ###### Update the stored data
                        json.dump(basic_infor,open("basic_infor","w"))
                        user_stat["saving"]=saving
                        user_stat["benyueedu"]=benyueedu
                        message="你已经还款,但是还没还清"
                        log_suer("你已经还款,但是还没还清",kahao,message)
                        ###### Update done
                        ##### Write the transaction to the statement
                        zhangdan_user("还款记录",kahao,message)
                    elif edu<benyueedu:
                        print("你所还的钱超出了你的欠款,请重新输入")
                        message="你所还的钱超出了你的欠款,请重新输入"
                        log_suer("你所还的钱超出了你的欠款,请重新输入",kahao,message)
                    break
                else:
                    print("你的余额不足,无法还款")
                    message="你的余额不足,无法还款"
                    log_suer("你的余额不足,无法还款",kahao,message)
            else:
                print("你输入的金额不合法,请重新输入")
                message="你输入的金额不合法,请重新输入"
                log_suer("你输入的金额不合法,请重新输入",kahao,message)
        elif edu==benyueedu:
            print("你不需要还款")
            message="你不需要还款"
            log_suer("你不需要还款",kahao,message)
            break
# End of repayment module
# login module
def longin():
    falg=True
    while falg:
        os.chdir(d3)
        user=input("Enter your card number: ")
        pas=input("Enter your password: ")
        user=user.strip()
        pas=pas.strip()
        if os.path.exists(user):
            print("The user you entered exists")
            os.chdir(user)
            basic_infor=json.load(open("basic_infor","r"))
            kahao=basic_infor.get("kahao")
            pas2=basic_infor.get("pass")
            status=basic_infor.get("status")
            edu=basic_infor.get("edu")
            benyueedu=basic_infor.get("benyueedu")
            user=basic_infor.get("user")
            createdata=basic_infor.get("createdata")
            saving=basic_infor.get("saving")
            if pas==pas2 and status==0:
                print("Account and password are correct, login successful")
                user_stat["kahao"]=kahao
                user_stat["user"]=user
                user_stat["pass"]=pas2
                user_stat["edu"]=edu
                user_stat["benyueedu"]=benyueedu
                user_stat["createdata"]=createdata
                user_stat["status"]=status
                user_stat["saving"]=saving
                message="Card {kahao}, user {user}".format(kahao=kahao,user=user)
                os.chdir(d2)
                log("Regular user login succeeded","record/",message)
                ###############
                log_suer("Regular user login succeeded",kahao,message)
                return True
            else:
                print("The account or password is incorrect, login failed")
                os.chdir(d2)
                message="The account or password is incorrect, login failed"
                log("The account or password is incorrect, login failed","record/",message)
        else:
            print("The user you entered does not exist")
            os.chdir(d2)
            message="The user you entered does not exist"
            log("The user you entered does not exist","record/",message)
# login module
# logging module
def log_suer(name,kahao,message):
    #create logger
    logger=logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    # file output Handler
    os.chdir(d3)
    os.chdir(kahao)
    os.chdir("record")
    fh=logging.FileHandler(time+".log")
    fh.setLevel(logging.DEBUG)
    # set the log format
    formatter=logging.Formatter("%(asctime)s-%(name)s-%(levelname)s-%(message)s")
    # register the Formatter with the Handler
    fh.setFormatter(formatter)
    # register the Handler with the logger
    logger.addHandler(fh)
    ######################
    logger.info(message)
def log(name,path,message):
    #create logger
    logger=logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    # file output Handler
    os.chdir(path)
    fh=logging.FileHandler(time+".log")
    fh.setLevel(logging.DEBUG)
    # set the log format
    formatter=logging.Formatter("%(asctime)s-%(name)s-%(levelname)s-%(message)s")
    # register the Formatter with the Handler
    fh.setFormatter(formatter)
    # register the Handler with the logger
    logger.addHandler(fh)
    ######################
    logger.info(message)
def zhangdan_user(name,kahao,message):
    #create logger
    logger=logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    # file output Handler
    os.chdir(d3)
    os.chdir(kahao)
    os.chdir("record")
    fh=logging.FileHandler(time+".record")
    fh.setLevel(logging.DEBUG)
    # set the log format
    formatter=logging.Formatter("%(asctime)s-%(name)s-%(levelname)s-%(message)s")
    # register the Formatter with the Handler
    fh.setFormatter(formatter)
    # register the Handler with the logger
    logger.addHandler(fh)
    ######################
    logger.info(message)
def run():
r=longin()
if r==True:
main() | apache-2.0 | 5,816,718,957,689,920,000 | 32.576733 | 159 | 0.488204 | false | 2.69823 | false | false | false |
pedrolegold/uforge-cli | src/uforgecli/uforgecli.py | 1 | 8700 | '''
UForgeCLI
'''
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
import argparse
import getpass
import base64
import httplib2
import os
import sys
import ussclicore.utils.generics_utils
from ussclicore.utils import printer
from ussclicore.cmd import Cmd, CmdUtils
from ussclicore.argumentParser import CoreArgumentParser, ArgumentParser, ArgumentParserError
import commands
from uforge.application import Api
from utils import *
__author__ = "UShareSoft"
__license__ = "Apache License 2.0"
class CmdBuilder(object):
@staticmethod
def generateCommands(class_):
# Create subCmds if not exist
if not hasattr(class_, 'subCmds'):
class_.subCmds = {}
user = commands.user.User_Cmd()
class_.subCmds[user.cmd_name] = user
entitlement = commands.entitlement.Entitlement_Cmd()
class_.subCmds[entitlement.cmd_name] = entitlement
subscription = commands.subscription.Subscription_Cmd()
class_.subCmds[subscription.cmd_name] = subscription
role = commands.role.Role_Cmd()
class_.subCmds[role.cmd_name] = role
images = commands.images.Images_Cmd()
class_.subCmds[images.cmd_name] = images
org = commands.org.Org_Cmd()
class_.subCmds[org.cmd_name] = org
os = commands.os.Os_Cmd()
class_.subCmds[os.cmd_name] = os
pimages = commands.pimages.Pimages_Cmd()
class_.subCmds[pimages.cmd_name] = pimages
usergrp = commands.usergrp.Usergrp_Cmd()
class_.subCmds[usergrp.cmd_name] = usergrp
template = commands.template.Template_Cmd()
class_.subCmds[template.cmd_name] = template
## Main cmd
class Uforgecli(Cmd):
#subCmds = {
# 'tools': CmdUtils
#}
def __init__(self):
super(Uforgecli, self).__init__()
self.prompt = 'uforge-cli >'
def do_exit(self, args):
return True
def do_quit(self, args):
return True
def arg_batch(self):
doParser = ArgumentParser("batch", add_help = False, description="Execute uforge-cli batch command from a file (for scripting)")
mandatory = doParser.add_argument_group("mandatory arguments")
optionnal = doParser.add_argument_group("optional arguments")
mandatory.add_argument('--file', dest='file', required=True, help="uforge-cli batch file commands")
optionnal.add_argument('-f', '--fatal', dest='fatal', action='store_true',required=False, help="exit on first error in batch file (default is to continue)")
# Help is not call at the doParser declaration because it would create two separate argument group for optional arguments.
optionnal.add_argument('-h', '--help', action='help', help="show this help message and exit")
return doParser
def do_batch(self, args):
try:
doParser = self.arg_batch()
try:
doArgs = doParser.parse_args(args.split())
except SystemExit as e:
return
with open(doArgs.file) as f:
for line in f:
try:
self.run_commands_at_invocation([line])
except:
printer.out("bad command '"+line+"'", printer.ERROR)
# If fatal optionnal argument is specified.
if doArgs.fatal:
printer.out("Fatal error leading to exit task", printer.ERROR)
return
                    print("\n")
except IOError as e:
printer.out("File error: "+str(e), printer.ERROR)
return
except ArgumentParserError as e:
printer.out("In Arguments: "+str(e), printer.ERROR)
self.help_batch()
def help_batch(self):
doParser = self.arg_batch()
doParser.print_help()
def cmdloop(self, args):
if len(args):
code = self.run_commands_at_invocation([str.join(' ', args)])
sys.exit(code)
else:
self._cmdloop()
def generate_base_doc(app, uforgecli_help):
myactions=[]
cmds= sorted(app.subCmds)
for cmd in cmds:
myactions.append(argparse._StoreAction(
option_strings=[],
dest=str(cmd),
nargs=None,
const=None,
default=None,
type=str,
choices=None,
required=False,
help=str(app.subCmds[cmd].__doc__),
metavar=None))
return myactions
def set_globals_cmds(subCmds):
for cmd in subCmds:
if hasattr(subCmds[cmd], 'set_globals'):
subCmds[cmd].set_globals(api, username, password)
if hasattr(subCmds[cmd], 'subCmds'):
set_globals_cmds(subCmds[cmd].subCmds)
#Generate Uforgecli base command + help base command
CmdBuilder.generateCommands(Uforgecli)
app = Uforgecli()
myactions=generate_base_doc(app, uforgecli_help="")
# Args parsing
mainParser = CoreArgumentParser(add_help=False)
CoreArgumentParser.actions=myactions
mainParser.add_argument('-U', '--url', dest='url', type=str, help='the server URL endpoint to use', required = False)
mainParser.add_argument('-u', '--user', dest='user', type=str, help='the user name used to authenticate to the server', required = False)
mainParser.add_argument('-p', '--password', dest='password', type=str, help='the password used to authenticate to the server', required = False)
mainParser.add_argument('-v', action='version', help='displays the current version of the uforge-cli tool', version="%(prog)s version '"+constants.VERSION+"'")
mainParser.add_argument('-h', '--help', dest='help', action='store_true', help='show this help message and exit', required = False)
mainParser.add_argument('-k', '--publickey', dest='publickey', type=str, help='public API key to use for this request. Default: no default', required = False)
mainParser.add_argument('-s', '--secretkey', dest='secretkey', type=str, help='secret API key to use for this request. Default: no default', required = False)
mainParser.add_argument('-c', '--no-check-certificate', dest='crypt', action="store_true", help='Don\'t check the server certificate against the available certificate authorities', required = False)
mainParser.set_defaults(help=False)
mainParser.add_argument('cmds', nargs='*', help='UForge CLI cmds')
mainArgs, unknown = mainParser.parse_known_args()
if mainArgs.help and not mainArgs.cmds:
mainParser.print_help()
exit(0)
if mainArgs.user is not None and mainArgs.url is not None:
if not mainArgs.password:
mainArgs.password = getpass.getpass()
username=mainArgs.user
password=mainArgs.password
url=mainArgs.url
if mainArgs.crypt == True:
sslAutosigned = True
else:
sslAutosigned = False
else:
mainParser.print_help()
exit(0)
# UForge API instantiation
client = httplib2.Http(disable_ssl_certificate_validation=sslAutosigned, timeout=constants.HTTP_TIMEOUT)
#activate http caching
#client = httplib2.Http(generics_utils.get_Uforgecli_dir()+os.sep+"cache")
headers = {}
headers['Authorization'] = 'Basic ' + base64.encodestring( username + ':' + password )
api = Api(url, client = client, headers = headers)
set_globals_cmds(app.subCmds)
if mainArgs.help and len(mainArgs.cmds)>=1:
argList=mainArgs.cmds + unknown;
argList.insert(len(mainArgs.cmds)-1, "help")
app.cmdloop(argList)
elif mainArgs.help:
app.cmdloop(mainArgs.cmds + unknown + ["-h"])
else:
app.cmdloop(mainArgs.cmds + unknown)
| apache-2.0 | 6,297,265,963,414,802,000 | 38.545455 | 198 | 0.564483 | false | 4.300544 | false | false | false |
fenceFoil/canopto | NarrowWavyLines01.py | 1 | 1459 | #!/bin/python3
from Canopto import Canopto
import pygame
from pygame import *
from pygame.locals import *
import time
from random import randint
from colorsys import *
import sys
display_size = (8, 8)
cans = Canopto (display_size[0], display_size[1], True, True)
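# Each frame below is an 8x8 pygame Surface cut from a larger scrolling image and pushed to the Canopto display via drawSurface()/update().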
# Create an image, wider than the display, with randomized lines across it
# and a little buffer to each side
# Scroll image over display
while True:
# Make an image 1000 pixels wide
lines = Surface((1000, display_size[1]+2))
lines.fill(Color(0, 0, 92, 255))
# Draw lines
# Come up with sets of points. Alternate between high and low lines. Allow random space between each, and generate up to the end of the surface
# Simple algorithm: generate one line.
margin = 5
currX = margin
points = [(margin, randint(0, lines.get_height()-1))]
while currX < lines.get_width() - margin:
currX = randint(currX+7, currX+30)
currX = min(lines.get_width()-margin, currX)
points.append ((currX, randint(1, lines.get_height()-2)))
# Draw line from points
#line_color = Color(54, 255, 54, 255)
line_color = Color(255, 128, 0, 255)
pygame.draw.aalines(lines, line_color, False, points)
# Scroll image across canopto
for x in range (0, lines.get_width()-(display_size[0]-1)):
frame = Surface (display_size)
frame.blit (lines, (-x, -1))
cans.drawSurface(frame)
cans.update()
for event in pygame.event.get():
if event.type==QUIT:
sys.exit(0)
time.sleep(0.03) | bsd-3-clause | 2,710,826,725,089,136,000 | 27.627451 | 144 | 0.699109 | false | 2.989754 | false | false | false |
jiafengwu0301/App_BackEnd | api/serializers.py | 1 | 3530 |
from rest_framework import serializers
from django.contrib.auth.models import User
from .models import Account
from rest_framework.exceptions import ValidationError
class AccountCreateSerializer(serializers.ModelSerializer):
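    """Serializer that creates a new User (with a hashed password) and its linked Account."""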
username = serializers.CharField(source='user.username')
password = serializers.CharField(source='user.password', style={'input_type': 'password'})
class Meta:
model = Account
fields = [
'id',
'username',
'displayName',
'facebook',
'password',
]
def create(self, validated_data):
user_data = validated_data.pop('user')
user = User.objects.create(**user_data)
user.set_password(user_data['password'])
user.save()
account = Account.objects.create(user=user, **validated_data)
account.username = user.username
account.save()
return account
class AccountSerializer(serializers.ModelSerializer):
class Meta:
model = Account
fields = [
'id',
'username',
'displayName',
'facebook',
]
class AccountRetrieveSerializer(serializers.ModelSerializer):
class Meta:
model = Account
fields = [
'id',
'username',
'displayName',
'facebook',
]
class UpdateAccountSerializer(serializers.ModelSerializer):
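    """Serializer for updating an account; blank values leave the existing fields untouched."""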
password = serializers.CharField(source='user.password', allow_blank=True, allow_null=True)
facebook = serializers.CharField(allow_blank=True, allow_null=True)
displayName = serializers.CharField(allow_blank=True, allow_null=True)
class Meta:
model = Account
fields = [
'displayName',
'facebook',
'password'
]
def update(self, instance, validated_data):
user_data = validated_data.pop('user', None)
user = User.objects.get(id=instance.user.id)
instance.displayName = self.value_or_keep(instance.displayName, validated_data.get('displayName', instance.displayName))
instance.facebook = self.value_or_keep(instance.facebook, validated_data.get('facebook',instance.facebook))
if user_data['password'] != "":
user.set_password(user_data['password'])
user.save()
instance.save()
return instance
@staticmethod
def value_or_keep(field, value):
if value == "":
return field
return value
class AuthenticateSerializer(serializers.ModelSerializer):
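    """Serializer that checks username/password and exposes the matching account on success."""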
username = serializers.CharField(source='user.username')
password = serializers.CharField(source='user.password', style={'input_type': 'password'})
account = AccountSerializer(allow_null=True, read_only=True)
class Meta:
model = User
depth = 1
fields = [
'username',
'password',
'account',
]
extra_kwargs = {"password": {"write_only": True}}
def validate(self, attrs):
validation_data = dict(attrs)['user']
username = validation_data.get('username', None)
password = validation_data.get('password', None)
try:
user = User.objects.get(username=username)
except:
raise ValidationError("Incorrect Username/Password")
if user.check_password(password):
attrs['account'] = user.account
return attrs
raise ValidationError("Incorrect login/password.")
| apache-2.0 | 2,381,743,067,424,387,000 | 27.467742 | 128 | 0.609632 | false | 4.554839 | false | false | false |
googleapis/releasetool | tests/commands/tag/test_tag_java.py | 1 | 1502 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from releasetool.commands.tag.java import (
_parse_release_tag,
kokoro_job_name,
package_name,
)
RELEASE_PLEASE_OUTPUT = """
✔ creating release v1.20.0
✔ Created release: https://github.com/googleapis/java-bigtable/releases/tag/v1.20.0.
✔ adding comment to https://github.com/googleapis/java-bigtable/issue/610
✔ adding label autorelease: tagged to https://github.com/googleapis/java-bigtable/pull/610
✔ removing label autorelease: pending from 610
"""
def test_releasetool_release_tag():
expected = "v1.20.0"
assert _parse_release_tag(RELEASE_PLEASE_OUTPUT) == expected
def test_kokoro_job_name():
job_name = kokoro_job_name("upstream-owner/upstream-repo", "some-package-name")
assert job_name == "cloud-devrel/client-libraries/java/upstream-repo/release/stage"
def test_package_name():
name = package_name({"head": {"ref": "release-storage-v1.2.3"}})
assert name is None
| apache-2.0 | 1,289,058,786,634,031,900 | 34.52381 | 90 | 0.736595 | false | 3.352809 | false | false | false |
anythingrandom/eclcli | eclcli/dh/dhclient/common/cliutils.py | 1 | 7010 | from __future__ import print_function
import getpass
import inspect
import os
import sys
import textwrap
from oslo_utils import encodeutils
from oslo_utils import strutils
import prettytable
import six
from six import moves
from ..common._i18n import _
class MissingArgs(Exception):
"""Supplied arguments are not sufficient for calling a function."""
def __init__(self, missing):
self.missing = missing
msg = _("Missing arguments: %s") % ", ".join(missing)
super(MissingArgs, self).__init__(msg)
def validate_args(fn, *args, **kwargs):
"""Check that the supplied args are sufficient for calling a function.
>>> validate_args(lambda a: None)
Traceback (most recent call last):
...
    MissingArgs: Missing arguments: a
>>> validate_args(lambda a, b, c, d: None, 0, c=1)
Traceback (most recent call last):
...
    MissingArgs: Missing arguments: b, d
:param fn: the function to check
:param arg: the positional arguments supplied
:param kwargs: the keyword arguments supplied
"""
argspec = inspect.getargspec(fn)
num_defaults = len(argspec.defaults or [])
required_args = argspec.args[:len(argspec.args) - num_defaults]
def isbound(method):
return getattr(method, '__self__', None) is not None
if isbound(fn):
required_args.pop(0)
missing = [arg for arg in required_args if arg not in kwargs]
missing = missing[len(args):]
if missing:
raise MissingArgs(missing)
def arg(*args, **kwargs):
"""Decorator for CLI args.
Example:
>>> @arg("name", help="Name of the new entity")
... def entity_create(args):
... pass
"""
def _decorator(func):
add_arg(func, *args, **kwargs)
return func
return _decorator
def env(*args, **kwargs):
"""Returns the first environment variable set.
If all are empty, defaults to '' or keyword arg `default`.
"""
for arg in args:
value = os.environ.get(arg)
if value:
return value
return kwargs.get('default', '')
def add_arg(func, *args, **kwargs):
"""Bind CLI arguments to a shell.py `do_foo` function."""
if not hasattr(func, 'arguments'):
func.arguments = []
# NOTE(sirp): avoid dups that can occur when the module is shared across
# tests.
if (args, kwargs) not in func.arguments:
# Because of the semantics of decorator composition if we just append
# to the options list positional options will appear to be backwards.
func.arguments.insert(0, (args, kwargs))
def unauthenticated(func):
"""Adds 'unauthenticated' attribute to decorated function.
Usage:
>>> @unauthenticated
... def mymethod(f):
... pass
"""
func.unauthenticated = True
return func
def isunauthenticated(func):
"""Checks if the function does not require authentication.
Mark such functions with the `@unauthenticated` decorator.
:returns: bool
"""
return getattr(func, 'unauthenticated', False)
def print_list(objs, fields, formatters=None, sortby_index=0,
mixed_case_fields=None, field_labels=None):
"""Print a list or objects as a table, one row per object.
:param objs: iterable of :class:`Resource`
:param fields: attributes that correspond to columns, in order
:param formatters: `dict` of callables for field formatting
:param sortby_index: index of the field for sorting table rows
:param mixed_case_fields: fields corresponding to object attributes that
have mixed case names (e.g., 'serverId')
:param field_labels: Labels to use in the heading of the table, default to
fields.
"""
formatters = formatters or {}
mixed_case_fields = mixed_case_fields or []
field_labels = field_labels or fields
if len(field_labels) != len(fields):
raise ValueError(_("Field labels list %(labels)s has different number "
"of elements than fields list %(fields)s"),
{'labels': field_labels, 'fields': fields})
if sortby_index is None:
kwargs = {}
else:
kwargs = {'sortby': field_labels[sortby_index]}
pt = prettytable.PrettyTable(field_labels)
pt.align = 'l'
for o in objs:
row = []
for field in fields:
if field in formatters:
row.append(formatters[field](o))
else:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
data = getattr(o, field_name, '')
row.append(data)
pt.add_row(row)
print(encodeutils.safe_encode(pt.get_string(**kwargs)))
def print_dict(dct, dict_property="Property", wrap=0):
"""Print a `dict` as a table of two columns.
:param dct: `dict` to print
:param dict_property: name of the first column
:param wrap: wrapping for the second column
"""
pt = prettytable.PrettyTable([dict_property, 'Value'])
pt.align = 'l'
for k, v in six.iteritems(dct):
# convert dict to str to check length
if isinstance(v, dict):
v = six.text_type(v)
if wrap > 0:
v = textwrap.fill(six.text_type(v), wrap)
# if value has a newline, add in multiple rows
# e.g. fault with stacktrace
if v and isinstance(v, six.string_types) and r'\n' in v:
lines = v.strip().split(r'\n')
col1 = k
for line in lines:
pt.add_row([col1, line])
col1 = ''
else:
pt.add_row([k, v])
print(encodeutils.safe_encode(pt.get_string()))
def get_password(max_password_prompts=3):
"""Read password from TTY."""
verify = strutils.bool_from_string(env("OS_VERIFY_PASSWORD"))
pw = None
if hasattr(sys.stdin, "isatty") and sys.stdin.isatty():
# Check for Ctrl-D
try:
for __ in moves.range(max_password_prompts):
pw1 = getpass.getpass("OS Password: ")
if verify:
pw2 = getpass.getpass("Please verify: ")
else:
pw2 = pw1
if pw1 == pw2 and pw1:
pw = pw1
break
except EOFError:
pass
return pw
def service_type(stype):
"""Adds 'service_type' attribute to decorated function.
Usage:
.. code-block:: python
@service_type('volume')
def mymethod(f):
...
"""
def inner(f):
f.service_type = stype
return f
return inner
def get_service_type(f):
"""Retrieves service type from function."""
return getattr(f, 'service_type', None)
def pretty_choice_list(l):
return ', '.join("'%s'" % i for i in l)
def exit(msg=''):
if msg:
print (msg, file=sys.stderr)
sys.exit(1)
| apache-2.0 | -5,111,194,649,591,776,000 | 27.495935 | 79 | 0.593723 | false | 3.992027 | false | false | false |
kiwiheretic/logos-v2 | feed/migrations/0005_auto_20160418_0325.py | 1 | 1833 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('feed', '0004_auto_20160418_0027'),
]
operations = [
migrations.CreateModel(
name='FeedSubscription',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('network', models.CharField(max_length=50)),
('room', models.CharField(max_length=50)),
('periodic', models.CharField(default=b'', max_length=15, blank=True)),
('active', models.BooleanField(default=True)),
('last_read', models.DateTimeField()),
('feed', models.ForeignKey(to='feed.Feed')),
('user_added', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.RemoveField(
model_name='cache',
name='actioned',
),
migrations.RemoveField(
model_name='cache',
name='network',
),
migrations.RemoveField(
model_name='cache',
name='room',
),
migrations.RemoveField(
model_name='feed',
name='network',
),
migrations.RemoveField(
model_name='feed',
name='periodic',
),
migrations.RemoveField(
model_name='feed',
name='room',
),
migrations.RemoveField(
model_name='feed',
name='user_added',
),
]
| apache-2.0 | -2,000,784,673,389,948,000 | 29.55 | 114 | 0.515548 | false | 4.640506 | false | false | false |
redapesolutions/django-pin-auth | tests/test_models.py | 1 | 4150 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_django-pin-auth
------------
Tests for `django-pin-auth` models module.
"""
import mock
import datetime
from django.test import TestCase
from django.apps import apps
from django.contrib.auth.models import User
from faker import Faker
from django_pin_auth import models
from django_pin_auth.read_policies import ReadPolicy
fake = Faker('ja_JP') # anything that's UTF8 will do
class TokenCreate(TestCase):
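    """Base test case: creates a user with a single-use token before each test."""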
def setUp(self):
self.user = User.objects.create_user(username=fake.email())
self.token = models.SingleUseToken.objects.create(user=self.user)
class TestCreateDelete(TokenCreate):
def test_create_token_value(self):
"""Should automatically create a 6 digit token."""
assert self.token.token.__len__() == 6
for character in self.token.token:
try:
int(character)
except ValueError:
raise AssertionError('Character "%s" is not a digit' % character)
def test_read_gone(self):
"""After read, shouldn't be found by the manager, regardless of policy."""
self.token.read()
with self.assertRaises(models.SingleUseToken.DoesNotExist):
models.SingleUseToken.objects.get(pk=self.token.pk)
def test_read_full_delete(self):
"""After read, should be totally gone if policy is delete (default)."""
self.token.read()
with self.assertRaises(models.SingleUseToken.DoesNotExist):
models.SingleUseToken.all_objects.get(pk=self.token.pk)
def test_read_soft_delete(self):
"""After read, should be still there, just disabled, if policy is mark."""
config = apps.get_app_config('django_pin_auth')
config.read_policy = ReadPolicy.mark
self.token.read()
try:
models.SingleUseToken.all_objects.get(pk=self.token.pk)
except models.SingleUseToken.DoesNotExist:
raise AssertionError('Token should still exist')
config.read_policy = ReadPolicy.delete
class TestValidity(TokenCreate):
@mock.patch('django_pin_auth.models.datetime')
def test_valid_within_timerange(self, mock_dt):
"""Token is valid within the time provided."""
config = apps.get_app_config('django_pin_auth')
mock_dt.datetime.now = mock.Mock(return_value=datetime.datetime.now(datetime.timezone.utc)+config.pin_validity-datetime.timedelta(seconds=1))
assert self.token.is_valid() is True
@mock.patch('django_pin_auth.models.datetime')
def test_invalid_after_timerange(self, mock_dt):
"""Token is invalid after the time provided."""
config = apps.get_app_config('django_pin_auth')
mock_dt.datetime.now = mock.Mock(return_value=datetime.datetime.now(datetime.timezone.utc)+config.pin_validity+datetime.timedelta(seconds=1))
assert self.token.is_valid() is False
@mock.patch('django_pin_auth.models.datetime')
def test_always_valid(self, mock_dt):
"""Token is always valid if no time given."""
config = apps.get_app_config('django_pin_auth')
keep_value = config.pin_validity
config.pin_validity = None
mock_dt.datetime.now = mock.Mock(return_value=datetime.datetime(2713, 12, 25))
assert self.token.is_valid() is True
config.pin_validity = keep_value
class TestUserToken(TokenCreate):
def setUp(self):
super().setUp()
# Copy the values and do it again
self.user2 = self.user
self.token2 = self.token
super().setUp()
def test_correct_user_token(self):
"""Should find token."""
self.assertEqual(models.get_user_token(self.user, self.token.token), self.token)
    def test_incorrect_user(self):
        """Should not find a token when queried with a different user."""
        self.assertEqual(models.get_user_token(self.user2, self.token.token), None)
def test_incorrect_token(self):
"""Should not find token with not correct token.
Well, which is incorrect is relative..."""
self.assertEqual(models.get_user_token(self.user2, self.token), None) | mit | 850,674,357,809,966,200 | 37.794393 | 149 | 0.661928 | false | 3.821363 | true | false | false |
Azure/azure-sdk-for-python | sdk/synapse/azure-synapse/azure/synapse/accesscontrol/aio/operations_async/_access_control_operations_async.py | 1 | 20087 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AccessControlOperations:
"""AccessControlOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.synapse.accesscontrol.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get_role_definitions(
self,
**kwargs
) -> AsyncIterable["models.RolesListResponse"]:
"""List roles.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RolesListResponse or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.synapse.accesscontrol.models.RolesListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RolesListResponse"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-02-01-preview"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
if not next_link:
# Construct URL
url = self.get_role_definitions.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RolesListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorContract, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_role_definitions.metadata = {'url': '/rbac/roles'} # type: ignore
async def get_role_definition_by_id(
self,
role_id: str,
**kwargs
) -> "models.SynapseRole":
"""Get role by role Id.
:param role_id: Synapse Built-In Role Id.
:type role_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SynapseRole, or the result of cls(response)
:rtype: ~azure.synapse.accesscontrol.models.SynapseRole
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SynapseRole"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-02-01-preview"
# Construct URL
url = self.get_role_definition_by_id.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'roleId': self._serialize.url("role_id", role_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorContract, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SynapseRole', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_role_definition_by_id.metadata = {'url': '/rbac/roles/{roleId}'} # type: ignore
async def create_role_assignment(
self,
create_role_assignment_options: "models.RoleAssignmentOptions",
**kwargs
) -> "models.RoleAssignmentDetails":
"""Create role assignment.
:param create_role_assignment_options: Details of role id and object id.
:type create_role_assignment_options: ~azure.synapse.accesscontrol.models.RoleAssignmentOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RoleAssignmentDetails, or the result of cls(response)
:rtype: ~azure.synapse.accesscontrol.models.RoleAssignmentDetails
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RoleAssignmentDetails"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-02-01-preview"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.create_role_assignment.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(create_role_assignment_options, 'RoleAssignmentOptions')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorContract, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('RoleAssignmentDetails', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_role_assignment.metadata = {'url': '/rbac/roleAssignments'} # type: ignore
async def get_role_assignments(
self,
role_id: Optional[str] = None,
principal_id: Optional[str] = None,
continuation_token_parameter: Optional[str] = None,
**kwargs
) -> List["models.RoleAssignmentDetails"]:
"""List role assignments.
:param role_id: Synapse Built-In Role Id.
:type role_id: str
:param principal_id: Object ID of the AAD principal or security-group.
:type principal_id: str
:param continuation_token_parameter: Continuation token.
:type continuation_token_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of RoleAssignmentDetails, or the result of cls(response)
:rtype: list[~azure.synapse.accesscontrol.models.RoleAssignmentDetails]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["models.RoleAssignmentDetails"]]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-02-01-preview"
# Construct URL
url = self.get_role_assignments.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if role_id is not None:
query_parameters['roleId'] = self._serialize.query("role_id", role_id, 'str')
if principal_id is not None:
query_parameters['principalId'] = self._serialize.query("principal_id", principal_id, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if continuation_token_parameter is not None:
header_parameters['x-ms-continuation'] = self._serialize.header("continuation_token_parameter", continuation_token_parameter, 'str')
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorContract, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation'))
deserialized = self._deserialize('[RoleAssignmentDetails]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_role_assignments.metadata = {'url': '/rbac/roleAssignments'} # type: ignore
async def get_role_assignment_by_id(
self,
role_assignment_id: str,
**kwargs
) -> "models.RoleAssignmentDetails":
"""Get role assignment by role assignment Id.
:param role_assignment_id: The ID of the role assignment.
:type role_assignment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RoleAssignmentDetails, or the result of cls(response)
:rtype: ~azure.synapse.accesscontrol.models.RoleAssignmentDetails
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RoleAssignmentDetails"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-02-01-preview"
# Construct URL
url = self.get_role_assignment_by_id.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'roleAssignmentId': self._serialize.url("role_assignment_id", role_assignment_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorContract, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('RoleAssignmentDetails', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_role_assignment_by_id.metadata = {'url': '/rbac/roleAssignments/{roleAssignmentId}'} # type: ignore
async def delete_role_assignment_by_id(
self,
role_assignment_id: str,
**kwargs
) -> None:
"""Delete role assignment by role assignment Id.
:param role_assignment_id: The ID of the role assignment.
:type role_assignment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-02-01-preview"
# Construct URL
url = self.delete_role_assignment_by_id.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'roleAssignmentId': self._serialize.url("role_assignment_id", role_assignment_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorContract, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
delete_role_assignment_by_id.metadata = {'url': '/rbac/roleAssignments/{roleAssignmentId}'} # type: ignore
async def get_caller_role_assignments(
self,
**kwargs
) -> List[str]:
"""List role assignments of the caller.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of str, or the result of cls(response)
:rtype: list[str]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List[str]]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-02-01-preview"
# Construct URL
url = self.get_caller_role_assignments.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorContract, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[str]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_caller_role_assignments.metadata = {'url': '/rbac/getMyAssignedRoles'} # type: ignore
| mit | -6,482,964,127,619,796,000 | 45.605568 | 144 | 0.642256 | false | 4.191778 | true | false | false |
Taluu/vdebug | tests/test_breakpoint_breakpoint.py | 3 | 7373 | import unittest
import vdebug.breakpoint
import vdebug.error
import vdebug.util
import base64
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
class LineBreakpointTest(unittest.TestCase):
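    """Tests for LineBreakpoint accessors and its source-window hooks."""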
def test_get_file(self):
""" Test that the line number is retrievable."""
ui = None
file = "/path/to/file"
line = 1
bp = vdebug.breakpoint.LineBreakpoint(ui,file,line)
self.assertEqual(bp.get_file(),file)
def test_get_line(self):
""" Test that the line number is retrievable."""
ui = None
file = "/path/to/file"
line = 10
bp = vdebug.breakpoint.LineBreakpoint(ui,file,line)
self.assertEqual(bp.get_line(),line)
def test_get_cmd(self):
""" Test that the dbgp command is correct."""
ui = None
file = vdebug.util.FilePath("/path/to/file")
line = 20
bp = vdebug.breakpoint.LineBreakpoint(ui,file,line)
self.assertEqual(bp.get_cmd(),"-t line -f \"file://%s\" -n %i -s enabled" %(file, line))
def test_on_add_sets_ui_breakpoint(self):
""" Test that the breakpoint is placed on the source window."""
ui = Mock()
file = vdebug.util.FilePath("/path/to/file")
line = 20
bp = vdebug.breakpoint.LineBreakpoint(ui,file,line)
bp.on_add()
ui.register_breakpoint.assert_called_with(bp)
def test_on_remove_deletes_ui_breakpoint(self):
""" Test that the breakpoint is removed from the source window."""
ui = Mock()
file = vdebug.util.FilePath("/path/to/file")
line = 20
bp = vdebug.breakpoint.LineBreakpoint(ui,file,line)
bp.on_remove()
ui.remove_breakpoint.assert_called_with(bp)
class ConditionalBreakpointTest(unittest.TestCase):
def setUp(self):
vdebug.opts.Options.set({})
def test_get_cmd(self):
""" Test that the dbgp command is correct."""
ui = None
file = vdebug.util.FilePath("/path/to/file")
line = 20
condition = "$x > 20"
bp = vdebug.breakpoint.ConditionalBreakpoint(ui,file,line,condition)
b64cond = base64.encodebytes(condition.encode("UTF-8")).decode("UTF-8")
exp_cmd = "-t conditional -f \"file://%s\" -n %i -s enabled -- %s" %(file, line, b64cond)
self.assertEqual(bp.get_cmd(), exp_cmd)
class ExceptionBreakpointTest(unittest.TestCase):
def test_get_cmd(self):
""" Test that the dbgp command is correct."""
ui = None
exception = "ExampleException"
bp = vdebug.breakpoint.ExceptionBreakpoint(ui,exception)
exp_cmd = "-t exception -x %s -s enabled" % exception
self.assertEqual(bp.get_cmd(), exp_cmd)
class CallBreakpointTest(unittest.TestCase):
def test_get_cmd(self):
""" Test that the dbgp command is correct."""
ui = None
function = "myfunction"
bp = vdebug.breakpoint.CallBreakpoint(ui,function)
exp_cmd = "-t call -m %s -s enabled" % function
self.assertEqual(bp.get_cmd(), exp_cmd)
class ReturnBreakpointTest(unittest.TestCase):
def test_get_cmd(self):
""" Test that the dbgp command is correct."""
ui = None
function = "myfunction"
bp = vdebug.breakpoint.ReturnBreakpoint(ui,function)
exp_cmd = "-t return -m %s -s enabled" % function
self.assertEqual(bp.get_cmd(), exp_cmd)
class BreakpointTest(unittest.TestCase):
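    """Tests for breakpoint ID generation and the Breakpoint.parse factory."""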
def test_id_is_unique(self):
"""Test that each vdebug.breakpoint has a unique ID.
Consecutively generated breakpoints should have
different IDs."""
bp1 = vdebug.breakpoint.Breakpoint(None)
bp2 = vdebug.breakpoint.Breakpoint(None)
self.assertNotEqual(bp1.get_id(),bp2.get_id())
def test_parse_with_line_breakpoint(self):
""" Test that a LineBreakpoint is created."""
Mock.__len__ = Mock(return_value=1)
ui = Mock()
ret = vdebug.breakpoint.Breakpoint.parse(ui,"")
self.assertIsInstance(ret,vdebug.breakpoint.LineBreakpoint)
def test_parse_with_empty_line_raises_error(self):
""" Test that a LineBreakpoint is created."""
Mock.__len__ = Mock(return_value=0)
ui = Mock()
re = 'Cannot set a breakpoint on an empty line'
self.assertRaisesRegex(vdebug.error.BreakpointError,\
re,vdebug.breakpoint.Breakpoint.parse,ui,"")
def test_parse_with_conditional_breakpoint(self):
""" Test that a ConditionalBreakpoint is created."""
ui = Mock()
ret = vdebug.breakpoint.Breakpoint.parse(ui,"conditional $x == 3")
self.assertIsInstance(ret,vdebug.breakpoint.ConditionalBreakpoint)
self.assertEqual(ret.condition, "$x == 3")
def test_parse_with_conditional_raises_error(self):
""" Test that an exception is raised with invalid conditional args."""
ui = Mock()
args = "conditional"
re = "Conditional breakpoints require a condition "+\
"to be specified"
self.assertRaisesRegex(vdebug.error.BreakpointError,\
re, vdebug.breakpoint.Breakpoint.parse, ui, args)
def test_parse_with_exception_breakpoint(self):
""" Test that a ExceptionBreakpoint is created."""
ui = Mock()
ret = vdebug.breakpoint.Breakpoint.parse(ui,"exception ExampleException")
self.assertIsInstance(ret,vdebug.breakpoint.ExceptionBreakpoint)
self.assertEqual(ret.exception, "ExampleException")
def test_parse_with_exception_raises_error(self):
""" Test that an exception is raised with invalid exception args."""
ui = Mock()
args = "exception"
re = "Exception breakpoints require an exception name "+\
"to be specified"
self.assertRaisesRegex(vdebug.error.BreakpointError,\
re, vdebug.breakpoint.Breakpoint.parse, ui, args)
def test_parse_with_call_breakpoint(self):
""" Test that a CallBreakpoint is created."""
ui = Mock()
ret = vdebug.breakpoint.Breakpoint.parse(ui,"call myfunction")
self.assertIsInstance(ret,vdebug.breakpoint.CallBreakpoint)
self.assertEqual(ret.function , "myfunction")
def test_parse_with_call_raises_error(self):
""" Test that an exception is raised with invalid call args."""
ui = Mock()
args = "call"
re = "Call breakpoints require a function name "+\
"to be specified"
self.assertRaisesRegex(vdebug.error.BreakpointError,\
re, vdebug.breakpoint.Breakpoint.parse, ui, args)
def test_parse_with_return_breakpoint(self):
""" Test that a ReturnBreakpoint is created."""
ui = Mock()
ret = vdebug.breakpoint.Breakpoint.parse(ui,"return myfunction")
self.assertIsInstance(ret,vdebug.breakpoint.ReturnBreakpoint)
self.assertEqual(ret.function, "myfunction")
def test_parse_with_return_raises_error(self):
""" Test that an exception is raised with invalid return args."""
ui = Mock()
args = "return"
re = "Return breakpoints require a function name "+\
"to be specified"
self.assertRaisesRegex(vdebug.error.BreakpointError,\
re, vdebug.breakpoint.Breakpoint.parse, ui, args)
| mit | 2,124,813,365,746,041,000 | 38.010582 | 97 | 0.631765 | false | 4.028962 | true | false | false |
ancafarcas/superdesk-core | superdesk/vocabularies/vocabularies.py | 1 | 6887 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
import json
from flask import request, current_app as app
from eve.utils import config
from eve.methods.common import serialize_value
from superdesk import privilege
from superdesk.notification import push_notification
from superdesk.resource import Resource
from superdesk.services import BaseService
from superdesk.users import get_user_from_request
from superdesk.utc import utcnow
from superdesk.errors import SuperdeskApiError
logger = logging.getLogger(__name__)
privilege(name="vocabularies", label="Vocabularies Management",
description="User can manage vocabularies' contents.")
# TODO(petr): add api to specify vocabulary schema
vocab_schema = {
'crop_sizes': {
'width': {'type': 'integer'},
'height': {'type': 'integer'},
}
}
class VocabulariesResource(Resource):
schema = {
'_id': {
'type': 'string',
'required': True,
'unique': True
},
'display_name': {
'type': 'string',
'required': True
},
'type': {
'type': 'string',
'required': True,
'allowed': ['manageable', 'unmanageable']
},
'items': {
'type': 'list',
'required': True
},
'single_value': {
'type': 'boolean',
},
'schema_field': {
'type': 'string',
'required': False,
'nullable': True
},
'dependent': {
'type': 'boolean',
},
'service': {
'type': 'dict',
},
'priority': {
'type': 'integer'
},
'unique_field': {
'type': 'string',
'required': False,
'nullable': True
}
}
item_url = 'regex("[\w]+")'
item_methods = ['GET', 'PATCH']
resource_methods = ['GET']
privileges = {'PATCH': 'vocabularies', }
class VocabulariesService(BaseService):
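    """Service behind the vocabularies resource, handling filtering, uniqueness checks and notifications."""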
def on_replace(self, document, original):
document[app.config['LAST_UPDATED']] = utcnow()
document[app.config['DATE_CREATED']] = original[app.config['DATE_CREATED']] if original else utcnow()
logger.info("updating vocabulary item: %s", document["_id"])
def on_fetched(self, doc):
"""Overriding to filter out inactive vocabularies and pops out 'is_active' property from the response.
It keeps it when requested for manageable vocabularies.
"""
if request and hasattr(request, 'args') and request.args.get('where'):
where_clause = json.loads(request.args.get('where'))
if where_clause.get('type') == 'manageable':
return doc
for item in doc[config.ITEMS]:
self._filter_inactive_vocabularies(item)
self._cast_items(item)
def on_fetched_item(self, doc):
"""
        Overridden to filter out inactive vocabulary items and pop the 'is_active' property from the response.
"""
self._filter_inactive_vocabularies(doc)
self._cast_items(doc)
def on_update(self, updates, original):
"""Checks the duplicates if a unique field is defined"""
unique_field = original.get('unique_field')
if unique_field:
self._check_uniqueness(updates.get('items', []), unique_field)
def on_updated(self, updates, original):
"""
        Overridden to send a notification about the update.
"""
self._send_notification(original)
def on_replaced(self, document, original):
"""
Overriding this to send notification about the replacement
"""
self._send_notification(document)
def _check_uniqueness(self, items, unique_field):
"""Checks the uniqueness if a unique field is defined
:param items: list of items to check for uniqueness
:param unique_field: name of the unique field
"""
unique_values = []
for item in items:
# compare only the active items
if not item.get('is_active'):
continue
if not item.get(unique_field):
raise SuperdeskApiError.badRequestError("{} cannot be empty".format(unique_field))
unique_value = str(item.get(unique_field)).upper()
if unique_value in unique_values:
raise SuperdeskApiError.badRequestError("Value {} for field {} is not unique".
format(item.get(unique_field), unique_field))
unique_values.append(unique_value)
def _filter_inactive_vocabularies(self, item):
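        """Keep only active vocabulary items and strip the 'is_active' flag from each."""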
vocs = item['items']
active_vocs = ({k: voc[k] for k in voc.keys() if k != 'is_active'}
for voc in vocs if voc.get('is_active', True))
item['items'] = list(active_vocs)
def _cast_items(self, vocab):
"""Cast values in vocabulary items using predefined schema.
:param vocab
"""
schema = vocab_schema.get(vocab.get('_id'), {})
for item in vocab.get('items', []):
for field, field_schema in schema.items():
if field in item:
item[field] = serialize_value(field_schema['type'], item[field])
def _send_notification(self, updated_vocabulary):
"""
Sends notification about the updated vocabulary to all the connected clients.
"""
user = get_user_from_request()
push_notification('vocabularies:updated', vocabulary=updated_vocabulary.get('display_name'),
user=str(user[config.ID_FIELD]) if user else None)
def get_rightsinfo(self, item):
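        """Return copyright/usage info for the item's source, falling back to the default entry."""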
rights_key = item.get('source', item.get('original_source', 'default'))
all_rights = self.find_one(req=None, _id='rightsinfo')
if not all_rights or not all_rights.get('items'):
return {}
try:
default_rights = next(info for info in all_rights['items'] if info['name'] == 'default')
except StopIteration:
default_rights = None
try:
rights = next(info for info in all_rights['items'] if info['name'] == rights_key)
except StopIteration:
rights = default_rights
if rights:
return {
'copyrightholder': rights.get('copyrightHolder'),
'copyrightnotice': rights.get('copyrightNotice'),
'usageterms': rights.get('usageTerms'),
}
else:
return {}
| agpl-3.0 | -1,100,923,193,334,934,500 | 31.952153 | 110 | 0.577029 | false | 4.238154 | true | false | false |
hasgeek/funnel | funnel/views/contact.py | 1 | 6404 | from __future__ import annotations
from datetime import datetime, timedelta
from io import StringIO
from typing import Dict, Optional
import csv
from sqlalchemy.exc import IntegrityError
from flask import (
Response,
current_app,
jsonify,
make_response,
render_template,
request,
)
from baseframe import _
from coaster.auth import current_auth
from coaster.utils import getbool, make_name, midnight_to_utc, utcnow
from coaster.views import ClassView, render_with, requestargs, route
from .. import app
from ..models import ContactExchange, Project, TicketParticipant, db
from ..utils import abort_null, format_twitter_handle
from .login_session import requires_login
def contact_details(ticket_participant: TicketParticipant) -> Dict[str, Optional[str]]:
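    """Return the ticket participant's shareable contact details as a dict."""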
return {
'fullname': ticket_participant.fullname,
'company': ticket_participant.company,
'email': ticket_participant.email,
'twitter': format_twitter_handle(ticket_participant.twitter),
'phone': ticket_participant.phone,
}
@route('/account/contacts')
class ContactView(ClassView):
current_section = 'account'
def get_project(self, uuid_b58):
return (
Project.query.filter_by(uuid_b58=uuid_b58)
.options(db.load_only(Project.id, Project.uuid, Project.title))
.one_or_404()
)
@route('', endpoint='contacts')
@requires_login
@render_with('contacts.html.jinja2')
def contacts(self):
"""Return contacts grouped by project and date."""
archived = getbool(request.args.get('archived'))
return {
'contacts': ContactExchange.grouped_counts_for(
current_auth.user, archived=archived
)
}
def contacts_to_csv(self, contacts, timezone, filename):
"""Return a CSV of given contacts."""
outfile = StringIO(newline='')
out = csv.writer(outfile)
out.writerow(
[
'scanned_at',
'fullname',
'email',
'phone',
'twitter',
'job_title',
'company',
'city',
]
)
for contact in contacts:
proxy = contact.current_access()
ticket_participant = proxy.ticket_participant
out.writerow(
[
proxy.scanned_at.astimezone(timezone)
.replace(second=0, microsecond=0, tzinfo=None)
.isoformat(), # Strip precision from timestamp
ticket_participant.fullname,
ticket_participant.email,
ticket_participant.phone,
ticket_participant.twitter,
ticket_participant.job_title,
ticket_participant.company,
ticket_participant.city,
]
)
outfile.seek(0)
return Response(
outfile.getvalue(),
content_type='text/csv',
headers=[
(
'Content-Disposition',
f'attachment;filename="{filename}.csv"',
)
],
)
@route('<uuid_b58>/<datestr>.csv', endpoint='contacts_project_date_csv')
@requires_login
def project_date_csv(self, uuid_b58, datestr):
"""Return contacts for a given project and date in CSV format."""
archived = getbool(request.args.get('archived'))
project = self.get_project(uuid_b58)
date = datetime.strptime(datestr, '%Y-%m-%d').date()
contacts = ContactExchange.contacts_for_project_and_date(
current_auth.user, project, date, archived
)
return self.contacts_to_csv(
contacts,
timezone=project.timezone,
filename='contacts-{project}-{date}'.format(
project=make_name(project.title), date=date.strftime('%Y%m%d')
),
)
@route('<uuid_b58>.csv', endpoint='contacts_project_csv')
@requires_login
def project_csv(self, uuid_b58):
"""Return contacts for a given project in CSV format."""
archived = getbool(request.args.get('archived'))
project = self.get_project(uuid_b58)
contacts = ContactExchange.contacts_for_project(
current_auth.user, project, archived
)
return self.contacts_to_csv(
contacts,
timezone=project.timezone,
filename=f'contacts-{make_name(project.title)}',
)
@route('scan', endpoint='scan_contact')
@requires_login
def scan(self):
"""Scan a badge."""
return render_template('scan_contact.html.jinja2')
@route('scan/connect', endpoint='scan_connect', methods=['POST'])
@requires_login
@requestargs(('puk', abort_null), ('key', abort_null))
def connect(self, puk, key):
"""Verify a badge scan and create a contact."""
ticket_participant = TicketParticipant.query.filter_by(puk=puk, key=key).first()
if ticket_participant is None:
return make_response(
jsonify(status='error', message="Attendee details not found"), 404
)
project = ticket_participant.project
if project.end_at:
if (
midnight_to_utc(project.end_at + timedelta(days=1), project.timezone)
< utcnow()
):
return make_response(
jsonify(status='error', message=_("This project has concluded")),
403,
)
try:
contact_exchange = ContactExchange(
user=current_auth.actor, ticket_participant=ticket_participant
)
db.session.add(contact_exchange)
db.session.commit()
except IntegrityError:
current_app.logger.warning("Contact already scanned")
db.session.rollback()
return jsonify(contact=contact_details(ticket_participant))
else:
# FIXME: when status='error', the message should be in `error_description`.
return make_response(
jsonify(status='error', message=_("Unauthorized contact exchange")), 403
)
ContactView.init_app(app)
| agpl-3.0 | 5,105,638,350,269,869,000 | 33.06383 | 88 | 0.570737 | false | 4.472067 | false | false | false |
timothyb89/stackviz | stackviz/settings.py | 2 | 3482 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Django settings for stackviz project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*to^*vlhq&05jo0^kad)=kboy$8@&x9s6i23ukh*^%w_$=5bmh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'stackviz.urls'
WSGI_APPLICATION = 'stackviz.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
TEMPLATE_CONTEXT_PROCESSORS = (
'stackviz.global_template_injector.inject_extra_context',
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'stackviz', 'static')
]
TEMPLATE_DIRS = [
os.path.join(BASE_DIR, 'stackviz', 'templates')
]
# If True, read a stream from stdin (only valid for exported sites)
TEST_STREAM_STDIN = False
# A list of files containing directly-accessible subunit streams.
TEST_STREAMS = []
# A list of test repositories containing (potentially) multiple subunit
# streams.
TEST_REPOSITORIES = [
os.path.join(BASE_DIR, 'test_data')
]
# The input dstat file
DSTAT_CSV = 'dstat.log'
# If true, AJAX calls should attempt to load `*.json.gz` files rather than
# plain `*.json` files. This should only ever be toggled `True` for static site
# exports and is not currently supported on live servers.
USE_GZIP = False
# Toggles offline/online mode for static export. Will trigger menu to show
# either the full site or only links supported by static exporter.
OFFLINE = False
| apache-2.0 | -5,850,671,426,330,975,000 | 26.634921 | 79 | 0.742102 | false | 3.427165 | false | false | false |
b3j0f/annotation | b3j0f/annotation/async.py | 1 | 8481 | # -*- coding: utf-8 -*-
# --------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2015 Jonathan Labéjof <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------
"""Decorators dedicated to asynchronous programming."""
from __future__ import absolute_import
try:
from threading import Thread, RLock
except ImportError:
    from dummy_threading import Thread, RLock
from time import sleep
from signal import signal, SIGALRM, alarm
from six import callable
from six.moves.queue import Queue
from .core import Annotation
from .interception import PrivateInterceptor
from .oop import Mixin
__all__ = [
'Synchronized', 'SynchronizedClass',
'Asynchronous', 'TimeOut', 'Wait', 'Observable'
]
class Synchronized(PrivateInterceptor):
"""Transform a target into a thread safe target."""
#: lock attribute name
_LOCK = '_lock'
__slots__ = (_LOCK,) + PrivateInterceptor.__slots__
def __init__(self, lock=None, *args, **kwargs):
super(Synchronized, self).__init__(*args, **kwargs)
self._lock = RLock() if lock is None else lock
def _interception(self, joinpoint):
self._lock.acquire()
result = joinpoint.proceed()
self._lock.release()
return result
class SynchronizedClass(Synchronized):
"""Transform a class into a thread safe class."""
def on_bind_target(self, target, ctx=None):
for attribute in target.__dict__:
if callable(attribute):
Synchronized(attribute, self._lock)
class Asynchronous(Annotation):
"""Transform a target into an asynchronous callable target."""
def __init__(self, *args, **kwargs):
super(Asynchronous, self).__init__(*args, **kwargs)
self.queue = None
def _threaded(self, *args, **kwargs):
"""Call the target and put the result in the Queue."""
for target in self.targets:
result = target(*args, **kwargs)
self.queue.put(result)
def on_bind_target(self, target, ctx=None):
# add start function to wrapper
super(Asynchronous, self).on_bind_target(target, ctx=ctx)
setattr(target, 'start', self.start)
def start(self, *args, **kwargs):
"""Start execution of the function."""
self.queue = Queue()
thread = Thread(target=self._threaded, args=args, kwargs=kwargs)
thread.start()
return Asynchronous.Result(self.queue, thread)
class NotYetDoneException(Exception):
"""Handle when a result is not yet available."""
class Result(object):
"""In charge of receive asynchronous function result."""
__slots__ = ('queue', 'thread', 'result')
def __init__(self, queue, thread):
super(Asynchronous.Result, self).__init__()
self.result = None
self.queue = queue
self.thread = thread
def is_done(self):
"""True if result is available."""
return not self.thread.is_alive()
def get_result(self, wait=-1):
"""Get result value.
Wait for it if necessary.
:param int wait: maximum wait time.
:return: result value.
"""
if not self.is_done():
if wait >= 0:
self.thread.join(wait)
else:
raise Asynchronous.NotYetDoneException(
'the call has not yet completed its task'
)
if self.result is None:
self.result = self.queue.get()
return self.result
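# A minimal usage sketch (illustrative only, not part of this module): the
# decorated callable gains a ``start`` method that runs it in a thread and
# returns an ``Asynchronous.Result`` acting as a simple promise.
#
#     @Asynchronous()
#     def compute(value):
#         return value * value
#
#     promise = compute.start(4)
#     result = promise.get_result(wait=5)  # waits up to 5 seconds for 16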
class TimeOut(PrivateInterceptor):
"""Raise an Exception if the target call has not finished in time."""
class TimeOutError(Exception):
"""Exception thrown if time elapsed before the end of the target call.
"""
#: Default time out error message.
DEFAULT_MESSAGE = \
'Call of {0} with parameters {1} and {2} is timed out in frame {3}'
def __init__(self, timeout_interceptor, frame):
super(TimeOut.TimeOutError, self).__init__(
timeout_interceptor.message.format(
timeout_interceptor.target,
timeout_interceptor.args,
timeout_interceptor.kwargs,
frame
)
)
SECONDS = 'seconds'
ERROR_MESSAGE = 'error_message'
__slots__ = (SECONDS, ERROR_MESSAGE) + PrivateInterceptor.__slots__
def __init__(
self,
seconds, error_message=TimeOutError.DEFAULT_MESSAGE,
*args, **kwargs
):
super(TimeOut, self).__init__(*args, **kwargs)
self.seconds = seconds
self.error_message = error_message
def _handle_timeout(self, frame=None, **_):
"""Sig ALARM timeout function."""
raise TimeOut.TimeOutError(self, frame)
def _interception(self, joinpoint):
signal(SIGALRM, self._handle_timeout)
alarm(self.seconds)
try:
result = joinpoint.proceed()
finally:
alarm(0)
return result
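# A minimal usage sketch (illustrative only): the annotated call is aborted
# with TimeOut.TimeOutError when it runs longer than ``seconds``. Note that
# SIGALRM-based timeouts are only available on POSIX platforms.
#
#     @TimeOut(2)
#     def slow():
#         sleep(10)
#
#     slow()  # raises TimeOut.TimeOutError after roughly 2 seconds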
class Wait(PrivateInterceptor):
"""Define a time to wait before and after a target call."""
DEFAULT_BEFORE = 1 #: default seconds to wait before the target call.
DEFAULT_AFTER = 1 #: default seconds to wait after the target call.
BEFORE = 'before' #: before attribute name.
AFTER = 'after' #: after attribute name.
__slots__ = (BEFORE, AFTER) + PrivateInterceptor.__slots__
def __init__(
self, before=DEFAULT_BEFORE, after=DEFAULT_AFTER, *args, **kwargs
):
super(Wait, self).__init__(*args, **kwargs)
self.before = before
self.after = after
def _interception(self, joinpoint):
sleep(self.before)
result = joinpoint.proceed()
sleep(self.after)
return result
class Observable(PrivateInterceptor):
"""Imlementation of the observer design pattern.
It transforms a target into an observable object in adding method
register_observer, unregister_observer and notify_observers.
Observers listen to pre/post target interception.
"""
def __init__(self, *args, **kwargs):
super(Observable, self).__init__(*args, **kwargs)
self.observers = set()
def register_observer(self, observer):
"""Register an observer."""
self.observers.add(observer)
def unregister_observer(self, observer):
"""Unregister an observer."""
self.observers.remove(observer)
def notify_observers(self, joinpoint, post=False):
"""Notify observers with parameter calls and information about
pre/post call.
"""
_observers = tuple(self.observers)
for observer in _observers:
observer.notify(joinpoint=joinpoint, post=post)
def on_bind_target(self, target, ctx=None):
Mixin.set_mixin(target, self.register_observer)
Mixin.set_mixin(target, self.unregister_observer)
Mixin.set_mixin(target, self.notify_observers)
def _interception(self, joinpoint):
self.notify_observers(joinpoint=joinpoint)
result = joinpoint.proceed()
self.notify_observers(joinpoint=joinpoint, post=True)
return result
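# A minimal usage sketch (illustrative only): any object exposing a ``notify``
# method can observe the pre and post interception of the target.
#
#     class Logger(object):
#         def notify(self, joinpoint, post=False):
#             print('post' if post else 'pre', joinpoint)
#
#     @Observable()
#     def task():
#         pass
#
#     task.register_observer(Logger())
#     task()  # Logger.notify is called before and after the call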
| mit | -6,628,463,442,555,860,000 | 26.983498 | 79 | 0.613162 | false | 4.323814 | false | false | false |
bernard357/plumbery | plumbery/polishers/prepare.py | 1 | 26568 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import yaml
import netifaces
from libcloud.compute.base import NodeState
from libcloud.compute.deployment import Deployment
from libcloud.compute.deployment import ScriptDeployment
from libcloud.compute.deployment import SSHKeyDeployment
from libcloud.compute.ssh import SSHClient
from plumbery.exception import PlumberyException
from plumbery.nodes import PlumberyNodes
from plumbery.polisher import PlumberyPolisher
from plumbery.text import PlumberyText
from plumbery.text import PlumberyNodeContext
from plumbery.plogging import plogging
class FileContentDeployment(Deployment):
"""
Installs a file on a target node.
"""
def __init__(self, content, target):
"""
:type content: ``str``
:keyword content: Content of the target file to create
:type target: ``str``
:keyword target: Path to install file on node
"""
self.content = content
self.target = target
def run(self, node, client):
"""
Writes the file.
See also :class:`Deployment.run`
"""
client.put(path=self.target, contents=self.content)
return node
class RebootDeployment(Deployment):
"""
    Reboots a node and lets cloud-init do the dirty job.
"""
def __init__(self, container):
"""
:param container: the container of this node
:type container: :class:`plumbery.PlumberyInfrastructure`
"""
self.region = container.region
def run(self, node, client):
"""
Reboots the node.
See also :class:`Deployment.run`
"""
repeats = 0
while True:
try:
self.region.reboot_node(node)
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
if 'VM_TOOLS_INVALID_STATUS' in str(feedback):
if repeats < 5:
time.sleep(10)
repeats += 1
continue
plogging.error("- unable to reboot node")
plogging.error(str(feedback))
            # return outside of a ``finally`` clause so that the ``continue``
            # statements above can actually retry the reboot
            return node
class PreparePolisher(PlumberyPolisher):
"""
Bootstraps nodes via ssh
This polisher looks at each node in sequence, and contact selected nodes
via ssh to prepare them. The goal here is to accelerate post-creation
tasks as much as possible.
Bootstrapping steps can consist of multiple tasks:
* push a SSH public key to allow for automated secured communications
* ask for package update
* install docker
* install any pythons script
* install Stackstorm
* configure a Chef client
* register a node to a monitoring dashboard
* ...
To activate this polisher you have to mention it in the fittings plan,
like in the following example::
---
safeMode: False
actions:
- prepare:
key: ~/.ssh/myproject_rsa.pub
---
# Frankfurt in Europe
locationId: EU6
regionId: dd-eu
...
Plumbery will only prepare nodes that have been configured for it. The
example below demonstrates how this can be done for multiple docker
containers::
# some docker resources
- docker:
domain: *vdc1
ethernet: *containers
nodes:
- docker1:
prepare: &docker
- run prepare.update.sh
- run prepare.docker.sh
- docker2:
prepare: *docker
- docker3:
prepare: *docker
    In real life, when you have to prepare an appliance, you need to be
    close to it and able to touch it. The same applies to virtual fittings.
    This polisher needs to communicate directly with target
    nodes over the network.
This connectivity can become quite complicated because of the potential mix
of private and public networks, firewalls, etc. To stay safe plumbery
enforces a simple beachheading model, where network connectivity with end
nodes is a no brainer.
This model is based on predefined network addresses for plumbery itself,
as in the snippet below::
---
# Frankfurt in Europe
locationId: EU6
regionId: dd-eu
# network subnets are 10.1.x.y
prepare:
- beachhead: 10.1.3.4
Here nodes at EU6 will be prepared only if the machine that is
    executing plumbery has the address 10.1.3.4. In other cases, plumbery will
state that the location is out of reach.
"""
def upgrade_vmware_tools(self, node):
"""
Upgrade VMware tools on target node
:param node: the node to be polished
:type node: :class:`libcloud.compute.base.Node`
"""
if self.engine.safeMode:
return True
while True:
try:
self.region.ex_update_vm_tools(node=node)
plogging.info("- upgrading vmware tools")
return True
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
if 'Please try again later' in str(feedback):
time.sleep(10)
continue
if 'NO_CHANGE' in str(feedback):
plogging.debug("- vmware tools is already up-to-date")
return True
plogging.warning("- unable to upgrade vmware tools")
plogging.warning(str(feedback))
return False
def _apply_prepares(self, node, steps):
"""
Does the actual job over SSH
:param node: the node to be polished
:type node: :class:`libcloud.compute.base.Node`
:param steps: the various steps of the preparing
:type steps: ``list`` of ``dict``
:return: ``True`` if everything went fine, ``False`` otherwise
:rtype: ``bool``
"""
if node is None or node.state != NodeState.RUNNING:
plogging.warning("- skipped - node is not running")
return False
# select the address to use
if len(node.public_ips) > 0:
target_ip = node.public_ips[0]
elif node.extra['ipv6']:
target_ip = node.extra['ipv6']
else:
target_ip = node.private_ips[0]
# use libcloud to communicate with remote nodes
session = SSHClient(hostname=target_ip,
port=22,
username=self.user,
password=self.secret,
key_files=self.key_files,
timeout=10)
repeats = 0
while True:
try:
session.connect()
break
except Exception as feedback:
repeats += 1
if repeats > 5:
plogging.error("Error: can not connect to '{}'!".format(
target_ip))
plogging.error("- failed to connect")
return False
plogging.debug(str(feedback))
plogging.debug("- connection {} failed, retrying".format(repeats))
time.sleep(10)
continue
while True:
try:
if self.engine.safeMode:
plogging.info("- skipped - no ssh interaction in safe mode")
else:
for step in steps:
plogging.info('- {}'.format(step['description']))
step['genius'].run(node, session)
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
plogging.error("Error: unable to prepare '{}' at '{}'!".format(
node.name, target_ip))
plogging.error(str(feedback))
plogging.error("- failed")
result = False
else:
result = True
break
try:
session.close()
except:
pass
return result
def _get_prepares(self, node, settings, container):
"""
Defines the set of actions to be done on a node
:param node: the node to be polished
:type node: :class:`libcloud.compute.base.Node`
:param settings: the fittings plan for this node
:type settings: ``dict``
:param container: the container of this node
:type container: :class:`plumbery.PlumberyInfrastructure`
:return: a list of actions to be performed, and related descriptions
:rtype: a ``list`` of `{ 'description': ..., 'genius': ... }``
"""
if not isinstance(settings, dict):
return []
environment = PlumberyNodeContext(node=node,
container=container,
context=self.facility)
prepares = []
for key_file in self.key_files:
try:
path = os.path.expanduser(key_file)
with open(path) as stream:
key = stream.read()
stream.close()
prepares.append({
'description': 'deploy SSH public key',
'genius': SSHKeyDeployment(key=key)})
except IOError:
plogging.warning("no ssh key in {}".format(key_file))
if ('prepare' in settings
and isinstance(settings['prepare'], list)
and len(settings['prepare']) > 0):
plogging.info('- using prepare commands')
for script in settings['prepare']:
tokens = script.split(' ')
if len(tokens) == 1:
tokens.insert(0, 'run')
if tokens[0] in ['run', 'run_raw']: # send and run a script
script = tokens[1]
if len(tokens) > 2:
args = tokens[2:]
else:
args = []
plogging.debug("- {} {} {}".format(
tokens[0], script, ' '.join(args)))
try:
with open(script) as stream:
text = stream.read()
if(tokens[0] == 'run'
and PlumberyText.could_expand(text)):
plogging.debug("- expanding script '{}'"
.format(script))
text = PlumberyText.expand_string(
text, environment)
if len(text) > 0:
plogging.info("- running '{}'"
.format(script))
prepares.append({
'description': ' '.join(tokens),
'genius': ScriptDeployment(
script=text,
args=args,
name=script)})
else:
plogging.error("- script '{}' is empty"
.format(script))
except IOError:
plogging.error("- unable to read script '{}'"
.format(script))
elif tokens[0] in ['put', 'put_raw']: # send a file
file = tokens[1]
if len(tokens) > 2:
destination = tokens[2]
else:
destination = './'+file
plogging.debug("- {} {} {}".format(
tokens[0], file, destination))
try:
with open(file) as stream:
content = stream.read()
if(tokens[0] == 'put'
and PlumberyText.could_expand(content)):
plogging.debug("- expanding file '{}'"
.format(file))
content = PlumberyText.expand_string(
content, environment)
plogging.info("- putting file '{}'"
.format(file))
prepares.append({
'description': ' '.join(tokens),
'genius': FileContentDeployment(
content=content,
target=destination)})
except IOError:
plogging.error("- unable to read file '{}'"
.format(file))
else: # echo a sensible message eventually
if tokens[0] == 'echo':
tokens.pop(0)
message = ' '.join(tokens)
message = PlumberyText.expand_string(
message, environment)
plogging.info("- {}".format(message))
if ('cloud-config' in settings
and isinstance(settings['cloud-config'], dict)
and len(settings['cloud-config']) > 0):
plogging.info('- using cloud-config')
# mandatory, else cloud-init will not consider user-data
plogging.debug('- preparing meta-data')
meta_data = 'instance_id: dummy\n'
destination = '/var/lib/cloud/seed/nocloud-net/meta-data'
prepares.append({
'description': 'put meta-data',
'genius': FileContentDeployment(
content=meta_data,
target=destination)})
plogging.debug('- preparing user-data')
expanded = PlumberyText.expand_string(
settings['cloud-config'], environment)
user_data = '#cloud-config\n'+expanded
plogging.debug(user_data)
destination = '/var/lib/cloud/seed/nocloud-net/user-data'
prepares.append({
'description': 'put user-data',
'genius': FileContentDeployment(
content=user_data,
target=destination)})
plogging.debug('- preparing remote install of cloud-init')
script = 'prepare.cloud-init.sh'
try:
path = os.path.dirname(__file__)+'/'+script
with open(path) as stream:
text = stream.read()
if text:
prepares.append({
'description': 'run '+script,
'genius': ScriptDeployment(
script=text,
name=script)})
except IOError:
raise PlumberyException("Error: cannot read '{}'"
.format(script))
plogging.debug('- preparing reboot to trigger cloud-init')
prepares.append({
'description': 'reboot node',
'genius': RebootDeployment(
container=container)})
return prepares
def go(self, engine):
"""
Starts the prepare process
:param engine: access to global parameters and functions
:type engine: :class:`plumbery.PlumberyEngine`
"""
super(PreparePolisher, self).go(engine)
self.report = []
self.user = engine.get_shared_user()
self.secret = engine.get_shared_secret()
self.key_files = engine.get_shared_key_files()
if 'key' in self.settings:
key = self.settings['key']
key = os.path.expanduser(key)
if os.path.isfile(key):
plogging.debug("- using shared key {}".format(key))
if self.key_files is None:
self.key_files = [key]
else:
self.key_files.insert(0, key)
else:
plogging.error("Error: missing file {}".format(key))
def move_to(self, facility):
"""
Checks if we can beachhead at this facility
:param facility: access to local parameters and functions
:type facility: :class:`plumbery.PlumberyFacility`
This function lists all addresses of the computer that is running
plumbery. If there is at least one routable IPv6 address, then
it assumes that communication with nodes is possible. If no suitable
IPv6 address can be found, then plumbery falls back to IPv4.
Beachheading is granted only if the address of the computer running
plumbery matches the fitting parameter ``beachhead``.
"""
self.facility = facility
self.region = facility.region
self.nodes = PlumberyNodes(facility)
self.beachheading = False
try:
self.addresses = []
for interface in netifaces.interfaces():
addresses = netifaces.ifaddresses(interface)
if netifaces.AF_INET in addresses.keys():
for address in addresses[netifaces.AF_INET]:
# strip local loop
if address['addr'].startswith('127.0.0.1'):
continue
self.addresses.append(address['addr'])
if netifaces.AF_INET6 in addresses.keys():
for address in addresses[netifaces.AF_INET6]:
# strip local loop
if address['addr'].startswith('::1'):
continue
# strip local link addresses
if address['addr'].startswith('fe80::'):
continue
# we have a routable ipv6, so let's go
self.beachheading = True
except Exception as feedback:
plogging.error(str(feedback))
for item in self.facility.get_setting('prepare', []):
if not isinstance(item, dict):
continue
if 'beachhead' not in item.keys():
continue
if item['beachhead'] in self.addresses:
self.beachheading = True
break
if self.beachheading:
plogging.debug("- beachheading at '{}'".format(
self.facility.get_setting('locationId')))
else:
plogging.debug("- not beachheading at '{}'".format(
self.facility.get_setting('locationId')))
def attach_node_to_internet(self, node, ports=[]):
"""
Adds address translation for one node
:param node: node that has to be reachable from the internet
:type node: :class:`libcloud.common.Node`
:param ports: the ports that have to be opened
:type ports: a list of ``str``
"""
plogging.info("Making node '{}' reachable from the internet"
.format(node.name))
domain = self.container.get_network_domain(
self.container.blueprint['domain']['name'])
internal_ip = node.private_ips[0]
external_ip = None
for rule in self.region.ex_list_nat_rules(domain):
if rule.internal_ip == internal_ip:
external_ip = rule.external_ip
plogging.info("- node is reachable at '{}'".format(external_ip))
if self.engine.safeMode:
plogging.info("- skipped - safe mode")
return
if external_ip is None:
external_ip = self.container._get_ipv4()
if external_ip is None:
plogging.info("- no more ipv4 address available -- assign more")
return
while True:
try:
self.region.ex_create_nat_rule(
domain,
internal_ip,
external_ip)
plogging.info("- node is reachable at '{}'".format(
external_ip))
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
elif 'RESOURCE_LOCKED' in str(feedback):
plogging.info("- not now - locked")
return
else:
plogging.info("- unable to add address translation")
plogging.error(str(feedback))
break
candidates = self.container._list_candidate_firewall_rules(node, ports)
for rule in self.container._list_firewall_rules():
if rule.name in candidates.keys():
plogging.info("Creating firewall rule '{}'"
.format(rule.name))
plogging.info("- already there")
candidates = {k: candidates[k]
for k in candidates if k != rule.name}
for name, rule in candidates.items():
plogging.info("Creating firewall rule '{}'"
.format(name))
if self.engine.safeMode:
plogging.info("- skipped - safe mode")
else:
try:
self.container._ex_create_firewall_rule(
network_domain=domain,
rule=rule,
position='LAST')
plogging.info("- in progress")
except Exception as feedback:
if 'NAME_NOT_UNIQUE' in str(feedback):
plogging.info("- already there")
else:
plogging.info("- unable to create firewall rule")
plogging.error(str(feedback))
return external_ip
def shine_node(self, node, settings, container):
"""
prepares a node
:param node: the node to be polished
:type node: :class:`libcloud.compute.base.Node`
:param settings: the fittings plan for this node
:type settings: ``dict``
:param container: the container of this node
:type container: :class:`plumbery.PlumberyInfrastructure`
"""
self.container = container
plogging.info("Preparing node '{}'".format(settings['name']))
if node is None:
plogging.error("- not found")
return
timeout = 300
tick = 6
while node.extra['status'].action == 'START_SERVER':
time.sleep(tick)
node = self.nodes.get_node(node.name)
timeout -= tick
if timeout < 0:
break
if node.state != NodeState.RUNNING:
plogging.error("- skipped - node is not running")
return
self.upgrade_vmware_tools(node)
prepares = self._get_prepares(node, settings, container)
if len(prepares) < 1:
plogging.info('- nothing to do')
self.report.append({node.name: {
'status': 'skipped - nothing to do'
}})
return
if len(node.public_ips) > 0:
plogging.info("- node is reachable at '{}'".format(
node.public_ips[0]))
node.transient = False
elif container.with_transient_exposure():
external_ip = self.attach_node_to_internet(node, ports=['22'])
if external_ip is None:
plogging.error('- no IP has been assigned')
self.report.append({node.name: {
'status': 'unreachable'
}})
return
node.public_ips = [external_ip]
node.transient = True
elif not self.beachheading:
plogging.error('- node is unreachable')
self.report.append({node.name: {
'status': 'unreachable'
}})
return
descriptions = []
for item in prepares:
descriptions.append(item['description'])
if self._apply_prepares(node, prepares):
self.report.append({node.name: {
'status': 'completed',
'prepares': descriptions
}})
else:
self.report.append({node.name: {
'status': 'failed',
'prepares': descriptions
}})
if node.transient:
self.container._detach_node_from_internet(node)
def reap(self):
"""
Reports on preparing
"""
if 'output' not in self.settings:
return
fileName = self.settings['output']
plogging.info("Reporting on preparations in '{}'".format(fileName))
with open(fileName, 'w') as stream:
stream.write(yaml.dump(self.report, default_flow_style=False))
stream.close()
| apache-2.0 | -8,472,979,137,495,152,000 | 31.8 | 82 | 0.503312 | false | 4.95302 | false | false | false |
vfine/webplatform | pmModules/joblfn.py | 1 | 5460 | """ Show the list of Panda jobs by LFN, or the list of LFNs by Panda job IDs </td><td>$Rev$"""
# $Id: joblfn.py 19632 2014-07-06 07:30:10Z jschovan $
from pmUtils.pmState import pmstate
from pmCore.pmModule import pmRoles
from pmTaskBuffer.pmTaskBuffer import pmtaskbuffer as pmt
import pmUtils.pmUtils as utils
from pmCore.pmModule import pmModule
class joblfn(pmModule):
""" Show the list of Panda id with LFN or versa verse LFNs by Panda IDs """
#______________________________________________________________________________________
def __init__(self,name=None,parent=None,obj=None):
pmModule.__init__(self,name,parent,obj)
self.publishUI(self.doJson)
#______________________________________________________________________________________
def doJson(self,lfn=None, jobs=None,type='input',ds=None,table='new',limit=1000,jobstatus=None,site=None,jobtype='production',days=1,user=None,select=None):
"""
        Show the list of Panda IDs by LFN, or the list of LFNs by Panda IDs
<ul>
<li><code>lfn</code> - the list of the comma separated files
<li><code>ds</code> - the list of the comma separated datasets
<li><code>jobs</code> - the list of the comma separated Panda's job IDs
<li><code>table</code> = "new" (default) look up the records for last 3 days
<br> ="old" - look up the records those more than 3 days old (slow)
<br> ="deep" - look up the "old" and "new" tables (slow)
<li><code>type</code> - the type selector. <br>
= 'input - the default value<br>
= '*' | 'all' - list all types available.
<li><code>jobstatus</code> = the comma separated list of the job status to filter
<br>For example: 'defined, waiting,assigned,activated,sent,starting,running'
<li><code>site</code> = the comma separated list of the sizte to list the jobs from
<br> For example 'UTA_SWT2'
<li><code>jobtype</code> = the comma separated list of the job type to filter
<br> For example, "analysis, production"
<li><code>days</code> = the number of the days to look up the list of the jobs if either 'jobstatus' or 'site' parameter is defined
        <li><code>user</code> = the comma separated list of the usernames.
        <br>NB. The names may contain the wildcard symbol '*'. Be aware the wildcard slows the search down
</ul>
"""
title = 'The list of files for the '
if jobstatus and jobstatus.strip() =='': jobstatus = None
if site and site.strip() =='': site = None
if lfn and lfn.strip() =='': lfn = None
if jobs and isinstance(jobs,str) and jobs.strip() =='': jobs = None
if ds and ds.strip() =='': ds=None
if type and type.strip() =='': type='all'
if lfn==None and jobs==None and ds==None and jobstatus==None and site==None:
self.publishTitle("Ambigios query: lfn=%(lfn)s; pandaid=%(pandaid)s either lfn or padaid can be defined. One can not define lfn and pandaid at once" % { 'lfn': lfn, 'pandaid' : jobs} )
            self.publish('<h2>Check the input parameters. Click the "?" to see the API documentation</h2>', role=pmRoles.html())
else:
nav = ''
if limit:
nav += "Limit %s rows." % limit
if type=='*' or type==None: type = 'all'
if lfn != None:
self.publishTitle("The list of the PANDA jobs with the LFN of the '%s' type provided" % type)
if not '*' in lfn: # disregard the jobtype parameter
if utils.isFilled(jobtype):
nav += " Disregarding the jobtype='%s' default parameter" % jobtype
jobtype = None
if ds != None:
self.publishTitle("The list of the PANDA jobs with the DATASETs of the '%s' type provided" % type)
if jobs!=None:
self.publishTitle("The list of the '%s' LFN with the PANDA Job IDs provided" % type)
if utils.isFilled(nav):
self.publishNav(nav)
main = {}
main["buffer"] = {}
main["buffer"]["method"] = "joblfn"
main["buffer"]["params"] = (lfn if lfn!=None else '',jobs if jobs!= None else '' )
if jobs != None: main["buffer"]["jobs"] = jobs
main["buffer"]["type"] = False
if (utils.isFilled(jobstatus) or utils.isFilled(site) or utils.isFilled(user)) and not utils.isFilled(jobs):
tables = ['atlas_panda.jobsArchived4','atlas_panda.jobsActive4','atlas_panda.jobsWaiting4','atlas_panda.jobsDefined4']
r = pmt.getJobIds(site, jobtype,jobstatus,table=tables,days=days,username=user)
jobs = [i[0] for i in r['rows']]
if not utils.isFilled(select):
select = []
if jobs == None or ( not isinstance(jobs,int) and len(jobs) > 1): select.append('pandaid');
select += ['type', 'lfn', 'fsize', 'dataset', 'guid', 'scope', 'destinationse']
else:
select = utils.parseArray(select);
main["buffer"]["data"] = pmt.getJobLFN(select=','.join(select),table=table,lfn=lfn,pandaid=jobs,type=type,ds=ds,limit=limit)
self.publish(main)
self.publish( "%s/%s" % (self.server().fileScriptURL(),"taskBuffer/%s.js" % "getJobLFN"),role=pmRoles.script())
| lgpl-3.0 | -1,811,687,502,093,652,000 | 61.045455 | 195 | 0.575275 | false | 3.554688 | false | false | false |
LokiNetworks/empower-runtime | empower/persistence/persistence.py | 1 | 7560 | #!/usr/bin/env python3
#
# Copyright (c) 2016 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Empower persistence layer."""
import uuid
import empower.datatypes.etheraddress as etheraddress
import empower.datatypes.ssid as ssid
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Integer, ForeignKey
from sqlalchemy.types import TypeDecorator, Unicode
from sqlalchemy.orm import relationship
from empower.persistence import ENGINE
Base = declarative_base()
class UUID(TypeDecorator):
"""UUID type."""
impl = Unicode
def __init__(self):
self.impl.length = 16
TypeDecorator.__init__(self, length=self.impl.length)
def process_bind_param(self, value, dialect=None):
if value and isinstance(value, uuid.UUID):
return value.bytes
elif value and not isinstance(value, uuid.UUID):
raise ValueError('value %s is not a valid uuid.UUID' % value)
else:
return None
def process_result_value(self, value, dialect=None):
if value:
return uuid.UUID(bytes=value)
else:
return None
def is_mutable(self):
return False
class EtherAddress(TypeDecorator):
"""EtherAddress type."""
impl = Unicode
def __init__(self):
self.impl.length = 6
TypeDecorator.__init__(self, length=self.impl.length)
def process_bind_param(self, value, dialect=None):
if value and isinstance(value, etheraddress.EtherAddress):
return value.to_raw()
elif value and not isinstance(value, etheraddress.EtherAddress):
raise ValueError('value %s is not a valid EtherAddress' % value)
else:
return None
def process_result_value(self, value, dialect=None):
if value:
return etheraddress.EtherAddress(value)
else:
return None
def is_mutable(self):
return False
class SSID(TypeDecorator):
"""EtherAddress type."""
impl = Unicode
def __init__(self):
self.impl.length = 30
TypeDecorator.__init__(self, length=self.impl.length)
def process_bind_param(self, value, dialect=None):
if value and isinstance(value, ssid.SSID):
return value.to_raw()
elif value and not isinstance(value, ssid.SSID):
raise ValueError('value %s is not a valid SSID' % value)
else:
return None
def process_result_value(self, value, dialect=None):
if value:
return ssid.SSID(value)
else:
return None
def is_mutable(self):
return False
class TblFeed(Base):
""" Energino Feeds Table. """
__tablename__ = 'Feed'
feed_id = Column(Integer, primary_key=True)
title = Column(String)
created = Column(String)
updated = Column(String)
pnfdev_addr = Column(EtherAddress, nullable=True)
class TblAccount(Base):
""" Account table. """
__tablename__ = 'account'
username = Column(String, primary_key=True)
password = Column(String)
name = Column(String)
surname = Column(String)
email = Column(String)
role = Column(String)
class TblPendingTenant(Base):
""" List of pending Tenant request. """
__tablename__ = 'pending_tenant'
tenant_id = Column("tenant_id",
UUID(),
primary_key=True,
default=uuid.uuid4)
tenant_name = Column(SSID, unique=True)
desc = Column(String)
owner = Column(String)
bssid_type = Column(String)
def to_dict(self):
""" Return a JSON-serializable dictionary representing the request """
return {'tenant_id': self.tenant_id,
'owner': self.owner,
'tenant_name': self.tenant_name,
'desc': self.desc,
'bssid_type': self.bssid_type}
class TblTenant(Base):
""" Tenant table. """
__tablename__ = 'tenant'
tenant_id = Column("tenant_id",
UUID(),
primary_key=True,
default=uuid.uuid4)
tenant_name = Column(SSID, unique=True)
desc = Column(String)
owner = Column(String)
bssid_type = Column(String)
class TblPNFDev(Base):
""" Programmable network fabric device table. """
__tablename__ = 'pnfdev'
addr = Column("addr",
EtherAddress(),
primary_key=True)
label = Column(String)
tbl_type = Column(String(20))
__mapper_args__ = {
'polymorphic_on': tbl_type,
'polymorphic_identity': 'pnfdevs'
}
class TblBelongs(Base):
"""Link PNFDevs with Tenants"""
__tablename__ = 'belongs'
addr = Column(EtherAddress(),
ForeignKey('pnfdev.addr'),
primary_key=True)
tenant_id = Column(UUID(),
ForeignKey('tenant.tenant_id'),
primary_key=True)
class TblCPP(TblPNFDev):
""" Programmable network fabric device table. """
__mapper_args__ = {
'polymorphic_identity': 'cpps'
}
class TblWTP(TblPNFDev):
""" Wireless Termination point. """
__mapper_args__ = {
'polymorphic_identity': 'wtps'
}
class TblVBS(TblPNFDev):
""" Virtual Base Station Point. """
__mapper_args__ = {
'polymorphic_identity': 'vbses'
}
class TblAllow(Base):
""" Allow table. """
__tablename__ = 'allow'
addr = Column("addr",
EtherAddress(),
primary_key=True)
label = Column(String)
class TblDeny(Base):
""" Deny table. """
__tablename__ = 'deny'
addr = Column("addr",
EtherAddress(),
primary_key=True)
label = Column(String)
class TblRule(Base):
"""rule table"""
__tablename__ = 'rule'
rule_id = Column(Integer, primary_key=True , autoincrement=True)
slvap = Column("slvap",EtherAddress())
swtp = Column("swtp",EtherAddress())
type = Column(String)
dwtp = Column("dwtp",EtherAddress())
dlvap = Column("dlvap",EtherAddress())
def to_dict(self):
""" Return a JSON-serializable dictionary representing the request """
return {'rule_id': self.rule_id,
'slvap': self.slvap,
'dwtp': self.dwtp,
'type' : self.type,
'swtp': self.swtp,
'dlvap': self.dlvap
}
class TblRulegroupid(Base):
"""rule relation"""
    __tablename__ = 'groupid'
id = Column(Integer,primary_key=True,autoincrement=True)
target = Column("target",EtherAddress(),nullable=True)
lvaps = relationship('TblRulegroup')
class TblRulegroup(Base):
"""rule lvaps"""
__tablename__ = 'rulegroup'
groupid = Column(Integer, primary_key=True, autoincrement=True)
lvap = Column("lvap",EtherAddress())
rule_id = Column(Integer,ForeignKey(TblRulegroupid.id))
Base.metadata.create_all(ENGINE)
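# An illustrative sketch (the session setup and the EtherAddress constructor
# argument are assumptions): the custom column types above transparently
# convert EtherAddress/SSID objects to and from their raw byte form when rows
# are stored and loaded.
#
#     from sqlalchemy.orm import sessionmaker
#     session = sessionmaker(bind=ENGINE)()
#     session.add(TblAllow(addr=etheraddress.EtherAddress('00:11:22:33:44:55'),
#                          label='lab device'))
#     session.commit()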
| apache-2.0 | 5,023,260,074,562,165,000 | 23.387097 | 78 | 0.599339 | false | 3.882897 | false | false | false |
kartta-labs/reservoir | third_party/3dmr/modelrepository/settings.py | 1 | 3669 | """
Django settings for modelrepository project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&$)1o$zbwi&y=qu)fb@1o_@p&bzjtnq3f2!gz*h+xex=(e@_&_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.postgres',
'mainapp',
'django_pgviews',
'compressor',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'modelrepository.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
LOGIN_REDIRECT_URL = '/login'
WSGI_APPLICATION = 'modelrepository.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('RESERVOIR_DB_NAME', 'reservoir'),
'USER': os.environ.get('RESERVOIR_DB_USER', 'reservoir'),
'PASSWORD': os.environ.get('RESERVOIR_DB_PASSWORD','reservoir'),
'HOST': os.environ.get('RESERVOIR_DB_HOST','127.0.0.1'),
'PORT': os.environ.get('RESERVOIR_DB_PORT','5432'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/home/tdmr/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
| apache-2.0 | -6,878,966,147,424,092,000 | 25.781022 | 91 | 0.681112 | false | 3.477725 | false | false | false |
jianajavier/pnc-cli | pnc_cli/productversions.py | 1 | 4985 | from argh import arg
from six import iteritems
import logging
from pnc_cli import swagger_client
from pnc_cli.swagger_client.apis.productversions_api import ProductversionsApi
from pnc_cli.swagger_client.apis.products_api import ProductsApi
from pnc_cli import utils
versions_api = ProductversionsApi(utils.get_api_client())
products_api = ProductsApi(utils.get_api_client())
__author__ = 'thauser'
def create_product_version_object(**kwargs):
created_version = swagger_client.ProductVersionRest()
for key, value in iteritems(kwargs):
setattr(created_version, key, value)
return created_version
def version_exists(id):
response = utils.checked_api_call(versions_api, 'get_specific', id=id)
if not response:
return False
return True
def version_exists_for_product(id, version):
existing_products = products_api.get_product_versions(id=id).content
if existing_products:
return version in [x.version for x in existing_products]
else:
return False
@arg("-p", "--page-size", help="Limit the amount of build records returned")
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_product_versions(page_size=200, sort="", q=""):
"""
List all ProductVersions
"""
response = utils.checked_api_call(versions_api, 'get_all', page_size=page_size, sort=sort, q=q)
if response:
return response.content
# TODO: Version needs to be checked for validity.
@arg("product_id", help="ID of product to add a version to")
@arg("version", help="Version to add")
@arg("-cm", "--current-product-milestone-id",
help="ID of the milestone this version should be on")
@arg("-pr", "--product-releases", type=int, nargs="+",
help="List of product release IDs for this Product version")
@arg("-pm", "--product-milestones", type=int, nargs="+",
help="List of milestone IDs to associate with the new version")
@arg("-bc", "--build-configuration-set-ids", type=int, nargs="+",
help="List of build configuration set IDs to associate with the new version")
def create_product_version(product_id, version, **kwargs):
"""
Create a new ProductVersion.
Each ProductVersion represents a supported product release stream, which includes milestones and releases typically associated with a single major.minor version of a Product.
Follows the Red Hat product support cycle, and typically includes Alpha, Beta, GA, and CP releases with the same major.minor version.
Example:
ProductVersion 1.0 includes the following releases:
1.0.Beta1, 1.0.GA, 1.0.1, etc.
"""
if version_exists_for_product(product_id, version):
logging.error("Version {} already exists for product: {}".format(
version, products_api.get_specific(id=product_id).content.name))
return
kwargs['product_id'] = product_id
kwargs['version'] = version
product_version = create_product_version_object(**kwargs)
response = utils.checked_api_call(versions_api, 'create_new_product_version',
body=product_version)
if response: return response.content
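# An illustrative call (the product ID and version are made up): create
# version 1.0 for the product with ID 1 and keep the returned content.
#
#     new_version = create_product_version(1, '1.0')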
@arg("id", help="ID of the ProductVersion to retrieve")
def get_product_version(id):
"""
Retrieve a specific ProductVersion by ProductVersion ID
"""
if not version_exists(id):
logging.error("No ProductVersion with ID {} exists.".format(id))
return
response = utils.checked_api_call(versions_api, 'get_specific', id=id)
if response: return response.content
# TODO: how should constraints be defined? Can a new productId be specified?
# TODO: Version needs to be checked for validity.
@arg("id", help="ID of the ProductVersion to update.")
@arg("-pid", "--product-id", help="ID of product to add a version to")
@arg("-v", "--version", help="Version to add")
@arg("-cm", "--current-product-milestone-id", type=int,
help="ID of the ProductMilestone this version should be on")
@arg("-pr", "--product-releases", type=int, nargs="+",
help="List of ProductRelease IDs for this Product version")
@arg("-pm", "--product-milestones", type=int, nargs="+",
help="List of ProductMilestone IDs to associate with the new version")
@arg("-bc", "--build-configuration-set-ids", type=int, nargs="+",
help="List of BuildConfigurationSet IDs to associate with the new version")
def update_product_version(id, **kwargs):
"""
Update the ProductVersion with ID id with new values.
"""
if not version_exists(id):
logging.error("A ProductVersion with id {} doesn't exist.".format(id))
return
to_update = versions_api.get_specific(id=id).content
for key, value in kwargs.items():
if value is not None:
setattr(to_update, key, value)
response = utils.checked_api_call(versions_api, 'update',
id=id,
body=to_update)
if response:
return response.content
| apache-2.0 | 1,279,193,986,023,931,000 | 39.201613 | 178 | 0.677031 | false | 3.852396 | false | false | false |
ltiao/basketball-intelligence | bball_intel/bball_intel/settings/base.py | 1 | 2103 | """
Django settings for bball_intel project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: BASE_DIR.child(...)
from unipath import Path
PROJECT_DIR = Path(__file__).ancestor(4)
BASE_DIR = Path(__file__).ancestor(3)
import dj_database_url
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8f03d!k79490u5t@5+pxs(j$%lq@kp$n5od3br#d$#0)0f*14a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'bball_intel.urls'
WSGI_APPLICATION = 'bball_intel.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config(
default = 'sqlite:///{base}/db.sqlite3'.format(base=BASE_DIR)
)
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| mit | 5,895,827,493,710,168,000 | 23.741176 | 71 | 0.731336 | false | 3.260465 | false | false | false |
trammell/bincfi | glookup_policy/extract_code.py | 2 | 1418 | #!/usr/bin/python
from __future__ import with_statement
from struct import *
#from __future__ import with_statement
import re
import os
import sys
def get_section_info(binname, secname, info):
pattern = re.compile(r"\s*\[\s*(?P<num>[\d]{1,2})\]\s*"
"(?P<name>[\S]+)\s*"
"(?P<type>[\S]+)\s*"
"(?P<addr>[\S]+)\s*"
"(?P<offset>[\S]+)\s*"
"(?P<size>[\S]+)\s*"
"[^\n]*$")
cmd = "readelf -S " + binname;
with os.popen(cmd) as file:
for line in file:
line = line.strip();
m=pattern.match(line);
if((m != None) and (m.group('name') == secname)):
if(info == 'num'):
return int(m.group(info),10)
if((info == 'addr') or
(info == 'offset') or
(info == 'size')
):
return int(m.group(info),16)
else:
return m.group(info)
return None
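# An illustrative call (the binary name is made up): read the offset and size
# of the .text section from the ELF section headers printed by readelf.
#
#     text_offset = get_section_info("a.out", ".text", "offset")
#     text_size = get_section_info("a.out", ".text", "size")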
def extract_data(binname,secname,output):
gen_asm_offset = get_section_info(binname, secname, "offset");
gen_asm_size = get_section_info(binname, secname, "size");
fd = os.open(binname,os.O_RDWR);
os.lseek(fd, gen_asm_offset, os.SEEK_SET);
buf = os.read(fd, gen_asm_size);
fd2 = os.open(output, os.O_CREAT|os.O_TRUNC|os.O_RDWR, 0644)
os.write(fd2,buf);
os.close(fd2);
os.close(fd);
print "text offset %lx"%gen_asm_offset
print "text size %lx"%gen_asm_size
def main():
orig_bin = sys.argv[1];
output_file = sys.argv[2];
extract_data(orig_bin, ".text",output_file);
main();
| gpl-2.0 | 6,978,572,787,141,813,000 | 23.877193 | 64 | 0.59732 | false | 2.500882 | false | false | false |
byteweaver/django-skrill | skrill/views.py | 1 | 1731 | from django import http
from django.views.generic.base import View
from skrill.models import PaymentRequest, StatusReport
class StatusReportView(View):
def post(self, request, *args, **kwargs):
payment_request = PaymentRequest.objects.get(pk=request.POST['transaction_id'])
report = StatusReport()
report.payment_request = payment_request
report.pay_to_email = request.POST['pay_to_email']
report.pay_from_email = request.POST['pay_from_email']
report.merchant_id = request.POST['merchant_id']
report.customer_id = request.POST.get('customer_id', None)
report.mb_transaction_id = request.POST['mb_transaction_id']
report.mb_amount = request.POST['mb_amount']
report.mb_currency = request.POST['mb_currency']
report.status = request.POST['status']
report.failed_reason_code = request.POST.get('failed_reason_code', None)
report.md5sig = request.POST['md5sig']
report.sha2sig = request.POST.get('sha2sig', None)
report.amount = request.POST['amount']
report.currency = request.POST['currency']
report.payment_type = request.POST.get('payment_type', None)
report.custom_field_1 = request.POST.get('custom_field_1', None)
report.custom_field_2 = request.POST.get('custom_field_2', None)
report.custom_field_3 = request.POST.get('custom_field_3', None)
report.custom_field_4 = request.POST.get('custom_field_4', None)
report.custom_field_5 = request.POST.get('custom_field_5', None)
report.save()
report.validate_md5sig()
report.valid = True
report.save()
report.send_signal()
return http.HttpResponse()
| bsd-3-clause | -1,467,092,703,406,649,300 | 45.783784 | 87 | 0.659734 | false | 3.636555 | false | false | false |
mjfarmer/scada_py | env/lib/python2.7/site-packages/twisted/conch/ssh/keys.py | 3 | 43907 | # -*- test-case-name: twisted.conch.test.test_keys -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Handling of RSA and DSA keys.
"""
from __future__ import absolute_import, division
import base64
import itertools
import warnings
from hashlib import md5
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import dsa, rsa, padding
try:
from cryptography.hazmat.primitives.asymmetric.utils import (
encode_dss_signature, decode_dss_signature)
except ImportError:
from cryptography.hazmat.primitives.asymmetric.utils import (
encode_rfc6979_signature as encode_dss_signature,
decode_rfc6979_signature as decode_dss_signature)
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from pyasn1.error import PyAsn1Error
from pyasn1.type import univ
from pyasn1.codec.ber import decoder as berDecoder
from pyasn1.codec.ber import encoder as berEncoder
from twisted.conch.ssh import common, sexpy
from twisted.conch.ssh.common import int_from_bytes, int_to_bytes
from twisted.python import randbytes
from twisted.python.compat import iterbytes, long, izip, nativeString, _PY3
from twisted.python.deprecate import deprecated, getDeprecationWarningString
from twisted.python.versions import Version
class BadKeyError(Exception):
"""
Raised when a key isn't what we expected from it.
XXX: we really need to check for bad keys
"""
class EncryptedKeyError(Exception):
"""
Raised when an encrypted key is presented to fromString/fromFile without
a password.
"""
class Key(object):
"""
An object representing a key. A key can be either a public or
private key. A public key can verify a signature; a private key can
create or verify a signature. To generate a string that can be stored
on disk, use the toString method. If you have a private key, but want
the string representation of the public key, use Key.public().toString().
@ivar keyObject: DEPRECATED. The C{Crypto.PublicKey} object
that operations are performed with.
"""
def fromFile(cls, filename, type=None, passphrase=None):
"""
Load a key from a file.
@param filename: The path to load key data from.
@type type: L{str} or C{None}
@param type: A string describing the format the key data is in, or
C{None} to attempt detection of the type.
@type passphrase: L{bytes} or C{None}
@param passphrase: The passphrase the key is encrypted with, or C{None}
if there is no encryption.
@rtype: L{Key}
@return: The loaded key.
"""
with open(filename, 'rb') as f:
return cls.fromString(f.read(), type, passphrase)
fromFile = classmethod(fromFile)
def fromString(cls, data, type=None, passphrase=None):
"""
Return a Key object corresponding to the string data.
type is optionally the type of string, matching a _fromString_*
method. Otherwise, the _guessStringType() classmethod will be used
to guess a type. If the key is encrypted, passphrase is used as
the decryption key.
@type data: L{bytes}
@param data: The key data.
@type type: L{str} or C{None}
@param type: A string describing the format the key data is in, or
C{None} to attempt detection of the type.
@type passphrase: L{bytes} or C{None}
@param passphrase: The passphrase the key is encrypted with, or C{None}
if there is no encryption.
@rtype: L{Key}
@return: The loaded key.
"""
if type is None:
type = cls._guessStringType(data)
if type is None:
raise BadKeyError('cannot guess the type of %r' % (data,))
method = getattr(cls, '_fromString_%s' % (type.upper(),), None)
if method is None:
raise BadKeyError('no _fromString method for %s' % (type,))
if method.__code__.co_argcount == 2: # No passphrase
if passphrase:
raise BadKeyError('key not encrypted')
return method(data)
else:
return method(data, passphrase)
fromString = classmethod(fromString)
@classmethod
def _fromString_BLOB(cls, blob):
"""
Return a public key object corresponding to this public key blob.
The format of a RSA public key blob is::
string 'ssh-rsa'
integer e
integer n
The format of a DSA public key blob is::
string 'ssh-dss'
integer p
integer q
integer g
integer y
@type blob: L{bytes}
@param blob: The key data.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if the key type (the first string) is unknown.
"""
keyType, rest = common.getNS(blob)
if keyType == b'ssh-rsa':
e, n, rest = common.getMP(rest, 2)
return cls(
rsa.RSAPublicNumbers(e, n).public_key(default_backend()))
elif keyType == b'ssh-dss':
p, q, g, y, rest = common.getMP(rest, 4)
return cls(
dsa.DSAPublicNumbers(
y=y,
parameter_numbers=dsa.DSAParameterNumbers(
p=p,
q=q,
g=g
)
).public_key(default_backend())
)
else:
raise BadKeyError('unknown blob type: %s' % (keyType,))
@classmethod
def _fromString_PRIVATE_BLOB(cls, blob):
"""
Return a private key object corresponding to this private key blob.
The blob formats are as follows:
RSA keys::
string 'ssh-rsa'
integer n
integer e
integer d
integer u
integer p
integer q
DSA keys::
string 'ssh-dss'
integer p
integer q
integer g
integer y
integer x
@type blob: L{bytes}
@param blob: The key data.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if the key type (the first string) is unknown.
"""
keyType, rest = common.getNS(blob)
if keyType == b'ssh-rsa':
n, e, d, u, p, q, rest = common.getMP(rest, 6)
return cls._fromRSAComponents(n=n, e=e, d=d, p=p, q=q)
elif keyType == b'ssh-dss':
p, q, g, y, x, rest = common.getMP(rest, 5)
return cls._fromDSAComponents(y=y, g=g, p=p, q=q, x=x)
else:
raise BadKeyError('unknown blob type: %s' % (keyType,))
@classmethod
def _fromString_PUBLIC_OPENSSH(cls, data):
"""
Return a public key object corresponding to this OpenSSH public key
string. The format of an OpenSSH public key string is::
<key type> <base64-encoded public key blob>
@type data: L{bytes}
@param data: The key data.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if the blob type is unknown.
"""
blob = base64.decodestring(data.split()[1])
return cls._fromString_BLOB(blob)
@classmethod
def _fromString_PRIVATE_OPENSSH(cls, data, passphrase):
"""
Return a private key object corresponding to this OpenSSH private key
string. If the key is encrypted, passphrase MUST be provided.
Providing a passphrase for an unencrypted key is an error.
The format of an OpenSSH private key string is::
-----BEGIN <key type> PRIVATE KEY-----
[Proc-Type: 4,ENCRYPTED
DEK-Info: DES-EDE3-CBC,<initialization value>]
<base64-encoded ASN.1 structure>
------END <key type> PRIVATE KEY------
The ASN.1 structure of a RSA key is::
(0, n, e, d, p, q)
The ASN.1 structure of a DSA key is::
(0, p, q, g, y, x)
@type data: L{bytes}
@param data: The key data.
@type passphrase: L{bytes} or C{None}
@param passphrase: The passphrase the key is encrypted with, or C{None}
if it is not encrypted.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if
* a passphrase is provided for an unencrypted key
* the ASN.1 encoding is incorrect
@raises EncryptedKeyError: if
* a passphrase is not provided for an encrypted key
"""
lines = data.strip().split(b'\n')
kind = lines[0][11:14]
if lines[1].startswith(b'Proc-Type: 4,ENCRYPTED'):
if not passphrase:
raise EncryptedKeyError('Passphrase must be provided '
'for an encrypted key')
# Determine cipher and initialization vector
try:
_, cipherIVInfo = lines[2].split(b' ', 1)
cipher, ivdata = cipherIVInfo.rstrip().split(b',', 1)
except ValueError:
raise BadKeyError('invalid DEK-info %r' % (lines[2],))
if cipher == b'AES-128-CBC':
algorithmClass = algorithms.AES
keySize = 16
if len(ivdata) != 32:
raise BadKeyError('AES encrypted key with a bad IV')
elif cipher == b'DES-EDE3-CBC':
algorithmClass = algorithms.TripleDES
keySize = 24
if len(ivdata) != 16:
raise BadKeyError('DES encrypted key with a bad IV')
else:
raise BadKeyError('unknown encryption type %r' % (cipher,))
# Extract keyData for decoding
iv = bytes(bytearray([int(ivdata[i:i + 2], 16)
for i in range(0, len(ivdata), 2)]))
ba = md5(passphrase + iv[:8]).digest()
bb = md5(ba + passphrase + iv[:8]).digest()
decKey = (ba + bb)[:keySize]
b64Data = base64.decodestring(b''.join(lines[3:-1]))
decryptor = Cipher(
algorithmClass(decKey),
modes.CBC(iv),
backend=default_backend()
).decryptor()
keyData = decryptor.update(b64Data) + decryptor.finalize()
removeLen = ord(keyData[-1:])
keyData = keyData[:-removeLen]
else:
b64Data = b''.join(lines[1:-1])
keyData = base64.decodestring(b64Data)
try:
decodedKey = berDecoder.decode(keyData)[0]
except PyAsn1Error as e:
raise BadKeyError(
'Failed to decode key (Bad Passphrase?): %s' % (e,))
if kind == b'RSA':
if len(decodedKey) == 2: # Alternate RSA key
decodedKey = decodedKey[0]
if len(decodedKey) < 6:
raise BadKeyError('RSA key failed to decode properly')
n, e, d, p, q, dmp1, dmq1, iqmp = [
long(value) for value in decodedKey[1:9]
]
if p > q: # Make p smaller than q
p, q = q, p
return cls(
rsa.RSAPrivateNumbers(
p=p,
q=q,
d=d,
dmp1=dmp1,
dmq1=dmq1,
iqmp=iqmp,
public_numbers=rsa.RSAPublicNumbers(e=e, n=n),
).private_key(default_backend())
)
elif kind == b'DSA':
p, q, g, y, x = [long(value) for value in decodedKey[1: 6]]
if len(decodedKey) < 6:
raise BadKeyError('DSA key failed to decode properly')
return cls(
dsa.DSAPrivateNumbers(
x=x,
public_numbers=dsa.DSAPublicNumbers(
y=y,
parameter_numbers=dsa.DSAParameterNumbers(
p=p,
q=q,
g=g
)
)
).private_key(backend=default_backend())
)
else:
raise BadKeyError("unknown key type %s" % (kind,))
@classmethod
def _fromString_PUBLIC_LSH(cls, data):
"""
Return a public key corresponding to this LSH public key string.
The LSH public key string format is::
<s-expression: ('public-key', (<key type>, (<name, <value>)+))>
The names for a RSA (key type 'rsa-pkcs1-sha1') key are: n, e.
The names for a DSA (key type 'dsa') key are: y, g, p, q.
@type data: L{bytes}
@param data: The key data.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if the key type is unknown
"""
sexp = sexpy.parse(base64.decodestring(data[1:-1]))
assert sexp[0] == b'public-key'
kd = {}
for name, data in sexp[1][1:]:
kd[name] = common.getMP(common.NS(data))[0]
if sexp[1][0] == b'dsa':
return cls._fromDSAComponents(
y=kd[b'y'], g=kd[b'g'], p=kd[b'p'], q=kd[b'q'])
elif sexp[1][0] == b'rsa-pkcs1-sha1':
return cls._fromRSAComponents(n=kd[b'n'], e=kd[b'e'])
else:
raise BadKeyError('unknown lsh key type %s' % (sexp[1][0],))
@classmethod
def _fromString_PRIVATE_LSH(cls, data):
"""
Return a private key corresponding to this LSH private key string.
The LSH private key string format is::
<s-expression: ('private-key', (<key type>, (<name>, <value>)+))>
The names for a RSA (key type 'rsa-pkcs1-sha1') key are: n, e, d, p, q.
The names for a DSA (key type 'dsa') key are: y, g, p, q, x.
@type data: L{bytes}
@param data: The key data.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if the key type is unknown
"""
sexp = sexpy.parse(data)
assert sexp[0] == b'private-key'
kd = {}
for name, data in sexp[1][1:]:
kd[name] = common.getMP(common.NS(data))[0]
if sexp[1][0] == b'dsa':
assert len(kd) == 5, len(kd)
return cls._fromDSAComponents(
y=kd[b'y'], g=kd[b'g'], p=kd[b'p'], q=kd[b'q'], x=kd[b'x'])
elif sexp[1][0] == b'rsa-pkcs1':
assert len(kd) == 8, len(kd)
if kd[b'p'] > kd[b'q']: # Make p smaller than q
kd[b'p'], kd[b'q'] = kd[b'q'], kd[b'p']
return cls._fromRSAComponents(
n=kd[b'n'], e=kd[b'e'], d=kd[b'd'], p=kd[b'p'], q=kd[b'q'])
else:
raise BadKeyError('unknown lsh key type %s' % (sexp[1][0],))
@classmethod
def _fromString_AGENTV3(cls, data):
"""
Return a private key object corresponsing to the Secure Shell Key
Agent v3 format.
The SSH Key Agent v3 format for a RSA key is::
string 'ssh-rsa'
integer e
integer d
integer n
integer u
integer p
integer q
The SSH Key Agent v3 format for a DSA key is::
string 'ssh-dss'
integer p
integer q
integer g
integer y
integer x
@type data: L{bytes}
@param data: The key data.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if the key type (the first string) is unknown
"""
keyType, data = common.getNS(data)
if keyType == b'ssh-dss':
p, data = common.getMP(data)
q, data = common.getMP(data)
g, data = common.getMP(data)
y, data = common.getMP(data)
x, data = common.getMP(data)
return cls._fromDSAComponents(y=y, g=g, p=p, q=q, x=x)
elif keyType == b'ssh-rsa':
e, data = common.getMP(data)
d, data = common.getMP(data)
n, data = common.getMP(data)
u, data = common.getMP(data)
p, data = common.getMP(data)
q, data = common.getMP(data)
return cls._fromRSAComponents(n=n, e=e, d=d, p=p, q=q, u=u)
else:
raise BadKeyError("unknown key type %s" % (keyType,))
def _guessStringType(cls, data):
"""
Guess the type of key in data. The types map to _fromString_*
methods.
@type data: L{bytes}
@param data: The key data.
"""
if data.startswith(b'ssh-'):
return 'public_openssh'
elif data.startswith(b'-----BEGIN'):
return 'private_openssh'
elif data.startswith(b'{'):
return 'public_lsh'
elif data.startswith(b'('):
return 'private_lsh'
elif data.startswith(b'\x00\x00\x00\x07ssh-'):
ignored, rest = common.getNS(data)
count = 0
while rest:
count += 1
ignored, rest = common.getMP(rest)
if count > 4:
return 'agentv3'
else:
return 'blob'
_guessStringType = classmethod(_guessStringType)
@classmethod
def _fromRSAComponents(cls, n, e, d=None, p=None, q=None, u=None):
"""
Build a key from RSA numerical components.
@type n: L{int}
@param n: The 'n' RSA variable.
@type e: L{int}
@param e: The 'e' RSA variable.
@type d: L{int} or C{None}
@param d: The 'd' RSA variable (optional for a public key).
@type p: L{int} or C{None}
@param p: The 'p' RSA variable (optional for a public key).
@type q: L{int} or C{None}
@param q: The 'q' RSA variable (optional for a public key).
@type u: L{int} or C{None}
@param u: The 'u' RSA variable. Ignored, as its value is determined by
p and q.
@rtype: L{Key}
@return: An RSA key constructed from the values as given.
"""
publicNumbers = rsa.RSAPublicNumbers(e=e, n=n)
if d is None:
# We have public components.
keyObject = publicNumbers.public_key(default_backend())
else:
privateNumbers = rsa.RSAPrivateNumbers(
p=p,
q=q,
d=d,
dmp1=rsa.rsa_crt_dmp1(d, p),
dmq1=rsa.rsa_crt_dmq1(d, q),
iqmp=rsa.rsa_crt_iqmp(p, q),
public_numbers=publicNumbers,
)
keyObject = privateNumbers.private_key(default_backend())
return cls(keyObject)
@classmethod
def _fromDSAComponents(cls, y, p, q, g, x=None):
"""
Build a key from DSA numerical components.
@type y: L{int}
@param y: The 'y' DSA variable.
@type p: L{int}
@param p: The 'p' DSA variable.
@type q: L{int}
@param q: The 'q' DSA variable.
@type g: L{int}
@param g: The 'g' DSA variable.
@type x: L{int} or C{None}
@param x: The 'x' DSA variable (optional for a public key)
@rtype: L{Key}
@return: A DSA key constructed from the values as given.
"""
publicNumbers = dsa.DSAPublicNumbers(
y=y, parameter_numbers=dsa.DSAParameterNumbers(p=p, q=q, g=g))
if x is None:
# We have public components.
keyObject = publicNumbers.public_key(default_backend())
else:
privateNumbers = dsa.DSAPrivateNumbers(
x=x, public_numbers=publicNumbers)
keyObject = privateNumbers.private_key(default_backend())
return cls(keyObject)
def __init__(self, keyObject):
"""
Initialize with a private or public
C{cryptography.hazmat.primitives.asymmetric} key.
@param keyObject: Low level key.
@type keyObject: C{cryptography.hazmat.primitives.asymmetric} key.
"""
# Avoid importing PyCrypto if at all possible
if keyObject.__class__.__module__.startswith('Crypto.PublicKey'):
warningString = getDeprecationWarningString(
Key,
Version("Twisted", 16, 0, 0),
replacement='passing a cryptography key object')
warnings.warn(warningString, DeprecationWarning, stacklevel=2)
self.keyObject = keyObject
else:
self._keyObject = keyObject
def __eq__(self, other):
"""
Return True if other represents an object with the same key.
"""
if type(self) == type(other):
return self.type() == other.type() and self.data() == other.data()
else:
return NotImplemented
def __ne__(self, other):
"""
Return True if other represents anything other than this key.
"""
result = self.__eq__(other)
if result == NotImplemented:
return result
return not result
def __repr__(self):
"""
Return a pretty representation of this object.
"""
lines = [
'<%s %s (%s bits)' % (
nativeString(self.type()),
self.isPublic() and 'Public Key' or 'Private Key',
self._keyObject.key_size)]
for k, v in sorted(self.data().items()):
if _PY3 and isinstance(k, bytes):
k = k.decode('ascii')
lines.append('attr %s:' % (k,))
by = common.MP(v)[4:]
while by:
m = by[:15]
by = by[15:]
o = ''
for c in iterbytes(m):
o = o + '%02x:' % (ord(c),)
if len(m) < 15:
o = o[:-1]
lines.append('\t' + o)
lines[-1] = lines[-1] + '>'
return '\n'.join(lines)
@property
@deprecated(Version('Twisted', 16, 0, 0))
def keyObject(self):
"""
A C{Crypto.PublicKey} object similar to this key.
As PyCrypto is no longer used for the underlying operations, this
property should be avoided.
"""
# Lazy import to have PyCrypto as a soft dependency.
from Crypto.PublicKey import DSA, RSA
keyObject = None
keyType = self.type()
keyData = self.data()
isPublic = self.isPublic()
if keyType == 'RSA':
if isPublic:
keyObject = RSA.construct((
keyData['n'],
long(keyData['e']),
))
else:
keyObject = RSA.construct((
keyData['n'],
long(keyData['e']),
keyData['d'],
keyData['p'],
keyData['q'],
keyData['u'],
))
elif keyType == 'DSA':
if isPublic:
keyObject = DSA.construct((
keyData['y'],
keyData['g'],
keyData['p'],
keyData['q'],
))
else:
keyObject = DSA.construct((
keyData['y'],
keyData['g'],
keyData['p'],
keyData['q'],
keyData['x'],
))
else:
raise BadKeyError('Unsupported key type.')
return keyObject
@keyObject.setter
@deprecated(Version('Twisted', 16, 0, 0))
def keyObject(self, value):
# Lazy import to have PyCrypto as a soft dependency.
from Crypto.PublicKey import DSA, RSA
if isinstance(value, RSA._RSAobj):
rawKey = value.key
if rawKey.has_private():
newKey = self._fromRSAComponents(
e=rawKey.e,
n=rawKey.n,
p=rawKey.p,
q=rawKey.q,
d=rawKey.d,
u=rawKey.u,
)
else:
newKey = self._fromRSAComponents(e=rawKey.e, n=rawKey.n)
elif isinstance(value, DSA._DSAobj):
rawKey = value.key
if rawKey.has_private():
newKey = self._fromDSAComponents(
y=rawKey.y,
p=rawKey.p,
q=rawKey.q,
g=rawKey.g,
x=rawKey.x,
)
else:
newKey = self._fromDSAComponents(
y=rawKey.y,
p=rawKey.p,
q=rawKey.q,
g=rawKey.g,
)
else:
raise BadKeyError('PyCrypto key type not supported.')
self._keyObject = newKey._keyObject
def isPublic(self):
"""
Check if this instance is a public key.
@return: C{True} if this is a public key.
"""
return isinstance(
self._keyObject, (rsa.RSAPublicKey, dsa.DSAPublicKey))
def public(self):
"""
Returns a version of this key containing only the public key data.
If this is a public key, this may or may not be the same object
as self.
@rtype: L{Key}
@return: A public key.
"""
return Key(self._keyObject.public_key())
def fingerprint(self):
"""
Get the user presentation of the fingerprint of this L{Key}. As
described by U{RFC 4716 section
4<http://tools.ietf.org/html/rfc4716#section-4>}::
The fingerprint of a public key consists of the output of the MD5
message-digest algorithm [RFC1321]. The input to the algorithm is
the public key data as specified by [RFC4253]. (...) The output
of the (MD5) algorithm is presented to the user as a sequence of 16
octets printed as hexadecimal with lowercase letters and separated
by colons.
@since: 8.2
@return: the user presentation of this L{Key}'s fingerprint, as a
string.
@rtype: L{str}
"""
return ':'.join([x.encode('hex') for x in md5(self.blob()).digest()])
def type(self):
"""
Return the type of the object we wrap. Currently this can only be
'RSA' or 'DSA'.
@rtype: L{str}
"""
if isinstance(
self._keyObject, (rsa.RSAPublicKey, rsa.RSAPrivateKey)):
return 'RSA'
elif isinstance(
self._keyObject, (dsa.DSAPublicKey, dsa.DSAPrivateKey)):
return 'DSA'
else:
raise RuntimeError(
'unknown type of object: %r' % (self._keyObject,))
def sshType(self):
"""
Get the type of the object we wrap as defined in the SSH protocol,
defined in RFC 4253, Section 6.6. Currently this can only be b'ssh-rsa'
or b'ssh-dss'.
@return: The key type format.
@rtype: L{bytes}
"""
return {'RSA': b'ssh-rsa', 'DSA': b'ssh-dss'}[self.type()]
def size(self):
"""
Return the size of the object we wrap.
@return: The size of the key.
@rtype: C{int}
"""
if self._keyObject is None:
return 0
return self._keyObject.key_size
def data(self):
"""
Return the values of the public key as a dictionary.
@rtype: C{dict}
"""
if isinstance(self._keyObject, rsa.RSAPublicKey):
numbers = self._keyObject.public_numbers()
return {
"n": numbers.n,
"e": numbers.e,
}
elif isinstance(self._keyObject, rsa.RSAPrivateKey):
numbers = self._keyObject.private_numbers()
return {
"n": numbers.public_numbers.n,
"e": numbers.public_numbers.e,
"d": numbers.d,
"p": numbers.p,
"q": numbers.q,
# Use a trick: iqmp is q^-1 % p, u is p^-1 % q
"u": rsa.rsa_crt_iqmp(numbers.q, numbers.p),
}
elif isinstance(self._keyObject, dsa.DSAPublicKey):
numbers = self._keyObject.public_numbers()
return {
"y": numbers.y,
"g": numbers.parameter_numbers.g,
"p": numbers.parameter_numbers.p,
"q": numbers.parameter_numbers.q,
}
elif isinstance(self._keyObject, dsa.DSAPrivateKey):
numbers = self._keyObject.private_numbers()
return {
"x": numbers.x,
"y": numbers.public_numbers.y,
"g": numbers.public_numbers.parameter_numbers.g,
"p": numbers.public_numbers.parameter_numbers.p,
"q": numbers.public_numbers.parameter_numbers.q,
}
else:
raise RuntimeError("Unexpected key type: %s" % (self._keyObject,))
def blob(self):
"""
Return the public key blob for this key. The blob is the
over-the-wire format for public keys.
SECSH-TRANS RFC 4253 Section 6.6.
RSA keys::
string 'ssh-rsa'
integer e
integer n
DSA keys::
string 'ssh-dss'
integer p
integer q
integer g
integer y
@rtype: L{bytes}
"""
type = self.type()
data = self.data()
if type == 'RSA':
return (common.NS(b'ssh-rsa') + common.MP(data['e']) +
common.MP(data['n']))
elif type == 'DSA':
return (common.NS(b'ssh-dss') + common.MP(data['p']) +
common.MP(data['q']) + common.MP(data['g']) +
common.MP(data['y']))
else:
raise BadKeyError("unknown key type %s" % (type,))
def privateBlob(self):
"""
Return the private key blob for this key. The blob is the
over-the-wire format for private keys:
Specification in OpenSSH PROTOCOL.agent
RSA keys::
string 'ssh-rsa'
integer n
integer e
integer d
integer u
integer p
integer q
DSA keys::
string 'ssh-dss'
integer p
integer q
integer g
integer y
integer x
"""
type = self.type()
data = self.data()
if type == 'RSA':
return (common.NS(b'ssh-rsa') + common.MP(data['n']) +
common.MP(data['e']) + common.MP(data['d']) +
common.MP(data['u']) + common.MP(data['p']) +
common.MP(data['q']))
elif type == 'DSA':
return (common.NS(b'ssh-dss') + common.MP(data['p']) +
common.MP(data['q']) + common.MP(data['g']) +
common.MP(data['y']) + common.MP(data['x']))
else:
raise BadKeyError("unknown key type %s" % (type,))
def toString(self, type, extra=None):
"""
Create a string representation of this key. If the key is a private
key and you want the represenation of its public key, use
C{key.public().toString()}. type maps to a _toString_* method.
@param type: The type of string to emit. Currently supported values
are C{'OPENSSH'}, C{'LSH'}, and C{'AGENTV3'}.
@type type: L{str}
@param extra: Any extra data supported by the selected format which
is not part of the key itself. For public OpenSSH keys, this is
a comment. For private OpenSSH keys, this is a passphrase to
encrypt with.
@type extra: L{bytes} or L{NoneType}
@rtype: L{bytes}
"""
method = getattr(self, '_toString_%s' % (type.upper(),), None)
if method is None:
raise BadKeyError('unknown key type: %s' % (type,))
if method.__code__.co_argcount == 2:
return method(extra)
else:
return method()
def _toString_OPENSSH(self, extra):
"""
Return a public or private OpenSSH string. See
_fromString_PUBLIC_OPENSSH and _fromString_PRIVATE_OPENSSH for the
string formats. If extra is present, it represents a comment for a
public key, or a passphrase for a private key.
@param extra: Comment for a public key or passphrase for a
private key
@type extra: L{bytes}
@rtype: L{bytes}
"""
data = self.data()
if self.isPublic():
b64Data = base64.encodestring(self.blob()).replace(b'\n', b'')
if not extra:
extra = b''
return (self.sshType() + b' ' + b64Data + b' ' + extra).strip()
else:
lines = [b''.join((b'-----BEGIN ', self.type().encode('ascii'),
b' PRIVATE KEY-----'))]
if self.type() == 'RSA':
p, q = data['p'], data['q']
objData = (0, data['n'], data['e'], data['d'], q, p,
data['d'] % (q - 1), data['d'] % (p - 1),
data['u'])
else:
objData = (0, data['p'], data['q'], data['g'], data['y'],
data['x'])
asn1Sequence = univ.Sequence()
for index, value in izip(itertools.count(), objData):
asn1Sequence.setComponentByPosition(index, univ.Integer(value))
asn1Data = berEncoder.encode(asn1Sequence)
if extra:
iv = randbytes.secureRandom(8)
hexiv = ''.join(['%02X' % (ord(x),) for x in iterbytes(iv)])
hexiv = hexiv.encode('ascii')
lines.append(b'Proc-Type: 4,ENCRYPTED')
lines.append(b'DEK-Info: DES-EDE3-CBC,' + hexiv + b'\n')
ba = md5(extra + iv).digest()
bb = md5(ba + extra + iv).digest()
encKey = (ba + bb)[:24]
padLen = 8 - (len(asn1Data) % 8)
asn1Data += (chr(padLen) * padLen).encode('ascii')
encryptor = Cipher(
algorithms.TripleDES(encKey),
modes.CBC(iv),
backend=default_backend()
).encryptor()
asn1Data = encryptor.update(asn1Data) + encryptor.finalize()
b64Data = base64.encodestring(asn1Data).replace(b'\n', b'')
lines += [b64Data[i:i + 64] for i in range(0, len(b64Data), 64)]
lines.append(b''.join((b'-----END ', self.type().encode('ascii'),
b' PRIVATE KEY-----')))
return b'\n'.join(lines)
def _toString_LSH(self):
"""
Return a public or private LSH key. See _fromString_PUBLIC_LSH and
_fromString_PRIVATE_LSH for the key formats.
@rtype: L{bytes}
"""
data = self.data()
type = self.type()
if self.isPublic():
if type == 'RSA':
keyData = sexpy.pack([[b'public-key',
[b'rsa-pkcs1-sha1',
[b'n', common.MP(data['n'])[4:]],
[b'e', common.MP(data['e'])[4:]]]]])
elif type == 'DSA':
keyData = sexpy.pack([[b'public-key',
[b'dsa',
[b'p', common.MP(data['p'])[4:]],
[b'q', common.MP(data['q'])[4:]],
[b'g', common.MP(data['g'])[4:]],
[b'y', common.MP(data['y'])[4:]]]]])
else:
raise BadKeyError("unknown key type %s" % (type,))
return (b'{' + base64.encodestring(keyData).replace(b'\n', b'') +
b'}')
else:
if type == 'RSA':
p, q = data['p'], data['q']
return sexpy.pack([[b'private-key',
[b'rsa-pkcs1',
[b'n', common.MP(data['n'])[4:]],
[b'e', common.MP(data['e'])[4:]],
[b'd', common.MP(data['d'])[4:]],
[b'p', common.MP(q)[4:]],
[b'q', common.MP(p)[4:]],
[b'a', common.MP(
data['d'] % (q - 1))[4:]],
[b'b', common.MP(
data['d'] % (p - 1))[4:]],
[b'c', common.MP(data['u'])[4:]]]]])
elif type == 'DSA':
return sexpy.pack([[b'private-key',
[b'dsa',
[b'p', common.MP(data['p'])[4:]],
[b'q', common.MP(data['q'])[4:]],
[b'g', common.MP(data['g'])[4:]],
[b'y', common.MP(data['y'])[4:]],
[b'x', common.MP(data['x'])[4:]]]]])
else:
raise BadKeyError("unknown key type %s'" % (type,))
def _toString_AGENTV3(self):
"""
Return a private Secure Shell Agent v3 key. See
_fromString_AGENTV3 for the key format.
@rtype: L{bytes}
"""
data = self.data()
if not self.isPublic():
if self.type() == 'RSA':
values = (data['e'], data['d'], data['n'], data['u'],
data['p'], data['q'])
elif self.type() == 'DSA':
values = (data['p'], data['q'], data['g'], data['y'],
data['x'])
return common.NS(self.sshType()) + b''.join(map(common.MP, values))
def sign(self, data):
"""
Sign some data with this key.
SECSH-TRANS RFC 4253 Section 6.6.
@type data: L{bytes}
@param data: The data to sign.
@rtype: L{bytes}
@return: A signature for the given data.
"""
if self.type() == 'RSA':
signer = self._keyObject.signer(
padding.PKCS1v15(), hashes.SHA1())
signer.update(data)
ret = common.NS(signer.finalize())
elif self.type() == 'DSA':
signer = self._keyObject.signer(hashes.SHA1())
signer.update(data)
signature = signer.finalize()
(r, s) = decode_dss_signature(signature)
# SSH insists that the DSS signature blob be two 160-bit integers
# concatenated together. The sig[0], [1] numbers from obj.sign
# are just numbers, and could be any length from 0 to 160 bits.
# Make sure they are padded out to 160 bits (20 bytes each)
ret = common.NS(int_to_bytes(r, 20) + int_to_bytes(s, 20))
else:
raise BadKeyError("unknown key type %s" % (self.type(),))
return common.NS(self.sshType()) + ret
def verify(self, signature, data):
"""
Verify a signature using this key.
@type signature: L{bytes}
@param signature: The signature to verify.
@type data: L{bytes}
@param data: The signed data.
@rtype: L{bool}
@return: C{True} if the signature is valid.
"""
if len(signature) == 40:
# DSA key with no padding
signatureType, signature = b'ssh-dss', common.NS(signature)
else:
signatureType, signature = common.getNS(signature)
if signatureType != self.sshType():
return False
if self.type() == 'RSA':
k = self._keyObject
if not self.isPublic():
k = k.public_key()
verifier = k.verifier(
common.getNS(signature)[0],
padding.PKCS1v15(),
hashes.SHA1(),
)
elif self.type() == 'DSA':
concatenatedSignature = common.getNS(signature)[0]
r = int_from_bytes(concatenatedSignature[:20], 'big')
s = int_from_bytes(concatenatedSignature[20:], 'big')
signature = encode_dss_signature(r, s)
k = self._keyObject
if not self.isPublic():
k = k.public_key()
verifier = k.verifier(
signature, hashes.SHA1())
else:
raise BadKeyError("unknown key type %s" % (self.type(),))
verifier.update(data)
try:
verifier.verify()
except InvalidSignature:
return False
else:
return True
@deprecated(Version("Twisted", 15, 5, 0))
def objectType(obj):
"""
DEPRECATED. Return the SSH key type corresponding to a
C{Crypto.PublicKey.pubkey.pubkey} object.
@param obj: Key for which the type is returned.
@type obj: C{Crypto.PublicKey.pubkey.pubkey}
@return: Return the SSH key type corresponding to a PyCrypto object.
@rtype: C{str}
"""
keyDataMapping = {
('n', 'e', 'd', 'p', 'q'): b'ssh-rsa',
('n', 'e', 'd', 'p', 'q', 'u'): b'ssh-rsa',
('y', 'g', 'p', 'q', 'x'): b'ssh-dss'
}
try:
return keyDataMapping[tuple(obj.keydata)]
except (KeyError, AttributeError):
raise BadKeyError("invalid key object", obj)
def _getPersistentRSAKey(location, keySize=4096):
"""
This function returns a persistent L{Key}.
The key is loaded from a PEM file in C{location}. If it does not exist, a
key with the key size of C{keySize} is generated and saved.
@param location: Where the key is stored.
@type location: L{twisted.python.filepath.FilePath)
@param keySize: The size of the key, if it needs to be generated.
@type keySize: L{int}
@returns: A persistent key.
@rtype: L{Key}
"""
location.parent().makedirs(ignoreExistingDirectory=True)
# If it doesn't exist, we want to generate a new key and save it
if not location.exists():
privateKey = rsa.generate_private_key(
public_exponent=65537,
key_size=keySize,
backend=default_backend()
)
pem = privateKey.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
location.setContent(pem)
# By this point (save any hilarious race conditions) we should have a
# working PEM file. Load it!
# (Future archaelogical readers: I chose not to short circuit above,
# because then there's two exit paths to this code!)
with location.open("rb") as keyFile:
privateKey = serialization.load_pem_private_key(
keyFile.read(),
password=None,
backend=default_backend()
)
return Key(privateKey)
if _PY3:
# The objectType function is deprecated and not being ported to Python 3.
del objectType
| gpl-3.0 | -1,670,624,420,698,330,600 | 33.195483 | 79 | 0.508279 | false | 4.011237 | false | false | false |
geocryology/GeoCryoLabPy | equipment/Agilent4395A.py | 1 | 6615 | """
Python class representing the Agilent4395A Frequency Response Analyzer
"""
import cmath
import datetime
import sys
import visa
from draw import draw
# Controller class for the Agilent 4395A Frequency Response Analyzer
# - operates over GPIB using a National Instruments USB-GPIB cable
#
# GPIB uses two methods for communication:
# WRITE - Sends a command, doesn't return a response
# QUERY - Sends a command, followed by a '?', and returns a response
class Agilent4395A:
# GPIB Address, used to specify connection
ADDRESS = u'GPIB0::16::INSTR'
# GPIB ID string, returned by the '*IDN?' query, used to test if connection is successful
ID = u'HEWLETT-PACKARD,4395A,MY41101925,REV1.12\n'
def __init__(self):
self.rm = visa.ResourceManager()
self.analyzer = None
# Connect to and initialize the analyzer
def connect(self):
self.analyzer = self.rm.open_resource(self.ADDRESS)
currentID = (self.query("*IDN?"))
if currentID != self.ID:
print "ID discrepancy:"
print " expected:", self.ID
print " actual: ", currentID
return False
self.write("*RST")
self.write("*CLS")
return True
# Close the connection (should be done before script exits)
def disconnect(self):
self.analyzer.close()
# Sends a string to the analyzer, does not return a response
def write(self, cmd):
self.analyzer.write(cmd)
# Sends a string (must end with '?'), returns response
# If the response is large, it may take several seconds to return
def query(self, cmd):
return self.analyzer.query(cmd)
if __name__ == "__main__":
# Test script, sends some configuration commands and reads the measured data
import time
fra = Agilent4395A()
if not fra.connect():
print "Failed to connect to Agilent4395A"
exit(1)
# dictionaries to store measurements
results = {"A": {}, "B": {}}
channels = ["A", "B"]
filename = sys.argv[1]
# Setup parameters
Rs = 50.0 # resistance, ohms
power = 0.0 # dB
nPoints = 201
f1 = 100
f2 = 100000000
# overwrite power from command line
if len(sys.argv) == 3:
power = eval(sys.argv[2])
# Validate parameters
if not (1 <= nPoints <= 801):
print "nPoints must be in the range [0, 801]"
exit()
if not (f1 < f2):
print "f1 must be less than f2"
exit()
if not (10 <= f1 <= 510000000):
print "start/stop frequencies must be in the range [10, 510M]"
exit()
if not (10 <= f2 <= 510000000):
print "start/stop frequencies must be in the range [10, 510M]"
exit()
# BWAUTO 1
fmts = ["LINM", "PHAS", "REAL", "IMAG", "LOGM"]
commands = """NA
CHAN1
HOLD
SWPT LOGF
BWAUTO 1
POIN {}
FORM4
MEAS {{}}
PHAU DEG
STAR {} HZ
STOP {} HZ
POWE {}
FMT LINM""".format(nPoints, f1, f2, power)
for channel in channels:
#print "press enter to measure channel {}".format(channel), raw_input()
# Configure analyzer for measurements
print "Configuring analyzer for measurement of channel {}".format(channel)
commandList = commands.format(channel).split("\n")
for cmd in commandList:
fra.write(cmd.strip())
time.sleep(15)
# Get sweep duration
t = 10
try:
duration = fra.query("SWET?").strip()
t = float(duration)
except:
print "failed to convert to float: ", duration
t = 180
print "sweep time: {}".format(t)
# Make measurement
t0 = time.time()
fra.write("SING")
while time.time() < t0 + t + 2:
print "waiting"
time.sleep(1)
# Read data from analyzer
for fmt in fmts:
print "Reading Channel {}: {} ...".format(channel, fmt),
fra.write("FMT {}".format(fmt))
# results are read as list of x1,y1,x2,y2,x3,y3... where every yn value is 0.
# this line splits the list at every comma, strips out every second value, and converts to floats
response = fra.query("OUTPDTRC?")
print "done - {} bytes".format(len(response))
results[channel][fmt] = map(float, response.strip().split(",")[::2])
# Read x-axis values (frequency points)
freqs = fra.query("OUTPSWPRM?").strip()
freqs = map(float, freqs.split(","))
timestamp = datetime.datetime.now().isoformat()
print "saving file"
filename = "sweepResults_{}.csv".format(filename)
output = open(filename, "w")
output.write("Impedance Measurement Performed with an Agilent 4395A Network Analyzer\n")
output.write("File generated on: {}\n".format(timestamp))
output.write("Rs = {} ohms\n".format(Rs))
output.write("Impedance Calculation: Rs x (Va - Vb) / Vb\n")
output.write("Start Frequency: {} Hz\n".format(f1))
output.write("Stop Frequency: {} Hz\n".format(f2))
output.write("Number of data points: {}\n".format(nPoints))
output.write("Source Power (dB): {}\n".format(power))
output.write("Measurement BW: auto \n")
output.write("\n") # Store additional info here
output.write("\n") # Store additional info here
output.write("Frequency,Va (real),Va (imag),Vb (real),Vb (imag),Va Mag,Va Phase,Vb Mag,Vb Phase,Impedance Mag,Impedance Phase\n")
for i in range(nPoints):
freq = freqs[i]
VaReal = results["A"]["REAL"][i]
VaImag = results["A"]["IMAG"][i]
VbReal = results["B"]["REAL"][i]
VbImag = results["B"]["IMAG"][i]
Va = VaReal + 1j * VaImag
Vb = VbReal + 1j * VbImag
Z = Rs * (Va - Vb) / Vb
VaMag, VaPhase = cmath.polar(Va)
VbMag, VbPhase = cmath.polar(Vb)
ZMag, ZPhase = cmath.polar(Z)
VaPhase = 180 * VaPhase / cmath.pi
VbPhase = 180 * VbPhase / cmath.pi
ZPhase = 180 * ZPhase / cmath.pi
output.write("{},{},{},{},{},{},{},{},{},{},{}\n".format(
freq, VaReal, VaImag, VbReal, VbImag,
VaMag, VaPhase, VbMag, VbPhase, ZMag, ZPhase))
output.close()
fra.disconnect()
#time.sleep(1)
draw(filename)
exit() | gpl-3.0 | -4,691,533,418,516,130,000 | 29.660287 | 133 | 0.562963 | false | 3.614754 | false | false | false |
jhajek/euca2ools | euca2ools/commands/iam/getinstanceprofile.py | 5 | 2211 | # Copyright 2014-2015 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from requestbuilder import Arg
from euca2ools.commands.iam import IAMRequest, AS_ACCOUNT, arg_iprofile
class GetInstanceProfile(IAMRequest):
DESCRIPTION = "Display an instance profile's ARN and GUID"
ARGS = [arg_iprofile(
help='name of the instance profile to describe (required)'),
Arg('-r', dest='show_roles', action='store_true', route_to=None,
help='''also list the roles associated with the instance
profile'''),
AS_ACCOUNT]
LIST_TAGS = ['Roles']
def print_result(self, result):
print result.get('InstanceProfile', {}).get('Arn')
print result.get('InstanceProfile', {}).get('InstanceProfileId')
if self.args.get('show_roles'):
for role in result.get('InstanceProfile', {}).get('Roles') or []:
print role.get('Arn')
| bsd-2-clause | -2,087,599,757,317,781,800 | 47.065217 | 77 | 0.720036 | false | 4.439759 | false | false | false |
fdroidtravis/fdroidserver | fdroidserver/rewritemeta.py | 1 | 3253 | #!/usr/bin/env python3
#
# rewritemeta.py - part of the FDroid server tools
# This cleans up the original .yml metadata file format.
# Copyright (C) 2010-12, Ciaran Gultnieks, [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from argparse import ArgumentParser
import os
import logging
import io
import tempfile
import shutil
from . import _
from . import common
from . import metadata
config = None
options = None
def proper_format(app):
s = io.StringIO()
# TODO: currently reading entire file again, should reuse first
# read in metadata.py
with open(app.metadatapath, 'r', encoding='utf-8') as f:
cur_content = f.read()
if app.metadatapath.endswith('.yml'):
metadata.write_yaml(s, app)
content = s.getvalue()
s.close()
return content == cur_content
def main():
global config, options
parser = ArgumentParser()
common.setup_global_opts(parser)
parser.add_argument("-l", "--list", action="store_true", default=False,
help=_("List files that would be reformatted"))
parser.add_argument("appid", nargs='*', help=_("application ID of file to operate on"))
metadata.add_metadata_arguments(parser)
options = parser.parse_args()
metadata.warnings_action = options.W
config = common.read_config(options)
# Get all apps...
allapps = metadata.read_metadata(options.appid)
apps = common.read_app_args(options.appid, allapps, False)
for appid, app in apps.items():
path = app.metadatapath
if path.endswith('.yml'):
logging.info(_("Rewriting '{appid}'").format(appid=appid))
else:
logging.warning(_('Cannot rewrite "{path}"').format(path=path))
continue
if options.list:
if not proper_format(app):
print(path)
continue
newbuilds = []
for build in app.get('Builds', []):
new = metadata.Build()
for k in metadata.build_flags:
v = build[k]
if v is None or v is False or v == [] or v == '':
continue
new[k] = v
newbuilds.append(new)
app['Builds'] = newbuilds
# rewrite to temporary file before overwriting existsing
# file in case there's a bug in write_metadata
with tempfile.TemporaryDirectory() as tmpdir:
tmp_path = os.path.join(tmpdir, os.path.basename(path))
metadata.write_metadata(tmp_path, app)
shutil.move(tmp_path, path)
logging.debug(_("Finished"))
if __name__ == "__main__":
main()
| agpl-3.0 | -6,551,851,765,028,590,000 | 30.892157 | 91 | 0.639102 | false | 3.991411 | false | false | false |
cpsaltis/pythogram-core | src/gramcore/data/arrays.py | 1 | 6962 | """Imports/exports arrays and generates artificial ones.
The artificial data are DTMs and DSMs are basically numpy arrays with height
values. All sizes and 2D coordinates refer to array elements, with (0==row,
0==column) being the top left cell.
"""
import numpy
from scipy.ndimage import measurements
def asarray(parameters):
"""Converts a PIL image to a numpy array.
:param parameters['data']: the input image, takes only one
:type parameters['data']: PIL.Image
:return: numpy.array
"""
return numpy.asarray(parameters['data'][0])
def get_shape(parameters):
"""Returns the shape of the input array.
:param parameters['data']: the input array, takes only one
:type parameters['data']: numpy.array
:return: tuple
"""
return parameters['data'][0].shape
def gaussian_noise(parameters):
"""Generates gaussian noise.
.. warning::
If this is to be applied to images keep in mind that the values should
be integers and that adding noise will push some pixel values over the
supports color depth. e.g. In an 8 bit grey image, normally taking
color values in [0, 255] adding noise to it will make some pixels take
color values > 255. Scaling these pixels to become white will result
in more white pixels than expected.
:param parameters['data']: the input array
:type parameters['data']: numpy.array
:param parameters['mean']: mean value of the distribution
:type parameters['mean']: float
:param parameters['stddev']: standard deviation of the distribution
:type parameters['stddev']: float
:return: numpy.array
"""
return numpy.random.normal(parameters['mean'],
parameters['stddev'],
parameters['shape'])
def load(parameters):
"""Loads an array from file and returns it.
It supports loading from txt and npy files.
:param parameters['path']: path to the file
:type parameters['path']: string
:param parameters['delimiter']: select which delimiter to use for loading
a txt to an array, defaults to space
:type parameters['delimiter']: string
:return: numpy.array
"""
path = parameters['path']
extension = path.split('.').pop()
if extension in 'txt':
delimiter = parameters.get('delimiter', ' ')
return numpy.loadtxt(path, delimiter=delimiter)
elif extension in 'npy':
return numpy.load(path)
else:
raise TypeError("Filetype not supported")
def save(parameters):
"""Saves an object to a file.
It supports saving to txt and npy files.
:param parameters['data']: the object to be saved, takes only one
:type parameters['data']: numpy.array
:param parameters['path']: destination path
:type parameters['path']: string
:param parameters['format']: select output format, defaults to '%.2f'
:type parameters['format']: string
:param parameters['delimiter']: select which delimiter to use for saving a
txt to an array, defaults to space
:type parameters['delimiter']: string
:return: True or raise TypeError
"""
path = parameters['path']
data = parameters['data'][0]
extension = path.split('.').pop()
if extension in 'txt':
format = parameters.get('fmt', '%.2f')
delimiter = parameters.get('delimiter', ' ')
numpy.savetxt(path, data, fmt=format, delimiter=delimiter)
elif extension in 'npy':
numpy.save(path, data)
else:
raise TypeError("Filetype not supported")
return True
def split(parameters):
"""Splits a 3D array and returns only the layer requested.
:param parameters['data']: the input 3D array, takes only one
:type parameters['data']: numpy.array
:param parameters['layer']: the 2D layer to return, 0 is the first one
:type parameters['layer']: numpy.array
:return: 2D numpy.array
"""
return parameters['data'][0][:, :, parameters['layer']]
def dtm(parameters):
"""Generates a DTM with linear slope.
Slope is applied in row major order, so pixels in each row have the same
height value.
:param parameters['slope_step']: height difference for neighbouring cells
:type parameters['slope_step']: float or integer
:param parameters['min_value']: global minimum height value
:type parameters['min_value']: float or integer
:param parameters['size']: the size of the surface in [rows, columns]
:type parameters['size']: list
:return: numpy.array
"""
slope_step = parameters['slope_step']
min_value = parameters['min_value']
size = parameters['size']
data = numpy.zeros(size, dtype=float)
for i in range(size[0]):
data[i, :] = numpy.arange(min_value, size[1], slope_step)
return data
def dsm(parameters):
"""Generates a DSM by elevating groups a cells by certain height.
This requires an input array, the DTM, and a mask. The mask designates
which cells of the DTM should be elevated in order to produce the DSM.
Basically, the mask shows in which cells there are features with
significant height, e.g. trees, buildings etc.
The tricky part it to account for DTM slope when elevating a group of
cells. If you simply add some height to the initial DTM then the features
will be elevated parallel to the ground. Especially in the case of
buildings, their roof is horizontal, regardless of the underlying DTM
slope.
To account for this, the algorithm initially labels the mask. As a result
you get groups of cells which should all be elevated to the same height.
Next, it finds the maximum height value of underlying DTM for each blob.
Finally, it assigns `max_blob_height + delta_height` to each blob cell.
:param parameters['data'][0]: the base DTM
:type parameters['data'][0]: numpy.array
:param parameters['data'][1]: the mask of cells to elevate
:type parameters['data'][1]: numpy.array with boolean/binary values
:param parameters['delta_height']: single cell elevation value
:type parameters['delta_height']: float or integer
:return: numpy.array
"""
dtm = parameters['data'][0]
mask = parameters['data'][1]
delta_height = parameters['delta_height']
# label and find the max height of each blob
labels, count = measurements.label(mask)
max_heights = measurements.maximum(dtm,
labels=labels,
index=range(1, count + 1))
# assign the max height at each blob cell, required to copy so it won't
# change the initial dtm values
dsm = dtm.copy()
for blob_id in range(1, count + 1):
dsm[numpy.where(labels == blob_id)] = max_heights[blob_id - 1] +\
delta_height
return dsm
| mit | -8,977,862,224,966,665,000 | 31.995261 | 78 | 0.652973 | false | 4.329602 | false | false | false |
CPLUG/cplug.org-backend | index/migrations/0001_initial.py | 1 | 1643 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-09-14 05:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=80, verbose_name='Name')),
('time', models.DateTimeField(verbose_name='Date and Time')),
('description', models.TextField(verbose_name='Description')),
],
),
migrations.CreateModel(
name='Officer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='Name')),
('position', models.CharField(max_length=30, verbose_name='Position')),
('username', models.CharField(blank=True, max_length=30, verbose_name='Username')),
('email', models.EmailField(max_length=254, verbose_name='Email')),
('website', models.URLField(blank=True, verbose_name='Website')),
('github', models.URLField(blank=True, verbose_name='GitHub')),
('linkedin', models.URLField(blank=True, verbose_name='LinkedIn')),
('description', models.CharField(max_length=200, verbose_name='Description')),
],
),
]
| mit | -5,968,599,745,039,652,000 | 41.128205 | 114 | 0.576993 | false | 4.335092 | false | false | false |
minghuascode/pyj | examples/controls/ControlDemo.py | 8 | 5722 | """ ControlDemo Example
Bill Winder <[email protected]> added HorizontalSlider demo.
Bill Winder <[email protected]> added AreaSlider demo.
"""
import pyjd # dummy in pyjs
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.CaptionPanel import CaptionPanel
from pyjamas.ui.Label import Label
from pyjamas.ui.Controls import VerticalDemoSlider
from pyjamas.ui.Controls import VerticalDemoSlider2
from pyjamas.ui.Controls import HorizontalDemoSlider
from pyjamas.ui.Controls import HorizontalDemoSlider2
from pyjamas.ui.Controls import AreaDemoSlider
from pyjamas.ui.Controls import AreaDemoSlider2
from pyjamas.ui.Controls import InputControl
from pyjamas.ui.MouseInputControl import MouseInputControl
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui import HasAlignment
class SliderClass(VerticalPanel):
""" example of control which pairs up two other controls.
should really be made into a control itself.
"""
def __init__(self, p2):
VerticalPanel.__init__(self)
self.setSpacing(10)
if p2:
self.b = VerticalDemoSlider2(0, 100)
else:
self.b = VerticalDemoSlider(0, 100)
self.add(self.b)
self.b.setWidth("20px")
self.b.setHeight("100px")
self.b.addControlValueListener(self)
self.label = InputControl(0, 100)
self.add(self.label)
self.label.addControlValueListener(self)
def onControlValueChanged(self, sender, old_value, new_value):
if sender == self.label:
self.b.setControlPos(new_value)
self.b.setValue(new_value, 0)
if sender == self.b:
self.label.setControlPos(new_value)
self.label.setValue(new_value, 0)
class HSliderClass(VerticalPanel):
""" example of control which pairs up two other controls.
should really be made into a control itself.
"""
def __init__(self, p2):
VerticalPanel.__init__(self)
self.setSpacing(10)
if p2:
self.b = HorizontalDemoSlider2(0, 100)
else:
self.b = HorizontalDemoSlider(0, 100)
self.add(self.b)
self.b.setHeight("20px")
self.b.setWidth("100px")
self.b.addControlValueListener(self)
self.label = InputControl(0, 100)
self.add(self.label)
self.label.addControlValueListener(self)
def onControlValueChanged(self, sender, old_value, new_value):
if sender == self.label:
self.b.setControlPos(new_value)
self.b.setValue(new_value, 0)
if sender == self.b:
self.label.setControlPos(new_value)
self.label.setValue(new_value, 0)
class ASliderClass(VerticalPanel):
""" example of control which pairs up two other controls.
should really be made into a control itself.
"""
def __init__(self, p2):
VerticalPanel.__init__(self)
self.setSpacing(10)
if p2:
self.b = AreaDemoSlider2([0,0], [100,100], [0.2, 0.2])
else:
self.b = AreaDemoSlider([0,0], [100,100], [0.2, 0.2])
self.add(self.b)
self.b.setHeight("100px")
self.b.setWidth("100px")
self.b.addControlValueListener(self)
self.label_x = MouseInputControl(0, 100, 0.2)
self.add(self.label_x)
self.label_x.addControlValueListener(self)
self.label_y = MouseInputControl(0, 100, 0.2)
self.add(self.label_y)
self.label_y.addControlValueListener(self)
def onControlValueChanged(self, sender, old_value_xy, new_value_xy):
#no use of old_values? (old_value_x,old_value_y)
if (sender == self.label_x):
self.b.setControlPos([new_value_xy, self.b.value_y])
self.b.setValue([new_value_xy, self.b.value_y], 0)
elif (sender == self.label_y):
self.b.setControlPos([self.b.value_x, new_value_xy])
self.b.setValue([self.b.value_x, new_value_xy], 0)
elif (sender == self.b):
(new_value_x,new_value_y) = new_value_xy
self.label_x.setControlPos(new_value_x)
self.label_x.setValue(new_value_x, 0)
self.label_y.setControlPos(new_value_y)
self.label_y.setValue(new_value_y, 0)
class ControlDemo:
def onModuleLoad(self):
v = VerticalPanel(Spacing=10)
p = HorizontalPanel(Spacing=10,
VerticalAlignment=HasAlignment.ALIGN_BOTTOM)
sc = SliderClass(False)
p.add(CaptionPanel("clickable only", sc))
sc = SliderClass(True)
p.add(CaptionPanel("draggable", sc))
sc = SliderClass(True)
p.add(CaptionPanel("draggable", sc))
v.add(CaptionPanel("Vertical Sliders with inputboxes", p))
p = HorizontalPanel()
p.setSpacing(10)
p.setVerticalAlignment(HasAlignment.ALIGN_BOTTOM)
sc = HSliderClass(False)
p.add(CaptionPanel("clickable only", sc))
sc = HSliderClass(True)
p.add(CaptionPanel("draggable", sc))
v.add(CaptionPanel("Horizontal Sliders with inputboxes", p))
p = HorizontalPanel()
p.setSpacing(10)
p.setVerticalAlignment(HasAlignment.ALIGN_BOTTOM)
sc = ASliderClass(False)
p.add(CaptionPanel("clickable only", sc))
sc = ASliderClass(True)
p.add(CaptionPanel("draggable", sc))
v.add(CaptionPanel("2D Controls: Inputboxes are draggable as well", p))
RootPanel().add(v)
if __name__ == '__main__':
pyjd.setup("./public/ControlDemo.html")
app = ControlDemo()
app.onModuleLoad()
pyjd.run()
| apache-2.0 | 6,881,778,421,363,607,000 | 29.92973 | 79 | 0.632296 | false | 3.467879 | false | false | false |
matk86/pymatgen | pymatgen/command_line/tests/test_critic2_caller.py | 6 | 3331 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import unittest
from pymatgen import Structure
from pymatgen.command_line.critic2_caller import *
from monty.os.path import which
__author__ = "Matthew Horton"
__version__ = "0.1"
__maintainer__ = "Matthew Horton"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "July 2017"
@unittest.skipIf(not which('critic2'), "critic2 executable not present")
class Critic2CallerTest(unittest.TestCase):
def test_from_path(self):
# uses chgcars
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files/bader')
c2c = Critic2Caller.from_path(test_dir)
# check we have some results!
self.assertGreaterEqual(len(c2c._stdout), 500)
def test_from_structure(self):
# uses promolecular density
structure = Structure.from_file(os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files/critic2/MoS2.cif'))
c2c = Critic2Caller(structure)
# check we have some results!
self.assertGreaterEqual(len(c2c._stdout), 500)
class Critic2OutputTest(unittest.TestCase):
def setUp(self):
stdout_file = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files/critic2/MoS2_critic2_stdout.txt')
with open(stdout_file, 'r') as f:
reference_stdout = f.read()
structure = Structure.from_file(os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files/critic2/MoS2.cif'))
self.c2o = Critic2Output(structure, reference_stdout)
def test_properties_to_from_dict(self):
self.assertEqual(len(self.c2o.critical_points), 6)
self.assertEqual(len(self.c2o.nodes), 14)
self.assertEqual(len(self.c2o.edges), 10)
# reference dictionary for c2o.critical_points[0].as_dict()
# {'@class': 'CriticalPoint',
# '@module': 'pymatgen.command_line.critic2_caller',
# 'coords': None,
# 'field': 93848.0413,
# 'field_gradient': 0.0,
# 'field_hessian': [[-2593274446000.0, -3.873587547e-19, -1.704530713e-08],
# [-3.873587547e-19, -2593274446000.0, 1.386877485e-18],
# [-1.704530713e-08, 1.386877485e-18, -2593274446000.0]],
# 'frac_coords': [0.333333, 0.666667, 0.213295],
# 'index': 0,
# 'multiplicity': 1.0,
# 'point_group': 'D3h',
# 'type': < CriticalPointType.nucleus: 'nucleus' >}
self.assertEqual(str(self.c2o.critical_points[0].type), "CriticalPointType.nucleus")
# test connectivity
self.assertDictEqual(self.c2o.edges[3], {'from_idx': 1, 'from_lvec': (0, 0, 0),
'to_idx': 0, 'to_lvec': (1, 0, 0)})
# test as/from dict
d = self.c2o.as_dict()
self.assertEqual(set(d.keys()), {'@module', '@class',
'structure', 'critic2_stdout'})
self.c2o.from_dict(d)
if __name__ == '__main__':
unittest.main()
| mit | 1,198,286,598,272,625,000 | 36.011111 | 97 | 0.558691 | false | 3.233981 | true | false | false |
artemp/MapQuest-Render-Stack | py/tile_worker/worker_opts.py | 1 | 2608 | #!/usr/bin/python
#------------------------------------------------------------------------------
#
# Process tile worker arguments from command line
#
# Author: [email protected]
#
# Copyright 2010-1 Mapquest, Inc. All Rights reserved.
#
import os
import sys
import getopt
class worker_opts:
def __init__( self, args ):
self.argv = args
self.brokeraddress = None
self.worker_id = "pid%08d" % os.getpid()
self.mapstyle = None
self.tile_dir = None
self.timeouts = 4
#--------------------------------------------------------------------------
def usage( self ):
print "worker.py --address=192.168.0.0:8888 --mapstyle=thestyle --tiledir=/mnt/tiles --id=foo --timeouts=8"
#--------------------------------------------------------------------------
def process( self ):
try:
opts, args = getopt.getopt( self.argv, "a:d:hi:m:t:", [ "address=", "help", "id=", "mapstyle=", "tiledir=", "timeouts=" ])
except getopt.GetoptError:
self.usage()
sys.exit()
for opt, arg in opts:
if opt in ("-h", "--help"):
self.usage()
sys.exit()
elif opt in ("-a", "--address"):
self.brokeraddress = arg
elif opt in ("-t", "--tiledir"):
self.tile_dir = arg
elif opt in ("-i", "--id"):
self.worker_id = arg
elif opt in ("-m", "--mapstyle"):
self.mapstyle = arg
elif opt in ("-t", "--timeouts"):
self.tile_dir = arg
#--------------------------------------------------------------------------
def validate( self ):
if self.brokeraddress == None:
return False
if self.worker_id == None:
return False
if self.mapstyle == None:
return False
if self.tile_dir == None:
return False
return True
#--------------------------------------------------------------------------
def getBrokerAddress( self ):
return self.brokeraddress
#--------------------------------------------------------------------------
def getWorkerID( self ):
return self.worker_id
#--------------------------------------------------------------------------
def getMapStyle( self ):
return self.mapstyle
#--------------------------------------------------------------------------
def getTileDir( self ):
return self.tile_dir
#--------------------------------------------------------------------------
def getTimeouts( self ):
return self.timeouts
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
if __name__ == "__main__":
objOpts = worker_opts( sys.argv[1:] )
objOpts.process() | lgpl-2.1 | 1,195,247,672,633,795,000 | 27.358696 | 125 | 0.415261 | false | 4.094192 | false | false | false |
sergiohgz/incubator-airflow | tests/contrib/operators/test_ecs_operator.py | 7 | 8022 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import unittest
from copy import deepcopy
from airflow import configuration
from airflow.exceptions import AirflowException
from airflow.contrib.operators.ecs_operator import ECSOperator
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
RESPONSE_WITHOUT_FAILURES = {
"failures": [],
"tasks": [
{
"containers": [
{
"containerArn": "arn:aws:ecs:us-east-1:012345678910:container/e1ed7aac-d9b2-4315-8726-d2432bf11868",
"lastStatus": "PENDING",
"name": "wordpress",
"taskArn": "arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55"
}
],
"desiredStatus": "RUNNING",
"lastStatus": "PENDING",
"taskArn": "arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55",
"taskDefinitionArn": "arn:aws:ecs:us-east-1:012345678910:task-definition/hello_world:11"
}
]
}
class TestECSOperator(unittest.TestCase):
@mock.patch('airflow.contrib.operators.ecs_operator.AwsHook')
def setUp(self, aws_hook_mock):
configuration.load_test_config()
self.aws_hook_mock = aws_hook_mock
self.ecs = ECSOperator(
task_id='task',
task_definition='t',
cluster='c',
overrides={},
aws_conn_id=None,
region_name='eu-west-1')
def test_init(self):
self.assertEqual(self.ecs.region_name, 'eu-west-1')
self.assertEqual(self.ecs.task_definition, 't')
self.assertEqual(self.ecs.aws_conn_id, None)
self.assertEqual(self.ecs.cluster, 'c')
self.assertEqual(self.ecs.overrides, {})
self.assertEqual(self.ecs.hook, self.aws_hook_mock.return_value)
self.aws_hook_mock.assert_called_once_with(aws_conn_id=None)
def test_template_fields_overrides(self):
self.assertEqual(self.ecs.template_fields, ('overrides',))
@mock.patch.object(ECSOperator, '_wait_for_task_ended')
@mock.patch.object(ECSOperator, '_check_success_task')
def test_execute_without_failures(self, check_mock, wait_mock):
client_mock = self.aws_hook_mock.return_value.get_client_type.return_value
client_mock.run_task.return_value = RESPONSE_WITHOUT_FAILURES
self.ecs.execute(None)
self.aws_hook_mock.return_value.get_client_type.assert_called_once_with('ecs', region_name='eu-west-1')
client_mock.run_task.assert_called_once_with(
cluster='c',
launchType='EC2',
overrides={},
startedBy=mock.ANY, # Can by 'airflow' or 'Airflow'
taskDefinition='t'
)
wait_mock.assert_called_once_with()
check_mock.assert_called_once_with()
self.assertEqual(self.ecs.arn, 'arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55')
def test_execute_with_failures(self):
client_mock = self.aws_hook_mock.return_value.get_client_type.return_value
resp_failures = deepcopy(RESPONSE_WITHOUT_FAILURES)
resp_failures['failures'].append('dummy error')
client_mock.run_task.return_value = resp_failures
with self.assertRaises(AirflowException):
self.ecs.execute(None)
self.aws_hook_mock.return_value.get_client_type.assert_called_once_with('ecs', region_name='eu-west-1')
client_mock.run_task.assert_called_once_with(
cluster='c',
launchType='EC2',
overrides={},
startedBy=mock.ANY, # Can by 'airflow' or 'Airflow'
taskDefinition='t'
)
def test_wait_end_tasks(self):
client_mock = mock.Mock()
self.ecs.arn = 'arn'
self.ecs.client = client_mock
self.ecs._wait_for_task_ended()
client_mock.get_waiter.assert_called_once_with('tasks_stopped')
client_mock.get_waiter.return_value.wait.assert_called_once_with(cluster='c', tasks=['arn'])
self.assertEquals(sys.maxsize, client_mock.get_waiter.return_value.config.max_attempts)
def test_check_success_tasks_raises(self):
client_mock = mock.Mock()
self.ecs.arn = 'arn'
self.ecs.client = client_mock
client_mock.describe_tasks.return_value = {
'tasks': [{
'containers': [{
'name': 'foo',
'lastStatus': 'STOPPED',
'exitCode': 1
}]
}]
}
with self.assertRaises(Exception) as e:
self.ecs._check_success_task()
# Ordering of str(dict) is not guaranteed.
self.assertIn("This task is not in success state ", str(e.exception))
self.assertIn("'name': 'foo'", str(e.exception))
self.assertIn("'lastStatus': 'STOPPED'", str(e.exception))
self.assertIn("'exitCode': 1", str(e.exception))
client_mock.describe_tasks.assert_called_once_with(cluster='c', tasks=['arn'])
def test_check_success_tasks_raises_pending(self):
client_mock = mock.Mock()
self.ecs.client = client_mock
self.ecs.arn = 'arn'
client_mock.describe_tasks.return_value = {
'tasks': [{
'containers': [{
'name': 'container-name',
'lastStatus': 'PENDING'
}]
}]
}
with self.assertRaises(Exception) as e:
self.ecs._check_success_task()
# Ordering of str(dict) is not guaranteed.
self.assertIn("This task is still pending ", str(e.exception))
self.assertIn("'name': 'container-name'", str(e.exception))
self.assertIn("'lastStatus': 'PENDING'", str(e.exception))
client_mock.describe_tasks.assert_called_once_with(cluster='c', tasks=['arn'])
def test_check_success_tasks_raises_mutliple(self):
client_mock = mock.Mock()
self.ecs.client = client_mock
self.ecs.arn = 'arn'
client_mock.describe_tasks.return_value = {
'tasks': [{
'containers': [{
'name': 'foo',
'exitCode': 1
}, {
'name': 'bar',
'lastStatus': 'STOPPED',
'exitCode': 0
}]
}]
}
self.ecs._check_success_task()
client_mock.describe_tasks.assert_called_once_with(cluster='c', tasks=['arn'])
def test_check_success_task_not_raises(self):
client_mock = mock.Mock()
self.ecs.client = client_mock
self.ecs.arn = 'arn'
client_mock.describe_tasks.return_value = {
'tasks': [{
'containers': [{
'name': 'container-name',
'lastStatus': 'STOPPED',
'exitCode': 0
}]
}]
}
self.ecs._check_success_task()
client_mock.describe_tasks.assert_called_once_with(cluster='c', tasks=['arn'])
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 4,431,884,348,846,554,600 | 35.298643 | 120 | 0.592122 | false | 3.727695 | true | false | false |
ActiveState/code | recipes/Python/580698_Reversi_Othello/recipe-580698.py | 1 | 12673 | # Reversi/Othello Board Game using Minimax and Alpha-Beta Pruning
# https://en.wikipedia.org/wiki/Reversi
# https://en.wikipedia.org/wiki/Computer_Othello
# https://en.wikipedia.org/wiki/Minimax
# https://en.wikipedia.org/wiki/Alpha%E2%80%93beta_pruning
# https://en.wikipedia.org/wiki/Negamax
# https://en.wikipedia.org/wiki/Principal_variation_search
# FB36 - 20160831
import os, copy
n = 8 # board size (even)
board = [['0' for x in range(n)] for y in range(n)]
# 8 directions
dirx = [-1, 0, 1, -1, 1, -1, 0, 1]
diry = [-1, -1, -1, 0, 0, 1, 1, 1]
def InitBoard():
if n % 2 == 0: # if board size is even
z = (n - 2) / 2
board[z][z] = '2'
board[n - 1 - z][z] = '1'
board[z][n - 1 - z] = '1'
board[n - 1 - z][n - 1 - z] = '2'
def PrintBoard():
m = len(str(n - 1))
for y in range(n):
row = ''
for x in range(n):
row += board[y][x]
row += ' ' * m
print row + ' ' + str(y)
print
row = ''
for x in range(n):
row += str(x).zfill(m) + ' '
print row + '\n'
def MakeMove(board, x, y, player): # assuming valid move
totctr = 0 # total number of opponent pieces taken
board[y][x] = player
for d in range(8): # 8 directions
ctr = 0
for i in range(n):
dx = x + dirx[d] * (i + 1)
dy = y + diry[d] * (i + 1)
if dx < 0 or dx > n - 1 or dy < 0 or dy > n - 1:
ctr = 0; break
elif board[dy][dx] == player:
break
elif board[dy][dx] == '0':
ctr = 0; break
else:
ctr += 1
for i in range(ctr):
dx = x + dirx[d] * (i + 1)
dy = y + diry[d] * (i + 1)
board[dy][dx] = player
totctr += ctr
return (board, totctr)
def ValidMove(board, x, y, player):
if x < 0 or x > n - 1 or y < 0 or y > n - 1:
return False
if board[y][x] != '0':
return False
(boardTemp, totctr) = MakeMove(copy.deepcopy(board), x, y, player)
if totctr == 0:
return False
return True
minEvalBoard = -1 # min - 1
maxEvalBoard = n * n + 4 * n + 4 + 1 # max + 1
def EvalBoard(board, player):
tot = 0
for y in range(n):
for x in range(n):
if board[y][x] == player:
if (x == 0 or x == n - 1) and (y == 0 or y == n - 1):
tot += 4 # corner
elif (x == 0 or x == n - 1) or (y == 0 or y == n - 1):
tot += 2 # side
else:
tot += 1
return tot
# if no valid move(s) possible then True
def IsTerminalNode(board, player):
for y in range(n):
for x in range(n):
if ValidMove(board, x, y, player):
return False
return True
def GetSortedNodes(board, player):
sortedNodes = []
for y in range(n):
for x in range(n):
if ValidMove(board, x, y, player):
(boardTemp, totctr) = MakeMove(copy.deepcopy(board), x, y, player)
sortedNodes.append((boardTemp, EvalBoard(boardTemp, player)))
sortedNodes = sorted(sortedNodes, key = lambda node: node[1], reverse = True)
sortedNodes = [node[0] for node in sortedNodes]
return sortedNodes
def Minimax(board, player, depth, maximizingPlayer):
if depth == 0 or IsTerminalNode(board, player):
return EvalBoard(board, player)
if maximizingPlayer:
bestValue = minEvalBoard
for y in range(n):
for x in range(n):
if ValidMove(board, x, y, player):
(boardTemp, totctr) = MakeMove(copy.deepcopy(board), x, y, player)
v = Minimax(boardTemp, player, depth - 1, False)
bestValue = max(bestValue, v)
else: # minimizingPlayer
bestValue = maxEvalBoard
for y in range(n):
for x in range(n):
if ValidMove(board, x, y, player):
(boardTemp, totctr) = MakeMove(copy.deepcopy(board), x, y, player)
v = Minimax(boardTemp, player, depth - 1, True)
bestValue = min(bestValue, v)
return bestValue
def AlphaBeta(board, player, depth, alpha, beta, maximizingPlayer):
if depth == 0 or IsTerminalNode(board, player):
return EvalBoard(board, player)
if maximizingPlayer:
v = minEvalBoard
for y in range(n):
for x in range(n):
if ValidMove(board, x, y, player):
(boardTemp, totctr) = MakeMove(copy.deepcopy(board), x, y, player)
v = max(v, AlphaBeta(boardTemp, player, depth - 1, alpha, beta, False))
alpha = max(alpha, v)
if beta <= alpha:
break # beta cut-off
return v
else: # minimizingPlayer
v = maxEvalBoard
for y in range(n):
for x in range(n):
if ValidMove(board, x, y, player):
(boardTemp, totctr) = MakeMove(copy.deepcopy(board), x, y, player)
v = min(v, AlphaBeta(boardTemp, player, depth - 1, alpha, beta, True))
beta = min(beta, v)
if beta <= alpha:
break # alpha cut-off
return v
def AlphaBetaSN(board, player, depth, alpha, beta, maximizingPlayer):
if depth == 0 or IsTerminalNode(board, player):
return EvalBoard(board, player)
sortedNodes = GetSortedNodes(board, player)
if maximizingPlayer:
v = minEvalBoard
for boardTemp in sortedNodes:
v = max(v, AlphaBetaSN(boardTemp, player, depth - 1, alpha, beta, False))
alpha = max(alpha, v)
if beta <= alpha:
break # beta cut-off
return v
else: # minimizingPlayer
v = maxEvalBoard
for boardTemp in sortedNodes:
v = min(v, AlphaBetaSN(boardTemp, player, depth - 1, alpha, beta, True))
beta = min(beta, v)
if beta <= alpha:
break # alpha cut-off
return v
def Negamax(board, player, depth, color):
if depth == 0 or IsTerminalNode(board, player):
return color * EvalBoard(board, player)
bestValue = minEvalBoard
for y in range(n):
for x in range(n):
if ValidMove(board, x, y, player):
(boardTemp, totctr) = MakeMove(copy.deepcopy(board), x, y, player)
v = -Negamax(boardTemp, player, depth - 1, -color)
bestValue = max(bestValue, v)
return bestValue
def NegamaxAB(board, player, depth, alpha, beta, color):
if depth == 0 or IsTerminalNode(board, player):
return color * EvalBoard(board, player)
bestValue = minEvalBoard
for y in range(n):
for x in range(n):
if ValidMove(board, x, y, player):
(boardTemp, totctr) = MakeMove(copy.deepcopy(board), x, y, player)
v = -NegamaxAB(boardTemp, player, depth - 1, -beta, -alpha, -color)
bestValue = max(bestValue, v)
alpha = max(alpha, v)
if alpha >= beta:
break
return bestValue
def NegamaxABSN(board, player, depth, alpha, beta, color):
if depth == 0 or IsTerminalNode(board, player):
return color * EvalBoard(board, player)
sortedNodes = GetSortedNodes(board, player)
bestValue = minEvalBoard
for boardTemp in sortedNodes:
v = -NegamaxABSN(boardTemp, player, depth - 1, -beta, -alpha, -color)
bestValue = max(bestValue, v)
alpha = max(alpha, v)
if alpha >= beta:
break
return bestValue
def Negascout(board, player, depth, alpha, beta, color):
if depth == 0 or IsTerminalNode(board, player):
return color * EvalBoard(board, player)
firstChild = True
for y in range(n):
for x in range(n):
if ValidMove(board, x, y, player):
(boardTemp, totctr) = MakeMove(copy.deepcopy(board), x, y, player)
if not firstChild:
score = -Negascout(boardTemp, player, depth - 1, -alpha - 1, -alpha, -color)
if alpha < score and score < beta:
score = -Negascout(boardTemp, player, depth - 1, -beta, -score, -color)
else:
firstChild = False
score = -Negascout(boardTemp, player, depth - 1, -beta, -alpha, -color)
alpha = max(alpha, score)
if alpha >= beta:
break
return alpha
def NegascoutSN(board, player, depth, alpha, beta, color):
if depth == 0 or IsTerminalNode(board, player):
return color * EvalBoard(board, player)
sortedNodes = GetSortedNodes(board, player)
firstChild = True
for boardTemp in sortedNodes:
if not firstChild:
score = -NegascoutSN(boardTemp, player, depth - 1, -alpha - 1, -alpha, -color)
if alpha < score and score < beta:
score = -NegascoutSN(boardTemp, player, depth - 1, -beta, -score, -color)
else:
firstChild = False
score = -NegascoutSN(boardTemp, player, depth - 1, -beta, -alpha, -color)
alpha = max(alpha, score)
if alpha >= beta:
break
return alpha
def BestMove(board, player):
maxPoints = 0
mx = -1; my = -1
for y in range(n):
for x in range(n):
if ValidMove(board, x, y, player):
(boardTemp, totctr) = MakeMove(copy.deepcopy(board), x, y, player)
if opt == 0:
points = EvalBoard(boardTemp, player)
elif opt == 1:
points = Minimax(boardTemp, player, depth, True)
elif opt == 2:
                    points = AlphaBeta(boardTemp, player, depth, minEvalBoard, maxEvalBoard, True)
elif opt == 3:
points = Negamax(boardTemp, player, depth, 1)
elif opt == 4:
points = NegamaxAB(boardTemp, player, depth, minEvalBoard, maxEvalBoard, 1)
elif opt == 5:
points = Negascout(boardTemp, player, depth, minEvalBoard, maxEvalBoard, 1)
elif opt == 6:
                    points = AlphaBetaSN(boardTemp, player, depth, minEvalBoard, maxEvalBoard, True)
elif opt == 7:
points = NegamaxABSN(boardTemp, player, depth, minEvalBoard, maxEvalBoard, 1)
elif opt == 8:
points = NegascoutSN(boardTemp, player, depth, minEvalBoard, maxEvalBoard, 1)
if points > maxPoints:
maxPoints = points
mx = x; my = y
return (mx, my)
print 'REVERSI/OTHELLO BOARD GAME'
print '0: EvalBoard'
print '1: Minimax'
print '2: Minimax w/ Alpha-Beta Pruning'
print '3: Negamax'
print '4: Negamax w/ Alpha-Beta Pruning'
print '5: Negascout (Principal Variation Search)'
print '6: Minimax w/ Alpha-Beta Pruning w/ Sorted Nodes'
print '7: Negamax w/ Alpha-Beta Pruning w/ Sorted Nodes'
print '8: Negascout (Principal Variation Search) w/ Sorted Nodes'
opt = int(raw_input('Select AI Algorithm: '))
if opt > 0 and opt < 9:
depth = 4
depthStr = raw_input('Select Search Depth (DEFAULT: 4): ')
    if depthStr != '': depth = int(depthStr)
print '\n1: User 2: AI (Just press Enter for Exit!)'
InitBoard()
while True:
for p in range(2):
print
PrintBoard()
player = str(p + 1)
print 'PLAYER: ' + player
if IsTerminalNode(board, player):
print 'Player cannot play! Game ended!'
print 'Score User: ' + str(EvalBoard(board, '1'))
print 'Score AI : ' + str(EvalBoard(board, '2'))
os._exit(0)
if player == '1': # user's turn
while True:
xy = raw_input('X Y: ')
if xy == '': os._exit(0)
(x, y) = xy.split()
x = int(x); y = int(y)
if ValidMove(board, x, y, player):
(board, totctr) = MakeMove(board, x, y, player)
print '# of pieces taken: ' + str(totctr)
break
else:
print 'Invalid move! Try again!'
else: # AI's turn
(x, y) = BestMove(board, player)
if not (x == -1 and y == -1):
(board, totctr) = MakeMove(board, x, y, player)
print 'AI played (X Y): ' + str(x) + ' ' + str(y)
print '# of pieces taken: ' + str(totctr)
| mit | -6,430,002,800,367,195,000 | 37.874233 | 97 | 0.527184 | false | 3.603355 | false | false | false |
stephane-martin/salt-debian-packaging | salt-2016.3.3/tests/unit/states/hg_test.py | 2 | 1920 | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rahul Handay <[email protected]>`
'''
# Import Python libs
from __future__ import absolute_import
import os
# Import Salt Libs
from salt.states import hg
# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch
)
ensure_in_syspath('../../')
hg.__opts__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class HgTestCase(TestCase):
'''
Validate the svn state
'''
def test_latest(self):
'''
Test to Make sure the repository is cloned to
the given directory and is up to date
'''
ret = {'changes': {}, 'comment': '', 'name': 'salt', 'result': True}
mock = MagicMock(return_value=True)
with patch.object(hg, '_fail', mock):
self.assertTrue(hg.latest("salt"))
mock = MagicMock(side_effect=[False, True, False, False, False, False])
with patch.object(os.path, 'isdir', mock):
mock = MagicMock(return_value=True)
with patch.object(hg, '_handle_existing', mock):
self.assertTrue(hg.latest("salt", target="c:\\salt"))
with patch.dict(hg.__opts__, {'test': True}):
mock = MagicMock(return_value=True)
with patch.object(hg, '_neutral_test', mock):
self.assertTrue(hg.latest("salt", target="c:\\salt"))
with patch.dict(hg.__opts__, {'test': False}):
mock = MagicMock(return_value=True)
with patch.object(hg, '_clone_repo', mock):
self.assertDictEqual(hg.latest("salt", target="c:\\salt"),
ret)
if __name__ == '__main__':
from integration import run_tests
run_tests(HgTestCase, needs_daemon=False)
| apache-2.0 | -7,226,903,616,466,534,000 | 29.967742 | 79 | 0.570833 | false | 3.847695 | true | false | false |
Habbie/weakforced | regression-tests/test_helper.py | 1 | 3021 | from datetime import datetime
import os
import requests
import urlparse
import unittest
import json
from subprocess import call, check_output
DAEMON = os.environ.get('DAEMON', 'authoritative')
class ApiTestCase(unittest.TestCase):
def setUp(self):
# TODO: config
self.server_address = '127.0.0.1'
self.server_port = int(os.environ.get('WEBPORT', '8084'))
self.server_url = 'http://%s:%s/' % (self.server_address, self.server_port)
self.session = requests.Session()
self.session.auth = ('foo', os.environ.get('APIKEY', 'super'))
#self.session.keep_alive = False
# self.session.headers = {'X-API-Key': os.environ.get('APIKEY', 'changeme-key'), 'Origin': 'http://%s:%s' % (self.server_address, self.server_port)}
def writeFileToConsole(self, file):
fp = open(file)
cmds_nl = fp.read()
# Lua doesn't need newlines and the console gets confused by them e.g.
# function definitions
cmds = cmds_nl.replace("\n", " ")
return call(["../wforce", "-c", "../wforce.conf", "-e", cmds])
def writeCmdToConsole(self, cmd):
return check_output(["../wforce", "-c", "../wforce.conf", "-e", cmd])
def allowFunc(self, login, remote, pwhash):
return self.allowFuncAttrs(login, remote, pwhash, {})
def allowFuncAttrs(self, login, remote, pwhash, attrs):
payload = dict()
payload['login'] = login
payload['remote'] = remote
payload['pwhash'] = pwhash
payload['attrs'] = attrs
return self.session.post(
self.url("/?command=allow"),
data=json.dumps(payload),
headers={'Content-Type': 'application/json'})
def reportFunc(self, login, remote, pwhash, success):
return self.reportFuncAttrs(login, remote, pwhash, success, {})
def reportFuncAttrs(self, login, remote, pwhash, success, attrs):
payload = dict()
payload['login'] = login
payload['remote'] = remote
payload['pwhash'] = pwhash
payload['success'] = success
payload['attrs'] = attrs
return self.session.post(
self.url("/?command=report"),
data=json.dumps(payload),
headers={'Content-Type': 'application/json'})
def resetFunc(self, login, ip):
payload = dict()
payload['login'] = login
payload['ip'] = ip
return self.session.post(
self.url("/?command=reset"),
data=json.dumps(payload),
headers={'Content-Type': 'application/json'})
def pingFunc(self):
return self.session.get(self.url("/?command=ping"))
def url(self, relative_url):
return urlparse.urljoin(self.server_url, relative_url)
def assert_success_json(self, result):
try:
result.raise_for_status()
except:
print result.content
raise
self.assertEquals(result.headers['Content-Type'], 'application/json')
| gpl-2.0 | 5,715,002,648,871,436,000 | 34.127907 | 163 | 0.594505 | false | 3.814394 | false | false | false |
UTSA-ICS/python-openstackclient-SID | tools/install_venv.py | 5 | 2151 | # Copyright 2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Installation script for python-openstackclient's development virtualenv
"""
import os
import sys
import install_venv_common as install_venv
def print_help():
help = """
python-openstackclient development environment setup is complete.
python-openstackclient development uses virtualenv to track and manage Python
dependencies while in development and testing.
To activate the python-openstackclient virtualenv for the extent of your current
shell session you can run:
$ source .venv/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ tools/with_venv.sh <your command>
Also, make test will automatically use the virtualenv.
"""
print help
def main(argv):
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
venv = os.path.join(root, ".venv")
pip_requires = os.path.join(root, "requirements.txt")
test_requires = os.path.join(root, "test-requirements.txt")
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
project = "python-openstackclient"
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
py_version, project)
options = install.parse_args(argv)
install.check_python_version()
install.check_dependencies()
install.create_virtualenv(no_site_packages=options.no_site_packages)
install.install_dependencies()
print_help()
if __name__ == "__main__":
main(sys.argv)
| apache-2.0 | -4,115,733,392,282,553,300 | 31.590909 | 84 | 0.701999 | false | 3.968635 | false | false | false |
mvwicky/ScotusScraper | scotus_parser.py | 1 | 6953 | #!/cygdrive/c/Anaconda3/python.exe
import os
import sys
import multiprocessing as mp
import requests
from bs4 import BeautifulSoup, SoupStrainer
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from logger import Logger
class ScotusParser(QThread):
def __init__(self, name='Parser', log_dir='.', save_dir='.', parent=None):
QThread.__init__(self, parent)
self.name = name
self.log_dir = os.path.realpath(log_dir)
self.save_dir = os.path.realpath(save_dir)
self.log = Logger(name=self.name, save_dir=self.log_dir)
self.base_url = 'http://supremecourt.gov'
@staticmethod
def fmt_name(inp):
""" cleans up file names/removes explicity disallowed characters """
not_allowed_chars = '<>:"/\\|?*' # Explicity not allowed characters
for char in not_allowed_chars:
inp = inp.replace(char, '')
for char in ', ': # I personally don't want these (spaces and commas)
inp = inp.replace(char, '')
inp = inp.replace('..', '.').replace('v.', '_v_') # looks weird
return os.path.normpath(inp)
def make_dir(self, dir_path):
if not os.path.exists(dir_path) or os.path.isfile(dir_path):
self.log('Directory does not exist: {}'.format(dir_path))
try:
os.makedirs(dir_path)
except OSError as e:
self.log('Problem creating: {}'.format(dir_path))
return False
else:
self.log('Directory created: {}'.format(dir_path))
return True
else:
self.log('Directory exists: {}'.format(dir_path))
return True
def argument_audio(self, year):
year = int(year)
pairs = []
self.log('Finding argument audio for {}'.format(year))
audio = {'media': '/'.join([self.base_url, 'media/audio/mp3files']),
'dir': os.path.join(self.save_dir,
str(year),
'Argument_Audio'),
'url': '/'.join([self.base_url,
'oral_arguments/argument_audio',
str(year)]),
'search': '../audio/{year}/'.format(year=year)}
if not self.make_dir(audio['dir']):
return pairs
res = requests.get(audio['url'])
if res.status_code == 404:
self.log('Got 404 from {}'.format(audio['url']))
return pairs
soup = BeautifulSoup(res.content, 'lxml')
for rows in soup('tr'):
for a in rows('a', class_=None):
if audio['search'] in a.get('href'):
link = a.get('href')
docket = a.string
name = rows.find('span').string
name = self.fmt_name('{}-{}.mp3'.format(docket, name))
file_path = os.path.join(audio['dir'], name)
url = '/'.join([audio['media'], '{}.mp3'.format(docket)])
pairs.append((url, file_path))
url = url.replace(self.base_url, '')
file_path = os.path.relpath(file_path)
self.log('Found: ({url}, {file})'
.format(url=url, file=file_path))
return pairs
def slip_opinions(self, year):
year = int(year)
pairs = []
self.log('Finding slip opinions for {}'.format(year))
slip = {'dir': os.path.join(self.save_dir, str(year), 'Slip Opinions'),
'url': '/'.join([self.base_url,
'opinions',
'slipopinion',
str(year-2000)]),
'filter': SoupStrainer('table', class_='table table-bordered')}
if not self.make_dir(slip['dir']):
return pairs
res = requests.get(slip['url'])
if res.status_code == 404:
self.log('Got 404 from {}'.format(slip['url']))
return pairs
soup = BeautifulSoup(res.content, 'lxml', parse_only=slip['filter'])
for rows in soup('tr'):
docket, name = None, None
for i, cell in enumerate(rows('td')):
if i == 2:
docket = cell.string
elif i == 3:
a = cell.find('a')
link = a.get('href')
url = ''.join([self.base_url, link])
name = a.string
if docket and name:
file_name = self.fmt_name('{}-{}.pdf'.format(docket, name))
file_path = os.path.join(slip['dir'], file_name)
pairs.append((url, file_path))
url = url.replace(self.base_url, '')
file_path = os.path.relpath(file_path)
self.log('Found: ({url}, {file})'
.format(url=url, file=file_path))
return pairs
def argument_transcripts(self, year):
year = int(year)
pairs = []
self.log('Finding argument transcripts for {}'.format(year))
script = {'dir': os.path.join(self.save_dir,
str(year),
'Argument Transcripts'),
'url': '/'.join([self.base_url,
'oral_arguments',
'argument_transcript',
str(year)]),
'search': '../argument_transcripts/'}
if not self.make_dir(script['dir']):
return pairs
res = requests.get(script['url'])
if res.status_code == 404:
self.log('Got 404 from {}'.format(script['url']))
return pairs
soup = BeautifulSoup(res.content, 'lxml')
for cell in soup('td'):
for a in cell('a'):
if script['search'] in a.get('href'):
link = a.get('href').replace('../', '')
docket = link.replace('argument_transcripts/', '')
docket = docket.replace('.pdf', '')
url = '/'.join([self.base_url, 'oral_arguments', link])
name = cell.find('span').string
file_name = self.fmt_name('{}-{}.pdf'.format(docket, name))
file_path = os.path.join(script['dir'], file_name)
pairs.append((url, file_path))
url = url.replace(self.base_url, '')
file_path = os.path.relpath(file_path)
self.log('Found: ({url}, {file})'
.format(url=url, file=file_path))
return pairs
if __name__ == '__main__':
p = ScotusParser(log_dir='logs', save_dir='SCOTUS')
p.argument_audio(2015)
p.slip_opinions(2015)
p.argument_transcripts(2015)
| mit | 1,598,903,029,727,719,700 | 37.843575 | 79 | 0.477923 | false | 4.114201 | false | false | false |
nukui-s/sscomdetection | TestUpdate.py | 1 | 1512 | # Author : Hoang NT
# Date : 2016-03-08
#
# Simple test case for Updater.py
import tensorflow as tf
import numpy as np
import Update as ud
# Made-up matrix for testing
# A is 5-by-5 adj matrix
# O is 5-by-5 prior knowledge matrix
# D is 5-by-5 diagonal matrix of O
# k = 4 - Suppose we have 4 clustering
# lambda = 0.5 - Consider trade off between topology (A) and prior knowledge (O)
A = np.array([[0, 1, 1, 0, 1],
[1, 0, 1, 1, 0],
[1, 1, 0, 0, 1],
[0, 1, 0, 0, 1],
[1, 0, 1, 1, 0]])
O = np.array([[1, 1, 1, 1, 1],
[1, 0, 1, 1, 1],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[1, 1, 0, 1, 1]])
D = np.diag(O.sum(axis=1))
k = 4
l = 0.5 # lambda
iterations = 100
# Create a tensorflow graph and add nodes
graph = tf.Graph()
updater = ud.UpdateElem()
# Add updating rule computing node to graph
# Look at UpdateElem class (Update.py) for more detail
updater.add_semi_supervised_rule(A, O, D, k, l)
# Create a session to run
sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)
# Check for initial values of W and H
print(sess.run(updater._W))
print(sess.run(updater._H))
# Update the matrices
for _ in range(iterations) :
# Get the new value for W and W
sess.run([updater.update_H_node(), updater.update_W_node()])
# Assign W and H to the new values
sess.run([updater.assign_H_node(), updater.assign_W_node()])
# Print results
print('Final result for %d iterations' % iterations)
print(sess.run(updater._W))
print(sess.run(updater._H))
| apache-2.0 | -7,122,712,059,874,937,000 | 26.490909 | 80 | 0.632275 | false | 2.805195 | false | false | false |
amirouche/pythonium | pythonium/main.py | 1 | 1579 | #!/usr/bin/env python3
"""Usage: pythonium [-h]|[-v]|[-r]|[FILE]
Options:
-h --help show this
-v --version show version
-r --runtime output pythonium runtime (exclusive option)
You will need to use the library generated with -r or --runtime
option to run the code.
"""
import os
import sys
from ast import parse
from .compliant.compliant import Compliant
from . import __version__
def main(argv=None):
from docopt import docopt
args = docopt(__doc__, argv, version='pythonium ' + __version__)
if not args['--runtime'] and not args['FILE']:
main(['-h'])
sys.exit(1)
if args['--runtime']:
# call ourself for each file in pythonium.lib:
from pythonium.compliant import builtins
path = os.path.dirname(__file__)
filepath = os.path.join(path, 'pythonium.js')
with open(filepath) as f:
print(f.read())
# compile builtins
for path in builtins.__path__:
for name in sorted(os.listdir(path)):
if name.endswith('.py'):
argv = [os.path.join(path, name)]
main(argv)
return
filepath = args['FILE']
dirname = os.path.abspath(os.path.dirname(filepath))
basename = os.path.basename(filepath)
with open(os.path.join(dirname, basename)) as f:
input = f.read()
# generate javascript
tree = parse(input)
translator = Compliant()
translator.visit(tree)
output = translator.writer.value()
print(output)
if __name__ == '__main__':
main()
| lgpl-2.1 | -2,693,304,183,785,684,500 | 24.467742 | 68 | 0.59278 | false | 3.81401 | false | false | false |
swalladge/snsrv | old/db_frontend.py | 1 | 5710 |
# high level db access
# must define lowerlevel db something...
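# Rough sketch of how this frontend is meant to be used and what it expects
# from the injected backend (inferred from the calls below, not an official
# interface; the backend class name is hypothetical):
#
#   backend = SomeLowLevelDb()   # must provide get_user, create_user,
#                                # update_token, get_note, save_version,
#                                # drop_old_versions, update_note, create_note,
#                                # delete_note and notes_index
#   db = Database(backend)
#   token = db.get_token("user@example.com")
#   note, ok = db.create_note("user@example.com", {"content": "hello"})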
import datetime
import uuid
import copy
import re
class Database():
def __init__(self, thedatabase):
self.database = thedatabase
def get_user(self, username):
userdata = self.database.get_user(username)
if not userdata:
return None
if userdata.get('token', None):
# remove if over 24 hours old
if (datetime.datetime.utcnow().timestamp() - userdata['tokendate']) > 86400:
userdata['token'] = None
else:
userdata['token'] = None
return userdata
def create_user(self, username, hashed):
result = self.database.create_user(username, hashed)
return result
def check_token(self, username, token):
user = self.get_user(username)
if user and user.get('token') and user['token'] == token:
return True
return False
def get_token(self, username):
user = self.get_user(username)
# if already token, return it
if user['token']:
return user['token']
# otherwise generate a new one
token = (str(uuid.uuid4())+str(uuid.uuid4())).replace('-','').upper()
tokendate = datetime.datetime.utcnow().timestamp()
self.database.update_token(user['email'], token, tokendate)
return token
def get_note(self, username, notekey, version=None):
note = self.database.get_note(username, notekey, version)
if not note:
return None
return note
def update_note(self, username, notekey, data):
# TODO: check/validate data types
# TODO: use syncnum to resolve conflicts (if syncnum in new data is lower, don't use)
old_note = self.get_note(username, notekey)
if not old_note:
return ('note with that key does not exist', 404)
content = data.get('content', None)
if content and content != old_note['content']:
# then save old version
self.database.save_version(username, notekey)
old_note['content'] = content
# TODO: currently version only increments when content changes (is this wanted?) - either way, syncnum is inc'd
old_note['version'] += 1
s = datetime.datetime.utcnow().timestamp()
old_note['modifydate'] = min(s, data.get('modifydate', s))
# old_note['createdate'] = min(s, data.get('createdate', s)) # TODO: should createdate ever be modified?
# TODO: handle version in new note data (ie for merge? and _whether to update or not_ - don't overwrite newer note with older note)
old_note['minversion'] = max(old_note['version'] - 20, 1) #TODO: allow configuring number of versions to keep
self.database.drop_old_versions(username, notekey, old_note['minversion'])
# TODO: handling sharekey?
deleted = data.get('deleted', None)
if deleted == '1' or deleted == '0':
deleted = int(deleted)
if (deleted in [1,0]):
old_note['deleted'] = deleted
if 'systemtags' in data:
old_note['systemtags'] = [t for t in set(data.get('systemtags',[])) if t in ('pinned', 'markdown', 'list')]
if 'tags' in data:
tags = []
for t in set(data.get('tags', [])):
safe_tag = self._validate_tag(t)
if safe_tag:
tags.append(safe_tag)
old_note['tags'] = tags
old_note['syncnum'] += 1
ok = self.database.update_note(username, copy.deepcopy(old_note))
if ok:
return (old_note, 200)
return ('unable to create note', 400)
def create_note(self, username, data):
note_data = {}
if 'content' not in data:
return ('note must contain a content field', False)
note_data['content'] = str(data['content'])
note_data['key'] = str(uuid.uuid4()) + str(int(datetime.datetime.utcnow().timestamp()))
s = datetime.datetime.utcnow().timestamp()
note_data['modifydate'] = min(s, data.get('modifydate', s))
note_data['createdate'] = min(s, data.get('createdate', s))
note_data['version'] = 1
note_data['minversion'] = 1
note_data['publishkey'] = None
note_data['syncnum'] = 1
deleted = data.get('deleted', 0)
if deleted == '1' or deleted == '0':
deleted = int(deleted)
elif deleted != 1:
deleted = 0
note_data['deleted'] = deleted
note_data['systemtags'] = [t for t in set(data.get('systemtags',[])) if t in ('pinned', 'markdown', 'list')]
tags = []
for t in set(data.get('tags', [])):
safe_tag = self._validate_tag(t)
if safe_tag:
tags.append(safe_tag)
note_data['tags'] = tags
ok = self.database.create_note(username, copy.deepcopy(note_data))
if ok:
return (note_data, True)
return ('unable to create note', False)
def delete_note(self, username, key):
data = self.database.delete_note(username, key)
return data
def notes_index(self, username, length, since, mark):
""" username<string>, length<int>, since<float>, mark<whatever> """
data, status = self.database.notes_index(username, length, since, mark)
return (data, status)
# TODO: tags api
def _validate_tag(self, t):
# remove surrounding whitespace
t = t.strip()
# can't contain whitespace or commas!
if re.search(r'(\s|,)', t):
return None
return t
| gpl-3.0 | -7,271,441,237,214,232,000 | 30.899441 | 139 | 0.571979 | false | 3.957034 | false | false | false |
TrentonSarnowski/GenomeBusters | polymorphs-master/setup.py | 1 | 1030 | #!/bin/env python3
import json
import os
from pprint import pprint
from requests import get
import subprocess
print("starting...")
def download_latest_release():
print("downloading prodigal...")
req_content = json.loads(get("https://api.github.com/repos/hyattpd/Prodigal/releases/latest").content.decode('utf-8'))
print("installing %s" % req_content["name"])
system_os = "linux" if os.name == "posix" else ("nt" if os.name == "nt" else "osx")
print(system_os)
for obj in req_content["assets"]:
if system_os in obj["name"]:
pprint(obj)
# wget(obj["browser_download_url"], obj["name"])
def wget(url, file_name):
binary_download_response = get(url, stream=True)
with open(file_name, 'wb') as binary:
for chunk in binary_download_response.iter_content(chunk_size=1024):
if chunk:
binary.write(chunk)
download_latest_release()
with subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE) as proc:
print(proc.stdout.read())
| mit | -9,127,763,450,498,961,000 | 28.428571 | 122 | 0.651456 | false | 3.468013 | false | false | false |
MalloyDelacroix/DownloaderForReddit | DownloaderForReddit/extractors/comment_extractor.py | 1 | 2178 | import os
from .self_post_extractor import SelfPostExtractor
from ..core.errors import Error
from ..utils import system_util
class CommentExtractor(SelfPostExtractor):
def __init__(self, post, **kwargs):
super().__init__(post, **kwargs)
def extract_content(self):
try:
ext = self.post.significant_reddit_object.comment_file_format
title = self.make_title()
directory = self.make_dir_path()
self.download_text(directory, title, ext)
except Exception as e:
self.failed_extraction = True
self.extraction_error = Error.TEXT_LINK_FAILURE
self.failed_extraction_message = f'Failed to save comment text. ERROR: {e}'
self.logger.error('Failed to save content text', extra={
'url': self.url, 'user': self.comment.url, 'subreddit': self.comment.subreddit,
'comment_id': self.comment.id, 'comment_reddit_id': self.comment.reddit_id,
'date_posted': self.comment.date_posted
})
def download_text(self, dir_path, title, extension):
try:
self.check_file_path(dir_path, title, extension)
path = os.path.join(dir_path, title) + f'.{extension}'
with open(path, 'w', encoding='utf-8') as file:
text = self.get_text(extension)
file.write(text)
except:
self.logger.error('Failed to download comment text',
extra={'post': self.post.title, 'post_id': self.post.id, 'comment_id': self.comment.id,
'directory_path': dir_path, 'title': title}, exc_info=True)
def check_file_path(self, dir_path, name, ext):
self.create_dir_path(dir_path)
unique_count = 1
base_title = system_util.clean_path(name)
download_title = base_title
path = os.path.join(dir_path, f'{download_title}.{ext}')
while os.path.exists(path):
download_title = f'{base_title}({unique_count})'
path = os.path.join(dir_path, f'{download_title}.{ext}')
unique_count += 1
return path
| gpl-3.0 | 2,644,559,677,840,326,000 | 41.705882 | 117 | 0.581267 | false | 3.875445 | false | false | false |
vathpela/blivet | blivet/mounts.py | 2 | 4059 | # mounts.py
# Active mountpoints cache.
#
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Vojtech Trefny <[email protected]>
#
import libmount
import functools
from . import util
import logging
log = logging.getLogger("blivet")
MOUNT_FILE = "/proc/self/mountinfo"
class MountsCache(object):
""" Cache object for system mountpoints; checks
/proc/self/mountinfo for up-to-date information.
"""
def __init__(self):
self.mounts_hash = 0
self.mountpoints = None
def get_mountpoints(self, devspec, subvolspec=None):
""" Get mountpoints for selected device
:param devscpec: device specification, eg. "/dev/vda1"
:type devspec: str
:param subvolspec: btrfs subvolume specification, eg. ID or name
:type subvolspec: object (may be NoneType)
:returns: list of mountpoints (path)
:rtype: list of str or empty list
.. note::
Devices can be mounted on multiple paths, and paths can have multiple
devices mounted to them (hiding previous mounts). Callers should take this into account.
"""
self._cache_check()
mountpoints = []
if subvolspec is not None:
subvolspec = str(subvolspec)
# devspec might be a '/dev/dm-X' path but /proc/self/mountinfo always
# contains the '/dev/mapper/...' path -- find_source is able to resolve
# both paths but returns only one mountpoint -- it is neccesary to check
# for all possible mountpoints using new/resolved path (devspec)
try:
fs = self.mountpoints.find_source(devspec)
except Exception: # pylint: disable=broad-except
return mountpoints
else:
devspec = fs.source
# iterate over all lines in the table to find all matching mountpoints
for fs in iter(functools.partial(self.mountpoints.next_fs), None):
if subvolspec:
if fs.fstype != "btrfs":
continue
if fs.source == devspec and (fs.match_options("subvolid=%s" % subvolspec) or
fs.match_options("subvol=/%s" % subvolspec)):
mountpoints.append(fs.target)
else:
if fs.source == devspec:
mountpoints.append(fs.target)
return mountpoints
def is_mountpoint(self, path):
""" Check to see if a path is already mounted
:param str path: Path to check
"""
self._cache_check()
try:
            self.mountpoints.find_target(path)
except Exception: # pylint: disable=broad-except
return False
else:
return True
def _cache_check(self):
""" Computes the MD5 hash on /proc/self/mountinfo and updates the cache on change
"""
md5hash = util.md5_file(MOUNT_FILE)
if md5hash != self.mounts_hash:
self.mounts_hash = md5hash
self.mountpoints = libmount.Table(MOUNT_FILE)
mounts_cache = MountsCache()
| gpl-2.0 | 8,593,153,931,211,163,000 | 34.295652 | 104 | 0.631683 | false | 4.095863 | false | false | false |
masneyb/cavedbmanager | cavedb/docgen_gis_maps.py | 1 | 2782 | # SPDX-License-Identifier: Apache-2.0
import os
from django.conf import settings
import cavedb.docgen_gis_common
import cavedb.utils
class GisMaps(cavedb.docgen_gis_common.GisCommon):
def __init__(self, bulletin, gis_x_buffer=0.005, gis_y_buffer=0.005):
cavedb.docgen_gis_common.GisCommon.__init__(self, gis_x_buffer, gis_y_buffer)
self.bulletin = bulletin
self.gismaps = []
def gis_map(self, gismap):
self.gismaps.append(gismap.name)
def generate_buildscript(self):
buildscr = ''
for gismap in self.gismaps:
mapfile = cavedb.docgen_gis_common.get_bulletin_mapserver_mapfile(self.bulletin.id, \
gismap)
localfile = get_all_regions_gis_map(self.bulletin.id, gismap)
if self.overall_extents['gishash']:
buildscr += create_map(mapfile, localfile, self.overall_extents)
for extents in list(self.region_extents.values()):
localfile = get_region_gis_map(self.bulletin.id, extents['id'], gismap)
buildscr += create_map(mapfile, localfile, extents)
buildscr += '\n'
return buildscr
def create_map(mapfile, outfile, extents):
if not extents['minx']:
return ''
hashcode_file = outfile + ".hashcode"
existing_hashcode = get_existing_hashcode(outfile, hashcode_file)
enabled = '#' if extents['gishash'] == existing_hashcode else ''
return '%sshp2img -m %s -o %s -e %s %s %s %s\n' % \
(enabled, mapfile, outfile, \
extents['minx'], extents['miny'], extents['maxx'], extents['maxy']) + \
'%sif [ $? = 0 ] ; then\n' % (enabled) + \
'%s echo %s > "%s"\n' % (enabled, extents['gishash'], hashcode_file) + \
'%sfi\n' % (enabled)
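# For reference, a sketch of the shell fragment the expression above produces
# when the extents hash has changed (file names and extents are made-up):
#
#   shp2img -m .../mapserver.map -o .../region_gis_map.jpg -e -80.5 38.1 -80.2 38.4
#   if [ $? = 0 ] ; then
#     echo <gishash> > ".../region_gis_map.jpg.hashcode"
#   fi
#
# When the stored hash already matches, every line is prefixed with '#' so the
# generated build script skips the re-render.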
def get_existing_hashcode(outfile, hashcode_file):
if not os.path.exists(outfile):
return None
if not os.path.exists(hashcode_file):
return None
with open(hashcode_file, 'r') as infile:
actual_hashcode = infile.read(1024)
return actual_hashcode.replace('\n', '').replace('\r', '')
return None
def get_all_regions_gis_map(bulletin_id, map_name):
return '%s/bulletin_%s_gis_%s_map.jpg' % \
(cavedb.docgen_gis_common.get_bulletin_gis_maps_directory(bulletin_id), bulletin_id, \
map_name)
def get_region_gis_map(bulletin_id, region_id, map_name):
return '%s/bulletin_%s_region_%s_gis_%s_map.jpg' % \
(cavedb.docgen_gis_common.get_bulletin_gis_maps_directory(bulletin_id), bulletin_id, \
region_id, map_name)
def get_mapserver_include(map_name):
return '%s/%s.map' % (settings.GIS_INCLUDES_DIR, map_name)
| apache-2.0 | -8,005,233,630,944,951,000 | 32.926829 | 97 | 0.59885 | false | 3.063877 | false | false | false |
sam-m888/gprime | gprime/lib/family.py | 1 | 27884 | #
# gPrime - A web-based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2010 Michiel D. Nauta
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Family object for Gramps.
"""
#-------------------------------------------------------------------------
#
# standard python modules
#
#-------------------------------------------------------------------------
from warnings import warn
import logging
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from .primaryobj import PrimaryObject
from .citationbase import CitationBase
from .notebase import NoteBase
from .mediabase import MediaBase
from .attrbase import AttributeBase
from .eventref import EventRef
from .ldsordbase import LdsOrdBase
from .tagbase import TagBase
from .childref import ChildRef
from .familyreltype import FamilyRelType
from .const import IDENTICAL, EQUAL, DIFFERENT
from .handle import Handle
LOG = logging.getLogger(".citation")
#-------------------------------------------------------------------------
#
# Family class
#
#-------------------------------------------------------------------------
class Family(CitationBase, NoteBase, MediaBase, AttributeBase, LdsOrdBase,
PrimaryObject):
"""
The Family record is the Gramps in-memory representation of the
relationships between people. It contains all the information
related to the relationship.
Family objects are usually created in one of two ways.
1. Creating a new Family object, which is then initialized and
added to the database.
2. Retrieving an object from the database using the records
handle.
Once a Family object has been modified, it must be committed
to the database using the database object's commit_family function,
or the changes will be lost.
"""
def __init__(self, db=None):
"""
Create a new Family instance.
After initialization, most data items have empty or null values,
including the database handle.
"""
PrimaryObject.__init__(self)
CitationBase.__init__(self)
NoteBase.__init__(self)
MediaBase.__init__(self)
AttributeBase.__init__(self)
LdsOrdBase.__init__(self)
self.father_handle = None
self.mother_handle = None
self.child_ref_list = []
self.type = FamilyRelType()
self.event_ref_list = []
self.complete = 0
self.db = db
def to_struct(self):
"""
Convert the data held in this object to a structure (eg,
struct) that represents all the data elements.
This method is used to recursively convert the object into a
self-documenting form that can easily be used for various
purposes, including diffs and queries.
These structures may be primitive Python types (string,
integer, boolean, etc.) or complex Python types (lists,
tuples, or dicts). If the return type is a dict, then the keys
of the dict match the fieldname of the object. If the return
struct (or value of a dict key) is a list, then it is a list
of structs. Otherwise, the struct is just the value of the
attribute.
:returns: Returns a struct containing the data of the object.
:rtype: dict
"""
return {"_class": "Family",
"handle": Handle("Family", self.handle),
"gid": self.gid,
"father_handle": Handle("Person", self.father_handle),
"mother_handle": Handle("Person", self.mother_handle),
"child_ref_list": [cr.to_struct() for cr in self.child_ref_list],
"type": self.type.to_struct(),
"event_ref_list": [er.to_struct() for er in self.event_ref_list],
"media_list": MediaBase.to_struct(self),
"attribute_list": AttributeBase.to_struct(self),
"lds_ord_list": LdsOrdBase.to_struct(self),
"citation_list": CitationBase.to_struct(self),
"note_list": NoteBase.to_struct(self),
"change": self.change,
"tag_list": TagBase.to_struct(self),
"private": self.private}
@classmethod
def from_struct(cls, struct, self=None):
"""
        Given a struct data representation, return an object instance.
        :returns: Returns an instance of this class populated from the struct
"""
default = Family()
if not self:
self = default
data = (Handle.from_struct(struct.get("handle", default.handle)),
struct.get("gid", default.gid),
Handle.from_struct(struct.get("father_handle",
default.father_handle)),
Handle.from_struct(struct.get("mother_handle",
default.mother_handle)),
[ChildRef.from_struct(cr)
for cr in struct.get("child_ref_list",
default.child_ref_list)],
FamilyRelType.from_struct(struct.get("type", {})),
[EventRef.from_struct(er)
for er in struct.get("event_ref_list",
default.event_ref_list)],
struct.get("change", default.change),
struct.get("private", default.private))
(self.handle, self.gid, self.father_handle, self.mother_handle,
self.child_ref_list, self.type, self.event_ref_list,
self.change, self.private) = data
MediaBase.set_from_struct(self, struct)
AttributeBase.set_from_struct(self, struct)
CitationBase.set_from_struct(self, struct)
NoteBase.set_from_struct(self, struct)
LdsOrdBase.set_from_struct(self, struct)
TagBase.set_from_struct(self, struct)
return self
@classmethod
def get_schema(cls):
from .mediaref import MediaRef
from .ldsord import LdsOrd
from .childref import ChildRef
from .attribute import Attribute
return {
"handle": Handle("Family", "FAMILY-HANDLE"),
"gid": str,
"father_handle": Handle("Person", "PERSON-HANDLE"),
"mother_handle": Handle("Person", "PERSON-HANDLE"),
"child_ref_list": [ChildRef],
"type": FamilyRelType,
"event_ref_list": [EventRef],
"media_list": [MediaRef],
"attribute_list": [Attribute],
"lds_ord_list": [LdsOrd],
"citation_list": [Handle("Citation", "CITATION-HANDLE")],
"note_list": [Handle("Note", "NOTE-HANDLE")],
"change": int,
"tag_list": [Handle("Tag", "TAG-HANDLE")],
"private": bool
}
@classmethod
def get_table(cls):
"""
        Return abstract Table for database definitions.
"""
from .struct import Table, Column
return Table(cls,
[Column("handle", "VARCHAR(50)",
primary=True, null=False, index=True),
Column("father_handle", "VARCHAR(50)", index=True),
Column("mother_handle", "VARCHAR(50)", index=True),
Column("gid", "TEXT", index=True),
Column("json_data", "TEXT")])
@classmethod
def get_labels(cls, _):
return {
"_class": _("Family"),
"handle": _("Handle"),
"gid": _("ID"),
"father_handle": _("Father"),
"mother_handle": _("Mother"),
"child_ref_list": _("Children"),
"type": _("Relationship"),
"event_ref_list": _("Events"),
"media_list": _("Media"),
"attribute_list": _("Attributes"),
"lds_ord_list": _("LDS ordinances"),
"citation_list": _("Citations"),
"note_list": _("Notes"),
"change": _("Last changed"),
"tag_list": _("Tags"),
"private": _("Private"),
}
@classmethod
def field_aliases(cls):
"""
Return dictionary of alias to full field names
for this object class.
"""
return {
"mother_surname": "mother_handle.primary_name.surname_list.0.surname",
"mother_given": "mother_handle.primary_name.first_name",
"father_surname": "father_handle.primary_name.surname_list.0.surname",
"father_given": "father_handle.primary_name.first_name",
}
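    # For example, a query written against the alias "mother_surname" is expanded to
    # the dotted path "mother_handle.primary_name.surname_list.0.surname": follow the
    # mother handle to a Person, take its primary name, and read the surname of the
    # first entry in the surname list. The alias itself is never stored in the data.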
@classmethod
def get_extra_secondary_fields(cls):
"""
Return a list of full field names and types for secondary
fields that are not directly listed in the schema.
"""
return [
("father_handle.primary_name.surname_list.0.surname", str),
("father_handle.primary_name.first_name", str),
("mother_handle.primary_name.surname_list.0.surname", str),
("mother_handle.primary_name.first_name", str),
]
@classmethod
def get_index_fields(cls):
return [
"father_handle.primary_name.surname_list.0.surname",
"father_handle.primary_name.first_name",
"mother_handle.primary_name.surname_list.0.surname",
"mother_handle.primary_name.first_name",
]
def _has_handle_reference(self, classname, handle):
"""
Return True if the object has reference to a given handle of given
primary object type.
:param classname: The name of the primary object class.
:type classname: str
:param handle: The handle to be checked.
:type handle: str
:returns: Returns whether the object has reference to this handle of
this object type.
:rtype: bool
"""
if classname == 'Event':
return handle in [ref.ref for ref in self.event_ref_list]
elif classname == 'Person':
return handle in ([ref.ref for ref in self.child_ref_list]
+ [self.father_handle, self.mother_handle])
elif classname == 'Place':
return handle in [x.place for x in self.lds_ord_list]
return False
def remove_handle_references(self, classname, handle_list):
"""
Remove all references in this object to object handles in the list.
:param classname: The name of the primary object class.
:type classname: str
:param handle_list: The list of handles to be removed.
:type handle_list: str
"""
if classname == 'Event':
self.remove_event_references(handle_list)
elif classname == 'Person':
self.remove_person_references(handle_list)
elif classname == 'Place':
self.remove_place_references(handle_list)
elif classname == 'Media':
self.remove_media_references(handle_list)
elif classname == 'Tag':
self.remove_tag_references(handle_list)
elif classname == 'Note':
self.remove_note_references(handle_list)
elif classname == 'Citation':
self.remove_citation_references(handle_list)
def remove_person_references(self, handle_list):
new_list = [ref for ref in self.child_ref_list
if ref.ref not in handle_list]
self.child_ref_list = new_list
if self.father_handle in handle_list:
self.father_handle = None
if self.mother_handle in handle_list:
self.mother_handle = None
    def remove_place_references(self, handle_list):
for lds_ord in self.lds_ord_list:
if lds_ord.place in handle_list:
lds_ord.place = None
def _replace_handle_reference(self, classname, old_handle, new_handle):
"""
Replace all references to old handle with those to the new handle.
:param classname: The name of the primary object class.
:type classname: str
:param old_handle: The handle to be replaced.
:type old_handle: str
:param new_handle: The handle to replace the old one with.
:type new_handle: str
"""
if classname == 'Event':
refs_list = [ref.ref for ref in self.event_ref_list]
new_ref = None
if new_handle in refs_list:
new_ref = self.event_ref_list[refs_list.index(new_handle)]
n_replace = refs_list.count(old_handle)
for ix_replace in range(n_replace):
idx = refs_list.index(old_handle)
self.event_ref_list[idx].ref = new_handle
refs_list[idx] = new_handle
if new_ref:
evt_ref = self.event_ref_list[idx]
equi = new_ref.is_equivalent(evt_ref)
if equi != DIFFERENT:
if equi == EQUAL:
new_ref.merge(evt_ref)
self.event_ref_list.pop(idx)
refs_list.pop(idx)
elif classname == 'Person':
refs_list = [ref.ref for ref in self.child_ref_list]
new_ref = None
if new_handle in refs_list:
new_ref = self.child_ref_list[refs_list.index(new_handle)]
n_replace = refs_list.count(old_handle)
for ix_replace in range(n_replace):
idx = refs_list.index(old_handle)
self.child_ref_list[idx].ref = new_handle
refs_list[idx] = new_handle
if new_ref:
child_ref = self.child_ref_list[idx]
equi = new_ref.is_equivalent(child_ref)
if equi != DIFFERENT:
if equi == EQUAL:
new_ref.merge(child_ref)
self.child_ref_list.pop(idx)
refs_list.pop(idx)
if self.father_handle == old_handle:
self.father_handle = new_handle
if self.mother_handle == old_handle:
self.mother_handle = new_handle
elif classname == 'Place':
for lds_ord in self.lds_ord_list:
if lds_ord.place == old_handle:
lds_ord.place = new_handle
def get_text_data_list(self):
"""
Return the list of all textual attributes of the object.
:returns: Returns the list of all textual attributes of the object.
:rtype: list
"""
return [self.gid]
def get_text_data_child_list(self):
"""
Return the list of child objects that may carry textual data.
:returns: Returns the list of child objects that may carry textual data.
:rtype: list
"""
add_list = [_f for _f in self.lds_ord_list if _f]
return self.media_list + self.attribute_list + add_list
def get_citation_child_list(self):
"""
Return the list of child secondary objects that may refer citations.
:returns: Returns the list of child secondary child objects that may
refer citations.
:rtype: list
"""
check_list = self.media_list + self.attribute_list + \
self.lds_ord_list + self.child_ref_list + self.event_ref_list
return check_list
def get_note_child_list(self):
"""
Return the list of child secondary objects that may refer notes.
:returns: Returns the list of child secondary child objects that may
refer notes.
:rtype: list
"""
check_list = self.media_list + self.attribute_list + \
self.lds_ord_list + self.child_ref_list + \
self.event_ref_list
return check_list
def get_referenced_handles(self):
"""
Return the list of (classname, handle) tuples for all directly
referenced primary objects.
:returns: List of (classname, handle) tuples for referenced objects.
:rtype: list
"""
ret = self.get_referenced_note_handles() + \
self.get_referenced_citation_handles()
ret += [('Person', handle) for handle
in ([ref.ref for ref in self.child_ref_list] +
[self.father_handle, self.mother_handle])
if handle]
ret += self.get_referenced_tag_handles()
return ret
def get_handle_referents(self):
"""
Return the list of child objects which may, directly or through their
        children, reference primary objects.
:returns: Returns the list of objects referencing primary objects.
:rtype: list
"""
return self.media_list + self.attribute_list + \
self.lds_ord_list + self.child_ref_list + self.event_ref_list
def merge(self, acquisition):
"""
Merge the content of acquisition into this family.
Lost: handle, id, relation, father, mother of acquisition.
:param acquisition: The family to merge with the present family.
:type acquisition: Family
"""
if self.type != acquisition.type and self.type == FamilyRelType.UNKNOWN:
self.set_relationship(acquisition.get_relationship())
self._merge_privacy(acquisition)
self._merge_event_ref_list(acquisition)
self._merge_lds_ord_list(acquisition)
self._merge_media_list(acquisition)
self._merge_child_ref_list(acquisition)
self._merge_attribute_list(acquisition)
self._merge_note_list(acquisition)
self._merge_citation_list(acquisition)
self._merge_tag_list(acquisition)
def set_relationship(self, relationship_type):
"""
Set the relationship type between the people identified as the
father and mother in the relationship.
The type is a tuple whose first item is an integer constant and whose
second item is the string. The valid values are:
========================= ============================================
Type Description
========================= ============================================
FamilyRelType.MARRIED indicates a legally recognized married
relationship between two individuals. This
may be either an opposite or a same sex
relationship.
FamilyRelType.UNMARRIED indicates a relationship between two
individuals that is not a legally recognized
relationship.
        FamilyRelType.CIVIL_UNION indicates a legally recognized, non-married
relationship between two individuals of the
same sex.
FamilyRelType.UNKNOWN indicates that the type of relationship
                                  between the two individuals is not known.
FamilyRelType.CUSTOM indicates that the type of relationship
between the two individuals does not match
any of the other types.
========================= ============================================
:param relationship_type: (int,str) tuple of the relationship type
between the father and mother of the relationship.
:type relationship_type: tuple
"""
self.type.set(relationship_type)
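    # Hypothetical usage sketch, following the (int, str) tuple contract documented
    # above (the Family() construction and the custom string are illustrative):
    #
    #   family = Family()
    #   family.set_relationship((FamilyRelType.MARRIED, ""))
    #   family.set_relationship((FamilyRelType.CUSTOM, "Handfasted"))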
def get_relationship(self):
"""
Return the relationship type between the people identified as the
father and mother in the relationship.
"""
return self.type
def set_father_handle(self, person_handle):
"""
        Set the database handle for :class:`~.person.Person` that corresponds
        to the father of the relationship.
        For a same-sex relationship, this can represent either of the people
        involved in the relationship.
:param person_handle: :class:`~.person.Person` database handle
:type person_handle: str
"""
self.father_handle = person_handle
def get_father_handle(self):
"""
Return the database handle of the :class:`~.person.Person` identified
as the father of the Family.
:returns: :class:`~.person.Person` database handle
:rtype: str
"""
return self.father_handle
def set_mother_handle(self, person_handle):
"""
        Set the database handle for :class:`~.person.Person` that corresponds
        to the mother of the relationship.
        For a same-sex relationship, this can represent either of the people
        involved in the relationship.
:param person_handle: :class:`~.person.Person` database handle
:type person_handle: str
"""
self.mother_handle = person_handle
def get_mother_handle(self):
"""
Return the database handle of the :class:`~.person.Person` identified
as the mother of the Family.
:returns: :class:`~.person.Person` database handle
:rtype: str
"""
return self.mother_handle
def add_child_ref(self, child_ref):
"""
Add the database handle for :class:`~.person.Person` to the Family's
list of children.
:param child_ref: Child Reference instance
:type child_ref: ChildRef
"""
if not isinstance(child_ref, ChildRef):
raise ValueError("expecting ChildRef instance")
self.child_ref_list.append(child_ref)
def remove_child_ref(self, child_ref):
"""
Remove the database handle for :class:`~.person.Person` to the Family's
list of children if the :class:`~.person.Person` is already in the list.
:param child_ref: Child Reference instance
:type child_ref: ChildRef
:returns: True if the handle was removed, False if it was not
in the list.
:rtype: bool
"""
if not isinstance(child_ref, ChildRef):
raise ValueError("expecting ChildRef instance")
new_list = [ref for ref in self.child_ref_list
if ref.ref != child_ref.ref]
self.child_ref_list = new_list
def remove_child_handle(self, child_handle):
"""
Remove the database handle for :class:`~.person.Person` to the Family's
list of children if the :class:`~.person.Person` is already in the list.
:param child_handle: :class:`~.person.Person` database handle
:type child_handle: str
:returns: True if the handle was removed, False if it was not
in the list.
:rtype: bool
"""
new_list = [ref for ref in self.child_ref_list
if ref.ref != child_handle]
self.child_ref_list = new_list
def get_child_ref_list(self):
"""
Return the list of :class:`~.childref.ChildRef` handles identifying the
children of the Family.
:returns: Returns the list of :class:`~.childref.ChildRef` handles
associated with the Family.
:rtype: list
"""
return self.child_ref_list
def set_child_ref_list(self, child_ref_list):
"""
Assign the passed list to the Family's list children.
:param child_ref_list: List of Child Reference instances to be
associated as the Family's list of children.
:type child_ref_list: list of :class:`~.childref.ChildRef` instances
"""
self.child_ref_list = child_ref_list
def _merge_child_ref_list(self, acquisition):
"""
Merge the list of child references from acquisition with our own.
:param acquisition: the childref list of this family will be merged
with the current childref list.
:type acquisition: Family
"""
childref_list = self.child_ref_list[:]
for addendum in acquisition.get_child_ref_list():
for childref in childref_list:
equi = childref.is_equivalent(addendum)
if equi == IDENTICAL:
break
elif equi == EQUAL:
childref.merge(addendum)
break
else:
self.child_ref_list.append(addendum)
def add_event_ref(self, event_ref):
"""
Add the :class:`~.eventref.EventRef` to the Family instance's
:class:`~.eventref.EventRef` list.
This is accomplished by assigning the :class:`~.eventref.EventRef` for
the valid :class:`~.event.Event` in the current database.
:param event_ref: the :class:`~.eventref.EventRef` to be added to the
Person's :class:`~.eventref.EventRef` list.
:type event_ref: EventRef
"""
if event_ref and not isinstance(event_ref, EventRef):
raise ValueError("Expecting EventRef instance")
self.event_ref_list.append(event_ref)
def get_event_list(self):
warn("Use get_event_ref_list instead of get_event_list",
DeprecationWarning, 2)
# Wrapper for old API
        # remove when transition is done.
event_handle_list = []
for event_ref in self.get_event_ref_list():
event_handle_list.append(event_ref.get_reference_handle())
return event_handle_list
def get_event_ref_list(self):
"""
Return the list of :class:`~.eventref.EventRef` objects associated with
:class:`~.event.Event` instances.
:returns: Returns the list of :class:`~.eventref.EventRef` objects
associated with the Family instance.
:rtype: list
"""
return self.event_ref_list
def set_event_ref_list(self, event_ref_list):
"""
Set the Family instance's :class:`~.eventref.EventRef` list to the
passed list.
:param event_ref_list: List of valid :class:`~.eventref.EventRef`
objects
:type event_ref_list: list
"""
self.event_ref_list = event_ref_list
def _merge_event_ref_list(self, acquisition):
"""
Merge the list of event references from acquisition with our own.
:param acquisition: the event references list of this object will be
merged with the current event references list.
        :type acquisition: Family
"""
eventref_list = self.event_ref_list[:]
for addendum in acquisition.get_event_ref_list():
for eventref in eventref_list:
equi = eventref.is_equivalent(addendum)
if equi == IDENTICAL:
break
elif equi == EQUAL:
eventref.merge(addendum)
break
else:
self.event_ref_list.append(addendum)
| gpl-2.0 | -2,009,945,109,026,395,400 | 37.998601 | 82 | 0.565486 | false | 4.342626 | false | false | false |
chrisdickinson/nappingcat | nappingcat/contrib/auth/backends/jsonauth.py | 1 | 1145 | from nappingcat.auth import AuthBackend
import os
try:
import json as simplejson
except ImportError:
import simplejson
SECTION_NAME = 'jsonauth'
class JSONAuthBackend(AuthBackend):
def __init__(self, *args, **kwargs):
super(JSONAuthBackend, self).__init__(*args, **kwargs)
settings_dict = dict(self.settings.items(SECTION_NAME))
filename = os.path.expanduser(settings_dict.get('file', '~/nappingcat_auth.json'))
try:
with open(filename, 'r') as input:
self.users = simplejson.loads(input.read())
except (IOError, ValueError) as e:
self.users = {}
with open(filename, 'w') as fallback:
fallback.write(simplejson.dumps({}))
def finish(self, pubkey_handler):
super(JSONAuthBackend, self).finish(pubkey_handler)
if self.require_update:
settings_dict = dict(self.settings.items(SECTION_NAME))
filename = os.path.expanduser(settings_dict.get('file', '~/nappingcat_auth.json'))
with open(filename, 'w') as output:
output.write(simplejson.dumps(self.users))
| bsd-3-clause | -518,137,101,240,385,400 | 38.482759 | 94 | 0.627074 | false | 3.948276 | false | false | false |
laplab/carbon | carbon/process.py | 1 | 6603 | import shlex
from subprocess import Popen, PIPE, TimeoutExpired
from queue import Queue, Empty
from threading import Thread
import psutil
import time
from utils import Map
class Process:
"""Allows to run processes with limits
Attributes:
cmd (str): Command to execute
input (Optional[str]): Input to be passed to processes STDIN
time_limit (Optional[int]): Time limit in milliseconds
memory_limit (Optional[int]): Memory limit in kB
stdout_file (Optional[str]): Name of file STDOUT should be written to
stderr_file (Optional[str]): Name of file STDERR should be written to
process (Popen): Popen process object
status (Map): Current status of program including
time_limit_exceeded (bool): Is time limit exceeded
memory_limit_exceeded (bool): Is memory limit exceeded
stdout (str): All STDOUT of process
stderr (str): All STDERR of process
        time (int): Execution time in milliseconds. This attribute is None until the process has finished.
        memory (int): Maximum memory use in kB. This attribute is None until the process has finished.
        returncode (int): Return code of the process. This attribute is None until the process has finished.
"""
def __init__(self, cmd, input=None, time_limit=None, memory_limit=None, stdout_file=None, stderr_file=None):
"""Init method of process
Args:
cmd (str): Command to execute
input (Optional[str]): Input to be passed to processes STDIN
time_limit (Optional[int]): Time limit in milliseconds
memory_limit (Optional[int]): Memory limit in kB
stdout_file (Optional[str]): Name of file STDOUT should be written to
stderr_file (Optional[str]): Name of file STDERR should be written to
"""
self.cmd, self.input, self.time_limit, self.memory_limit, self.stdout_file, self.stderr_file\
= shlex.split(cmd), input, time_limit, memory_limit, stdout_file, stderr_file
if self.input:
self.input = self.input.encode('UTF-8')
self.process = None
# status variables
self.status = Map()
self.status.time_limit_exceeded = False
self.status.memory_limit_exceeded = False
self.status.stdout = None
self.status.stderr = None
self.status.time = None
self.status.memory = None
self.status.returncode = None
def run(self):
"""Runs process with configuration set.
"""
self.process = Popen(self.cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
psutil_process = psutil.Process(self.process.pid)
# pause process to allow bootstrap code execute before it
psutil_process.suspend()
stdout_summary = ''
stderr_summary = ''
if self.memory_limit is None:
try:
psutil_process.resume()
start = time.time()
                # communicate() expects its timeout in seconds, while time_limit is in milliseconds
                timeout = self.time_limit / 1000.0 if self.time_limit is not None else None
                (stdout_summary, stderr_summary) = self.process.communicate(self.input, timeout)
                # report execution time in milliseconds, consistent with the memory-limited branch
                self.status.time = round((time.time() - start) * 1000)
self.status.returncode = self.process.poll()
except TimeoutExpired:
self.status.time_limit_exceeded = True
self.process.kill()
else:
def enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
stdout_queue = Queue()
stdout_thread = Thread(target=enqueue_output, args=(self.process.stdout, stdout_queue))
stdout_thread.daemon = True
stdout_thread.start()
stderr_queue = Queue()
stderr_thread = Thread(target=enqueue_output, args=(self.process.stderr, stderr_queue))
stderr_thread.daemon = True
stderr_thread.start()
max_mem = 0
# start timer
start = time.time()
# bootstrap finished, resume
psutil_process.resume()
# write data to STDIN of program
if self.input:
try:
self.process.stdin.write(self.input)
self.process.stdin.close()
except BrokenPipeError:
pass # program does not accept any STDIN
# start main cycle
            # time_limit is documented in milliseconds, so compare elapsed milliseconds against it
            while (time.time() - start) * 1000 <= (self.time_limit or float('inf')):
max_mem = max(max_mem, psutil_process.memory_info().vms)
                # Memory limit exceeded (memory_limit is in kB, memory_info().vms is in bytes)
                if max_mem / 1024.0 > self.memory_limit:
self.status.memory_limit_exceeded = True
break
# process finished
if self.process.poll() is not None:
self.status.returncode = self.process.returncode
break
# Time limit exceeded
if self.status.returncode is None:
if not self.status.memory_limit_exceeded:
self.status.time_limit_exceeded = True
self.process.kill()
self.status.time = round((time.time() - start) * 1000)
self.status.memory = max_mem / 1024
stdout_thread.join()
stderr_thread.join()
# get lost STDOUT
to_file = isinstance(self.stdout_file, str)
if to_file:
f = open(self.stdout_file, 'w')
while True:
try:
line = stdout_queue.get_nowait().decode('UTF-8')
except Empty:
break
else:
if to_file:
f.write(line)
stdout_summary += line
if to_file:
f.close()
# get lost STDERR
to_file = isinstance(self.stderr_file, str)
if to_file:
f = open(self.stderr_file, 'w')
while True:
try:
line = stderr_queue.get_nowait().decode('UTF-8')
except Empty:
break
else:
if to_file:
f.write(line)
stderr_summary += line
if to_file:
f.close()
# save STDOUT and STDERR to class vars
if stdout_summary:
self.status.stdout = stdout_summary
if stderr_summary:
self.status.stderr = stderr_summary | mit | 5,675,861,900,556,990,000 | 35.486188 | 112 | 0.547933 | false | 4.630435 | false | false | false |
danforthcenter/plantcv | plantcv/plantcv/morphology/fill_segments.py | 1 | 3054 | # Fill a mask using watershed and skeleton segments
import os
import cv2
import numpy as np
from skimage.segmentation import watershed
from plantcv.plantcv import outputs
from plantcv.plantcv import params
from plantcv.plantcv.visualize import colorize_label_img
from plantcv.plantcv._debug import _debug
def fill_segments(mask, objects, stem_objects=None, label="default"):
"""Fills masked segments from contours.
    Inputs:
    mask         = Binary image, single channel, object = 1 and background = 0
    objects      = List of contours
    stem_objects = Optional list of stem contours, combined and labeled as one segment
    label        = Optional label parameter, modifies the variable name of observations recorded
    Returns:
    filled_mask  = Labeled mask
:param mask: numpy.ndarray
:param objects: list
:param stem_objects: numpy.ndarray
:param label: str
:return filled_mask: numpy.ndarray
"""
h, w = mask.shape
markers = np.zeros((h, w))
objects_unique = objects.copy()
if stem_objects is not None:
objects_unique.append(np.vstack(stem_objects))
labels = np.arange(len(objects_unique)) + 1
for i, l in enumerate(labels):
cv2.drawContours(markers, objects_unique, i, int(l), 5)
# Fill as a watershed segmentation from contours as markers
filled_mask = watershed(mask == 0, markers=markers,
mask=mask != 0, compactness=0)
# Count area in pixels of each segment
ids, counts = np.unique(filled_mask, return_counts=True)
if stem_objects is None:
outputs.add_observation(sample=label, variable='segment_area', trait='segment area',
method='plantcv.plantcv.morphology.fill_segments',
scale='pixels', datatype=list,
value=counts[1:].tolist(),
label=(ids[1:]-1).tolist())
else:
outputs.add_observation(sample=label, variable='leaf_area', trait='segment area',
method='plantcv.plantcv.morphology.fill_segments',
scale='pixels', datatype=list,
value=counts[1:-1].tolist(),
label=(ids[1:-1]-1).tolist())
outputs.add_observation(sample=label, variable='stem_area', trait='segment area',
method='plantcv.plantcv.morphology.fill_segments',
scale='pixels', datatype=list,
value=counts[-1].tolist(),
label=(ids[-1]-1).tolist())
# rgb_vals = color_palette(num=len(labels), saved=False)
# filled_img = np.zeros((h, w, 3), dtype=np.uint8)
# for l in labels:
# for ch in range(3):
# filled_img[:, :, ch][filled_mask == l] = rgb_vals[l - 1][ch]
debug = params.debug
params.debug = None
filled_img = colorize_label_img(filled_mask)
params.debug = debug
_debug(visual=filled_img, filename=os.path.join(params.debug_outdir,
str(params.device) + "_filled_segments_img.png"))
return filled_mask
| mit | 46,040,409,376,672,670 | 37.658228 | 101 | 0.579895 | false | 4.045033 | false | false | false |
Zerknechterer/pyload | module/plugins/internal/Crypter.py | 1 | 3529 | # -*- coding: utf-8 -*-
import urlparse
from module.plugins.internal.Plugin import Plugin
from module.utils import decode, save_path
class Crypter(Plugin):
__name__ = "Crypter"
__type__ = "crypter"
__version__ = "0.03"
__pattern__ = r'^unmatchable$'
__config__ = [("use_subfolder", "bool", "Save package to subfolder", True), #: Overrides core.config.get("general", "folder_per_package")
("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
__description__ = """Base decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "[email protected]")]
html = None #: last html loaded #@TODO: Move to Hoster
def __init__(self, pyfile):
super(Crypter, self).__init__(pyfile)
#: Provide information in dict here
self.info = {} #@TODO: Move to Plugin
#: Put all packages here. It's a list of tuples like: ( name, [list of links], folder )
self.packages = []
#: List of urls, pyLoad will generate packagenames
self.urls = []
self.multiDL = True
self.limitDL = 0
def process(self, pyfile):
"""Main method"""
self.decrypt(pyfile)
if self.urls:
self._generate_packages()
elif not self.packages:
self.error(_("No link grabbed"), "decrypt")
self._create_packages()
def decrypt(self, pyfile):
raise NotImplementedError
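    # A concrete crypter overrides decrypt() and fills self.urls and/or self.packages.
    # Hedged sketch (the plugin name and the link-parsing helper are made up; load()
    # is assumed to be provided by the Plugin base class):
    #
    #   class MyFolderPlugin(Crypter):
    #       def decrypt(self, pyfile):
    #           html = self.load(pyfile.url)
    #           self.urls = extract_links(html)  # placeholder helper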
def _generate_packages(self):
"""Generate new packages from self.urls"""
packages = [(name, links, None) for name, links in self.core.api.generatePackages(self.urls).iteritems()]
self.packages.extend(packages)
def _create_packages(self):
"""Create new packages from self.packages"""
package_folder = self.pyfile.package().folder
package_password = self.pyfile.package().password
package_queue = self.pyfile.package().queue
folder_per_package = self.core.config.get('general', 'folder_per_package')
use_subfolder = self.getConfig('use_subfolder', folder_per_package)
subfolder_per_package = self.getConfig('subfolder_per_package', True)
for name, links, folder in self.packages:
self.logDebug("Parsed package: %s" % name,
"%d links" % len(links),
"Saved to folder: %s" % folder if folder else "Saved to download folder")
links = map(decode, links)
pid = self.core.api.addPackage(name, links, package_queue)
if package_password:
self.core.api.setPackageData(pid, {"password": package_password})
setFolder = lambda x: self.core.api.setPackageData(pid, {"folder": x or ""}) #@NOTE: Workaround to do not break API addPackage method
if use_subfolder:
if not subfolder_per_package:
setFolder(package_folder)
self.logDebug("Set package %(name)s folder to: %(folder)s" % {"name": name, "folder": folder})
elif not folder_per_package or name != folder:
if not folder:
folder = urlparse.urlparse(name).path.split("/")[-1]
setFolder(save_path(folder))
self.logDebug("Set package %(name)s folder to: %(folder)s" % {"name": name, "folder": folder})
elif folder_per_package:
setFolder(None)
| gpl-3.0 | -8,256,083,987,422,104,000 | 32.609524 | 146 | 0.575801 | false | 4.005675 | false | false | false |
cloudera/hue | desktop/libs/notebook/src/notebook/consumer.py | 2 | 2022 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from desktop.conf import has_channels
LOG = logging.getLogger(__name__)
if has_channels():
from asgiref.sync import async_to_sync
from channels.generic.websocket import AsyncWebsocketConsumer
from channels.layers import get_channel_layer
class EditorConsumer(AsyncWebsocketConsumer):
async def connect(self):
await self.accept()
LOG.info('User %(user)s connected to WS Editor.' % self.scope)
await self.send(
text_data=json.dumps({
'type': 'channel_name',
'data': self.channel_name,
'accept': True
})
)
async def task_progress(self, event):
await self.send(
text_data=json.dumps({
'type': 'query_progress',
'data': event["data"]
})
)
async def task_result(self, event):
await self.send(
text_data=json.dumps({
'type': 'query_result',
'data': event["data"]
})
)
def _send_to_channel(channel_name, message_type, message_data):
channel_layer = get_channel_layer()
async_to_sync(channel_layer.send)(
channel_name, {
"type": message_type,
"data": message_data,
}
)
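  # Example of how a task runner might push progress to a connected editor socket,
  # using the channel_name that EditorConsumer sends to the client on connect
  # (the payload shape is illustrative only):
  #
  #   _send_to_channel(channel_name, "task_progress", {"progress": 40})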
| apache-2.0 | -3,856,826,571,091,781,000 | 26.69863 | 74 | 0.661227 | false | 3.956947 | false | false | false |
RAPD/RAPD | src/launch/launcher_adapters/echo_simple.py | 1 | 3396 | """
Simple echo RAPD launcher adapter
For use in testing setup
"""
"""
This file is part of RAPD
Copyright (C) 2017, Cornell University
All rights reserved.
RAPD is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, version 3.
RAPD is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__created__ = "2017-07-11"
__maintainer__ = "Your name"
__email__ = "Your email"
__status__ = "Development"
# Standard imports
# import argparse
# import from collections import OrderedDict
# import datetime
# import glob
import logging
# import multiprocessing
# import os
from pprint import pprint
# import pymongo
# import re
#import redis
# import shutil
# import subprocess
# import sys
# import time
# import unittest
# import urllib2
# import uuid
# from distutils.spawn import find_executable
import importlib
# RAPD imports
# import commandline_utils
# import detectors.detector_utils as detector_utils
# import utils
# import utils.credits as credits
from utils import exceptions
import utils.launch_tools as launch_tools
from utils.text import json
from bson.objectid import ObjectId
class LauncherAdapter(object):
"""
RAPD adapter for launcher process
Doesn't launch the job, but merely echoes it back
"""
redis = None
def __init__(self, site, message, settings):
"""
Initialize the plugin
Keyword arguments
site -- imported site definition module
message -- command from the control process, encoded as JSON
settings --
"""
# Get the logger Instance
self.logger = logging.getLogger("RAPDLogger")
self.logger.debug("__init__")
# Store passed-in variables
self.site = site
self.message = message
self.settings = settings
#print "site"
#pprint(site)
#print "message"
#pprint(message)
#print "settings"
#pprint(settings)
self.run()
def run(self):
"""Orchestrate the adapter's actions"""
self.preprocess()
self.process()
self.postprocess()
def preprocess(self):
"""Adjust the command passed in in install-specific ways"""
# Connect to redis
redis_database = importlib.import_module('database.redis_adapter')
#redis_db = redis_database.Database(settings=self.site.CONTROL_DATABASE_SETTINGS)
#self.redis = redis_db.connect_to_redis()
self.redis = redis_database.Database(settings=self.site.CONTROL_DATABASE_SETTINGS)
def process(self):
"""The main action of the adapter"""
# Set status on message to done
self.message["process"]["status"] = 100
def postprocess(self):
"""Clean up after adapter functions"""
# Encode in JSON
json_message = json.dumps(self.message)
# Pass back result
self.redis.publish("RAPD_RESULTS", json_message)
self.redis.lpush("RAPD_RESULTS", json_message)
| agpl-3.0 | -115,120,683,335,896,460 | 24.923664 | 90 | 0.676678 | false | 4.208178 | false | false | false |
stxent/kmodgen | mod.py | 1 | 6212 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# mod.py
# Copyright (C) 2016 xent
# Project is distributed under the terms of the GNU General Public License v3.0
import argparse
import inspect
import json
import os
import re
import sys
import numpy
from wrlconv import model
from wrlconv import vrml_export
from wrlconv import vrml_export_kicad
from wrlconv import vrml_import
from wrlconv import x3d_export
from wrlconv import x3d_import
from packages import *
def load_materials(entries):
def decode(desc, title):
material = model.Material()
material.color.ident = title.capitalize()
if 'shininess' in desc:
material.color.shininess = float(desc['shininess'])
if 'transparency' in desc:
material.color.transparency = float(desc['transparency'])
if 'diffuse' in desc:
material.color.diffuse = numpy.array(desc['diffuse'])
if 'specular' in desc:
material.color.specular = numpy.array(desc['specular'])
if 'emissive' in desc:
material.color.emissive = numpy.array(desc['emissive'])
if 'ambient' in desc:
material.color.ambient = numpy.array(desc['ambient'])
return material
materials = {}
for entry in entries:
materials.update({entry.capitalize(): decode(entries[entry], entry)})
return materials
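# A "materials" entry in a part description file is expected to look roughly like the
# JSON below (field names taken from decode() above; the values are illustrative):
#
#   "materials": {
#       "body": {"shininess": 0.5, "diffuse": [0.1, 0.1, 0.1], "transparency": 0.0},
#       "lead": {"diffuse": [0.7, 0.7, 0.7], "specular": [0.9, 0.9, 0.9]}
#   }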
def load_models(files, pattern):
builders = [entry[1] for entry in inspect.getmembers(sys.modules['packages'])
if inspect.ismodule(entry[1]) and entry[1].__name__.startswith('packages.')]
types = []
for entry in builders:
types.extend(entry.__dict__['types'])
models = []
pattern_re = re.compile(pattern, re.S)
for filename in files:
desc = json.load(open(filename, 'rb'))
materials = load_materials(desc['materials']) if 'materials' in desc else {}
templates = load_templates(desc['templates'],
os.path.dirname(filename)) if 'templates' in desc else []
for part in filter(lambda x: pattern_re.search(x['title']) is not None, desc['parts']):
for package in types:
if package.__name__ == part['package']['type']:
models.append((package().generate(materials, templates, part), part['title']))
return models
def load_templates(entries, path):
templates = []
for entry in entries:
script_path = path + '/' + entry
extension = os.path.splitext(script_path)[1][1:].lower()
if extension == 'wrl':
templates.extend(vrml_import.load(script_path))
elif extension == 'x3d':
templates.extend(x3d_import.load(script_path))
return templates
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='debug', help='show debug information',
default=False, action='store_true')
parser.add_argument('-f', dest='pattern', help='filter parts by name',
default='.*')
parser.add_argument('-l', dest='library', help='add footprints to a specified library',
default=None)
parser.add_argument('-o', dest='output', help='write models to a specified directory',
default='')
parser.add_argument('-v', dest='view', help='render models',
default=False, action='store_true')
parser.add_argument('--fast', dest='fast', help='disable visual effects',
default=False, action='store_true')
parser.add_argument('--no-grid', dest='simple', help='disable grid',
default=False, action='store_true')
parser.add_argument('--normals', dest='normals', help='show normals',
default=False, action='store_true')
parser.add_argument('--smooth', dest='smooth', help='use smooth shading',
default=False, action='store_true')
parser.add_argument('--vrml', dest='vrml', help='use VRML model format',
default=False, action='store_true')
parser.add_argument(dest='files', nargs='*')
return parser.parse_args()
def render_models(models, is_fast, is_simple, is_debug):
if not models:
print('Empty set of models')
sys.exit()
if is_debug:
render_ogl41.debug_enabled = True
effects = {} if is_fast else {'antialiasing': 4}
helper_objects = [] if is_simple else helpers.create_grid()
export_list = []
for entry in models:
export_list.extend(entry[0])
render = render_ogl41.Render(helper_objects + export_list, effects)
render.run()
def write_models(models, library, output, is_vrml, is_debug=False):
if library is not None:
library_path = os.path.join(output, library)
else:
library_path = output
if not os.path.exists(library_path):
os.makedirs(library_path)
extension = '.wrl' if is_vrml else '.x3d'
export_func = vrml_export_kicad.store if is_vrml else x3d_export.store
for group in models:
export_func(group[0], os.path.join(library_path, group[1] + extension))
if is_debug:
print('Model {:s}:{:s} was exported'.format(group[1], extension))
def main(options):
models = load_models(options.files, options.pattern)
if options.output != '':
write_models(models, options.library, options.output, options.vrml, options.debug)
if options.normals or options.smooth:
for group in models:
for entry in group[0]:
entry.appearance().normals = options.normals
entry.appearance().smooth = options.smooth
if options.view:
render_models(models, options.fast, options.simple, options.debug)
if __name__ == '__main__':
parsed_options = parse_args()
if parsed_options.debug:
vrml_export.debug_enabled = True
vrml_export_kicad.debug_enabled = True
vrml_import.debug_enabled = True
x3d_import.debug_enabled = True
x3d_export.debug_enabled = True
if parsed_options.view:
from wrlconv import helpers
from wrlconv import render_ogl41
main(parsed_options)
| gpl-3.0 | -6,714,082,840,905,657,000 | 35.541176 | 98 | 0.619929 | false | 3.887359 | false | false | false |
lexodistro/blissflixx | lib/player/processpipe.py | 2 | 6422 | from Queue import Queue
from threading import Thread
import subprocess32 as subprocess
import os, select, signal, cherrypy, shutil
MSG_PROCESS_READY = 1
MSG_PROCESS_HALTED = 2
MSG_PROCESS_FINISHED = 3
MSG_PLAYER_PIPE_STOPPED = 4
TMP_DIR = "/tmp/blissflixx"
OUT_FILE = "/tmp/blissflixx/bf.out"
def _start_thread(target, *args):
th = Thread(target=target, args=args)
th.daemon = True
th.start()
return th
class _DiscardFile(object):
def write(self, *args):
pass
def close(self):
pass
def _copypipe(src, dest):
if not dest:
dest = _DiscardFile()
# Ignore broken pipe errors if process
# are forced to stop
try:
shutil.copyfileobj(src, dest)
except Exception:
pass
src.close()
dest.close()
def _bgcopypipe(src, dest):
return _start_thread(_copypipe, src, dest)
class ProcessException(Exception):
pass
class ProcessPipe(object):
def __init__(self, title):
self.title = title
self.procs = []
self.threads = []
self.msgq = Queue()
self.next_proc = 0
self.stopping = False
self.started = False
def status_msg(self):
if self.started:
return self.title
else:
idx = self.next_proc - 1
if idx < 0:
idx = 0
return self.procs[idx].status_msg()
def add_process(self, proc):
self.procs.append(proc)
def start(self, pmsgq):
self.pmsgq = pmsgq
self._start_next()
while True:
m = self.msgq.get()
idx = self.msgq.get()
name = self.procs[idx].name()
if m == MSG_PROCESS_READY:
cherrypy.log("READY: " + name)
args = self.msgq.get()
if not self._is_last_proc(idx):
self._start_next(args)
else:
self.started = True
elif m == MSG_PROCESS_FINISHED:
cherrypy.log("FINISHED: " + name)
if self._is_last_proc(idx):
self.stop()
break
elif m == MSG_PROCESS_HALTED:
cherrypy.log("HALTED: " + name)
self.stop()
break
def _last_proc(self):
return self.procs[len(self.procs) - 1]
def _is_last_proc(self, idx):
return idx == len(self.procs) - 1
def _start_next(self, args={}):
proc = self.procs[self.next_proc]
cherrypy.log("STARTING: " + proc.name())
proc.set_msgq(self.msgq, self.next_proc)
self.threads.append(_start_thread(proc.start, args))
self.next_proc = self.next_proc + 1
def stop(self):
if self.stopping:
return
self.stopping = True
self.started = False
error = None
for idx in xrange(self.next_proc-1, -1, -1):
proc = self.procs[idx]
proc.stop()
self.threads[idx].join()
if proc.has_error():
error = proc.get_errors()[0]
cherrypy.log("GOT ERROR: " + error)
self.pmsgq.put(MSG_PLAYER_PIPE_STOPPED)
self.pmsgq.put(error)
def is_started(self):
return self.started
def is_stopping(self):
return self.stopping
def control(self, action):
if self.is_started():
self._last_proc().control(action)
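# Rough wiring sketch: concrete Process subclasses are queued into a ProcessPipe and
# the pipe is driven from the player thread (the subclass names and the queue are
# placeholders, not classes defined in this module):
#
#   pipe = ProcessPipe("Some title")
#   pipe.add_process(SourceProcess())   # hypothetical Process subclass
#   pipe.add_process(PlayerProcess())   # hypothetical Process subclass
#   pipe.start(player_msgq)             # blocks; posts MSG_PLAYER_PIPE_STOPPED when done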
class Process(object):
def __init__(self):
self.errors = []
def set_msgq(self, msgq, procidx):
self.msgq = msgq
self.procidx = procidx
def _send(self, msg, args=None):
self.msgq.put(msg)
self.msgq.put(self.procidx)
if args is not None:
self.msgq.put(args)
def _set_error(self, msg):
self.errors.append(msg)
def get_errors(self):
return self.errors
def has_error(self):
return len(self.errors) > 0
def status_msg(self):
return "LOADING STREAM"
def name(self):
raise NotImplementedError('This method must be implemented by subclasses')
def start(self, args):
raise NotImplementedError('This method must be implemented by subclasses')
def stop(self):
raise NotImplementedError('This method must be implemented by subclasses')
def msg_ready(self, args=None):
if args is None:
args = {}
self._send(MSG_PROCESS_READY, args)
def msg_halted(self):
self._send(MSG_PROCESS_HALTED)
def msg_finished(self):
self._send(MSG_PROCESS_FINISHED)
class ExternalProcess(Process):
def __init__(self, shell=False):
Process.__init__(self)
self.shell = shell
self.killing = False
if not os.path.exists(TMP_DIR):
os.makedirs(TMP_DIR)
def start(self, args):
cmd = self._get_cmd(args)
self.proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT,
stdout=subprocess.PIPE, preexec_fn=os.setsid,
shell=self.shell)
try:
args = self._ready()
self.msg_ready(args)
except ProcessException, e:
# Ignore errors if process is being killed
if not self.killing:
self._set_error(str(e))
self._wait()
def _wait(self):
# Drain stderr/stdout pipe to stop it filling up and blocking process
cpthr = _bgcopypipe(self.proc.stdout, None)
retcode = self.proc.wait()
self.proc = None
#if retcode != 0:
# cherrypy.log("Process exited with code: " + str(retcode))
if self.has_error() or self.killing:
self.msg_halted()
else:
self.msg_finished()
def stop(self):
if self.proc is not None:
# Stop gets called from a seperate thread
# so shutdown may already be in progress
# when we try to kill - therefore ignore errors
try:
# kill - including all children of process
self.killing = True
os.killpg(self.proc.pid, signal.SIGKILL)
except Exception, e:
pass
if os.path.exists(OUT_FILE):
try:
os.remove(OUT_FILE)
except Exception:
pass
def _get_cmd(self):
raise NotImplementedError('This method must be implemented by subclasses')
def _ready(self):
raise NotImplementedError('This method must be implemented by subclasses')
def _readline(self, timeout=None):
poll_obj = select.poll()
poll_obj.register(self.proc.stdout, select.POLLIN)
while self.proc.poll() is None:
if timeout is not None:
poll_result = poll_obj.poll(1000 * timeout)
if not poll_result:
raise ProcessException("Timed out waiting for input")
line = self.proc.stdout.readline()
if not line:
raise ProcessException("Process suddenly died")
line = line.strip()
if line.strip() != '':
return line
raise ProcessException("Process exit: "+str(self.proc.returncode))
| gpl-2.0 | -3,475,898,093,572,110,300 | 23.7 | 79 | 0.622859 | false | 3.461995 | false | false | false |
eggmaster/tempest | tempest/api_schema/response/compute/v2_1/quota_classes.py | 31 | 1490 | # Copyright 2014 IBM Corporation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.response.compute.v2_1 import quotas
# NOTE(mriedem): os-quota-class-sets responses are the same as os-quota-sets
# except for the key in the response body is quota_class_set instead of
# quota_set, so update this copy of the schema from os-quota-sets.
get_quota_class_set = copy.deepcopy(quotas.get_quota_set)
get_quota_class_set['response_body']['properties']['quota_class_set'] = (
get_quota_class_set['response_body']['properties'].pop('quota_set'))
get_quota_class_set['response_body']['required'] = ['quota_class_set']
update_quota_class_set = copy.deepcopy(quotas.update_quota_set)
update_quota_class_set['response_body']['properties']['quota_class_set'] = (
update_quota_class_set['response_body']['properties'].pop('quota_set'))
update_quota_class_set['response_body']['required'] = ['quota_class_set']
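# Net effect of the two copies above: the schemas are identical to the os-quota-sets
# ones except for the renamed body key, e.g. a valid response body is
# {"quota_class_set": {...}} rather than {"quota_set": {...}}.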
| apache-2.0 | -242,331,995,297,467,940 | 47.064516 | 78 | 0.730201 | false | 3.581731 | false | false | false |
bentzinir/Buffe | Applications/mgail/mgail.py | 1 | 11459 | from ER import ER
import tensorflow as tf
import common
import numpy as np
class MGAIL(object):
def __init__(self, environment):
self.env = environment
self.do_keep_prob = tf.placeholder("float", shape=(), name='do_keep_prob')
self.forward_model = __import__('forward_model').ForwardModel(state_size=self.env.state_size,
action_size=self.env.action_size,
rho=self.env.fm_rho,
beta=self.env.fm_beta,
encoding_size=self.env.fm_encoding_size,
batch_size=self.env.fm_batch_size,
multi_layered_encoder=self.env.fm_multi_layered_encoder,
num_steps=self.env.fm_num_steps,
separate_encoders=self.env.fm_separate_encoders,
merger=self.env.fm_merger,
activation=self.env.fm_activation,
lstm=self.env.fm_lstm,
dropout_keep=self.env.do_keep_prob)
autoencoder = None
transformed_state_size = self.env.state_size
self.discriminator = __import__('discriminator').DISCRIMINATOR(in_dim=transformed_state_size + self.env.action_size,
out_dim=2,
size=self.env.d_size,
lr=self.env.d_lr,
do_keep_prob=self.do_keep_prob,
weight_decay=self.env.weight_decay)
self.policy = __import__('policy').POLICY(in_dim=transformed_state_size,
out_dim=self.env.action_size,
size=self.env.p_size,
lr=self.env.p_lr,
w_std=self.env.w_std,
do_keep_prob=self.do_keep_prob,
n_accum_steps=self.env.policy_accum_steps,
weight_decay=self.env.weight_decay)
        # second copy of the policy network; it is required below by the temporal
        # regularization term and the copy_weights op
        self.policy_ = __import__('policy').POLICY(in_dim=transformed_state_size,
                                                   out_dim=self.env.action_size,
                                                   size=self.env.p_size,
                                                   lr=self.env.p_lr,
                                                   w_std=self.env.w_std,
                                                   do_keep_prob=self.do_keep_prob,
                                                   n_accum_steps=self.env.policy_accum_steps,
                                                   weight_decay=self.env.weight_decay)
self.er_agent = ER(memory_size=self.env.er_agent_size,
state_dim=self.env.state_size,
action_dim=self.env.action_size,
reward_dim=1, # stub connection
qpos_dim=self.env.qpos_size,
qvel_dim=self.env.qvel_size,
batch_size=self.env.batch_size,
history_length=1)
self.er_expert = common.load_er(fname=self.env.run_dir + self.env.expert_data,
batch_size=self.env.batch_size,
history_length=1,
traj_length=2)
self.env.sigma = self.er_expert.actions_std/self.env.noise_intensity
self.states_ = tf.placeholder("float", shape=(None, self.env.state_size), name='states_') # Batch x State
self.states = tf.placeholder("float", shape=(None, self.env.state_size), name='states') # Batch x State
self.actions = tf.placeholder("float", shape=(None, self.env.action_size), name='action') # Batch x Action
self.label = tf.placeholder("float", shape=(None, 1), name='label')
self.gamma = tf.placeholder("float", shape=(), name='gamma')
self.temp = tf.placeholder("float", shape=(), name='temperature')
self.noise = tf.placeholder("float", shape=(), name='noise_flag')
self.noise_mean = tf.placeholder("float", shape=(self.env.action_size))
states_ = common.normalize(self.states_, self.er_expert.states_mean, self.er_expert.states_std)
states = common.normalize(self.states, self.er_expert.states_mean, self.er_expert.states_std)
if self.env.continuous_actions:
actions = common.normalize(self.actions, self.er_expert.actions_mean, self.er_expert.actions_std)
else:
actions = self.actions
self.forward_model.states_normalizer = self.er_expert.states_max - self.er_expert.states_min
self.forward_model.actions_normalizer = self.er_expert.actions_max - self.er_expert.actions_min
self.forward_model.states_normalizer = self.er_expert.states_std
self.forward_model.actions_normalizer = self.er_expert.actions_std
s = np.ones((1, self.forward_model.arch_params['encoding_dim']))
# 1. Forward Model
fm_output, _, gru_state = self.forward_model.forward([states_, actions, s])
l2_loss = tf.reduce_mean(tf.square(states-fm_output))
self.forward_model.train(objective=l2_loss)
# 2. Discriminator
labels = tf.concat(1, [1 - self.label, self.label])
d = self.discriminator.forward(states, actions, autoencoder)
# 2.1 0-1 accuracy
correct_predictions = tf.equal(tf.argmax(d, 1), tf.argmax(labels, 1))
self.discriminator.acc = tf.reduce_mean(tf.cast(correct_predictions, "float"))
# 2.2 prediction
d_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=d, labels=labels)
# cost sensitive weighting (weigh true=exprt, predict=agent mistakes)
d_loss_weighted = self.env.cost_sensitive_weight * tf.mul(tf.to_float(tf.equal(tf.squeeze(self.label), 1.)), d_cross_entropy) +\
tf.mul(tf.to_float(tf.equal(tf.squeeze(self.label), 0.)), d_cross_entropy)
discriminator_loss = tf.reduce_mean(d_loss_weighted)
self.discriminator.train(objective=discriminator_loss)
self.discriminator.acc_summary = tf.scalar_summary('acc_d', self.discriminator.acc)
# 3. Collect experience
mu = self.policy.forward(states, autoencoder)
if self.env.continuous_actions:
a = common.denormalize(mu, self.er_expert.actions_mean, self.er_expert.actions_std)
eta = tf.random_normal(shape=tf.shape(a), stddev=self.env.sigma, mean=self.noise_mean)
self.action_test = tf.squeeze(a + self.noise * eta)
else:
a = common.gumbel_softmax(logits=mu, temperature=self.temp)
self.action_test = tf.argmax(a, dimension=1)
# 4. Policy
# 4.1 SL
actions_a = self.policy.forward(states, autoencoder)
policy_sl_loss = tf.nn.l2_loss(actions_a - actions) # action == expert action
self.policy.train(objective=policy_sl_loss, mode='sl')
# 4.2 Temporal Regularization
actions_a_ = self.policy_.forward(states, autoencoder)
policy_tr_loss = self.env.policy_tr_w * self.env.policy_accum_steps * tf.nn.l2_loss(actions_a - actions_a_)
self.policy.train(objective=policy_tr_loss, mode='tr')
# op for copying weights from policy to policy_
self.policy_.copy_weights(self.policy.weights, self.policy.biases)
# Plain adversarial learning
d = self.discriminator.forward(states, actions_a, autoencoder)
policy_alr_loss = self.al_loss(d)
self.policy.train(objective=policy_alr_loss, mode='alr')
# 4.3 AL
def policy_loop(state_, t, total_cost, total_trans_err, _):
mu = self.policy.forward(state_, autoencoder)
if self.env.continuous_actions:
eta = self.env.sigma * tf.random_normal(shape=tf.shape(mu), mean=self.noise_mean)
a = mu + eta
else:
a = common.gumbel_softmax_sample(logits=mu, temperature=self.temp)
# minimize the gap between agent logit (d[:,0]) and expert logit (d[:,1])
d = self.discriminator.forward(state_, a, autoencoder)
cost = self.al_loss(d)
# add step cost
total_cost += tf.mul(tf.pow(self.gamma, t), cost)
# get next state
if self.env.continuous_actions:
a_sim = common.denormalize(a, self.er_expert.actions_mean, self.er_expert.actions_std)
else:
a_sim = tf.argmax(a, dimension=1)
state_env, _, env_term_sig, = self.env.step(a_sim, mode='tensorflow')[:3]
state_e = common.normalize(state_env, self.er_expert.states_mean, self.er_expert.states_std)
state_e = tf.stop_gradient(state_e)
state_a, _, _ = self.forward_model.forward([state_, a, s])
state, nu = common.re_parametrization(state_e=state_e, state_a=state_a)
total_trans_err += tf.reduce_mean(abs(nu))
t += 1
return state, t, total_cost, total_trans_err, env_term_sig
def policy_stop_condition(state_, t, cost, trans_err, env_term_sig):
cond = tf.logical_not(env_term_sig)
cond = tf.logical_and(cond, t < self.env.n_steps_train)
cond = tf.logical_and(cond, trans_err < self.env.total_trans_err_allowed)
return cond
state_0 = tf.slice(states, [0, 0], [1, -1])
loop_outputs = tf.while_loop(policy_stop_condition, policy_loop, [state_0, 0., 0., 0., False])
self.policy.train(objective=loop_outputs[2], mode='al')
def al_loss(self, d):
logit_agent, logit_expert = tf.split(split_dim=1, num_split=2, value=d)
logit_gap = logit_agent - logit_expert
valid_cond = tf.stop_gradient(tf.to_float(logit_gap > 0))
valid_gaps = tf.mul(logit_gap, valid_cond)
# L2
if self.env.al_loss == 'L2':
loss = tf.nn.l2_loss(tf.mul(logit_gap, tf.to_float(logit_gap > 0)))
# L1
elif self.env.al_loss == 'L1':
loss = tf.reduce_mean(valid_gaps)
# Cross entropy
elif self.env.al_loss == 'CE':
labels = tf.concat(1, [tf.zeros_like(logit_agent), tf.ones_like(logit_expert)])
d_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=d, labels=labels)
loss = tf.reduce_mean(d_cross_entropy)
return loss*self.env.policy_al_w
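    # Reading of the loss above: for the 'L1' and 'L2' variants only positive gaps
    # (agent logit above expert logit) contribute, i.e. roughly
    #   loss = policy_al_w * penalty(max(logit_agent - logit_expert, 0))
    # while the 'CE' variant is a cross-entropy that pushes d toward the expert class.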
| mit | 8,101,156,917,626,142,000 | 54.357488 | 136 | 0.516275 | false | 3.856951 | false | false | false |
Glutanimate/image-occlusion-2-enhanced | src/image_occlusion_enhanced/nconvert.py | 1 | 10279 | # -*- coding: utf-8 -*-
# Image Occlusion Enhanced Add-on for Anki
#
# Copyright (C) 2016-2020 Aristotelis P. <https://glutanimate.com/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version, with the additions
# listed at the end of the license file that accompanied this program.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# NOTE: This program is subject to certain additional terms pursuant to
# Section 7 of the GNU Affero General Public License. You should have
# received a copy of these additional terms immediately following the
# terms and conditions of the GNU Affero General Public License that
# accompanied this program.
#
# If not, please request a copy through one of the means of contact
# listed here: <https://glutanimate.com/contact/>.
#
# Any modifications to this file must keep this entire header intact.
"""
Makes older IO notes editable.
"""
import logging
from aqt.qt import *
from anki.hooks import addHook
from aqt.utils import tooltip
from xml.dom import minidom
from .config import *
from .dialogs import ioAskUser
from .utils import img2path, fname2img
class ImgOccNoteConverter(object):
def __init__(self, browser):
self.browser = browser
self.occl_id_last = None
loadConfig(self)
def convertNotes(self, nids):
"""Main note conversion method"""
nids_by_nr = {}
skipped = 0
(io_nids, filtered) = self.filterSelected(nids)
for nid in io_nids:
note = mw.col.getNote(nid)
(uniq_id, note_nr) = self.getDataFromNamingScheme(note)
if uniq_id == False:
logging.debug("Skipping note that couldn't be parsed: %s", nid)
skipped += 1
continue
occl_tp = self.getOcclTypeAndNodes(note)
occl_id = uniq_id + '-' + occl_tp
if occl_id == self.occl_id_last:
logging.debug(
"Skipping note that we've just converted: %s", nid)
continue
self.occl_id_last = occl_id
for nid in self.findByNoteId(uniq_id):
note = mw.col.getNote(nid)
(uniq_id, note_nr) = self.getDataFromNamingScheme(note)
if uniq_id == False:
logging.debug(
"Skipping note that couldn't be parsed: %s", nid)
skipped += 1
continue
nids_by_nr[int(note_nr)] = nid
self.idAndCorrelateNotes(nids_by_nr, occl_id)
converted = len(io_nids)
tooltip("<b>%i</b> notes updated, <b>%i</b> skipped"
% (converted - skipped, filtered + skipped))
def filterSelected(self, nids):
"""Filters out notes with the wrong note type and those that are
valid already"""
io_nids = []
filtered = 0
for nid in nids:
note = mw.col.getNote(nid)
if note.model() != self.model:
logging.debug("Skipping note with wrong note type: %s", nid)
filtered += 1
continue
elif note[self.ioflds['id']]:
logging.debug(
"Skipping IO note that is already editable: %s", nid)
filtered += 1
continue
elif not note[self.ioflds['om']]:
logging.debug(
"Skipping IO note without original SVG mask: %s", nid)
filtered += 1
continue
logging.debug("Found IO note in need of update: %s", nid)
io_nids.append(nid)
return (io_nids, filtered)
def findByNoteId(self, note_id):
"""Search collection for notes with given ID in their omask paths"""
# need to use omask path because Note ID field is not yet set
query = '"%s:*%s*"' % (self.ioflds['om'], note_id)
logging.debug("query: %s", query)
res = mw.col.findNotes(query)
return res
def getDataFromNamingScheme(self, note):
"""Get unique ID and note nr from qmask path"""
qmask = note[self.ioflds['qm']]
path = img2path(qmask, True)
if not path:
return (False, None)
grps = path.split('_')
try:
if len(grps) == 2:
logging.debug("Extracting data using IO 2.0 naming scheme")
uniq_id = grps[0]
note_nr = path.split(' ')[1].split('.')[0]
else:
logging.debug(
"Extracting data using IO Enhanced naming scheme")
grps = path.split('-')
uniq_id = grps[0]
note_nr = int(grps[2]) - 1
return (uniq_id, note_nr)
except IndexError:
return (False, None)
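    # Filename shapes this parser implies (reconstructed from the splitting logic
    # above, not taken from real data, so treat them as illustrative):
    #   IO 2.0:       "<uniq_id>_QA 3.svg"          -> uniq_id, note_nr "3"
    #   IO Enhanced:  "<uniq_id>-<occl_tp>-3-Q.svg" -> uniq_id, note_nr 2 (zero-based)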
def idAndCorrelateNotes(self, nids_by_nr, occl_id):
"""Update Note ID fields and omasks of all occlusion session siblings"""
logging.debug("occl_id %s", occl_id)
logging.debug("nids_by_nr %s", nids_by_nr)
logging.debug("mnode_idxs %s", self.mnode_idxs)
for nr in sorted(nids_by_nr.keys()):
try:
midx = self.mnode_idxs[nr]
except IndexError:
continue
nid = nids_by_nr[nr]
note = mw.col.getNote(nid)
new_mnode_id = occl_id + '-' + str(nr+1)
self.mnode.childNodes[midx].setAttribute("id", new_mnode_id)
note[self.ioflds['id']] = new_mnode_id
note.flush()
logging.debug("Adding ID for note nr %s", nr)
logging.debug("midx %s", midx)
logging.debug("nid %s", nid)
logging.debug("note %s", note)
logging.debug("new_mnode_id %s", new_mnode_id)
new_svg = self.svg_node.toxml()
omask_path = self._saveMask(new_svg, occl_id, "O")
logging.debug("omask_path %s", omask_path)
for nid in list(nids_by_nr.values()):
note = mw.col.getNote(nid)
note[self.ioflds['om']] = fname2img(omask_path)
note.addTag(".io-converted")
note.flush()
logging.debug("Setting om and tag for nid %s", nid)
def getOcclTypeAndNodes(self, note):
"""Determine oclusion type and svg mask nodes"""
nr_of_masks = {}
mnode_idxs = {}
svg_mlayer = {}
for i in ["qm", "om"]: # om second, so that end vars are correct
svg_file = img2path(note[self.ioflds[i]], True)
svg_node = self.readSvg(svg_file)
svg_mlayer = self.layerNodesFrom(svg_node)[-1] # topmost layer
mnode_idxs = self.getMaskNodes(svg_mlayer)
nr_of_masks[i] = len(mnode_idxs)
# decide on occl_tp based on nr of mask nodes in omask vs qmask
if nr_of_masks["om"] != nr_of_masks["qm"]:
occl_tp = "oa"
else:
occl_tp = "ao"
self.svg_node = svg_node
self.mnode = svg_mlayer
self.mnode_idxs = mnode_idxs
return occl_tp
def readSvg(self, svg_file):
"""Read and fix malformatted IO 2.0 SVGs"""
svg_doc = minidom.parse(svg_file)
# ugly workaround for wrong namespace in older IO notes:
svg_string = svg_doc.toxml().replace('ns0:', '').replace(':ns0', '')
svg_string = str(svg_string)
svg_doc = minidom.parseString(svg_string.encode('utf-8'))
svg_node = svg_doc.documentElement
return svg_node
def getMaskNodes(self, mlayer):
"""Find mask nodes in masks layer"""
mnode_indexes = []
for i, node in enumerate(mlayer.childNodes):
if (node.nodeType == node.ELEMENT_NODE) and (node.nodeName != 'title'):
mnode_indexes.append(i)
return mnode_indexes
def layerNodesFrom(self, svg_node):
"""Get layer nodes (topmost group nodes below the SVG node)"""
assert (svg_node.nodeType == svg_node.ELEMENT_NODE)
assert (svg_node.nodeName == 'svg')
layer_nodes = [node for node in svg_node.childNodes
if node.nodeType == node.ELEMENT_NODE]
assert (len(layer_nodes) >= 1)
# last, i.e. top-most element, needs to be a layer:
assert (layer_nodes[-1].nodeName == 'g')
return layer_nodes
def _saveMask(self, mask, note_id, mtype):
"""Write mask to file in media collection"""
logging.debug("!saving %s, %s", note_id, mtype)
mask_path = '%s-%s.svg' % (note_id, mtype)
        mask_file = open(mask_path, 'wb')
mask_file.write(mask.encode('utf-8'))
mask_file.close()
return mask_path
def onIoConvert(self):
"""Launch initial dialog, set up checkpoint, invoke converter"""
mw = self.mw
selected = self.selectedNotes()
if not selected:
tooltip("No cards selected.", period=2000)
return
ret = ioAskUser("question_nconvert", title="Please confirm action",
parent=self, defaultno=True)
if not ret:
return False
mw.progress.start()
mw.checkpoint("Image Occlusion Note Conversions")
self.model.beginReset()
conv = ImgOccNoteConverter(self)
conv.convertNotes(selected)
self.model.endReset()
mw.col.reset()
mw.reset()
mw.progress.finish()
# Set up menus and hooks
def setupMenu(self):
menu = self.form.menuEdit
menu.addSeparator()
a = menu.addAction("Convert to Editable IO &Enhanced Notes")
a.triggered.connect(lambda _, b=self: onIoConvert(b))
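    # The default argument binds the browser instance at definition time so
    # the slot still receives it when the menu action fires later.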
try:
from aqt.gui_hooks import browser_menus_did_init
browser_menus_did_init.append(setupMenu)
except (ImportError, ModuleNotFoundError):
addHook("browser.setupMenus", setupMenu)
| bsd-2-clause | -7,466,364,075,666,562,000 | 36.790441 | 83 | 0.583909 | false | 3.698813 | false | false | false |
hmeine/vigra | vigranumpy/examples/eccentricity/eccentricity_transform.py | 3 | 5735 | import vigra
import numpy
import vigra.graphs as vigraph
import matplotlib.pyplot as plt
import scipy.misc
import sys
gamma = 0.0001
percentage = 2
f = "figure_1.png"
## img: image segment with 0: inside, 1: outside
## distFunc: function applied after distance transform, must be one of "exponential", "linear", "inverse"
## showPathImage: if True, the image with distance transform and paths will be shown
## percentageOfPaths: percentage of computed paths
def eccentricity( img, distFunc = "exponential", showPathImage = False, percentageOfPaths = 100, imgSaveName = "" ):
## Enlarge image by one pixel on each side
img = img.astype(numpy.uint8)
bigImg = numpy.ones( (img.shape[0]+2, img.shape[1]+2) )
bigImg[1:bigImg.shape[0]-1, 1:bigImg.shape[1]-1] = img
## Find borders in img (replace with graph functions)
borderImg = numpy.zeros(bigImg.shape)
for y in range(bigImg.shape[1]-1):
for x in range(bigImg.shape[0]-1):
if bigImg[x,y] == 0:
if bigImg[x+1,y] == 1 or bigImg[x,y+1] == 1:
borderImg[x, y] = 1
else:
if bigImg[x+1,y] == 0:
borderImg[x+1, y] = 1
if bigImg[x,y+1] == 0:
borderImg[x, y+1] = 1
## regionImageToCrackEdgeImage ( labelImage )
# ## Apply distanceTransform and modify (outside: high values, inside: low values)
# distImage = vigra.filters.distanceTransform2D(bigImg.astype(numpy.float32))
# if showPathImage:
# imgp = distImage.copy()
# if distFunc == "exponential":
# distImage = numpy.exp(distImage*-gamma)
# elif distFunc == "linear":
# maxDist = distImage.max()
# distImage = maxDist - distImage
# elif distFunc == "inverse":
# w = numpy.where(distImage!=0)
# distImage[w] = 1/distImage[w]
# else:
# print "wrong parameters for distFunc in eccentricity"
## Distance in the inside between two pixels is 1.0
distImage = bigImg.copy().astype(numpy.float32)
distImage[numpy.where(bigImg==0)]=1.0
## Set the outside to a very high value
distImage[numpy.where(bigImg==1)]=10000.0
imgp = distImage.copy()
## Get image graph and its path finder
gridGraph = vigraph.gridGraph(bigImg.shape[0:2],False)
edgeWeights = vigra.resize(distImage,[distImage.shape[0]*2-1,distImage.shape[1]*2-1],order=0)
edgeWeights = vigra.graphs.edgeFeaturesFromInterpolatedImageCorrected(gridGraph,edgeWeights)
pathFinder = vigraph.ShortestPathPathDijkstra(gridGraph)
## End points for paths (all points on the border)
targets = numpy.where(borderImg==1)
tx,ty = targets
nTargets = len(tx)
## Indices of start points for paths (random)
nPoints = int(numpy.ceil(percentageOfPaths * nTargets / 100.0))
numpy.random.seed(42)
starts = numpy.random.permutation(range(nTargets))[:nPoints]
## Compute paths
maxPaths = []
maxPathLengths = []
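    # For each sampled start point, the largest of its shortest-path distances
    # to all border pixels is taken as (an approximation of) its eccentricity.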
for i in range(nPoints):
source = gridGraph.coordinateToNode((int(tx[starts[i]]), int (ty[starts[i]])))
pathFinder.run(edgeWeights, source)
maxPathLength = 0
for j in range(nTargets):
target = gridGraph.coordinateToNode((int(tx[j]), int(ty[j])))
path = pathFinder.path(pathType='coordinates', target=target)
pathLength = pathFinder.distance(target)
if pathLength > maxPathLength or maxPathLength == 0:
maxPathLength = pathLength
maxPath = path
maxPaths.append(maxPath)
maxPathLengths.append(maxPathLength)
if showPathImage or len(imgSaveName)>1:
val = (imgp.max()+imgp.min())/2
for p in maxPaths:
imgp[p[:,0], p[:,1]] = val
if showPathImage:
plt.figure(distFunc)
plt.imshow(imgp, interpolation='none')
if len(imgSaveName)>1:
scipy.misc.imsave(imgSaveName, imgp)
return maxPathLengths
## Read image
img = vigra.impex.readImage(f)
labels = numpy.squeeze(vigra.analysis.labelImage(img))
### Compute slic superpixels
#labels ,nseg = vigra.analysis.slicSuperpixels(img,100.0,50)
#labels = numpy.squeeze(vigra.analysis.labelImage(labels))
## Compute bounding boxes
regionFeatures = vigra.analysis.extractRegionFeatures(img, labels)
upperLeftBBs = regionFeatures["Coord<Minimum>"]
lowerRightBBs = regionFeatures["Coord<Maximum>"]
nBoxes = len(upperLeftBBs)-1
## Get segment inside its bounding box
segments = []
nonEmptyBoxIndices = []
for i in range(nBoxes):
subImg = labels[ upperLeftBBs[i+1][0]:lowerRightBBs[i+1][0], upperLeftBBs[i+1][1]:lowerRightBBs[i+1][1] ].copy()
where = numpy.where(subImg==i+1)
if len(where[0]) > 0:
subImg[where] = 0
subImg[numpy.where(subImg!=0)] = 1
segments.append(subImg)
nonEmptyBoxIndices.append(i+1)
## Apply eccentricity transform
pathLengths = []
counter = 0
for seg in segments:
#eccentricity(subImg, distFunc="exponential", showPathImage=True, percentageOfPaths=percentage)
#eccentricity(subImg, distFunc="inverse", showPathImage=True, percentageOfPaths=percentage)
pathLength = eccentricity(seg, distFunc="linear", showPathImage=False, percentageOfPaths=percentage)
pathLengths.append(pathLength)
counter = counter+1
#vigra.show()
# ## Testimage: map longest path to color
# maxPath = 0
# for i in range(len(pathLengths)):
# m = max(pathLengths[i])
# if m > maxPath:
# maxPath = m
# labelCopy = labels.copy()
# for i in range(len(pathLengths)):
# val = max(pathLengths[i]) * 255.0/maxPath
# j = nonEmptyBoxIndices[i]
# labelCopy[numpy.where(labels == j)] = val
#
# vigra.imshow(labelCopy)
# vigra.show()
| mit | 2,636,803,325,228,039,700 | 33.969512 | 116 | 0.656844 | false | 3.265945 | false | false | false |
nerevu/riko | riko/modules/fetchtext.py | 1 | 5602 | # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko.modules.fetchtext
~~~~~~~~~~~~~~~~~~~~~~
Provides functions for fetching text data sources.
Accesses and extracts data from text sources on the web. This data can then be
merged with other data in your Pipe.
Examples:
basic usage::
>>> from riko import get_path
>>> from riko.modules.fetchtext import pipe
>>>
>>> conf = {'url': get_path('lorem.txt')}
>>> next(pipe(conf=conf))['content'] == 'What is Lorem Ipsum?'
True
Attributes:
OPTS (dict): The default pipe options
DEFAULTS (dict): The default parser options
"""
import pygogo as gogo
from . import processor
from riko import ENCODING
from riko.utils import fetch, auto_close, get_abspath
from riko.bado import coroutine, return_value, io
OPTS = {'ftype': 'none', 'assign': 'content'}
DEFAULTS = {'encoding': ENCODING}
logger = gogo.Gogo(__name__, monolog=True).logger
@coroutine
def async_parser(_, objconf, skip=False, **kwargs):
""" Asynchronously parses the pipe content
Args:
_ (None): Ignored
objconf (obj): The pipe configuration (an Objectify instance)
skip (bool): Don't parse the content
kwargs (dict): Keyword arguments
Kwargs:
stream (dict): The original item
Returns:
Iter[dict]: The stream of items
Examples:
>>> from riko import get_path
>>> from riko.bado import react
>>> from riko.bado.mock import FakeReactor
>>> from meza.fntools import Objectify
>>>
>>> def run(reactor):
... callback = lambda x: print(next(x)['content'])
... url = get_path('lorem.txt')
... objconf = Objectify({'url': url, 'encoding': ENCODING})
... d = async_parser(None, objconf, assign='content')
... return d.addCallbacks(callback, logger.error)
>>>
>>> try:
... react(run, _reactor=FakeReactor())
... except SystemExit:
... pass
...
What is Lorem Ipsum?
"""
if skip:
stream = kwargs['stream']
else:
url = get_abspath(objconf.url)
f = yield io.async_url_open(url)
assign = kwargs['assign']
encoding = objconf.encoding
_stream = ({assign: line.strip().decode(encoding)} for line in f)
stream = auto_close(_stream, f)
return_value(stream)
def parser(_, objconf, skip=False, **kwargs):
""" Parses the pipe content
Args:
_ (None): Ignored
objconf (obj): The pipe configuration (an Objectify instance)
skip (bool): Don't parse the content
kwargs (dict): Keyword arguments
Kwargs:
stream (dict): The original item
Returns:
Iter[dict]: The stream of items
Examples:
>>> from riko import get_path
>>> from meza.fntools import Objectify
>>>
>>> url = get_path('lorem.txt')
>>> objconf = Objectify({'url': url, 'encoding': ENCODING})
>>> result = parser(None, objconf, assign='content')
>>> next(result)['content'] == 'What is Lorem Ipsum?'
True
"""
if skip:
stream = kwargs['stream']
else:
f = fetch(decode=True, **objconf)
_stream = ({kwargs['assign']: line.strip()} for line in f)
stream = auto_close(_stream, f)
return stream
@processor(DEFAULTS, isasync=True, **OPTS)
def async_pipe(*args, **kwargs):
"""A source that asynchronously fetches and parses an XML or JSON file to
return the entries.
Args:
item (dict): The entry to process
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. Must contain the key 'url'. May
contain the key 'encoding'.
url (str): The web site to fetch.
encoding (str): The file encoding (default: utf-8).
assign (str): Attribute to assign parsed content (default: content)
Returns:
Deferred: twisted.internet.defer.Deferred stream of items
Examples:
>>> from riko import get_path
>>> from riko.bado import react
>>> from riko.bado.mock import FakeReactor
>>>
>>> def run(reactor):
... callback = lambda x: print(next(x)['content'])
... conf = {'url': get_path('lorem.txt')}
... d = async_pipe(conf=conf)
... return d.addCallbacks(callback, logger.error)
>>>
>>> try:
... react(run, _reactor=FakeReactor())
... except SystemExit:
... pass
...
What is Lorem Ipsum?
"""
return async_parser(*args, **kwargs)
@processor(DEFAULTS, **OPTS)
def pipe(*args, **kwargs):
"""A source that fetches and parses an XML or JSON file to
return the entries.
Args:
item (dict): The entry to process
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. Must contain the key 'url'. May
contain the key 'encoding'.
url (str): The web site to fetch
encoding (str): The file encoding (default: utf-8).
assign (str): Attribute to assign parsed content (default: content)
Returns:
dict: an iterator of items
Examples:
>>> from riko import get_path
>>>
>>> conf = {'url': get_path('lorem.txt')}
>>> next(pipe(conf=conf))['content'] == 'What is Lorem Ipsum?'
True
"""
return parser(*args, **kwargs)
| mit | -4,538,184,047,259,151,400 | 28.177083 | 78 | 0.577294 | false | 3.959011 | false | false | false |
rolandschulz/speedup | measure.py | 1 | 3753 | #!/usr/bin/env python3
# Copyright 2017 Jussi Pakkanen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess, sys, os, shutil, platform, json
meson_commands = ['meson', 'meson.py', '/home/jpakkane/workspace/meson/meson.py', 'c:/users/IEUser/meson/meson.py', '/Users/jpakkane/meson/meson.py']
meson_bin = None
for c in meson_commands:
if shutil.which(c):
meson_bin = c
break
if not meson_bin:
sys.exit('Could not find Meson executable.')
def measure_one(builddir, compiler, extra_flag, sort, buildtype):
if os.path.exists(builddir):
shutil.rmtree(builddir)
env = os.environ.copy()
env['CXX'] = compiler
env['CXXFLAGS'] = extra_flag
if sort:
sort_arg = ['--sort']
else:
sort_arg = []
subprocess.check_call([sys.executable, meson_bin, builddir, '--buildtype=' + buildtype] , stdout=subprocess.DEVNULL, env=env)
subprocess.check_call(['ninja', '-C', builddir], stdout=subprocess.DEVNULL)
out = subprocess.check_output([os.path.join(builddir, 'speedup')] + sort_arg)
out = out.decode('utf-8')
result = {}
for line in out.split('\n'):
line = line.strip()
if line == '':
continue
typename, duration, _ = line.split()
result[typename] = duration
shutil.rmtree(builddir)
return result
def do_measurements():
measurements = []
if platform.processor() == 'x86_64' or platform.processor() == 'i386' or 'Intel64' in platform.processor():
gcc_cpu_flags = ['', '-mavx', '-msse4.2', '-msse2', '-msse']
elif platform.machine().startswith('arm'):
gcc_cpu_flags = ['', '-mfpu=neon']
else:
sys.exit('Unsupported CPU: ' + platform.processor())
cl_cpu_flags = [''] # Add /arch:AVX and /arch:AVX2
builddir = 'buildmeasurement'
compilers = []
if platform.system().lower() == 'linux':
trials = ['g++', 'clang++']
elif platform.system().lower() == 'windows':
trials = ['g++', 'clang++', 'cl']
elif platform.system().lower() == 'darwin':
trials = ['clang++'] # On OSX g++ is an alias to clang++
for c in trials:
if shutil.which(c):
compilers.append(c)
for compiler in compilers:
cpu_flags = cl_cpu_flags if compiler == 'cl' else gcc_cpu_flags
for cpu_flag in cpu_flags:
for sort in [True, False]:
for buildtype in ['debugoptimized', 'release']:
times = measure_one(builddir, compiler, cpu_flag, sort, buildtype)
measurements.append({'compiler': compiler,
'cpu_flag': cpu_flag,
'sort': sort,
'buildtype': buildtype,
'times': times,
})
return measurements
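# Each entry appended above is a plain dict; an illustrative (made-up) example:
#   {'compiler': 'g++', 'cpu_flag': '-mavx', 'sort': True,
#    'buildtype': 'release', 'times': {<typename>: <duration>, ...}}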
if __name__ == '__main__':
if len(sys.argv) != 2:
        print(sys.argv[0], '<output file name>')
sys.exit(1)
if not os.path.isfile('meson.build'):
print('This script must be run in the top of the source dir.')
sys.exit(1)
ofilename = sys.argv[1]
measurements = do_measurements()
json.dump(measurements, open(ofilename, 'w'))
| apache-2.0 | -8,321,515,540,365,940,000 | 37.295918 | 149 | 0.591527 | false | 3.719524 | false | false | false |
openego/dingo | ding0/flexopt/check_tech_constraints.py | 1 | 26442 | """This file is part of DING0, the DIstribution Network GeneratOr.
DING0 is a tool to generate synthetic medium and low voltage power
distribution grids based on open data.
It is developed in the project open_eGo: https://openegoproject.wordpress.com
DING0 lives at github: https://github.com/openego/ding0/
The documentation is available on RTD: http://ding0.readthedocs.io"""
__copyright__ = "Reiner Lemoine Institut gGmbH"
__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
__url__ = "https://github.com/openego/ding0/blob/master/LICENSE"
__author__ = "nesnoj, gplssm"
# check technical constraints of distribution grids (shared lib)
from ding0.tools import config as cfg_ding0
import logging
from ding0.core.network.loads import LVLoadDing0
from ding0.core.network import GeneratorDing0
from ding0.core.network.cable_distributors import LVCableDistributorDing0
from ding0.core.network.stations import LVStationDing0
from ding0.core.powerflow import q_sign
import networkx as nx
import math
logger = logging.getLogger('ding0')
def check_load(grid, mode):
""" Checks for over-loading of branches and transformers for MV or LV grid.
Parameters
----------
grid : :class:`~.ding0.core.GridDing0`
Grid identifier.
mode : :obj:`str`
Kind of grid ('MV' or 'LV').
Returns
-------
:obj:`dict`
Dict of critical branches with max. relative overloading, and the
following format::
{
branch_1: rel_overloading_1,
...,
branch_n: rel_overloading_n
}
:obj:`list` of :class:`~.ding0.core.network.TransformerDing0` objects
List of critical transformers with the following format::
[trafo_1, ..., trafo_m]
Note
-----
Lines'/cables' max. capacity (load case and feed-in case) are taken from [#]_.
References
----------
.. [#] dena VNS
See Also
--------
ding0.flexopt.reinforce_measures.reinforce_branches_current :
ding0.flexopt.reinforce_measures.reinforce_branches_voltage :
"""
crit_branches = {}
crit_stations = []
if mode == 'MV':
# load load factors (conditions) for cables, lines and trafos for load- and feedin case
# load_factor_mv_trans_lc_normal = float(cfg_ding0.get('assumptions',
# 'load_factor_mv_trans_lc_normal'))
load_factor_mv_line_lc_normal = float(cfg_ding0.get('assumptions',
'load_factor_mv_line_lc_normal'))
load_factor_mv_cable_lc_normal = float(cfg_ding0.get('assumptions',
'load_factor_mv_cable_lc_normal'))
#load_factor_mv_trans_fc_normal = float(cfg_ding0.get('assumptions',
# 'load_factor_mv_trans_fc_normal'))
load_factor_mv_line_fc_normal = float(cfg_ding0.get('assumptions',
'load_factor_mv_line_fc_normal'))
load_factor_mv_cable_fc_normal = float(cfg_ding0.get('assumptions',
'load_factor_mv_cable_fc_normal'))
mw2kw = 1e3
kw2mw = 1e-3
# STEP 1: check branches' loads
for branch in grid.graph_edges():
s_max_th = 3**0.5 * branch['branch'].type['U_n'] * branch['branch'].type['I_max_th']
if branch['branch'].kind == 'line':
s_max_th_lcfc = [s_max_th * load_factor_mv_line_lc_normal,
s_max_th * load_factor_mv_line_fc_normal]
elif branch['branch'].kind == 'cable':
s_max_th_lcfc = [s_max_th * load_factor_mv_cable_lc_normal,
s_max_th * load_factor_mv_cable_fc_normal]
else:
raise ValueError('Branch kind is invalid!')
# check loads only for non-aggregated Load Areas (aggregated ones are skipped raising except)
try:
# check if s_res exceeds allowed values for laod and feedin case
# CAUTION: The order of values is fix! (1. load case, 2. feedin case)
if any([s_res * mw2kw > _ for s_res, _ in zip(branch['branch'].s_res, s_max_th_lcfc)]):
# save max. relative overloading
crit_branches[branch] = max(branch['branch'].s_res) * mw2kw / s_max_th
except:
pass
# STEP 2: check HV-MV station's load
# NOTE: HV-MV station reinforcement is not required for status-quo
# scenario since HV-MV trafos already sufficient for load+generation
# case as done in MVStationDing0.choose_transformers()
# OLD snippet:
# cum_peak_load = grid.grid_district.peak_load
# cum_peak_generation = grid.station().peak_generation(mode='MVLV')
#
# # reinforcement necessary only if generation > load
# if cum_peak_generation > cum_peak_load:
# grid.station().choose_transformers
#
# cum_trafo_capacity = sum((_.s_max_a for _ in grid.station().transformers()))
#
# max_trafo = max((_.s_max_a for _ in grid.station().transformers()))
#
# # determine number and size of required transformers
# kw2mw = 1e-3
# residual_apparent_power = cum_generation_sum * kw2mw - \
# cum_trafo_capacity
elif mode == 'LV':
raise NotImplementedError
if crit_branches:
logger.info('==> {} branches have load issues.'.format(
len(crit_branches)))
if crit_stations:
logger.info('==> {} stations have load issues.'.format(
len(crit_stations)))
return crit_branches, crit_stations
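# Illustrative use (grid object name hypothetical):
#   crit_branches, crit_stations = check_load(mv_grid, 'MV')
#   # crit_branches maps each overloaded branch to its max. relative loading,
#   # e.g. a value of 1.27 means 127 % of the branch's thermal limit.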
def check_voltage(grid, mode):
""" Checks for voltage stability issues at all nodes for MV or LV grid
Parameters
----------
grid : :class:`~.ding0.core.GridDing0`
Grid identifier.
mode : :obj:`str`
Kind of grid ('MV' or 'LV').
Returns
-------
:obj:`list` of Ding0 node object (member of graph) either
* :class:`~.ding0.core.network.GeneratorDing0` or
* :class:`~.ding0.core.network.GeneratorFluctuatingDing0` or
* :class:`~.ding0.core.network.LoadDing0` or
* :class:`~.ding0.core.network.StationDing0` or
* :class:`~.ding0.core.network.CircuitBreakerDing0` or
* :class:`~.ding0.core.network.CableDistributorDing0`
List of critical nodes, sorted descending by voltage difference.
Note
-----
The examination is done in two steps, according to [#]_ :
1. It is checked #TODO: what?
2. #TODO: what's next?
References
----------
.. [#] dena VNS
"""
crit_nodes = {}
if mode == 'MV':
# load max. voltage difference for load and feedin case
mv_max_v_level_lc_diff_normal = float(cfg_ding0.get('mv_routing_tech_constraints',
'mv_max_v_level_lc_diff_normal'))
mv_max_v_level_fc_diff_normal = float(cfg_ding0.get('mv_routing_tech_constraints',
'mv_max_v_level_fc_diff_normal'))
# check nodes' voltages
voltage_station = grid._station.voltage_res
for node in grid.graph_nodes_sorted():
try:
# compare node's voltage with max. allowed voltage difference for load and feedin case
if (abs(voltage_station[0] - node.voltage_res[0]) > mv_max_v_level_lc_diff_normal) or\
(abs(voltage_station[1] - node.voltage_res[1]) > mv_max_v_level_fc_diff_normal):
crit_nodes[node] = {'node': node,
'v_diff': max([abs(v2-v1) for v1, v2 in zip(node.voltage_res, voltage_station)])}
except:
pass
elif mode == 'LV':
raise NotImplementedError
if crit_nodes:
logger.info('==> {} nodes have voltage issues.'.format(len(crit_nodes)))
return [_['node'] for _ in sorted(crit_nodes.values(), key=lambda _: _['v_diff'], reverse=True)]
def get_critical_line_loading(grid):
"""
Assign line loading to each branch determined by peak load and peak
generation of descendant branches
The attribute `s_res` is a list of two elements
1. apparent power in load case
2. apparent power in feed-in case
Parameters
----------
grid : :class:`~.ding0.core.network.grids.LVGridDing0`
Ding0 LV grid object
Returns
-------
:obj:`list`
List of critical branches incl. its line loading
:obj:`list`
List of critical stations incl. its transformer loading
"""
cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
cos_phi_feedin = cfg_ding0.get('assumptions', 'cos_phi_gen')
lf_trafo_load = cfg_ding0.get('assumptions',
"load_factor_lv_trans_lc_normal")
lf_trafo_gen = cfg_ding0.get('assumptions',
"load_factor_lv_trans_fc_normal")
critical_branches = []
critical_stations = []
# Convert grid to a tree (is a directed graph)
# based on this tree, descendants of each node are accessible
station = grid._station
tree = nx.dfs_tree(grid._graph, station)
for node in tree.nodes():
# list of descendant nodes including the node itself
descendants = list(nx.descendants(tree, node))
descendants.append(node)
if isinstance(node, LVStationDing0):
# determine cumulative peak load at node and assign to branch
peak_load, peak_gen = peak_load_generation_at_node(descendants)
if grid.id_db == 61107:
if isinstance(node, LVStationDing0):
print(node)
# get trafos cumulative apparent power
s_max_trafos = sum([_.s_max_a for _ in node._transformers])
# compare with load and generation connected to
if (((peak_load / cos_phi_load) > s_max_trafos * lf_trafo_load) or
((peak_gen / cos_phi_feedin) > s_max_trafos * lf_trafo_gen)):
critical_stations.append(
{'station': node,
's_max': [
peak_load / cos_phi_load,
peak_gen / cos_phi_feedin]})
else:
            # predecessor of this node in the tree
predecessors = list(tree.predecessors(node))
# a non-meshed grid topology returns a list with only 1 item
predecessor = predecessors[0]
            # get preceding branch (edge towards the predecessor)
branches = grid.graph_branches_from_node(node)
preceeding_branch = [branch for branch in branches
if branch[0] is predecessor][0]
# determine cumulative peak load at node and assign to branch
peak_load, peak_gen = peak_load_generation_at_node(descendants)
s_max_th = 3 ** 0.5 * preceeding_branch[1]['branch'].type['U_n'] * \
preceeding_branch[1]['branch'].type['I_max_th'] / 1e3
if (((peak_load / cos_phi_load) > s_max_th) or
((peak_gen / cos_phi_feedin) > s_max_th)):
critical_branches.append(
{'branch': preceeding_branch[1]['branch'],
's_max': [
peak_load / cos_phi_load,
peak_gen / cos_phi_feedin]})
return critical_branches, critical_stations
def peak_load_generation_at_node(nodes):
"""
    Get maximum occurring load and generation at a certain node
Summarizes peak loads and nominal generation power of descendant nodes
of a branch
Parameters
----------
nodes : :obj:`list`
Any LV grid Ding0 node object that is part of the grid topology
Return
------
:any:`float`
peak_load : Sum of peak loads of descendant nodes
:any:`float`
peak_generation : Sum of nominal power of generation at descendant nodes
"""
loads = [node.peak_load for node in nodes
if isinstance(node, LVLoadDing0)]
peak_load = sum(loads)
generation = [node.capacity for node in nodes
if isinstance(node, GeneratorDing0)]
peak_generation = sum(generation)
return peak_load, peak_generation
def get_critical_voltage_at_nodes(grid):
r"""
Estimate voltage drop/increase induced by loads/generators connected to the
grid.
Based on voltage level at each node of the grid critical nodes in terms
of exceed tolerable voltage drop/increase are determined.
    The tolerable voltage drop/increase is defined by [#VDE]_ and adds up to
3 % of nominal voltage.
The longitudinal voltage drop at each line segment is estimated by a
simplified approach (neglecting the transverse voltage drop) described in
[#VDE]_.
Two equations are available for assessing voltage drop/ voltage increase.
The first is used to assess a voltage drop in the load case
.. math::
\\Delta u = \\frac{S_{Amax} \cdot ( R_{kV} \cdot cos(\phi) + X_{kV} \cdot sin(\phi) )}{U_{nom}}
The second equation can be used to assess the voltage increase in case of
feedin. The only difference is the negative sign before X. This is related
    to considering a voltage drop due to inductive operation of generators.
.. math::
\\Delta u = \\frac{S_{Amax} \cdot ( R_{kV} \cdot cos(\phi) - X_{kV} \cdot sin(\phi) )}{U_{nom}}
================= =============================
Symbol Description
================= =============================
:math:`\Delta u` Voltage drop/increase at node
:math:`S_{Amax}` Apparent power
:math:`R_{kV}` Short-circuit resistance
:math:`X_{kV}` Short-circuit reactance
:math:`cos(\phi)` Power factor
:math:`U_{nom}` Nominal voltage
================= =============================
Parameters
----------
grid : :class:`~.ding0.core.network.grids.LVGridDing0`
Ding0 LV grid object
Note
-----
The implementation highly depends on topology of LV grid. This must not
change its topology from radial grid with stubs branching from radial
branches. In general, the approach of [#VDE]_ is only applicable to grids of
radial topology.
We consider the transverse voltage drop/increase by applying the same
methodology successively on results of main branch. The voltage
drop/increase at each house connection branch (aka. stub branch or grid
connection point) is estimated by superposition based on voltage level
in the main branch cable distributor.
References
----------
.. [#VDE] VDE Anwenderrichtlinie: Erzeugungsanlagen am Niederspannungsnetz –
Technische Mindestanforderungen für Anschluss und Parallelbetrieb von
Erzeugungsanlagen am Niederspannungsnetz, 2011
"""
v_delta_tolerable_fc = cfg_ding0.get('assumptions',
'lv_max_v_level_fc_diff_normal')
v_delta_tolerable_lc = cfg_ding0.get('assumptions',
'lv_max_v_level_lc_diff_normal')
crit_nodes = []
# get list of nodes of main branch in right order
tree = nx.dfs_tree(grid._graph, grid._station)
# list for nodes of main branch
main_branch = []
# list of stub cable distributors branching from main branch
grid_conn_points = []
# fill two above lists
for node in list(nx.descendants(tree, grid._station)):
successors = list(tree.successors(node))
if successors and all(isinstance(successor, LVCableDistributorDing0)
for successor in successors):
main_branch.append(node)
elif (isinstance(node, LVCableDistributorDing0) and
all(isinstance(successor, (GeneratorDing0, LVLoadDing0))
for successor in successors)):
grid_conn_points.append(node)
v_delta_load_case_bus_bar, v_delta_gen_case_bus_bar = get_voltage_at_bus_bar(grid, tree)
if (abs(v_delta_gen_case_bus_bar) > v_delta_tolerable_fc
or abs(v_delta_load_case_bus_bar) > v_delta_tolerable_lc):
crit_nodes.append({'node': grid._station,
'v_diff': [v_delta_load_case_bus_bar,
v_delta_gen_case_bus_bar]})
# voltage at main route nodes
for first_node in [b for b in tree.successors(grid._station)
if b in main_branch]:
# initiate loop over feeder
successor = first_node
# cumulative voltage drop/increase at substation bus bar
v_delta_load_cum = v_delta_load_case_bus_bar
v_delta_gen_cum = v_delta_gen_case_bus_bar
# successively determine voltage levels for succeeding nodes
while successor:
# calculate voltage drop over preceding line
voltage_delta_load, voltage_delta_gen = get_delta_voltage_preceding_line(grid, tree, successor)
# add voltage drop over preceding line
v_delta_load_cum += voltage_delta_load
v_delta_gen_cum += voltage_delta_gen
# roughly estimate transverse voltage drop
stub_node = [_ for _ in tree.successors(successor) if
_ not in main_branch][0]
v_delta_load_stub, v_delta_gen_stub = get_delta_voltage_preceding_line(grid, tree, stub_node)
# check if voltage drop at node exceeds tolerable voltage drop
if (abs(v_delta_gen_cum) > (v_delta_tolerable_fc)
or abs(v_delta_load_cum) > (
v_delta_tolerable_lc)):
# add node and successing stub node to critical nodes
crit_nodes.append({'node': successor,
'v_diff': [v_delta_load_cum,
v_delta_gen_cum]})
crit_nodes.append({'node': stub_node,
'v_diff': [
v_delta_load_cum + v_delta_load_stub,
v_delta_gen_cum + v_delta_gen_stub]})
# check if voltage drop at stub node exceeds tolerable voltage drop
elif ((abs(v_delta_gen_cum + v_delta_gen_stub) > v_delta_tolerable_fc)
or (abs(v_delta_load_cum + v_delta_load_stub) > v_delta_tolerable_lc)):
# add stub node to critical nodes
crit_nodes.append({'node': stub_node,
'v_diff': [
v_delta_load_cum + v_delta_load_stub,
v_delta_gen_cum + v_delta_gen_stub]})
successor = [_ for _ in tree.successors(successor)
if _ in main_branch]
if successor:
successor = successor[0]
return crit_nodes
def get_voltage_at_bus_bar(grid, tree):
"""
Determine voltage level at bus bar of MV-LV substation
Parameters
----------
grid : :class:`~.ding0.core.network.grids.LVGridDing0`
Ding0 grid object
tree : :networkx:`NetworkX Graph Obj< >`
Tree of grid topology:
Returns
-------
:obj:`list`
Voltage at bus bar. First item refers to load case, second item refers
to voltage in feedin (generation) case
"""
# impedance of mv grid and transformer
r_mv_grid, x_mv_grid = get_mv_impedance_at_voltage_level(grid, grid.v_level / 1e3)
z_trafo = 1 / sum(1 / (tr.z(voltage_level=grid.v_level / 1e3)) for tr in grid._station._transformers)
r_trafo = z_trafo.real
x_trafo = z_trafo.imag
# cumulative resistance/reactance at bus bar
r_busbar = r_mv_grid + r_trafo
x_busbar = x_mv_grid + x_trafo
# get voltage drop at substation bus bar
v_delta_load_case_bus_bar, \
v_delta_gen_case_bus_bar = get_voltage_delta_branch(tree, grid._station, r_busbar, x_busbar)
return v_delta_load_case_bus_bar, v_delta_gen_case_bus_bar
def get_delta_voltage_preceding_line(grid, tree, node):
"""
Parameters
----------
grid : :class:`~.ding0.core.network.grids.LVGridDing0`
Ding0 grid object
tree: :networkx:`NetworkX Graph Obj< >`
Tree of grid topology
node: graph node
Node at end of line
Return
------
:any:`float`
Voltage drop over preceding line of node
"""
# get impedance of preceding line
freq = cfg_ding0.get('assumptions', 'frequency')
omega = 2 * math.pi * freq
# choose preceding branch
branch = [_ for _ in grid.graph_branches_from_node(node) if
_[0] in list(tree.predecessors(node))][0][1]
# calculate impedance of preceding branch
r_line = (branch['branch'].type['R_per_km'] * branch['branch'].length/1e3)
x_line = (branch['branch'].type['L_per_km'] / 1e3 * omega *
branch['branch'].length/1e3)
    # get voltage drop over preceding line
voltage_delta_load, voltage_delta_gen = \
get_voltage_delta_branch(tree, node, r_line, x_line)
return voltage_delta_load, voltage_delta_gen
def get_voltage_delta_branch(tree, node, r, x):
"""
Determine voltage for a branch with impedance r + jx
Parameters
----------
tree : :networkx:`NetworkX Graph Obj< >`
Tree of grid topology
node : graph node
Node to determine voltage level at
r : float
        Resistance of preceding branch
    x : float
        Reactance of preceding branch
Return
------
:any:`float`
Delta voltage for branch
"""
cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
cos_phi_feedin = cfg_ding0.get('assumptions', 'cos_phi_gen')
cos_phi_load_mode = cfg_ding0.get('assumptions', 'cos_phi_load_mode')
cos_phi_feedin_mode = cfg_ding0.get('assumptions', 'cos_phi_gen_mode') #ToDo: Check if this is true. Why would generator run in a way that aggravates voltage issues?
v_nom = cfg_ding0.get('assumptions', 'lv_nominal_voltage')
# get apparent power for load and generation case
peak_load, gen_capacity = get_cumulated_conn_gen_load(tree, node)
s_max_load = peak_load/cos_phi_load
s_max_feedin = gen_capacity/cos_phi_feedin
# determine voltage increase/ drop a node
x_sign_load = q_sign(cos_phi_load_mode, 'load')
voltage_delta_load = voltage_delta_vde(v_nom, s_max_load, r, x_sign_load * x,
cos_phi_load)
x_sign_gen = q_sign(cos_phi_feedin_mode, 'load')
voltage_delta_gen = voltage_delta_vde(v_nom, s_max_feedin, r, x_sign_gen * x,
cos_phi_feedin)
return [voltage_delta_load, voltage_delta_gen]
def get_cumulated_conn_gen_load(graph, node):
"""
Get generation capacity/ peak load of all descending nodes
Parameters
----------
graph : :networkx:`NetworkX Graph Obj< >`
Directed graph
node : graph node
Node of the main branch of LV grid
Returns
-------
:obj:`list`
A list containing two items
# cumulated peak load of connected loads at descending nodes of node
# cumulated generation capacity of connected generators at descending nodes of node
"""
# loads and generators connected to descending nodes
peak_load = sum(
[node.peak_load for node in nx.descendants(graph, node)
if isinstance(node, LVLoadDing0)])
generation = sum(
[node.capacity for node in nx.descendants(graph, node)
if isinstance(node, GeneratorDing0)])
return [peak_load, generation]
def get_mv_impedance_at_voltage_level(grid, voltage_level):
"""
Determine MV grid impedance (resistance and reactance separately)
Parameters
----------
grid : :class:`~.ding0.core.network.grids.LVGridDing0`
voltage_level: float
voltage level to which impedance is rescaled (normally 0.4 kV for LV)
Returns
-------
:obj:`list`
List containing resistance and reactance of MV grid
"""
freq = cfg_ding0.get('assumptions', 'frequency')
omega = 2 * math.pi * freq
mv_grid = grid.grid_district.lv_load_area.mv_grid_district.mv_grid
edges = mv_grid.find_path(grid._station, mv_grid._station, type='edges')
r_mv_grid = sum([e[2]['branch'].type['R_per_km'] * e[2]['branch'].length / 1e3
for e in edges])
x_mv_grid = sum([e[2]['branch'].type['L_per_km'] / 1e3 * omega * e[2][
'branch'].length / 1e3 for e in edges])
# rescale to voltage level
r_mv_grid_vl = r_mv_grid * (voltage_level / mv_grid.v_level) ** 2
x_mv_grid_vl = x_mv_grid * (voltage_level / mv_grid.v_level) ** 2
return [r_mv_grid_vl, x_mv_grid_vl]
def voltage_delta_vde(v_nom, s_max, r, x, cos_phi):
"""
    Estimate voltage drop/increase
The VDE [#]_ proposes a simplified method to estimate voltage drop or
increase in radial grids.
Parameters
----------
v_nom : :obj:`int`
Nominal voltage
s_max : :obj:`float`
Apparent power
r : :obj:`float`
Short-circuit resistance from node to HV/MV substation (in ohm)
x : :obj:`float`
Short-circuit reactance from node to HV/MV substation (in ohm). Must
be a signed number indicating (+) inductive reactive consumer (load
case) or (-) inductive reactive supplier (generation case)
cos_phi : :obj:`float`
The cosine phi of the connected generator or load that induces the
voltage change
Returns
-------
:obj:`float`
Voltage drop or increase
References
----------
.. [#] VDE Anwenderrichtlinie: Erzeugungsanlagen am Niederspannungsnetz –
Technische Mindestanforderungen für Anschluss und Parallelbetrieb von
Erzeugungsanlagen am Niederspannungsnetz, 2011
"""
delta_v = (s_max * 1e3 * (
r * cos_phi - x * math.sin(math.acos(cos_phi)))) / v_nom ** 2
return delta_v
| agpl-3.0 | 1,344,183,671,160,117,000 | 36.497872 | 169 | 0.585225 | false | 3.704596 | false | false | false |
meteorfox/PerfKitBenchmarker | perfkitbenchmarker/providers/gcp/gcp_dataproc.py | 1 | 6608 | # Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing class for GCP's spark service.
Spark clusters can be created and deleted.
"""
import datetime
import json
import re
from perfkitbenchmarker import flags
from perfkitbenchmarker import providers
from perfkitbenchmarker import spark_service
from perfkitbenchmarker.providers.gcp import util
FLAGS = flags.FLAGS
GCP_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
class GcpDataproc(spark_service.BaseSparkService):
"""Object representing a GCP Dataproc cluster.
Attributes:
cluster_id: ID of the cluster.
project: ID of the project.
"""
CLOUD = providers.GCP
SERVICE_NAME = 'dataproc'
def __init__(self, spark_service_spec):
super(GcpDataproc, self).__init__(spark_service_spec)
self.project = self.spec.master_group.vm_spec.project
@staticmethod
def _GetStats(stdout):
results = json.loads(stdout)
stats = {}
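    # Derived from the job's status history: PENDING->RUNNING gives the wait
    # time, RUNNING->done gives the runtime (both reported in seconds).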
done_time = datetime.datetime.strptime(
results['status']['stateStartTime'], GCP_TIME_FORMAT)
pending_time = None
start_time = None
for state in results['statusHistory']:
if state['state'] == 'PENDING':
pending_time = datetime.datetime.strptime(state['stateStartTime'],
GCP_TIME_FORMAT)
elif state['state'] == 'RUNNING':
start_time = datetime.datetime.strptime(state['stateStartTime'],
GCP_TIME_FORMAT)
if done_time and start_time:
stats[spark_service.RUNTIME] = (done_time - start_time).total_seconds()
if start_time and pending_time:
stats[spark_service.WAITING] = (
(start_time - pending_time).total_seconds())
return stats
def _Create(self):
"""Creates the cluster."""
if self.cluster_id is None:
self.cluster_id = 'pkb-' + FLAGS.run_uri
cmd = util.GcloudCommand(self, 'dataproc', 'clusters', 'create',
self.cluster_id)
if self.project is not None:
cmd.flags['project'] = self.project
cmd.flags['num-workers'] = self.spec.worker_group.vm_count
for group_type, group_spec in [
('worker', self.spec.worker_group),
('master', self.spec.master_group)]:
flag_name = group_type + '-machine-type'
cmd.flags[flag_name] = group_spec.vm_spec.machine_type
if group_spec.vm_spec.num_local_ssds:
ssd_flag = 'num-{0}-local-ssds'.format(group_type)
cmd.flags[ssd_flag] = group_spec.vm_spec.num_local_ssds
if group_spec.vm_spec.boot_disk_size:
disk_flag = group_type + '-boot-disk-size'
cmd.flags[disk_flag] = group_spec.vm_spec.boot_disk_size
cmd.Issue()
def _Delete(self):
"""Deletes the cluster."""
cmd = util.GcloudCommand(self, 'dataproc', 'clusters', 'delete',
self.cluster_id)
# If we don't put this here, zone is automatically added, which
# breaks the dataproc clusters delete
cmd.flags['zone'] = []
cmd.Issue()
def _Exists(self):
"""Check to see whether the cluster exists."""
cmd = util.GcloudCommand(self, 'dataproc', 'clusters', 'describe',
self.cluster_id)
# If we don't put this here, zone is automatically added to
# the command, which breaks dataproc clusters describe
cmd.flags['zone'] = []
_, _, retcode = cmd.Issue()
return retcode == 0
def SubmitJob(self, jarfile, classname, job_poll_interval=None,
job_arguments=None, job_stdout_file=None,
job_type=spark_service.SPARK_JOB_TYPE):
cmd = util.GcloudCommand(self, 'dataproc', 'jobs', 'submit', job_type)
cmd.flags['cluster'] = self.cluster_id
    # If we don't put this here, zone is automatically added to the command
# which breaks dataproc jobs submit
cmd.flags['zone'] = []
if classname:
cmd.flags['jars'] = jarfile
cmd.flags['class'] = classname
else:
cmd.flags['jar'] = jarfile
# Dataproc gives as stdout an object describing job execution.
# Its stderr contains a mix of the stderr of the job, and the
# stdout of the job. We set the driver log level to FATAL
# to suppress those messages, and we can then separate, hopefully
# the job standard out from the log messages.
cmd.flags['driver-log-levels'] = 'root=FATAL'
if job_arguments:
cmd.additional_flags = ['--'] + job_arguments
stdout, stderr, retcode = cmd.Issue(timeout=None)
if retcode != 0:
return {spark_service.SUCCESS: False}
stats = self._GetStats(stdout)
stats[spark_service.SUCCESS] = True
if job_stdout_file:
with open(job_stdout_file, 'w') as f:
lines = stderr.splitlines(True)
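        # Dataproc mixes the job's own stdout into stderr; the parsing below
        # strips the submission banner and stage-progress lines to recover it.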
if (not re.match(r'Job \[.*\] submitted.', lines[0]) or
not re.match(r'Waiting for job output...', lines[1])):
raise Exception('Dataproc output in unexpected format.')
i = 2
if job_type == spark_service.SPARK_JOB_TYPE:
if not re.match(r'\r', lines[i]):
raise Exception('Dataproc output in unexpected format.')
i += 1
# Eat these status lines. They end in \r, so they overwrite
# themselves at the console or when you cat a file. But they
# are part of this string.
while re.match(r'\[Stage \d+:', lines[i]):
i += 1
if not re.match(r' *\r$', lines[i]):
raise Exception('Dataproc output in unexpected format.')
while i < len(lines) and not re.match(r'Job \[.*\]', lines[i]):
f.write(lines[i])
i += 1
if i != len(lines) - 1:
raise Exception('Dataproc output in unexpected format.')
return stats
def SetClusterProperty(self):
pass
def GetMetadata(self):
basic_data = super(GcpDataproc, self).GetMetadata()
if self.spec.worker_group.vm_spec.num_local_ssds:
basic_data.update({'ssd_count':
str(self.spec.worker_group.vm_spec.num_local_ssds)})
return basic_data
| apache-2.0 | -2,838,796,046,700,911,600 | 35.10929 | 77 | 0.634231 | false | 3.771689 | false | false | false |
disrupts/SicPy | sicpy/ciphers/railfence.py | 1 | 4491 | from sicpy.cryptobox import Cryptobox
from sicpy.utils.iterators import integer_iterator
class RailFence(Cryptobox):
""" Rail-fence Sawtooth
"""
    # DOES NOT NEED ALPHABET
def __init__(self, alphabet=None, key=None, plain_text=None,
cipher_text=None):
""" """
Cryptobox.__init__(self, alphabet, key, plain_text, cipher_text)
self.key_init = 2
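        # For a rail fence the key is simply the number of rails; two is the
        # smallest meaningful value.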
def cipher(self, alphabet=None, input_text=None, key=None):
""" """
# Variable inference
if alphabet == None:
alphabet = self.alphabet
if input_text == None:
input_text = self.plain_text
if key == None:
key = self.key
# Let's make a matrix out of the original text
text_length = len(input_text)
buffer_matrix = [[0 for i in range(key)] for j in range(text_length)]
rail_list = generate_rail_list(key, text_length)
for position, letter in enumerate(input_text):
buffer_matrix[position][rail_list[position]] = letter
# Let's transpose the matrix
matrix_width = len(buffer_matrix[0])
buffer_matrix = (
[[row[i] for row in buffer_matrix] for i in range(matrix_width)] )
# Now let's flatten the matrix to a single vector
buffer_list = sum(buffer_matrix,[])
# change to string omiting 0s
output_text = ''
for letter in buffer_list:
if letter != 0:
output_text += str(letter)
return output_text
def decipher(self, alphabet=None, key=None, input_text=None):
"""
asimetrical algo, decipher is different
(thow known and unique for each cipher)
"""
# Variable inference
if alphabet == None:
alphabet = self.alphabet
if input_text == None:
input_text = self.cipher_text
if key == None:
key = self.key
# make a matrix with filled with 0 and 1, 1 representing
# the place were letters will be placed
text_length = len(input_text)
buffer_matrix = [[0 for i in range(key)] for j in range(text_length)]
rail_list = generate_rail_list(key, text_length)
for position, letter in enumerate(input_text):
buffer_matrix[position][rail_list[position]] = 1
# place letters (line per line)
position = 0
for j in range(key):
for i in range(len(buffer_matrix)):
if buffer_matrix[i][j] == 1:
buffer_matrix[i][j] = input_text[position]
position += 1
# Read (extract) letters (one letter per column)
output_text = ''
for i in range(len(buffer_matrix)):
for j in range(key):
#if isinstance(buffer_matrix[i][j],int):
if buffer_matrix[i][j] != 0:
output_text += buffer_matrix[i][j]
return output_text
def bruteforce(self, times=None, alphabet=None, input_text=None):
"""
times should be lower than len(input_text)
"""
# initialise times to maximum possible value
if times == None:
times = len(input_text) - 1
return Cryptobox.bruteforce(self, times, alphabet, input_text)
def key_inverse(self, alphabet=None, key=None):
""" algo is asimetric, same key is used, just returns same key """
# Variable inference
if alphabet == None:
alphabet = self.alphabet
if key == None:
key = self.key
# Main code
return key
def key_iterate(self, alphabet=None, key=None):
"""
need to pass alphabet for consistency"""
# Variable inference
#if alphabet == None:
# alphabet = self.alphabet
if key == None:
key = self.key
# Main code
return key + 1 # the length of the alphabet is not the limit
def generate_rail_list(key, list_length):
""" Generates a list of integers following a sawtooth or rail fence
"""
return_list = []
element = 0
for whatever in range(list_length):
return_list.append(element)
# Change direction
if element == (key-1):
dir_up = False
if element == 0:
dir_up = True
# Update element
if dir_up == True:
element += 1
else:
element -= 1
return return_list
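# Example: generate_rail_list(3, 7) returns [0, 1, 2, 1, 0, 1, 2] -- the rail
# (row) index assigned to each successive character of the text.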
| gpl-3.0 | -4,004,982,648,631,174,000 | 31.773723 | 77 | 0.557461 | false | 4.104205 | false | false | false |
etal/cladecompare | cladeweb.py | 1 | 7840 | #!/usr/bin/env python
"""Web browser interface for CladeCompare.
Input (GET):
Form for submitting FG, [BG, HMM].
Output (POST):
CladeReport (heat map) of submission.
"""
# TODO:
# - take boolean do_weight as a form option (checkbox?)
# ENH (in the report):
# - asterisks up top link to PDF pairlogos
# - below title, link to PyMOL script of PDB(s)
from __future__ import print_function
import logging
import os
import tempfile
import webbrowser
import bottle
from cladecomparelib import (core, pairlogo, pmlscript, urn, gtest, jsd,
phospho, report)
# ENH: include textarea as alternative to each file upload input
# ENH: dropdown alpha (or textinput w/ JS validation)
FORM_HTML = """\
<html>
<body>
<h1>CladeCompare</h1>
<form action="cladecompare" method="post" enctype="multipart/form-data">
<p>
Submission name:
<input type="text" name="name" />
</p>
<h2>Sequences</h2>
<p>
Sequence file 1 (required):
<br />
<input type="file" name="seqfile1" size=50 />
</p>
<p>
Sequence file 2:
<br />
<input type="file" name="seqfile2" size=50 />
</p>
<h2>Statistical strategy</h2>
<p>
<label>
<input type="radio" name="strategy" value="gtest" checked="checked" />
G-test (goodness-of-fit)
</label>
</p>
<p>
<label>
<input type="radio" name="strategy" value="urn" />
Ball-in-urn model (binomial)
</label>
</p>
<p>
<label>
<input type="radio" name="strategy" value="jsd" />
Jensen-Shannon divergence
</label>
</p>
<p>
<label>
<input type="radio" name="strategy" value="phospho" />
Phosphorylation site conservation
</label>
</p>
<p>
Significance cutoff (alpha):
<input type="text" name="alpha" value="0.005" />
</p>
<h2>Alignment profile</h2>
<p>
HMM (.hmm) profile:
<br />
<input type="file" name="profile" size=50 />
</p>
<!--
<h2>Structure</h2>
<p>
PDB ID:
<input type="text" name="pdbid" />
<br />
or upload a
PDB file:
<br />
<input type="file" name="pdbfile" size=50 />
</p>
-->
<p />
<p><input type="submit" /></p>
</form>
<hr />
<p>Project page: <a
href="http://github.com/etal/cladecompare">http://github.com/etal/cladecompare</a></p>
<p>If you use this software in a publication, please cite our paper that
describes it:</p>
<blockquote>Talevich, E. & Kannan, N. (2013)
<a href="http://www.biomedcentral.com/1471-2148/13/117">Structural and
evolutionary adaptation of rhoptry kinases and pseudokinases, a family of
coccidian virulence factors</a>.
<i>BMC Evolutionary Biology</i> 13:117 doi:10.1186/1471-2148-13-117
</blockquote>
</body>
</html>
"""
# --- Routes ---
@bottle.get('/cladecompare')
def form():
return FORM_HTML
# TODO - routes for downloading .pml, .pdf -- use static_file
@bottle.post('/cladecompare')
def form_submit():
# ENH: pick a unique, informative name -- e.g. date or hostname
name = bottle.request.forms.name
seqfile1 = bottle.request.files.seqfile1
if not hasattr(seqfile1, 'file'):
return "Error: You need to specify at least one sequence file."
seq1fname = handle2temp(seqfile1.file,
suffix=('.cma' if seqfile1.filename.endswith('.cma')
else '.seq'))
# Optional second sequence set -- if missing, do single mode
seqfile2 = bottle.request.files.seqfile2
if hasattr(seqfile2, 'file'):
seq2fname = handle2temp(seqfile2.file,
suffix=('.cma' if
seqfile2.filename.endswith('.cma') else
'.seq'))
if not name:
name = "%s-vs-%s" % (seqfile1.filename.rsplit('.', 1)[0],
seqfile2.filename.rsplit('.', 1)[0])
else:
seq2fname = ''
if not name:
name = seqfile1.filename
# Optional HMM profile for alignment
profile = bottle.request.files.profile
if hasattr(profile, 'file'):
if not profile.filename.endswith('.hmm'):
return "HMM profile file name must end in .hmm"
profname = handle2temp(profile.file, suffix='.hmm')
logging.info("Aligning %s with profile %s", seq1fname, profname)
fg_aln = core.hmm_align_and_read(profname, seq1fname)
if seq2fname:
logging.info("Aligning %s with profile %s", seq2fname, profname)
bg_aln = core.hmm_align_and_read(profname, seq2fname)
else:
profname = ''
fg_aln = core.read_aln(seq1fname, 'fasta')
if seq2fname:
bg_aln = core.read_aln(seq2fname, 'fasta')
pdbfile = bottle.request.files.pdbfile
if hasattr(pdbfile, 'file'):
if not profname:
return ("Error: to generate a PyMOL script for a PDB file you must"
"also specify an HMM profile")
pdbfname = handle2temp(pdbfile.file)
logging.info("Aligning %s with profile %s", pdbfile.filename, profname)
pdb_rec, pdb_resnums, pdb_inserts = core.pdb_hmm(profname,
pdbfname)
pdb_data = [(pdbfname, pdb_rec, pdb_resnums, pdb_inserts)]
else:
pdbfname = ''
pdb_data = None
# Mutually exclusive with pdbfname (above):
pdbid = bottle.request.forms.pdbid
if pdbid:
# If PDB ID: .pml should use "fetch" instead of "load"?
# Can get this info w/o dl'ing actual PDB file (e.g. via FASTA)?
pass
stat_module = dict(gtest=gtest, urn=urn, jsd=jsd, phospho=phospho,
)[bottle.request.forms.strategy]
try:
alpha = float(bottle.request.forms.alpha)
if not 0.0 <= alpha <= 1.0:
raise ValueError
except ValueError:
return "Error: alpha must be a number between 0 and 1"
_fdo, tmp_output = tempfile.mkstemp(suffix='.out')
os.close(_fdo)
_fdp, tmp_pattern = tempfile.mkstemp(suffix='.pttrn')
os.close(_fdp)
# Run the algorithms...
if seq2fname:
# Pair mode
fg_clean, bg_clean, hits = core.process_pair(fg_aln, bg_aln,
stat_module, False)
core.process_output(fg_clean, bg_clean, hits, alpha,
tmp_output, tmp_pattern,
pdb_data)
else:
# Single mode
aln, hits = core.process_one(fg_aln, stat_module, False)
core.process_output(aln, None, hits, alpha,
tmp_output, tmp_pattern,
pdb_data)
# Get the HTML report data
contents = report.do_single(tmp_output, tmp_pattern)[1]
cleanup(seq1fname)
cleanup(seq2fname)
cleanup(profname)
cleanup(tmp_output)
cleanup(tmp_pattern)
return report.html_page_tpl % dict(title=name, contents=contents)
# --- Helpers ---
def handle2temp(handle, suffix=''):
"""Write file handle contents to a temporary file, return tempfile name."""
_fd, fname = tempfile.mkstemp(suffix=suffix)
os.write(_fd, handle.read())
os.close(_fd)
return fname
def cleanup(fname):
"""Remove a temporary file that may or may not exist."""
if os.path.isfile(fname):
try:
os.remove(fname)
print("Cleaned up", fname)
except OSError:
print("Failed to clean up", fname)
# --- Run ---
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO,
format="%(module)s [@%(lineno)s]: %(message)s")
webbrowser.open("http://localhost:8080/cladecompare")
bottle.run(host='localhost', port=8080)
| bsd-2-clause | 1,552,306,808,442,255,000 | 27.303249 | 86 | 0.585459 | false | 3.391003 | false | false | false |
kovidgoyal/kitty | kittens/hyperlinked_grep/main.py | 1 | 2181 | #!/usr/bin/env python
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2020, Kovid Goyal <kovid at kovidgoyal.net>
import os
import re
import signal
import socket
import subprocess
import sys
from typing import Callable, cast
from urllib.parse import quote_from_bytes
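# Hyperlinks use the OSC 8 escape sequence (ESC ] 8 ; ; URL ESC \, then the
# text, then an empty OSC 8 to close), which terminals like kitty render as
# clickable links.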
def write_hyperlink(write: Callable[[bytes], None], url: bytes, line: bytes, frag: bytes = b'') -> None:
text = b'\033]8;;' + url
if frag:
text += b'#' + frag
text += b'\033\\' + line + b'\033]8;;\033\\'
write(text)
def main() -> None:
if not sys.stdout.isatty() and '--pretty' not in sys.argv:
os.execlp('rg', 'rg', *sys.argv[1:])
cmdline = ['rg', '--pretty', '--with-filename'] + sys.argv[1:]
p = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
assert p.stdout is not None
write: Callable[[bytes], None] = cast(Callable[[bytes], None], sys.stdout.buffer.write)
sgr_pat = re.compile(br'\x1b\[.*?m')
osc_pat = re.compile(b'\x1b\\].*?\x1b\\\\')
num_pat = re.compile(br'^(\d+)[:-]')
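    # rg --pretty prefixes matching lines with "<line>:" and context lines
    # with "<line>-"; the captured number is appended as the URL fragment.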
in_result: bytes = b''
hostname = socket.gethostname().encode('utf-8')
try:
for line in p.stdout:
line = osc_pat.sub(b'', line) # remove any existing hyperlinks
clean_line = sgr_pat.sub(b'', line).rstrip() # remove SGR formatting
if not clean_line:
in_result = b''
write(b'\n')
continue
if in_result:
m = num_pat.match(clean_line)
if m is not None:
write_hyperlink(write, in_result, line, frag=m.group(1))
else:
write(line)
else:
if line.strip():
path = quote_from_bytes(os.path.abspath(clean_line)).encode('utf-8')
in_result = b'file://' + hostname + path
write_hyperlink(write, in_result, line)
else:
write(line)
except KeyboardInterrupt:
p.send_signal(signal.SIGINT)
except EOFError:
pass
finally:
p.stdout.close()
raise SystemExit(p.wait())
if __name__ == '__main__':
main()
| gpl-3.0 | 3,550,360,851,716,614,700 | 31.073529 | 104 | 0.544246 | false | 3.546341 | false | false | false |
HunanTV/redis-ctl | models/node.py | 1 | 2671 | from werkzeug.utils import cached_property
from base import db, Base
from cluster import Cluster
from models.base import commit_session
class RedisNode(Base):
__tablename__ = 'redis_node'
host = db.Column(db.String(255), nullable=False)
port = db.Column(db.Integer, nullable=False)
eru_container_id = db.Column(db.String(64), index=True)
assignee_id = db.Column(db.ForeignKey(Cluster.id), index=True)
suppress_alert = db.Column(db.Integer, nullable=False, default=1)
__table_args__ = (db.Index('address', 'host', 'port', unique=True),)
def free(self):
return self.assignee_id is None
@cached_property
def containerized(self):
return self.eru_container_id is not None
@cached_property
def container_info(self):
from flask import g
if g.container_client is None or not self.containerized:
return None
return g.container_client.get_container(self.eru_container_id)
def get_by_host_port(host, port):
return db.session.query(RedisNode).filter(
RedisNode.host == host, RedisNode.port == port).first()
def list_eru_nodes(offset, limit):
return db.session.query(RedisNode).filter(
RedisNode.eru_container_id != None).order_by(
RedisNode.id.desc()).offset(offset).limit(limit).all()
def list_all_nodes():
return db.session.query(RedisNode).all()
def create_instance(host, port):
node = RedisNode(host=host, port=port)
if get_by_host_port(host, port) is None:
db.session.add(node)
db.session.flush()
return node
def list_free():
return RedisNode.query.filter(RedisNode.assignee_id == None).order_by(
RedisNode.id.desc()).all()
def create_eru_instance(host, port, eru_container_id):
node = RedisNode(host=host, port=port, eru_container_id=eru_container_id)
if get_by_host_port(host, port) is None:
db.session.add(node)
db.session.flush()
return node
def delete_eru_instance(eru_container_id):
i = db.session.query(RedisNode).filter(
RedisNode.eru_container_id == eru_container_id).first()
if i is None or i.assignee_id is not None:
raise ValueError('node not free')
db.session.delete(i)
def get_eru_by_container_id(eru_container_id):
return db.session.query(RedisNode).filter(
RedisNode.eru_container_id == eru_container_id).first()
def delete_free_instance(host, port):
node = db.session.query(RedisNode).filter(
RedisNode.host == host,
RedisNode.port == port,
RedisNode.assignee_id == None).with_for_update().first()
if node is not None:
db.session.delete(node)
| mit | 8,090,166,741,273,736,000 | 29.011236 | 77 | 0.667166 | false | 3.334582 | false | false | false |
OpenKMIP/PyKMIP | kmip/tests/unit/core/objects/test_attribute.py | 1 | 3119 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from kmip.core import attributes
from kmip.core import objects
class TestAttribute(testtools.TestCase):
"""
Test suite for the Attribute object.
"""
def setUp(self):
super(TestAttribute, self).setUp()
def tearDown(self):
super(TestAttribute, self).tearDown()
def test_init(self):
"""
Test that an Attribute object can be created.
"""
objects.Attribute()
def test_init_with_args(self):
self.skipTest('')
def test_read(self):
self.skipTest('')
def test_write(self):
self.skipTest('')
def test_repr(self):
"""
Test that repr can be applied to an Attribute object.
"""
attribute = objects.Attribute(
attribute_name=objects.Attribute.AttributeName('test-name'),
attribute_index=objects.Attribute.AttributeIndex(0),
attribute_value=attributes.CustomAttribute('test-value')
)
self.assertEqual(
"Attribute("
"attribute_name=AttributeName(value='test-name'), "
"attribute_index=AttributeIndex(value=0), "
"attribute_value=CustomAttribute(value='test-value'))",
repr(attribute)
)
def test_str(self):
"""
Test that str can be applied to an Attribute object.
"""
attribute = objects.Attribute(
attribute_name=objects.Attribute.AttributeName('test-name'),
attribute_index=objects.Attribute.AttributeIndex(0),
attribute_value=attributes.CustomAttribute('test-value')
)
self.assertEqual(
str({
'attribute_name': 'test-name',
'attribute_index': '0',
'attribute_value': 'test-value'
}),
str(attribute)
)
def test_equal_on_equal(self):
self.skipTest('')
def test_equal_on_not_equal_name(self):
self.skipTest('')
def test_equal_on_not_equal_index(self):
self.skipTest('')
def test_equal_on_not_equal_value(self):
self.skipTest('')
def test_equal_on_type_mismatch(self):
self.skipTest('')
def test_not_equal_on_equal(self):
self.skipTest('')
def test_not_equal_on_not_equal_name(self):
self.skipTest('')
def test_not_equal_on_not_equal_index(self):
self.skipTest('')
def test_not_equal_on_not_equal_value(self):
self.skipTest('')
def test_not_equal_on_type_mismatch(self):
self.skipTest('')
| apache-2.0 | 2,686,189,043,021,573,000 | 27.354545 | 75 | 0.611735 | false | 4.175368 | true | false | false |
lmoresi/UoM-VIEPS-Intro-to-Python | scripts/run-jupyter8.py | 1 | 1679 | #!/usr/bin/env python
from subprocess import call
import os
import time
# We want to start a server from each www directory
# where everything was built by the site-builder script
# Make sure jupyter defaults are correct (globally)
call("jupyter nbextension enable hide_input/main", shell=True)
# call("jupyter nbextension enable rubberband/main", shell=True)
# call("jupyter nbextension enable exercise/main", shell=True)
# This could be automated, but I am not sure how well the number of
# servers will scale ... so leave at 8 ... and hand build
# The root user is www
users = { "www" : ["vieps-pye-boss", 8080 ],
"build/www1": ["vieps-pye-1", 8081 ],
"build/www2": ["vieps-pye-2", 8082 ],
"build/www3": ["vieps-pye-3", 8083 ],
"build/www4": ["vieps-pye-4", 8084 ],
"build/www5": ["vieps-pye-5", 8085 ],
"build/www6": ["vieps-pye-6", 8086 ],
"build/www7": ["vieps-pye-7", 8087 ],
"build/www8": ["vieps-pye-8", 8088 ],
"build/www9": ["vieps-pye-9", 8089 ],
"build/www10": ["vieps-pye-10", 8090 ],
"build/www11": ["vieps-pye-11", 8091 ],
"build/www12": ["vieps-pye-12", 8092 ] }
# Maybe we need to quote the password in case it has odd characters in it
for dir in users.keys():
password = users[dir][0]
port = users[dir][1]
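    # Start a background notebook server for this directory on its own port,
    # protected by the per-directory token.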
call( "cd {:s} && nohup jupyter notebook --port={:d} --ip='*' --no-browser\
--NotebookApp.token={:s} --NotebookApp.default_url=/files/index.html &".format(dir, port, password), shell=True )
# Don't exit
while True:
time.sleep(10)
| mit | -5,864,566,678,173,984,000 | 35.5 | 124 | 0.589041 | false | 3.025225 | false | false | false |
1flow/1flow | oneflow/core/migrations/0011_auto__del_field_mailaccount_date_test__add_field_mailaccount_date_last.py | 2 | 9154 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'MailAccount.date_test'
db.delete_column(u'core_mailaccount', 'date_test')
# Adding field 'MailAccount.date_last_conn'
db.add_column(u'core_mailaccount', 'date_last_conn',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2007, 1, 1, 0, 0)),
keep_default=False)
def backwards(self, orm):
# Adding field 'MailAccount.date_test'
db.add_column(u'core_mailaccount', 'date_test',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2007, 1, 1, 0, 0)),
keep_default=False)
# Deleting field 'MailAccount.date_last_conn'
db.delete_column(u'core_mailaccount', 'date_last_conn')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'base.user': {
'Meta': {'object_name': 'User'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'email_announcements': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'hash_codes': ('jsonfield.fields.JSONField', [], {'default': "{'unsubscribe': '940b6cd148ca4e09b8f18d4a7b37d7a4'}", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'register_data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'sent_emails': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.corepermissions': {
'Meta': {'object_name': 'CorePermissions'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'core.helpcontent': {
'Meta': {'ordering': "['ordering', 'id']", 'object_name': 'HelpContent'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_en': ('django.db.models.fields.TextField', [], {}),
'content_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'name_nt': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'core.mailaccount': {
'Meta': {'unique_together': "(('user', 'hostname', 'username'),)", 'object_name': 'MailAccount'},
'conn_error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_conn': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2007, 1, 1, 0, 0)'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_usable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'use_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'core.mailfeed': {
'Meta': {'object_name': 'MailFeed'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.mailfeedrule': {
'Meta': {'object_name': 'MailFeedRule'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailAccount']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailfeed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeed']"})
},
'core.mailfeedruleline': {
'Meta': {'object_name': 'MailFeedRuleLine'},
'header_field': ('django.db.models.fields.CharField', [], {'default': "u'any'", 'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match_type': ('django.db.models.fields.CharField', [], {'default': "u'contains'", 'max_length': '10'}),
'match_value': ('django.db.models.fields.TextField', [], {'max_length': '255'}),
'other_header': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'rule': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeedRule']"})
}
}
complete_apps = ['core'] | agpl-3.0 | -1,872,303,365,812,109,300 | 70.523438 | 195 | 0.550142 | false | 3.592622 | false | false | false |
alexortizrosado/pyBreezeChMS | samples/easytithe_importer.py | 2 | 8096 | #!/usr/bin/python
"""Import contributions from EasyTithe to BreezeChMS.
Logs into your EasyTithe account and imports contributions into BreezeChMS using
the Python Breeze API.
Usage:
python easytithe_importer.py \\
--username [email protected] \\
--password easytithe_password \\
--breeze_url https://demo.breezechms.com \\
--breeze_api_key 5c2d2cbacg3 \\
--start_date 01/01/2014 \\
--end_date 12/31/2014 \\
[--debug \\]
[--dry_run \\]
"""
__author__ = '[email protected] (Alex Ortiz-Rosado)'
import argparse
import logging
import os
import re
import sys
from datetime import datetime
from easytithe import easytithe
try:
from breeze import breeze
except ImportError:
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.pardir))
from breeze import breeze
class Contribution(object):
"""An object for storing a contribution from EasyTithe."""
def __init__(self, contribution):
"""Instantiates a Contribution object.
Args:
contribution: a single contribution from EasyTithe.
"""
self._contribution = contribution
@property
def first_name(self):
return self._contribution['Name'].split()[0]
@property
def last_name(self):
return self._contribution['Name'].split()[-1]
@property
def full_name(self):
return '%s %s' % (self.first_name, self.last_name)
@property
def name(self):
return self._contribution['Name']
@property
def date(self):
formatted_date = datetime.strptime(
self._contribution['Date'], '%m/%d/%Y')
return formatted_date.strftime('%Y-%m-%d')
@property
def fund(self):
return self._contribution['Fund']
@fund.setter
def fund(self, fund_name):
self._contribution['Fund'] = fund_name
@property
def amount(self):
        # Removes leading $ and any thousands separator.
return self._contribution['Amount'].lstrip('$').replace(',', '')
@property
def card_type(self):
return self._contribution['Type']
@property
def email_address(self):
return self._contribution['Email']
@property
def uid(self):
return self._contribution['PersonID']
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'-u', '--username',
required=True,
nargs='*',
help='EasyTithe username.')
parser.add_argument(
'-p', '--password',
required=True,
nargs='*',
help='EasyTithe password.')
parser.add_argument(
'-k', '--breeze_api_key',
required=True,
nargs='*',
help='Breeze API key.')
parser.add_argument(
'-l', '--breeze_url',
required=True,
nargs='*',
        help=('Fully qualified domain name for your organization\'s Breeze '
'subdomain.'))
parser.add_argument(
'-s', '--start_date',
required=True,
nargs='*',
help='Start date to get contribution information for.')
parser.add_argument(
'-e', '--end_date',
required=True,
nargs='*',
help='End date to get contribution information for.')
parser.add_argument(
'-d', '--dry_run',
action='store_true',
help='No-op, do not write anything.')
parser.add_argument(
'--debug',
action='store_true',
help='Print debug output.')
args = parser.parse_args()
return args
def enable_console_logging(default_level=logging.INFO):
logger = logging.getLogger()
console_logger = logging.StreamHandler()
console_logger.setLevel(default_level)
formatter = logging.Formatter(
'%(asctime)s [%(levelname)8s] %(filename)s:%(lineno)s - %(message)s ',
'%Y-%m-%d %H:%M:%S')
console_logger.setFormatter(formatter)
logger.addHandler(console_logger)
logging.Formatter()
logger.setLevel(default_level)
def main():
args = parse_args()
if args.debug:
enable_console_logging(logging.DEBUG)
else:
enable_console_logging()
start_date = args.start_date[0]
end_date = args.end_date[0]
# Log into EasyTithe and get all contributions for date range.
username = args.username[0]
password = args.password[0]
logging.info('Connecting to EasyTithe as [%s]', username)
easytithe_api = easytithe.EasyTithe(username, password)
contributions = [
Contribution(contribution)
for contribution in easytithe_api.GetContributions(
start_date, end_date)
]
if not contributions:
logging.info('No contributions found between %s and %s.', start_date,
end_date)
sys.exit(0)
logging.info('Found %s contributions between %s and %s.',
len(contributions), start_date, end_date)
# Log into Breeze using API.
breeze_api_key = args.breeze_api_key[0]
breeze_url = args.breeze_url[0]
breeze_api = breeze.BreezeApi(breeze_url, breeze_api_key,
dry_run=args.dry_run)
people = breeze_api.get_people()
if not people:
logging.info('No people in Breeze database.')
sys.exit(0)
logging.info('Found %d people in Breeze database.', len(people))
for person in people:
person['full_name'] = '%s %s' % (person['force_first_name'].strip(),
person['last_name'].strip())
for contribution in contributions:
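        # Find Breeze people whose full name matches this contribution's name
        # (case-insensitive), ignoring records with a blank name.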
person_match = [person for person in people
if re.search(person['full_name'],
contribution.full_name, re.IGNORECASE) and
person['full_name'] != ' ']
contribution_params = {
'date': contribution.date,
'name': contribution.name,
'uid': contribution.uid,
'method': 'Credit/Debit Online',
'funds_json': (
'[{"name": "%s", "amount": "%s"}]' % (contribution.fund,
contribution.amount)),
'amount': contribution.amount,
'group': contribution.date,
'processor': 'EasyTithe',
'batch_name': 'EasyTithe (%s)' % contribution.date
}
if not person_match:
logging.warning(
'Unable to find a matching person in Breeze for [%s]. '
'Adding contribution to Breeze as Anonymous.',
contribution.full_name)
breeze_api.add_contribution(**contribution_params)
else:
def is_duplicate_contribution(person_id, date, amount):
"""Predicate that checks if a contribution is a duplicate."""
return breeze_api.list_contributions(
start_date=date,
end_date=date,
person_id=person_id,
amount_min=amount,
amount_max=amount)
if is_duplicate_contribution(date=contribution.date,
person_id=person_match[0]['id'],
amount=contribution.amount):
logging.warning(
'Skipping duplicate contribution for [%s] paid on [%s] '
'for [%s]', contribution.full_name, contribution.date,
contribution.amount)
continue
logging.info('Person:[%s]', person_match)
logging.info(
'Adding contribution for [%s] to fund [%s] in the amount of '
'[%s] paid on [%s].', contribution.full_name,
contribution.fund, contribution.amount, contribution.date)
# Add the contribution on the matching person's Breeze profile.
contribution_params['person_id'] = person_match[0]['id']
breeze_api.add_contribution(**contribution_params)
if __name__ == '__main__':
main()
| apache-2.0 | -7,111,658,035,406,301,000 | 29.550943 | 80 | 0.567811 | false | 3.956989 | false | false | false |
ctSkennerton/fxtract | fxtract_test/speed_test/plot_times.py | 2 | 1384 | #!/usr/bin/env python
from __future__ import print_function
import sys, os, glob, re
fixed_re = re.compile('fxtract-(\w+)_random_(10{2,5})_(\d\d)bp_patterns_times\.txt')
norm_re = re.compile('fxtract-(\w+)_random_(10{2,5})_patterns_minus_small_times\.txt')
impl_codes = {'ac': 'aho-corasick', 'sh': 'set horspool', 'mb': 'multi-bfam',
'wm': 'wu-manber', 'cwm': 'non-seqan wu-manber'}
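# Short implementation codes used in the benchmark file names, mapped to the
# matcher algorithm names they stand for.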
times_files = glob.glob("./fxtract*times.txt")
data = {}
for fn in times_files:
match = fixed_re.search(fn)
if match:
impl = impl_codes[match.group(1)]
pattern_type = match.group(3)
pattern_count = match.group(2)
else:
match = norm_re.search(fn)
if match:
impl = impl_codes[match.group(1)]
pattern_type = 'norm'
pattern_count = match.group(2)
else:
raise ValueError("Filename %s does not fit either form" % fn)
with open(fn) as fp:
first_line = True
seconds = 0.0
memory = 0.0
for line in fp:
if first_line:
first_line = False
continue
fields = line.rstrip().split(',')
seconds += float(fields[1]) + float(fields[2])
memory += float(fields[-1])
seconds /= 10.0
memory /= 10.0
print(impl,pattern_type, pattern_count, seconds, memory, sep="\t")
| mit | -3,389,505,375,175,508,500 | 31.952381 | 86 | 0.554191 | false | 3.248826 | false | false | false |
bobbybabra/codeGuild | kevinStore.py | 1 | 2222 | __author__ = 'kevin'
class Store():
def __init__(self):
self.available_items = []
self.customer_list = []
class InventoryItem():
def __init__(self, name, on_hand, price):
self.name = name
self.on_hand = on_hand
self.price = price
def __repr__(self):
output = ""
output = output + self.name
output = output + " @ " + str(self.price)
return output
class CartLineItem():
def __init__(self, item, quantity):
self.item = item
self.quantity = quantity
def __repr__(self):
output = ""
output = output + str(self.quantity)
output = output + " x "
output = output + str(self.item)
output = output + " is "
output = output + str(self.quantity * self.item.price)
return output
class Cart():
def __init__(self):
self.selected_items = []
def get_total(self):
total = 0.0
for line in self.selected_items:
line_total = line.quantity * line.item.price
total = total + line_total
return total
def __repr__(self):
output = "Cart:\n"
for line in self.selected_items:
output = output + str(line) + "\n"
output = output + "\nTotal: " + str(self.get_total())
return output
class Customer():
def __init__(self, name):
self.name = name
self.cart = Cart()
from store import *
# TEST
amazon = Store()
amazon.available_items = {
111: InventoryItem("Farenheit 451", 10, 4.99),
222: InventoryItem("World According to Garp", 5, 4.99),
333: InventoryItem("Stranger in a Stange Land.", 1, 4.99),
}
amazon.customer_list = {
11: Customer("Bob"),
22: Customer("Carol"),
}
# ##
# TEST
# ##
# who are you? select a customer.
bob = amazon.customer_list[11]
# what do you want to buy?
item = amazon.available_items[333]
#how many?
qty = 2
#add to cart
bob.cart.selected_items.append(CartLineItem(item, qty))
#what do you want to buy?
item = amazon.available_items[222]
#how many?
qty = 3
#add to cart
bob.cart.selected_items.append(CartLineItem(item, qty))
#add more? if no then checkout and print cart
print bob.cart
| bsd-2-clause | -5,598,137,168,085,619,000 | 20.365385 | 62 | 0.578308 | false | 3.41321 | false | false | false |
cygx/nqp | 3rdparty/dyncall/test/suite2_x86win32std/mkcase.py | 6 | 2026 | #!/usr/bin/python
#//////////////////////////////////////////////////////////////////////////////
#
# Copyright (c) 2007,2009 Daniel Adler <[email protected]>,
# Tassilo Philipp <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
#//////////////////////////////////////////////////////////////////////////////
import sys
f = file("design.cfg")
sigmap = {
'B':'DCbool'
, 'c':'DCchar'
, 's':'DCshort'
, 'i':'DCint'
, 'l':'DClonglong'
, 'f':'DCfloat'
, 'd':'DCdouble'
, 'p':'DCpointer'
}
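# Calling-convention codes mapped to their __declspec attributes (only used by
# the commented-out `api` lookup below).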
apimap = {
'_':''
, 's':'__declspec(stdcall)'
, 'f':'__declspec(fastcall)'
}
id = 0
maxargs = 0
sys.stdout.write("/* auto generated by mkcase.py (on stdout) */\n");
for line in f:
line = line.rstrip().lstrip()
if len(line) == 0 or line[0] == '#': continue
types = [];
# args = len(line)-1
args = len(line)
maxargs = max(maxargs, args)
# api = apimap[ line[0] ]
out = [ "VF",str(args),"(", str(id), "," ];
for i in xrange(0,len(line)):
types += [ sigmap[ line[i] ] ]
out += [ ",".join( types ), ",s_", line, ")\n" ]
out = "".join(out)
sys.stdout.write(out)
id += 1
sys.stderr.write("/* auto generated by mkcase (on stderr) */\n");
sys.stderr.write("".join( ["#define NCASES ",str(id),"\n"] ) )
sys.stderr.write("".join( ["#define MAXARGS ",str(maxargs),"\n"] ) )
| artistic-2.0 | 2,967,816,954,526,329,000 | 29.69697 | 79 | 0.591313 | false | 3.288961 | false | false | false |
bjornfor/fgui | .ycm_extra_conf.py | 1 | 1648 | # YouCompleteMe Vim plugin config
import os
import ycm_core
import subprocess
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-fexceptions',
#'-DNDEBUG',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c99',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c',
]
def is_c_source_file(filename):
return filename[-2:] == ".c"
def is_c_header_file(filename):
return filename[-2:] == ".h"
def is_cxx_file(filename):
return filename[-2:] in (".cpp", ".cxx")
def is_cxx_header(filename):
return filename[-2:] in (".hpp", ".hxx")
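# Ask pkg-config for SDL's compile and link flags and return them as a list.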
def get_proj_flags():
output = subprocess.check_output("pkg-config --cflags --libs sdl", shell=True)
return output.split()
flags.extend(get_proj_flags())
# youcompleteme is calling this function to get flags
# You can also set database for flags. Check: JSONCompilationDatabase.html in
# clang-3.2-doc package
def FlagsForFile(filename):
return {
'flags': flags,
'do_cache': True
}
| mit | 8,540,638,513,112,684,000 | 28.963636 | 82 | 0.652306 | false | 3.476793 | false | false | false |
cudacode/RPIBigTrak | python/CommandInterface.py | 1 | 3300 | # Copyright (c) 2015 'cudacode'
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
import Tkinter
import CommandModel
class CommandInterface:
'Command User Interface Methods'
def __init__ (self, cmdBtns, cntBtns, cmdModel):
self.cmdBtns = cmdBtns
self.cntBtns = cntBtns
self.cmdModel = cmdModel
self.mode = 'open'
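        # mode tracks which input is expected next: 'command', 'count' or
        # 'open' (either a command or a number).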
def enableBtns(self, btnArray):
for btn in btnArray:
btn['state'] = Tkinter.NORMAL
def disableBtns(self, btnArray):
for btn in btnArray:
btn['state'] = Tkinter.DISABLED
def commandMode(self):
print ('Command Mode')
self.mode = 'command'
self.enableBtns(self.cmdBtns)
self.disableBtns(self.cntBtns)
def countMode(self):
print ('Count Mode')
self.mode = 'count'
self.enableBtns(self.cntBtns)
self.disableBtns(self.cmdBtns)
def openMode(self):
print ('Open Mode')
self.mode = 'open'
self.enableBtns(self.cntBtns)
self.enableBtns(self.cmdBtns)
    # The mode reflects the input we are looking for: command | count | open (command or number)
def getMode(self):
return self.mode
def isOpenMode(self):
if self.mode == 'open':
return True
else:
return False
# def updateCmdVar(command):
# cmdVar.set(command.name)
def clrCallBack(self):
print ('Clear Command')
self.cmdModel.clear()
self.commandMode()
def clsCallBack(self):
print ('Clear Last Command')
self.cmdModel.clearLast()
self.commandMode()
def fireCallBack(self):
print ('Fire Command')
self.countMode()
self.cmdModel.newCommand('fire')
def goCallBack(self):
print ('Go Command')
self.cmdModel.execute()
self.commandMode()
def fwdCallBack(self):
print ('Forward Command')
self.countMode()
self.cmdModel.newCommand('fwd')
def backCallBack(self):
print ('Back Command')
self.countMode()
self.cmdModel.newCommand('back')
def leftCallBack(self):
print ('Left Command')
self.countMode()
self.cmdModel.newCommand('left')
def rightCallBack(self):
print ('Right Command')
self.countMode()
self.cmdModel.newCommand('right')
def holdCallBack(self):
print ('Hold Command')
self.countMode()
self.cmdModel.newCommand('hold')
def numCallBack(self, num):
print ('Num Button', num)
self.cmdModel.updateParam(num)
if self.isOpenMode():
self.commandMode()
else:
self.openMode()
| mit | -4,700,101,757,885,334,000 | 25.612903 | 92 | 0.727273 | false | 3.283582 | false | false | false |
lmazuel/azure-sdk-for-python | azure-keyvault/azure/keyvault/models/certificate_update_parameters.py | 4 | 1565 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CertificateUpdateParameters(Model):
"""The certificate update parameters.
:param certificate_policy: The management policy for the certificate.
:type certificate_policy: :class:`CertificatePolicy
<azure.keyvault.models.CertificatePolicy>`
:param certificate_attributes: The attributes of the certificate
(optional).
:type certificate_attributes: :class:`CertificateAttributes
<azure.keyvault.models.CertificateAttributes>`
:param tags: Application specific metadata in the form of key-value pairs.
:type tags: dict
"""
_attribute_map = {
'certificate_policy': {'key': 'policy', 'type': 'CertificatePolicy'},
'certificate_attributes': {'key': 'attributes', 'type': 'CertificateAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, certificate_policy=None, certificate_attributes=None, tags=None):
self.certificate_policy = certificate_policy
self.certificate_attributes = certificate_attributes
self.tags = tags
| mit | -5,860,635,482,764,688,000 | 40.184211 | 89 | 0.64345 | false | 4.830247 | false | false | false |
sabel83/metashell | 3rd/templight/libcxx/utils/sym_diff.py | 8 | 2793 | #!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# The LLVM Compiler Infrastructure
#
# This file is dual licensed under the MIT and the University of Illinois Open
# Source Licenses. See LICENSE.TXT for details.
#
#===----------------------------------------------------------------------===##
"""
sym_diff - Compare two symbol lists and output the differences.
"""
from argparse import ArgumentParser
import sys
from libcxx.sym_check import diff, util
def main():
parser = ArgumentParser(
description='Extract a list of symbols from a shared library.')
parser.add_argument(
'--names-only', dest='names_only',
help='Only print symbol names',
action='store_true', default=False)
parser.add_argument(
'--removed-only', dest='removed_only',
help='Only print removed symbols',
action='store_true', default=False)
parser.add_argument('--only-stdlib-symbols', dest='only_stdlib',
help="Filter all symbols not related to the stdlib",
action='store_true', default=False)
parser.add_argument('--strict', dest='strict',
help="Exit with a non-zero status if any symbols "
"differ",
action='store_true', default=False)
parser.add_argument(
'-o', '--output', dest='output',
help='The output file. stdout is used if not given',
type=str, action='store', default=None)
parser.add_argument(
'--demangle', dest='demangle', action='store_true', default=False)
parser.add_argument(
'old_syms', metavar='old-syms', type=str,
help='The file containing the old symbol list or a library')
parser.add_argument(
'new_syms', metavar='new-syms', type=str,
help='The file containing the new symbol list or a library')
args = parser.parse_args()
old_syms_list = util.extract_or_load(args.old_syms)
new_syms_list = util.extract_or_load(args.new_syms)
if args.only_stdlib:
old_syms_list, _ = util.filter_stdlib_symbols(old_syms_list)
new_syms_list, _ = util.filter_stdlib_symbols(new_syms_list)
added, removed, changed = diff.diff(old_syms_list, new_syms_list)
if args.removed_only:
added = {}
report, is_break, is_different = diff.report_diff(
added, removed, changed, names_only=args.names_only,
demangle=args.demangle)
if args.output is None:
print(report)
else:
with open(args.output, 'w') as f:
f.write(report + '\n')
exit_code = 1 if is_break or (args.strict and is_different) else 0
sys.exit(exit_code)
if __name__ == '__main__':
main()
| gpl-3.0 | -3,208,067,089,656,349,000 | 37.260274 | 79 | 0.58038 | false | 3.950495 | false | false | false |
asb29/Redundant | auth/tests/test_views.py | 2 | 1446 | from django.test import TestCase
from django.test import Client
class RegisterTestCase(TestCase):
def test_register(self):
c = Client()
# on success redirects to /
response = c.post('/accounts/register/', {
'username': 'asdas',
'password1': 'asdasdasd12',
'password2': 'asdasdasd12'
})
self.assertRedirects(response, '/')
# passwords don't match
response = c.post('/accounts/register/', {
'username': 'asdasdasd1',
'password1': 'asdasdasd1',
'password2': 'asdasdasd2'
})
self.assertEquals(response.status_code, 200)
# username is empty
response = c.post('/accounts/register/', {
'username': '',
'password1': 'asdasdasd12',
'password2': 'asdasdasd12'
})
self.assertEquals(response.status_code, 200)
# no password
response = c.post('/accounts/register/', {
'username': 'asdasdasd',
'password1': '',
'password2': ''
})
self.assertEquals(response.status_code, 200)
# username and password are similar
response = c.post('/accounts/register/', {
'username': 'asdasdasd0',
'password1': 'asdasdasd1',
'password2': 'asdasdasd1'
})
self.assertEquals(response.status_code, 200)
| mit | -5,282,888,757,211,514,000 | 29.765957 | 52 | 0.531812 | false | 4.167147 | true | false | false |
stanfordnqp/spins-b | spins/goos/generic.py | 1 | 4455 | """Defines nodes for generic operations."""
from typing import List, Optional, Union
from spins import goos
from spins.goos import flows
from spins.goos import optplan
def cast(node: goos.ProblemGraphNode,
cls_type,
name: Optional[str] = None) -> goos.ProblemGraphNode:
"""Casts a problem graph node into another type.
The problem graph node can be cast into any arbitrary type. No checks are
performed whatsoever, so the resulting graph may throw an error during
execution.
    Casting works by creating a new `CastOp` each time whose superclass is
determined by `cls_type`. This `CastOp` simply performs the identity
operation. In order to handle serialization/deserialization, this `CastOp`
class is not registered with the context. Instead, `build_cast_op` function
is registered.
Usage:
numeric_node = goos.cast(node, goos.Function) + 3
Args:
node: Node to cast.
cls_type: Class type.
name: Name of the cast node.
Returns:
A dummy `CastOp` that has the target type `cls_type` and simply forwards
the result of the underlying node. It is essentially an identity
operation.
"""
class CastOp(cls_type):
node_type = "goos.cast"
# We will register a custom function `build_cast_op` instead. We need
# to do this as the superclass of `CastOp` needs to be parsed from
# the schema during a load.
goos_no_register = True
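        # CastSchema is defined later in this module; this class body only runs
        # when cast() is called, so the forward reference resolves at runtime.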
Schema = CastSchema
def __init__(self, node: goos.ProblemGraphNode, target_type: str):
goos.ProblemGraphNode.__init__(self, node)
self._node = node
def eval(self, inputs):
return inputs[0]
def grad(self, inputs, grad_val):
return [grad_val]
def __getattr__(self, name: str):
"""Forwards any function calls to the underlying node."""
return getattr(self._node, name)
return CastOp(node, cls_type.node_type, name=name)
def build_cast_op(node: goos.ProblemGraphNode, target_type: str,
name: str) -> goos.ProblemGraphNode:
"""Constructs a cast operation from the schema.
This function is registered with the context in order to perform casting
operations.
Args:
node: The node to cast.
target_type: The string name of the type to cast into.
Returns:
`CastOp` object. See `cast`.
"""
return cast(node,
optplan.GLOBAL_CONTEXT_STACK.get_node(target_type).creator,
name=name)
class CastSchema(optplan.ProblemGraphNodeSchema, optplan.WildcardSchema):
"""Schema for `cast`."""
type = goos.ModelNameType("goos.cast")
node = goos.ReferenceType(optplan.ProblemGraphNodeSchema)
target_type = goos.types.StringType()
optplan.GLOBAL_CONTEXT_STACK.register_node("goos.cast", CastSchema,
build_cast_op)
def rename(node: goos.ProblemGraphNode, name: str) -> goos.ProblemGraphNode:
"""Renames a given node.
Because the name of a node is fixed upon node creation, this function serves
as a mechanism to change the name of a node. It does this by creating
an identity op (by casting a node into the same type) with a new name
`name`.
Args:
node: Node to rename.
name: New name of the node.
Returns:
Node with the same type but with name `name`.
"""
return cast(node, type(node), name=name)
class LogPrint(goos.Action):
"""Prints text out to the log.
This is useful for debugging purposes.
"""
node_type = "goos.log_print"
def __init__(
self,
obj: Union[str, goos.ProblemGraphNode,
List[goos.ProblemGraphNode]] = None
) -> None:
super().__init__()
self._obj = obj
def run(self, plan: goos.OptimizationPlan) -> None:
if isinstance(self._obj, str):
plan.logger.info(self._obj)
return
if isinstance(self._obj, goos.Function):
nodes = [self._obj]
else:
nodes = self._obj
values = plan.eval_nodes(nodes)
for node, val in zip(nodes, values):
plan.logger.info("{}: {}".format(node._goos_name, val))
def log_print(*args, **kwargs) -> LogPrint:
action = LogPrint(*args, **kwargs)
goos.get_default_plan().add_action(action)
return action
| gpl-3.0 | 5,058,664,186,843,052,000 | 29.724138 | 80 | 0.627609 | false | 3.92511 | false | false | false |
haakenlid/django-skeleton | settings/base.py | 1 | 5649 | # -*- coding: utf-8 -*-
"""
Django settings for skeleton project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
# import django.conf.global_settings as DEFAULT_SETTINGS
def env_var(keyname):
""" shortcut for getting environmental variables """
# To avoid commiting passwords and usernames to git and GitHub,
# these settings are saved as environmental variables in a file called postactivate.
# Postactivate is sourced when the virtual environment is activated.
return os.environ.get('DJANGO_{keyname}'.format(keyname=keyname.upper().replace(' ', '_')))
def join_path(*paths):
""" shortcut for joining paths. cross os compatible """
return os.path.normpath(os.path.join(*paths))
# CORE CONFIG
ROOT_URLCONF = 'core.urls'
WSGI_APPLICATION = 'core.wsgi.application'
SECRET_KEY = env_var('SECRET_KEY')
SITE_URL = env_var('SITE_URL')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = [env_var('SITE_URL'), ]
# EMAIL CONFIGURATION
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = env_var('GMAIL_USER')
EMAIL_HOST_PASSWORD = env_var('GMAIL_PASSWORD')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# CUSTOM APPS
INSTALLED_APPS = [
'core',
'functional_tests',
]
# THIRD PARTY APPS
INSTALLED_APPS = [
# 'autocomplete_light',
'django_extensions',
'sorl.thumbnail',
] + INSTALLED_APPS
# CORE APPS
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
] + INSTALLED_APPS
# MIDDLEWARE
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# POSTGRESQL DATABASE
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': env_var('DB_NAME'),
'USER': env_var('DB_USER'),
'PASSWORD': env_var('DB_PASSWORD'),
'HOST': 'localhost',
'PORT': '', # Set to empty string for default.
},
}
# SORL THUMBNAILS
# Redis used as key-value store
THUMBNAIL_KVSTORE = 'sorl.thumbnail.kvstores.redis_kvstore.KVStore'
# ImageMagick ( or Ubuntu's graphicsmagick-imagemagick-compat )
THUMBNAIL_ENGINE = 'sorl.thumbnail.engines.convert_engine.Engine'
# REDIS CACHE
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': 'localhost:6379',
'OPTIONS': {
'DB': 0,
'PARSER_CLASS': 'redis.connection.HiredisParser',
'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool',
'CONNECTION_POOL_CLASS_KWARGS': {
'max_connections': 50,
'timeout': 20,
}
},
},
}
# PATH CONFIGURATION
# Absolute filesystem path to the Django project directory containing all
# files under version control, including django files.
BASE_DIR = env_var('SOURCE_FOLDER')
# Absolute filesystem path to the top-level project folder containing
# static folder, log folder, virtualenv and configs not under version control.
PROJECT_DIR = join_path(BASE_DIR, '..')
# This is where static files are collected to by django and served by the webserver.
STATIC_ROOT = join_path(PROJECT_DIR, 'static')
STATIC_URL = '/static/'
# User uploaded files location.
MEDIA_ROOT = join_path(PROJECT_DIR, 'static', 'media')
MEDIA_URL = '/media/'
# Extra path to collect static assest such as javascript and css
STATICFILES_DIRS = [join_path(BASE_DIR, 'assets'), ]
# Project wide fixtures to be loaded into database.
FIXTURE_DIRS = [join_path(BASE_DIR, 'fixtures'), ]
# Project wide django template files
TEMPLATE_DIRS = [join_path(BASE_DIR, 'templates'), ]
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
]
# INTERNATIONALIZATION AND TRANSLATION
LANGUAGE_CODE = 'nb_NO' # Norwegian bokmål
TIME_ZONE = 'Europe/Oslo'
USE_I18N = True # Internationalisation (string translation)
USE_L10N = True # Localisation (numbers and stuff)
USE_TZ = True # Use timezone
LOCALE_PATHS = [join_path(BASE_DIR, 'translation'), ] # Django puts generated translation files here.
# LOGGING
DEBUG_LOG_FILE = join_path(PROJECT_DIR, 'logs', 'django-debug.log')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
},
'stream_to_console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler'
},
'write_to_logfile': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': DEBUG_LOG_FILE,
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins', 'stream_to_console', ],
'level': 'DEBUG',
'propagate': True,
},
'debug': {
'handlers': ['stream_to_console', 'write_to_logfile', ],
'level': 'DEBUG',
'propagate': False,
},
},
}
| mit | 2,878,011,207,863,782,000 | 29.203209 | 102 | 0.659348 | false | 3.581484 | false | false | false |
topazproject/topaz | topaz/modules/ffi/function.py | 1 | 2041 | from topaz.module import ClassDef
from topaz.modules.ffi import type as ffitype
from topaz.modules.ffi.pointer import W_PointerObject
from topaz.modules.ffi.dynamic_library import coerce_dl_symbol
from topaz.modules.ffi.function_type import W_FunctionTypeObject
from topaz.objects.moduleobject import W_FunctionObject
from rpython.rtyper.lltypesystem import rffi, lltype
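# Expose each FFI type name as a module-level integer constant equal to its
# index in ffitype.type_names.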
for i, name in enumerate(ffitype.type_names):
globals()[name] = i
class W_FFIFunctionObject(W_PointerObject):
classdef = ClassDef('FFI::Function', W_PointerObject.classdef)
_immutable_fields_ = ['ptr']
@classdef.singleton_method('allocate')
def singleton_method_allocate(self, space, args_w):
return W_FFIFunctionObject(space)
def __init__(self, space):
W_PointerObject.__init__(self, space)
self.ptr = lltype.nullptr(rffi.VOIDP.TO)
@classdef.method('initialize')
def method_initialize(self, space, w_ret_type, w_arg_types,
w_handle=None, w_options=None):
self.w_info = space.send(space.getclassfor(W_FunctionTypeObject),
'new', [w_ret_type, w_arg_types, w_options])
self.setup(space, w_handle)
def setup(self, space, w_handle):
self.ptr = (coerce_dl_symbol(space, w_handle) if w_handle
else lltype.nullptr(rffi.VOIDP.TO))
@classdef.method('call')
def method_call(self, space, args_w, block=None):
return self.w_info.invoke(space, self.ptr, args_w, block)
@classdef.method('attach', name='str')
def method_attach(self, space, w_lib, name):
w_lib.attach_method(space, name, W_MethodAdapter(name, self))
class W_MethodAdapter(W_FunctionObject):
_immutable_fields_ = ['name', 'w_ffi_func']
def __init__(self, name, w_ffi_func):
W_FunctionObject.__init__(self, name)
self.name = name
self.w_ffi_func = w_ffi_func
def call(self, space, w_receiver, args_w, block):
return space.send(self.w_ffi_func, 'call', args_w, block)
| bsd-3-clause | 4,059,170,644,656,358,000 | 36.109091 | 77 | 0.66193 | false | 3.276083 | false | false | false |
alexzhang2015/zhihuSpider | followee_list.py | 1 | 4983 | # -*- coding: utf-8 -*-
import requests
import re
import math
import codecs
import json
import time
import datetime
global user_list, login_data, processed_user
#user_list = ['wang-wei-63','allenzhang','kentzhu']
#user_list = ['yangbo','baiya','junyu','wang-xiao-chuan']
#user_list = ['wangxing','gongjun','zhouyuan']
user_list = ['alexzhang2015']
#user_list = ['hi-id','shek']
#user_list = ['commando','chen-hao-84','jin-chen-yu']
processed_user = []
#login_data = {'email': '[email protected]', 'password': 'yourpassword','rememberme':'y',}
login_data = {'email': '[email protected]', 'password': '','rememberme':'y',}
# session object; it keeps cookies across requests automatically
s = requests.session()
# auto-login.
def login(login_data):
s.post('http://www.zhihu.com/login', login_data)
def load_more(user,data):
    # Request URL used when loading more followees
click_url = 'http://www.zhihu.com/node/ProfileFolloweesListV2'
# headers
header_info = {
'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1581.2 Safari/537.36 Test',
'Host':'www.zhihu.com',
'Origin':'http://www.zhihu.com',
'Connection':'keep-alive',
'Referer':'http://www.zhihu.com/people/' + user + '/followees',
'Content-Type':'application/x-www-form-urlencoded',
}
# form data.
try:
raw_hash_id = re.findall('hash_id(.*)',data)
hash_id = raw_hash_id[0][14:46] # hash_id
raw_xsrf = re.findall('xsrf(.*)',data)
_xsrf = raw_xsrf[0][9:-3] # _xsrf
#
load_more_times = int(re.findall('<strong>(.*?)</strong>',data)[2]) / 20
# ---- key module ----
        # Write the first 20 users' info (parsed from the initial page)
user_id = re.compile('zhihu.com/people/(.*?)"').findall(data)
user_id = user_id[1:len(user_id)]
answers = re.findall('answers" class="zg-link-gray-normal">(.*?) ',data)
asks = re.findall('asks" class="zg-link-gray-normal">(.*?) ',data)
followers = re.findall('followers" class="zg-link-gray-normal">(.*?) ',data)
goods = re.findall('class="zg-link-gray-normal">(.*?) ',data)
goods = goods[3:len(goods):4]
fp.write('user_id,followers,asks,answers,goods')
fp.write('\r\n')
write_file(user_id,followers,asks,answers,goods)
        # Write the remaining users' info
for i in range(1,load_more_times+1):
t_start = time.localtime()[5]
offsets = i*20
            # The response is JSON, so the request parameters are serialized with json as well.
params = json.dumps({"hash_id":hash_id,"order_by":"created","offset":offsets,})
payload = {"method":"next", "params": params, "_xsrf":_xsrf,}
# debug and improve robustness. Date: 2014-02-12
try:
r = s.post(click_url,data=payload,headers=header_info,timeout=18)
except:
                # Retry with a longer timeout if the response took too long
print 'repost'
r = s.post(click_url,data=payload,headers=header_info,timeout=60)
# parse info.
user_id = re.findall('href=\\\\"\\\\/people\\\\/(.*?)\\\\',r.text)
user_id = user_id[0:len(user_id):5]
user_info = re.findall('class=\\\\"zg-link-gray-normal\\\\">(.*?) ',r.text)
followers = user_info[0:len(user_info):4]
asks = user_info[1:len(user_info):4]
answers = user_info[2:len(user_info):4]
goods = user_info[3:len(user_info):4]
#print user_id,followers,asks,answers,goods
#print len(user_id),len(followers),len(asks),len(answers),len(goods)
write_file(user_id,followers,asks,answers,goods)
#print user_id
t_elapsed = time.localtime()[5] - t_start
#print 'got:',offsets,'users.','elapsed: ',t_elapsed,'s.\n'
except:
        print 'something happened'
def main():
    global user_list, fp
# login
s.post('http://www.zhihu.com/login', login_data)
#for user in user_list:
while(len(user_list) > 0):
user = user_list.pop()
print 'crawling ' + user + '\'s followees...\n'
print 'queue size: '+ str(len(user_list)) + '\n'
        # Open the output file for this user
global fp
fp = codecs.open(user + '.txt', 'w', 'utf-8')
url = 'http://www.zhihu.com/people/' + user + '/followees'
        # Fetch the user's followees page
r = s.get(url)
data = r.text
#print data
load_more(user,data)
        # Deduplicate ids
global user_list
processed_user.append(user)
user_list=list(set(user_list)-set(processed_user))
time.sleep(1)
def write_file(user_id,followers,asks,answers,goods):
for i in range(len(user_id)):
global fp
fp.write( user_id[i].strip()+','+followers[i].strip()+','+asks[i].strip()+','+answers[i].strip()+','+goods[i].strip() )
user_list.append(user_id[i].strip())
fp.write('\r\n')
if __name__=='__main__':
start_time = datetime.datetime.now()
main()
end_time = datetime.datetime.now()
print 'total time consumption: ' + str((end_time - start_time).seconds) + 's'
| mit | -9,131,285,511,980,202,000 | 29.71519 | 148 | 0.58706 | false | 2.766819 | false | false | false |
appleseedhq/blenderseed | ui/meshes.py | 2 | 2263 | #
# This source file is part of appleseed.
# Visit https://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2019 Jonathan Dent, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import bpy
from ..utils import util
class ASMESH_PT_export(bpy.types.Panel):
bl_label = "appleseed Export"
COMPAT_ENGINES = {'APPLESEED_RENDER'}
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context = "data"
@classmethod
def poll(cls, context):
renderer = context.scene.render
return renderer.engine == 'APPLESEED_RENDER' and context.object is not None and context.object.type in {'MESH', 'CURVE', 'SURFACE'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
asr_obj = context.object.data.appleseed
layout.prop(asr_obj, "export_normals", text="Export Normals")
layout.prop(asr_obj, "export_uvs", text="Export UVs")
layout.prop(asr_obj, "smooth_tangents", text="Calculate Smooth Tangents")
def register():
util.safe_register_class(ASMESH_PT_export)
def unregister():
util.safe_unregister_class(ASMESH_PT_export)
| mit | 1,035,809,832,209,433,200 | 37.355932 | 139 | 0.730004 | false | 3.803361 | false | false | false |
cprogrammer1994/ModernGL | moderngl/texture_cube.py | 1 | 7703 | from .buffer import Buffer
__all__ = ['TextureCube']
class TextureCube:
'''
A Texture is an OpenGL object that contains one or more images that all
have the same image format. A texture can be used in two ways. It can
be the source of a texture access from a Shader, or it can be used
as a render target.
.. Note:: ModernGL enables ``GL_TEXTURE_CUBE_MAP_SEAMLESS`` globally
to ensure filtering will be done across the cube faces.
    A TextureCube object cannot be instantiated directly, it requires a context.
Use :py:meth:`Context.texture_cube` to create one.
'''
__slots__ = ['mglo', '_size', '_components', '_dtype', '_glo', 'ctx', 'extra']
def __init__(self):
self.mglo = None #: Internal representation for debug purposes only.
self._size = (None, None)
self._components = None
self._dtype = None
self._glo = None
self.ctx = None #: The context this object belongs to
self.extra = None #: Any - Attribute for storing user defined objects
raise TypeError()
def __repr__(self):
return '<TextureCube: %d>' % self.glo
def __eq__(self, other):
return type(self) is type(other) and self.mglo is other.mglo
@property
def size(self):
'''
tuple: The size of the texture.
'''
return self._size
@property
def components(self) -> int:
'''
int: The number of components of the texture.
'''
return self._components
@property
def dtype(self) -> str:
'''
str: Data type.
'''
return self._dtype
@property
def filter(self):
'''
tuple: The minification and magnification filter for the texture.
        (Default ``(moderngl.LINEAR, moderngl.LINEAR)``)
Example::
texture.filter == (moderngl.NEAREST, moderngl.NEAREST)
texture.filter == (moderngl.LINEAR_MIPMAP_LINEAR, moderngl.LINEAR)
texture.filter == (moderngl.NEAREST_MIPMAP_LINEAR, moderngl.NEAREST)
texture.filter == (moderngl.LINEAR_MIPMAP_NEAREST, moderngl.NEAREST)
'''
return self.mglo.filter
@filter.setter
def filter(self, value):
self.mglo.filter = value
@property
def swizzle(self) -> str:
'''
str: The swizzle mask of the texture (Default ``'RGBA'``).
        The swizzle mask changes/reorders the ``vec4`` value returned by the ``texture()`` function
        in a GLSL shader. This is represented by a 4 character string where each
character can be::
'R' GL_RED
'G' GL_GREEN
'B' GL_BLUE
'A' GL_ALPHA
'0' GL_ZERO
'1' GL_ONE
Example::
# Alpha channel will always return 1.0
texture.swizzle = 'RGB1'
# Only return the red component. The rest is masked to 0.0
texture.swizzle = 'R000'
# Reverse the components
texture.swizzle = 'ABGR'
'''
return self.mglo.swizzle
@swizzle.setter
def swizzle(self, value):
self.mglo.swizzle = value
@property
def anisotropy(self):
'''
float: Number of samples for anisotropic filtering (Default ``1.0``).
The value will be clamped to the range ``1.0`` to ``ctx.max_anisotropy``.
Any value greater than 1.0 counts as a use of anisotropic filtering::
# Disable anisotropic filtering
texture.anisotropy = 1.0
# Enable anisotropic filtering suggesting 16 samples as a maximum
texture.anisotropy = 16.0
'''
return self.mglo.anisotropy
@anisotropy.setter
def anisotropy(self, value):
self.mglo.anisotropy = value
@property
def glo(self) -> int:
'''
int: The internal OpenGL object.
This value is provided for debug purposes only.
'''
return self._glo
def read(self, face, *, alignment=1) -> bytes:
'''
Read a face from the cubemap as bytes into system memory.
Args:
face (int): The face to read.
Keyword Args:
alignment (int): The byte alignment of the pixels.
'''
return self.mglo.read(face, alignment)
def read_into(self, buffer, face, *, alignment=1, write_offset=0) -> None:
'''
Read a face from the cubemap texture.
Read a face of the cubemap into a bytearray or :py:class:`~moderngl.Buffer`.
The advantage of reading into a :py:class:`~moderngl.Buffer` is that pixel data
does not need to travel all the way to system memory::
# Reading pixel data into a bytearray
data = bytearray(4)
texture = ctx.texture_cube((2, 2), 1)
texture.read_into(data, 0)
# Reading pixel data into a buffer
data = ctx.buffer(reserve=4)
texture = ctx.texture_cube((2, 2), 1)
texture.read_into(data, 0)
Args:
buffer (bytearray): The buffer that will receive the pixels.
face (int): The face to read.
Keyword Args:
alignment (int): The byte alignment of the pixels.
write_offset (int): The write offset.
'''
if type(buffer) is Buffer:
buffer = buffer.mglo
return self.mglo.read_into(buffer, face, alignment, write_offset)
def write(self, face, data, viewport=None, *, alignment=1) -> None:
'''
Update the content of the texture.
Update the content of a face in the cubemap from byte data
or a moderngl :py:class:`~moderngl.Buffer`::
# Write data from a moderngl Buffer
data = ctx.buffer(reserve=4)
texture = ctx.texture_cube((2, 2), 1)
texture.write(0, data)
# Write data from bytes
data = b'\xff\xff\xff\xff'
texture = ctx.texture_cube((2, 2), 1)
texture.write(0, data)
Args:
face (int): The face to update.
data (bytes): The pixel data.
viewport (tuple): The viewport.
Keyword Args:
alignment (int): The byte alignment of the pixels.
'''
if type(data) is Buffer:
data = data.mglo
self.mglo.write(face, data, viewport, alignment)
def use(self, location=0) -> None:
'''
Bind the texture to a texture unit.
The location is the texture unit to which we want to bind the texture.
This should correspond with the value of the ``samplerCube``
uniform in the shader because samplers read from the texture
unit we assign to them::
# Define what texture unit our two samplerCube uniforms should represent
program['texture_a'] = 0
program['texture_b'] = 1
# Bind textures to the texture units
first_texture.use(location=0)
second_texture.use(location=1)
Args:
location (int): The texture location/unit.
'''
self.mglo.use(location)
def release(self) -> None:
'''
Release the ModernGL object.
'''
self.mglo.release()
| mit | 7,221,561,308,332,472,000 | 30.440816 | 101 | 0.543425 | false | 4.286589 | false | false | false |
jonmsawyer/maio | maio/management/commands/maio_get_images.py | 1 | 18823 | import os
import sys
import hashlib
from pprint import pprint
from getpass import getpass
from datetime import datetime
import magic
from PIL import Image
import pytz
from django.conf import settings
from django.core.management.base import CommandError
import django.db.utils
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from maio import lib
from maio.models import File
from maio.models import Media
from maio.models import Tag
from ._base import MaioBaseCommand
class Command(MaioBaseCommand):
args = '<None>'
help = 'Scrapes images in one or more directories.'
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('username', nargs=1, type=str, metavar='USERNAME',
help=('Set owner of each file to %(metavar)s'))
parser.add_argument('directories', nargs='+', type=str, metavar='DIRECTORIES',
help=('Scrape images from %(metavar)s'))
# Optional arguments
parser.add_argument('--tag-directories', '-td', action='store_true',
help=('Tag the supplied directories for Image Tags. Does not tag '
'subdirectories under the supplied directories.'))
parser.add_argument('--tag-subfolders', '-ts', action='store_true',
help=('Tag the subdirectories under the supplied directories. Does '
'not tag the supplied directories.'))
parser.add_argument('--tag-filenames', '-tf', action='store_true',
help=('Tag the file names of the files.'))
parser.add_argument('--tag-all', '-ta', action='store_true',
help=('Equivalent to options -td -ts -tf.'))
parser.add_argument('--tags', '-t', nargs='*', type=str, metavar='TAGS',
help=('Tag each image with %(metavar)s'))
def handle(self, *args, **kwargs):
# shortcut settings
MAIO_SETTINGS = settings.MAIO_SETTINGS
TIME_ZONE = settings.TIME_ZONE
TZ = pytz.timezone(TIME_ZONE)
def mk_md5_dir(md5, root):
'''
Make MD5 directory. Makes 3 directories under ``root``, where the first 2 characters
in ``md5`` make up the first directory, the next 2 characters make up the second
directory, and the next 2 characters make up the third directory.
:returns: (str) The path to final directory structure.
'''
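# Illustrative example (added comment, hypothetical values): for an md5 of
# '0a1b2c...' and a root of 'filestore/media/images', the path assembled
# below would be 'filestore/media/images/0a/1b/2c'.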
if len(md5) == 32:
part1 = md5[0:2]
part2 = md5[2:4]
part3 = md5[4:6]
dirtomake = os.path.join(root, part1, part2, part3)
if os.path.isdir(dirtomake):
return dirtomake
if os.path.isdir(root):
os.makedirs(dirtomake)
return dirtomake
def is_image(mimetype):
'''
Check to see if the supplied ``mimetype`` is an image, according to
``lib.MIMETYPE_EXTENSION``.
:returns: (bool) True if ``mimetype`` is an image, False otherwise.
'''
for key, value in lib.MIMETYPE_EXTENSION['image'].items():
if mimetype == key:
return True
return False
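# For example (added comment, assuming lib.MIMETYPE_EXTENSION lists JPEG):
# is_image('image/jpeg') would return True, while is_image('text/plain')
# would return False.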
# grab the username from the options
username = kwargs.get('username', [''])[0]
# tag flag options
tag_directories = kwargs.get('tag_directories')
tag_subfolders = kwargs.get('tag_subfolders')
tag_filenames = kwargs.get('tag_filenames')
tag_all = kwargs.get('tag_all')
tags_input = kwargs.get('tags')
if tags_input is None:
tags_input = []
# validate user
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
self.out('User {} does not exist.'.format(username))
self.out('')
exit(1)
# grab the directories to scrape images from
directories = kwargs.get('directories', [])
# walk through each directory and make sure each one exists
for directory in directories:
if not os.path.isdir(directory):
self.out('"{}" is not a valid directory.'.format(directory))
self.out('')
exit(1)
# set up mime Magic
mime = magic.Magic(mime=True)
# walk through each directory, scraping images
for directory in directories:
# for each directory's files
for root, subdirs, files in os.walk(directory):
# for each file
for filename in files:
# read and join the file path
try:
file_path = os.path.join(root, filename)
except UnicodeDecodeError as e:
if "'utf8' codec can't decode bytes" in str(e):
self.out('Error processing {}, unreadable file name ...'
.format(os.path.join(root, filename)))
continue
raise
except:
raise
self.out('For file: {}'.format(file_path))
# get mime type
try:
mimetype = mime.from_file(file_path)
except IOError as e:
if 'File does not exist' in str(e):
self.out('file {} does not exist'.format(file_path))
continue
raise
except UnicodeDecodeError as e:
self.out('File: ', file_path)
raise
except:
raise
# make sure the mimetype is an image rather than something that looks like
# an image
if not is_image(mimetype):
self.out('{} is not a valid image type... (it might be a symlink?)'
.format(file_path))
continue
# get file extension
filename_ext = lib.MIMETYPE_EXTENSION['image'].get(mimetype, [[]])[0]
if filename_ext in (None, [[]]):
try:
filename_ext = file_path.split('.')[-1]
except IndexError:
filename_ext = ''
else:
filename_ext = filename_ext.replace('.', '')
# get name of file
name_of_file = file_path.split(os.sep)[-1].split('.')[:-1][0]
# stat file
sfile = os.stat(file_path)
# obtain modified datetime
mtime = TZ.localize(datetime.fromtimestamp(sfile.st_mtime))
# open image and check to make sure its width and height values
# are within configured constraints
try:
im = Image.open(file_path)
# this next if/elif statement looks backwards, but it's not.
# we want to continue onto the next image if the user chooses
# 'and' and the image's x 'or' y are out of range.
if MAIO_SETTINGS.get('images_min_inclusive', 'and').lower() == 'and':
if im.size[0] < MAIO_SETTINGS.get('images_min_width', 200) or \
im.size[1] < MAIO_SETTINGS.get('images_min_height', 200):
continue
# we want to continue onto the next image if the user chooses
# 'or' and the image's x 'and' y are both out of range.
elif MAIO_SETTINGS.get('images_min_inclusive', 'and').lower() == 'or':
if im.size[0] < MAIO_SETTINGS.get('images_min_width', 200) and \
im.size[1] < MAIO_SETTINGS.get('images_min_height', 200):
continue
im.load()
if im.mode != 'RGB':
im = im.convert('RGB')
except IOError as e:
self.out('Error in processing {} ...'.format(file_path))
if 'truncated' in str(e):
self.out('truncated')
continue
elif 'cannot identify image file' in str(e):
self.out('invalid image file')
continue
elif 'No such file or directory' in str(e):
self.out('no such file or directory')
continue
else:
raise
# get md5sum hash of the image
md5sum = hashlib.md5()
with open(file_path, 'rb') as fh:
md5sum.update(fh.read())
md5 = md5sum.hexdigest()
# make filestore directories if they don't exist
if not os.path.isdir(MAIO_SETTINGS['filestore_directory']):
# ./filestore
os.mkdir(MAIO_SETTINGS['filestore_directory'])
if not os.path.isdir(os.path.join(MAIO_SETTINGS['filestore_directory'],
'media')):
# ./filestore/media
os.mkdir(os.path.join(MAIO_SETTINGS['filestore_directory'],
'media'))
if not os.path.isdir(os.path.join(MAIO_SETTINGS['filestore_directory'],
'media', 'images')):
# ./filestore/media/images
os.mkdir(os.path.join(MAIO_SETTINGS['filestore_directory'],
'media', 'images'))
if not os.path.isdir(os.path.join(MAIO_SETTINGS['filestore_directory'],
'thumbnails')):
# ./filestore/thumbnails
os.mkdir(os.path.join(MAIO_SETTINGS['filestore_directory'],
'thumbnails'))
# process and save image to filestore
img_dir = mk_md5_dir(md5, os.path.join(MAIO_SETTINGS['filestore_directory'],
'media', 'images'))
img = os.path.join(img_dir, md5+'.'+filename_ext)
if not os.path.isfile(img):
# copy the image to the filestore if it doesn't already exist
im.save(img)
file_path = img
width = im.width
height = im.height
comment = str(im.info)
# process and save thumbnail to filestore
thumb_dir = mk_md5_dir(md5, os.path.join(MAIO_SETTINGS['filestore_directory'],
'thumbnails'))
thumb = os.path.join(thumb_dir, md5+'.jpg')
if not os.path.isfile(thumb):
im.thumbnail((300, 300), Image.ANTIALIAS)
im.save(thumb)
tn_width = im.width
tn_height = im.height
# close image file
im.close()
# process tag flags
tags = [] + tags_input
if tag_all or tag_directories:
# split up a directory such as
# C:\Users\bob\Pictures
# into
# ['C:', 'Users', 'bob', 'Pictures']
dir_tags = directory.split(os.sep)
# don't include Windows drive letters
# ['C:', 'Users', 'bob', 'Pictures']
# into
# ['Users', 'bob', 'Pictures']
if ':' in dir_tags[0]:
dir_tags = dir_tags[1:]
tags.extend(dir_tags)
if tag_all or tag_subfolders:
# split up a directory such as
# C:\Users\bob\Pictures\foo\bar\baz\beef.jpg
# where the supplied directory is
# C:\Users\bob\Pictures
# into
# ['foo', 'bar', 'baz', 'beef.jpg']
dir_tags = os.path.join(root, filename) \
.replace(directory+os.sep, '') \
.split(os.sep)
# don't include the filename for this option
# ['foo', 'bar', 'baz']
dir_tags = dir_tags[:-1]
tags.extend(dir_tags)
if tag_all or tag_filenames:
# split up a directory such as
# C:\Users\bob\Pictures\foo\bar\baz\beef.jpg
# where the supplied directory is
# C:\Users\bob\Pictures
# into
# ['foo', 'bar', 'baz', 'beef.jpg']
dir_tags = os.path.join(root, filename) \
.replace(directory+os.sep, '') \
.split(os.sep)
# get only the filename for this option
# ['beef.jpg']
dir_tags = dir_tags[-1:]
# split the filename from the extension
# ['beef', 'jpg']
dir_tags = dir_tags[0].split('.')
tags.extend(dir_tags)
# save file information to the database
try:
filestore = MAIO_SETTINGS['filestore_directory']
thumb_uri = thumb.replace(filestore, '').replace(os.sep, '/')
file_uri = file_path.replace(filestore, '').replace(os.sep, '/')
self.out(md5sum.hexdigest(), mimetype, filename,
file_path, file_uri, thumb_uri)
if filename_ext == '':
filename_ext = None
f = File(**{'md5sum': md5,
'original_name': name_of_file,
'original_extension': filename_ext,
'mime_type': mimetype,
'size': sfile.st_size,
'mtime': sfile.st_mtime,
'tn_path': thumb_uri,
'file_path': file_uri,
'date_modified': mtime,})
f.save()
except django.db.utils.IntegrityError:
f = File.objects.get(md5sum=md5)
if sfile.st_mtime == f.mtime:
self.out('Already in database and up-to-date, skipping {}'
.format(file_path))
self.out('')
continue
else:
self.out('Already in database and not up-to-date, processing {}'
.format(file_path))
f.mtime = sfile.st_mtime
f.date_modified = mtime
f.save()
except:
raise
media = Media(**{'file': f,
'media_type': 'image',
'owner': user,
'name': name_of_file,
'extension': filename_ext,
'mtime': sfile.st_mtime,
'size': sfile.st_size,
'date_modified': mtime,
'width': width,
'height': height,
'tn_width': tn_width,
'tn_height': tn_height,
'length': None,
'comment': comment})
media.save()
self.out('Tagging tags {} to "{}.{}"'
.format(tags, name_of_file, filename_ext))
self.out('')
# tag the image
for tag in tags:
# get DB tag if exists, if not create it
try:
tag = tag.lower()
t = Tag.objects.get(name=tag)
except Tag.DoesNotExist:
t = Tag(name=tag)
t.save()
# now associate the tag to the ImageFile
media.tags.add(t)
| mit | 1,934,864,507,498,044,400 | 46.175439 | 98 | 0.403655 | false | 5.422933 | false | false | false |
iluxa-com/mercurial-crew-tonfa | mercurial/localrepo.py | 1 | 86188 | # localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as match_
import merge as merge_
import tags as tags_
from lock import release
import weakref, stat, errno, os, time, inspect
propertycache = util.propertycache
class localrepository(repo.repository):
capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
supported = set('revlogv1 store fncache shared'.split())
def __init__(self, baseui, path=None, create=0):
repo.repository.__init__(self)
self.root = os.path.realpath(path)
self.path = os.path.join(self.root, ".hg")
self.origroot = path
self.opener = util.opener(self.path)
self.wopener = util.opener(self.root)
self.baseui = baseui
self.ui = baseui.copy()
try:
self.ui.readconfig(self.join("hgrc"), self.root)
extensions.loadall(self.ui)
except IOError:
pass
if not os.path.isdir(self.path):
if create:
if not os.path.exists(path):
os.mkdir(path)
os.mkdir(self.path)
requirements = ["revlogv1"]
if self.ui.configbool('format', 'usestore', True):
os.mkdir(os.path.join(self.path, "store"))
requirements.append("store")
if self.ui.configbool('format', 'usefncache', True):
requirements.append("fncache")
# create an invalid changelog
self.opener("00changelog.i", "a").write(
'\0\0\0\2' # represents revlogv2
' dummy changelog to prevent using the old repo layout'
)
reqfile = self.opener("requires", "w")
for r in requirements:
reqfile.write("%s\n" % r)
reqfile.close()
else:
raise error.RepoError(_("repository %s not found") % path)
elif create:
raise error.RepoError(_("repository %s already exists") % path)
else:
# find requirements
requirements = set()
try:
requirements = set(self.opener("requires").read().splitlines())
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
for r in requirements - self.supported:
raise error.RepoError(_("requirement '%s' not supported") % r)
self.sharedpath = self.path
try:
s = os.path.realpath(self.opener("sharedpath").read())
if not os.path.exists(s):
raise error.RepoError(
_('.hg/sharedpath points to nonexistent directory %s') % s)
self.sharedpath = s
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
self.store = store.store(requirements, self.sharedpath, util.opener)
self.spath = self.store.path
self.sopener = self.store.opener
self.sjoin = self.store.join
self.opener.createmode = self.store.createmode
self.sopener.options = {}
# These two define the set of tags for this repository. _tags
# maps tag name to node; _tagtypes maps tag name to 'global' or
# 'local'. (Global tags are defined by .hgtags across all
# heads, and local tags are defined in .hg/localtags.) They
# constitute the in-memory cache of tags.
self._tags = None
self._tagtypes = None
self._branchcache = None # in UTF-8
self._branchcachetip = None
self.nodetagscache = None
self.filterpats = {}
self._datafilters = {}
self._transref = self._lockref = self._wlockref = None
@propertycache
def changelog(self):
c = changelog.changelog(self.sopener)
if 'HG_PENDING' in os.environ:
p = os.environ['HG_PENDING']
if p.startswith(self.root):
c.readpending('00changelog.i.a')
self.sopener.options['defversion'] = c.version
return c
@propertycache
def manifest(self):
return manifest.manifest(self.sopener)
@propertycache
def dirstate(self):
return dirstate.dirstate(self.opener, self.ui, self.root)
def __getitem__(self, changeid):
if changeid is None:
return context.workingctx(self)
return context.changectx(self, changeid)
def __contains__(self, changeid):
try:
return bool(self.lookup(changeid))
except error.RepoLookupError:
return False
def __nonzero__(self):
return True
def __len__(self):
return len(self.changelog)
def __iter__(self):
for i in xrange(len(self)):
yield i
def url(self):
return 'file:' + self.root
def hook(self, name, throw=False, **args):
return hook.hook(self.ui, self, name, throw, **args)
tag_disallowed = ':\r\n'
def _tag(self, names, node, message, local, user, date, extra={}):
if isinstance(names, str):
allchars = names
names = (names,)
else:
allchars = ''.join(names)
for c in self.tag_disallowed:
if c in allchars:
raise util.Abort(_('%r cannot be used in a tag name') % c)
for name in names:
self.hook('pretag', throw=True, node=hex(node), tag=name,
local=local)
def writetags(fp, names, munge, prevtags):
fp.seek(0, 2)
if prevtags and prevtags[-1] != '\n':
fp.write('\n')
for name in names:
m = munge and munge(name) or name
if self._tagtypes and name in self._tagtypes:
old = self._tags.get(name, nullid)
fp.write('%s %s\n' % (hex(old), m))
fp.write('%s %s\n' % (hex(node), m))
fp.close()
prevtags = ''
if local:
try:
fp = self.opener('localtags', 'r+')
except IOError:
fp = self.opener('localtags', 'a')
else:
prevtags = fp.read()
# local tags are stored in the current charset
writetags(fp, names, None, prevtags)
for name in names:
self.hook('tag', node=hex(node), tag=name, local=local)
return
try:
fp = self.wfile('.hgtags', 'rb+')
except IOError:
fp = self.wfile('.hgtags', 'ab')
else:
prevtags = fp.read()
# committed tags are stored in UTF-8
writetags(fp, names, encoding.fromlocal, prevtags)
if '.hgtags' not in self.dirstate:
self.add(['.hgtags'])
m = match_.exact(self.root, '', ['.hgtags'])
tagnode = self.commit(message, user, date, extra=extra, match=m)
for name in names:
self.hook('tag', node=hex(node), tag=name, local=local)
return tagnode
def tag(self, names, node, message, local, user, date):
'''tag a revision with one or more symbolic names.
names is a list of strings or, when adding a single tag, names may be a
string.
if local is True, the tags are stored in a per-repository file.
otherwise, they are stored in the .hgtags file, and a new
changeset is committed with the change.
keyword arguments:
local: whether to store tags in non-version-controlled file
(default False)
message: commit message to use if committing
user: name of user to use if committing
date: date tuple to use if committing'''
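# Hypothetical call (added comment, not part of upstream Mercurial):
# repo.tag(['v1.0'], node, 'Added tag v1.0', local=False, user='alice',
# date=None) would commit a .hgtags change recording the new tag.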
for x in self.status()[:5]:
if '.hgtags' in x:
raise util.Abort(_('working copy of .hgtags is changed '
'(please commit .hgtags manually)'))
self.tags() # instantiate the cache
self._tag(names, node, message, local, user, date)
def tags(self):
'''return a mapping of tag to node'''
if self._tags is None:
(self._tags, self._tagtypes) = self._findtags()
return self._tags
def _findtags(self):
'''Do the hard work of finding tags. Return a pair of dicts
(tags, tagtypes) where tags maps tag name to node, and tagtypes
maps tag name to a string like \'global\' or \'local\'.
Subclasses or extensions are free to add their own tags, but
should be aware that the returned dicts will be retained for the
duration of the localrepo object.'''
# XXX what tagtype should subclasses/extensions use? Currently
# mq and bookmarks add tags, but do not set the tagtype at all.
# Should each extension invent its own tag type? Should there
# be one tagtype for all such "virtual" tags? Or is the status
# quo fine?
alltags = {} # map tag name to (node, hist)
tagtypes = {}
tags_.findglobaltags(self.ui, self, alltags, tagtypes)
tags_.readlocaltags(self.ui, self, alltags, tagtypes)
# Build the return dicts. Have to re-encode tag names because
# the tags module always uses UTF-8 (in order not to lose info
# writing to the cache), but the rest of Mercurial wants them in
# local encoding.
tags = {}
for (name, (node, hist)) in alltags.iteritems():
if node != nullid:
tags[encoding.tolocal(name)] = node
tags['tip'] = self.changelog.tip()
tagtypes = dict([(encoding.tolocal(name), value)
for (name, value) in tagtypes.iteritems()])
return (tags, tagtypes)
def tagtype(self, tagname):
'''
return the type of the given tag. result can be:
'local' : a local tag
'global' : a global tag
None : tag does not exist
'''
self.tags()
return self._tagtypes.get(tagname)
def tagslist(self):
'''return a list of tags ordered by revision'''
l = []
for t, n in self.tags().iteritems():
try:
r = self.changelog.rev(n)
except:
r = -2 # sort to the beginning of the list if unknown
l.append((r, t, n))
return [(t, n) for r, t, n in sorted(l)]
def nodetags(self, node):
'''return the tags associated with a node'''
if not self.nodetagscache:
self.nodetagscache = {}
for t, n in self.tags().iteritems():
self.nodetagscache.setdefault(n, []).append(t)
return self.nodetagscache.get(node, [])
def _branchtags(self, partial, lrev):
# TODO: rename this function?
tiprev = len(self) - 1
if lrev != tiprev:
self._updatebranchcache(partial, lrev + 1, tiprev + 1)
self._writebranchcache(partial, self.changelog.tip(), tiprev)
return partial
def branchmap(self):
'''returns a dictionary {branch: [branchheads]}'''
tip = self.changelog.tip()
if self._branchcache is not None and self._branchcachetip == tip:
return self._branchcache
oldtip = self._branchcachetip
self._branchcachetip = tip
if oldtip is None or oldtip not in self.changelog.nodemap:
partial, last, lrev = self._readbranchcache()
else:
lrev = self.changelog.rev(oldtip)
partial = self._branchcache
self._branchtags(partial, lrev)
# this private cache holds all heads (not just tips)
self._branchcache = partial
return self._branchcache
def branchtags(self):
'''return a dict where branch names map to the tipmost head of
the branch, open heads come before closed'''
bt = {}
for bn, heads in self.branchmap().iteritems():
tip = heads[-1]
for h in reversed(heads):
if 'close' not in self.changelog.read(h)[5]:
tip = h
break
bt[bn] = tip
return bt
def _readbranchcache(self):
partial = {}
try:
f = self.opener("branchheads.cache")
lines = f.read().split('\n')
f.close()
except (IOError, OSError):
return {}, nullid, nullrev
try:
last, lrev = lines.pop(0).split(" ", 1)
last, lrev = bin(last), int(lrev)
if lrev >= len(self) or self[lrev].node() != last:
# invalidate the cache
raise ValueError('invalidating branch cache (tip differs)')
for l in lines:
if not l:
continue
node, label = l.split(" ", 1)
partial.setdefault(label.strip(), []).append(bin(node))
except KeyboardInterrupt:
raise
except Exception, inst:
if self.ui.debugflag:
self.ui.warn(str(inst), '\n')
partial, last, lrev = {}, nullid, nullrev
return partial, last, lrev
def _writebranchcache(self, branches, tip, tiprev):
try:
f = self.opener("branchheads.cache", "w", atomictemp=True)
f.write("%s %s\n" % (hex(tip), tiprev))
for label, nodes in branches.iteritems():
for node in nodes:
f.write("%s %s\n" % (hex(node), label))
f.rename()
except (IOError, OSError):
pass
def _updatebranchcache(self, partial, start, end):
# collect new branch entries
newbranches = {}
for r in xrange(start, end):
c = self[r]
newbranches.setdefault(c.branch(), []).append(c.node())
# if older branchheads are reachable from new ones, they aren't
# really branchheads. Note checking parents is insufficient:
# 1 (branch a) -> 2 (branch b) -> 3 (branch a)
for branch, newnodes in newbranches.iteritems():
bheads = partial.setdefault(branch, [])
bheads.extend(newnodes)
if len(bheads) < 2:
continue
newbheads = []
# starting from tip means fewer passes over reachable
while newnodes:
latest = newnodes.pop()
if latest not in bheads:
continue
minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
reachable = self.changelog.reachable(latest, minbhrev)
bheads = [b for b in bheads if b not in reachable]
newbheads.insert(0, latest)
bheads.extend(newbheads)
partial[branch] = bheads
def lookup(self, key):
if isinstance(key, int):
return self.changelog.node(key)
elif key == '.':
return self.dirstate.parents()[0]
elif key == 'null':
return nullid
elif key == 'tip':
return self.changelog.tip()
n = self.changelog._match(key)
if n:
return n
if key in self.tags():
return self.tags()[key]
if key in self.branchtags():
return self.branchtags()[key]
n = self.changelog._partialmatch(key)
if n:
return n
# can't find key, check if it might have come from damaged dirstate
if key in self.dirstate.parents():
raise error.Abort(_("working directory has unknown parent '%s'!")
% short(key))
try:
if len(key) == 20:
key = hex(key)
except:
pass
raise error.RepoLookupError(_("unknown revision '%s'") % key)
def local(self):
return True
def join(self, f):
return os.path.join(self.path, f)
def wjoin(self, f):
return os.path.join(self.root, f)
def rjoin(self, f):
return os.path.join(self.root, util.pconvert(f))
def file(self, f):
if f[0] == '/':
f = f[1:]
return filelog.filelog(self.sopener, f)
def changectx(self, changeid):
return self[changeid]
def parents(self, changeid=None):
'''get list of changectxs for parents of changeid'''
return self[changeid].parents()
def filectx(self, path, changeid=None, fileid=None):
"""changeid can be a changeset revision, node, or tag.
fileid can be a file revision or node."""
return context.filectx(self, path, changeid, fileid)
def getcwd(self):
return self.dirstate.getcwd()
def pathto(self, f, cwd=None):
return self.dirstate.pathto(f, cwd)
def wfile(self, f, mode='r'):
return self.wopener(f, mode)
def _link(self, f):
return os.path.islink(self.wjoin(f))
def _filter(self, filter, filename, data):
if filter not in self.filterpats:
l = []
for pat, cmd in self.ui.configitems(filter):
if cmd == '!':
continue
mf = match_.match(self.root, '', [pat])
fn = None
params = cmd
for name, filterfn in self._datafilters.iteritems():
if cmd.startswith(name):
fn = filterfn
params = cmd[len(name):].lstrip()
break
if not fn:
fn = lambda s, c, **kwargs: util.filter(s, c)
# Wrap old filters not supporting keyword arguments
if not inspect.getargspec(fn)[2]:
oldfn = fn
fn = lambda s, c, **kwargs: oldfn(s, c)
l.append((mf, fn, params))
self.filterpats[filter] = l
for mf, fn, cmd in self.filterpats[filter]:
if mf(filename):
self.ui.debug("filtering %s through %s\n" % (filename, cmd))
data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
break
return data
def adddatafilter(self, name, filter):
self._datafilters[name] = filter
def wread(self, filename):
if self._link(filename):
data = os.readlink(self.wjoin(filename))
else:
data = self.wopener(filename, 'r').read()
return self._filter("encode", filename, data)
def wwrite(self, filename, data, flags):
data = self._filter("decode", filename, data)
try:
os.unlink(self.wjoin(filename))
except OSError:
pass
if 'l' in flags:
self.wopener.symlink(data, filename)
else:
self.wopener(filename, 'w').write(data)
if 'x' in flags:
util.set_flags(self.wjoin(filename), False, True)
def wwritedata(self, filename, data):
return self._filter("decode", filename, data)
def transaction(self):
tr = self._transref and self._transref() or None
if tr and tr.running():
return tr.nest()
# abort here if the journal already exists
if os.path.exists(self.sjoin("journal")):
raise error.RepoError(
_("abandoned transaction found - run hg recover"))
# save dirstate for rollback
try:
ds = self.opener("dirstate").read()
except IOError:
ds = ""
self.opener("journal.dirstate", "w").write(ds)
self.opener("journal.branch", "w").write(self.dirstate.branch())
renames = [(self.sjoin("journal"), self.sjoin("undo")),
(self.join("journal.dirstate"), self.join("undo.dirstate")),
(self.join("journal.branch"), self.join("undo.branch"))]
tr = transaction.transaction(self.ui.warn, self.sopener,
self.sjoin("journal"),
aftertrans(renames),
self.store.createmode)
self._transref = weakref.ref(tr)
return tr
def recover(self):
lock = self.lock()
try:
if os.path.exists(self.sjoin("journal")):
self.ui.status(_("rolling back interrupted transaction\n"))
transaction.rollback(self.sopener, self.sjoin("journal"),
self.ui.warn)
self.invalidate()
return True
else:
self.ui.warn(_("no interrupted transaction available\n"))
return False
finally:
lock.release()
def rollback(self):
wlock = lock = None
try:
wlock = self.wlock()
lock = self.lock()
if os.path.exists(self.sjoin("undo")):
self.ui.status(_("rolling back last transaction\n"))
transaction.rollback(self.sopener, self.sjoin("undo"),
self.ui.warn)
util.rename(self.join("undo.dirstate"), self.join("dirstate"))
try:
branch = self.opener("undo.branch").read()
self.dirstate.setbranch(branch)
except IOError:
self.ui.warn(_("Named branch could not be reset, "
"current branch still is: %s\n")
% encoding.tolocal(self.dirstate.branch()))
self.invalidate()
self.dirstate.invalidate()
self.destroyed()
else:
self.ui.warn(_("no rollback information available\n"))
finally:
release(lock, wlock)
def invalidate(self):
for a in "changelog manifest".split():
if a in self.__dict__:
delattr(self, a)
self._tags = None
self._tagtypes = None
self.nodetagscache = None
self._branchcache = None # in UTF-8
self._branchcachetip = None
def _lock(self, lockname, wait, releasefn, acquirefn, desc):
try:
l = lock.lock(lockname, 0, releasefn, desc=desc)
except error.LockHeld, inst:
if not wait:
raise
self.ui.warn(_("waiting for lock on %s held by %r\n") %
(desc, inst.locker))
# default to 600 seconds timeout
l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
releasefn, desc=desc)
if acquirefn:
acquirefn()
return l
def lock(self, wait=True):
'''Lock the repository store (.hg/store) and return a weak reference
to the lock. Use this before modifying the store (e.g. committing or
stripping). If you are opening a transaction, get a lock as well.'''
l = self._lockref and self._lockref()
if l is not None and l.held:
l.lock()
return l
l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
_('repository %s') % self.origroot)
self._lockref = weakref.ref(l)
return l
def wlock(self, wait=True):
'''Lock the non-store parts of the repository (everything under
.hg except .hg/store) and return a weak reference to the lock.
Use this before modifying files in .hg.'''
l = self._wlockref and self._wlockref()
if l is not None and l.held:
l.lock()
return l
l = self._lock(self.join("wlock"), wait, self.dirstate.write,
self.dirstate.invalidate, _('working directory of %s') %
self.origroot)
self._wlockref = weakref.ref(l)
return l
def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
"""
commit an individual file as part of a larger transaction
"""
fname = fctx.path()
text = fctx.data()
flog = self.file(fname)
fparent1 = manifest1.get(fname, nullid)
fparent2 = fparent2o = manifest2.get(fname, nullid)
meta = {}
copy = fctx.renamed()
if copy and copy[0] != fname:
# Mark the new revision of this file as a copy of another
# file. This copy data will effectively act as a parent
# of this new revision. If this is a merge, the first
# parent will be the nullid (meaning "look up the copy data")
# and the second one will be the other parent. For example:
#
# 0 --- 1 --- 3 rev1 changes file foo
# \ / rev2 renames foo to bar and changes it
# \- 2 -/ rev3 should have bar with all changes and
# should record that bar descends from
# bar in rev2 and foo in rev1
#
# this allows this merge to succeed:
#
# 0 --- 1 --- 3 rev4 reverts the content change from rev2
# \ / merging rev3 and rev4 should use bar@rev2
# \- 2 --- 4 as the merge base
#
cfname = copy[0]
crev = manifest1.get(cfname)
newfparent = fparent2
if manifest2: # branch merge
if fparent2 == nullid or crev is None: # copied on remote side
if cfname in manifest2:
crev = manifest2[cfname]
newfparent = fparent1
# find source in nearest ancestor if we've lost track
if not crev:
self.ui.debug(" %s: searching for copy revision for %s\n" %
(fname, cfname))
for ancestor in self['.'].ancestors():
if cfname in ancestor:
crev = ancestor[cfname].filenode()
break
self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
meta["copy"] = cfname
meta["copyrev"] = hex(crev)
fparent1, fparent2 = nullid, newfparent
elif fparent2 != nullid:
# is one parent an ancestor of the other?
fparentancestor = flog.ancestor(fparent1, fparent2)
if fparentancestor == fparent1:
fparent1, fparent2 = fparent2, nullid
elif fparentancestor == fparent2:
fparent2 = nullid
# is the file changed?
if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
changelist.append(fname)
return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
# are just the flags changed during merge?
if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
changelist.append(fname)
return fparent1
def commit(self, text="", user=None, date=None, match=None, force=False,
editor=False, extra={}):
"""Add a new revision to current repository.
Revision information is gathered from the working directory,
match can be used to filter the committed files. If editor is
supplied, it is called to get a commit message.
"""
def fail(f, msg):
raise util.Abort('%s: %s' % (f, msg))
if not match:
match = match_.always(self.root, '')
if not force:
vdirs = []
match.dir = vdirs.append
match.bad = fail
wlock = self.wlock()
try:
p1, p2 = self.dirstate.parents()
wctx = self[None]
if (not force and p2 != nullid and match and
(match.files() or match.anypats())):
raise util.Abort(_('cannot partially commit a merge '
'(do not specify files or patterns)'))
changes = self.status(match=match, clean=force)
if force:
changes[0].extend(changes[6]) # mq may commit unchanged files
# check subrepos
subs = []
for s in wctx.substate:
if match(s) and wctx.sub(s).dirty():
subs.append(s)
if subs and '.hgsubstate' not in changes[0]:
changes[0].insert(0, '.hgsubstate')
# make sure all explicit patterns are matched
if not force and match.files():
matched = set(changes[0] + changes[1] + changes[2])
for f in match.files():
if f == '.' or f in matched or f in wctx.substate:
continue
if f in changes[3]: # missing
fail(f, _('file not found!'))
if f in vdirs: # visited directory
d = f + '/'
for mf in matched:
if mf.startswith(d):
break
else:
fail(f, _("no match under directory!"))
elif f not in self.dirstate:
fail(f, _("file not tracked!"))
if (not force and not extra.get("close") and p2 == nullid
and not (changes[0] or changes[1] or changes[2])
and self[None].branch() == self['.'].branch()):
return None
ms = merge_.mergestate(self)
for f in changes[0]:
if f in ms and ms[f] == 'u':
raise util.Abort(_("unresolved merge conflicts "
"(see hg resolve)"))
cctx = context.workingctx(self, (p1, p2), text, user, date,
extra, changes)
if editor:
cctx._text = editor(self, cctx, subs)
edited = (text != cctx._text)
# commit subs
if subs:
state = wctx.substate.copy()
for s in subs:
self.ui.status(_('committing subrepository %s\n') % s)
sr = wctx.sub(s).commit(cctx._text, user, date)
state[s] = (state[s][0], sr)
subrepo.writestate(self, state)
# Save commit message in case this transaction gets rolled back
# (e.g. by a pretxncommit hook). Leave the content alone on
# the assumption that the user will use the same editor again.
msgfile = self.opener('last-message.txt', 'wb')
msgfile.write(cctx._text)
msgfile.close()
try:
ret = self.commitctx(cctx, True)
except:
if edited:
msgfn = self.pathto(msgfile.name[len(self.root)+1:])
self.ui.write(
_('note: commit message saved in %s\n') % msgfn)
raise
# update dirstate and mergestate
for f in changes[0] + changes[1]:
self.dirstate.normal(f)
for f in changes[2]:
self.dirstate.forget(f)
self.dirstate.setparents(ret)
ms.reset()
return ret
finally:
wlock.release()
def commitctx(self, ctx, error=False):
"""Add a new revision to current repository.
Revision information is passed via the context argument.
"""
tr = lock = None
removed = ctx.removed()
p1, p2 = ctx.p1(), ctx.p2()
m1 = p1.manifest().copy()
m2 = p2.manifest()
user = ctx.user()
xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
lock = self.lock()
try:
tr = self.transaction()
trp = weakref.proxy(tr)
# check in files
new = {}
changed = []
linkrev = len(self)
for f in sorted(ctx.modified() + ctx.added()):
self.ui.note(f + "\n")
try:
fctx = ctx[f]
new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
changed)
m1.set(f, fctx.flags())
except OSError, inst:
self.ui.warn(_("trouble committing %s!\n") % f)
raise
except IOError, inst:
errcode = getattr(inst, 'errno', errno.ENOENT)
if error or errcode and errcode != errno.ENOENT:
self.ui.warn(_("trouble committing %s!\n") % f)
raise
else:
removed.append(f)
# update manifest
m1.update(new)
removed = [f for f in sorted(removed) if f in m1 or f in m2]
drop = [f for f in removed if f in m1]
for f in drop:
del m1[f]
mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
p2.manifestnode(), (new, drop))
# update changelog
self.changelog.delayupdate()
n = self.changelog.add(mn, changed + removed, ctx.description(),
trp, p1.node(), p2.node(),
user, ctx.date(), ctx.extra().copy())
p = lambda: self.changelog.writepending() and self.root or ""
self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
parent2=xp2, pending=p)
self.changelog.finalize(trp)
tr.close()
if self._branchcache:
self.branchtags()
self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
return n
finally:
del tr
lock.release()
def destroyed(self):
'''Inform the repository that nodes have been destroyed.
Intended for use by strip and rollback, so there's a common
place for anything that has to be done after destroying history.'''
# XXX it might be nice if we could take the list of destroyed
# nodes, but I don't see an easy way for rollback() to do that
# Ensure the persistent tag cache is updated. Doing it now
# means that the tag cache only has to worry about destroyed
# heads immediately after a strip/rollback. That in turn
# guarantees that "cachetip == currenttip" (comparing both rev
# and node) always means no nodes have been added or destroyed.
# XXX this is suboptimal when qrefresh'ing: we strip the current
# head, refresh the tag cache, then immediately add a new head.
# But I think doing it this way is necessary for the "instant
# tag cache retrieval" case to work.
tags_.findglobaltags(self.ui, self, {}, {})
def walk(self, match, node=None):
'''
walk recursively through the directory tree or a given
changeset, finding all files matched by the match
function
'''
return self[node].walk(match)
def status(self, node1='.', node2=None, match=None,
ignored=False, clean=False, unknown=False):
"""return status of files between two nodes or node and working directory
If node1 is None, use the first dirstate parent instead.
If node2 is None, compare node1 with working directory.
"""
def mfmatches(ctx):
mf = ctx.manifest().copy()
for fn in mf.keys():
if not match(fn):
del mf[fn]
return mf
if isinstance(node1, context.changectx):
ctx1 = node1
else:
ctx1 = self[node1]
if isinstance(node2, context.changectx):
ctx2 = node2
else:
ctx2 = self[node2]
working = ctx2.rev() is None
parentworking = working and ctx1 == self['.']
match = match or match_.always(self.root, self.getcwd())
listignored, listclean, listunknown = ignored, clean, unknown
# load earliest manifest first for caching reasons
if not working and ctx2.rev() < ctx1.rev():
ctx2.manifest()
if not parentworking:
def bad(f, msg):
if f not in ctx1:
self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
match.bad = bad
if working: # we need to scan the working dir
subrepos = ctx1.substate.keys()
s = self.dirstate.status(match, subrepos, listignored,
listclean, listunknown)
cmp, modified, added, removed, deleted, unknown, ignored, clean = s
# check for any possibly clean files
if parentworking and cmp:
fixup = []
# do a full compare of any files that might have changed
for f in sorted(cmp):
if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
or ctx1[f].cmp(ctx2[f].data())):
modified.append(f)
else:
fixup.append(f)
if listclean:
clean += fixup
# update dirstate for files that are actually clean
if fixup:
try:
# updating the dirstate is optional
# so we don't wait on the lock
wlock = self.wlock(False)
try:
for f in fixup:
self.dirstate.normal(f)
finally:
wlock.release()
except error.LockError:
pass
if not parentworking:
mf1 = mfmatches(ctx1)
if working:
# we are comparing working dir against non-parent
# generate a pseudo-manifest for the working dir
mf2 = mfmatches(self['.'])
for f in cmp + modified + added:
mf2[f] = None
mf2.set(f, ctx2.flags(f))
for f in removed:
if f in mf2:
del mf2[f]
else:
# we are comparing two revisions
deleted, unknown, ignored = [], [], []
mf2 = mfmatches(ctx2)
modified, added, clean = [], [], []
for fn in mf2:
if fn in mf1:
if (mf1.flags(fn) != mf2.flags(fn) or
(mf1[fn] != mf2[fn] and
(mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
modified.append(fn)
elif listclean:
clean.append(fn)
del mf1[fn]
else:
added.append(fn)
removed = mf1.keys()
r = modified, added, removed, deleted, unknown, ignored, clean
[l.sort() for l in r]
return r
def add(self, list):
wlock = self.wlock()
try:
rejected = []
for f in list:
p = self.wjoin(f)
try:
st = os.lstat(p)
except:
self.ui.warn(_("%s does not exist!\n") % f)
rejected.append(f)
continue
if st.st_size > 10000000:
self.ui.warn(_("%s: files over 10MB may cause memory and"
" performance problems\n"
"(use 'hg revert %s' to unadd the file)\n")
% (f, f))
if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
self.ui.warn(_("%s not added: only files and symlinks "
"supported currently\n") % f)
rejected.append(p)
elif self.dirstate[f] in 'amn':
self.ui.warn(_("%s already tracked!\n") % f)
elif self.dirstate[f] == 'r':
self.dirstate.normallookup(f)
else:
self.dirstate.add(f)
return rejected
finally:
wlock.release()
def forget(self, list):
wlock = self.wlock()
try:
for f in list:
if self.dirstate[f] != 'a':
self.ui.warn(_("%s not added!\n") % f)
else:
self.dirstate.forget(f)
finally:
wlock.release()
def remove(self, list, unlink=False):
if unlink:
for f in list:
try:
util.unlink(self.wjoin(f))
except OSError, inst:
if inst.errno != errno.ENOENT:
raise
wlock = self.wlock()
try:
for f in list:
if unlink and os.path.exists(self.wjoin(f)):
self.ui.warn(_("%s still exists!\n") % f)
elif self.dirstate[f] == 'a':
self.dirstate.forget(f)
elif f not in self.dirstate:
self.ui.warn(_("%s not tracked!\n") % f)
else:
self.dirstate.remove(f)
finally:
wlock.release()
def undelete(self, list):
manifests = [self.manifest.read(self.changelog.read(p)[0])
for p in self.dirstate.parents() if p != nullid]
wlock = self.wlock()
try:
for f in list:
if self.dirstate[f] != 'r':
self.ui.warn(_("%s not removed!\n") % f)
else:
m = f in manifests[0] and manifests[0] or manifests[1]
t = self.file(f).read(m[f])
self.wwrite(f, t, m.flags(f))
self.dirstate.normal(f)
finally:
wlock.release()
def copy(self, source, dest):
p = self.wjoin(dest)
if not (os.path.exists(p) or os.path.islink(p)):
self.ui.warn(_("%s does not exist!\n") % dest)
elif not (os.path.isfile(p) or os.path.islink(p)):
self.ui.warn(_("copy failed: %s is not a file or a "
"symbolic link\n") % dest)
else:
wlock = self.wlock()
try:
if self.dirstate[dest] in '?r':
self.dirstate.add(dest)
self.dirstate.copy(source, dest)
finally:
wlock.release()
def heads(self, start=None):
heads = self.changelog.heads(start)
# sort the output in rev descending order
heads = [(-self.changelog.rev(h), h) for h in heads]
return [n for (r, n) in sorted(heads)]
def branchheads(self, branch=None, start=None, closed=False):
'''return a (possibly filtered) list of heads for the given branch
Heads are returned in topological order, from newest to oldest.
If branch is None, use the dirstate branch.
If start is not None, return only heads reachable from start.
If closed is True, return heads that are marked as closed as well.
'''
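# Illustration (added comment, hypothetical): repo.branchheads('default')
# would return the open heads of the 'default' branch, newest first.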
if branch is None:
branch = self[None].branch()
branches = self.branchmap()
if branch not in branches:
return []
# the cache returns heads ordered lowest to highest
bheads = list(reversed(branches[branch]))
if start is not None:
# filter out the heads that cannot be reached from startrev
fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
bheads = [h for h in bheads if h in fbheads]
if not closed:
bheads = [h for h in bheads if
('close' not in self.changelog.read(h)[5])]
return bheads
def branches(self, nodes):
if not nodes:
nodes = [self.changelog.tip()]
b = []
for n in nodes:
t = n
while 1:
p = self.changelog.parents(n)
if p[1] != nullid or p[0] == nullid:
b.append((t, n, p[0], p[1]))
break
n = p[0]
return b
def between(self, pairs):
r = []
for top, bottom in pairs:
n, l, i = top, [], 0
f = 1
while n != bottom and n != nullid:
p = self.changelog.parents(n)[0]
if i == f:
l.append(n)
f = f * 2
n = p
i += 1
r.append(l)
return r
def findincoming(self, remote, base=None, heads=None, force=False):
"""Return list of roots of the subsets of missing nodes from remote
If base dict is specified, assume that these nodes and their parents
exist on the remote side and that no child of a node of base exists
in both remote and self.
Furthermore base will be updated to include the nodes that exist
in both self and remote but none of whose children exist in both self and remote.
If a list of heads is specified, return only nodes which are heads
or ancestors of these heads.
All the ancestors of base are in self and in remote.
All the descendants of the list returned are missing in self.
(and so we know that the rest of the nodes are missing in remote, see
outgoing)
"""
return self.findcommonincoming(remote, base, heads, force)[1]
def findcommonincoming(self, remote, base=None, heads=None, force=False):
"""Return a tuple (common, missing roots, heads) used to identify
missing nodes from remote.
If base dict is specified, assume that these nodes and their parents
exist on the remote side and that no child of a node of base exists
in both remote and self.
Furthermore base will be updated to include the nodes that exist
in both self and remote but none of whose children exist in both self and remote.
If a list of heads is specified, return only nodes which are heads
or ancestors of these heads.
All the ancestors of base are in self and in remote.
"""
m = self.changelog.nodemap
search = []
fetch = set()
seen = set()
seenbranch = set()
if base is None:
base = {}
if not heads:
heads = remote.heads()
if self.changelog.tip() == nullid:
base[nullid] = 1
if heads != [nullid]:
return [nullid], [nullid], list(heads)
return [nullid], [], []
# assume we're closer to the tip than the root
# and start by examining the heads
self.ui.status(_("searching for changes\n"))
unknown = []
for h in heads:
if h not in m:
unknown.append(h)
else:
base[h] = 1
heads = unknown
if not unknown:
return base.keys(), [], []
req = set(unknown)
reqcnt = 0
# search through remote branches
# a 'branch' here is a linear segment of history, with four parts:
# head, root, first parent, second parent
# (a branch always has two parents (or none) by definition)
unknown = remote.branches(unknown)
while unknown:
r = []
while unknown:
n = unknown.pop(0)
if n[0] in seen:
continue
self.ui.debug("examining %s:%s\n"
% (short(n[0]), short(n[1])))
if n[0] == nullid: # found the end of the branch
pass
elif n in seenbranch:
self.ui.debug("branch already found\n")
continue
elif n[1] and n[1] in m: # do we know the base?
self.ui.debug("found incomplete branch %s:%s\n"
% (short(n[0]), short(n[1])))
search.append(n[0:2]) # schedule branch range for scanning
seenbranch.add(n)
else:
if n[1] not in seen and n[1] not in fetch:
if n[2] in m and n[3] in m:
self.ui.debug("found new changeset %s\n" %
short(n[1]))
fetch.add(n[1]) # earliest unknown
for p in n[2:4]:
if p in m:
base[p] = 1 # latest known
for p in n[2:4]:
if p not in req and p not in m:
r.append(p)
req.add(p)
seen.add(n[0])
if r:
reqcnt += 1
self.ui.progress('searching', reqcnt, unit='queries')
self.ui.debug("request %d: %s\n" %
(reqcnt, " ".join(map(short, r))))
for p in xrange(0, len(r), 10):
for b in remote.branches(r[p:p + 10]):
self.ui.debug("received %s:%s\n" %
(short(b[0]), short(b[1])))
unknown.append(b)
# do binary search on the branches we found
while search:
newsearch = []
reqcnt += 1
self.ui.progress('searching', reqcnt, unit='queries')
for n, l in zip(search, remote.between(search)):
l.append(n[1])
p = n[0]
f = 1
for i in l:
self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
if i in m:
if f <= 2:
self.ui.debug("found new branch changeset %s\n" %
short(p))
fetch.add(p)
base[i] = 1
else:
self.ui.debug("narrowed branch search to %s:%s\n"
% (short(p), short(i)))
newsearch.append((p, i))
break
p, f = i, f * 2
search = newsearch
# sanity check our fetch list
for f in fetch:
if f in m:
raise error.RepoError(_("already have changeset ")
+ short(f[:4]))
if base.keys() == [nullid]:
if force:
self.ui.warn(_("warning: repository is unrelated\n"))
else:
raise util.Abort(_("repository is unrelated"))
self.ui.debug("found new changesets starting at " +
" ".join([short(f) for f in fetch]) + "\n")
self.ui.progress('searching', None, unit='queries')
self.ui.debug("%d total queries\n" % reqcnt)
return base.keys(), list(fetch), heads
def findoutgoing(self, remote, base=None, heads=None, force=False):
"""Return list of nodes that are roots of subsets not in remote
If base dict is specified, assume that these nodes and their parents
exist on the remote side.
If a list of heads is specified, return only nodes which are heads
or ancestors of these heads, and return a second element which
contains all remote heads which get new children.
"""
if base is None:
base = {}
self.findincoming(remote, base, heads, force=force)
self.ui.debug("common changesets up to "
+ " ".join(map(short, base.keys())) + "\n")
remain = set(self.changelog.nodemap)
# prune everything remote has from the tree
remain.remove(nullid)
remove = base.keys()
while remove:
n = remove.pop(0)
if n in remain:
remain.remove(n)
for p in self.changelog.parents(n):
remove.append(p)
# find every node whose parents have been pruned
subset = []
# find every remote head that will get new children
updated_heads = set()
for n in remain:
p1, p2 = self.changelog.parents(n)
if p1 not in remain and p2 not in remain:
subset.append(n)
if heads:
if p1 in heads:
updated_heads.add(p1)
if p2 in heads:
updated_heads.add(p2)
# this is the set of all roots we have to push
if heads:
return subset, list(updated_heads)
else:
return subset
def pull(self, remote, heads=None, force=False):
lock = self.lock()
try:
common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
force=force)
if fetch == [nullid]:
self.ui.status(_("requesting all changes\n"))
if not fetch:
self.ui.status(_("no changes found\n"))
return 0
if heads is None and remote.capable('changegroupsubset'):
heads = rheads
if heads is None:
cg = remote.changegroup(fetch, 'pull')
else:
if not remote.capable('changegroupsubset'):
raise util.Abort(_("Partial pull cannot be done because "
"other repository doesn't support "
"changegroupsubset."))
cg = remote.changegroupsubset(fetch, heads, 'pull')
return self.addchangegroup(cg, 'pull', remote.url())
finally:
lock.release()
def push(self, remote, force=False, revs=None):
# there are two ways to push to remote repo:
#
# addchangegroup assumes local user can lock remote
# repo (local filesystem, old ssh servers).
#
# unbundle assumes local user cannot lock remote repo (new ssh
# servers, http servers).
if remote.capable('unbundle'):
return self.push_unbundle(remote, force, revs)
return self.push_addchangegroup(remote, force, revs)
def prepush(self, remote, force, revs):
'''Analyze the local and remote repositories and determine which
changesets need to be pushed to the remote. Return a tuple
(changegroup, remoteheads). changegroup is a readable file-like
object whose read() returns successive changegroup chunks ready to
be sent over the wire. remoteheads is the list of remote heads.
'''
common = {}
remote_heads = remote.heads()
inc = self.findincoming(remote, common, remote_heads, force=force)
update, updated_heads = self.findoutgoing(remote, common, remote_heads)
msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
def checkbranch(lheads, rheads, updatelb, branchname=None):
'''
check whether there are more local heads than remote heads on
a specific branch.
lheads: local branch heads
rheads: remote branch heads
updatelb: outgoing local branch bases
'''
warn = 0
if not revs and len(lheads) > len(rheads):
warn = 1
else:
# add local heads involved in the push
updatelheads = [self.changelog.heads(x, lheads)
for x in updatelb]
newheads = set(sum(updatelheads, [])) & set(lheads)
if not newheads:
return True
# add heads we don't have or that are not involved in the push
for r in rheads:
if r in self.changelog.nodemap:
desc = self.changelog.heads(r, heads)
l = [h for h in heads if h in desc]
if not l:
newheads.add(r)
else:
newheads.add(r)
if len(newheads) > len(rheads):
warn = 1
if warn:
if branchname is not None:
msg = _("abort: push creates new remote heads"
" on branch '%s'!\n") % branchname
else:
msg = _("abort: push creates new remote heads!\n")
self.ui.warn(msg)
if len(lheads) > len(rheads):
self.ui.status(_("(did you forget to merge?"
" use push -f to force)\n"))
else:
self.ui.status(_("(you should pull and merge or"
" use push -f to force)\n"))
return False
return True
if not bases:
self.ui.status(_("no changes found\n"))
return None, 1
elif not force:
# Check for each named branch if we're creating new remote heads.
# To be a remote head after push, node must be either:
# - unknown locally
# - a local outgoing head descended from update
# - a remote head that's known locally and not
# ancestral to an outgoing head
#
# New named branches cannot be created without --force.
if remote_heads != [nullid]:
if remote.capable('branchmap'):
remotebrheads = remote.branchmap()
if not revs:
localbrheads = self.branchmap()
else:
localbrheads = {}
for n in heads:
branch = self[n].branch()
localbrheads.setdefault(branch, []).append(n)
newbranches = list(set(localbrheads) - set(remotebrheads))
if newbranches: # new branch requires --force
branchnames = ', '.join("%s" % b for b in newbranches)
self.ui.warn(_("abort: push creates "
"new remote branches: %s!\n")
% branchnames)
# propose 'push -b .' in the msg too?
self.ui.status(_("(use 'hg push -f' to force)\n"))
return None, 0
for branch, lheads in localbrheads.iteritems():
if branch in remotebrheads:
rheads = remotebrheads[branch]
if not checkbranch(lheads, rheads, update, branch):
return None, 0
else:
if not checkbranch(heads, remote_heads, update):
return None, 0
if inc:
self.ui.warn(_("note: unsynced remote changes!\n"))
if revs is None:
# use the fast path, no race possible on push
nodes = self.changelog.findmissing(common.keys())
cg = self._changegroup(nodes, 'push')
else:
cg = self.changegroupsubset(update, revs, 'push')
return cg, remote_heads
def push_addchangegroup(self, remote, force, revs):
lock = remote.lock()
try:
ret = self.prepush(remote, force, revs)
if ret[0] is not None:
cg, remote_heads = ret
return remote.addchangegroup(cg, 'push', self.url())
return ret[1]
finally:
lock.release()
def push_unbundle(self, remote, force, revs):
# local repo finds heads on server, finds out what revs it
# must push. once revs transferred, if server finds it has
# different heads (someone else won commit/push race), server
# aborts.
ret = self.prepush(remote, force, revs)
if ret[0] is not None:
cg, remote_heads = ret
if force:
remote_heads = ['force']
return remote.unbundle(cg, remote_heads, 'push')
return ret[1]
def changegroupinfo(self, nodes, source):
if self.ui.verbose or source == 'bundle':
self.ui.status(_("%d changesets found\n") % len(nodes))
if self.ui.debugflag:
self.ui.debug("list of changesets:\n")
for node in nodes:
self.ui.debug("%s\n" % hex(node))
def changegroupsubset(self, bases, heads, source, extranodes=None):
"""Compute a changegroup consisting of all the nodes that are
descendents of any of the bases and ancestors of any of the heads.
Return a chunkbuffer object whose read() method will return
successive changegroup chunks.
It is fairly complex as determining which filenodes and which
manifest nodes need to be included for the changeset to be complete
is non-trivial.
Another wrinkle is doing the reverse, figuring out which changeset in
the changegroup a particular filenode or manifestnode belongs to.
The caller can specify some nodes that must be included in the
changegroup using the extranodes argument. It should be a dict
where the keys are the filenames (or 1 for the manifest), and the
values are lists of (node, linknode) tuples, where node is a wanted
node and linknode is the changelog node that should be transmitted as
the linkrev.
"""
# Set up some initial variables
# Make it easy to refer to self.changelog
cl = self.changelog
# msng is short for missing - compute the list of changesets in this
# changegroup.
if not bases:
bases = [nullid]
msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
if extranodes is None:
# can we go through the fast path ?
heads.sort()
allheads = self.heads()
allheads.sort()
if heads == allheads:
return self._changegroup(msng_cl_lst, source)
# slow path
self.hook('preoutgoing', throw=True, source=source)
self.changegroupinfo(msng_cl_lst, source)
# Some bases may turn out to be superfluous, and some heads may be
# too. nodesbetween will return the minimal set of bases and heads
# necessary to re-create the changegroup.
# Known heads are the list of heads that it is assumed the recipient
# of this changegroup will know about.
knownheads = set()
# We assume that all parents of bases are known heads.
for n in bases:
knownheads.update(cl.parents(n))
knownheads.discard(nullid)
knownheads = list(knownheads)
if knownheads:
# Now that we know what heads are known, we can compute which
# changesets are known. The recipient must know about all
# changesets required to reach the known heads from the null
# changeset.
has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
junk = None
# Transform the list into a set.
has_cl_set = set(has_cl_set)
else:
# If there were no known heads, the recipient cannot be assumed to
# know about any changesets.
has_cl_set = set()
# Make it easy to refer to self.manifest
mnfst = self.manifest
# We don't know which manifests are missing yet
msng_mnfst_set = {}
# Nor do we know which filenodes are missing.
msng_filenode_set = {}
junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
junk = None
# A changeset always belongs to itself, so the changenode lookup
# function for a changenode is identity.
def identity(x):
return x
# If we determine that a particular file or manifest node must be a
# node that the recipient of the changegroup will already have, we can
# also assume the recipient will have all the parents. This function
# prunes them from the set of missing nodes.
def prune_parents(revlog, hasset, msngset):
for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
msngset.pop(revlog.node(r), None)
# Use the information collected in collect_manifests_and_files to say
# which changenode any manifestnode belongs to.
def lookup_manifest_link(mnfstnode):
return msng_mnfst_set[mnfstnode]
# A function generating function that sets up the initial environment
        # for the inner function.
def filenode_collector(changedfiles):
# This gathers information from each manifestnode included in the
# changegroup about which filenodes the manifest node references
# so we can include those in the changegroup too.
#
# It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
# the first manifest that references it belongs to.
def collect_msng_filenodes(mnfstnode):
r = mnfst.rev(mnfstnode)
if r - 1 in mnfst.parentrevs(r):
# If the previous rev is one of the parents,
# we only need to see a diff.
deltamf = mnfst.readdelta(mnfstnode)
# For each line in the delta
for f, fnode in deltamf.iteritems():
f = changedfiles.get(f, None)
# And if the file is in the list of files we care
# about.
if f is not None:
# Get the changenode this manifest belongs to
clnode = msng_mnfst_set[mnfstnode]
# Create the set of filenodes for the file if
# there isn't one already.
ndset = msng_filenode_set.setdefault(f, {})
# And set the filenode's changelog node to the
# manifest's if it hasn't been set already.
ndset.setdefault(fnode, clnode)
else:
# Otherwise we need a full manifest.
m = mnfst.read(mnfstnode)
                    # For every file we care about.
for f in changedfiles:
fnode = m.get(f, None)
# If it's in the manifest
if fnode is not None:
# See comments above.
clnode = msng_mnfst_set[mnfstnode]
ndset = msng_filenode_set.setdefault(f, {})
ndset.setdefault(fnode, clnode)
return collect_msng_filenodes
# We have a list of filenodes we think we need for a file, lets remove
# all those we know the recipient must have.
def prune_filenodes(f, filerevlog):
msngset = msng_filenode_set[f]
hasset = set()
# If a 'missing' filenode thinks it belongs to a changenode we
# assume the recipient must have, then the recipient must have
# that filenode.
for n in msngset:
clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
if clnode in has_cl_set:
hasset.add(n)
prune_parents(filerevlog, hasset, msngset)
        # A function generator function that sets up a context for the
# inner function.
def lookup_filenode_link_func(fname):
msngset = msng_filenode_set[fname]
# Lookup the changenode the filenode belongs to.
def lookup_filenode_link(fnode):
return msngset[fnode]
return lookup_filenode_link
# Add the nodes that were explicitly requested.
def add_extra_nodes(name, nodes):
if not extranodes or name not in extranodes:
return
for node, linknode in extranodes[name]:
if node not in nodes:
nodes[node] = linknode
        # Now that we have all these utility functions to help out and
# logically divide up the task, generate the group.
def gengroup():
# The set of changed files starts empty.
changedfiles = {}
collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
# Create a changenode group generator that will call our functions
# back to lookup the owning changenode and collect information.
group = cl.group(msng_cl_lst, identity, collect)
cnt = 0
for chnk in group:
yield chnk
self.ui.progress('bundle changes', cnt, unit='chunks')
cnt += 1
self.ui.progress('bundle changes', None, unit='chunks')
# Figure out which manifest nodes (of the ones we think might be
# part of the changegroup) the recipient must know about and
# remove them from the changegroup.
has_mnfst_set = set()
for n in msng_mnfst_set:
# If a 'missing' manifest thinks it belongs to a changenode
# the recipient is assumed to have, obviously the recipient
# must have that manifest.
linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
if linknode in has_cl_set:
has_mnfst_set.add(n)
prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
add_extra_nodes(1, msng_mnfst_set)
msng_mnfst_lst = msng_mnfst_set.keys()
# Sort the manifestnodes by revision number.
msng_mnfst_lst.sort(key=mnfst.rev)
# Create a generator for the manifestnodes that calls our lookup
# and data collection functions back.
group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
filenode_collector(changedfiles))
cnt = 0
for chnk in group:
yield chnk
self.ui.progress('bundle manifests', cnt, unit='chunks')
cnt += 1
self.ui.progress('bundle manifests', None, unit='chunks')
# These are no longer needed, dereference and toss the memory for
# them.
msng_mnfst_lst = None
msng_mnfst_set.clear()
if extranodes:
for fname in extranodes:
if isinstance(fname, int):
continue
msng_filenode_set.setdefault(fname, {})
changedfiles[fname] = 1
# Go through all our files in order sorted by name.
cnt = 0
for fname in sorted(changedfiles):
filerevlog = self.file(fname)
if not len(filerevlog):
raise util.Abort(_("empty or missing revlog for %s") % fname)
# Toss out the filenodes that the recipient isn't really
# missing.
if fname in msng_filenode_set:
prune_filenodes(fname, filerevlog)
add_extra_nodes(fname, msng_filenode_set[fname])
msng_filenode_lst = msng_filenode_set[fname].keys()
else:
msng_filenode_lst = []
# If any filenodes are left, generate the group for them,
# otherwise don't bother.
if len(msng_filenode_lst) > 0:
yield changegroup.chunkheader(len(fname))
yield fname
# Sort the filenodes by their revision #
msng_filenode_lst.sort(key=filerevlog.rev)
# Create a group generator and only pass in a changenode
# lookup function as we need to collect no information
# from filenodes.
group = filerevlog.group(msng_filenode_lst,
lookup_filenode_link_func(fname))
for chnk in group:
self.ui.progress(
'bundle files', cnt, item=fname, unit='chunks')
cnt += 1
yield chnk
if fname in msng_filenode_set:
# Don't need this anymore, toss it to free memory.
del msng_filenode_set[fname]
# Signal that no more groups are left.
yield changegroup.closechunk()
self.ui.progress('bundle files', None, unit='chunks')
if msng_cl_lst:
self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
return util.chunkbuffer(gengroup())
def changegroup(self, basenodes, source):
# to avoid a race we use changegroupsubset() (issue1320)
return self.changegroupsubset(basenodes, self.heads(), source)
def _changegroup(self, nodes, source):
"""Compute the changegroup of all nodes that we have that a recipient
doesn't. Return a chunkbuffer object whose read() method will return
successive changegroup chunks.
This is much easier than the previous function as we can assume that
the recipient has any changenode we aren't sending them.
nodes is the set of nodes to send"""
self.hook('preoutgoing', throw=True, source=source)
cl = self.changelog
revset = set([cl.rev(n) for n in nodes])
self.changegroupinfo(nodes, source)
def identity(x):
return x
def gennodelst(log):
for r in log:
if log.linkrev(r) in revset:
yield log.node(r)
def lookuprevlink_func(revlog):
def lookuprevlink(n):
return cl.node(revlog.linkrev(revlog.rev(n)))
return lookuprevlink
def gengroup():
'''yield a sequence of changegroup chunks (strings)'''
# construct a list of all changed files
changedfiles = {}
mmfs = {}
collect = changegroup.collector(cl, mmfs, changedfiles)
cnt = 0
for chnk in cl.group(nodes, identity, collect):
self.ui.progress('bundle changes', cnt, unit='chunks')
cnt += 1
yield chnk
self.ui.progress('bundle changes', None, unit='chunks')
mnfst = self.manifest
nodeiter = gennodelst(mnfst)
cnt = 0
for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
self.ui.progress('bundle manifests', cnt, unit='chunks')
cnt += 1
yield chnk
self.ui.progress('bundle manifests', None, unit='chunks')
cnt = 0
for fname in sorted(changedfiles):
filerevlog = self.file(fname)
if not len(filerevlog):
raise util.Abort(_("empty or missing revlog for %s") % fname)
nodeiter = gennodelst(filerevlog)
nodeiter = list(nodeiter)
if nodeiter:
yield changegroup.chunkheader(len(fname))
yield fname
lookup = lookuprevlink_func(filerevlog)
for chnk in filerevlog.group(nodeiter, lookup):
self.ui.progress(
'bundle files', cnt, item=fname, unit='chunks')
cnt += 1
yield chnk
self.ui.progress('bundle files', None, unit='chunks')
yield changegroup.closechunk()
if nodes:
self.hook('outgoing', node=hex(nodes[0]), source=source)
return util.chunkbuffer(gengroup())
def addchangegroup(self, source, srctype, url, emptyok=False):
"""add changegroup to repo.
return values:
- nothing changed or no source: 0
- more heads than before: 1+added heads (2..n)
- less heads than before: -1-removed heads (-2..-n)
- number of heads stays the same: 1
"""
def csmap(x):
self.ui.debug("add changeset %s\n" % short(x))
return len(cl)
def revmap(x):
return cl.rev(x)
if not source:
return 0
self.hook('prechangegroup', throw=True, source=srctype, url=url)
changesets = files = revisions = 0
# write changelog data to temp files so concurrent readers will not see
# inconsistent view
cl = self.changelog
cl.delayupdate()
oldheads = len(cl.heads())
tr = self.transaction()
try:
trp = weakref.proxy(tr)
# pull off the changeset group
self.ui.status(_("adding changesets\n"))
clstart = len(cl)
class prog(object):
step = 'changesets'
count = 1
ui = self.ui
def __call__(self):
self.ui.progress(self.step, self.count, unit='chunks')
self.count += 1
pr = prog()
chunkiter = changegroup.chunkiter(source, progress=pr)
if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
raise util.Abort(_("received changelog group is empty"))
clend = len(cl)
changesets = clend - clstart
self.ui.progress('changesets', None)
# pull off the manifest group
self.ui.status(_("adding manifests\n"))
pr.step = 'manifests'
pr.count = 1
chunkiter = changegroup.chunkiter(source, progress=pr)
# no need to check for empty manifest group here:
# if the result of the merge of 1 and 2 is the same in 3 and 4,
# no new manifest will be created and the manifest group will
# be empty during the pull
self.manifest.addgroup(chunkiter, revmap, trp)
self.ui.progress('manifests', None)
needfiles = {}
if self.ui.configbool('server', 'validate', default=False):
# validate incoming csets have their manifests
for cset in xrange(clstart, clend):
mfest = self.changelog.read(self.changelog.node(cset))[0]
mfest = self.manifest.readdelta(mfest)
# store file nodes we must see
for f, n in mfest.iteritems():
needfiles.setdefault(f, set()).add(n)
# process the files
self.ui.status(_("adding file changes\n"))
pr.step = 'files'
pr.count = 1
while 1:
f = changegroup.getchunk(source)
if not f:
break
self.ui.debug("adding %s revisions\n" % f)
fl = self.file(f)
o = len(fl)
chunkiter = changegroup.chunkiter(source, progress=pr)
if fl.addgroup(chunkiter, revmap, trp) is None:
raise util.Abort(_("received file revlog group is empty"))
revisions += len(fl) - o
files += 1
if f in needfiles:
needs = needfiles[f]
for new in xrange(o, len(fl)):
n = fl.node(new)
if n in needs:
needs.remove(n)
if not needs:
del needfiles[f]
self.ui.progress('files', None)
for f, needs in needfiles.iteritems():
fl = self.file(f)
for n in needs:
try:
fl.rev(n)
except error.LookupError:
raise util.Abort(
_('missing file data for %s:%s - run hg verify') %
(f, hex(n)))
newheads = len(cl.heads())
heads = ""
if oldheads and newheads != oldheads:
heads = _(" (%+d heads)") % (newheads - oldheads)
self.ui.status(_("added %d changesets"
" with %d changes to %d files%s\n")
% (changesets, revisions, files, heads))
if changesets > 0:
p = lambda: cl.writepending() and self.root or ""
self.hook('pretxnchangegroup', throw=True,
node=hex(cl.node(clstart)), source=srctype,
url=url, pending=p)
# make changelog see real files again
cl.finalize(trp)
tr.close()
finally:
del tr
if changesets > 0:
# forcefully update the on-disk branch cache
self.ui.debug("updating the branch cache\n")
self.branchtags()
self.hook("changegroup", node=hex(cl.node(clstart)),
source=srctype, url=url)
for i in xrange(clstart, clend):
self.hook("incoming", node=hex(cl.node(i)),
source=srctype, url=url)
# never return 0 here:
if newheads < oldheads:
return newheads - oldheads - 1
else:
return newheads - oldheads + 1
def stream_in(self, remote):
fp = remote.stream_out()
l = fp.readline()
try:
resp = int(l)
except ValueError:
raise error.ResponseError(
_('Unexpected response from remote server:'), l)
if resp == 1:
raise util.Abort(_('operation forbidden by server'))
elif resp == 2:
raise util.Abort(_('locking the remote repository failed'))
elif resp != 0:
raise util.Abort(_('the server sent an unknown error code'))
self.ui.status(_('streaming all changes\n'))
l = fp.readline()
try:
total_files, total_bytes = map(int, l.split(' ', 1))
except (ValueError, TypeError):
raise error.ResponseError(
_('Unexpected response from remote server:'), l)
self.ui.status(_('%d files to transfer, %s of data\n') %
(total_files, util.bytecount(total_bytes)))
start = time.time()
for i in xrange(total_files):
# XXX doesn't support '\n' or '\r' in filenames
l = fp.readline()
try:
name, size = l.split('\0', 1)
size = int(size)
except (ValueError, TypeError):
raise error.ResponseError(
_('Unexpected response from remote server:'), l)
self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
# for backwards compat, name was partially encoded
ofp = self.sopener(store.decodedir(name), 'w')
for chunk in util.filechunkiter(fp, limit=size):
ofp.write(chunk)
ofp.close()
elapsed = time.time() - start
if elapsed <= 0:
elapsed = 0.001
self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
(util.bytecount(total_bytes), elapsed,
util.bytecount(total_bytes / elapsed)))
self.invalidate()
return len(self.heads()) + 1
def clone(self, remote, heads=[], stream=False):
'''clone remote repository.
keyword arguments:
heads: list of revs to clone (forces use of pull)
stream: use streaming clone if possible'''
# now, all clients that can request uncompressed clones can
# read repo formats supported by all servers that can serve
# them.
# if revlog format changes, client will have to check version
# and format flags on "stream" capability, and use
# uncompressed only if compatible.
if stream and not heads and remote.capable('stream'):
return self.stream_in(remote)
return self.pull(remote, heads)
# used to avoid circular references so destructors work
def aftertrans(files):
renamefiles = [tuple(t) for t in files]
def a():
for src, dest in renamefiles:
util.rename(src, dest)
return a
def instance(ui, path, create):
return localrepository(ui, util.drop_scheme('file', path), create)
def islocal(path):
return True
| gpl-2.0 | -7,958,617,751,188,088,000 | 37.840919 | 81 | 0.51412 | false | 4.372362 | false | false | false |
dusenberrymw/systemml | src/main/pythondoc/conf.py | 8 | 6839 | # -*- coding: utf-8 -*-
#
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
#
# SystemML documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 24 11:58:36 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../python'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SystemML'
copyright = u'2017 The Apache Software Foundation. All rights reserved'
author = u''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = os.environ.get('SYSTEMML_VERSION', 'latest')
# The full version, including alpha/beta/rc tags.
release = os.environ.get('SYSTEMML_RELEASE', version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# html_sidebars = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, links to the .rst sources are added to the pages.
html_show_sourcelink = False
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../../../docs/img/systemml-logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs.
html_favicon = "../../../docs/img/favicon.png"
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'SystemMLdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'SystemML.tex', u'SystemML Documentation',
u'None', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'systemml', u'SystemML Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'SystemML', u'SystemML Documentation',
author, 'SystemML', 'One line description of project.',
'Miscellaneous'),
]
| apache-2.0 | 6,972,552,690,581,151,000 | 31.412322 | 79 | 0.679924 | false | 3.957755 | false | false | false |
idbedead/RNA-sequence-tools | RNA_Seq_analysis/cluster.py | 2 | 33129 | import cPickle as pickle
import numpy as np
import pandas as pd
import os
from subprocess import call
import matplotlib
matplotlib.use('QT4Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import LinearLocator
import scipy
import json
from sklearn.decomposition import PCA as skPCA
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import fcluster, linkage, dendrogram, set_link_color_palette, to_tree, inconsistent
import seaborn as sns
from matplotlib.colors import rgb2hex, colorConverter
from pprint import pprint
import difflib
from operator import itemgetter
import itertools
#base path to pickle files with fpkm or count matrix
path_to_file = '/Volumes/Seq_data/count-picard_zw_all'
#for labeling all output files
base_name = 'deseq_zw_all'
filename = os.path.join(path_to_file, base_name+'subgroups_200_deseq_color2')
call('mkdir -p '+filename, shell=True)
#if you have cell group assignments and want to use them for coloring points and labels
#provide filename (full path if not in path_to_file directory)
group_file = False
cell_group_filename = ''
#if you want to restrict the genes included to a specific genelist, requires 'GeneID' and 'GroupID' header
make_gene_matrix = False
if make_gene_matrix:
gene_list_file = 'go_search_genes_lung_all.txt'
#if you want to restrict the cell matrix file to a subset of cells, expects 'SampleID' header
make_cell_matrix = False
if make_cell_matrix:
cell_file = 'E15.5_unsorted.txt'
cell_file_source = os.path.join(path_to_file, cell_file)
#choose metric and method for scipy clustering (also used in seaborn clustermap)
metric='euclidean'
method='average'
#if you want to test the stability of clustering over a range of top pca inputs
test_clust_stability = False
#number of top PCA-ranked genes to use and whether to run between-group
#significance tests (assumed defaults, adjust as needed)
select_gene_number = 200
group_sig_test = False
#load gene expression matrix file
if path_to_file.split('/')[-1][0:8] == 'cuffnorm':
by_cell = pd.DataFrame.from_csv(os.path.join(path_to_file,base_name+'_outlier_filtered.txt'), sep='\t')
else:
by_cell = pd.DataFrame.from_csv(os.path.join(path_to_file,'DESeq__count_zw_all_outlier_filtered_matrix_norm.txt'), sep='\t')
by_gene = by_cell.transpose()
#create list of genes
gene_list = by_cell.index.tolist()
#create cell list
cell_list = [x for x in list(by_cell.columns.values)]
df_by_gene1 = pd.DataFrame(by_gene, columns=gene_list, index=cell_list)
df_by_cell1 = pd.DataFrame(by_cell, columns=cell_list, index=gene_list)
hu_cc_gene_df = pd.DataFrame.from_csv('/Volumes/Seq_data/cell_cycle_genes.txt', sep='\t')
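#Return the expression submatrix for the cell cycle genes, remapping symbols to
#alternate gene names when the primary symbol is absent from the matrix.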
def cell_cycle(cell_cycle_gene_df, df_by_gene):
gene_list = df_by_gene.columns.tolist()
for g_sym, alt_g_name in zip(cell_cycle_gene_df['Symbol'], cell_cycle_gene_df['Gene Name']):
if g_sym not in gene_list:
print g_sym
for g in alt_g_name.split(','):
if g.strip() in gene_list:
cell_cycle_gene_df['Symbol'][g_sym] = g.strip()
else:
try:
print cell_cycle_gene_df.Symbol
cell_cycle_gene_df = cell_cycle_gene_df[cell_cycle_gene_df.Symbol != g_sym]
except ValueError:
print g_sym, g
cc_gene_df = df_by_gene[cell_cycle_gene_df['Symbol']]
return cc_gene_df
def make_new_matrix_gene(org_matrix_by_gene, gene_list_file):
split_on='_'
gene_df = pd.read_csv(os.path.join(path_to_file, gene_list_file), delimiter= '\t')
gene_list = gene_df['GeneID'].tolist()
group_list = gene_df['GroupID'].tolist()
gmatrix_df = org_matrix_by_gene[gene_list]
cmatrix_df = gmatrix_df.transpose()
cell_list1 = []
for cell in cmatrix_df.columns.values:
if exclude:
if cell.split(split_on)[1] == 'ctrl' or cell.split(split_on)[1] == 'pnx':
if cell.split(split_on)[2][0] =='C':
print cell, 'cell'
cell_list1.append(cell)
else:
cell_list1.append(cell)
new_cmatrix_df = cmatrix_df[cell_list1]
new_gmatrix_df = new_cmatrix_df.transpose()
return new_cmatrix_df, new_gmatrix_df
def make_new_matrix_cell(org_matrix_by_cell, cell_list_file):
cell_df = pd.read_csv(os.path.join(path_to_file, cell_list_file), delimiter= '\t')
cell_list_new = [cell.strip('\n') for cell in cell_df['Sample ID'].tolist()]
cell_list_old = org_matrix_by_cell.columns.values
cell_list = [c for c in cell_list_new if c in cell_list_old]
timepoint = cell_df['Timepoint'].tolist()
cell_type = cell_df['Type'].tolist()
new_cmatrix_df = org_matrix_by_cell[cell_list]
new_name_list = ['_'.join([x,y,z]) for x,y,z in zip(cell_list,timepoint,cell_type)]
new_name_dict = {k:v for k,v in zip(cell_list,new_name_list)}
print new_name_dict
new_cmatrix_df = new_cmatrix_df.rename(columns = new_name_dict)
new_gmatrix_df = new_cmatrix_df.transpose()
return new_cmatrix_df, new_gmatrix_df
if make_gene_matrix:
    df_by_cell2, df_by_gene2 = make_new_matrix_gene(df_by_gene1, gene_list_file)
if make_cell_matrix:
df_by_cell2, df_by_gene2 = make_new_matrix_cell(df_by_cell1, cell_file_source)
else:
df_by_cell2, df_by_gene2 = df_by_cell1, df_by_gene1
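#Drop genes detected (count >= 1) in fewer than number_expressed cells and return
#the filtered array together with the surviving gene list.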
def preprocess_df(np_by_cell, gen_list, number_expressed=3):
g_todelete = []
for g1, gene in enumerate(np_by_cell):
cells_exp = (gene >= 1.0).sum()
if cells_exp < number_expressed:
g_todelete.append(g1)
g1_todelete = sorted(g_todelete, reverse = True)
print np_by_cell.shape
for pos in g1_todelete:
if type(gen_list[pos]) != float:
print 'Gene '+gen_list[pos]+' not expressed in '+str(number_expressed)+' cells.'
pass
del gen_list[pos]
n_by_cell = np.delete(np_by_cell, g1_todelete, axis=0)
print n_by_cell.shape
return n_by_cell, gen_list
np_by_cell2 = np.array(df_by_cell2.values, dtype='f')
gen_list = df_by_cell2.index.tolist()
np_by_cell, n_gene_list = preprocess_df(np_by_cell2, gen_list)
df_by_gene = pd.DataFrame(np_by_cell.transpose(), index = df_by_cell2.columns.values, columns= n_gene_list)
df_by_cell = df_by_gene.transpose()
def find_top_common_genes(log2_df_by_cell, num_common=100):
    top_common_list = []
    count = 0
    done = False
log2_df_by_gene = log2_df_by_cell.transpose()
log2_df2_gene = pd.DataFrame(log2_df_by_gene.convert_objects(convert_numeric=True))
log_mean = log2_df2_gene.mean(axis=0).order(ascending=False)
log2_sorted_gene = log2_df_by_gene.reindex_axis(log2_df_by_gene.mean(axis=0).order(ascending=False).index, axis=1)
    for gene in log2_sorted_gene.columns.tolist():
if not log2_df_by_gene[gene].any() <= 1.1:
if count < num_common:
count+=1
top_common_list.append(gene)
            if count == num_common:
done = True
break
if done:
return log2_df_by_gene[top_common_list].transpose()
else:
return False
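#Filter outlier cells: keep only cells whose mean log2 expression over the top
#common genes exceeds the overall mean minus one standard deviation.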
def log2_oulierfilter(df_by_cell, plot=False):
log2_df = np.log2(df_by_cell+1)
top_log2 = find_top_common_genes(log2_df)
    if top_log2 is False:
print "no common genes found"
return log2_df, log2_df.transpose()
log2_df2= pd.DataFrame(log2_df.convert_objects(convert_numeric=True))
log_mean = top_log2.mean(axis=0).order(ascending=False)
log2_sorted = top_log2.reindex_axis(top_log2.mean(axis=0).order(ascending=False).index, axis=1)
xticks = []
keep_col= []
log2_cutoff = np.average(log2_sorted)-np.std(log2_sorted)
for col, m in zip(log2_sorted.columns.tolist(),log2_sorted.mean()):
print m
if m > log2_cutoff:
keep_col.append(col)
xticks.append(col+' '+str("%.2f" % m))
filtered_df_by_cell = df_by_cell[keep_col]
filtered_df_by_gene = filtered_df_by_cell.transpose()
filtered_log2 = np.log2(filtered_df_by_cell[filtered_df_by_cell>0])
if plot:
ax = sns.boxplot(data=filtered_log2, whis= .75, notch=True)
ax = sns.stripplot(x=filtered_log2.columns.values, y=filtered_log2.mean(axis=0), size=4, jitter=True, edgecolor="gray")
xtickNames = plt.setp(ax, xticklabels=xticks)
plt.setp(xtickNames, rotation=90, fontsize=9)
plt.show()
plt.clf()
sns.distplot(filtered_log2.mean())
plt.show()
log2_expdf_cell = np.log2(filtered_df_by_cell+1)
log2_expdf_gene = log2_expdf_cell.transpose()
return log2_expdf_cell, log2_expdf_gene
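#Hierarchically cluster cells on the expression matrix and save a dendrogram;
#returns the pairwise distances, linkage matrix, and dendrogram data.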
def run_cluster(by_gene_matrix):
cell_list = [x for x in list(by_gene_matrix.index.values)]
cell_dist = pdist(np.array(by_gene_matrix), metric='euclidean')
row_dist = pd.DataFrame(squareform(cell_dist), columns=cell_list, index=cell_list)
row_clusters = linkage(cell_dist, metric=metric, method='average')
link_mat = pd.DataFrame(row_clusters,
columns=['row label 1', 'row label 2', 'distance', 'no. of items in clust.'],
index=['cluster %d' %(i+1) for i in range(row_clusters.shape[0])])
row_dendr = dendrogram(row_clusters, labels=cell_list, leaf_rotation=90, leaf_font_size=8)
plt.savefig(os.path.join(path_to_file,'dendrogram_gene.png'))
plt.clf()
return cell_dist, row_dist, row_clusters, link_mat, row_dendr
def augmented_dendrogram(*args, **kwargs):
plt.clf()
ddata = dendrogram(*args, **kwargs)
if not kwargs.get('no_plot', False):
for i, d in zip(ddata['icoord'], ddata['dcoord'], ):
x = 0.5 * sum(i[1:3])
y = d[1]
if y >= 200000:
plt.plot(x, y, 'ro')
plt.annotate("%.3g" % y, (x, y), xytext=(0, -8),
textcoords='offset points',
va='top', ha='center')
plt.show()
plt.savefig(os.path.join(path_to_file,'augmented_dendrogram.png'))
def cluster_indices(cluster_assignments):
n = cluster_assignments.max()
indices = []
for cluster_number in range(1, n + 1):
indices.append(np.where(cluster_assignments == cluster_number)[0])
return indices
def clust_members(r_link, cutoff):
clust = fcluster(r_link,cutoff)
num_clusters = clust.max()
indices = cluster_indices(clust)
return num_clusters, indices
def print_clust_membs(indices, cell_list):
for k, ind in enumerate(indices):
print "cluster", k + 1, "is", [cell_list[x] for x in ind]
def plot_tree(dendr, pos=None, save=False):
icoord = scipy.array(dendr['icoord'])
dcoord = scipy.array(dendr['dcoord'])
color_list = scipy.array(dendr['color_list'])
xmin, xmax = icoord.min(), icoord.max()
ymin, ymax = dcoord.min(), dcoord.max()
if pos:
icoord = icoord[pos]
dcoord = dcoord[pos]
for xs, ys, color in zip(icoord, dcoord, color_list):
plt.plot(xs, ys, color)
plt.xlim(xmin-10, xmax + 0.1*abs(xmax))
plt.ylim(ymin, ymax + 0.1*abs(ymax))
if save:
plt.savefig(os.path.join(path_to_file,'plot_dendrogram.png'))
plt.show()
# Create a nested dictionary from the ClusterNodes returned by SciPy
def add_node(node, parent):
# First create the new node and append it to its parent's children
newNode = dict( node_id=node.id, children=[] )
parent["children"].append( newNode )
# Recursively add the current node's children
if node.left: add_node( node.left, newNode )
if node.right: add_node( node.right, newNode )
cc = []
# Label each node with the names of each leaf in its subtree
def label_tree(n, id2name):
# If the node is a leaf, then we have its name
if len(n["children"]) == 0:
leafNames = [ id2name[n["node_id"]] ]
# If not, flatten all the leaves in the node's subtree
else:
leafNames = reduce(lambda ls, c: ls + label_tree(c,id2name), n["children"], [])
cc.append((len(leafNames), [x.strip('\n') for x in leafNames]))
cc.sort(key=lambda tup: tup[0], reverse = True)
# Delete the node id since we don't need it anymore and
# it makes for cleaner JSON
del n["node_id"]
# Labeling convention: "-"-separated leaf names
n["name"] = name = "-".join(sorted(map(str, leafNames)))
return leafNames
#Makes labeled json tree for visualization in d3
def make_tree_json(row_clusters, df_by_gene):
T= to_tree(row_clusters)
# Create dictionary for labeling nodes by their IDs
labels = list(df_by_gene.index)
id2name = dict(zip(range(len(labels)), labels))
# Initialize nested dictionary for d3, then recursively iterate through tree
d3Dendro = dict(children=[], name="Root1")
add_node( T, d3Dendro )
label_tree( d3Dendro["children"][0], id2name )
# Output to JSON
json.dump(d3Dendro, open(os.path.join(path_to_file,"d3-dendrogram.json"), "w"), sort_keys=True, indent=4)
return cc
#finds significant genes between subclusters
def find_twobytwo(cc, df_by_cell, full_by_cell_df, fraction_to_plot=10):
gene_list = full_by_cell_df.index.tolist()
by_gene_df = full_by_cell_df.transpose()
pair_dict = {}
parent = cc[0][1]
p_num = cc[0][0]
l_nums = [x[0] for x in cc]
c_lists = [c[1] for c in cc[1:]]
unique_count = 1
pair_list = []
for i, c in enumerate(c_lists):
for i2, c2 in enumerate(c_lists):
overlap = [i for i in c if i in c2]
if not overlap and len(c)>=p_num/fraction_to_plot and len(c2)>=p_num/fraction_to_plot:
if (c,c2) not in pair_list:
pair_list.append((c,c2))
pair_list.append((c2,c))
pair_dict[str(len(c))+'cells_vs_'+str(len(c2))+'cells'+str(unique_count)]= [c, c2]
unique_count+=1
for v, k in pair_dict.items():
g_pvalue_dict = {}
index_list = []
sig_gene_list = []
cell_list1 = [x.strip('\n') for x in k[0]]
cell_list2 = [xx.strip('\n') for xx in k[1]]
group1 = str(len(cell_list1))
group2 = str(len(cell_list2))
df_by_cell_1 = full_by_cell_df[cell_list1]
df_by_cell_2 = full_by_cell_df[cell_list2]
df_by_gene_1 = df_by_cell_1.transpose()
df_by_gene_2 = df_by_cell_2.transpose()
for g in gene_list:
g_pvalue = scipy.stats.f_oneway(df_by_gene_1[g], df_by_gene_2[g])
if g_pvalue[0] > 0 and g_pvalue[1] <= 1:
g_pvalue_dict[g] = g_pvalue
if g not in [s[0] for s in sig_gene_list]:
sig_gene_list.append([g, g_pvalue[1]])
sig_gene_list.sort(key=lambda tup: tup[1])
pvalues = [p[1] for p in sig_gene_list]
gene_index = [ge[0] for ge in sig_gene_list]
mean_log2_exp_list = []
sig_1_2_list = []
mean1_list = []
mean2_list = []
for sig_gene in gene_index:
sig_gene_df = by_gene_df[sig_gene]
mean_log2_exp_list.append(sig_gene_df.mean())
sig_cell_df = sig_gene_df.transpose()
mean_cell1 = sig_cell_df[cell_list1].mean()
mean1_list.append(mean_cell1)
mean_cell2 = sig_cell_df[cell_list2].mean()
mean2_list.append(mean_cell2)
ratio_1_2 = (mean_cell1+1)/(mean_cell2+1)
sig_1_2_list.append(ratio_1_2)
sig_df = pd.DataFrame({'pvalues':pvalues,'mean_all':mean_log2_exp_list,'mean_group1':mean1_list, 'mean_group2':mean2_list, 'ratio_1_2':sig_1_2_list}, index=gene_index)
cell_names_df = pd.DataFrame({'cells1':pd.Series(cell_list1, index=range(len(cell_list1))), 'cells2':pd.Series(cell_list2, index=range(len(cell_list2)))})
sig_df.to_csv(os.path.join(filename,'sig_'+v+'_pvalues.txt'), sep = '\t')
cell_names_df.to_csv(os.path.join(filename,'sig_'+v+'_cells.txt'), sep = '\t')
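#Rank genes by the summed absolute loadings on the first three principal
#components, plot cells and top genes in PC space, and return the ranked list.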
def plot_PCA(df_by_gene, num_genes=100, gene_list_filter=False, title='', plot=False, label_map=False, annotate=False):
gene_list = df_by_gene.columns.tolist()
print len(gene_list)
sns.set_palette("RdBu_r", 10, 1)
if gene_list_filter:
sig_by_gene = df_by_gene[gene_list_filter]
sig_by_cell = sig_by_gene.transpose()
else:
sig_by_gene = df_by_gene
sig_by_cell = sig_by_gene.transpose()
gene_pca = skPCA(n_components=3)
np_by_gene = np.asarray(sig_by_gene)
by_gene_trans = gene_pca.fit_transform(np_by_gene)
Pc_df = pd.DataFrame(gene_pca.components_.T, columns=['PC-1', 'PC-2', 'PC-3'], index=sig_by_gene.columns.tolist())
pca_rank_df = Pc_df.abs().sum(axis=1)
Pc_sort_df = pca_rank_df.nlargest(len(sig_by_gene.columns.tolist()))
top_pca_list = Pc_sort_df.index.tolist()
print top_pca_list[0:num_genes], 'top_pca_list'
top_by_gene = df_by_gene[top_pca_list[0:num_genes]]
gene_top = skPCA(n_components=2)
cell_pca = skPCA(n_components=2)
top_by_cell = top_by_gene.transpose()
np_top_gene = np.asarray(top_by_cell)
np_top_cell = np.asarray(top_by_gene)
top_cell_trans = cell_pca.fit_transform(np_top_cell)
top_gene_trans = gene_top.fit_transform(np_top_gene)
fig, (ax_cell, ax_gene) = plt.subplots(2, 1, figsize=(15, 30), sharex=False)
if label_map:
X = [x for x in top_cell_trans[:, 0]]
Y = [y for y in top_cell_trans[:, 1]]
labels = [label_map[cell][2] for cell in top_by_cell.columns.tolist()]
markers = [label_map[cell][1] for cell in top_by_cell.columns.tolist()]
colors = [label_map[cell][0] for cell in top_by_cell.columns.tolist()]
label_done = []
for X_pos, Y_pos, m, color, l in zip(X, Y, markers, colors, labels):
if l in label_done:
lab = ''
else:
lab= l
label_done.append(l)
ax_cell.scatter(X_pos, Y_pos, marker=m, c=color, label=lab, s=20)
else:
ax_cell.scatter(top_cell_trans[:, 0], top_cell_trans[:, 1], alpha=0.75)
ax_cell.set_xlim([min(top_cell_trans[:, 0])-1, max(top_cell_trans[:, 0]+1)])
ax_cell.set_ylim([min(top_cell_trans[:, 1])-1, max(top_cell_trans[:, 1]+1)])
ax_cell.set_title(title+'_cell')
ax_cell.legend(loc='best', ncol=1, prop={'size':12}, markerscale=2, frameon=True)
ax_cell.set_xlabel('PC1')
ax_cell.set_ylabel('PC2')
if annotate:
for label, x, y in zip(top_by_cell.columns, top_cell_trans[:, 0], top_cell_trans[:, 1]):
ax_cell.annotate(label, (x+0.1, y+0.1))
ax_gene.scatter(top_gene_trans[:, 0], top_gene_trans[:, 1], alpha=0.75)
ax_gene.set_xlim([min(top_gene_trans[:, 0])-1, max(top_gene_trans[:, 0]+1)])
ax_gene.set_ylim([min(top_gene_trans[:, 1])-1, max(top_gene_trans[:, 1]+1)])
ax_gene.set_title(title+'_gene')
ax_gene.set_xlabel('PC1')
ax_gene.set_ylabel('PC2')
print len(top_by_gene.columns), len(top_gene_trans[:, 0]), len(top_gene_trans[:, 1])
for label, x, y in zip(top_by_gene.columns, top_gene_trans[:, 0], top_gene_trans[:, 1]):
ax_gene.annotate(label, (x, y))
if plot:
plt.show()
if title != '':
save_name = '_'.join(title.split(' ')[0:2])
plt.savefig(os.path.join(filename,save_name+'_skpca.pdf'), bbox_inches='tight')
else:
plt.savefig(os.path.join(filename,'non_group_skpca.pdf'), bbox_inches='tight')
plt.close()
return top_pca_list
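#Plot a clustered heatmap (z-scored per gene) of the top genes across all cells;
#returns the cell linkage, the plotted sub-matrix, and the column (cell) order.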
def clust_heatmap(gene_list, df_by_gene, num_to_plot=len(gene_list), title='', plot=False, label_map=False):
if num_to_plot >175:
sns.set(context= 'poster', font_scale = 0.65/(num_to_plot/100))
else:
sns.set(context= 'poster', font_scale = .80, font ='Verdana')
sns.set_palette('RdBu',4,0.1)
cell_list = df_by_gene.index.tolist()
cg = sns.clustermap(df_by_gene[gene_list[0:num_to_plot]].transpose(), metric=metric, method=method, z_score=0, figsize=(30, 25))
col_order = cg.dendrogram_col.reordered_ind
cg.ax_heatmap.set_title(title)
if label_map:
Xlabs = [cell_list[i] for i in col_order]
colors = [label_map[cell][0] for cell in Xlabs]
for xtick, color in zip(cg.ax_heatmap.get_xticklabels(), colors):
xtick.set_color(color)
xtick.set_rotation(270)
if plot:
plt.show()
cell_linkage = cg.dendrogram_col.linkage
link_mat = pd.DataFrame(cell_linkage,
columns=['row label 1', 'row label 2', 'distance', 'no. of items in clust.'],
index=['cluster %d' %(i+1) for i in range(cell_linkage.shape[0])])
if title != '':
save_name = '_'.join(title.split(' ')[0:2])
cg.savefig(os.path.join(filename, save_name+'_heatmap.pdf'), bbox_inches='tight')
else:
cg.savefig(os.path.join(filename,'Non_group_heatmap_z1_deleted.pdf'), bbox_inches='tight')
plt.close()
return cell_linkage, df_by_gene[gene_list[0:num_to_plot]], col_order
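#For each proper subcluster containing at least 1/fraction_to_plot of the parent
#cells, save its normalized expression matrix and rerun PCA, correlation, and
#heatmap plots on that subset.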
def make_subclusters(cc, log2_expdf_cell, gene_corr_list=False, fraction_to_plot=8, filename=filename, base_name=base_name):
parent = cc[0][1]
p_num = cc[0][0]
l_nums = [x[0] for x in cc]
c_lists = [c[1] for c in cc]
group_ID = 0
for num_members, cell_list in zip(l_nums, c_lists):
if num_members < p_num and num_members >= p_num/fraction_to_plot:
group_ID+=1
title = 'Group_'+str(group_ID)+'_with_'+str(num_members)+'_cells'
cell_subset = log2_expdf_cell[cell_list]
gene_subset = cell_subset.transpose()
norm_df_cell1 = np.exp2(cell_subset)
norm_df_cell = norm_df_cell1 -1
            norm_df_cell.to_csv(os.path.join(filename, base_name+'_'+title+'_matrix.txt'), sep = '\t')
if label_map:
top_pca = plot_PCA(gene_subset, num_genes=gene_number, title=title, plot=False, label_map=label_map)
else:
top_pca = plot_PCA(gene_subset, num_genes=gene_number, title=title, plot=False)
if top_pca != []:
top_pca_by_gene = gene_subset[top_pca]
top_pca_by_cell = top_pca_by_gene.transpose()
if gene_corr_list:
top_genes_search = [x for x in top_pca]
corr_plot(gene_corr_list+top_genes_search[0:3], gene_subset, title = title)
cell_linkage, plotted_df_by_gene, col_order = clust_heatmap(top_pca, top_pca_by_gene, num_to_plot=gene_number, title=title, plot=False, label_map=label_map)
plt.close()
else:
pass
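#Gauge clustering stability by repeating the PCA/heatmap pipeline over an
#increasing number of top genes and comparing cell orderings with difflib ratios.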
def clust_stability(log2_expdf_gene, iterations=16):
sns.set(context='poster', font_scale = 1)
sns.set_palette("RdBu_r")
stability_ratio = []
total_genes = len(log2_expdf_gene.columns.tolist())
end_num = 1000
iter_list = range(100,int(round(end_num)),int(round(end_num/iterations)))
for gene_number in iter_list:
title= str(gene_number)+' genes plot.'
top_pca = plot_PCA(log2_expdf_gene, num_genes=gene_number, title=title)
top_pca_by_gene = log2_expdf_gene[top_pca]
top_pca_by_cell = top_pca_by_gene.transpose()
cell_linkage, plotted_df_by_gene, col_order = clust_heatmap(top_pca, top_pca_by_gene, num_to_plot=gene_number, title=title)
if gene_number == 100:
s1 = col_order
s0 = col_order
else:
s2= col_order
sm_running = difflib.SequenceMatcher(None,s1,s2)
sm_first = difflib.SequenceMatcher(None,s0,s2)
stability_ratio.append((sm_running.ratio(), sm_first.ratio()))
s1=col_order
plt.close()
x= iter_list[1:]
f, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 8), sharex=True)
y1= [m[0] for m in stability_ratio]
y2= [m[1] for m in stability_ratio]
sns.barplot(x, y1, palette="RdBu_r", ax=ax1)
ax1.set_ylabel('Running ratio (new/last)')
sns.barplot(x, y2, palette="RdBu_r", ax=ax2)
ax2.set_ylabel('Ratio to 100')
plt.savefig(os.path.join(filename,'clustering_stability.pdf'), bbox_inches='tight')
plt.show()
plt.close()
return stability_ratio
#run correlation matrix and save only those above threshold
def run_corr(df_by_gene, title, method_name='pearson', sig_threshold= 0.5, run_new=True, min_period=3):
if run_new:
if method_name != 'kendall':
corr_by_gene = df_by_gene.corr(method=method_name, min_periods=min_period)
else:
corr_by_gene = df_by_gene.corr(method=method_name)
corr_by_cell = df_by_cell.corr()
cor = corr_by_gene
cor.loc[:,:] = np.tril(cor.values, k=-1)
cor = cor.stack()
corr_by_gene_pos = cor[cor >=sig_threshold]
corr_by_gene_neg = cor[cor <=(sig_threshold*-1)]
with open(os.path.join(path_to_file,'gene_correlations_sig_neg_'+method_name+'.p'), 'wb') as fp:
pickle.dump(corr_by_gene_neg, fp)
with open(os.path.join(path_to_file,'gene_correlations_sig_pos_'+method_name+'.p'), 'wb') as fp0:
pickle.dump(corr_by_gene_pos, fp0)
with open(os.path.join(path_to_file,'by_gene_corr.p'), 'wb') as fp1:
pickle.dump(corr_by_gene, fp1)
with open(os.path.join(path_to_file,'by_cell_corr.p'), 'wb') as fp2:
pickle.dump(corr_by_cell, fp2)
else:
corr_by_g_pos = open(os.path.join(path_to_file,'gene_correlations_sig_pos_'+method_name+'.p'), 'rb')
corr_by_g_neg = open(os.path.join(path_to_file,'gene_correlations_sig_neg_'+method_name+'.p'), 'rb')
corr_by_gene_pos = pickle.load(corr_by_g_pos)
corr_by_gene_neg = pickle.load(corr_by_g_neg)
cor_pos_df = pd.DataFrame(corr_by_gene_pos)
cor_neg_df = pd.DataFrame(corr_by_gene_neg)
sig_corr = cor_pos_df.append(cor_neg_df)
sig_corrs = pd.DataFrame(sig_corr[0], columns=["corr"])
if run_new:
sig_corrs.to_csv(os.path.join(path_to_file, title+'_counts_corr_sig_'+method_name+'.txt'), sep = '\t')
return sig_corrs
#corr_plot finds and plots all correlated genes, log turns on log scale, sort plots the genes in the rank order of the gene searched
def corr_plot(terms_to_search, df_by_gene, title, log=False, sort=True, sig_threshold=0.5):
sig_corrs = run_corr(df_by_gene, title, sig_threshold=sig_threshold)
for term_to_search in terms_to_search:
corr_tup = [(term_to_search, 1)]
neg = True
fig, ax = plt.subplots()
marker = itertools.cycle(('+', 'o', '*'))
linestyles = itertools.cycle(('--', '-.', '-', ':'))
for index, row in sig_corrs.iterrows():
if term_to_search in index:
neg = False
if index[0]==term_to_search:
corr_tup.append((index[1],row['corr']))
else:
corr_tup.append((index[0],row['corr']))
if neg:
print term_to_search+' not correlated.'
corr_tup.sort(key=itemgetter(1), reverse=True)
corr_df = pd.DataFrame(corr_tup, columns=['GeneID', 'Correlation'])
corr_df.to_csv(os.path.join(filename, title+'_Corr_w_'+term_to_search+'_list.txt'), sep = '\t', index=False)
for c in corr_tup:
print c
to_plot = [x[0] for x in corr_tup]
sns.set_palette(sns.cubehelix_palette(len(to_plot), start=1, rot=-.9, reverse=True))
try:
sorted_df = df_by_gene.sort([term_to_search])
log2_df = np.log2(df_by_gene[to_plot])
sorted_log2_df=np.log2(sorted_df[to_plot])
ylabel='CPM (log2)'
if sort and log:
ax = sorted_log2_df.plot()
xlabels = sorted_log2_df[to_plot].index.values
elif sort:
ax =sorted_df[to_plot].plot()
xlabels = sorted_df[to_plot].index.values
elif log:
ax = log2_df.plot()
ylabel= 'log2 FPKM'
xlabels = log2_df.index.values
else:
ax = df_by_gene[to_plot].plot()
xlabels = df_by_gene[to_plot].index.values
ax.set_xlabel('Cell #')
ax.set_ylabel(ylabel)
ax.set_title('Correlates with '+term_to_search, loc='right')
ax.xaxis.set_minor_locator(LinearLocator(numticks=len(xlabels)))
ax.set_xticklabels(xlabels, minor=True, rotation='vertical', fontsize=6)
ax.set_ylim([0, df_by_gene[to_plot].values.max()])
ax.tick_params(axis='x', labelsize=1)
if len(corr_tup) > 15:
l_labels = [str(x[0])+' '+"%.2f" % x[1] for x in corr_tup]
ax.legend(l_labels, loc='upper left', bbox_to_anchor=(0.01, 1.05), ncol=6, prop={'size':6})
else:
l_labels = [str(x[0])+' '+"%.2f" % x[1] for x in corr_tup]
ax.legend(l_labels, loc='upper left', bbox_to_anchor=(0.01, 1.05), ncol=4, prop={'size':8})
fig = plt.gcf()
fig.subplots_adjust(bottom=0.08, top=0.95, right=0.98, left=0.03)
plt.savefig(os.path.join(filename, title+'_corr_with_'+term_to_search+'.pdf'), bbox_inches='tight')
plt.close()
except KeyError:
print term_to_search+' not in this matrix'
pass
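#Build a {cell: (color, marker, group_name)} map from a tab-delimited group file
#so PCA and heatmap plots can be colored by cell group.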
def cell_color_map(cell_group_filename):
colors = ['b', 'g', 'r', 'm', 'c', 'orange', 'darkslateblue']
markers = ['o', 'v','D','*','x','h', 's']
cell_groups_df = pd.read_csv(os.path.join(path_to_file, cell_group_filename), delimiter= '\t')
label_map = {}
for i, col in enumerate(cell_groups_df.columns.tolist()):
for cell in cell_groups_df[col]:
if str(cell) != 'nan':
label_map[cell] = (colors[i],markers[i],col)
print label_map
return label_map
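#Run one-way ANOVA between every pair of cell groups and write per-pair tables
#of p-values, group means, and expression ratios.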
def multi_group_sig(full_by_cell_df, cell_group_filename):
cell_groups_df = pd.read_csv(os.path.join(path_to_file, cell_group_filename), delimiter= '\t')
group_name_list = cell_groups_df.columns.tolist()
group_pairs = list(set(itertools.permutations(group_name_list,2)))
gene_list = full_by_cell_df.index.tolist()
print group_pairs
for gp in group_pairs:
g_pvalue_dict = {}
index_list = []
sig_gene_list = []
cell_list1 = [c for c in cell_groups_df[gp[0]].tolist() if str(c) != 'nan']
cell_list2 = [c for c in cell_groups_df[gp[1]].tolist() if str(c) != 'nan']
df_by_cell_1 = full_by_cell_df[cell_list1]
df_by_cell_2 = full_by_cell_df[cell_list2]
df_by_gene_1 = df_by_cell_1.transpose()
df_by_gene_2 = df_by_cell_2.transpose()
for g in gene_list:
g_pvalue = scipy.stats.f_oneway(df_by_gene_1[g], df_by_gene_2[g])
if g_pvalue[0] > 0 and g_pvalue[1] <= 1:
g_pvalue_dict[g] = g_pvalue
if g not in [s[0] for s in sig_gene_list]:
sig_gene_list.append([g, g_pvalue[1]])
sig_gene_list.sort(key=lambda tup: tup[1])
pvalues = [p[1] for p in sig_gene_list]
gene_index = [ge[0] for ge in sig_gene_list]
by_gene_df = full_by_cell_df.transpose()
mean_log2_exp_list = []
sig_1_2_list = []
mean1_list = []
mean2_list = []
for sig_gene in gene_index:
sig_gene_df = by_gene_df[sig_gene]
mean_log2_exp_list.append(sig_gene_df.mean())
sig_cell_df = sig_gene_df.transpose()
mean_cell1 = sig_cell_df[cell_list1].mean()
mean1_list.append(mean_cell1)
mean_cell2 = sig_cell_df[cell_list2].mean()
mean2_list.append(mean_cell2)
ratio_1_2 = (mean_cell1+1)/(mean_cell2+1)
sig_1_2_list.append(ratio_1_2)
sig_df = pd.DataFrame({'pvalues':pvalues,'mean_all':mean_log2_exp_list,'mean_group1':mean1_list, 'mean_group2':mean2_list, 'ratio_1_2':sig_1_2_list}, index=gene_index)
cell_names_df = pd.DataFrame({'cells1':pd.Series(cell_list1, index=range(len(cell_list1))), 'cells2':pd.Series(cell_list2, index=range(len(cell_list2)))})
sig_df.to_csv(os.path.join(filename,'sig_'+gp[0]+'_'+gp[1]+'_pvalues.txt'), sep = '\t')
cell_names_df.to_csv(os.path.join(filename,'sig_'+gp[0]+'_'+gp[1]+'_cells.txt'), sep = '\t')
gene_number= select_gene_number
log2_expdf_cell, log2_expdf_gene = log2_oulierfilter(df_by_cell, plot=False)
if test_clust_stability:
stability_ratio = clust_stability(log2_expdf_gene)
#cc_gene_df = cell_cycle(hu_cc_gene_df, log2_expdf_gene)
if group_file:
label_map = cell_color_map(cell_group_filename)
else:
label_map=False
if group_sig_test:
multi_group_sig(log2_expdf_cell, cell_group_filename)
top_pca = plot_PCA(log2_expdf_gene, num_genes=gene_number, title='all_cells_pca', plot=False, label_map=label_map)
top_pca_by_gene = log2_expdf_gene[top_pca]
top_pca_by_cell = top_pca_by_gene.transpose()
cell_linkage, plotted_df_by_gene, col_order = clust_heatmap(top_pca, top_pca_by_gene, num_to_plot=gene_number, label_map=label_map)
#cell_dist, row_dist, row_clusters, link_mat, row_dendr = run_cluster(top_pca_by_gene)
cc = make_tree_json(cell_linkage, plotted_df_by_gene)
make_subclusters(cc, log2_expdf_cell, gene_corr_list=['KRT19'])
sig_gene_list = find_twobytwo(cc, top_pca_by_cell, log2_expdf_cell)
#augmented_dendrogram(row_clusters, labels=top_pca_by_cell.columns.tolist(), leaf_rotation=90, leaf_font_size=8)
| mit | -4,946,865,316,839,125,000 | 43.890244 | 175 | 0.609587 | false | 2.931251 | false | false | false |
lancevalour/adbpy | adbpy/device.py | 1 | 2819 | """
Device class to control the connected Android device through Android Debug Bridge.
"""
import subprocess
import os
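# Thin wrapper around the adb command-line tool: each method shells out to
# "adb -s <device_id> ..." for the attached Android device.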
class Device:
def __init__(self, device_id):
self.__device_id = device_id
# Device properties
def get_id(self):
return self.__device_id
def get_screen_density(self):
output = subprocess.check_output("adb -s " + self.__device_id + " shell wm density")
return int(output.strip().split(":")[1].strip())
def get_screen_size(self):
output = subprocess.check_output("adb -s " + self.__device_id + " shell wm size")
size = output.strip().split(":")[1].strip()
sizes = size.split("x")
return [int(sizes[0]), int(sizes[1])]
def set_screen_density(self, density):
subprocess.call("adb -s " + self.__device_id + " shell wm density " + str(density))
# Installation
def install_apk(self, apk_path):
print("adb -s " + self.__device_id + " install " + str(apk_path))
subprocess.call("adb -s " + self.__device_id + " install " + str(apk_path))
# Control
def tap(self, x, y):
subprocess.call("adb " + "-s " + self.__device_id + " shell input tap " + str(x) + " " + str(y))
def tap_back(self):
subprocess.call("adb " + "-s " + self.__device_id + " shell input keyevent " + "KEYCODE_BACK")
def tap_home(self):
subprocess.call("adb " + "-s " + self.__device_id + " shell input keyevent --longpress " + "KEYCODE_HOME")
def tap_menu(self):
subprocess.call("adb " + "-s " + self.__device_id + " shell input keyevent " + "KEYCODE_MENU")
def swipe(self, x1, y1, x2, y2, duration):
subprocess.call("adb " + "-s " + self.__device_id + " shell input swipe "
+ str(x1) + " " + str(y1) + " " + str(x2) + " " + str(y2) + " " + str(duration))
def long_press(self, x, y, duration):
self.swipe(x, y, x, y, duration=duration)
# Screen capture
def take_screenshot(self, name, dst_path=os.path.abspath(os.path.dirname(__file__))):
subprocess.call("adb " + "-s " + self.__device_id + " shell screencap /sdcard/" + name + ".png")
subprocess.call("adb " + "-s " + self.__device_id + " pull /sdcard/" + name + ".png " + dst_path)
subprocess.call("adb " + "-s " + self.__device_id + " shell rm /sdcard/" + name + ".png")
def record_screen(self, name, dst_path=os.path.abspath(os.path.dirname(__file__)), time=10):
subprocess.call("adb " + "-s " + self.__device_id + " shell screenrecord --time-limit " + str(
time) + " /sdcard/" + name + ".mp4")
subprocess.call("adb " + "-s " + self.__device_id + " pull /sdcard/" + name + ".mp4 " + dst_path)
subprocess.call("adb " + "-s " + self.__device_id + " shell rm /sdcard/" + name + ".mp4")
| mit | -1,329,071,152,124,937,000 | 42.369231 | 114 | 0.561901 | false | 3.247696 | false | false | false |