repo_name (string, lengths 5-92) | path (string, lengths 4-221) | copies (string, 19 classes) | size (string, lengths 4-6) | content (string, lengths 766-896k) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
njwilson23/rasterio
|
rasterio/tool.py
|
1
|
5429
|
"""
Implementations of various common operations, like `show()` for displaying an
array with matplotlib, and `stats()` for computing min/max/avg. Most can
handle a numpy array or `rasterio.Band()`. Primarily supports `$ rio insp`.
"""
from __future__ import absolute_import
import code
import collections
import logging
import warnings
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
except RuntimeError as e:
# Certain environment configurations can trigger a RuntimeError like:
# Trying to import matplotlibRuntimeError: Python is not installed as a
# framework. The Mac OS X backend will not be able to function correctly
# if Python is not installed as a framework. See the Python ...
warnings.warn(str(e), RuntimeWarning, stacklevel=2)
plt = None
import numpy
import rasterio
from rasterio.five import zip_longest
logger = logging.getLogger('rasterio')
Stats = collections.namedtuple('Stats', ['min', 'max', 'mean'])
# Collect dictionary of functions for use in the interpreter in main()
funcs = locals()
def show(source, cmap='gray', with_bounds=True):
"""
Display a raster or raster band using matplotlib.
Parameters
----------
source : array-like or (raster dataset, bidx)
If array-like, should be of format compatible with
matplotlib.pyplot.imshow. If the tuple (raster dataset, bidx),
selects band `bidx` from raster.
cmap : str (opt)
Specifies the colormap to use in plotting. See
        matplotlib.colors.Colormap. Default is 'gray'.
with_bounds : bool (opt)
Whether to change the image extent to the spatial bounds of the image,
rather than pixel coordinates. Only works when source is
(raster dataset, bidx).
"""
if isinstance(source, tuple):
arr = source[0].read(source[1])
xs = source[0].res[0] / 2.
ys = source[0].res[1] / 2.
if with_bounds:
extent = (source[0].bounds.left - xs, source[0].bounds.right - xs,
source[0].bounds.bottom - ys, source[0].bounds.top - ys)
else:
extent = None
else:
arr = source
extent = None
if plt is not None:
imax = plt.imshow(arr, cmap=cmap, extent=extent)
fig = plt.gcf()
fig.show()
else:
raise ImportError("matplotlib could not be imported")
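# Illustrative usage (assumed, not part of the original module; requires
# matplotlib and a readable raster file such as 'example.tif'):
#
#     with rasterio.open('example.tif') as src:
#         show((src, 1))     # band 1, extent set to the dataset's spatial bounds
#         show(src.read(1))  # plain numpy array, plotted in pixel coordinates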
def stats(source):
"""Return a tuple with raster min, max, and mean.
"""
if isinstance(source, tuple):
arr = source[0].read(source[1])
else:
arr = source
return Stats(numpy.min(arr), numpy.max(arr), numpy.mean(arr))
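# Illustrative usage (assumed, not part of the original module): with a dataset
# opened as `src`, stats((src, 1)) reads band 1 and returns a namedtuple like
# Stats(min=..., max=..., mean=...); a plain numpy array is also accepted.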
def show_hist(source, bins=10, masked=True, title='Histogram'):
"""
Easily display a histogram with matplotlib.
Parameters
----------
bins : int, optional
Compute histogram across N bins.
    source : np.array or rasterio.Band or tuple(dataset, bidx)
Input data to display. The first three arrays in multi-dimensional
arrays are plotted as red, green, and blue.
masked : bool, optional
When working with a `rasterio.Band()` object, specifies if the data
should be masked on read.
title : str, optional
Title for the figure.
"""
if plt is None:
raise ImportError("Could not import matplotlib")
if isinstance(source, (tuple, rasterio.Band)):
arr = source[0].read(source[1], masked=masked)
else:
arr = source
# The histogram is computed individually for each 'band' in the array
# so we need the overall min/max to constrain the plot
rng = arr.min(), arr.max()
    if len(arr.shape) == 2:
arr = [arr]
colors = ['gold']
else:
colors = ('red', 'green', 'blue', 'violet', 'gold', 'saddlebrown')
# If a rasterio.Band() is given make sure the proper index is displayed
# in the legend.
if isinstance(source, (tuple, rasterio.Band)):
labels = [str(source[1])]
else:
labels = (str(i + 1) for i in range(len(arr)))
    # This loop should add a single plot for each band in the input array,
    # regardless of whether the number of bands exceeds the number of colors.
# The colors slicing ensures that the number of iterations always
# matches the number of bands.
# The goal is to provide a curated set of colors for working with
# smaller datasets and let matplotlib define additional colors when
# working with larger datasets.
for bnd, color, label in zip_longest(arr, colors[:len(arr)], labels):
plt.hist(
bnd.flatten(),
bins=bins,
alpha=0.5,
color=color,
label=label,
range=rng
)
plt.legend(loc="upper right")
plt.title(title, fontweight='bold')
plt.grid(True)
plt.xlabel('DN')
plt.ylabel('Frequency')
fig = plt.gcf()
fig.show()
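# Illustrative usage (assumed, not part of the original module):
#
#     with rasterio.open('example.tif') as src:
#         show_hist((src, 1), bins=50, title='Band 1 histogram')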
def main(banner, dataset, alt_interpreter=None):
""" Main entry point for use with python interpreter """
local = dict(funcs, src=dataset, np=numpy, rio=rasterio, plt=plt)
if not alt_interpreter:
code.interact(banner, local=local)
elif alt_interpreter == 'ipython':
import IPython
IPython.InteractiveShell.banner1 = banner
IPython.start_ipython(argv=[], user_ns=local)
else:
raise ValueError("Unsupported interpreter '%s'" % alt_interpreter)
return 0
|
bsd-3-clause
| 3,904,077,660,816,779,300 | 30.201149 | 78 | 0.63695 | false | 3.988979 | false | false | false |
MixedEmotions/27_emotion_video_dcu
|
emotionService/emotionService.py
|
1
|
5891
|
# coding: utf-8
# In[ ]:
from __future__ import division
import logging
import os
import xml.etree.ElementTree as ET
from senpy.plugins import EmotionPlugin, SenpyPlugin
from senpy.models import Results, EmotionSet, Entry, Emotion, Error
logger = logging.getLogger(__name__)
import numpy as np
import math, itertools
from collections import defaultdict
import gzip
from datetime import datetime
import requests, shutil
import subprocess
import sys
import validators
from haolin.ESClass import DCU_EmotionService
import json
class emotionService(EmotionPlugin):
def __init__(self, info, *args, **kwargs):
super(emotionService, self).__init__(info, *args, **kwargs)
self.name = info['name']
self.id = info['module']
self._info = info
local_path = os.path.dirname(os.path.abspath(__file__))
self._dimensions = ['V','A']
self._centroid_mappings = {
"V": "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#valence",
"A": "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal",
"D": "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance"
}
self._storage_path = '/senpy-plugins/tmp'
def activate(self, *args, **kwargs):
st = datetime.now()
self._predictor = DCU_EmotionService()
logger.info("{} {}".format(datetime.now() - st, "predictor loaded"))
st = datetime.now()
logger.info("{} {}".format(datetime.now() - st, "active"))
logger.info("%s plugin is ready to go!" % self.name)
def deactivate(self, *args, **kwargs):
try:
logger.info("%s plugin is being deactivated..." % self.name)
except Exception:
print("Exception in logger while reporting deactivation of %s" % self.name)
# CUSTOM FUNCTION
def _download_file_old(self, saveFolder = '/senpy-plugins/tmp', url = "http://mixedemotions.insight-centre.org/tmp/little-girl.mp4"):
logger.info("{} {}".format(datetime.now(), "downloading "+url))
st = datetime.now()
global dump
downloadedFile = requests.get(url, stream=True)
dump = downloadedFile.raw
path, filename = os.path.dirname(url), os.path.basename(url)
with open(os.path.join(saveFolder, filename), 'wb') as file:
shutil.copyfileobj(dump, file)
del dump
del downloadedFile
logger.info("{} {}".format(datetime.now() - st, "downloaded "+url))
return os.path.join(saveFolder,filename)
def _download_file(self, saveFolder = '/senpy-plugins/tmp', url = "http://mixedemotions.insight-centre.org/tmp/little-girl.mp4"):
st = datetime.now()
logger.info("{} {}".format(datetime.now(), "downloading "+url))
path, filename = os.path.dirname(url), os.path.basename(url)
outfilename = os.path.join(saveFolder,filename)
subprocess.call(['wget', '-O', outfilename, url])
logger.info("{} {}".format(datetime.now() - st, "downloaded "+url))
return outfilename
def _remove_file(self, filename):
st = datetime.now()
logger.info("{} {}".format(datetime.now(), "deleting "+ filename))
subprocess.call(['rm', '-f', filename])
logger.info("{} {}".format(datetime.now() - st, "deleted "+filename))
def _convert_longformat_to_shortformat(self, json_long):
json_long = json.loads(json_long)
json_short = {
'V': np.mean([json_long[frame]['0']['emotion']['pad:pleasure'] for frame in json_long]) ,
'A': np.mean([json_long[frame]['0']['emotion']['pad:arousal' ] for frame in json_long])
}
return json_short
def _extract_features(self, filename, convert=True):
# predictor = DCU_EmotionService()
json_res = self._predictor.analysis_video(filename, vis=False)
if convert:
json_res = self._convert_longformat_to_shortformat(json_res)
return json_res
def analyse(self, **params):
logger.debug("emotionService with params {}".format(params))
filename = params.get("i", None)
## FILE MANIPULATIONS ------------------------------- \
if validators.url(filename):
filename = self._download_file(saveFolder = self._storage_path, url = filename)
else:
filename = os.path.join(self._storage_path,filename)
logger.info("{} {}".format(datetime.now(), filename))
if not os.path.isfile(filename):
raise Error("File %s does not exist" % filename)
## EXTRACTING FEATURES ------------------------------- \
feature_set = self._extract_features(filename, convert=True)
# self._remove_file(filename)
## GENERATING OUTPUT --------------------------------- \
response = Results()
entry = Entry()
entry['filename'] = os.path.basename(filename)
emotionSet = EmotionSet()
emotionSet.id = "Emotions"
emotion1 = Emotion()
for dimension in self._dimensions:
emotion1[ self._centroid_mappings[dimension] ] = 5*(1+feature_set[dimension])
emotionSet.onyx__hasEmotion.append(emotion1)
entry.emotions = [emotionSet,]
response.entries.append(entry)
return response
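# Illustrative usage sketch (assumed, not part of the original plugin; presumes
# a senpy runtime supplies the plugin's `info` dict and that the input video is
# reachable):
#
#     plugin = emotionService(info)
#     plugin.activate()
#     results = plugin.analyse(i='http://mixedemotions.insight-centre.org/tmp/little-girl.mp4')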
|
apache-2.0
| -8,383,515,891,701,562,000 | 32.282486 | 137 | 0.55101 | false | 4.029412 | false | false | false |
muccc/luftschleuse2
|
software/lockd/announce.py
|
1
|
1588
|
# This file is part of lockd, the daemon of the luftschleuse2 project.
#
# See https://github.com/muccc/luftschleuse2 for more information.
#
# Copyright (C) 2013 Tobias Schneider <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import socket
class Announcer:
def __init__(self, host, port):
self.timestamp = time.time()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(('0.0.0.0', 2080))
self.target = (host, port)
self.message = 'unknown'
def tick(self):
if time.time() - self.timestamp > 1:
self.announce()
self.timestamp = time.time()
def announce(self):
self.sock.sendto(self.message, self.target)
def update_state(self, state):
self.message = state.get_state_as_string()
f = open("/tmp/system_state", "w")
f.write(state.get_state_as_string() + "\n")
f.close()
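# Illustrative usage (assumed, not part of lockd; presumes a `state` object
# exposing get_state_as_string()):
#
#     announcer = Announcer('255.255.255.255', 2080)
#     announcer.update_state(state)
#     while True:
#         announcer.tick()   # re-broadcasts at most once per second
#         time.sleep(0.1)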
|
gpl-3.0
| 6,334,732,133,319,795,000 | 35.930233 | 74 | 0.65869 | false | 3.544643 | false | false | false |
Harhoy/transport
|
transport.py
|
1
|
9259
|
from __future__ import division
import numpy as np
import math as m
from easygui import multenterbox
import pandas as pd
import matplotlib.pyplot as plt
import math as m
def import_xl(file_path):
df = pd.read_excel(file_path,header = None)
df = df.values
return df
def export_xl(file_path,sheets):
writer = pd.ExcelWriter(file_path)
for sheet,name in sheets.items():
df = pd.DataFrame(name)
df.to_excel(writer,sheet)
writer.save()
#Extracts a column
def column(matrix, i):
return [row[i] for row in matrix]
#Extracts a row
def row(matrix, i):
return [column[i] for column in matrix]
#Selection sort O(n2)
def selection_sort(array):
n = len(array)
for i in range(0,n):
smallest = i
for j in range(i,n):
if array[j]<array[smallest]:
smallest = j
copy = array[i]
array[i] = array[smallest]
array[smallest] = copy
return array
#Checks whether two lists contain at least one common number
def common_node(array_1,array_2):
x = selection_sort(array_1)
y = selection_sort(array_2)
i = 0
j = 0
share = 0
stop = max([len(x),len(y)])-1
while min([i,j])< stop:
if x[i]>y[j]:
j+=1
elif x[i]<y[j]:
i+=1
else:
share = 1
j = 10**6
i = 10**6
return share
def common_node_count(array_1,array_2):
x = selection_sort(array_1)
y = selection_sort(array_2)
i = 0
j = 0
share = 0
while i < len(x) and j < len(y):
if x[i]>y[j]:
j+=1
elif x[i]<y[j]:
i+=1
else:
share += 1
j +=1
i +=1
return share
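#Illustrative examples (assumed, not in the original file):
#    common_node([1, 4, 7], [2, 4, 9])        # 1, the lists share the value 4
#    common_node_count([1, 2, 3], [2, 3, 5])  # 2, the values 2 and 3 are shared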
#SHORTEST PATH FUNCTIONS
#Builds a graph from an edge list
def make_graph(array):
#nodes = common_node_count(column(array,0),column(array,1))
nodes = 35
    matrix = np.full((nodes,nodes),10**6) #Creates a matrix of large placeholder values
    for i in range(0,len(array)): #Main loop
        #Subtract one for compatibility with Python array indexing
matrix[array[i][1]-1][array[i][0]-1] = array[i][2]
matrix[array[i][0]-1][array[i][1]-1] = array[i][2]
np.fill_diagonal(matrix, 0)
return matrix
#Builds an n x n length matrix
def floyd_warshall(array):
matrix = make_graph(array)
#nodes = common_node_count(column(array,0),column(array,1))
nodes = 35
pred = np.full((nodes,nodes),-1)
for i in range(0,nodes):
for j in range(0,nodes):
if i != j:
pred[i][j] = i
for k in range(0,nodes):
for i in range(0,nodes):
for j in range(0,nodes):
if matrix[i][j] > matrix[i][k] + matrix[k][j]:
matrix[i][j] = matrix[i][k] + matrix[k][j]
pred[i][j] = pred[k][j]
return matrix,pred
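#Illustrative usage (assumed, not in the original file). Links are rows of
#[from_node, to_node, length] with 1-based node ids (at most 35, since
#make_graph hard-codes 35 nodes); the returned matrices are 0-indexed.
#
#    links = [[1, 2, 4], [2, 3, 2], [1, 3, 9]]
#    dist, pred = floyd_warshall(links)
#    dist[0][2]            # 6, going via node 2
#    get_path(pred, 0, 2)  # [0, 1, 2], reconstructed with get_path() below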
#Loads data from a csv file into a network array
def get_network(net_csv):
graf = open(net_csv,'r')
lenker=0
for line in graf:
lenker+=1
graf_edit = np.full((lenker, 3),0)
graf = open(net_csv,'r')
k = 0
for line in graf:
stuff = line.split(";")
graf_edit[k][0] = float(stuff[0])
graf_edit[k][1] = float(stuff[1])
temp = stuff[2].split('\n')[0]
graf_edit[k][2] = float(temp)
k+=1
return graf_edit
#Builds a path vector
def path(p,i,j,path_vec):
if i == j:
path_vec.append(i)
else:
path(p, i, p[i][j],path_vec)
path_vec.append(j)
#Retrieves a specific path
def get_path(p,i,j):
#j = j + 1
path_vec=[]
path(p,i,j,path_vec)
#for i in range(1,len(path_vec)):
# path_vec[i] = path_vec[i] - 1
return path_vec
#Builds an adjacency matrix (unfinished)
def build_adj(pred):
adj_mat = np.zeros((len(pred),len(pred)))
array_a = []
array_b = []
for i in range(1,len(pred)):
for j in range(1,len(pred)):
array_a = get_path(pred,i,j)
print array_a
array_b = get_path(pred,2,10)
print array_b
try:
adj_mat[1][j] = common_node(array_a,array_b)
except:
adj_mat[1][j] = 0
print adj_mat[1][j]
return adj_mat
#Network loader
#Arguments: (1) predecessor matrix (2) number of nodes (3) network file (4) OD matrix
def network_loader(graf,net,od,pred):
    #Number of nodes
n = len(od)-1
    #Editing
for k in range(0,len(net)):
        net[k][3]=0 #Resets the trip counts
        net[k][2]=graf[k][2] #Inserts updated distances from the graph
    #Assigns trips onto the network
for i in range(0,n):
for j in range(0,n):
path = get_path(pred,i,j)
len_path=get_len_path(path)
for h in range(0,len_path):
for k in range(0,len(net)):
if net[k][0] == path[h]+1 and net[k][1] == path[1+h]+1:
net[k][3] += int(od[i][j])
elif net[k][1] == path[h]+1 and net[k][0] == path[1+h]+1:
net[k][3] += int(od[i][j])
return net
#a=get_path(pred,5,12)
#GRAVITY MODEL FUNCTIONS
def deter_mat_make(length_mat):
deter_mat = np.zeros((len(length_mat),len(length_mat)))
for i in range(0,len(length_mat)):
for j in range(0,len(length_mat)):
deter_mat[i][j] = deter(length_mat[i][j])
return deter_mat
def deter(length):
return 2.71**(beta*length)
def sumproduct(list1,list2):
sums = 0
for i in range(0,len(list1)):
sums += list1[i]*list2[i]
return sums
def gravity(origin, destination, length_mat):
#Initialization
    deter_mat = deter_mat_make(length_mat) #Builds the deterrence matrix
    dimension = len(origin) #Gets the matrix dimension
    alpha = [1]*(dimension) #Initializes the alpha vector
    beta = [1]*(dimension) #Initializes the beta vector
    largest = 10**6 #Initializes the largest deviation
    alpha_last = alpha #Initializes the previous alpha
    beta_last = beta #Initializes the previous beta
    k = 0 #Initializes the iteration counter
iterasjoner = []
    #Main loop
while largest > .00001:
        #Updates the balancing factors
for p in range(0,dimension):
alpha[p] = origin[p]/(sumproduct(beta_last,column(deter_mat,p)))
beta[p] = destination[p]/(sumproduct(alpha,row(deter_mat,p)))
largest = 0
        #Loops to find the largest element
for j in range(0,dimension):
current = alpha[j]*sumproduct(beta,column(deter_mat,j))-origin[j]
if current>largest:
largest = current
        #Stores the previous beta
beta_last = beta
iterasjoner.append(largest)
        #Adds one iteration
k+=1
        print "Convergence, gravity model", largest
if k == maxiter:
largest = 0
return alpha,beta,k,iterasjoner
def create_od(origin,destination, length_mat):
alpha,beta,k,iterasjoner = gravity(origin, destination, length_mat)
deter_mat = deter_mat_make(length_mat)
od = np.zeros((len(origin),len(origin)))
for i in range(0,len(origin)):
for j in range(0,len(origin)):
od[i][j] = alpha[i]*beta[j]*deter_mat[i][j]
return od,alpha,beta,k,iterasjoner
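#Illustrative usage of the doubly constrained gravity model (assumed, not in
#the original file). Note that deter() reads a module-level `beta` and
#gravity() reads a module-level `maxiter`, so both must be defined first.
#
#    beta = -0.1
#    maxiter = 100
#    origins = [100, 200, 150]
#    destinations = [150, 180, 120]
#    lengths = [[1, 5, 9], [5, 1, 4], [9, 4, 1]]
#    od, alpha_f, beta_f, n_iter, gaps = create_od(origins, destinations, lengths)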
def calc_pt_matrix(od,length_mat):
out_od = np.zeros((len(od),len(od)))
for i in range(0,len(od)):
for j in range(0,len(od)):
out_od[i][j] = int(out_od[i][j])*length_mat[i][j]
return out_od
def get_min(net):
smallest = 10**6
smallest_id = 10**6
for i in range(0,len(net)):
if net[i][3]/net[i][2]<smallest and net[i][5]==0:
smallest = net[i][3]/net[i][2]
smallest_id = i
return smallest_id,smallest
def change_graph(graph,net):
graph_out = graph
for i in range(0,len(net)):
if net[i][5]==1:
graph_out[i][2]=k_just*graph_out[i][2]
return graph_out
def production(net):
sumcost = 0
for i in range(0,len(net)):
if net[i][5]!=1:
sumcost += (net[i][3]/capacity)*net[i][2]
return sumcost
def sum_pass(net):
sumpass = 0
for i in range(0,len(net)):
sumpass+=net[i][3]
return sumpass
def get_len_path(path):
len_path = 0
if len(path) < 3:
len_path = 0
elif len(path) == 3:
len_path = 2
else:
len_path=int(len(path)/2)+int(len(path)%2)+1
return len_path
def obj(od,length_mat,net,prodgoal):
return (production(net)*kmk*dogn-prodgoal)**2*(k_just-1)*capacity/.9+time_cost(od,length_mat)
def time_cost(od,length_mat):
cost = 0
for i in range(0,len(od)-1):
for j in range(0,len(od)-1):
cost += od[i][j]*length_mat[i][j]
return cost
def get_zero_net(net):
zero_net = np.zeros((len(net),6))
for i in range(0,len(net)):
zero_net[i][2] = net[i][2]
zero_net[i][3] = net[i][3]
zero_net[i][5] = net[i][5]
return zero_net
def update_zero_net(net,zero_net):
for i in range(0,len(net)):
zero_net[i][5] = net[i][5]
return zero_net
|
mit
| -5,535,627,304,960,529,000 | 26.804805 | 97 | 0.550491 | false | 2.888924 | false | false | false |
cvegaj/ElectriCERT
|
venv3/lib/python3.6/site-packages/cert_schema/model.py
|
1
|
12691
|
"""
class ProofType(Enum):
merkle_proof_2017 = 0
class SignatureType(Enum):
signed_content = 0,
signed_transaction = 1
Signature
---------
|
|-- EmbeddedSignature: signs "contents" directly
|
|-- TransactionSignature: "contents" are embedded in transaction. Merkle proof for multiple
"""
import re
import sys
import pytz
from dateutil.parser import parse
from cert_schema import *
V1_1_REGEX = re.compile('[0-9a-fA-F]{24}')
V1_2_REGEX = re.compile('[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}')
USE_PREFIX = False
def scope_name(name):
"""
TBD whether we want to include prefix. Doing this for now. Default is no prefix
:param name:
:return:
"""
if USE_PREFIX:
return BLOCKCERTS_PREFIX + name
else:
return name
class SignatureLine(object):
def __init__(self, image, name=None, job_title=None):
self.image = image
self.name = name
self.job_title = job_title
class ProofType(Enum):
merkle_proof_2017 = 0
class SignatureType(Enum):
signed_content = 0,
signed_transaction = 1
class Signature(object):
def __init__(self, signature_type, content_to_verify):
self.signature_type = signature_type
self.content_to_verify = content_to_verify
class TransactionSignature(Signature):
"""
Content is embedded in transaction in some manner
"""
def __init__(self, content_to_verify, transaction_id, merkle_proof=None):
super(TransactionSignature, self).__init__(SignatureType.signed_transaction, content_to_verify)
self.transaction_id = transaction_id
self.merkle_proof = merkle_proof
class EmbeddedSignature(Signature):
"""
Content is signed directly
"""
def __init__(self, content_to_verify, signature_value):
super(EmbeddedSignature, self).__init__(SignatureType.signed_content, content_to_verify)
self.signature_value = signature_value
class MerkleProof(object):
def __init__(self, target_hash, merkle_root, proof_type, original_proof_json):
self.target_hash = target_hash
self.merkle_root = merkle_root
self.proof_type = proof_type
self.proof_json = original_proof_json
from copy import deepcopy
chainpoint_proof = deepcopy(original_proof_json)
chainpoint_proof['type'] = 'ChainpointSHA256v2'
self.chainpoint_proof = chainpoint_proof
class Issuer(object):
def __init__(self, id, name, image, revocation_url=None):
self.id = id
self.name = name
self.image = image
self.revocation_url = revocation_url
class BlockchainCertificate(object):
def __init__(self, version, uid, recipient_name, recipient_public_key, title, description, signature_image,
issued_on, expires, subtitle, signatures, certificate_json, txid, issuer, revocation_addresses=[]):
self.version = version
self.uid = uid
self.recipient_name = recipient_name
self.recipient_public_key = recipient_public_key
self.title = title
self.description = description
self.signature_image = signature_image
self.issued_on = issued_on
self.expires = expires
self.subtitle = subtitle
self.signatures = signatures
self.certificate_json = certificate_json
self.txid = txid
self.issuer = issuer
self.revocation_addresses = revocation_addresses
def __str__(self):
sb = []
for key in self.__dict__:
_value = self.__dict__[key]
if _value and isstring(_value) and 'data:image/png;base64' in str(_value):
mapped_value = '<base64_encoded_image>'
elif _value:
mapped_value = _value
else:
mapped_value = '<None>'
sb.append("{key}='{value}'".format(key=key, value=mapped_value))
return ', '.join(sb)
def __repr__(self):
return self.__str__()
def isstring(s):
if (sys.version_info[0] >= 3):
return isinstance(s, str)
return isinstance(s, basestring)
def parse_issuer(issuer_json):
if 'revocationList' in issuer_json:
revocation_list = issuer_json['revocationList']
else:
revocation_list = None
return Issuer(issuer_json['id'], issuer_json['name'], issuer_json['image'], revocation_list)
def detect_version(certificate_json):
# assumes it's a certificate. Maybe add some schema validation
if not '@context' in certificate_json:
return BlockcertVersion.V1_1
context = certificate_json['@context']
if isinstance(context, list):
version_marker = context[-1]
else:
version_marker = context
if 'v1' in version_marker:
return BlockcertVersion.V1_2
elif '2.0-alpha' in version_marker:
return BlockcertVersion.V2_ALPHA
elif '2.0' in version_marker or 'v2' in version_marker:
return BlockcertVersion.V2
raise UnknownBlockcertVersionException()
def is_v1_uid(uid):
if V1_1_REGEX.search(uid):
return True
else:
return False
def parse_chainpoint_proof(proof_json):
proof_type = ProofType.merkle_proof_2017
return MerkleProof(proof_json['targetHash'], proof_json['merkleRoot'], proof_type, proof_json)
def parse_date(raw_date):
if raw_date is None:
return None
parsed_date = parse(raw_date)
utc = pytz.UTC
if parsed_date.tzinfo is None or parsed_date.tzinfo.utcoffset(parsed_date) is None:
parsed_date = utc.localize(parsed_date)
return parsed_date
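# Illustrative behaviour (assumed, not part of the original module):
#   parse_date('2016-05-26T18:00:00+00:00')  ->  timezone-aware datetime (UTC)
#   parse_date('2016-05-26')                 ->  naive input localized to UTC
#   parse_date(None)                         ->  None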
def parse_expires_date(assertion):
if 'expires' in assertion:
return parse_date(assertion['expires'])
else:
return None
def get_value_or_default(node, field):
value = None
if field in node:
value = node[field]
return value
def parse_v2_blockchain_certificate(certificate_json, version_marker):
assertion = certificate_json
uid = assertion['id']
badge = assertion['badge']
recipient = assertion['recipient']
issuer = parse_issuer(badge['issuer'])
issued_on = parse_date(assertion['issuedOn'])
signature = assertion[scope_name('signature')]
txid = signature['anchors'][0]['sourceId']
merkle_proof = parse_chainpoint_proof(signature)
signature_lines = []
if scope_name('signatureLines') in badge:
signature_lines_raw = badge[scope_name('signatureLines')]
for l in signature_lines_raw:
image = l['image']
name = get_value_or_default(l, 'name')
job_title = get_value_or_default(l, 'job_title')
signature_lines.append(SignatureLine(image, name, job_title))
subtitle = get_value_or_default(badge, 'subtitle')
if version_marker == BlockcertVersion.V2_ALPHA:
recipient_profile = recipient[scope_name('recipientProfile')]
else:
recipient_profile = certificate_json[scope_name('recipientProfile')]
recipient_public_key_full = recipient_profile['publicKey']
recipient_public_key = str.split(str(recipient_public_key_full), ':')[1]
import copy
document_json = copy.deepcopy(certificate_json)
del document_json['signature']
transaction_signature = TransactionSignature(document_json, txid, merkle_proof)
return BlockchainCertificate(version_marker,
uid,
recipient_profile['name'],
recipient_public_key,
badge['name'],
badge['description'],
signature_lines,
issued_on,
parse_expires_date(assertion),
subtitle,
[transaction_signature],
certificate_json,
txid,
issuer)
def parse_v1_2_blockchain_certificate(certificate_json):
document = certificate_json['document']
receipt = certificate_json['receipt']
certificate = document['certificate']
assertion = document['assertion']
recipient = document['recipient']
recipient_public_key = recipient['publicKey']
issued_on = parse_date(assertion['issuedOn'])
issuer = parse_issuer(certificate['issuer'])
assertion_uid = assertion['uid']
txid = receipt['anchors'][0]['sourceId']
signature_lines = []
if 'image:signature' in assertion:
signature_lines.append(SignatureLine(assertion['image:signature']))
subtitle = get_value_or_default(certificate, 'subtitle')
recipient_revocation_address = get_value_or_default(recipient, 'revocationKey')
revocation_addresses = [recipient_public_key]
if recipient_revocation_address:
revocation_addresses.append(recipient_revocation_address)
embedded_signature = EmbeddedSignature(assertion_uid, document['signature'])
transaction_signature = TransactionSignature(document, txid, parse_chainpoint_proof(receipt))
return BlockchainCertificate(BlockcertVersion.V1_2,
assertion_uid,
recipient['givenName'] + ' ' + recipient['familyName'],
recipient_public_key,
certificate['name'],
certificate['description'],
signature_lines,
issued_on,
parse_expires_date(assertion),
subtitle,
[embedded_signature, transaction_signature],
certificate_json,
txid,
issuer,
revocation_addresses)
def parse_v1_1_blockchain_certificate(json_certificate, txid, certificate_bytes):
subtitle = json_certificate['certificate']['subtitle']['content']
display_subtitle = json_certificate['certificate']['subtitle']['display']
if display_subtitle in ['true', 'True', 'TRUE']:
subtitle = subtitle
else:
subtitle = None
issuer = parse_issuer(json_certificate['certificate']['issuer'])
issued_on = parse_date(json_certificate['assertion']['issuedOn'])
recipient_pubkey = json_certificate['recipient']['pubkey']
assertion_uid = json_certificate['assertion']['uid']
revocation_addresses = [recipient_pubkey]
embedded_signature = EmbeddedSignature(assertion_uid, json_certificate['signature'])
transaction_signature = TransactionSignature(certificate_bytes, txid)
signature_lines = []
if 'image:signature' in json_certificate['assertion']:
signature_lines.append(SignatureLine(json_certificate['assertion']['image:signature']))
return BlockchainCertificate(BlockcertVersion.V1_1,
assertion_uid,
json_certificate['recipient']['givenName'] + ' ' + json_certificate['recipient'][
'familyName'],
recipient_pubkey,
json_certificate['certificate']['title'],
json_certificate['certificate']['description'],
signature_lines,
issued_on,
parse_expires_date(json_certificate['assertion']),
subtitle,
[embedded_signature, transaction_signature],
json_certificate,
txid,
issuer,
revocation_addresses)
def to_certificate_model(certificate_json, txid=None, certificate_bytes=None):
version = detect_version(certificate_json)
if version == BlockcertVersion.V1_1:
if not txid or not certificate_bytes:
raise InvalidCertificateError('V1.1 Blockchain Certificates require a transaction id and raw bytes')
return parse_v1_1_blockchain_certificate(certificate_json, txid, certificate_bytes)
elif version == BlockcertVersion.V1_2:
return parse_v1_2_blockchain_certificate(certificate_json)
elif version == BlockcertVersion.V2 or version == BlockcertVersion.V2_ALPHA:
return parse_v2_blockchain_certificate(certificate_json, version)
else:
raise UnknownBlockcertVersionException(version)
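# Illustrative usage (assumed, not part of the original module; `json` is not
# imported here, so load the certificate yourself):
#
#     import json
#     with open('certificate.json') as f:
#         cert = to_certificate_model(json.load(f))
#     print(cert.version, cert.uid, cert.txid)
#
# Note that V1.1 certificates additionally require the txid and raw bytes.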
|
gpl-3.0
| 154,558,770,172,818,700 | 33.961433 | 116 | 0.60405 | false | 4.265882 | false | false | false |
smurfix/HomEvenT
|
modules/path.py
|
1
|
1945
|
# -*- coding: utf-8 -*-
##
## Copyright © 2007, Matthias Urlichs <[email protected]>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License (included; see the file LICENSE)
## for more details.
##
"""\
This code implements primitive "if true" and "if false" checks.
"""
from homevent.check import Check,register_condition,unregister_condition
from homevent.module import Module
import os
class ExistsPathCheck(Check):
name="exists path"
doc="Check if there's something behind that path"
def check(self,*args):
assert len(args) == 1,"Need exactly one argument (file name)"
return os.path.exists(args[0])
class ExistsFileCheck(Check):
name="exists file"
doc="Check if there's a file at that path"
def check(self,*args):
assert len(args) == 1,"Need exactly one argument (file name)"
return os.path.isfile(args[0])
class ExistsDirCheck(Check):
name="exists directory"
doc="Check if there's a directory at that path"
def check(self,*args):
assert len(args) == 1,"Need exactly one argument (directory name)"
return os.path.isdir(args[0])
class PathModule(Module):
"""\
This module provides a couple of filesystem existence checks.
"""
info = "Check for file/directory existence"
def load(self):
register_condition(ExistsPathCheck)
register_condition(ExistsFileCheck)
register_condition(ExistsDirCheck)
def unload(self):
unregister_condition(ExistsPathCheck)
unregister_condition(ExistsFileCheck)
unregister_condition(ExistsDirCheck)
init = PathModule
|
gpl-3.0
| -2,167,329,687,825,755,100 | 28.454545 | 72 | 0.738169 | false | 3.521739 | false | false | false |
mohd-akram/item.tf
|
tf2api.py
|
1
|
15632
|
"""This module is based on the Steam WebAPI and can be used to get information
about items in TF2. Using this module, you can obtain the item schema,
store prices, bundles, item sets and attributes for TF2.
You can also obtain market prices from backpack.tf and trade.tf.
There are also functions for parsing the information of each item.
"""
import asyncio
import json
from collections import defaultdict, OrderedDict
import aiohttp
async def getschema(apikey):
"""Return the schema"""
schema_task = asyncio.ensure_future(_getschemaoverview(apikey))
all_items = []
start = 0
while start is not None:
items, start = await _getschemaitems(apikey, start)
all_items.extend(items)
schema = await schema_task
schema['result']['items'] = all_items
return schema
async def _getschemaoverview(apikey):
url = ('https://api.steampowered.com/IEconItems_440/GetSchemaOverview/v1/'
f'?key={apikey}&language=en')
return await _getjsonresponse(url)
async def _getschemaitems(apikey, start):
url = ('https://api.steampowered.com/IEconItems_440/GetSchemaItems/v1/'
f'?key={apikey}&language=en&start={start}')
result = (await _getjsonresponse(url))['result']
return result['items'], result.get('next')
async def getitemsinfo(apikey, storeprices, indexes):
"""Return a dictionary of AssetClassInfo values with defindex as key"""
url = ('https://api.steampowered.com/ISteamEconomy/GetAssetClassInfo/v0001/'
'?key={0}&language=en&appid=440&class_count={1}'.format(apikey,
len(indexes)
))
for n, index in enumerate(indexes):
classid = storeprices[index]['classid']
url += '&classid{0}={1}'.format(n, classid)
infobyid = (await _getjsonresponse(url))['result']
del infobyid['success']
return {int(iteminfo['app_data']['def_index']): iteminfo
for iteminfo in infobyid.values()}
async def getbundles(apikey, storeprices):
"""Return a dictionary of store bundles with defindex as key"""
indexes = [index for index, price in storeprices.items()
if not {'Bundles', 'Class_Bundles'}.isdisjoint(price['tags'])]
return await getitemsinfo(apikey, storeprices, indexes)
def getitemsets(schema):
"""Return an ordered dictionary of itemsets with 'name' as key"""
return OrderedDict([(itemset['name'], itemset) for itemset in
schema['result']['item_sets']])
def getitems(schema):
"""Return an ordered dictionary of items in the schema where the key is
defindex for each item"""
return OrderedDict([(item['defindex'], item) for item in
schema['result']['items']])
def getitemsbyname(schema):
"""Return an ordered dictionary of items in the schema where the key is
item_name for each item"""
itemsbyname = OrderedDict()
duplicates = getobsoleteindexes()
for item in schema['result']['items']:
name = item['item_name']
if name not in itemsbyname:
if item['defindex'] not in duplicates:
itemsbyname[name] = item
return itemsbyname
def getattributes(schema):
"""Return a dictionary with each attribute's name as key"""
return {attribute['name']: attribute for attribute in
schema['result']['attributes']}
def getparticleeffects(schema):
"""Return a dictionary with each particle effect's id as key"""
return {effect['id']: effect for effect in
schema['result']['attribute_controlled_attached_particles']}
async def getstoreprices(apikey):
"""Return a dictionary of store prices where the key is defindex for
each item"""
url = ('https://api.steampowered.com/ISteamEconomy/GetAssetPrices/v0001/'
           '?key={}&language=en&appid=440&currency=usd'.format(apikey))
prices = (await _getjsonresponse(url))['result']['assets']
return {int(price['name']): price for price in prices}
def getnewstoreprices(storeprices):
"""Return a dictionary of store prices of new items with defindex as key"""
return {index: price for index, price in storeprices.items()
if 'New' in price['tags']}
async def getbackpackprices(apikey, items, itemsbyname):
"""Get market prices from backpack.tf.
Return a dictionary where the key is defindex and value is a dictionary of
prices for the item"""
url = ('https://backpack.tf/api/IGetPrices/v4/'
'?key={}&compress=1'.format(apikey))
pricesdata = (await _getjsonresponse(url))['response']['items']
pricesdict = defaultdict(dict)
qualities = getallqualities()
denoms = {'metal': 'Refined', 'hat': 'Hat', 'keys': 'Key',
'earbuds': 'Bud', 'usd': 'USD'}
for name, iteminfo in pricesdata.items():
if name not in itemsbyname:
continue
index = itemsbyname[name]['defindex']
item = items[index]
iscrate = False
if 'attributes' in item and item['attributes']:
attribute = item['attributes'][0]
if attribute['name'] == 'set supply crate series':
iscrate = True
crateno = str(attribute['value'])
if 'prices' not in iteminfo:
continue
for quality, tradeinfo in iteminfo['prices'].items():
try:
qualityname = qualities[int(quality)]
except KeyError:
continue
for tradable, craftinfo in tradeinfo.items():
# Ignore non-tradable version if there is a tradable one
if tradable == 'Non-Tradable' and 'Tradable' in tradeinfo:
continue
for craftable, price in craftinfo.items():
if type(price) is list:
price = price[0]
else:
if iscrate and crateno in price:
price = price[crateno]
elif '0' in price:
price = price['0']
else:
continue
if not price['value']:
continue
value = price['value']
valuehigh = (' - {:g}'.format(price['value_high'])
if 'value_high' in price else '')
denom = denoms[price['currency']]
if (value != 1 or valuehigh) and denom not in ('Refined',
'USD'):
denom += 's'
qlty = (qualityname if craftable != 'Non-Craftable'
else 'Uncraftable')
pricesdict[index][qlty] = '{:g}{} {}'.format(
value, valuehigh, denom)
return pricesdict
async def gettradeprices(apikey, items, itemsbyname):
"""Get market prices from trade.tf.
Return a dictionary where the key is defindex and value is a dictionary of
prices for the item"""
url = 'https://www.trade.tf/api/spreadsheet.json?key={}'.format(apikey)
pricesdata = (await _getjsonresponse(url))['items']
pricesdict = defaultdict(dict)
itemnames = set()
crates = defaultdict(int)
qualities = getallqualities()
qualities[-1] = 'Uncraftable'
denoms = {'r': 'Refined', 'k': 'Key', 'b': 'Bud'}
for index, prices in pricesdata.items():
index = int(index)
if index not in items:
# For crates, index = 10000*crate_defindex + crate_number
crateno = index % 10000
index //= 10000
# Store the price of the highest crate number only
if crateno < crates[index]:
continue
else:
crates[index] = crateno
name = items[index]['item_name']
# Trade.tf uses different indexes.
idx = itemsbyname[name]['defindex']
if index != idx and name in itemnames:
continue
for quality, price in prices.items():
quality = int(quality)
if 'regular' not in price:
continue
price = price['regular']
if price['unsure']:
continue
value = price['low']
valuehigh = (' - {:g}'.format(round(price['hi'], 2))
if value != price['hi'] else '')
denom = denoms[price['unit']]
qualityname = qualities[quality]
if (value != 1 or valuehigh) and denom != 'Refined':
denom += 's'
itemnames.add(name)
pricesdict[idx][qualityname] = '{:g}{} {}'.format(round(value, 2),
valuehigh,
denom)
return pricesdict
def getweapontags():
"""Return all weapon tags"""
return ('primary', 'secondary', 'melee', 'pda', 'pda2', 'building')
def getalltags():
"""Return all item tags"""
return (('weapon', 'cosmetic', 'hat', 'misc', 'taunt', 'tool', 'action',
'paint', 'craft', 'token', 'bundle', 'tournament', 'halloween') +
getweapontags())
def getallclasses():
"""Return an OrderedDict of TF2 classes with name as key and
a list of aliases as value"""
return OrderedDict([('Scout', ['Scoot']),
('Soldier', ['Solly']),
('Pyro', []),
('Demoman', ['Demo']),
('Heavy', ['Hoovy']),
('Engineer', ['Engi', 'Engie']),
('Medic', []),
('Sniper', []),
('Spy', [])])
def getallqualities():
"""Return a dictionary of TF2 item qualities with number as key and
description as value"""
return {6: 'Unique',
3: 'Vintage',
11: 'Strange',
1: 'Genuine',
14: "Collector's",
13: 'Haunted',
5: 'Unusual'}
def getalldenoms():
"""Return an OrderedDict of price denominations in descending order with
the defindex of their corresponding items as value"""
return OrderedDict([('Earbuds', 143),
('Key', 5021),
('Refined', 5002),
('Reclaimed', 5001),
('Scrap', 5000),
('Weapon', 0)])
def getstoreprice(item, storeprices):
"""Get store price of item"""
index = item['defindex']
return ('{:.2f}'.format(storeprices[index]['prices']['USD'] / 100.00)
if index in storeprices else '')
def getmarketprice(item, marketprices):
"""Get market price of item"""
index = item['defindex']
return marketprices[index] if index in marketprices else {}
def getitemattributes(item, allattributes, effects):
"""Get attributes of item"""
attributelist = []
if 'attributes' in item:
attributes = item['attributes']
for a in attributes:
value = a['value']
attribute = allattributes[a['name']]
if not attribute['hidden'] and 'description_string' in attribute:
description = attribute['description_string']
descformat = attribute['description_format']
if descformat == 'value_is_particle_index':
value = effects[value]['name']
description = description.replace('%s1', '{}')
else:
if descformat == 'value_is_percentage':
value = (value * 100) - 100
elif descformat == 'value_is_inverted_percentage':
value = 100 - (value * 100)
elif descformat == 'value_is_additive_percentage':
value *= 100
description = description.replace('%s1', '{:g}')
description = description.format(value)
attrdict = {'description': description,
'type': attribute['effect_type']}
if attrdict['type'] == 'unusual':
attrdict['type'] = 'neutral'
attributelist.append(attrdict)
order = ('neutral', 'positive', 'negative')
return sorted(attributelist, key=lambda k: order.index(k['type']))
def getitemclasses(item):
"""Get the TF2 classes that can use this item"""
return (sorted(item['used_by_classes'],
key=list(getallclasses().keys()).index)
if 'used_by_classes' in item else [])
def getitemtags(item):
"""Get a list of tags that describe the item"""
tags = []
itemclass = item['item_class']
itemtypename = item['item_type_name']
if itemclass == 'bundle':
tags.append(itemclass)
elif itemclass == 'craft_item':
tags.append('craft')
elif itemclass.endswith('_token'):
tags.append('token')
if 'item_slot' in item:
slot = item['item_slot']
if slot in getweapontags() and itemclass != 'slot_token':
tags.append('weapon')
if slot == 'misc':
tags.append('cosmetic')
if itemtypename in ('#TF_Wearable_Hat', 'Hat', 'Mask',
'Holiday Hat', 'Headset', 'Hair'):
tags.append('hat')
else:
tags.append(slot)
if itemtypename == 'Tournament Medal':
tags.append('tournament')
if 'tool' in item:
tags.append('tool')
if item['tool']['type'] == 'paint_can':
tags.append('paint')
if item.get('holiday_restriction') == 'halloween_or_fullmoon':
tags.append('halloween')
return tags
def getobsoleteindexes():
"""Return the indexes of obsolete items that have newer versions"""
map_stamps = {
2007, 2015, 2049, 2079, 2123, 2125, 2138, 2139, 2140, 2143, 2155, 2156
}
starter_packs = set(range(2018, 2027)) | set(range(2094, 2103))
return {699, 2093} | map_stamps | starter_packs
async def getplayerbackpack(apikey, steamid):
"""Return the player backpack of the given steamid"""
url = ('https://api.steampowered.com/IEconItems_440/GetPlayerItems/v0001/'
f'?key={apikey}&steamid={steamid}')
return (await _getjsonresponse(url)).get('result')
async def getplayersummary(apikey, steamid):
"""Return the player summary of the given steamid"""
return (await getplayersummaries(apikey, [steamid]))[0]
async def getplayersummaries(apikey, steamids):
"""Return the player summaries of a list of steamids"""
url = ('https://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/'
f"?key={apikey}&steamids={','.join(steamids)}")
return (await _getjsonresponse(url))['response']['players']
async def resolvevanityurl(apikey, vanityurl):
"""Return the steamid of a given vanity url"""
url = ('https://api.steampowered.com/ISteamUser/ResolveVanityURL/v0001/'
f'?key={apikey}&vanityurl={vanityurl}')
response = (await _getjsonresponse(url))['response']
if response['success'] == 1:
return response['steamid']
async def _getjsonresponse(url):
headers = {'User-Agent': 'tf2api'}
async with aiohttp.ClientSession(headers=headers) as session:
async with session.get(url) as response:
return json.loads((await response.read()).decode())
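# Illustrative usage (assumed, not part of the original module; requires a
# valid Steam Web API key and Python 3.7+ for asyncio.run):
#
#     async def demo(apikey):
#         schema = await getschema(apikey)
#         items = getitems(schema)
#         print(len(items), 'items in the schema')
#
#     asyncio.run(demo('YOUR_STEAM_API_KEY'))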
|
mit
| 3,765,730,247,329,332,700 | 32.18896 | 80 | 0.5625 | false | 4.146419 | false | false | false |
WDavidX/pipy
|
v3dht22.py
|
1
|
20163
|
#!/usr/bin/env python
# 2014-07-11 DHT22.py
import time
import atexit
import pigpio
import smbus
import subprocess
import os
import signal
import RPi.GPIO as GPIO
import psutil
import json
import socket
import math
import datetime
import urllib2
class i2c_device:
def __init__(self, addr, port=1):
self.addr = addr
self.bus = smbus.SMBus(port)
# Write a single command
def write_cmd(self, cmd):
self.bus.write_byte(self.addr, cmd)
time.sleep(0.0001)
# Write a command and argument
def write_cmd_arg(self, cmd, data):
self.bus.write_byte_data(self.addr, cmd, data)
time.sleep(0.0001)
# Write a block of data
def write_block_data(self, cmd, data):
self.bus.write_block_data(self.addr, cmd, data)
time.sleep(0.0001)
# Read a single byte
def read(self):
return self.bus.read_byte(self.addr)
# Read
def read_data(self, cmd):
return self.bus.read_byte_data(self.addr, cmd)
# Read a block of data
def read_block_data(self, cmd):
return self.bus.read_block_data(self.addr, cmd)
# LCD Address
ADDRESS = 0x3f
# commands
LCD_CLEARDISPLAY = 0x01
LCD_RETURNHOME = 0x02
LCD_ENTRYMODESET = 0x04
LCD_DISPLAYCONTROL = 0x08
LCD_CURSORSHIFT = 0x10
LCD_FUNCTIONSET = 0x20
LCD_SETCGRAMADDR = 0x40
LCD_SETDDRAMADDR = 0x80
# flags for display entry mode
LCD_ENTRYRIGHT = 0x00
LCD_ENTRYLEFT = 0x02
LCD_ENTRYSHIFTINCREMENT = 0x01
LCD_ENTRYSHIFTDECREMENT = 0x00
# flags for display on/off control
LCD_DISPLAYON = 0x04
LCD_DISPLAYOFF = 0x00
LCD_CURSORON = 0x02
LCD_CURSOROFF = 0x00
LCD_BLINKON = 0x01
LCD_BLINKOFF = 0x00
# flags for display/cursor shift
LCD_DISPLAYMOVE = 0x08
LCD_CURSORMOVE = 0x00
LCD_MOVERIGHT = 0x04
LCD_MOVELEFT = 0x00
# flags for function set
LCD_8BITMODE = 0x10
LCD_4BITMODE = 0x00
LCD_2LINE = 0x08
LCD_1LINE = 0x00
LCD_5x10DOTS = 0x04
LCD_5x8DOTS = 0x00
# flags for backlight control
LCD_BACKLIGHT = 0x08
LCD_NOBACKLIGHT = 0x00
En = 0b00000100 # Enable bit
Rw = 0b00000010 # Read/Write bit
Rs = 0b00000001 # Register select bit
class lcd:
"""
Class to control LCD display
"""
LCD_BacklightOpt = LCD_NOBACKLIGHT
LCD_BacklightOpt = LCD_BACKLIGHT
LCD_TurnOn, LCD_TurnOff = LCD_BACKLIGHT, LCD_NOBACKLIGHT
def __init__(self):
self.lcd_device = i2c_device(ADDRESS)
self.lcd_write(0x03)
self.lcd_write(0x03)
self.lcd_write(0x03)
self.lcd_write(0x02)
self.lcd_write(LCD_FUNCTIONSET | LCD_2LINE | LCD_5x8DOTS | LCD_4BITMODE)
self.lcd_write(LCD_DISPLAYCONTROL | LCD_DISPLAYON)
self.lcd_write(LCD_CLEARDISPLAY)
self.lcd_write(LCD_ENTRYMODESET | LCD_ENTRYLEFT)
time.sleep(0.2)
self.backlight = lcd.LCD_BacklightOpt
# clocks EN to latch command
def lcd_backlighton(self, N=-1):
try:
N = int(N)
if not N in [1, 0, -1]:
raise Exception("Wrong backlight option")
except Exception:
N = lcd.LCD_TurnOff
if N == 1:
self.LCD_BacklightOpt = lcd.LCD_TurnOn
elif N == 0:
self.LCD_BacklightOpt = lcd.LCD_TurnOff
elif self.LCD_BacklightOpt == lcd.LCD_TurnOff:
self.LCD_BacklightOpt = lcd.LCD_TurnOn
else:
self.LCD_BacklightOpt = lcd.LCD_TurnOff
def lcd_strobe(self, data):
self.lcd_device.write_cmd(data | En | self.LCD_BacklightOpt)
time.sleep(.0005)
self.lcd_device.write_cmd(((data & ~En) | self.LCD_BacklightOpt))
time.sleep(.0001)
def lcd_write_four_bits(self, data):
self.lcd_device.write_cmd(data | self.LCD_BacklightOpt)
self.lcd_strobe(data)
# write a command to lcd
def lcd_write(self, cmd, mode=0):
self.lcd_write_four_bits(mode | (cmd & 0xF0))
self.lcd_write_four_bits(mode | ((cmd << 4) & 0xF0))
# put string function
def lcd_display_string(self, string, line):
if line == 1:
self.lcd_write(0x80)
if line == 2:
self.lcd_write(0xC0)
if line == 3:
self.lcd_write(0x94)
if line == 4:
self.lcd_write(0xD4)
for char in string:
self.lcd_write(ord(char), Rs)
# clear lcd and set to home
def lcd_clear(self):
self.lcd_write(LCD_CLEARDISPLAY)
self.lcd_write(LCD_RETURNHOME)
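# Illustrative usage (assumed, not in the original file; requires a 20x4
# character LCD on I2C address 0x3f):
#
#     mylcd = lcd()
#     mylcd.lcd_display_string("Hello".center(20), 1)
#     mylcd.lcd_backlighton(1)   # force the backlight on
#     mylcd.lcd_clear()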
class sensor:
"""
A class to read relative humidity and temperature from the
DHT22 sensor. The sensor is also known as the AM2302.
The sensor can be powered from the Pi 3V3 or the Pi 5V rail.
Powering from the 3V3 rail is simpler and safer. You may need
to power from 5V if the sensor is connected via a long cable.
For 3V3 operation connect pin 1 to 3V3 and pin 4 to ground.
Connect pin 2 to a gpio.
For 5V operation connect pin 1 to 5V and pin 4 to ground.
The following pin 2 connection works for me. Use at YOUR OWN RISK.
5V--5K_resistor--+--10K_resistor--Ground
|
DHT22 pin 2 -----+
|
gpio ------------+
"""
def __init__(self, pi, gpio, LED=None, power=None):
"""
Instantiate with the Pi and gpio to which the DHT22 output
pin is connected.
Optionally a LED may be specified. This will be blinked for
each successful reading.
Optionally a gpio used to power the sensor may be specified.
This gpio will be set high to power the sensor. If the sensor
locks it will be power cycled to restart the readings.
Taking readings more often than about once every two seconds will
eventually cause the DHT22 to hang. A 3 second interval seems OK.
"""
self.pi = pi
self.gpio = gpio
self.LED = LED
self.power = power
if power is not None:
pi.write(power, 1) # Switch sensor on.
time.sleep(2)
self.powered = True
self.cb = None
atexit.register(self.cancel)
self.bad_CS = 0 # Bad checksum count.
self.bad_SM = 0 # Short message count.
self.bad_MM = 0 # Missing message count.
self.bad_SR = 0 # Sensor reset count.
self.bad_Trigger = False # flag true if the last trigger was good
# Power cycle if timeout > MAX_TIMEOUTS.
self.no_response = 0
self.MAX_NO_RESPONSE = 2
self.rhum = -999
self.temp = -999
self.tov = None
self.high_tick = 0
self.bit = 40
pi.set_pull_up_down(gpio, pigpio.PUD_OFF)
pi.set_watchdog(gpio, 0) # Kill any watchdogs.
self.cb = pi.callback(gpio, pigpio.EITHER_EDGE, self._cb)
def _cb(self, gpio, level, tick):
"""
Accumulate the 40 data bits. Format into 5 bytes, humidity high,
humidity low, temperature high, temperature low, checksum.
"""
diff = pigpio.tickDiff(self.high_tick, tick)
self.bad_Trigger = False
if level == 0:
# Edge length determines if bit is 1 or 0.
if diff >= 50:
val = 1
if diff >= 200: # Bad bit?
self.CS = 256 # Force bad checksum.
else:
val = 0
if self.bit >= 40: # Message complete.
self.bit = 40
elif self.bit >= 32: # In checksum byte.
self.CS = (self.CS << 1) + val
if self.bit == 39:
# 40th bit received.
self.pi.set_watchdog(self.gpio, 0)
self.no_response = 0
total = self.hH + self.hL + self.tH + self.tL
if (total & 255) == self.CS: # Is checksum ok?
self.rhum = ((self.hH << 8) + self.hL) * 0.1
if self.tH & 128: # Negative temperature.
mult = -0.1
self.tH = self.tH & 127
else:
mult = 0.1
self.temp = ((self.tH << 8) + self.tL) * mult
self.tov = time.time()
if self.LED is not None:
self.pi.write(self.LED, 0)
else:
self.bad_Trigger = True
self.bad_CS += 1
elif self.bit >= 24: # in temp low byte
self.tL = (self.tL << 1) + val
elif self.bit >= 16: # in temp high byte
self.tH = (self.tH << 1) + val
elif self.bit >= 8: # in humidity low byte
self.hL = (self.hL << 1) + val
elif self.bit >= 0: # in humidity high byte
self.hH = (self.hH << 1) + val
else: # header bits
pass
self.bit += 1
elif level == 1:
self.high_tick = tick
if diff > 250000:
self.bit = -2
self.hH = 0
self.hL = 0
self.tH = 0
self.tL = 0
self.CS = 0
else: # level == pigpio.TIMEOUT:
self.pi.set_watchdog(self.gpio, 0)
if self.bit < 8: # Too few data bits received.
self.bad_MM += 1 # Bump missing message count.
self.no_response += 1
if self.no_response > self.MAX_NO_RESPONSE:
self.no_response = 0
self.bad_SR += 1 # Bump sensor reset count.
if self.power is not None:
self.powered = False
self.pi.write(self.power, 0)
time.sleep(2)
self.pi.write(self.power, 1)
time.sleep(2)
self.powered = True
elif self.bit < 39: # Short message receieved.
self.bad_SM += 1 # Bump short message count.
self.no_response = 0
else: # Full message received.
self.no_response = 0
def sensor_info(self):
return self.temp, self.rhum, self.bad_Trigger, self.bad_SM
def is_last_tigger(self):
return self.bad_Trigger
def temperature(self):
"""Return current temperature."""
return self.temp
def humidity(self):
"""Return current relative humidity."""
return self.rhum
def staleness(self):
"""Return time since measurement made."""
if self.tov is not None:
return time.time() - self.tov
else:
return -999
def bad_checksum(self):
"""Return count of messages received with bad checksums."""
return self.bad_CS
def short_message(self):
"""Return count of short messages."""
return self.bad_SM
def missing_message(self):
"""Return count of missing messages."""
return self.bad_MM
def sensor_resets(self):
"""Return count of power cycles because of sensor hangs."""
return self.bad_SR
def trigger(self):
"""Trigger a new relative humidity and temperature reading."""
if self.powered:
if self.LED is not None:
self.pi.write(self.LED, 1)
self.pi.write(self.gpio, pigpio.LOW)
time.sleep(0.017) # 17 ms
self.pi.set_mode(self.gpio, pigpio.INPUT)
self.pi.set_watchdog(self.gpio, 200)
def cancel(self):
"""Cancel the DHT22 sensor."""
self.pi.set_watchdog(self.gpio, 0)
if self.cb != None:
self.cb.cancel()
self.cb = None
def orignal_sample():
pass
# Intervals of about 2 seconds or less will eventually hang the DHT22.
INTERVAL = 3
pi = pigpio.pi()
s = sensor(pi, 4, LED=None, power=None)
r = 0
next_reading = time.time()
while True:
r += 1
s.trigger()
time.sleep(0.2)
print("r={} H={} T={} stale={:3.2f} bad_checksum={} SMS={} Missing={} resets={}".format(
r, s.humidity(), s.temperature(), s.staleness(),
s.bad_checksum(), s.short_message(), s.missing_message(),
s.sensor_resets()))
next_reading += INTERVAL
time.sleep(next_reading - time.time()) # Overall INTERVAL second polling.
s.cancel()
pi.stop()
def init_mylcd():
mylcd = lcd()
mylcd.lcd_display_string("Emma Be Happy".center(20), 2)
mylcd.lcd_display_string("DHT22 Version 1.0".center(20), 3)
return mylcd
def backlight_control(fname="backlighton.txt"):
t=datetime.datetime.now()
if t.hour>22 or t.hour<8: return 1
if os.path.exists(fname): return 1
else: return 0
def get_log_file_name(tlast, tnew, outdir=r".", fname_prefix=r"dht22-"):
t_lastdate_num = int(time.strftime("%Y%m%d", time.localtime(tlast)))
t_newdate_num = int(time.strftime("%Y%m%d", time.localtime(tnew)))
# if not os.path.exists(outdir): os.mkdir(outdir)
fnout = os.path.join(os.getcwdu(), outdir, fname_prefix + time.strftime("%y-%m-%d") + ".txt")
# print fnout, t_newdate_num,t_lastdate_num
if (t_newdate_num > t_lastdate_num) and not (os.path.exists(fnout)): open(fnout, 'w').close()
os.system("""sudo chown pi %s"""%fnout)
return fnout
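# Illustrative behaviour (assumed, not in the original file): called once per
# loop with the previous and current timestamps, it returns a path of the form
# <outdir>/dht22-YY-MM-DD.txt and creates an empty file when the date rolls over.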
def update_lcd(mylcd, t, h):
mytimestr = time.strftime("%m-%d %H:%M %a")
mylcd.lcd_display_string(mytimestr.center(20), 1)
mylcd.lcd_display_string(("%.1fF %.1fC %.1f%%" % (t * 9 / 5.0 + 32, t, h)).center(20), 4)
def get_weather_api():
minneapolis_url = r'http://api.openweathermap.org/data/2.5/weather?id=5037649&units=metric'
try:
response = urllib2.urlopen(minneapolis_url,timeout=5)
data = json.load(response)
except urllib2.URLError, e:
print "urlopen error at %s, %s"%(time.strftime("%m-%d %H:%M:%S"),e)
return None
except socket.timeout,e:
print "urlopen error at %s, %s"%(time.strftime("%m-%d %H:%M:%S"),e)
return None
return data
wunderground_counter=0
def get_wunderground():
global wunderground_counter
myapi="8e1199ad75005651"
features="conditions"
settings="lang:EN"
query="55414"
format="json"
minneapolis_url="""http://api.wunderground.com/api/%s/%s/%s/q/%s.%s"""%(myapi,features,settings,query,format)
try:
wunderground_counter=wunderground_counter+1
print "Calling wunderground %5d at %s"%(wunderground_counter, time.strftime("%m-%d %H:%M:%S"))
response = urllib2.urlopen(minneapolis_url,timeout=5)
data = json.load(response)
except Exception,e:
print "get_wunderground error %s"%e
return None
#print data['current_observation']['feelslike_c']
return data
def get_sunset_time_str(date_info=None,latitude=44.97,longitude=-93.26,tz_info='US/Central'):
import pytz,datetime,astral
a=astral.Astral()
a.solar_depression='civil'
if date_info is None: date_info=datetime.date.today()
utc_datetime=a.sunset_utc(date_info,latitude,longitude)
lc_datetime=utc_datetime.astimezone(pytz.timezone(tz_info))
outstr="%02d%02d"%(lc_datetime.hour,lc_datetime.minute)
return outstr
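# Illustrative call (assumed, not in the original file): get_sunset_time_str()
# returns today's local sunset time for the default Minneapolis coordinates as
# an 'HHMM' string, e.g. '1732' for a 5:32 PM sunset. Requires pytz and astral.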
def main1():
# Some init values
outdir = r"pipylog_" + os.path.splitext(__file__)[0]
if not os.path.exists(outdir): os.mkdir(outdir)
updateIntervalSec, runningTimeHour = 60, 24
retryInvervalSec = 3
totalLoopNum, errorLoopNum = 0, 0
main_t0 = time.time()
# init instances
pi = pigpio.pi()
try:
s = sensor(pi, 18, LED=None, power=None)
except Exception, e:
print "%s"%(e)
mylcd = init_mylcd()
mylcd.lcd_backlighton(backlight_control())
dht_running = True
initDone = False
# Sensor first few trials
init_t0 = -time.time()
init_loop = 0
while not initDone:
loop_t0 = time.time()
s.trigger()
t, h, badtrigger, badsm = s.sensor_info()
if (h != -999):
initDone = True
else: # time.sleep(loop_t0+3-time.time())
init_loop += 1
time.sleep(loop_t0 + retryInvervalSec - time.time())
print "Init sensor %d loops in in %.1f seconds" % (init_loop, time.time() + init_t0)
print "Output directory %s" % (os.path.abspath(outdir))
loop_t0=0
n_15min=0
n_5min=0
wunderground_data,weather_data=None,None
while (dht_running):
loop_t_last = loop_t0
loop_t0 = time.time()
try:
s.trigger()
totalLoopNum += 1
print totalLoopNum,errorLoopNum
t, h, badtrigger, badsm = s.sensor_info()
# print totalLoopNum,t,h,badtrigger, badsm
if badtrigger:
errorLoopNum += 1
t_badtrigger_waitsec = max(0, loop_t0 + retryInvervalSec - time.time())
if t_badtrigger_waitsec > 0: time.sleep(t_badtrigger_waitsec)
continue
mylcd.lcd_backlighton(backlight_control())
fnout = get_log_file_name(loop_t_last, loop_t0, outdir)
with open(fnout, 'a') as fhout:
fhout.write(
"%.2f , %s , %4.1f , %4.1f\n" % (\
loop_t0, time.strftime("%H%M%S", time.localtime(loop_t0)), t, h))
# first row
str1 = time.strftime("%H:%M %m-%d %a ",time.localtime(time.time()+60))+get_sunset_time_str()
mylcd.lcd_display_string(str1.center(20), 1)
# second row
str2 = "Emma Be Happy %d" % (totalLoopNum)
print str2
mylcd.lcd_display_string(str2.center(20), 2)
# third row from internet
if n_15min<math.floor(loop_t0 / (60*15.0)):
n_15min=math.floor(loop_t0 / (60*15.0))
wunderground_data=get_wunderground()
if wunderground_data is not None:
str3 = "%.0f/%.0fC %s %s"%(\
float(wunderground_data['current_observation']['feelslike_c']),\
float(wunderground_data['current_observation']['temp_c']),\
wunderground_data['current_observation']['weather'].split(" ")[-1],\
wunderground_data['current_observation']['relative_humidity'])
mylcd.lcd_display_string(str3.center(20), 3)
# fourth row
mylcd.lcd_display_string(("%.1fF %.1fC %.1f%%" % (t * 9 / 5.0 + 32, t, h)).center(20), 4)
twaitsec = max(0, loop_t0 + updateIntervalSec - time.time())
if twaitsec > 0: time.sleep(twaitsec)
except KeyboardInterrupt:
dht_running = False
except Exception, e:
# dht_running=False
print "%s"%e
time.sleep(1)
continue
print "\n" * 2
print "%s terminated" % (os.path.abspath(__file__))
print "Up time: %.1f sec, %d loops from %s " % (
time.time() -main_t0, totalLoopNum, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(main_t0)))
print "Log file at %s" % ( fnout)
if errorLoopNum > 0: print "Error loops %d/%d" % (errorLoopNum, totalLoopNum)
def start_daemon():
p1 = subprocess.Popen(["ps","axo","pid,ppid,pgrp,tty,tpgid,sess,comm"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["awk", "$2==1"], stdin=p1.stdout, stdout=subprocess.PIPE)
p3 = subprocess.Popen(["awk", "$1==$3"], stdin=p2.stdout, stdout=subprocess.PIPE)
pdata,perr=p3.communicate()
pigpiod_found=False
for idx,item in enumerate(pdata.split("\n")):
pname=(item.strip()).split(' ')[-1]
if pname == "pigpiod":
pigpiod_found=True
line=item.strip()
break
if pigpiod_found: print line
else:
os.system("sudo pigpiod")
print "\nstarting pigpiod..."
time.sleep(3)
if __name__ == "__main__":
pass
# orignal_sample()
start_daemon()
main1()
#print get_weather_api()
|
mit
| -1,763,873,955,601,170,400 | 30.802839 | 118 | 0.560581 | false | 3.379651 | false | false | false |
VanceKingSaxbeA/MarketsEngine
|
src/googlequotemachine.py
|
1
|
3986
|
/*Owner & Copyrights: Vance King Saxbe. A.*/""" Copyright (c) <2014> Author Vance King Saxbe. A, and contributors Power Dominion Enterprise, Precieux Consulting and other contributors. Modelled, Architected and designed by Vance King Saxbe. A. with the geeks from GoldSax Consulting and GoldSax Technologies email @[email protected]. Development teams from Power Dominion Enterprise, Precieux Consulting. Project sponsored by GoldSax Foundation, GoldSax Group and executed by GoldSax Manager."""from src.googlefinancequote import *
import sqlite3 as lite
import string
import gc
import time
import math
from src.dbbackup import *
import _thread
from src.goldsaxanalytics import *
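# Descriptive note (added for clarity, not in the original source): actionking() polls quotes for
# `stocklist`, inserts any price changes into per-symbol SQLite tables in `dbase`, backs off via
# `attmt` when nothing changes, and recurses with the fresh quotes as `tempf`; it returns once
# `timetotake` seconds have passed since `tempf` was last empty (normally the initial call).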
def actionking(lck, tempf, stocklist, dbase, attmt,actionlist,cycle,timeatpresent,timetotake):
if tempf == []:
timeatpresent = time.clock()
if (time.clock() - timeatpresent) > timetotake:
return 0
lck.acquire()
lck.release()
f = pullprocess(stocklist)
sorter = []
con = lite.connect(dbase)
for fuck in f:
for suck in tempf:
if (fuck[0] == suck[0]):
try:
Jack = float(fuck[3].replace(",",""))
Jill = float(suck[3].replace(",",""))
except ValueError:
break
if (abs(Jack-Jill)> 0.01):
sorter.append(fuck[0])
stmt = "INSERT INTO "+fuck[0]+"table(ONNN, ATTT, PRIC) VALUES ('"+fuck[1]+"', '"+fuck[2]+"', "+fuck[3].replace(",","")+");"
cur = con.cursor()
try:
cur.execute(stmt)
con.commit()
except lite.OperationalError:
time.sleep(0.05)
try:
cur.execute(stmt)
con.commit()
except lite.OperationalError:
time.sleep(0.05)
try:
cur.execute(stmt)
con.commit()
except lite.OperationalError:
con.commit()
con.close()
if sorter != []:
attmt = 0
"""
a_lock = _thread.allocate_lock()
with a_lock:
for item in sorter:
_thread.start_new_thread(goldsaxanalytics.fetch,(item,dbase,a_lock))
"""
if tempf != [] and sorter == [] and attmt == 4:
gc.collect()
        return None
if tempf != [] and sorter == [] and attmt == 3:
time.sleep(60)
gc.collect()
attmt = 4
if tempf != [] and sorter == [] and attmt == 2:
time.sleep(30)
gc.collect()
attmt = 3
if tempf != [] and sorter == [] and attmt == 1:
time.sleep(10)
gc.collect()
attmt = 2
if tempf != [] and sorter == []:
time.sleep(5)
attmt = 1
gc.collect()
time.sleep(0.0001)
gc.collect()
cycle = cycle + 1
return actionking(lck,f, stocklist,dbase, attmt,actionlist,cycle,timeatpresent,timetotake)
def pullprocess(ass):
sds = googlefinancequote.getquote(ass)
return sds
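# Illustrative note (an assumption about googlefinancequote.getquote, not documented here): each
# quote row appears to be indexed positionally in actionking() above, with row[0] the symbol,
# row[1]/row[2] timestamp fields, and row[3] the price string that is de-comma'd before the
# float comparison and INSERT.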
# email to provide support at [email protected], [email protected]. For donations please write to [email protected]
|
mit
| -2,020,211,338,537,667,600 | 38.87 | 530 | 0.466633 | false | 4.346783 | false | false | false |
w4nderlust/lingua-politicae
|
scraping/scrape_facebook.py
|
1
|
1718
|
import os
import facebook
import json
import requests
# get temporary access token here: https://developers.facebook.com/tools/explorer/
from globals import POLITICIANS_INFO_FILE_PATH, FACEBOOK_POSTS_DIRECTORY
access_token = 'EAACEdEose0cBAAKiNQ3ZB3kpnGu7GqkWq4mUHQBb4BuKmae6FHH3jSTIqZBeuqU7hhVv3WiAdxWMLbwx1h9ptmzRWMwufknjSkG2ORXPo8WNuI6IeUGFcrZBciUWE4tD7rXKYGlVdLZB4ZCfQ4hmQdUag39FpdWkxxe9i3gBKcMxwq5kwOvv2CcZAFjr28ls4ZD'
graph = facebook.GraphAPI(access_token)
try:
with open(POLITICIANS_INFO_FILE_PATH) as data_file:
users = json.load(data_file)
except ValueError:
users = []
print("A problem occurred when parsing politicians_info.json")
def get_posts(user):
profile = graph.get_object(user['facebook'])
output_file = os.path.join(FACEBOOK_POSTS_DIRECTORY, user["facebook"] + '_facebook.json')
results = []
print("getting {0}".format(user['facebook']))
posts = graph.get_connections(profile['id'], 'feed?limit=100')
pages = 0
while pages < 10:
try:
# Perform some action on each post in the collection we receive from
# Facebook.
for post in posts['data']:
results.append(post)
# Attempt to make a request to the next page of data, if it exists.
posts = requests.get(posts['paging']['next']).json()
pages += 1
except KeyError:
# When there are no more pages (['paging']['next']), break from the
# loop and end the script.
break
print("posts {0}".format(len(results)))
with open(output_file, 'w') as outfile:
json.dump(results, outfile)
for user in users:
get_posts(user)
print('all done')
|
apache-2.0
| -5,462,487,004,753,837,000 | 29.678571 | 213 | 0.672293 | false | 3.140768 | false | false | false |
tulip-control/tulip-control
|
contrib/fmu/robotfmu.py
|
1
|
3360
|
#!/usr/bin/env python
"""Demonstration of FMU export from a controller synthesized using TuLiP
This is a modified copy from the TuLiP sources of
examples/robot_planning/continuous.py that exports an FMU. The key
changes are
* caching of the abstraction and controller in AbstractPwa.p and FSM.p, and
* calling exportFMU() and `make test_controller` (near end of this file).
"""
import os.path
import os
import pickle
import sys
import numpy as np
from tulip import spec, synth, hybrid
from polytope import box2poly
from tulip.abstract import prop2part, discretize
from exportFMU import exportFMU
BUILDDIR = "build/"
def specify_discretize_synthesize():
"""Return PWA partition and controller, dump them to pickle files."""
# Problem parameters
input_bound = 1.0
uncertainty = 0.01
# Continuous state space
cont_state_space = box2poly([[0., 3.], [0., 2.]])
# Continuous dynamics
A = np.array([[1.0, 0.], [0., 1.0]])
B = np.array([[0.1, 0.], [0., 0.1]])
E = np.array([[1., 0.], [0., 1.]])
# Available control, possible disturbances
U = input_bound * np.array([[-1., 1.], [-1., 1.]])
W = uncertainty * np.array([[-1., 1.], [-1., 1.]])
# Convert to polyhedral representation
U = box2poly(U)
W = box2poly(W)
# Construct the LTI system describing the dynamics
sys_dyn = hybrid.LtiSysDyn(A, B, E, None, U, W, cont_state_space)
# Define atomic propositions for relevant regions of state space
cont_props = {}
cont_props['home'] = box2poly([[0., 1.], [0., 1.]])
cont_props['lot'] = box2poly([[2., 3.], [1., 2.]])
# Compute proposition preserving partition of the continuous state space
cont_partition = prop2part(cont_state_space, cont_props)
pwa = discretize(
cont_partition, sys_dyn, closed_loop=True,
N=8, min_cell_volume=0.1, plotit=False)
"""Specifications"""
# Environment variables and assumptions
env_vars = {'park'}
env_init = set()
env_prog = '!park'
env_safe = set()
# System variables and requirements
sys_vars = {'X0reach'}
sys_init = {'X0reach'}
sys_prog = {'home'} # []<>home
sys_safe = {'(X(X0reach) <-> lot) || (X0reach && !park)'}
sys_prog |= {'X0reach'}
# Create the specification
specs = spec.GRSpec(env_vars, sys_vars, env_init, sys_init,
env_safe, sys_safe, env_prog, sys_prog)
specs.qinit = r'\A \E'
specs.moore = False
specs.plus_one = False
"""Synthesize"""
ctrl = synth.synthesize(
specs, sys=pwa.ts, ignore_sys_init=True, solver='gr1c')
# store the result for future use
if len(BUILDDIR) > 0 and not os.path.exists(BUILDDIR):
os.mkdir(BUILDDIR)
pickle.dump(ctrl, open(BUILDDIR + 'FSM.p', 'wb'))
pickle.dump(pwa, open(BUILDDIR + 'AbstractPwa.p', 'wb'))
return pwa, ctrl
def pickle_files_exist():
return (
os.path.isfile(BUILDDIR + 'AbstractPwa.p') and
os.path.isfile(BUILDDIR + 'FSM.p'))
if __name__ == '__main__':
if pickle_files_exist():
pwa = pickle.load(open(BUILDDIR + "AbstractPwa.p", "rb"))
ctrl = pickle.load(open(BUILDDIR + "FSM.p", "rb"))
else:
pwa, ctrl = specify_discretize_synthesize()
x0 = np.array([1.5, 1.5])
d0 = 18
exportFMU(ctrl, pwa, x0, d0)
os.system("make test_controller")
|
bsd-3-clause
| 7,800,883,244,847,692,000 | 29 | 76 | 0.622917 | false | 3.149016 | false | false | false |
pyfa-org/Pyfa
|
service/port/eft.py
|
1
|
34060
|
# =============================================================================
# Copyright (C) 2014 Ryan Holmes
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
import re
from logbook import Logger
from eos.const import FittingModuleState, FittingSlot
from eos.db.gamedata.queries import getDynamicItem
from eos.saveddata.booster import Booster
from eos.saveddata.cargo import Cargo
from eos.saveddata.citadel import Citadel
from eos.saveddata.drone import Drone
from eos.saveddata.fighter import Fighter
from eos.saveddata.fit import Fit
from eos.saveddata.implant import Implant
from eos.saveddata.module import Module
from eos.saveddata.ship import Ship
from gui.fitCommands.helpers import activeStateLimit
from service.const import PortEftOptions
from service.fit import Fit as svcFit
from service.market import Market
from service.port.muta import parseMutant, renderMutant
from service.port.shared import IPortUser, fetchItem, processing_notify
pyfalog = Logger(__name__)
MODULE_CATS = ('Module', 'Subsystem', 'Structure Module')
SLOT_ORDER = (FittingSlot.LOW, FittingSlot.MED, FittingSlot.HIGH, FittingSlot.RIG, FittingSlot.SUBSYSTEM, FittingSlot.SERVICE)
OFFLINE_SUFFIX = '/OFFLINE'
NAME_CHARS = '[^,/\[\]]' # Characters which are allowed to be used in name
def exportEft(fit, options, callback):
# EFT formatted export is split in several sections, each section is
# separated from another using 2 blank lines. Sections might have several
# sub-sections, which are separated by 1 blank line
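    # Illustrative example of the text this produces (names below are placeholders, not real data):
    #
    #   [Rifter, My Fit]
    #   200mm AutoCannon I, EMP S
    #   [Empty Low slot]
    #
    #   Hobgoblin I x2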
sections = []
header = '[{}, {}]'.format(fit.ship.item.typeName, fit.name)
# Section 1: modules, rigs, subsystems, services
modsBySlotType = {}
for module in fit.modules:
modsBySlotType.setdefault(module.slot, []).append(module)
modSection = []
mutants = {} # Format: {reference number: module}
mutantReference = 1
for slotType in SLOT_ORDER:
rackLines = []
modules = modsBySlotType.get(slotType, ())
for module in modules:
if module.item:
# if module was mutated, use base item name for export
if module.isMutated:
modName = module.baseItem.typeName
else:
modName = module.item.typeName
if module.isMutated and options[PortEftOptions.MUTATIONS]:
mutants[mutantReference] = module
mutationSuffix = ' [{}]'.format(mutantReference)
mutantReference += 1
else:
mutationSuffix = ''
modOfflineSuffix = ' {}'.format(OFFLINE_SUFFIX) if module.state == FittingModuleState.OFFLINE else ''
if module.charge and options[PortEftOptions.LOADED_CHARGES]:
rackLines.append('{}, {}{}{}'.format(
modName, module.charge.typeName, modOfflineSuffix, mutationSuffix))
else:
rackLines.append('{}{}{}'.format(modName, modOfflineSuffix, mutationSuffix))
else:
rackLines.append('[Empty {} slot]'.format(
FittingSlot(slotType).name.capitalize() if slotType is not None else ''))
if rackLines:
modSection.append('\n'.join(rackLines))
if modSection:
sections.append('\n\n'.join(modSection))
# Section 2: drones, fighters
minionSection = []
droneExport = exportDrones(fit.drones)
if droneExport:
minionSection.append(droneExport)
fighterExport = exportFighters(fit.fighters)
if fighterExport:
minionSection.append(fighterExport)
if minionSection:
sections.append('\n\n'.join(minionSection))
# Section 3: implants, boosters
charSection = []
if options[PortEftOptions.IMPLANTS]:
implantExport = exportImplants(fit.implants)
if implantExport:
charSection.append(implantExport)
if options[PortEftOptions.BOOSTERS]:
boosterExport = exportBoosters(fit.boosters)
if boosterExport:
charSection.append(boosterExport)
if charSection:
sections.append('\n\n'.join(charSection))
# Section 4: cargo
if options[PortEftOptions.CARGO]:
cargoExport = exportCargo(fit.cargo)
if cargoExport:
sections.append(cargoExport)
# Section 5: mutated modules' details
mutationLines = []
if mutants and options[PortEftOptions.MUTATIONS]:
for mutantReference in sorted(mutants):
mutant = mutants[mutantReference]
mutationLines.append(renderMutant(mutant, firstPrefix='[{}] '.format(mutantReference), prefix=' '))
if mutationLines:
sections.append('\n'.join(mutationLines))
text = '{}\n\n{}'.format(header, '\n\n\n'.join(sections))
if callback:
callback(text)
else:
return text
def exportDrones(drones):
droneLines = []
for drone in sorted(drones, key=lambda d: d.item.typeName):
droneLines.append('{} x{}'.format(drone.item.typeName, drone.amount))
return '\n'.join(droneLines)
def exportFighters(fighters):
fighterLines = []
for fighter in sorted(fighters, key=lambda f: f.item.typeName):
fighterLines.append('{} x{}'.format(fighter.item.typeName, fighter.amount))
return '\n'.join(fighterLines)
def exportImplants(implants):
implantLines = []
for implant in sorted(implants, key=lambda i: i.slot or 0):
implantLines.append(implant.item.typeName)
return '\n'.join(implantLines)
def exportBoosters(boosters):
boosterLines = []
for booster in sorted(boosters, key=lambda b: b.slot or 0):
boosterLines.append(booster.item.typeName)
return '\n'.join(boosterLines)
def exportCargo(cargos):
cargoLines = []
for cargo in sorted(cargos, key=lambda c: (c.item.group.category.name, c.item.group.name, c.item.typeName)):
cargoLines.append('{} x{}'.format(cargo.item.typeName, cargo.amount))
return '\n'.join(cargoLines)
def importEft(lines):
lines = _importPrepare(lines)
try:
fit = _importCreateFit(lines)
except EftImportError:
return
aFit = AbstractFit()
aFit.mutations = _importGetMutationData(lines)
stubPattern = '^\[.+?\]$'
modulePattern = '^(?P<typeName>{0}+?)(,\s*(?P<chargeName>{0}+?))?(?P<offline>\s*{1})?(\s*\[(?P<mutation>\d+?)\])?$'.format(NAME_CHARS, OFFLINE_SUFFIX)
droneCargoPattern = '^(?P<typeName>{}+?) x(?P<amount>\d+?)$'.format(NAME_CHARS)
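    # Illustrative lines each pattern is meant to match (examples added for clarity):
    #   stubPattern       -> "[Empty High slot]"
    #   modulePattern     -> "425mm AutoCannon II, EMP M /OFFLINE [2]"
    #   droneCargoPattern -> "Hobgoblin II x5"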
sections = []
for section in _importSectionIter(lines):
for line in section.lines:
# Stub line
if re.match(stubPattern, line):
section.itemSpecs.append(None)
continue
# Items with quantity specifier
m = re.match(droneCargoPattern, line)
if m:
try:
itemSpec = MultiItemSpec(m.group('typeName'))
# Items which cannot be fetched are considered as stubs
except EftImportError:
section.itemSpecs.append(None)
else:
itemSpec.amount = int(m.group('amount'))
section.itemSpecs.append(itemSpec)
continue
# All other items
m = re.match(modulePattern, line)
if m:
try:
itemSpec = RegularItemSpec(m.group('typeName'), chargeName=m.group('chargeName'))
# Items which cannot be fetched are considered as stubs
except EftImportError:
section.itemSpecs.append(None)
else:
if m.group('offline'):
itemSpec.offline = True
if m.group('mutation'):
itemSpec.mutationIdx = int(m.group('mutation'))
section.itemSpecs.append(itemSpec)
continue
_clearTail(section.itemSpecs)
sections.append(section)
hasDroneBay = any(s.isDroneBay for s in sections)
hasFighterBay = any(s.isFighterBay for s in sections)
for section in sections:
if section.isModuleRack:
aFit.addModules(section.itemSpecs)
elif section.isImplantRack:
for itemSpec in section.itemSpecs:
aFit.addImplant(itemSpec)
elif section.isDroneBay:
for itemSpec in section.itemSpecs:
aFit.addDrone(itemSpec)
elif section.isFighterBay:
for itemSpec in section.itemSpecs:
aFit.addFighter(itemSpec)
elif section.isCargoHold:
for itemSpec in section.itemSpecs:
aFit.addCargo(itemSpec)
# Mix between different kinds of item specs (can happen when some
# blank lines are removed)
else:
for itemSpec in section.itemSpecs:
if itemSpec is None:
continue
if itemSpec.isModule:
aFit.addModule(itemSpec)
elif itemSpec.isImplant:
aFit.addImplant(itemSpec)
elif itemSpec.isDrone and not hasDroneBay:
aFit.addDrone(itemSpec)
elif itemSpec.isFighter and not hasFighterBay:
aFit.addFighter(itemSpec)
elif itemSpec.isCargo:
aFit.addCargo(itemSpec)
# Subsystems first because they modify slot amount
for i, m in enumerate(aFit.subsystems):
if m is None:
dummy = Module.buildEmpty(aFit.getSlotByContainer(aFit.subsystems))
dummy.owner = fit
fit.modules.replaceRackPosition(i, dummy)
elif m.fits(fit):
m.owner = fit
fit.modules.replaceRackPosition(i, m)
sFit = svcFit.getInstance()
sFit.recalc(fit)
sFit.fill(fit)
# Other stuff
for modRack in (
aFit.rigs,
aFit.services,
aFit.modulesHigh,
aFit.modulesMed,
aFit.modulesLow,
):
for i, m in enumerate(modRack):
if m is None:
dummy = Module.buildEmpty(aFit.getSlotByContainer(modRack))
dummy.owner = fit
fit.modules.replaceRackPosition(i, dummy)
elif m.fits(fit):
m.owner = fit
if not m.isValidState(m.state):
pyfalog.warning('service.port.eft.importEft: module {} cannot have state {}', m, m.state)
fit.modules.replaceRackPosition(i, m)
for implant in aFit.implants:
fit.implants.append(implant)
for booster in aFit.boosters:
fit.boosters.append(booster)
for drone in aFit.drones.values():
fit.drones.append(drone)
for fighter in aFit.fighters:
fit.fighters.append(fighter)
for cargo in aFit.cargo.values():
fit.cargo.append(cargo)
return fit
def importEftCfg(shipname, lines, iportuser):
"""Handle import from EFT config store file"""
# Check if we have such ship in database, bail if we don't
sMkt = Market.getInstance()
try:
sMkt.getItem(shipname)
except (KeyboardInterrupt, SystemExit):
raise
except:
return [] # empty list is expected
fits = [] # List for fits
fitIndices = [] # List for starting line numbers for each fit
for line in lines:
# Detect fit header
if line[:1] == "[" and line[-1:] == "]":
# Line index where current fit starts
startPos = lines.index(line)
fitIndices.append(startPos)
for i, startPos in enumerate(fitIndices):
# End position is last file line if we're trying to get it for last fit,
# or start position of next fit minus 1
endPos = len(lines) if i == len(fitIndices) - 1 else fitIndices[i + 1]
# Finally, get lines for current fitting
fitLines = lines[startPos:endPos]
try:
# Create fit object
fitobj = Fit()
# Strip square brackets and pull out a fit name
fitobj.name = fitLines[0][1:-1]
# Assign ship to fitting
try:
fitobj.ship = Ship(sMkt.getItem(shipname))
except ValueError:
fitobj.ship = Citadel(sMkt.getItem(shipname))
moduleList = []
for x in range(1, len(fitLines)):
line = fitLines[x]
if not line:
continue
# Parse line into some data we will need
misc = re.match("(Drones|Implant|Booster)_(Active|Inactive)=(.+)", line)
cargo = re.match("Cargohold=(.+)", line)
# 2017/03/27 NOTE: store description from EFT
description = re.match("Description=(.+)", line)
if misc:
entityType = misc.group(1)
entityState = misc.group(2)
entityData = misc.group(3)
if entityType == "Drones":
droneData = re.match("(.+),([0-9]+)", entityData)
# Get drone name and attempt to detect drone number
droneName = droneData.group(1) if droneData else entityData
droneAmount = int(droneData.group(2)) if droneData else 1
# Bail if we can't get item or it's not from drone category
try:
droneItem = sMkt.getItem(droneName, eager="group.category")
except (KeyboardInterrupt, SystemExit):
raise
except:
pyfalog.warning("Cannot get item.")
continue
if droneItem.category.name == "Drone":
# Add drone to the fitting
d = Drone(droneItem)
d.amount = droneAmount
if entityState == "Active":
d.amountActive = droneAmount
elif entityState == "Inactive":
d.amountActive = 0
fitobj.drones.append(d)
elif droneItem.category.name == "Fighter": # EFT saves fighter as drones
ft = Fighter(droneItem)
ft.amount = int(droneAmount) if ft.amount <= ft.fighterSquadronMaxSize else ft.fighterSquadronMaxSize
fitobj.fighters.append(ft)
else:
continue
elif entityType == "Implant":
# Bail if we can't get item or it's not from implant category
try:
implantItem = sMkt.getItem(entityData, eager="group.category")
except (KeyboardInterrupt, SystemExit):
raise
except:
pyfalog.warning("Cannot get item.")
continue
if implantItem.category.name != "Implant":
continue
# Add implant to the fitting
imp = Implant(implantItem)
if entityState == "Active":
imp.active = True
elif entityState == "Inactive":
imp.active = False
fitobj.implants.append(imp)
elif entityType == "Booster":
# Bail if we can't get item or it's not from implant category
try:
boosterItem = sMkt.getItem(entityData, eager="group.category")
except (KeyboardInterrupt, SystemExit):
raise
except:
pyfalog.warning("Cannot get item.")
continue
# All boosters have implant category
if boosterItem.category.name != "Implant":
continue
# Add booster to the fitting
b = Booster(boosterItem)
if entityState == "Active":
b.active = True
elif entityState == "Inactive":
b.active = False
fitobj.boosters.append(b)
# If we don't have any prefixes, then it's a module
elif cargo:
cargoData = re.match("(.+),([0-9]+)", cargo.group(1))
cargoName = cargoData.group(1) if cargoData else cargo.group(1)
cargoAmount = int(cargoData.group(2)) if cargoData else 1
# Bail if we can't get item
try:
item = sMkt.getItem(cargoName)
except (KeyboardInterrupt, SystemExit):
raise
except:
pyfalog.warning("Cannot get item.")
continue
# Add Cargo to the fitting
c = Cargo(item)
c.amount = cargoAmount
fitobj.cargo.append(c)
# 2017/03/27 NOTE: store description from EFT
elif description:
fitobj.notes = description.group(1).replace("|", "\n")
else:
withCharge = re.match("(.+),(.+)", line)
modName = withCharge.group(1) if withCharge else line
chargeName = withCharge.group(2) if withCharge else None
# If we can't get module item, skip it
try:
modItem = sMkt.getItem(modName)
except (KeyboardInterrupt, SystemExit):
raise
except:
pyfalog.warning("Cannot get item.")
continue
# Create module
m = Module(modItem)
# Add subsystems before modules to make sure T3 cruisers have subsystems installed
if modItem.category.name == "Subsystem":
if m.fits(fitobj):
fitobj.modules.append(m)
else:
m.owner = fitobj
# Activate mod if it is activable
if m.isValidState(FittingModuleState.ACTIVE):
m.state = activeStateLimit(m.item)
# Add charge to mod if applicable, on any errors just don't add anything
if chargeName:
try:
chargeItem = sMkt.getItem(chargeName, eager="group.category")
if chargeItem.category.name == "Charge":
m.charge = chargeItem
except (KeyboardInterrupt, SystemExit):
raise
except:
pyfalog.warning("Cannot get item.")
pass
# Append module to fit
moduleList.append(m)
# Recalc to get slot numbers correct for T3 cruisers
sFit = svcFit.getInstance()
sFit.recalc(fitobj)
sFit.fill(fitobj)
for module in moduleList:
if module.fits(fitobj):
fitobj.modules.append(module)
# Append fit to list of fits
fits.append(fitobj)
if iportuser: # NOTE: Send current processing status
processing_notify(
iportuser, IPortUser.PROCESS_IMPORT | IPortUser.ID_UPDATE,
"%s:\n%s" % (fitobj.ship.name, fitobj.name)
)
except (KeyboardInterrupt, SystemExit):
raise
# Skip fit silently if we get an exception
except Exception as e:
pyfalog.error("Caught exception on fit.")
pyfalog.error(e)
pass
return fits
def _importPrepare(lines):
for i in range(len(lines)):
lines[i] = lines[i].strip()
while lines and not lines[0]:
del lines[0]
while lines and not lines[-1]:
del lines[-1]
return lines
mutantHeaderPattern = re.compile('^\[(?P<ref>\d+)\](?P<tail>.*)')
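# Example header line this pattern captures (illustrative): "[1] Abyssal Stasis Webifier"
# yields ref="1" and tail=" Abyssal Stasis Webifier".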
def _importGetMutationData(lines):
data = {}
# Format: {ref: [lines]}
mutaLinesMap = {}
currentMutaRef = None
currentMutaLines = []
consumedIndices = set()
def completeMutaLines():
if currentMutaRef is not None and currentMutaLines:
mutaLinesMap[currentMutaRef] = currentMutaLines
for i, line in enumerate(lines):
m = mutantHeaderPattern.match(line)
# Start and reset at header line
if m:
completeMutaLines()
currentMutaRef = int(m.group('ref'))
currentMutaLines = []
currentMutaLines.append(m.group('tail'))
consumedIndices.add(i)
# Reset at blank line
elif not line:
completeMutaLines()
currentMutaRef = None
currentMutaLines = []
elif currentMutaRef is not None:
currentMutaLines.append(line)
consumedIndices.add(i)
else:
completeMutaLines()
# Clear mutant info from source
for i in sorted(consumedIndices, reverse=True):
del lines[i]
# Run parsing
data = {}
for ref, mutaLines in mutaLinesMap.items():
_, mutaType, mutaAttrs = parseMutant(mutaLines)
data[ref] = (mutaType, mutaAttrs)
return data
def _importSectionIter(lines):
section = Section()
for line in lines:
if not line:
if section.lines:
yield section
section = Section()
else:
section.lines.append(line)
if section.lines:
yield section
def _importCreateFit(lines):
"""Create fit and set top-level entity (ship or citadel)."""
fit = Fit()
header = lines.pop(0)
m = re.match('\[(?P<shipType>[^,]+),\s*(?P<fitName>.+)\]', header)
if not m:
pyfalog.warning('service.port.eft.importEft: corrupted fit header')
raise EftImportError
shipType = m.group('shipType').strip()
fitName = m.group('fitName').strip()
try:
ship = fetchItem(shipType)
try:
fit.ship = Ship(ship)
except ValueError:
fit.ship = Citadel(ship)
fit.name = fitName
except (KeyboardInterrupt, SystemExit):
raise
except:
pyfalog.warning('service.port.eft.importEft: exception caught when parsing header')
raise EftImportError
return fit
def _clearTail(lst):
while lst and lst[-1] is None:
del lst[-1]
class EftImportError(Exception):
"""Exception class emitted and consumed by EFT importer internally."""
...
class Section:
def __init__(self):
self.lines = []
self.itemSpecs = []
self.__itemDataCats = None
@property
def itemDataCats(self):
if self.__itemDataCats is None:
cats = set()
for itemSpec in self.itemSpecs:
if itemSpec is None:
continue
cats.add(itemSpec.item.category.name)
self.__itemDataCats = tuple(sorted(cats))
return self.__itemDataCats
@property
def isModuleRack(self):
return all(i is None or i.isModule for i in self.itemSpecs)
@property
def isImplantRack(self):
return all(i is not None and i.isImplant for i in self.itemSpecs)
@property
def isDroneBay(self):
return all(i is not None and i.isDrone for i in self.itemSpecs)
@property
def isFighterBay(self):
return all(i is not None and i.isFighter for i in self.itemSpecs)
@property
def isCargoHold(self):
return (
all(i is not None and i.isCargo for i in self.itemSpecs) and
not self.isDroneBay and not self.isFighterBay)
class BaseItemSpec:
def __init__(self, typeName):
item = fetchItem(typeName, eagerCat=True)
if item is None:
raise EftImportError
self.typeName = typeName
self.item = item
@property
def isModule(self):
return False
@property
def isImplant(self):
return False
@property
def isDrone(self):
return False
@property
def isFighter(self):
return False
@property
def isCargo(self):
return False
class RegularItemSpec(BaseItemSpec):
def __init__(self, typeName, chargeName=None):
super().__init__(typeName)
self.charge = self.__fetchCharge(chargeName)
self.offline = False
self.mutationIdx = None
def __fetchCharge(self, chargeName):
if chargeName:
charge = fetchItem(chargeName, eagerCat=True)
if not charge or charge.category.name != 'Charge':
charge = None
else:
charge = None
return charge
@property
def isModule(self):
return self.item.category.name in MODULE_CATS
@property
def isImplant(self):
return (
self.item.category.name == 'Implant' and (
'implantness' in self.item.attributes or
'boosterness' in self.item.attributes))
class MultiItemSpec(BaseItemSpec):
def __init__(self, typeName):
super().__init__(typeName)
self.amount = 0
@property
def isDrone(self):
return self.item.category.name == 'Drone'
@property
def isFighter(self):
return self.item.category.name == 'Fighter'
@property
def isCargo(self):
return True
class AbstractFit:
def __init__(self):
# Modules
self.modulesHigh = []
self.modulesMed = []
self.modulesLow = []
self.rigs = []
self.subsystems = []
self.services = []
# Non-modules
self.implants = []
self.boosters = []
self.drones = {} # Format: {item: Drone}
self.fighters = []
self.cargo = {} # Format: {item: Cargo}
# Other stuff
self.mutations = {} # Format: {reference: (mutaplamid item, {attr ID: attr value})}
@property
def __slotContainerMap(self):
return {
FittingSlot.HIGH: self.modulesHigh,
FittingSlot.MED: self.modulesMed,
FittingSlot.LOW: self.modulesLow,
FittingSlot.RIG: self.rigs,
FittingSlot.SUBSYSTEM: self.subsystems,
FittingSlot.SERVICE: self.services}
def getContainerBySlot(self, slotType):
return self.__slotContainerMap.get(slotType)
def getSlotByContainer(self, container):
slotType = None
for k, v in self.__slotContainerMap.items():
if v is container:
slotType = k
break
return slotType
def addModules(self, itemSpecs):
modules = []
slotTypes = set()
for itemSpec in itemSpecs:
if itemSpec is None:
modules.append(None)
continue
m = self.__makeModule(itemSpec)
if m is None:
modules.append(None)
continue
modules.append(m)
slotTypes.add(m.slot)
_clearTail(modules)
# If all the modules have same slot type, put them to appropriate
# container with stubs
if len(slotTypes) == 1:
slotType = tuple(slotTypes)[0]
self.getContainerBySlot(slotType).extend(modules)
# Otherwise, put just modules
else:
for m in modules:
if m is None:
continue
self.getContainerBySlot(m.slot).append(m)
def addModule(self, itemSpec):
if itemSpec is None:
return
m = self.__makeModule(itemSpec)
if m is not None:
self.getContainerBySlot(m.slot).append(m)
def __makeModule(self, itemSpec):
# Mutate item if needed
m = None
if itemSpec.mutationIdx in self.mutations:
mutaItem, mutaAttrs = self.mutations[itemSpec.mutationIdx]
mutaplasmid = getDynamicItem(mutaItem.ID)
if mutaplasmid:
try:
m = Module(mutaplasmid.resultingItem, itemSpec.item, mutaplasmid)
except ValueError:
pass
else:
for attrID, mutator in m.mutators.items():
if attrID in mutaAttrs:
mutator.value = mutaAttrs[attrID]
# If we still don't have item (item is not mutated or we
# failed to construct mutated item), try to make regular item
if m is None:
try:
m = Module(itemSpec.item)
except ValueError:
return None
if itemSpec.charge is not None and m.isValidCharge(itemSpec.charge):
m.charge = itemSpec.charge
if itemSpec.offline and m.isValidState(FittingModuleState.OFFLINE):
m.state = FittingModuleState.OFFLINE
elif m.isValidState(FittingModuleState.ACTIVE):
m.state = activeStateLimit(m.item)
return m
def addImplant(self, itemSpec):
if itemSpec is None:
return
if 'implantness' in itemSpec.item.attributes:
self.implants.append(Implant(itemSpec.item))
elif 'boosterness' in itemSpec.item.attributes:
self.boosters.append(Booster(itemSpec.item))
else:
pyfalog.error('Failed to import implant: {}', itemSpec.typeName)
def addDrone(self, itemSpec):
if itemSpec is None:
return
if itemSpec.item not in self.drones:
self.drones[itemSpec.item] = Drone(itemSpec.item)
self.drones[itemSpec.item].amount += itemSpec.amount
def addFighter(self, itemSpec):
if itemSpec is None:
return
fighter = Fighter(itemSpec.item)
fighter.amount = itemSpec.amount
self.fighters.append(fighter)
def addCargo(self, itemSpec):
if itemSpec is None:
return
if itemSpec.item not in self.cargo:
self.cargo[itemSpec.item] = Cargo(itemSpec.item)
self.cargo[itemSpec.item].amount += itemSpec.amount
def _lineIter(text):
"""Iterate over non-blank lines."""
for line in text.splitlines():
line = line.strip()
if line:
yield line
def parseAdditions(text):
items = []
sMkt = Market.getInstance()
pattern = '^(?P<typeName>{}+?)( x(?P<amount>\d+?))?$'.format(NAME_CHARS)
for line in _lineIter(text):
m = re.match(pattern, line)
if not m:
continue
item = sMkt.getItem(m.group('typeName'))
if item is None:
continue
amount = m.group('amount')
amount = 1 if amount is None else int(amount)
items.append((item, amount))
return items
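# Illustrative call (item names are placeholders): parseAdditions("Hobgoblin II x5\nNanite Repair Paste")
# would return [(<Hobgoblin II item>, 5), (<Nanite Repair Paste item>, 1)], the amount defaulting
# to 1 when no "xN" suffix is present.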
def isValidDroneImport(text):
pattern = 'x\d+$'
for line in _lineIter(text):
if not re.search(pattern, line):
return False, ()
itemData = parseAdditions(text)
if not itemData:
return False, ()
for item, amount in itemData:
if not item.isDrone:
return False, ()
return True, itemData
def isValidFighterImport(text):
pattern = 'x\d+$'
for line in _lineIter(text):
if not re.search(pattern, line):
return False, ()
itemData = parseAdditions(text)
if not itemData:
return False, ()
for item, amount in itemData:
if not item.isFighter:
return False, ()
return True, itemData
def isValidCargoImport(text):
pattern = 'x\d+$'
for line in _lineIter(text):
if not re.search(pattern, line):
return False, ()
itemData = parseAdditions(text)
if not itemData:
return False, ()
for item, amount in itemData:
if item.isAbyssal:
return False, ()
return True, itemData
def isValidImplantImport(text):
pattern = 'x\d+$'
for line in _lineIter(text):
if re.search(pattern, line):
return False, ()
itemData = parseAdditions(text)
if not itemData:
return False, ()
for item, amount in itemData:
if not item.isImplant:
return False, ()
return True, itemData
def isValidBoosterImport(text):
pattern = 'x\d+$'
for line in _lineIter(text):
if re.search(pattern, line):
return False, ()
itemData = parseAdditions(text)
if not itemData:
return False, ()
for item, amount in itemData:
if not item.isBooster:
return False, ()
return True, itemData
|
gpl-3.0
| 3,213,832,208,081,448,000 | 34.113402 | 154 | 0.555255 | false | 4.148094 | false | false | false |
lexxito/monitoring
|
ceilometer/tests/api/v2/test_app.py
|
1
|
10718
|
# -*- encoding: utf-8 -*-
#
# Copyright 2013 IBM Corp.
# Copyright © 2013 Julien Danjou
#
# Author: Julien Danjou <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test basic ceilometer-api app
"""
import json
import os
import mock
import wsme
from ceilometer.api import acl
from ceilometer.api import app
from ceilometer.openstack.common import fileutils
from ceilometer.openstack.common.fixture import config
from ceilometer.openstack.common import gettextutils
from ceilometer import service
from ceilometer.tests.api.v2 import FunctionalTest
from ceilometer.tests import base
from ceilometer.tests import db as tests_db
class TestApp(base.BaseTestCase):
def setUp(self):
super(TestApp, self).setUp()
self.CONF = self.useFixture(config.Config()).conf
def test_keystone_middleware_conf(self):
self.CONF.set_override("auth_protocol", "foottp",
group=acl.OPT_GROUP_NAME)
self.CONF.set_override("auth_version", "v2.0",
group=acl.OPT_GROUP_NAME)
self.CONF.set_override("pipeline_cfg_file",
self.path_get("etc/ceilometer/pipeline.yaml"))
self.CONF.set_override('connection', "log://", group="database")
self.CONF.set_override("auth_uri", None, group=acl.OPT_GROUP_NAME)
api_app = app.setup_app()
self.assertTrue(api_app.auth_uri.startswith('foottp'))
def test_keystone_middleware_parse_conffile(self):
pipeline_conf = self.path_get("etc/ceilometer/pipeline.yaml")
content = "[DEFAULT]\n"\
"pipeline_cfg_file = {0}\n"\
"[{1}]\n"\
"auth_protocol = barttp\n"\
"auth_version = v2.0\n".format(pipeline_conf,
acl.OPT_GROUP_NAME)
tmpfile = fileutils.write_to_tempfile(content=content,
prefix='ceilometer',
suffix='.conf')
service.prepare_service(['ceilometer-api',
'--config-file=%s' % tmpfile])
self.CONF.set_override('connection', "log://", group="database")
api_app = app.setup_app()
self.assertTrue(api_app.auth_uri.startswith('barttp'))
os.unlink(tmpfile)
class TestPecanApp(FunctionalTest):
database_connection = tests_db.MongoDBFakeConnectionUrl()
def test_pecan_extension_guessing_unset(self):
# check Pecan does not assume .jpg is an extension
response = self.app.get(self.PATH_PREFIX + '/meters/meter.jpg')
self.assertEqual(response.content_type, 'application/json')
class TestApiMiddleware(FunctionalTest):
# This doesn't really matter
database_connection = tests_db.MongoDBFakeConnectionUrl()
no_lang_translated_error = 'No lang translated error'
en_US_translated_error = 'en-US translated error'
def _fake_get_localized_message(self, message, user_locale):
if user_locale is None:
return self.no_lang_translated_error
else:
return self.en_US_translated_error
def test_json_parsable_error_middleware_404(self):
response = self.get_json('/invalid_path',
expect_errors=True,
headers={"Accept":
"application/json"}
)
self.assertEqual(response.status_int, 404)
self.assertEqual(response.content_type, "application/json")
self.assertTrue(response.json['error_message'])
response = self.get_json('/invalid_path',
expect_errors=True,
headers={"Accept":
"application/json,application/xml"}
)
self.assertEqual(response.status_int, 404)
self.assertEqual(response.content_type, "application/json")
self.assertTrue(response.json['error_message'])
response = self.get_json('/invalid_path',
expect_errors=True,
headers={"Accept":
"application/xml;q=0.8, \
application/json"}
)
self.assertEqual(response.status_int, 404)
self.assertEqual(response.content_type, "application/json")
self.assertTrue(response.json['error_message'])
response = self.get_json('/invalid_path',
expect_errors=True
)
self.assertEqual(response.status_int, 404)
self.assertEqual(response.content_type, "application/json")
self.assertTrue(response.json['error_message'])
response = self.get_json('/invalid_path',
expect_errors=True,
headers={"Accept":
"text/html,*/*"}
)
self.assertEqual(response.status_int, 404)
self.assertEqual(response.content_type, "application/json")
self.assertTrue(response.json['error_message'])
def test_json_parsable_error_middleware_translation_400(self):
# Ensure translated messages get placed properly into json faults
with mock.patch.object(gettextutils, 'get_localized_message',
side_effect=self._fake_get_localized_message):
response = self.post_json('/alarms', params={'name': 'foobar',
'type': 'threshold'},
expect_errors=True,
headers={"Accept":
"application/json"}
)
self.assertEqual(response.status_int, 400)
self.assertEqual(response.content_type, "application/json")
self.assertTrue(response.json['error_message'])
self.assertEqual(response.json['error_message']['faultstring'],
self.no_lang_translated_error)
def test_xml_parsable_error_middleware_404(self):
response = self.get_json('/invalid_path',
expect_errors=True,
headers={"Accept":
"application/xml,*/*"}
)
self.assertEqual(response.status_int, 404)
self.assertEqual(response.content_type, "application/xml")
self.assertEqual(response.xml.tag, 'error_message')
response = self.get_json('/invalid_path',
expect_errors=True,
headers={"Accept":
"application/json;q=0.8 \
,application/xml"}
)
self.assertEqual(response.status_int, 404)
self.assertEqual(response.content_type, "application/xml")
self.assertEqual(response.xml.tag, 'error_message')
def test_xml_parsable_error_middleware_translation_400(self):
# Ensure translated messages get placed properly into xml faults
with mock.patch.object(gettextutils, 'get_localized_message',
side_effect=self._fake_get_localized_message):
response = self.post_json('/alarms', params={'name': 'foobar',
'type': 'threshold'},
expect_errors=True,
headers={"Accept":
"application/xml,*/*"}
)
self.assertEqual(response.status_int, 400)
self.assertEqual(response.content_type, "application/xml")
self.assertEqual(response.xml.tag, 'error_message')
fault = response.xml.findall('./error/faultstring')
for fault_string in fault:
self.assertEqual(fault_string.text, self.no_lang_translated_error)
def test_best_match_language(self):
# Ensure that we are actually invoking language negotiation
with mock.patch.object(gettextutils, 'get_localized_message',
side_effect=self._fake_get_localized_message):
response = self.post_json('/alarms', params={'name': 'foobar',
'type': 'threshold'},
expect_errors=True,
headers={"Accept":
"application/xml,*/*",
"Accept-Language":
"en-US"}
)
self.assertEqual(response.status_int, 400)
self.assertEqual(response.content_type, "application/xml")
self.assertEqual(response.xml.tag, 'error_message')
fault = response.xml.findall('./error/faultstring')
for fault_string in fault:
self.assertEqual(fault_string.text, self.en_US_translated_error)
def test_translated_then_untranslated_error(self):
resp = self.get_json('/alarms/alarm-id-3', expect_errors=True)
self.assertEqual(resp.status_code, 404)
self.assertEqual(json.loads(resp.body)['error_message']
['faultstring'], "Alarm alarm-id-3 Not Found")
with mock.patch('ceilometer.api.controllers.v2.EntityNotFound') \
as CustomErrorClass:
CustomErrorClass.return_value = wsme.exc.ClientSideError(
"untranslated_error", status_code=404)
resp = self.get_json('/alarms/alarm-id-5', expect_errors=True)
self.assertEqual(resp.status_code, 404)
self.assertEqual(json.loads(resp.body)['error_message']
['faultstring'], "untranslated_error")
|
apache-2.0
| -7,755,225,670,747,542,000 | 46.211454 | 78 | 0.547355 | false | 4.637386 | true | false | false |
danforthcenter/plantcv
|
plantcv/plantcv/visualize/obj_size_ecdf.py
|
1
|
1554
|
# Plot Empirical Cumulative Distribution Function for Object Size
import os
import cv2
import pandas as pd
from plantcv.plantcv import params
from plantcv.plantcv._debug import _debug
from statsmodels.distributions.empirical_distribution import ECDF
from plotnine import ggplot, aes, geom_point, labels, scale_x_log10
def obj_size_ecdf(mask, title=None):
"""
Plot empirical cumulative distribution for object size based on binary mask.
Inputs:
mask = binary mask
title = a custom title for the plot (default=None)
Returns:
fig_ecdf = empirical cumulative distribution function plot
:param mask: numpy.ndarray
:param title: str
:return fig_ecdf: plotnine.ggplot.ggplot
"""
objects, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
areas = [cv2.contourArea(cnt) for cnt in objects]
# Remove objects with areas < 1px
areas = [i for i in areas if i >= 1.0]
ecdf = ECDF(areas, side='right')
ecdf_df = pd.DataFrame({'object area': ecdf.x[1:], 'cumulative probability': ecdf.y[1:]})
# create ecdf plot and apply log-scale for x-axis (areas)
fig_ecdf = (ggplot(data=ecdf_df, mapping=aes(x='object area', y='cumulative probability'))
+ geom_point(size=.1)
+ scale_x_log10())
if title is not None:
fig_ecdf = fig_ecdf + labels.ggtitle(title)
# Plot or print the ecdf
_debug(visual=fig_ecdf,
filename=os.path.join(params.debug_outdir, str(params.device) + '_area_ecdf.png'))
return fig_ecdf
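# Illustrative usage (assumes a binary mask produced earlier in a PlantCV workflow; the
# pcv.visualize entry point is assumed, not shown in this file):
#   from plantcv import plantcv as pcv
#   fig = pcv.visualize.obj_size_ecdf(mask=bin_mask, title="Object size ECDF")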
|
mit
| -1,732,821,960,471,904,500 | 33.533333 | 94 | 0.677606 | false | 3.313433 | false | false | false |
shoopio/shoop
|
shuup_tests/browser/admin/test_picotable.py
|
2
|
6430
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
import pytest
from django.core.urlresolvers import reverse
from shuup.testing.browser_utils import (
click_element, move_to_element, wait_until_appeared,
wait_until_appeared_xpath, wait_until_condition
)
from shuup.testing.factories import (
create_product, create_random_person, get_default_shop,
get_default_supplier
)
from shuup.testing.browser_utils import initialize_admin_browser_test
pytestmark = pytest.mark.skipif(os.environ.get("SHUUP_BROWSER_TESTS", "0") != "1", reason="No browser tests run.")
def create_contacts(shop):
for i in range(0, 200):
contact = create_random_person()
contact.save()
def create_products(shop):
supplier = get_default_supplier()
for i in range(0, 200):
sku = "sku-%d" % i
create_product(sku, shop, supplier, default_price=i)
# used in settings
list_view_settings = {
"contact": {
"page_header": "Contacts",
"default_column_count": 7,
"addable_fields": [(1, "Account Manager")],
"creator": create_contacts,
"test_pagination": True
},
"shop_product": {
"page_header": "Shop Products",
"default_column_count": 7,
"addable_fields": [(13, "Gtin"), (6, "Default Price")],
"creator": create_products,
"test_pagination": False
},
"permission_group": {
"page_header": "Permission Groups",
"default_column_count": 1,
"addable_fields": [(2, "Permissions"), (1, "Id")], # use reverse order due idx
"creator": None,
"test_pagination": False
}
}
@pytest.mark.browser
@pytest.mark.djangodb
@pytest.mark.parametrize("visit_type", list_view_settings.keys())
def test_list_views(browser, admin_user, live_server, settings, visit_type):
shop = get_default_shop()
creator = list_view_settings[visit_type].get("creator", None)
if creator and callable(creator):
creator(shop)
initialize_admin_browser_test(browser, live_server, settings)
_visit_list_view(browser, live_server, visit_type, creator)
if list_view_settings[visit_type].get("test_pagination", False):
_test_pagination(browser)
_set_settings(browser, visit_type, creator)
def _visit_list_view(browser, live_server, list_view_name, creator):
url = reverse("shuup_admin:%s.list" % list_view_name)
browser.visit("%s%s" % (live_server, url))
wait_until_condition(browser, lambda x: x.is_text_present(list_view_settings[list_view_name]["page_header"]))
_check_picotable_item_info(browser, creator)
def _test_pagination(browser):
ellipses = u"\u22ef"
items = _get_pagination_content(browser)
_assert_pagination_content(items, ["Previous", "1", "2", "3", ellipses, "11", "Next"])
_goto_page(browser, 3)
items = _get_pagination_content(browser)
_assert_pagination_content(items, ["Previous", "1", "2", "3", "4", "5", ellipses, "11", "Next"])
_goto_page(browser, 5)
items = _get_pagination_content(browser)
_assert_pagination_content(items, ["Previous", "1", ellipses, "3", "4", "5", "6", "7", ellipses, "11", "Next"])
_goto_page(browser, 7)
items = _get_pagination_content(browser)
_assert_pagination_content(items, ["Previous", "1", ellipses, "5", "6", "7", "8", "9", ellipses, "11", "Next"])
_goto_page(browser, 9)
items = _get_pagination_content(browser)
_assert_pagination_content(items, ["Previous", "1", ellipses, "7", "8", "9", "10", "11", "Next"])
_goto_page(browser, 11)
items = _get_pagination_content(browser)
_assert_pagination_content(items, ["Previous", "1", ellipses, "9", "10", "11", "Next"])
def _get_pagination_content(browser):
pagination = browser.find_by_css(".pagination")[0]
return pagination.find_by_tag("a")
def _assert_pagination_content(items, content):
assert [item.text for item in items] == content
def _goto_page(browser, page_number):
click_element(browser, "a[rel='%s']" % page_number)
element = "li.active a[rel='%s']" % page_number
wait_until_appeared(browser, element)
move_to_element(browser, element)
def _click_item(items, value):
index = [item.text for item in items].index(value)
items[index].click()
time.sleep(0.5) # Wait mithril for a half sec
def _set_settings(browser, setting_type, creator):
used_settings = list_view_settings[setting_type]
default_column_count = used_settings["default_column_count"]
addable_fields = used_settings["addable_fields"]
# not selected by default
for idx, text in addable_fields:
assert not browser.is_text_present(text)
browser.find_by_css(".shuup-toolbar .btn.btn-inverse").first.click()
# select settings
for idx, (index_key, text) in enumerate(addable_fields):
expected_index = default_column_count + 1 + idx
assert browser.is_text_present(text)
browser.find_by_xpath("//ul[@id='source-sortable']/li[%d]/button" % index_key).first.click()
wait_until_appeared_xpath(browser, "//ul[@id='target-sortable']/li[%d]/button" % expected_index)
# save settings
move_to_element(browser, ".shuup-toolbar .btn.btn-success")
browser.find_by_css(".shuup-toolbar .btn.btn-success").first.click()
_check_picotable_item_info(browser, creator)
if creator:
for idx, text in addable_fields:
wait_until_condition(browser, lambda x: x.is_text_present(text))
# go back to settings
browser.find_by_css(".shuup-toolbar .btn.btn-inverse").first.click()
wait_until_appeared_xpath(browser, "//a[contains(text(),'Reset Defaults')]")
# reset to defaults
browser.find_by_xpath("//a[contains(text(),'Reset Defaults')]").click()
# wait
_check_picotable_item_info(browser, creator)
# not selected by default
if creator:
for idx, text in addable_fields:
assert not browser.is_text_present(text)
def _check_picotable_item_info(browser, creator):
if creator:
wait_until_appeared(browser, ".picotable-item-info")
else:
wait_until_condition(browser, condition=lambda x: x.is_text_present("There are no permission groups to show"))
|
agpl-3.0
| -8,168,418,631,158,132,000 | 33.756757 | 118 | 0.655832 | false | 3.324716 | true | false | false |
blab/nextstrain-augur
|
tests/python2/test_fitness_model.py
|
1
|
10095
|
"""
Tests for the `fitness_model` module.
"""
import Bio.Align.AlignInfo
import Bio.Phylo
import Bio.SeqIO
import datetime
import numpy as np
import pytest
import sys
import os
# we assume (and assert) that this script is running from the tests/ directory
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from base.fitness_model import fitness_model
from base.frequencies import KdeFrequencies
from base.process import process
#
# Fixtures
#
# Set precalculated fitness model parameters which are the mean and standard
# deviation for the model.
MODEL_PARAMS = [1.0, 0.05]
@pytest.fixture
def simple_tree():
"""Returns a tree with three sequences: a root and two direct descendents with
one modification each.
"""
# Build simple tree.
tree = Bio.Phylo.read(StringIO("(A,B);"), "newick")
# Build sequences for tree nodes. One leaf has a Koel and epitope site
# mutation. The other leaf has a signal peptide mutation.
root = sequence()
leaf_a = modify_sequence_at_site(root, 145 + 16 - 1)
leaf_b = modify_sequence_at_site(root, 14)
# Assign sequences to nodes.
sequences = (root, leaf_a, leaf_b)
dates = (2012.5, 2013.25, 2014.8)
index = 0
for node in tree.find_clades(order="preorder"):
node.clade = index
node.aa = sequences[index]
node.attr = {"num_date": dates[index]}
index += 1
return tree
@pytest.fixture
def real_tree(multiple_sequence_alignment):
"""Returns a tree built with FastTree from a small set of nucleotide sequences
for H3N2.
"""
# Load the tree.
tree = Bio.Phylo.read("tests/data/fitness_model/H3N2_tree.newick", "newick")
# Make a lookup table of name to sequence.
sequences_by_name = dict([(alignment.name, str(alignment.seq))
for alignment in multiple_sequence_alignment])
# Assign sequences to the tree.
index = 0
for node in tree.find_clades():
if node.name is not None:
node.sequence = np.fromstring(sequences_by_name[node.name], "S1")
# Since sequence names look like "A/Singapore/TT0495/2017",
# convert the last element to a floating point value for
# simplicity.
node.attr = {"num_date": float(node.name.split("/")[-1])}
else:
# Build a "dumb" consensus from the alignment for the
# ancestral node and assign an arbitrary date in the
# past.
summary = Bio.Align.AlignInfo.SummaryInfo(multiple_sequence_alignment)
node.sequence = np.fromstring(str(summary.dumb_consensus(threshold=0.5, ambiguous="N")), "S1")
node.attr = {"num_date": 2014.8}
node.clade = index
index += 1
return tree
@pytest.fixture
def simple_fitness_model(simple_tree):
time_interval = (
datetime.date(2015, 1, 1),
datetime.date(2012, 1, 1)
)
start_date, end_date = process.get_time_interval_as_floats(time_interval)
return fitness_model(
tree=simple_tree,
frequencies=KdeFrequencies(
start_date=start_date,
end_date=end_date,
include_internal_nodes=True
),
predictor_input=["random"],
pivot_spacing=1.0 / 12,
time_interval=time_interval,
epitope_masks_fname="builds/flu/metadata/ha_masks.tsv",
epitope_mask_version="wolf"
)
@pytest.fixture
def real_fitness_model(real_tree, multiple_sequence_alignment):
time_interval = (
datetime.date(2017, 6, 1),
datetime.date(2014, 6, 1)
)
start_date, end_date = process.get_time_interval_as_floats(time_interval)
model = fitness_model(
tree=real_tree,
frequencies=KdeFrequencies(
start_date=start_date,
end_date=end_date,
include_internal_nodes=True
),
predictor_input=["random"],
pivot_spacing=1.0 / 12,
time_interval=time_interval,
epitope_masks_fname="builds/flu/metadata/ha_masks.tsv",
epitope_mask_version="wolf"
)
model.nuc_aln = multiple_sequence_alignment
model.nuc_alphabet = 'ACGT-N'
model.min_mutation_frequency = 0.01
return model
@pytest.fixture
def precalculated_fitness_model(simple_tree):
"""Provides a simple fitness model with precalculated model parameters such that
the model skips learning new parameters.
"""
time_interval = (
datetime.date(2015, 1, 1),
datetime.date(2012, 1, 1)
)
start_date, end_date = process.get_time_interval_as_floats(time_interval)
return fitness_model(
tree=simple_tree,
frequencies=KdeFrequencies(
start_date=start_date,
end_date=end_date,
include_internal_nodes=True
),
predictor_input={"random": MODEL_PARAMS},
pivot_spacing=1.0 / 12,
time_interval=time_interval,
epitope_masks_fname="builds/flu/metadata/ha_masks.tsv",
epitope_mask_version="wolf"
)
@pytest.fixture
def sequence():
"""Returns an amino acid sequence for an ancestral H3N2 virus (Hong Kong 1968).
"""
with open("tests/data/fitness_model/AAK51718.fasta", "r") as handle:
record = list(Bio.SeqIO.parse(handle, "fasta"))[0]
aa = str(record.seq)
return aa
@pytest.fixture
def multiple_sequence_alignment():
"""Returns a multiple sequence alignment containing a small test set of H3N2
sequences.
"""
msa = Bio.AlignIO.read("tests/data/fitness_model/H3N2_alignment.cleaned.fasta", "fasta")
return msa
#
# Utility functions
#
def modify_sequence_at_site(sequence, site):
"""Returns the given sequence with a modified base at the given site.
"""
other_sequence_list = list(sequence)
other_sequence_list[site] = "Z"
return "".join(other_sequence_list)
#
# Tests
#
class TestFitnessModel(object):
def test_prep_nodes(self, simple_fitness_model):
assert not hasattr(simple_fitness_model, "nodes")
assert not any([hasattr(node, "tips") for node in simple_fitness_model.tree.find_clades()])
simple_fitness_model.prep_nodes()
assert hasattr(simple_fitness_model, "nodes")
assert hasattr(simple_fitness_model, "rootnode")
assert hasattr(simple_fitness_model.rootnode, "pivots")
assert all([hasattr(node, "tips") for node in simple_fitness_model.tree.find_clades()])
def test_calc_node_frequencies(self, simple_fitness_model):
simple_fitness_model.prep_nodes()
assert not hasattr(simple_fitness_model, "freq_arrays")
simple_fitness_model.calc_node_frequencies()
assert hasattr(simple_fitness_model, "freq_arrays")
assert len(simple_fitness_model.freq_arrays) > 0
def test_calc_all_predictors(self, simple_fitness_model):
simple_fitness_model.prep_nodes()
simple_fitness_model.calc_node_frequencies()
assert not hasattr(simple_fitness_model, "predictor_arrays")
simple_fitness_model.calc_all_predictors()
assert hasattr(simple_fitness_model, "predictor_arrays")
assert len(simple_fitness_model.predictor_arrays) > 0
def test_standardize_predictors(self, simple_fitness_model):
simple_fitness_model.prep_nodes()
simple_fitness_model.calc_node_frequencies()
simple_fitness_model.calc_all_predictors()
assert not hasattr(simple_fitness_model, "predictor_means")
simple_fitness_model.standardize_predictors()
assert hasattr(simple_fitness_model, "predictor_means")
def test_select_clades_for_fitting(self, simple_fitness_model):
simple_fitness_model.prep_nodes()
simple_fitness_model.calc_node_frequencies()
simple_fitness_model.calc_all_predictors()
simple_fitness_model.standardize_predictors()
assert not hasattr(simple_fitness_model, "fit_clades")
simple_fitness_model.select_clades_for_fitting()
assert hasattr(simple_fitness_model, "fit_clades")
assert len(simple_fitness_model.fit_clades) > 0
def test_learn_parameters(self, real_fitness_model):
real_fitness_model.prep_nodes()
real_fitness_model.calc_node_frequencies()
real_fitness_model.calc_all_predictors()
real_fitness_model.standardize_predictors()
real_fitness_model.select_clades_for_fitting()
assert not hasattr(real_fitness_model, "last_fit")
real_fitness_model.learn_parameters(niter=1, fit_func="clade")
assert hasattr(real_fitness_model, "last_fit")
def test_assign_fitness(self, real_fitness_model):
real_fitness_model.prep_nodes()
real_fitness_model.calc_node_frequencies()
real_fitness_model.calc_all_predictors()
real_fitness_model.standardize_predictors()
real_fitness_model.select_clades_for_fitting()
real_fitness_model.learn_parameters(niter=1, fit_func="clade")
assert not any([hasattr(node, "fitness") for node in real_fitness_model.tree.get_terminals()])
real_fitness_model.assign_fitness()
assert all([hasattr(node, "fitness") for node in real_fitness_model.tree.get_terminals()])
def test_assign_fitness_with_precalculated_params(self, precalculated_fitness_model):
# The fitness model should have model parameters assigned by the user.
assert np.array_equal(precalculated_fitness_model.model_params, np.array([MODEL_PARAMS[0]]))
precalculated_fitness_model.predict()
# After prediction, the model parameters should be unchanged as the
# learning step should be skipped.
assert np.array_equal(precalculated_fitness_model.model_params, np.array([MODEL_PARAMS[0]]))
# Recalculate fitness model parameters which should be different from those given.
precalculated_fitness_model.learn_parameters(niter=1, fit_func="clade")
assert not np.array_equal(precalculated_fitness_model.model_params, np.array([MODEL_PARAMS[0]]))
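# The tests above exercise the fitness_model pipeline in this order (method names
# taken directly from the calls above): prep_nodes -> calc_node_frequencies ->
# calc_all_predictors -> standardize_predictors -> select_clades_for_fitting ->
# learn_parameters -> assign_fitness.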
|
agpl-3.0
| -5,951,141,255,891,297,000 | 35.709091 | 106 | 0.665478 | false | 3.517422 | true | false | false |
lalitkumarj/NEXT-psych
|
next/apps/TupleBanditsPureExploration/Dashboard.py
|
1
|
3313
|
"""
TupleBanditsPureExplorationDashboard
author: Nick Glattard, [email protected]
last updated: 4/24/2015
######################################
TupleBanditsPureExplorationDashboard
"""
import json
import numpy
import numpy.random
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
from next.utils import utils
from next.apps.AppDashboard import AppDashboard
class TupleBanditsPureExplorationDashboard(AppDashboard):
def __init__(self,db,ell):
AppDashboard.__init__(self,db,ell)
def get_app_supported_stats(self):
"""
Returns a list of dictionaries describing the identifier (stat_id) and
necessary params inputs to be used when calling getStats
Expected output (list of dicts, each with fields):
        (string) stat_id : the identifier of the statistic
(string) description : docstring of describing outputs
(list of string) necessary_params : list where each string describes the type of param input like 'alg_label' or 'task'
"""
stat_list = self.get_supported_stats()
stat = {}
stat['stat_id'] = 'most_current_ranking'
stat['description'] = self.most_current_ranking.__doc__
stat['necessary_params'] = ['alg_label']
stat_list.append(stat)
return stat_list
def most_current_ranking(self,app_id,exp_uid,alg_label):
"""
        Description: Returns a ranking of arms in the form of a list of dictionaries, which is convenient for downstream applications
Expected input:
(string) alg_label : must be a valid alg_label contained in alg_list list of dicts
        The 'headers' field contains a list of dictionaries, one per column of the table,
        each with fields 'label' and 'field': 'label' is the heading shown at the top of
        the table, and 'field' is the name of the key in 'data' that the column corresponds to
Expected output (in dict):
plot_type : 'columnar_table'
headers : [ {'label':'Rank','field':'rank'}, {'label':'Target','field':'index'} ]
(list of dicts with fields) data (each dict is a row, each field is the column for that row):
(int) index : index of target
(int) ranking : rank (0 to number of targets - 1) representing belief of being best arm
"""
alg_list,didSucceed,message = self.db.get(app_id+':experiments',exp_uid,'alg_list')
for algorithm in alg_list:
if algorithm['alg_label'] == alg_label:
alg_id = algorithm['alg_id']
alg_uid = algorithm['alg_uid']
list_of_log_dict,didSucceed,message = self.ell.get_logs_with_filter(app_id+':ALG-EVALUATION',{'alg_uid':alg_uid})
list_of_log_dict = sorted(list_of_log_dict, key=lambda k: k['num_reported_answers'] )
print didSucceed, message
item = list_of_log_dict[-1]
return_dict = {}
return_dict['headers'] = [{'label':'Rank','field':'rank'},{'label':'Target','field':'index'},{'label':'Score','field':'score'},{'label':'Precision','field':'precision'}]
return_dict['data'] = item['targets']
return_dict['plot_type'] = 'columnar_table'
return return_dict
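        # Illustrative (hypothetical) shape of the returned dict:
        # {'plot_type': 'columnar_table',
        #  'headers': [{'label': 'Rank', 'field': 'rank'}, ...],
        #  'data': [{'rank': 0, 'index': 3, 'score': 0.9, 'precision': 0.1}, ...]}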
|
apache-2.0
| -895,578,038,832,103,800 | 37.976471 | 278 | 0.635376 | false | 3.981971 | false | false | false |
clubcapra/Ibex
|
src/capra_ui/GpsPointManager/controllers/DialogAddPoint.py
|
1
|
1045
|
__author__ = 'jstcyr'
from PyQt4 import QtGui, QtCore
from ..views import DialogAddPointUi
from ..models.Coordinates import Coordinates
from ..utilities import CoordinatesUtils
class DialogAddPoint(QtGui.QDialog, DialogAddPointUi.Ui_Dialog_add_point):
def __init__(self, parent=None):
super(DialogAddPoint, self).__init__(parent)
self.setupUi(self)
self.parent = parent
self.buttonBox.accepted.connect(self.saveButtonClicked)
def saveButtonClicked(self):
try:
lat = CoordinatesUtils.ConvertToDecimalDegrees(self.lineEdit_latitude.text())
long = CoordinatesUtils.ConvertToDecimalDegrees(self.lineEdit_longitude.text())
coords = Coordinates(self.lineEdit_id.text(), lat, long)
self.parent.coordinates.append(coords)
self.parent.refreshCoordinatesList()
except ValueError as e:
print e
self.setVisible(True)
QtGui.QMessageBox.critical(self, QtCore.QString("Error"), QtCore.QString(e.message))
|
gpl-3.0
| 5,127,037,757,480,092,000 | 39.230769 | 96 | 0.686124 | false | 4.066148 | false | false | false |
genialis/resolwe
|
resolwe/flow/migrations/0023_process_entity_2.py
|
1
|
1150
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-10-01 03:15
from __future__ import unicode_literals
from django.db import migrations
def migrate_flow_collection(apps, schema_editor):
"""Migrate 'flow_collection' field to 'entity_type'."""
Process = apps.get_model("flow", "Process")
DescriptorSchema = apps.get_model("flow", "DescriptorSchema")
for process in Process.objects.all():
process.entity_type = process.flow_collection
process.entity_descriptor_schema = process.flow_collection
if (
process.entity_descriptor_schema is not None
and not DescriptorSchema.objects.filter(
slug=process.entity_descriptor_schema
).exists()
):
raise LookupError(
"Descriptow schema '{}' referenced in 'entity_descriptor_schema' not "
"found.".format(process.entity_descriptor_schema)
)
process.save()
class Migration(migrations.Migration):
dependencies = [
("flow", "0022_process_entity_1"),
]
operations = [migrations.RunPython(migrate_flow_collection)]
|
apache-2.0
| 5,028,491,642,079,892,000 | 30.081081 | 86 | 0.633913 | false | 4.307116 | false | false | false |
pitunti/alfaPitunti
|
plugin.video.alfa/channels/peliculashindu.py
|
1
|
5105
|
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
host = "http://www.peliculashindu.com/"
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(
Item(channel=item.channel, action="lista", title="Top Películas", url=urlparse.urljoin(host, "top")))
itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host))
itemlist.append(Item(channel=item.channel, action="explorar", title="Género", url=urlparse.urljoin(host, "genero")))
itemlist.append(Item(channel=item.channel, action="explorar", title="Listado Alfabético",
url=urlparse.urljoin(host, "alfabetico")))
# itemlist.append(Item(channel=item.channel, action="explorar", title="Listado por año", url=urlparse.urljoin(host, "año")))
itemlist.append(Item(channel=item.channel, action="lista", title="Otras Películas (No Bollywood)",
url=urlparse.urljoin(host, "estrenos")))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=urlparse.urljoin(host, "buscar-")))
return itemlist
def explorar(item):
logger.info()
itemlist = list()
url1 = str(item.url)
data = httptools.downloadpage(host).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
# logger.info("loca :"+url1+" aaa"+data)
if 'genero' in url1:
patron = '<div class="d"><h3>Pel.+?neros<\/h3>(.+?)<\/h3>'
if 'alfabetico' in url1:
patron = '<\/li><\/ul><h3>Pel.+?tico<\/h3>(.+?)<\/h3>'
if 'año' in url1:
patron = '<ul class="anio"><li>(.+?)<\/ul>'
data_explorar = scrapertools.find_single_match(data, patron)
patron_explorar = '<a href="([^"]+)">([^"]+)<\/a>'
matches = scrapertools.find_multiple_matches(data_explorar, patron_explorar)
for scrapedurl, scrapedtitle in matches:
if 'Acci' in scrapedtitle:
scrapedtitle = 'Acción'
if 'Anima' in scrapedtitle:
scrapedtitle = 'Animación'
if 'Fanta' in scrapedtitle:
scrapedtitle = 'Fantasía'
if 'Hist' in scrapedtitle:
scrapedtitle = 'Histórico'
if 'lico Guerra' in scrapedtitle:
scrapedtitle = 'Bélico Guerra'
if 'Ciencia' in scrapedtitle:
scrapedtitle = 'Ciencia Ficción'
itemlist.append(item.clone(action='lista', title=scrapedtitle, url=scrapedurl))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "-")
item.url = item.url + texto
# logger.info("item="+item.url)
if texto != '':
return lista(item)
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) # Eliminamos tabuladores, dobles espacios saltos de linea, etc...
url1 = str(item.url)
if 'http://www.peliculashindu.com/' in url1:
url1 = url1.replace("http://www.peliculashindu.com/", "")
if url1 != 'estrenos':
data = scrapertools.find_single_match(data, '<div id="cuerpo"><div class="iz">.+>Otras')
# data= scrapertools.find_single_match(data,'<div id="cuerpo"><div class="iz">.+>Otras')
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)"' # scrapedurl, scrapedthumbnail, scrapedtitle
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches: # scrapedthumbnail, scrapedtitle in matches:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, action="findvideos",
show=scrapedtitle))
    # Pagination
patron_pag = '<a href="([^"]+)" title="Siguiente .+?">'
paginasig = scrapertools.find_single_match(data, patron_pag)
next_page_url = item.url + paginasig
if paginasig != "":
item.url = next_page_url
itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente", url=next_page_url,
thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
itemlist.extend(servertools.find_video_items(data=data))
logger.info("holaa" + data)
patron_show = '<strong>Ver Pel.+?a([^<]+) online<\/strong>'
show = scrapertools.find_single_match(data, patron_show)
logger.info("holaa" + show)
for videoitem in itemlist:
videoitem.channel = item.channel
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=show))
return itemlist
|
gpl-3.0
| 7,154,396,034,331,516,000 | 39.704 | 128 | 0.630896 | false | 3.301752 | false | false | false |
NCI-GDC/gdcdatamodel
|
docs/bin/schemata_to_graphviz.py
|
1
|
1305
|
import os
from gdcdatamodel import models as m
from graphviz import Digraph
def build_visualization():
print('Building schema documentation...')
# Load directory tree info
bin_dir = os.path.dirname(os.path.realpath(__file__))
root_dir = os.path.join(os.path.abspath(
os.path.join(bin_dir, os.pardir, os.pardir)))
# Create graph
dot = Digraph(
comment="High level graph representation of GDC data model", format='pdf')
dot.graph_attr['rankdir'] = 'RL'
dot.node_attr['fillcolor'] = 'lightblue'
dot.node_attr['style'] = 'filled'
# Add nodes
for node in m.Node.get_subclasses():
label = node.get_label()
        print(label)
dot.node(label, label)
# Add edges
for edge in m.Edge.get_subclasses():
if edge.__dst_class__ == 'Case' and edge.label == 'relates_to':
# Skip case cache edges
continue
src = m.Node.get_subclass_named(edge.__src_class__)
dst = m.Node.get_subclass_named(edge.__dst_class__)
dot.edge(src.get_label(), dst.get_label(), edge.get_label())
gv_path = os.path.join(root_dir, 'docs', 'viz', 'gdc_data_model.gv')
dot.render(gv_path)
print('graphviz output to {}'.format(gv_path))
if __name__ == '__main__':
build_visualization()
|
apache-2.0
| -1,928,741,707,726,198,000 | 29.348837 | 82 | 0.613027 | false | 3.425197 | false | false | false |
Diacamma2/asso
|
diacamma/member/migrations/0002_change_activity.py
|
1
|
1810
|
# -*- coding: utf-8 -*-
'''
Initial django functions
@author: Laurent GAY
@organization: sd-libre.fr
@contact: [email protected]
@copyright: 2015 sd-libre.fr
@license: This file is part of Lucterios.
Lucterios is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Lucterios is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Lucterios. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import unicode_literals
from django.db import migrations, models
from django.utils.translation import ugettext_lazy as _
from diacamma.member.models import Activity, License
def convert_values(*args):
# add default activity
if len(Activity.objects.all()) == 0:
default_act = Activity.objects.create(
name=_("default"), description=_("default"))
else:
default_act = Activity.objects.all()[0]
for lic in License.objects.filter(activity__isnull=True):
lic.activity = default_act
lic.update()
class Migration(migrations.Migration):
dependencies = [
('member', '0001_initial'),
]
operations = [
migrations.RunPython(convert_values),
migrations.AlterField(
model_name='license',
name='activity',
field=models.ForeignKey(
default=None, on_delete=models.deletion.PROTECT, to='member.Activity', verbose_name='activity'),
),
]
|
gpl-3.0
| -43,555,210,291,383,250 | 29.677966 | 112 | 0.695028 | false | 4.022222 | false | false | false |
bintoro/schematics
|
schematics/types/base.py
|
1
|
27989
|
import uuid
import re
import datetime
import decimal
import itertools
import functools
import random
import string
import six
from six import iteritems
from ..exceptions import (
StopValidation, ValidationError, ConversionError, MockCreationError
)
try:
from string import ascii_letters # PY3
except ImportError:
from string import letters as ascii_letters #PY2
try:
basestring #PY2
except NameError:
basestring = str #PY3
try:
unicode #PY2
except:
import codecs
unicode = str #PY3
def utf8_decode(s):
if six.PY3:
s = str(s) #todo: right thing to do?
else:
s = unicode(s, 'utf-8')
return s
def fill_template(template, min_length, max_length):
return template % random_string(
get_value_in(
min_length,
max_length,
padding=len(template) - 2,
required_length=1))
def force_unicode(obj, encoding='utf-8'):
if isinstance(obj, basestring):
if not isinstance(obj, unicode):
#obj = unicode(obj, encoding)
obj = utf8_decode(obj)
elif not obj is None:
#obj = unicode(obj)
obj = utf8_decode(obj)
return obj
def get_range_endpoints(min_length, max_length, padding=0, required_length=0):
if min_length is None and max_length is None:
min_length = 0
max_length = 16
elif min_length is None:
min_length = 0
elif max_length is None:
max_length = max(min_length * 2, 16)
if padding:
max_length = max_length - padding
min_length = max(min_length - padding, 0)
if max_length < required_length:
raise MockCreationError(
'This field is too short to hold the mock data')
min_length = max(min_length, required_length)
return min_length, max_length
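# For instance (values chosen only for illustration):
#   get_range_endpoints(None, None)          -> (0, 16)
#   get_range_endpoints(4, None)             -> (4, 16), since max defaults to max(min * 2, 16)
#   get_range_endpoints(None, 20, padding=3) -> (0, 17), padding shrinks the usable range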
def get_value_in(min_length, max_length, padding=0, required_length=0):
return random.randint(
*get_range_endpoints(min_length, max_length, padding, required_length))
def random_string(length, chars=ascii_letters + string.digits):
return ''.join(random.choice(chars) for _ in range(length))
_last_position_hint = -1
_next_position_hint = itertools.count()
class TypeMeta(type):
"""
Meta class for BaseType. Merges `MESSAGES` dict and accumulates
validator methods.
"""
def __new__(mcs, name, bases, attrs):
messages = {}
validators = []
for base in reversed(bases):
if hasattr(base, 'MESSAGES'):
messages.update(base.MESSAGES)
if hasattr(base, "_validators"):
validators.extend(base._validators)
if 'MESSAGES' in attrs:
messages.update(attrs['MESSAGES'])
attrs['MESSAGES'] = messages
for attr_name, attr in iteritems(attrs):
if attr_name.startswith("validate_"):
validators.append(attr)
attrs["_validators"] = validators
return type.__new__(mcs, name, bases, attrs)
class BaseType(TypeMeta('BaseTypeBase', (object, ), {})):
"""A base class for Types in a Schematics model. Instances of this
class may be added to subclasses of ``Model`` to define a model schema.
Validators that need to access variables on the instance
    can be defined by implementing methods whose names start with ``validate_``
and accept one parameter (in addition to ``self``)
:param required:
Invalidate field when value is None or is not supplied. Default:
False.
:param default:
When no data is provided default to this value. May be a callable.
Default: None.
:param serialized_name:
The name of this field defaults to the class attribute used in the
model. However if the field has another name in foreign data set this
argument. Serialized data will use this value for the key name too.
:param deserialize_from:
A name or list of named fields for which foreign data sets are
        searched to provide a value for the given field. This only affects
inbound data.
:param choices:
A list of valid choices. This is the last step of the validator
chain.
:param validators:
A list of callables. Each callable receives the value after it has been
converted into a rich python type. Default: []
:param serialize_when_none:
Dictates if the field should appear in the serialized data even if the
value is None. Default: True
:param messages:
Override the error messages with a dict. You can also do this by
subclassing the Type and defining a `MESSAGES` dict attribute on the
class. A metaclass will merge all the `MESSAGES` and override the
resulting dict with instance level `messages` and assign to
`self.messages`.
"""
MESSAGES = {
'required': u"This field is required.",
'choices': u"Value must be one of {0}.",
}
def __init__(self, required=False, default=None, serialized_name=None,
choices=None, validators=None, deserialize_from=None,
serialize_when_none=None, messages=None):
super(BaseType, self).__init__()
self.required = required
self._default = default
self.serialized_name = serialized_name
if choices and not isinstance(choices, (list, tuple)):
raise TypeError('"choices" must be a list or tuple')
self.choices = choices
self.deserialize_from = deserialize_from
self.validators = [functools.partial(v, self) for v in self._validators]
if validators:
self.validators += validators
self.serialize_when_none = serialize_when_none
self.messages = dict(self.MESSAGES, **(messages or {}))
self._position_hint = next(_next_position_hint) # For ordering of fields
def __call__(self, value):
return self.to_native(value)
def _mock(self, context=None):
return None
@property
def default(self):
default = self._default
if callable(self._default):
default = self._default()
return default
def to_primitive(self, value, context=None):
"""Convert internal data to a value safe to serialize.
"""
return value
def to_native(self, value, context=None):
"""
Convert untrusted data to a richer Python construct.
"""
return value
def allow_none(self):
if hasattr(self, 'owner_model'):
return self.owner_model.allow_none(self)
else:
return self.serialize_when_none
def validate(self, value):
"""
Validate the field and return a clean value or raise a
``ValidationError`` with a list of errors raised by the validation
chain. Stop the validation process from continuing through the
validators by raising ``StopValidation`` instead of ``ValidationError``.
"""
errors = []
for validator in self.validators:
try:
validator(value)
except ValidationError as exc:
errors.extend(exc.messages)
if isinstance(exc, StopValidation):
break
if errors:
raise ValidationError(errors)
def validate_required(self, value):
if self.required and value is None:
raise ValidationError(self.messages['required'])
def validate_choices(self, value):
if self.choices is not None:
if value not in self.choices:
raise ValidationError(self.messages['choices']
.format(unicode(self.choices)))
def mock(self, context=None):
if not self.required and not random.choice([True, False]):
return self.default
if self.choices is not None:
return random.choice(self.choices)
return self._mock(context)
class UUIDType(BaseType):
"""A field that stores a valid UUID value.
"""
MESSAGES = {
'convert': u"Couldn't interpret '{0}' value as UUID.",
}
def _mock(self, context=None):
return uuid.uuid4()
def to_native(self, value, context=None):
if not isinstance(value, uuid.UUID):
try:
value = uuid.UUID(value)
except (AttributeError, TypeError, ValueError):
raise ConversionError(self.messages['convert'].format(value))
return value
def to_primitive(self, value, context=None):
return str(value)
class IPv4Type(BaseType):
""" A field that stores a valid IPv4 address """
def _mock(self, context=None):
return '.'.join(str(random.randrange(256)) for _ in range(4))
@classmethod
def valid_ip(cls, addr):
try:
addr = addr.strip().split(".")
except AttributeError:
return False
try:
return len(addr) == 4 and all(0 <= int(octet) < 256 for octet in addr)
except ValueError:
return False
def validate(self, value):
"""
Make sure the value is a IPv4 address:
http://stackoverflow.com/questions/9948833/validate-ip-address-from-list
"""
if not IPv4Type.valid_ip(value):
error_msg = 'Invalid IPv4 address'
raise ValidationError(error_msg)
return True
class StringType(BaseType):
"""A unicode string field. Default minimum length is one. If you want to
accept empty strings, init with ``min_length`` 0.
"""
allow_casts = (int, str)
MESSAGES = {
'convert': u"Couldn't interpret '{0}' as string.",
'max_length': u"String value is too long.",
'min_length': u"String value is too short.",
'regex': u"String value did not match validation regex.",
}
def __init__(self, regex=None, max_length=None, min_length=None, **kwargs):
self.regex = regex
self.max_length = max_length
self.min_length = min_length
super(StringType, self).__init__(**kwargs)
def _mock(self, context=None):
return random_string(get_value_in(self.min_length, self.max_length))
def to_native(self, value, context=None):
if value is None:
return None
if not isinstance(value, unicode):
if isinstance(value, self.allow_casts):
if not isinstance(value, str):
value = str(value)
value = utf8_decode(value) #unicode(value, 'utf-8')
else:
raise ConversionError(self.messages['convert'].format(value))
return value
def validate_length(self, value):
len_of_value = len(value) if value else 0
if self.max_length is not None and len_of_value > self.max_length:
raise ValidationError(self.messages['max_length'])
if self.min_length is not None and len_of_value < self.min_length:
raise ValidationError(self.messages['min_length'])
def validate_regex(self, value):
if self.regex is not None and re.match(self.regex, value) is None:
raise ValidationError(self.messages['regex'])
class URLType(StringType):
"""A field that validates input as an URL.
If verify_exists=True is passed the validate function will make sure
the URL makes a valid connection.
"""
MESSAGES = {
'invalid_url': u"Not a well formed URL.",
'not_found': u"URL does not exist.",
}
URL_REGEX = re.compile(
r'^https?://'
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,2000}[A-Z0-9])?\.)+[A-Z]{2,63}\.?|'
r'localhost|'
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
r'(?::\d+)?'
r'(?:/?|[/?]\S+)$', re.IGNORECASE
)
def __init__(self, verify_exists=False, **kwargs):
self.verify_exists = verify_exists
super(URLType, self).__init__(**kwargs)
def _mock(self, context=None):
return fill_template('http://a%s.ZZ', self.min_length,
self.max_length)
def validate_url(self, value):
if not URLType.URL_REGEX.match(value):
raise StopValidation(self.messages['invalid_url'])
if self.verify_exists:
from six.moves import urllib
try:
                request = urllib.request.Request(value)
                urllib.request.urlopen(request)
except Exception:
raise StopValidation(self.messages['not_found'])
class EmailType(StringType):
"""A field that validates input as an E-Mail-Address.
"""
MESSAGES = {
'email': u"Not a well formed email address."
}
EMAIL_REGEX = re.compile(
# dot-atom
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*"
# quoted-string
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016'
r'-\177])*"'
# domain
r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,2000}[A-Z0-9])?\.)+[A-Z]{2,63}\.?$',
re.IGNORECASE
)
def _mock(self, context=None):
return fill_template('%[email protected]', self.min_length,
self.max_length)
def validate_email(self, value):
if not EmailType.EMAIL_REGEX.match(value):
raise StopValidation(self.messages['email'])
class NumberType(BaseType):
"""A number field.
"""
MESSAGES = {
'number_coerce': u"Value '{0}' is not {1}.",
'number_min': u"{0} value should be greater than {1}.",
'number_max': u"{0} value should be less than {1}.",
}
def __init__(self, number_class, number_type,
min_value=None, max_value=None, **kwargs):
self.number_class = number_class
self.number_type = number_type
self.min_value = min_value
self.max_value = max_value
super(NumberType, self).__init__(**kwargs)
def _mock(self, context=None):
return get_value_in(self.min_value, self.max_value)
def to_native(self, value, context=None):
try:
value = self.number_class(value)
except (TypeError, ValueError):
raise ConversionError(self.messages['number_coerce']
.format(value, self.number_type.lower()))
return value
def validate_is_a_number(self, value):
try:
self.number_class(value)
except (TypeError, ValueError):
raise ConversionError(self.messages['number_coerce']
.format(value, self.number_type.lower()))
def validate_range(self, value):
if self.min_value is not None and value < self.min_value:
raise ValidationError(self.messages['number_min']
.format(self.number_type, self.min_value))
if self.max_value is not None and value > self.max_value:
raise ValidationError(self.messages['number_max']
.format(self.number_type, self.max_value))
return value
class IntType(NumberType):
"""A field that validates input as an Integer
"""
def __init__(self, *args, **kwargs):
super(IntType, self).__init__(number_class=int,
number_type='Int',
*args, **kwargs)
class LongType(NumberType):
"""A field that validates input as a Long
"""
def __init__(self, *args, **kwargs):
try:
number_class = long #PY2
except NameError:
number_class = int #PY3
super(LongType, self).__init__(number_class=number_class,
number_type='Long',
*args, **kwargs)
class FloatType(NumberType):
"""A field that validates input as a Float
"""
def __init__(self, *args, **kwargs):
super(FloatType, self).__init__(number_class=float,
number_type='Float',
*args, **kwargs)
class DecimalType(BaseType):
"""A fixed-point decimal number field.
"""
MESSAGES = {
'number_coerce': u"Number '{0}' failed to convert to a decimal.",
'number_min': u"Value should be greater than {0}.",
'number_max': u"Value should be less than {0}.",
}
def __init__(self, min_value=None, max_value=None, **kwargs):
self.min_value, self.max_value = min_value, max_value
super(DecimalType, self).__init__(**kwargs)
def _mock(self, context=None):
return get_value_in(self.min_value, self.max_value)
def to_primitive(self, value, context=None):
return unicode(value)
def to_native(self, value, context=None):
if not isinstance(value, decimal.Decimal):
if not isinstance(value, basestring):
value = unicode(value)
try:
value = decimal.Decimal(value)
except (TypeError, decimal.InvalidOperation):
raise ConversionError(self.messages['number_coerce'].format(value))
return value
def validate_range(self, value):
if self.min_value is not None and value < self.min_value:
error_msg = self.messages['number_min'].format(self.min_value)
raise ValidationError(error_msg)
if self.max_value is not None and value > self.max_value:
error_msg = self.messages['number_max'].format(self.max_value)
raise ValidationError(error_msg)
return value
class HashType(BaseType):
MESSAGES = {
'hash_length': u"Hash value is wrong length.",
'hash_hex': u"Hash value is not hexadecimal.",
}
def _mock(self, context=None):
return random_string(self.LENGTH, string.hexdigits)
def to_native(self, value, context=None):
if len(value) != self.LENGTH:
raise ValidationError(self.messages['hash_length'])
try:
int(value, 16)
except ValueError:
raise ConversionError(self.messages['hash_hex'])
return value
class MD5Type(HashType):
"""A field that validates input as resembling an MD5 hash.
"""
LENGTH = 32
class SHA1Type(HashType):
"""A field that validates input as resembling an SHA1 hash.
"""
LENGTH = 40
class BooleanType(BaseType):
"""A boolean field type. In addition to ``True`` and ``False``, coerces these
values:
+ For ``True``: "True", "true", "1"
+ For ``False``: "False", "false", "0"
"""
TRUE_VALUES = ('True', 'true', '1')
FALSE_VALUES = ('False', 'false', '0')
def _mock(self, context=None):
return random.choice([True, False])
def to_native(self, value, context=None):
if isinstance(value, basestring):
if value in self.TRUE_VALUES:
value = True
elif value in self.FALSE_VALUES:
value = False
if isinstance(value, int) and value in [0, 1]:
value = bool(value)
if not isinstance(value, bool):
raise ConversionError(u"Must be either true or false.")
return value
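    # Coercion sketch (values are illustrative): BooleanType().to_native('1') -> True,
    # to_native('false') -> False, to_native(0) -> False; any other non-bool value
    # raises ConversionError.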
class DateType(BaseType):
"""Defaults to converting to and from ISO8601 date values.
"""
SERIALIZED_FORMAT = '%Y-%m-%d'
MESSAGES = {
'parse': u"Could not parse {0}. Should be ISO8601 (YYYY-MM-DD).",
}
def __init__(self, **kwargs):
self.serialized_format = self.SERIALIZED_FORMAT
super(DateType, self).__init__(**kwargs)
def _mock(self, context=None):
return datetime.datetime(
year=random.randrange(600) + 1900,
month=random.randrange(12) + 1,
day=random.randrange(28) + 1,
)
def to_native(self, value, context=None):
if isinstance(value, datetime.date):
return value
try:
return datetime.datetime.strptime(value, self.serialized_format).date()
except (ValueError, TypeError):
raise ConversionError(self.messages['parse'].format(value))
def to_primitive(self, value, context=None):
return value.strftime(self.serialized_format)
class DateTimeType(BaseType):
"""Defaults to converting to and from ISO8601 datetime values.
:param formats:
A value or list of values suitable for ``datetime.datetime.strptime``
parsing. Default: `('%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S',
'%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%SZ')`
:param serialized_format:
The output format suitable for Python ``strftime``. Default: ``'%Y-%m-%dT%H:%M:%S.%f'``
"""
DEFAULT_FORMATS = (
'%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S',
'%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%SZ',
)
SERIALIZED_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'
MESSAGES = {
'parse_formats': u'Could not parse {0}. Valid formats: {1}',
'parse': u"Could not parse {0}. Should be ISO8601.",
}
def __init__(self, formats=None, serialized_format=None, **kwargs):
"""
"""
if isinstance(formats, basestring):
formats = [formats]
if formats is None:
formats = self.DEFAULT_FORMATS
if serialized_format is None:
serialized_format = self.SERIALIZED_FORMAT
self.formats = formats
self.serialized_format = serialized_format
super(DateTimeType, self).__init__(**kwargs)
def _mock(self, context=None):
return datetime.datetime(
year=random.randrange(600) + 1900,
month=random.randrange(12) + 1,
day=random.randrange(28) + 1,
hour=random.randrange(24),
minute=random.randrange(60),
second=random.randrange(60),
microsecond=random.randrange(1000000),
)
def to_native(self, value, context=None):
if isinstance(value, datetime.datetime):
return value
for fmt in self.formats:
try:
return datetime.datetime.strptime(value, fmt)
except (ValueError, TypeError):
continue
if self.formats == self.DEFAULT_FORMATS:
message = self.messages['parse'].format(value)
else:
message = self.messages['parse_formats'].format(
value, ", ".join(self.formats)
)
raise ConversionError(message)
def to_primitive(self, value, context=None):
if callable(self.serialized_format):
return self.serialized_format(value)
return value.strftime(self.serialized_format)
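    # Usage sketch (input value is an assumption): DateTimeType().to_native('2015-06-01T12:30:00')
    # yields datetime(2015, 6, 1, 12, 30), and to_primitive serializes it back using
    # '%Y-%m-%dT%H:%M:%S.%f' unless another serialized_format is supplied.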
class GeoPointType(BaseType):
"""A list storing a latitude and longitude.
"""
def _mock(self, context=None):
return (random.randrange(-90, 90), random.randrange(-90, 90))
def to_native(self, value, context=None):
"""Make sure that a geo-value is of type (x, y)
"""
if not len(value) == 2:
raise ValueError('Value must be a two-dimensional point')
if isinstance(value, dict):
for val in value.values():
if not isinstance(val, (float, int)):
raise ValueError('Both values in point must be float or int')
elif isinstance(value, (list, tuple)):
if (not isinstance(value[0], (float, int)) or
not isinstance(value[1], (float, int))):
raise ValueError('Both values in point must be float or int')
else:
raise ValueError('GeoPointType can only accept tuples, lists, or dicts')
return value
class MultilingualStringType(BaseType):
"""
A multilanguage string field, stored as a dict with {'locale': 'localized_value'}.
Minimum and maximum lengths apply to each of the localized values.
At least one of ``default_locale`` or ``context['locale']`` must be defined
when calling ``.to_primitive``.
"""
allow_casts = (int, str)
MESSAGES = {
'convert': u"Couldn't interpret value as string.",
'max_length': u"String value in locale {0} is too long.",
'min_length': u"String value in locale {0} is too short.",
'locale_not_found': u"No requested locale was available.",
'no_locale': u"No default or explicit locales were given.",
'regex_locale': u"Name of locale {0} did not match validation regex.",
'regex_localized': u"String value in locale {0} did not match validation regex.",
}
LOCALE_REGEX = r'^[a-z]{2}(:?_[A-Z]{2})?$'
def __init__(self, regex=None, max_length=None, min_length=None,
default_locale=None, locale_regex=LOCALE_REGEX, **kwargs):
self.regex = re.compile(regex) if regex else None
self.max_length = max_length
self.min_length = min_length
self.default_locale = default_locale
self.locale_regex = re.compile(locale_regex) if locale_regex else None
super(MultilingualStringType, self).__init__(**kwargs)
def _mock(self, context=None):
return random_string(get_value_in(self.min_length, self.max_length))
def to_native(self, value, context=None):
"""Make sure a MultilingualStringType value is a dict or None."""
if not (value is None or isinstance(value, dict)):
raise ValueError('Value must be a dict or None')
return value
def to_primitive(self, value, context=None):
"""
Use a combination of ``default_locale`` and ``context['locale']`` to return
the best localized string.
"""
if value is None:
return None
context_locale = None
if context is not None and 'locale' in context:
context_locale = context['locale']
# Build a list of all possible locales to try
possible_locales = []
for locale in (context_locale, self.default_locale):
if not locale:
continue
if isinstance(locale, basestring):
possible_locales.append(locale)
else:
possible_locales.extend(locale)
if not possible_locales:
raise ConversionError(self.messages['no_locale'])
for locale in possible_locales:
if locale in value:
localized = value[locale]
break
else:
raise ConversionError(self.messages['locale_not_found'])
if not isinstance(localized, unicode):
if isinstance(localized, self.allow_casts):
if not isinstance(localized, str):
localized = str(localized)
#localized = unicode(localized, 'utf-8')
localized = utf8_decode(localized)
else:
raise ConversionError(self.messages['convert'])
return localized
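        # Resolution sketch (locales are assumptions): with
        # value = {'en': 'hello', 'fr': 'bonjour'} and context = {'locale': 'fr'},
        # to_primitive returns 'bonjour'; without a context locale it falls back to
        # default_locale, and raises ConversionError if neither locale is present.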
def validate_length(self, value):
for locale, localized in value.items():
len_of_value = len(localized) if localized else 0
if self.max_length is not None and len_of_value > self.max_length:
raise ValidationError(self.messages['max_length'].format(locale))
if self.min_length is not None and len_of_value < self.min_length:
raise ValidationError(self.messages['min_length'].format(locale))
def validate_regex(self, value):
if self.regex is None and self.locale_regex is None:
return
for locale, localized in value.items():
if self.regex is not None and self.regex.match(localized) is None:
raise ValidationError(
self.messages['regex_localized'].format(locale))
if self.locale_regex is not None and self.locale_regex.match(locale) is None:
raise ValidationError(
self.messages['regex_locale'].format(locale))
|
bsd-3-clause
| -4,595,678,198,879,457,300 | 30.342665 | 95 | 0.586016 | false | 4.09855 | false | false | false |
dblN/misc
|
utils.py
|
1
|
3046
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from keras.layers import Dense
from keras.preprocessing.image import apply_transform
import matplotlib.pyplot as plt
def take_glimpses(image, location, sizes):
glimpses = []
resize = sizes[0]
for size in sizes:
glimpse = tf.image.extract_glimpse(image, size=size, offsets=location,
normalized=True, centered=True, uniform_noise=False)
glimpses += [tf.image.resize_images(glimpse, resize)]
return glimpses
def glimpse_network(image, location, sizes, activation="relu",
glimpse_num_features=128, location_num_features=128, output_dim=256):
assert len(sizes) == 3
with tf.variable_scope("glimpse_network"):
glimpses = []
resize = sizes[0]
for size in sizes:
glimpse = tf.image.extract_glimpse(image, size=size, offsets=location, uniform_noise=False,
normalized=True, centered=True)
glimpses += [tf.image.resize_images(glimpse, resize[0], resize[1])]
glimpse = tf.concat(-1, glimpses)
glimpse = tf.reshape(glimpse, (-1, np.prod(resize) * len(sizes)))
glimpse_feature = Dense(glimpse_num_features, activation=activation)(glimpse)
location_feature = Dense(location_num_features, activation=activation)(location)
feature = Dense(output_dim, activation=activation)(glimpse_feature + location_feature)
return feature, glimpses
def accuracy_score(y_preds, y_true):
return np.sum((y_preds == y_true).astype(np.float32)) / y_preds.shape[0]
def translate(batch_x, size=(128, 128)):
"""Make translated mnist"""
height = batch_x.shape[1]
width = batch_x.shape[2]
X = np.zeros((batch_x.shape[0],) + size + (1,), dtype=batch_x.dtype)
X[:, :height, :width, :] = batch_x
for i, x in enumerate(X[:]):
tx = np.random.uniform(-(size[1] - width), 0)
ty = np.random.uniform(-(size[0] - height), 0)
translation_matrix = np.asarray([
[1, 0, tx],
[0, 1, ty],
[0, 0, 1]
], dtype=batch_x.dtype)
X[i, :, :, :] = apply_transform(x, translation_matrix, channel_index=2, fill_mode="nearest", cval=0.)
return X
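# Sketch of intended use (shapes are assumptions): given an MNIST batch_x of shape
# (N, 28, 28, 1), translate(batch_x) returns an (N, 128, 128, 1) array in which each
# digit has been shifted to a random position on the larger canvas.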
def plot_glimpse(images, locations, name="glimpse.png"):
image = images[0]
location = locations[:, 0, :]
fig = plt.figure()
plt.imshow(image, cmap=plt.get_cmap("gray"))
plt.plot(location[:, 0], location[:, 1])
for i, (x, y) in enumerate(location):
plt.annotate("t=%d" % i, xy=(x, y), xytext=(-10, 10),
textcoords="offset points", ha="right", va="bottom",
bbox=dict(boxstyle="round, pad=0.5", fc="white", alpha=0.5),
arrowprops=dict(arrowstyle="->", connectionstyle="arc3, rad=0"))
plt.savefig(name)
plt.gcf().clear()
plt.close("all")
|
mit
| -7,068,430,309,194,083,000 | 34.418605 | 109 | 0.602101 | false | 3.332604 | false | false | false |
banacer/lab221
|
building_control/Python/Pubsub.py
|
1
|
1316
|
import pika
import logging
import sys
__mqtt_host = '172.26.50.120'
__mqtt_port = 1883
def printit(ch, method, properties, body):
"""
prints the body message. It's the default callback method
:param ch: keep null
:param method: keep null
:param properties: keep null
:param body: the message
:return:
"""
print(" [x] %r" % body)
def sub(queue_name,callback=printit):
"""
Connects to queue
:param queue_name: the queue to subscribe to
:param callback: optional callback function
:return:
"""
connection = pika.BlockingConnection(pika.ConnectionParameters(host=__mqtt_host))
channel = connection.channel()
channel.queue_declare(queue=queue_name)
channel.basic_consume(callback,queue=queue_name,no_ack=True)
logging.info(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
def pub(queue_name,message):
"""
publish to queue
:param queue_name: queue name
:param message: message
"""
connection = pika.BlockingConnection(pika.ConnectionParameters(host=__mqtt_host))
channel = connection.channel()
channel.queue_declare(queue=queue_name)
channel.basic_publish(exchange='',routing_key=queue_name,body=message)
logging.info(" [x] Sent %s" % message)
connection.close()
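# Minimal usage sketch (queue name and message are assumptions):
#   pub('sensors', 'temperature:21.5')   # push one message onto the queue
#   sub('sensors')                       # block and print every incoming message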
|
mit
| 2,891,826,054,302,727,000 | 28.931818 | 85 | 0.681611 | false | 3.615385 | false | false | false |
BL-Labs/jokedbapp
|
jokedbapp/tests/test_load_omeka.py
|
1
|
1534
|
import unittest
from models import *
from utils.handle_transcript import TranscriptionParser, OmekaXML, om
from test_data import TRANSCRIPTIONS
OMEKA_COLLECTION = "test_omeka_collection.xml"
# User, Transcription, Joke, Picture
class TestUserClass(unittest.TestCase):
def setUp(self):
self.TP = TranscriptionParser()
self.o = OmekaXML()
from database import init_test_db
self.db_session = init_test_db()
self.u = User('admin', 'admin@localhost', 'admin', 'saltypasswordhash')
self.db_session.add(self.u)
self.db_session.commit()
def test_u01_user_create_regular(self):
u = User('ben', 'regular@localhost', 'transcriber', 'saltypasswordhash')
self.db_session.add(u)
self.db_session.commit()
def test_u02_user_create_publisher(self):
u = User('bob', 'bob@localhost', 'publisher', 'saltypasswordhash')
self.db_session.add(u)
self.db_session.commit()
def test_u03_find_an_admin(self):
admin = User.query.filter(User.role == 'admin').first()
self.assertEquals(admin.name, 'admin')
def test_u04_test_is_admin(self):
admin = User.query.filter(User.role == 'admin').first()
self.assertEquals(admin.is_admin(), True)
def test_u05_user_alter_email(self):
self.u.email = 'newadminemail'
self.db_session.add(self.u)
self.db_session.commit()
# now query for it
email_match = User.query.filter(User.email == "newadminemail").first()
self.assertEquals(email_match.name, 'admin')
def tearDown(self):
self.db_session.remove()
|
mit
| 3,395,962,392,050,536,000 | 29.078431 | 77 | 0.690352 | false | 3.175983 | true | false | false |
pobear/django-xadmin
|
xadmin/plugins/actions.py
|
1
|
10988
|
from django import forms
from django.core.exceptions import PermissionDenied
from django.db import router
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from django.template.response import TemplateResponse
# from django.utils.datastructures import SortedDict
from collections import OrderedDict as SortedDict
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _, ungettext
from django.utils.text import capfirst
from xadmin.sites import site
from xadmin.util import model_format_dict, get_deleted_objects, model_ngettext
from xadmin.views import BaseAdminPlugin, ListAdminView
from xadmin.views.base import filter_hook, ModelAdminView
ACTION_CHECKBOX_NAME = '_selected_action'
checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False)
def action_checkbox(obj):
return checkbox.render(ACTION_CHECKBOX_NAME, force_unicode(obj.pk))
action_checkbox.short_description = mark_safe(
'<input type="checkbox" id="action-toggle" />')
action_checkbox.allow_tags = True
action_checkbox.allow_export = False
action_checkbox.is_column = False
class BaseActionView(ModelAdminView):
action_name = None
description = None
icon = 'fa fa-tasks'
model_perm = 'change'
@classmethod
def has_perm(cls, list_view):
return list_view.get_model_perms()[cls.model_perm]
def init_action(self, list_view):
self.list_view = list_view
self.admin_site = list_view.admin_site
@filter_hook
def do_action(self, queryset):
pass
class DeleteSelectedAction(BaseActionView):
action_name = "delete_selected"
description = _(u'Delete selected %(verbose_name_plural)s')
delete_confirmation_template = None
delete_selected_confirmation_template = None
delete_models_batch = True
model_perm = 'delete'
icon = 'fa fa-times'
@filter_hook
def delete_models(self, queryset):
n = queryset.count()
if n:
if self.delete_models_batch:
queryset.delete()
else:
for obj in queryset:
obj.delete()
self.message_user(_("Successfully deleted %(count)d %(items)s.") % {
"count": n, "items": model_ngettext(self.opts, n)
}, 'success')
@filter_hook
def do_action(self, queryset):
# Check that the user has delete permission for the actual model
if not self.has_delete_permission():
raise PermissionDenied
using = router.db_for_write(self.model)
# Populate deletable_objects, a data structure of all related objects that
# will also be deleted.
deletable_objects, perms_needed, protected = get_deleted_objects(
queryset, self.opts, self.user, self.admin_site, using)
# The user has already confirmed the deletion.
# Do the deletion and return a None to display the change list view again.
if self.request.POST.get('post'):
if perms_needed:
raise PermissionDenied
self.delete_models(queryset)
# Return None to display the change list page again.
return None
if len(queryset) == 1:
objects_name = force_unicode(self.opts.verbose_name)
else:
objects_name = force_unicode(self.opts.verbose_name_plural)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": objects_name}
else:
title = _("Are you sure?")
context = self.get_context()
context.update({
"title": title,
"objects_name": objects_name,
"deletable_objects": [deletable_objects],
'queryset': queryset,
"perms_lacking": perms_needed,
"protected": protected,
"opts": self.opts,
"app_label": self.app_label,
'action_checkbox_name': ACTION_CHECKBOX_NAME,
})
# Display the confirmation page
return TemplateResponse(self.request, self.delete_selected_confirmation_template or
self.get_template_list('views/model_delete_selected_confirm.html'), context, current_app=self.admin_site.name)
class ActionPlugin(BaseAdminPlugin):
# Actions
actions = []
actions_selection_counter = True
global_actions = [DeleteSelectedAction]
def init_request(self, *args, **kwargs):
self.actions = self.get_actions()
return bool(self.actions)
def get_list_display(self, list_display):
if self.actions:
list_display.insert(0, 'action_checkbox')
self.admin_view.action_checkbox = action_checkbox
return list_display
def get_list_display_links(self, list_display_links):
if self.actions:
if len(list_display_links) == 1 and list_display_links[0] == 'action_checkbox':
return list(self.admin_view.list_display[1:2])
return list_display_links
def get_context(self, context):
if self.actions and self.admin_view.result_count:
av = self.admin_view
selection_note_all = ungettext('%(total_count)s selected',
'All %(total_count)s selected', av.result_count)
new_context = {
'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(av.result_list)},
'selection_note_all': selection_note_all % {'total_count': av.result_count},
'action_choices': self.get_action_choices(),
'actions_selection_counter': self.actions_selection_counter,
}
context.update(new_context)
return context
def post_response(self, response, *args, **kwargs):
request = self.admin_view.request
av = self.admin_view
# Actions with no confirmation
if self.actions and 'action' in request.POST:
action = request.POST['action']
if action not in self.actions:
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
av.message_user(msg)
else:
ac, name, description, icon = self.actions[action]
select_across = request.POST.get('select_across', False) == '1'
selected = request.POST.getlist(ACTION_CHECKBOX_NAME)
if not selected and not select_across:
# Reminder that something needs to be selected or nothing will happen
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
av.message_user(msg)
else:
queryset = av.list_queryset._clone()
if not select_across:
# Perform the action only on the selected objects
queryset = av.list_queryset.filter(pk__in=selected)
response = self.response_action(ac, queryset)
# Actions may return an HttpResponse, which will be used as the
# response from the POST. If not, we'll be a good little HTTP
# citizen and redirect back to the changelist page.
if isinstance(response, HttpResponse):
return response
else:
return HttpResponseRedirect(request.get_full_path())
return response
def response_action(self, ac, queryset):
if isinstance(ac, type) and issubclass(ac, BaseActionView):
action_view = self.get_model_view(ac, self.admin_view.model)
action_view.init_action(self.admin_view)
return action_view.do_action(queryset)
else:
return ac(self.admin_view, self.request, queryset)
def get_actions(self):
if self.actions is None:
return SortedDict()
actions = [self.get_action(action) for action in self.global_actions]
for klass in self.admin_view.__class__.mro()[::-1]:
class_actions = getattr(klass, 'actions', [])
if not class_actions:
continue
actions.extend(
[self.get_action(action) for action in class_actions])
# get_action might have returned None, so filter any of those out.
actions = filter(None, actions)
# Convert the actions into a SortedDict keyed by name.
actions = SortedDict([
(name, (ac, name, desc, icon))
for ac, name, desc, icon in actions
])
return actions
def get_action_choices(self):
"""
        Return a list of choices for use in a form object. Each choice is a
        tuple (name, description, icon).
"""
choices = []
for ac, name, description, icon in self.actions.itervalues():
choice = (name, description % model_format_dict(self.opts), icon)
choices.append(choice)
return choices
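        # Illustrative (hypothetical) result for a model whose verbose name is "book":
        # [('delete_selected', u'Delete selected books', 'fa fa-times')]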
def get_action(self, action):
if isinstance(action, type) and issubclass(action, BaseActionView):
if not action.has_perm(self.admin_view):
return None
return action, getattr(action, 'action_name'), getattr(action, 'description'), getattr(action, 'icon')
elif callable(action):
func = action
action = action.__name__
elif hasattr(self.admin_view.__class__, action):
func = getattr(self.admin_view.__class__, action)
else:
return None
if hasattr(func, 'short_description'):
description = func.short_description
else:
description = capfirst(action.replace('_', ' '))
return func, action, description, getattr(func, 'icon', 'tasks')
# View Methods
def result_header(self, item, field_name, row):
if item.attr and field_name == 'action_checkbox':
item.classes.append("action-checkbox-column")
return item
def result_item(self, item, obj, field_name, row):
if item.field is None and field_name == u'action_checkbox':
item.classes.append("action-checkbox")
return item
# Media
def get_media(self, media):
if self.actions and self.admin_view.result_count:
media = media + self.vendor('xadmin.plugin.actions.js', 'xadmin.plugins.css')
return media
# Block Views
def block_results_bottom(self, context, nodes):
if self.actions and self.admin_view.result_count:
nodes.append(loader.render_to_string('xadmin/blocks/model_list.results_bottom.actions.html', context_instance=context))
site.register_plugin(ActionPlugin, ListAdminView)
|
bsd-3-clause
| -8,988,290,766,967,416,000 | 36.630137 | 142 | 0.608664 | false | 4.337939 | false | false | false |
Ichimonji10/robottelo
|
tests/foreman/ui/test_discoveryrule.py
|
1
|
22334
|
# -*- encoding: utf-8 -*-
"""Test class for Foreman Discovery Rules
@Requirement: Discoveryrule
@CaseAutomation: Automated
@CaseLevel: Acceptance
@CaseComponent: UI
@TestType: Functional
@CaseImportance: High
@Upstream: No
"""
from fauxfactory import gen_integer, gen_ipaddr, gen_string
from nailgun import entities
from robottelo.datafactory import (
filtered_datapoint,
invalid_values_list,
valid_data_list,
)
from robottelo.decorators import run_only_on, skip_if_bug_open, tier1
from robottelo.test import UITestCase
from robottelo.ui.factory import make_discoveryrule
from robottelo.ui.locators import common_locators
from robottelo.ui.session import Session
@filtered_datapoint
def valid_search_queries():
"""Generates a list of all the input strings, (excluding html)"""
return [
'cpu_count ^ 10',
'disk_count > 5',
'disks_size <= {0}'.format(gen_string('numeric', 8)),
'ip = {0}'.format(gen_ipaddr()),
'model = KVM',
u'organization ~ {0}'.format(entities.Organization().create().name),
u'subnet = {0}'.format(entities.Subnet().create().name),
]
class DiscoveryRuleTestCase(UITestCase):
"""Implements Foreman discovery Rules in UI."""
@classmethod
def setUpClass(cls):
"""Display all the discovery rules on the same page"""
super(DiscoveryRuleTestCase, cls).setUpClass()
cls.per_page = entities.Setting().search(
query={'search': 'name="entries_per_page"'})[0]
cls.saved_per_page = str(cls.per_page.value)
cls.per_page.value = '100000'
cls.per_page.update({'value'})
cls.host_group = entities.HostGroup().create()
@classmethod
def tearDownClass(cls):
"""Restore previous 'entries_per_page' value"""
cls.per_page.value = cls.saved_per_page
cls.per_page.update({'value'})
super(DiscoveryRuleTestCase, cls).tearDownClass()
@run_only_on('sat')
@tier1
def test_positive_create_with_name(self):
"""Create Discovery Rule using different names
@id: afdf7000-4bd0-41ec-9773-96ff68e27b8d
@Assert: Rule should be successfully created
"""
with Session(self.browser) as session:
for name in valid_data_list():
with self.subTest(name):
make_discoveryrule(
session, name=name, hostgroup=self.host_group.name)
self.assertIsNotNone(self.discoveryrules.search(name))
@run_only_on('sat')
@tier1
def test_positive_create_with_search(self):
"""Create Discovery Rule using different search queries
@id: 973ff6e5-572e-401c-bc8c-d614a583e883
@Assert: Rule should be successfully created and has expected search
field value
"""
with Session(self.browser) as session:
for query in valid_search_queries():
with self.subTest(query):
name = gen_string('alpha')
make_discoveryrule(
session,
name=name,
hostgroup=self.host_group.name,
search_rule=query,
)
self.assertIsNotNone(self.discoveryrules.search(name))
self.assertEqual(
self.discoveryrules.get_attribute_value(
name, 'search'),
query
)
@run_only_on('sat')
@tier1
def test_positive_create_with_hostname(self):
"""Create Discovery Rule using valid hostname value
@id: e6742ca5-1d41-4ba3-8f2c-2169db92485b
@Assert: Rule should be successfully created and has expected hostname
field value
"""
name = gen_string('alpha')
hostname = gen_string('alpha')
with Session(self.browser) as session:
make_discoveryrule(
session,
name=name,
hostgroup=self.host_group.name,
hostname=hostname,
)
self.assertIsNotNone(self.discoveryrules.search(name))
self.assertEqual(
self.discoveryrules.get_attribute_value(name, 'hostname'),
hostname
)
@run_only_on('sat')
@tier1
def test_positive_create_with_hosts_limit(self):
"""Create Discovery Rule providing any number from range 1..100 for
hosts limit field
@id: 64b90586-c1a9-4be4-8c44-4fa19ca998f8
@Assert: Rule should be successfully created and has expected hosts
limit field value
"""
name = gen_string('alpha')
limit = str(gen_integer(1, 100))
with Session(self.browser) as session:
make_discoveryrule(
session,
name=name,
hostgroup=self.host_group.name,
host_limit=limit,
)
self.assertIsNotNone(self.discoveryrules.search(name))
self.assertEqual(
self.discoveryrules.get_attribute_value(name, 'host_limit'),
limit
)
@run_only_on('sat')
@tier1
def test_positive_create_with_priority(self):
"""Create Discovery Rule providing any number from range 1..100 for
priority field
@id: de847288-257a-4f0e-9cb6-9a0dd0877d23
@Assert: Rule should be successfully created and has expected priority
field value
"""
name = gen_string('alpha')
priority = str(gen_integer(1, 100))
with Session(self.browser) as session:
make_discoveryrule(
session,
name=name,
hostgroup=self.host_group.name,
priority=priority,
)
self.assertIsNotNone(self.discoveryrules.search(name))
self.assertEqual(
self.discoveryrules.get_attribute_value(name, 'priority'),
priority
)
@run_only_on('sat')
@tier1
def test_positive_create_disabled(self):
"""Create Discovery Rule in disabled state
@id: 0b98d467-aabf-4efe-890f-50d6edcd99ff
@Assert: Disabled rule should be successfully created
"""
name = gen_string('alpha')
with Session(self.browser) as session:
make_discoveryrule(
session,
name=name,
hostgroup=self.host_group.name,
enabled=False,
)
self.assertIsNotNone(self.discoveryrules.search(name))
self.assertEqual(
self.discoveryrules.get_attribute_value(
name, 'enabled', element_type='checkbox'),
False
)
@run_only_on('sat')
@tier1
def test_negative_create_with_invalid_name(self):
"""Create Discovery Rule with invalid names
@id: 79d950dc-4ca1-407e-84ca-9092d1cba978
@Assert: Error should be raised and rule should not be created
"""
with Session(self.browser) as session:
for name in invalid_values_list(interface='ui'):
with self.subTest(name):
make_discoveryrule(
session, name=name, hostgroup=self.host_group.name)
self.assertIsNotNone(
self.discoveryrules.wait_until_element(
common_locators['name_haserror'])
)
self.assertIsNone(self.discoveryrules.search(name))
@run_only_on('sat')
@tier1
def test_negative_create_with_invalid_hostname(self):
"""Create Discovery Rule with invalid hostname
@id: a322c8ce-4f05-401a-88cb-a3d30b4ac446
@Assert: Error should be raised and rule should not be created
"""
name = gen_string('alpha')
with Session(self.browser) as session:
make_discoveryrule(
session,
name=name,
hostgroup=self.host_group.name,
hostname=gen_string('numeric'),
)
self.assertIsNotNone(self.discoveryrules.wait_until_element(
common_locators['haserror']
))
self.assertIsNone(self.discoveryrules.search(name))
@run_only_on('sat')
@tier1
def test_negative_create_with_limit(self):
"""Create Discovery Rule with invalid host limit
@id: 743d29f4-a901-400c-ad98-a3b8942f02b5
@Assert: Error should be raised and rule should not be created
"""
name = gen_string('alpha')
with Session(self.browser) as session:
for limit in '-1', gen_string('alpha'):
with self.subTest(limit):
make_discoveryrule(
session,
name=name,
host_limit=limit,
hostgroup=self.host_group.name,
)
self.assertIsNotNone(
self.discoveryrules.wait_until_element(
common_locators['haserror'])
)
self.assertIsNone(self.discoveryrules.search(name))
@run_only_on('sat')
@skip_if_bug_open('bugzilla', 1308831)
@tier1
def test_negative_create_with_too_long_limit(self):
"""Create Discovery Rule with too long host limit value
@id: 450b49d9-1058-4186-9b23-15cc615e5bd6
@Assert: Validation error should be raised and rule should not be
created
"""
name = gen_string('alpha')
with Session(self.browser) as session:
make_discoveryrule(
session,
name=name,
host_limit=gen_string('numeric', 50),
hostgroup=self.host_group.name,
)
self.assertIsNotNone(self.discoveryrules.wait_until_element(
common_locators['haserror']
))
self.assertIsNone(self.discoveryrules.search(name))
@run_only_on('sat')
@tier1
def test_negative_create_with_same_name(self):
"""Create Discovery Rule with name that already exists
@id: 5a914e76-de01-406d-9860-0e4e1521b074
@Assert: Error should be raised and rule should not be created
"""
name = gen_string('alpha')
with Session(self.browser) as session:
make_discoveryrule(
session, name=name, hostgroup=self.host_group.name)
self.assertIsNotNone(self.discoveryrules.search(name))
make_discoveryrule(
session, name=name, hostgroup=self.host_group.name)
self.assertIsNotNone(self.discoveryrules.wait_until_element(
common_locators['name_haserror']
))
@run_only_on('sat')
@tier1
def test_negative_create_with_invalid_priority(self):
"""Create Discovery Rule with invalid priority
@id: f8829cce-86c0-452c-b866-d5645174e9e1
@Assert: Error should be raised and rule should not be created
"""
name = gen_string('alpha')
with Session(self.browser) as session:
make_discoveryrule(
session,
name=name,
hostgroup=self.host_group.name,
priority=gen_string('alpha'),
)
self.assertIsNotNone(self.discoveryrules.wait_until_element(
common_locators['haserror']
))
self.assertIsNone(self.discoveryrules.search(name))
@run_only_on('sat')
@tier1
def test_positive_delete(self):
"""Delete existing Discovery Rule
@id: fc5b714c-e5bc-4b0f-bc94-88e080318704
@Assert: Rule should be successfully deleted
"""
with Session(self.browser) as session:
for name in valid_data_list():
with self.subTest(name):
make_discoveryrule(
session, name=name, hostgroup=self.host_group.name)
self.assertIsNotNone(self.discoveryrules.search(name))
self.discoveryrules.delete(name)
@run_only_on('sat')
@tier1
def test_positive_update_name(self):
"""Update discovery rule name
@id: 16a79449-7200-492e-9ddb-65fc034e510d
@Assert: Rule name is updated
"""
name = gen_string('alpha')
with Session(self.browser) as session:
make_discoveryrule(
session, name=name, hostgroup=self.host_group.name)
self.assertIsNotNone(self.discoveryrules.search(name))
for new_name in valid_data_list():
with self.subTest(new_name):
self.discoveryrules.update(name=name, new_name=new_name)
self.assertIsNotNone(self.discoveryrules.search(new_name))
name = new_name # for next iteration
@run_only_on('sat')
@tier1
def test_positive_update_query(self):
"""Update discovery rule search query
@id: bcf85a4c-0b27-47a5-8d5d-7ede0f6eea41
@Assert: Rule search field is updated
"""
name = gen_string('alpha')
with Session(self.browser) as session:
make_discoveryrule(
session, name=name, hostgroup=self.host_group.name)
self.assertIsNotNone(self.discoveryrules.search(name))
for new_query in valid_search_queries():
with self.subTest(new_query):
self.discoveryrules.update(
name=name, search_rule=new_query)
self.assertEqual(
self.discoveryrules.get_attribute_value(
name, 'search'),
new_query
)
@run_only_on('sat')
@tier1
def test_positive_update_hostgroup(self):
"""Update discovery rule host group
@id: e10274e9-bf1b-42cd-a809-f19e707e7f4c
@Assert: Rule host group is updated
"""
name = gen_string('alpha')
new_hostgroup_name = entities.HostGroup().create().name
with Session(self.browser) as session:
make_discoveryrule(
session, name=name, hostgroup=self.host_group.name)
self.assertIsNotNone(self.discoveryrules.search(name))
self.assertEqual(
self.discoveryrules.get_attribute_value(
name, 'hostgroup', element_type='select'),
self.host_group.name
)
self.discoveryrules.update(name=name, hostgroup=new_hostgroup_name)
self.assertEqual(
self.discoveryrules.get_attribute_value(
name, 'hostgroup', element_type='select'),
new_hostgroup_name
)
@run_only_on('sat')
@tier1
def test_positive_update_hostname(self):
"""Update discovery rule hostname value
@id: 753ff15b-da73-4fb3-87cd-14d504d8e882
@Assert: Rule host name is updated
"""
name = gen_string('alpha')
hostname = gen_string('alpha')
with Session(self.browser) as session:
make_discoveryrule(
session, name=name, hostgroup=self.host_group.name)
self.assertIsNotNone(self.discoveryrules.search(name))
self.discoveryrules.update(name=name, hostname=hostname)
self.assertEqual(
self.discoveryrules.get_attribute_value(name, 'hostname'),
hostname
)
@run_only_on('sat')
@tier1
def test_positive_update_limit(self):
"""Update discovery rule limit value
@id: 69d59c34-407b-47d0-a2b8-46decb95ef47
@Assert: Rule host limit field is updated
"""
name = gen_string('alpha')
limit = str(gen_integer(1, 100))
with Session(self.browser) as session:
make_discoveryrule(
session, name=name, hostgroup=self.host_group.name)
self.assertIsNotNone(self.discoveryrules.search(name))
self.discoveryrules.update(name=name, host_limit=limit)
self.assertEqual(
self.discoveryrules.get_attribute_value(name, 'host_limit'),
limit
)
@run_only_on('sat')
@tier1
def test_positive_update_priority(self):
"""Update discovery rule priority value
@id: be4de7a9-df8e-44ae-9910-7397341f6d07
@Assert: Rule priority is updated
"""
name = gen_string('alpha')
priority = str(gen_integer(1, 100))
with Session(self.browser) as session:
make_discoveryrule(
session, name=name, hostgroup=self.host_group.name)
self.assertIsNotNone(self.discoveryrules.search(name))
self.discoveryrules.update(name=name, priority=priority)
self.assertEqual(
self.discoveryrules.get_attribute_value(name, 'priority'),
priority
)
@run_only_on('sat')
@tier1
def test_positive_update_disable_enable(self):
"""Update discovery rule enabled state. (Disabled->Enabled)
@id: 60d619e4-a039-4f9e-a16c-b05f0598e8fa
@Assert: Rule enabled checkbox is updated
"""
name = gen_string('alpha')
with Session(self.browser) as session:
make_discoveryrule(
session,
name=name,
hostgroup=self.host_group.name,
enabled=False,
)
self.assertIsNotNone(self.discoveryrules.search(name))
self.discoveryrules.update(name=name, enabled=True)
self.assertEqual(
self.discoveryrules.get_attribute_value(
name, 'enabled', element_type='checkbox'),
True
)
@run_only_on('sat')
@tier1
def test_negative_update_name(self):
"""Update discovery rule name using invalid names only
@id: 65f32628-796a-4d7e-bf2c-c84c6b06f309
@Assert: Rule name is not updated
"""
name = gen_string('alpha')
with Session(self.browser) as session:
make_discoveryrule(
session, name=name, hostgroup=self.host_group.name)
self.assertIsNotNone(self.discoveryrules.search(name))
for new_name in invalid_values_list(interface='ui'):
with self.subTest(new_name):
self.discoveryrules.update(name=name, new_name=new_name)
self.assertIsNotNone(
self.discoveryrules.wait_until_element(
common_locators['name_haserror'])
)
self.assertIsNone(self.discoveryrules.search(new_name))
@run_only_on('sat')
@tier1
def test_negative_update_hostname(self):
"""Update discovery rule host name using number as a value
@id: 18713425-22fe-4eaa-a515-8e08aa07e116
@Assert: Rule host name is not updated
"""
name = gen_string('alpha')
hostname = gen_string('alpha')
with Session(self.browser) as session:
make_discoveryrule(
session,
name=name,
hostgroup=self.host_group.name,
hostname=hostname,
)
self.assertIsNotNone(self.discoveryrules.search(name))
self.discoveryrules.update(
name=name, hostname=gen_string('numeric'))
self.assertIsNotNone(self.discoveryrules.wait_until_element(
common_locators['haserror']
))
self.assertEqual(
self.discoveryrules.get_attribute_value(name, 'hostname'),
hostname
)
@run_only_on('sat')
@tier1
def test_negative_update_limit(self):
"""Update discovery rule host limit using invalid values
@id: 7e8b7218-3c8a-4b03-b0df-484e0d793ceb
@Assert: Rule host limit is not updated
"""
name = gen_string('alpha')
limit = str(gen_integer(1, 100))
with Session(self.browser) as session:
make_discoveryrule(
session,
name=name,
hostgroup=self.host_group.name,
host_limit=limit,
)
self.assertIsNotNone(self.discoveryrules.search(name))
for new_limit in '-1', gen_string('alpha'):
with self.subTest(new_limit):
self.discoveryrules.update(
name=name, host_limit=new_limit)
self.assertIsNotNone(
self.discoveryrules.wait_until_element(
common_locators['haserror'])
)
self.assertEqual(
self.discoveryrules.get_attribute_value(name, 'host_limit'),
limit
)
@run_only_on('sat')
@tier1
def test_negative_update_priority(self):
"""Update discovery rule priority using invalid values
@id: d44ad49c-5d95-442f-a1b3-cd82dd8ffabf
@Assert: Rule priority is not updated
"""
name = gen_string('alpha')
priority = str(gen_integer(1, 100))
with Session(self.browser) as session:
make_discoveryrule(
session,
name=name,
hostgroup=self.host_group.name,
priority=priority,
)
self.assertIsNotNone(self.discoveryrules.search(name))
for new_priority in '-1', gen_string('alpha'):
with self.subTest(new_priority):
self.discoveryrules.update(
name=name, priority=new_priority)
self.assertIsNotNone(
self.discoveryrules.wait_until_element(
common_locators['haserror'])
)
self.assertEqual(
self.discoveryrules.get_attribute_value(name, 'priority'),
priority
)
|
gpl-3.0
| 5,553,202,389,800,551,000 | 34.116352 | 79 | 0.563849 | false | 4.19812 | true | false | false |
phwallen/smrc
|
heSensor.py
|
1
|
4357
|
'''
Simple Model Railway Automation
Hall-effect Sensor Support Module
Author : Peter Wallen
Created : 21/1/13
Version 1.0
This code encapsulates the hardware associated with sensors used to detect the location of trains.
The supported hardware comprises:
One or more Microchip MCP23017 16-Bit I/O Expanders acting as sensor controllers.
Each sensor controller can be connected to a maximum of 16 hall-effect sensors.
This module requires python-smbus
'''
import smbus
import time
bus = 0
def i2Cbus_open():
'''
This function must be called once by the automation script to open the I2C bus between
the Rpi and the sensor controller(s).
'''
global bus
try:
bus = smbus.SMBus(0)
except EnvironmentError as e:
print e
raise RuntimeError("Unable to open I2C bus")
def config(address):
'''
This function must be called once by the automation script for each sensor controller.
  The address of the controller is determined by the A0, A1 and A2 pins on the MCP23017 chip.
  e.g. if A0, A1 and A2 are LOW then the address should be 0x20.
  For information about configuring the sensor controller see the Microchip MCP23017 datasheet.
  For example, to connect sensors to GPA0 - GPA7, use GPB0 - GPB7 to drive LED indicators and
  enable interrupts to allow the last sensor triggered to be stored in the interrupt capture register,
configure as follows:
bus.write_byte_data(address,IODIRA,0xff) # set all ports in bank A to input
bus.write_byte_data(address,IODIRB,0x00) # set all ports in bank B to output
bus.write_byte_data(address,GPPUA,0xff) # enable pullup resistors for bank A
  bus.write_byte_data(address,GPINTENA,0xff) # enable interrupts on port A
'''
global bus
# MCP23017 register constants
IODIRA = 0x00
IODIRB = 0x01
GPINTENA = 0X04
GPINTENB = 0x05
GPPUA = 0x0c
GPPUB = 0x0d
INTCAPA= 0x10
INTCAPB= 0x11
GPIOA = 0x12
GPIOB = 0x13
bus.write_byte_data(address,IODIRA,0xff) # set all ports in bank A to input
bus.write_byte_data(address,IODIRB,0x00) # set all ports in bank B to output
bus.write_byte_data(address,GPPUA,0xff) # enable pullup resistors for bank A
  bus.write_byte_data(address,GPINTENA,0xff) # enable interrupts on port A
class Sensor(object):
'''
The class describing a sensor object.
  A sensor object is associated with each train detection sensor.
'''
def __init__(self,address,bank,port):
'''
The class constructor is called with the following parameters:
address : the address of the sensor controller on the I2C bus eg. 0X20
bank : the register group the sensor is connected to: 'A' for GPA0 - GPA7 and 'B' for GPB0 - GPB7
port : the port on the sensor controller the sensor is connected to (1 - 8).
NB. port 1 corresponds to pin GPx0 and port 8 corresponds to pin GPx7
where x = A or B
'''
global bus
mask_table = [0x00,0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80]
if bus == 0 :
raise RuntimeError("I2C bus has not been opened")
self.address = address
self.port = 0
if bank == "A" :
self.iodir = 0x00
self.gpinten = 0x04
self.gppu = 0x0c
self.intcap = 0x10
self.gpio = 0x12
elif bank == "B" :
self.iodir = 0x01
self.gpinten = 0x05
self.gppu = 0x0d
self.intcap = 0x11
self.gpio = 0x13
else :
raise RuntimeError("Invalid bank must be A or B")
if port > 8 or port < 1 :
raise RuntimeError("Invalid port must be between 1 and 8")
else :
self.port |= mask_table[port]
def wait(self) :
'''
    This method will poll the interrupt capture register for the sensor until it is triggered.
    In addition, it will control a status LED connected to the corresponding port on bank B.
'''
x = bus.read_byte_data(self.address,self.intcap)
# switch off indicator for appropriate port
status = bus.read_byte_data(self.address,0x13)
    status &= ~self.port  # clear this port's bit to switch the LED off
bus.write_byte_data(self.address,0x13,status)
while (x & self.port) :
x = bus.read_byte_data(self.address,self.intcap)
time.sleep(1)
# switch on indicator for appropriate port
status = bus.read_byte_data(self.address,0x13)
status |= self.port
bus.write_byte_data(self.address,0x13,status)
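# Minimal usage sketch: the calling sequence an automation script would follow,
# assuming a single controller at I2C address 0x20 with a hall-effect sensor on
# bank A, port 1 (the address and port numbers are illustrative assumptions).
#
#   import heSensor
#
#   heSensor.i2Cbus_open()                    # open the I2C bus once
#   heSensor.config(0x20)                     # configure the sensor controller
#   sensor1 = heSensor.Sensor(0x20, 'A', 1)   # sensor wired to GPA0
#   sensor1.wait()                            # block until a train triggers it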
|
mit
| 5,683,908,779,810,081,000 | 32.775194 | 103 | 0.684416 | false | 3.300758 | false | false | false |
hazelnusse/pydy
|
examples/pointfootwalker/GarciasPFW_eoms.py
|
1
|
2122
|
# Sun Aug 23 13:12:56 2009
from numpy import sin, cos, tan, vectorize
def f(x, t, parameter_list):
# Unpacking the parameters
Mh, mf, g, L, q3 = parameter_list
# Unpacking the states (q's and u's)
q1, q2, u1, u2 = x
s1 = sin(q1)
    c3 = cos(q3)
c2 = cos(q2)
    s3 = sin(q3)
s2 = sin(q2)
c1 = cos(q1)
# Kinematic differential equations
q1p = u1
q2p = -u2 + u1
# Dynamic differential equations
u1p = -L**4*mf**2*u2**2*s2/(-L**4*mf**2 + L**4*mf**2*c2**2 - Mh*mf*L**4) + g*L**3*mf**2*cos(q3)*c1/(-L**4*mf**2 + L**4*mf**2*c2**2 - Mh*mf*L**4) + L**4*mf**2*u1**2*c2*s2/(-L**4*mf**2 + L**4*mf**2*c2**2 - Mh*mf*L**4) - g*L**3*mf**2*sin(q3)*s1/(-L**4*mf**2 + L**4*mf**2*c2**2 - Mh*mf*L**4) + Mh*g*mf*L**3*cos(q3)*c1/(-L**4*mf**2 + L**4*mf**2*c2**2 - Mh*mf*L**4) + g*L**3*mf**2*c2**2*sin(q3)*s1/(-L**4*mf**2 + L**4*mf**2*c2**2 - Mh*mf*L**4) - Mh*g*mf*L**3*sin(q3)*s1/(-L**4*mf**2 + L**4*mf**2*c2**2 - Mh*mf*L**4) - g*L**3*mf**2*c2**2*cos(q3)*c1/(-L**4*mf**2 + L**4*mf**2*c2**2 - Mh*mf*L**4) - g*L**3*mf**2*cos(q3)*c2*s1*s2/(-L**4*mf**2 + L**4*mf**2*c2**2 - Mh*mf*L**4) - g*L**3*mf**2*c1*c2*sin(q3)*s2/(-L**4*mf**2 + L**4*mf**2*c2**2 - Mh*mf*L**4)
u2p = L**4*mf**2*u1**2*s2/(-L**4*mf**2 + L**4*mf**2*c2**2 - Mh*mf*L**4) + Mh*mf*L**4*u1**2*s2/(-L**4*mf**2 + L**4*mf**2*c2**2 - Mh*mf*L**4) - L**4*mf**2*u2**2*c2*s2/(-L**4*mf**2 + L**4*mf**2*c2**2 - Mh*mf*L**4) - g*L**3*mf**2*cos(q3)*s1*s2/(-L**4*mf**2 + L**4*mf**2*c2**2 - Mh*mf*L**4) - g*L**3*mf**2*c1*sin(q3)*s2/(-L**4*mf**2 + L**4*mf**2*c2**2 - Mh*mf*L**4) - Mh*g*mf*L**3*cos(q3)*s1*s2/(-L**4*mf**2 + L**4*mf**2*c2**2 - Mh*mf*L**4) - Mh*g*mf*L**3*c1*sin(q3)*s2/(-L**4*mf**2 + L**4*mf**2*c2**2 - Mh*mf*L**4)
return [q1p, q2p, u1p, u2p]
def qdot2u(q, qd, parameter_list):
# Unpacking the parameters
Mh, mf, g, L, q3 = parameter_list
# Unpacking the q's and qdots
q1, q2 = q
q1p, q2p = qd
s1 = sin(q1)
    c3 = cos(q3)
c2 = cos(q2)
    s3 = sin(q3)
s2 = sin(q2)
c1 = cos(q1)
# Kinematic differential equations
u1 = q1p
u2 = q1p - q2p
return [u1, u2]
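# Minimal integration sketch: f() has the signature expected by
# scipy.integrate.odeint, with x = [q1, q2, u1, u2] and
# parameter_list = [Mh, mf, g, L, q3]. The numerical values below are
# illustrative assumptions only, not Garcia's published parameters.
if __name__ == "__main__":
    from scipy.integrate import odeint
    from numpy import linspace
    params = [10.0, 1.0, 9.81, 1.0, 0.009]   # Mh, mf, g, L, q3 (slope angle)
    x0 = [0.2, 0.4, -0.2, -0.3]              # initial q1, q2, u1, u2
    t = linspace(0.0, 5.0, 1000)
    states = odeint(f, x0, t, args=(params,))
    print states[-1]                         # state after 5 seconds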
|
bsd-3-clause
| -8,282,384,684,407,211,000 | 54.868421 | 747 | 0.49623 | false | 1.698959 | false | false | false |
pyxll/pyxll-examples
|
matplotlib/interactiveplot.py
|
1
|
3441
|
"""
Example code showing how to draw an interactive matplotlib figure
in Excel.
While the figure is displayed Excel is still useable in the background
and the chart may be updated with new data by calling the same
function again.
"""
from pyxll import xl_func
from pandas.stats.moments import ewma
# matplotlib imports
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
# Qt imports
from PySide import QtCore, QtGui
import timer # for polling the Qt application
# dict to keep track of any chart windows
_plot_windows = {}
@xl_func("string figname, numpy_column<float> xs, numpy_column<float> ys, int span: string")
def mpl_plot_ewma(figname, xs, ys, span):
"""
Show a matplotlib line plot of xs vs ys and ewma(ys, span) in an interactive window.
:param figname: name to use for this plot's window
:param xs: list of x values as a column
:param ys: list of y values as a column
:param span: ewma span
"""
# Get the Qt app.
# Note: no need to 'exec' this as it will be polled in the main windows loop.
app = get_qt_app()
# create the figure and axes for the plot
fig = Figure(figsize=(600, 600), dpi=72, facecolor=(1, 1, 1), edgecolor=(0, 0, 0))
ax = fig.add_subplot(111)
# calculate the moving average
ewma_ys = ewma(ys, span=span)
# plot the data
ax.plot(xs, ys, alpha=0.4, label="Raw")
ax.plot(xs, ewma_ys, label="EWMA")
ax.legend()
# generate the canvas to display the plot
canvas = FigureCanvas(fig)
# Get or create the Qt windows to show the chart in.
if figname in _plot_windows:
# get from the global dict and clear any previous widgets
window = _plot_windows[figname]
layout = window.layout()
if layout:
for i in reversed(range(layout.count())):
layout.itemAt(i).widget().setParent(None)
else:
# create a new window for this plot and store it for next time
window = QtGui.QWidget()
window.resize(800, 600)
window.setWindowTitle(figname)
_plot_windows[figname] = window
# create the navigation toolbar
toolbar = NavigationToolbar(canvas, window)
# add the canvas and toolbar to the window
layout = window.layout() or QtGui.QVBoxLayout()
layout.addWidget(canvas)
layout.addWidget(toolbar)
window.setLayout(layout)
window.show()
return "[Plotted '%s']" % figname
#
# Taken from the ui/qt.py example
#
def get_qt_app():
"""
returns the global QtGui.QApplication instance and starts
the event loop if necessary.
"""
app = QtCore.QCoreApplication.instance()
if app is None:
# create a new application
app = QtGui.QApplication([])
# use timer to process events periodically
processing_events = {}
def qt_timer_callback(timer_id, time):
if timer_id in processing_events:
return
processing_events[timer_id] = True
try:
app = QtCore.QCoreApplication.instance()
if app is not None:
app.processEvents(QtCore.QEventLoop.AllEvents, 300)
finally:
del processing_events[timer_id]
timer.set_timer(100, qt_timer_callback)
return app
|
unlicense
| -8,776,649,208,221,896,000 | 30.281818 | 92 | 0.657076 | false | 3.892534 | false | false | false |
ColumbiaCMB/kid_readout
|
apps/data_taking_scripts/2017-05-jpl-lf-n1-optical/single-horn/sweep_and_stream_bb_4.py
|
1
|
3439
|
from kid_readout.interactive import *
from kid_readout.equipment import hardware
from kid_readout.measurement import acquire
from kid_readout.roach import analog
from kid_readout.equipment import agilent_33220
import time
fg = agilent_33220.FunctionGenerator(addr=('192.168.1.135', 5025))
fg.set_load_ohms(1000)
fg.set_dc_voltage(0)
fg.enable_output(False)
ri = Roach2Baseband()
ri.set_modulation_output('high')
initial_f0s = np.load('/data/readout/resonances/2017-06-JPL-8x8-LF-N1_single_horn_4.npy')/1e6
nf = len(initial_f0s)
atonce = 4
if nf % atonce > 0:
print "extending list of resonators to make a multiple of ", atonce
initial_f0s = np.concatenate((initial_f0s, np.arange(1, 1 + atonce - (nf % atonce)) + initial_f0s.max()))
print len(initial_f0s)
nsamp = 2**20
offsets = np.arange(-16,16)*512./nsamp
last_f0s = initial_f0s
for heater_voltage in np.sqrt(np.linspace(0,4**2,16)):
fg.set_dc_voltage(heater_voltage)
if heater_voltage == 0:
print "heater voltage is 0 V, skipping wait"
else:
print "waiting 20 minutes", heater_voltage
time.sleep(1200)
fg.enable_output(True)
for dac_atten in [35]:
ri.set_dac_atten(dac_atten)
tic = time.time()
ncf = new_nc_file(suffix='%d_dB_load_heater_%.3f_V' % (dac_atten, heater_voltage))
swpa = acquire.run_sweep(ri, tone_banks=last_f0s[None,:] + offsets[:,None], num_tone_samples=nsamp,
length_seconds=0, verbose=True,
description='bb sweep')
print "resonance sweep done", (time.time()-tic)/60.
ncf.write(swpa)
current_f0s = []
for sidx in range(last_f0s.shape[0]):
swp = swpa.sweep(sidx)
res = swp.resonator
print res.f_0, res.Q, res.current_result.redchi, (last_f0s[sidx]*1e6-res.f_0)
if np.abs(res.f_0 - last_f0s[sidx]*1e6) > 200e3:
current_f0s.append(last_f0s[sidx]*1e6)
print "using original frequency for ",last_f0s[sidx]
else:
current_f0s.append(res.f_0)
print "fits complete", (time.time()-tic)/60.
current_f0s = np.array(current_f0s)/1e6
current_f0s.sort()
if np.any(np.diff(current_f0s)<0.031):
print "problematic resonator collision:",current_f0s
print "deltas:",np.diff(current_f0s)
problems = np.flatnonzero(np.diff(current_f0s)<0.031)+1
current_f0s[problems] = (current_f0s[problems-1] + current_f0s[problems+1])/2.0
if np.any(np.diff(current_f0s)<0.031):
print "repeated problematic resonator collision:",current_f0s
print "deltas:",np.diff(current_f0s)
problems = np.flatnonzero(np.diff(current_f0s)<0.031)+1
current_f0s[problems] = (current_f0s[problems-1] + current_f0s[problems+1])/2.0
ri.set_tone_freqs(current_f0s,nsamp)
ri.select_fft_bins(range(last_f0s.shape[0]))
last_f0s = current_f0s
raw_input("turn off compressor")
meas = ri.get_measurement(num_seconds=30.,description='stream with bb')
raw_input("turn on compressor")
ncf.write(meas)
print "dac_atten %f heater voltage %.3f V done in %.1f minutes" % (dac_atten, heater_voltage, (time.time()-tic)/60.)
ncf.close()
raw_input("check sweeps fit before going to next voltage step")
ri.set_dac_atten(20)
|
bsd-2-clause
| 2,862,236,436,444,174,300 | 39.458824 | 124 | 0.624891 | false | 2.885067 | false | false | false |
el33th4x0r/crosstex
|
crosstex/cmd.py
|
1
|
12121
|
import argparse
import importlib
import logging
import os
import os.path
import sys
import crosstex
import crosstex.style
logger = logging.getLogger('crosstex')
parser = argparse.ArgumentParser(prog='crosstex',
description='A modern, object-oriented bibliographic tool.')
#parser.add_argument('--quiet',
# help='Do not sanity check the input (XXX: ignored.')
#parser.add_argument('--strict',
# help='Apply stricter checks and check all entries (XXX:ignored.')
#parser.add_argument('--dump', metavar='TYPE',
# help='After parsing the bibliography, dump a list of all '
# 'objects of the type specified, or, with "file", print '
# 'a list of files processed. XXX: ignored')
#parser.add_argument('--no-sort', help='XXX: ignored')
#parser.add_argument('--capitalize', metavar='TYPE', action='append',
# help='Specify any string-like object, i.e. one with name and '
# 'shortname fields. Strings of the specified types will '
# 'appear in ALL CAPS. XXX: ignored')
#parser.add_argument('--no-field', metavar='TYPE', action='append',
# help='Specify a field name, and in any objects where that '
# 'field is optional it will be unassigned no matter what '
# 'appears in the database. For example, to turn off '
# 'page numbers, use "--no-field pages". XXX: ignored')
#parser.add_argument('-l', '--link', metavar='FIELD', action='append',
# help='Add to the list of fields used to generate links. '
# 'LaTeX documents should make use of links by including '
# 'the hyperref package. When converting to HTML, this '
# 'defaults to [Abstract, URL, PS, PDF, HTML, DVI, TEX, '
# 'BIB, FTP, HTTP, and RTF]. XXX: ignored')
#parser.add_argument('--no-link', help='XXX: ignored')
#parser.add_argument('--abstract',
# help='In the bibliography, include paper abstracts if available. XXX: ignored')
#parser.add_argument('--no-abstract')
#parser.add_argument('--keywords',
# help='In the bibliography, include paper keywords if available. XXX: ignored')
#parser.add_argument('--no-keywords')
#parser.add_argument('--popups',
# help='If abstracts or keywords are to appear for an entry'
# 'when generating HTML, instead hide these extra blocks'
# 'and reveal them as popups when the mouse hovers over'
# 'the entry. XXX: ignored')
#parser.add_argument('--no-popups')
#parser.add_argument('--title-head',
# help='In the bibliography, put the title bold and first. XXX:ignored')
#parser.add_argument('--no-title-head')
#parser.add_argument('--blank-labels',
# help='In the bibliography, leave out item labels. XXX:ignored')
#parser.add_argument('--no-blank-labels')
#parser.add_argument('--break-lines',
# help='In the bibliography, put author, title, and '
# 'publication information on separate lines. XXX:ignored')
#parser.add_argument('--no-break-lines')
#parser.add_argument('--last-first',
# help='The first name in each author list will appear "Last, '
# 'First" instead of "First Last" (the latter is the '
# 'default). XXX:ignored')
#parser.add_argument('--no-last-first')
parser.add_argument('--version', version='CrossTeX 0.9.0', action='version')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('-d', '--dir', metavar='DIR', action='append', dest='dirs',
help='Add a directory in which to find data files, searched '
'from last specified to first.')
parser.add_argument('--cite', metavar='CITE', action='append',
                    help='Cite a key exactly as with the \\cite LaTeX command.')
parser.add_argument('--cite-by', metavar='CITE_BY', default='style',
help='With "number", use numeric labels such as [1]. With '
'"initials", use labels based on author last-names such '
'as [SBRD04b]. With "fullname", use labels based on '
'author names such as [Sirer et al. 2004]. With '
'"style", use the style default.')
parser.add_argument('--style', metavar='STYLE', default='plain',
help='Use a standard style such as plain, unsrt, abbrv, '
'full, or alpha. Options set by the style may be '
                         'overridden by further command-line options.')
parser.add_argument('--short', metavar='TYPE', action='append',
help='Specify any string-like object, i.e. one with name and '
                         'shortname fields. Whenever possible, the short name '
'will be used, e.g. two-letter state codes for '
'"state", conference acronyms such as NSDI for '
'"conference", or initials such as E. G. Sirer for '
'"author".')
parser.add_argument('--titlecase', metavar='TITLECASE', default='default',
choices=('default', 'lower', 'upper', 'title'),
help='In the bibliography, force titles into lower-, upper-, '
'or title-case. Default: leave the titles unchanged.')
parser.add_argument('-f', '--format', metavar='FORMAT', dest='fmt', default='bbl',
help='Select a format for the output. Examples include '
'"bbl", "html", "bib", or "xtx". "bib" and "xtx" are '
'always available and not affected by "--style". '
'Other formats are dependent upon the choice of style.')
class SortAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
s = getattr(args, self.dest, []) or []
reverse = option_string in ('-S', '--reverse-sort')
s.append((values, reverse))
setattr(args, self.dest, s)
parser.add_argument('-s', '--sort', metavar='FIELD', dest='sort', action=SortAction,
help='Sort by specified field. Multiple sort orders are '
'applied in the order specified, e.g. "-s year -s '
'author" will cause elements to be grouped primarily by '
'author and sub-grouped by year.'
' XXX: this is currently ignored')
parser.add_argument('-S', '--reverse-sort', metavar='FIELD', dest='sort', action=SortAction,
help='Exactly as --sort, but sort by descending field values '
'rather than ascending.'
' XXX: this is currently ignored')
class HeadingAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
s = getattr(args, self.dest, None) or None
reverse = option_string in ('--reverse-heading',)
setattr(args, self.dest, (values, reverse))
parser.add_argument('--heading', metavar='FIELD', dest='heading', action=HeadingAction,
help='Divide entries and create headings in bibliography by '
'the value of the given field. XXX: ignored')
parser.add_argument('--reverse-heading', metavar='FIELD', dest='heading', action=HeadingAction,
help='Exactly as --heading, but sort by descending field '
'values rather than ascending. XXX: ignored')
parser.add_argument('-o', '--output', metavar='FILE',
help='Write the bibliography to the specified output file.')
parser.add_argument('--no-pages', action='store_const', const=True, default=False,
help='Skip pages.')
parser.add_argument('--no-address', action='store_const', const=True, default=False,
help='Skip address.')
parser.add_argument('--add-in', action='store_const', const=True, default=False,
help='Add "In" for articles.')
parser.add_argument('--add-proc', dest='add_proc',
action='store_const', const='proc', default=None,
help='Add "Proc. of" to conference and workshop publications.')
parser.add_argument('--add-proceedings', dest='add_proc',
action='store_const', const='proceedings',
help='Add "Proceedings of the" to conference and workshop publications.')
parser.add_argument('files', metavar='FILES', nargs='+',
help='A list of xtx, aux, or bib files to process.')
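# Example invocations (the file names are illustrative, not from this project):
#
#   crosstex paper.aux # write paper.bbl from the citations in paper.aux
#   crosstex --style alpha -f html -o refs.html refs.xtx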
def main(argv):
try:
args = parser.parse_args()
path = list(args.dirs or []) + \
[os.path.join(os.path.join(os.path.expanduser('~'), '.crosstex'))] + \
['/usr/local/share/crosstex'] + \
['/XXX']
if args.verbose:
logger.setLevel(logging.DEBUG)
logging.getLogger('crosstex.parse').setLevel(logging.DEBUG)
xtx = crosstex.CrossTeX(xtx_path=path)
xtx.set_titlecase(args.titlecase)
if args.no_pages:
xtx.no_pages()
if args.no_address:
xtx.no_address()
if args.add_in:
xtx.add_in()
if args.add_proc == 'proc':
xtx.add_proc()
if args.add_proc == 'proceedings':
xtx.add_proceedings()
for s in args.short or []:
xtx.add_short(s)
xtx.set_style(args.fmt, args.style, args.cite_by)
for f in reversed(args.files):
xtx.parse(f)
# We'll use this check later
is_aux = os.path.splitext(args.files[-1])[1] == '.aux' or \
xtx.aux_citations() and os.path.splitext(args.files[-1])[1] == ''
# Get a list of things to cite
cite = []
warn_uncitable = True
if args.cite:
cite = args.cite
elif is_aux:
cite = xtx.aux_citations()
elif xtx.has_inline_citations():
cite = xtx.inline_citations()
else:
warn_uncitable = False
cite = xtx.all_citations()
objects = [(c, xtx.lookup(c)) for c in cite]
if warn_uncitable:
for c in [c for c, o in objects if not o or not o.citeable]:
logger.warning('Cannot find object for citation %r' % c)
citeable = [(c, o) for c, o in objects if o and o.citeable]
unique = {}
for c, o in citeable:
if o in unique:
unique[o].append(c)
else:
unique[o] = [c]
for o, cs in unique.items():
if len(cs) > 1:
cites = ', '.join(['%r' % c for c in cs])
logger.warning("Citations %s match to the same object; you'll see duplicates" % cites)
citeable = xtx.sort(citeable, args.sort)
if args.heading:
citeable = xtx.heading(citeable, args.heading[0], args.heading[1])
try:
rendered = xtx.render(citeable)
rendered = rendered.encode('utf8')
except crosstex.style.UnsupportedCitation as e:
logger.error('Style does not support citations for %s' % e.citetype)
return 1
if args.output:
with open(args.output, 'w') as fout:
fout.write(rendered.decode('utf-8'))
fout.flush()
elif is_aux and args.fmt == 'bbl':
with open(os.path.splitext(args.files[-1])[0] + '.bbl', 'w') as fout:
fout.write(rendered.decode('utf-8'))
fout.flush()
else:
sys.stdout.write(rendered.decode('utf-8'))
sys.stdout.flush()
return 0
except crosstex.CrossTeXError as e:
logger.error(str(e))
return 1
|
gpl-2.0
| -3,937,128,750,447,653,000 | 51.021459 | 102 | 0.557875 | false | 4.040333 | false | false | false |
natematias/pond-hopper
|
pond-hopper.py
|
1
|
8692
|
import re
import json
import sys
import os
import requests
import datetime
from bs4 import BeautifulSoup
from feedgen.feed import FeedGenerator
import dateutil.parser
from pytz import timezone
import pytz
import flask
from flask import Flask
from flask import render_template
from gender_detector import GenderDetector
import nltk
import string
from collections import defaultdict
import codecs
from mediameter.cliff import Cliff
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
sex_detector = GenderDetector('us')
my_cliff = Cliff('http://civicprod.media.mit.edu',8080)
app = Flask(__name__)
class Article:
def __init__(self, section, author=None, social=False):
self.author = author
def append_feedgen(self, fe):
fe.title(self.title)
for byline in self.bylines:
fe.author({"name":byline["name"]})
fe.link([{"href":self.url},{"href":self.image}])
fe.id(self.url)
fe.updated(self.date)
fe.pubdate(self.date)
fe.description(self.subtitle)
class AtlanticArticle(Article):
def __init__(self, section, author=None, social=False):
#extract information from the page
title_link = section.findAll("a")[0]#findAll(attrs={"class":"article"})[0].findAll("a")[0]
self.title = title_link.findAll("h2")[0].get_text()
self.url = re.sub(r'\#.*', '', title_link['href'])
eastern = timezone('US/Eastern')
self.article_text = None
self.cliff = None
# some dates come in as July/August 2014, so strip the first month field from it
datefield = section.findAll("time")[0]#re.sub(r'.*\/?','',section.findAll(attrs={"class":"date"})[0].get_text())
#import pdb;pdb.set_trace()
self.date = eastern.localize(dateutil.parser.parse(datefield.text.strip()))
self.subtitle = section.findAll(attrs={"class":"dek"})[0].get_text()
self.bylines = []
if author is None:
for auth in section.findAll(attrs={"class":"author"}):
self.bylines.append({"name": auth.get_text(), "url": auth['href']})
else:
self.bylines.append({"name":author})
self.image = None
thumb = section.findAll("figure")
if(len(thumb)>0):
img = thumb[0].findAll("img")
if(len(img)>0):
self.image = img[0]['src']
#print self.image
#TODO: download the first paragraph from the article
print self.title.encode('ascii', 'ignore')
self.get_article_text()
self.query_cliff()
self.get_gender_counts()
#TODO: download social media metrics for the article
if(social):
self.facebook = facebook("http://theatlantic.com/" + self.url)
#self.twitter = twitter(self.url)
def get_article_text(self):
res = requests.get("http://theatlantic.com" + self.url)
soup = BeautifulSoup(res.text)
body_tag = soup.findAll(attrs={"class":"article-body"})
self.article_text = body_tag[0].text.replace("\n", " \n")
self.sentences = len(sent_detector.tokenize(self.article_text))
return self.article_text
def query_cliff(self):
#cliff_url = "http://cliff.mediameter.org/process"
a_text = self.article_text#.encode('ascii', 'ignore')
#res = requests.post(cliff_url, data={"demonyms":"false", "text":a_text})
self.cliff = my_cliff.parseText(a_text)#json.loads(res.text)
#f = codecs.open("articletext.log", "a", encoding='utf_8')
#f.write(a_text)
#f.write("\n\n ---------------\n\n")
#f.write(self.cliff)
#f.write("\n\n ---------------\n\n")
#f.close()
self.cliff['results']['mentions']=None
self.cliff['results']['places']=None
return self.cliff
def person_list(self):
return {"names":set(),"first":None, "gender":"unknown", "count":0}
def get_gender_counts(self):
if(self.cliff is None):
return None
people_list = defaultdict(self.person_list)
for person in self.cliff['results']['people']:
fullname = person['name']
nametokens = string.split(fullname.strip(), ' ')
surname = nametokens[-1]
if(len(nametokens)==0):
continue
## ASSUMPTION: SINGLE NAME IS A SURNAME SITUATION
people_list[surname]['names'].add(fullname)
people_list[surname]['count'] += person['count']
if(len(nametokens)>1):
people_list[surname]['first'] = nametokens[0]
counts = {"male":0, "female":0, "unknown":0}
for key in people_list.keys():
person = people_list[key]
if(person['first'] is None):
counts['unknown'] += person['count']
continue
gender = sex_detector.guess(person['first'])
counts[gender] += person['count']
      person['gender'] = gender
self.people_list = people_list
self.gender_counts = counts
@app.route("/metrics/byline/<byline>")
def byline_metrics(byline):
url = "http://www.theatlantic.com/author/" + byline.replace("/","") + "/"
fg, articles = get_fg(url,social=True)
#twitter = [str(x.twitter) for x in articles]
twitter = []
facebook = [str(x.facebook['data'][0]['total_count']) for x in articles]
labels = ['"' + x.date.strftime('%b %d %Y') + '"' for x in articles]
labels.reverse()
data = {"twitter":twitter,"facebook":facebook}
data['twitter'].reverse()
data['facebook'].reverse()
return render_template("metrics.html", fg = fg, articles=articles, byline=byline, twitter = twitter, facebook=facebook, labels=labels, data=data)
# get a feed for a byline
@app.route("/byline/<byline>")
def byline(byline):
url = "http://www.theatlantic.com/" + byline.replace("/","") + "/"
#print url
return get_feed_for_url(url)
# get a feed for a section
@app.route("/section/<sectiona>/<sectionb>/<sectionc>/")
def section(sectiona,sectionb,sectionc):
url = "http://www.theatlantic.com/{0}/{1}/{2}".format(sectiona,sectionb,sectionc)
return get_feed_for_url(url)
def get_fg(url, social=False):
res = requests.get(url)
soup = BeautifulSoup(res.text)
#load the articles into classes
articles = []
author_tag = soup.findAll("div", attrs={"class":"author-header"})
#at = author_tag.findAll("div", attrs={"class":"name"})
author = None
if len(author_tag)>0:
at = author_tag[0].findAll(attrs={"class":"name"})
#author = ' '.join(author_tag[0].get_text().split())
author = at[0].text.strip()
for article in soup.findAll(attrs={"class":"article"}):
articles.append(AtlanticArticle(article, author=author,social=social))
#import pdb; pdb.set_trace()
#set up the feed, with basic metadata
fg = FeedGenerator()
fg.link(href=url)
if(author is None and len(articles)>0):
fg.author(name=articles[0].bylines[0])
else:
fg.author(name=author)
title_tag = soup.findAll(attrs={"class":"display-category"})
#set the title if there's not a category -- e.g. it's a person's page
if(len(title_tag)>0):
title = ' '.join(title_tag[0].get_text().split())
else:
title = "Atlantic posts by {0}".format(author.encode('ascii', 'ignore'))
fg.title(title)
#set the description
description = soup.findAll(attrs={"class":"bio"})
if len(description)>0:
fg.description(' '.join(description[0].get_text().split()))
else:
fg.description("RSS Feed for {0}, generated by Pond Hopper 0.1".format(title))
#add each article to the feed
for article in articles:
article.append_feedgen(fg.add_entry())
return fg, articles
#return a feed for a url
def get_feed_for_url(url):
fg = get_fg(url)[0]
return flask.Response(fg.rss_str(pretty=True), mimetype='application/rss+xml')
#get facebook data for a url
def facebook(url):
#res = requests.get("http://graph.facebook.com/" + url)
res = requests.get("https://graph.facebook.com/fql?q=SELECT%20like_count,%20total_count,%20share_count,%20click_count,%20comment_count%20FROM%20link_stat%20WHERE%20url%20=%20%22{0}%22".format(url.replace("http://","")))
j = json.loads(res.text)
if "data" in j.keys() and len(j['data'])>0:
return j
else:
return {"data":[{"total_count":0}]}
#def twitter(url):
# res = requests.get("http://urls.api.twitter.com/1/urls/count.json?url=" + url)
# return json.loads(res.text)['count']
def reddit(url):
reddit_url = "http://buttons.reddit.com/button_info.json?url={0}".format(url)
res = requests.get(reddit_url)
#import pdb; pdb.set_trace()
j = json.loads(res.text)
if not "data" in j:
print "REDDIT ERROR WITH {0}".format(reddit_url)
return {"ups":"0", "num_comments":"0"}
else:
data = j['data']
if "children" in data and len(data["children"]) > 0 and "data" in data["children"][0]:
child = data["children"][0]
return {"ups":child["data"]["ups"],"num_comments":child["data"]["num_comments"]}
return {"ups":"0", "num_comments":"0"}
if __name__ == "__main__":
app.debug = True
app.run(host='0.0.0.0',port=5050)
|
mit
| -5,024,372,724,159,938,000 | 33.220472 | 221 | 0.652094 | false | 3.19794 | false | false | false |
dc-atlas/bcml
|
doc/conf.py
|
1
|
7251
|
#
#
# Copyright (C) 2010 Razvan Popovici <[email protected]>
# Copyright (C) 2010 Luca Beltrame <[email protected]>
# Copyright (C) 2010 Enrica Calura <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# -*- coding: utf-8 -*-
#
# BCML documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 10 10:09:27 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'BCML'
copyright = u'2010, Luca Beltrame, Enrica Calura, Razvan Popovici'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'BCMLdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'BCML.tex', u'BCML Documentation',
u'Luca Beltrame, Enrica Calura, Razvan Popovici', 'manual'),
]
latex_elements = {
"papersize" : "a4paper"
}
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
lgpl-2.1
| 9,100,167,073,289,095,000 | 32.109589 | 80 | 0.717005 | false | 3.678843 | false | false | false |
lmandres/MeSH-Analysis-Methodology
|
searchlib/helper.py
|
1
|
30718
|
'''
Created on Jul 5, 2011
@author: Leo Andres (user)
'''
from datetime import timedelta
import os
import re
import xml.parsers.expat
class TextXMLParser:
element_path = ''
element_dictionary = {}
current_element_data = {}
def __init__(self):
self.element_path = ''
self.element_dictionary = {}
self.current_element_data = {}
def __element_path_to_list(self, element_path_in):
element_indexes = {
'path_list' : [],
'path_indexes' : []}
for match_item in re.finditer('<(.*?)(<\d*?>)?>', element_path_in):
element_indexes['path_list'].append(match_item.group(1))
path_index = None
try:
path_index = int(match_item.group(2).strip()[1:len(match_item.group(2).strip())-1])
except AttributeError:
pass
except ValueError:
pass
element_indexes['path_indexes'].append(path_index)
return element_indexes
def __append_element_data(self, element_path_in, element_attributes_in, element_data_in):
def __insert_item(
path_list_in,
path_indexes_in,
element_dictionary_in,
element_attr_in,
element_cdata_in):
element_dictionary_out = element_dictionary_in
try:
path_index = len(element_dictionary_in[path_list_in[0]])-1
try:
path_index = int(path_indexes_in[0])
except TypeError:
pass
except ValueError:
pass
if len(path_list_in) == 1:
element_dictionary_out[path_list_in[0]][path_index]['attributes'] = element_attr_in
element_dictionary_out[path_list_in[0]][path_index]['character_data'] = element_cdata_in
else:
element_dictionary_out[path_list_in[0]][path_index]['sub_elements'] = __insert_item(
path_list_in[1:],
path_indexes_in[1:],
element_dictionary_out[path_list_in[0]][path_index]['sub_elements'],
element_attr_in,
element_cdata_in)
except IndexError:
return None
except KeyError:
return None
return element_dictionary_out
self.element_dictionary = __insert_item(
self.__element_path_to_list(element_path_in)['path_list'],
self.__element_path_to_list(element_path_in)['path_indexes'],
self.element_dictionary,
element_attributes_in,
element_data_in)
def __append_element_dict(self, element_path_in):
def __insert_sub_element_dict(
path_list_in,
element_dictionary_in):
element_dictionary_out = element_dictionary_in
if path_list_in[0] not in element_dictionary_out.keys():
element_dictionary_out[path_list_in[0]] = []
if len(path_list_in) == 1:
element_dictionary_out[path_list_in[0]].append({
'attributes' : {},
'character_data' : [],
'sub_elements' : {}})
else:
path_index = len(element_dictionary_out[path_list_in[0]])-1
if len(element_dictionary_out[path_list_in[0]]) <= 0 or 'sub_elements' not in element_dictionary_out[path_list_in[0]][path_index].keys():
element_dictionary_out[path_list_in[0]].append({
'attributes' : {},
'character_data' : [],
'sub_elements' : {}})
element_dictionary_out[path_list_in[0]][path_index]['sub_elements'] = __insert_sub_element_dict(
path_list_in[1:],
element_dictionary_out[path_list_in[0]][path_index]['sub_elements'])
return element_dictionary_out
self.element_dictionary = __insert_sub_element_dict(
self.__element_path_to_list(element_path_in)['path_list'],
self.element_dictionary)
def __start_element_handler(self, element_name, element_attributes):
if self.element_path == '':
self.element_dictionary = {}
self.current_element_data = {}
self.element_path += '<' + element_name.strip() + '>'
self.__append_element_dict(self.element_path)
self.current_element_data[self.element_path] = {
'attrs' : {},
'cdata' : []}
if element_attributes:
self.current_element_data[self.element_path]['attrs'] = element_attributes
def __end_element_handler(self, element_name):
if self.current_element_data[self.element_path]['attrs'] or self.current_element_data[self.element_path]['cdata']:
self.__append_element_data(
self.element_path,
self.current_element_data[self.element_path]['attrs'],
self.current_element_data[self.element_path]['cdata'])
del(self.current_element_data[self.element_path])
if self.element_path.endswith('<' + element_name.strip() + '>'):
self.element_path = self.element_path[:self.element_path.rfind('<' + element_name.strip() + '>')]
def __character_data_handler(self, element_data):
if element_data.strip():
self.current_element_data[self.element_path]['cdata'].append(element_data.strip())
def parse_xml_file(self, xml_file_in):
self.element_path = ''
self.element_dictionary = {}
self.current_element_data = {}
parser = xml.parsers.expat.ParserCreate()
parser.StartElementHandler = self.__start_element_handler
parser.EndElementHandler = self.__end_element_handler
parser.CharacterDataHandler = self.__character_data_handler
parser.ParseFile(xml_file_in)
def parse_xml_string(self, xml_string_in, is_final = True):
self.element_path = ''
self.element_dictionary = {}
self.current_element_data = {}
parser = xml.parsers.expat.ParserCreate()
parser.StartElementHandler = self.__start_element_handler
parser.EndElementHandler = self.__end_element_handler
parser.CharacterDataHandler = self.__character_data_handler
parser.Parse(xml_string_in, is_final)
def get_element_item(self, element_path_in):
def __retrieve_item(path_list_in, path_indexes_in, element_dictionary_in):
try:
path_index = len(element_dictionary_in[path_list_in[0]])-1
try:
path_index = int(path_indexes_in[0])
except TypeError:
pass
except ValueError:
pass
if len(path_list_in) == 1:
return element_dictionary_in[path_list_in[0]][path_index]
else:
return __retrieve_item(
path_list_in[1:],
path_indexes_in[1:],
element_dictionary_in[path_list_in[0]][path_index]['sub_elements'])
except IndexError:
return None
except KeyError:
return None
return __retrieve_item(
self.__element_path_to_list(element_path_in)['path_list'],
self.__element_path_to_list(element_path_in)['path_indexes'],
self.element_dictionary)
def get_string_cdata(self, element_path_in):
element_value = None
try:
element_value_list = self.get_element_item(element_path_in)['character_data']
element_value = ''.join(element_value_list)
except TypeError:
pass
except IndexError:
pass
return element_value
def get_integer_cdata(self, element_path_in):
element_value = None
try:
element_value = int(self.get_string_cdata(element_path_in))
except TypeError:
pass
except IndexError:
pass
return element_value
def get_string_attr(self, element_path_in, element_attr_in):
element_value = None
try:
element_value = self.get_element_item(element_path_in)['attributes'][element_attr_in]
except TypeError:
pass
except KeyError:
pass
return element_value
def get_integer_attr(self, element_path_in, element_attr_in):
element_value = None
try:
element_value = int(self.get_string_attr(element_path_in, element_attr_in))
except ValueError:
pass
return element_value
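# Illustrative sketch only, not part of the original module: demonstrates the
# element-path convention the get_* accessors above expect. The XML snippet and
# function name are made up, and this assumes the private path helpers behave
# as those accessors imply.
def _demo_text_xml_parser():
    parser = TextXMLParser()
    parser.parse_xml_string('<Settings><Timeout unit="seconds">30</Timeout></Settings>')
    timeout = parser.get_integer_cdata('<Settings><Timeout>')      # -> 30
    unit = parser.get_string_attr('<Settings><Timeout>', 'unit')   # -> 'seconds'
    return timeout, unit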
class PubMedSearchSettings:
    '''
    Reads the PubMed search configuration from pubmed_conf.xml via TextXMLParser.
    '''
settings_filename = os.path.abspath('pubmed_conf.xml')
search_settings = None
def __init__(self):
'''
Constructor
'''
self.read_settings()
def read_settings(self):
self.search_settings = TextXMLParser()
file_in = open(self.settings_filename, 'rb')
self.search_settings.parse_xml_file(file_in)
file_in.close()
def get_database_connection_type(self):
return self.search_settings.get_string_attr('<BiblioAnalysisSettings><DatabaseConnection>', 'type')
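    # Builds a dict of connection properties for the configured driver type.
    # Supported types are JDBCODBCDRIVER, PYODBCDRIVER, PYPYODBCDRIVER,
    # SQLITE3DRIVER and MYSQLDRIVER; settings missing from the file are
    # skipped silently.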
def get_database_connection_properties(self):
database_connection_properties = {}
database_connection_type_case = None
try:
database_connection_type_case = self.get_database_connection_type().upper()
        except (TypeError, AttributeError):
pass
if database_connection_type_case == 'JDBCODBCDRIVER':
database_odbc_dbq = None
database_odbc_driver = None
try:
database_odbc_driver = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><Driver>').strip()
            except (TypeError, AttributeError):
pass
try:
database_odbc_dbq = os.path.abspath(self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><DBQ>').strip())
            except (TypeError, AttributeError):
pass
if database_odbc_driver and database_odbc_dbq:
database_connection_properties['driver'] = database_odbc_driver.strip()
database_connection_properties['dbq'] = database_odbc_dbq.strip()
elif database_connection_type_case == 'PYODBCDRIVER':
database_odbc_dbq = None
database_odbc_driver = None
database_odbc_server = None
database_odbc_db = None
database_odbc_trusted_conn = False
database_odbc_uid = None
database_odbc_pwd = None
try:
database_odbc_driver = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><Driver>').strip()
except AttributeError:
pass
try:
database_odbc_dbq = os.path.abspath(self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><DBQ>').strip())
except AttributeError:
pass
try:
database_odbc_server = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><Server>').strip()
except AttributeError:
pass
try:
database_odbc_db = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><Database>').strip()
except AttributeError:
pass
try:
database_odbc_trusted_conn_case = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><TrustedConnection>').strip().upper()
if database_odbc_trusted_conn_case == 'YES':
database_odbc_trusted_conn = True
elif database_odbc_trusted_conn_case == 'NO':
database_odbc_trusted_conn = False
elif database_odbc_trusted_conn_case == 'TRUE':
database_odbc_trusted_conn = True
elif database_odbc_trusted_conn_case == 'FALSE':
database_odbc_trusted_conn = False
elif database_odbc_trusted_conn_case == '1':
database_odbc_trusted_conn = True
elif database_odbc_trusted_conn_case == '0':
database_odbc_trusted_conn = False
else:
database_odbc_trusted_conn = False
except AttributeError:
pass
try:
database_odbc_uid = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><Uid>')
except AttributeError:
pass
try:
database_odbc_pwd = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><Pwd>')
except AttributeError:
pass
if database_odbc_driver and database_odbc_dbq:
database_connection_properties['driver'] = database_odbc_driver.strip()
database_connection_properties['dbq'] = database_odbc_dbq.strip()
if database_odbc_driver and database_odbc_server and database_odbc_db:
database_connection_properties['driver'] = database_odbc_driver.strip()
database_connection_properties['server'] = database_odbc_server.strip()
database_connection_properties['database'] = database_odbc_db.strip()
database_connection_properties['trusted_connection'] = 'NO'
if database_odbc_trusted_conn:
database_connection_properties['trusted_connection'] = 'YES'
if database_odbc_uid and database_odbc_pwd:
database_connection_properties['uid'] = database_odbc_uid
database_connection_properties['pwd'] = database_odbc_pwd
elif database_connection_type_case == 'PYPYODBCDRIVER':
database_odbc_dbq = None
database_odbc_driver = None
try:
database_odbc_driver = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><Driver>').strip()
            except (TypeError, AttributeError):
pass
try:
database_odbc_dbq = os.path.abspath(self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><DBQ>').strip())
            except (TypeError, AttributeError):
pass
if database_odbc_driver and database_odbc_dbq:
database_connection_properties['driver'] = database_odbc_driver.strip()
database_connection_properties['dbq'] = database_odbc_dbq.strip()
elif database_connection_type_case == 'SQLITE3DRIVER':
database_filename = None
try:
database_filename = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><FileName>').strip()
            except (TypeError, AttributeError):
pass
if database_filename:
database_connection_properties['filename'] = database_filename.strip()
elif database_connection_type_case == 'MYSQLDRIVER':
database_host = None
database_user = None
database_passwd = None
database_db = None
try:
database_host = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><Host>').strip()
database_user = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><User>').strip()
database_passwd = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><Passwd>').strip()
database_db = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><DB>').strip()
            except (TypeError, AttributeError):
pass
if database_host and database_user and database_passwd and database_db:
database_connection_properties['host'] = database_host.strip()
database_connection_properties['user'] = database_user.strip()
database_connection_properties['passwd'] = database_passwd.strip()
database_connection_properties['db'] = database_db.strip()
return database_connection_properties
def get_search_tool_name(self):
return self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><SearchToolName>')
def get_reset_database_tables(self):
reset_database_tables = False
reset_database_tables_case = None
try:
reset_database_tables_case = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><ResetDatabaseTables>').strip().upper()
        except (TypeError, AttributeError):
pass
if reset_database_tables_case == 'YES':
reset_database_tables = True
elif reset_database_tables_case == 'NO':
reset_database_tables = False
elif reset_database_tables_case == 'TRUE':
reset_database_tables = True
elif reset_database_tables_case == 'FALSE':
reset_database_tables = False
elif reset_database_tables_case == '1':
reset_database_tables = True
elif reset_database_tables_case == '0':
reset_database_tables = False
else:
reset_database_tables = False
return reset_database_tables
def get_update_investigator_ids(self):
update_investigator_ids = False
update_investigator_ids_case = None
try:
update_investigator_ids_case = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><UpdateInvestigatorIDs>').strip().upper()
        except (TypeError, AttributeError):
pass
if update_investigator_ids_case == 'YES':
update_investigator_ids = True
elif update_investigator_ids_case == 'NO':
update_investigator_ids = False
elif update_investigator_ids_case == 'TRUE':
update_investigator_ids = True
elif update_investigator_ids_case == 'FALSE':
update_investigator_ids = False
elif update_investigator_ids_case == '1':
update_investigator_ids = True
elif update_investigator_ids_case == '0':
update_investigator_ids = False
else:
update_investigator_ids = False
return update_investigator_ids
def get_update_publication_results(self):
update_publication_results = False
update_publication_results_case = None
try:
update_publication_results_case = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><UpdatePublicationResults>').strip().upper()
        except (TypeError, AttributeError):
pass
if update_publication_results_case == 'YES':
update_publication_results = True
elif update_publication_results_case == 'NO':
update_publication_results = False
elif update_publication_results_case == 'TRUE':
update_publication_results = True
elif update_publication_results_case == 'FALSE':
update_publication_results = False
elif update_publication_results_case == '1':
update_publication_results = True
elif update_publication_results_case == '0':
update_publication_results = False
else:
update_publication_results = False
return update_publication_results
def get_eutils_address(self):
return self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><EUtilsAddress>')
def get_email_address(self):
return self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><EMailAddress>')
def get_http_delay(self):
return self.search_settings.get_integer_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><HTTPDelay>')
def get_weekday_hours_start_time(self):
return_timedelta = None
start_time_hours = None
start_time_minutes = None
start_time_string = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><WeekdayHours><StartTime>')
try:
            match = re.match(r'(\d+):(\d+)', start_time_string)
if match:
start_time_hours = int(match.group(1))
start_time_minutes = int(match.group(2))
return_timedelta = timedelta(hours=start_time_hours, minutes=start_time_minutes)
except TypeError:
pass
return return_timedelta
def get_weekday_hours_end_time(self):
return_timedelta = None
end_time_hours = None
end_time_minutes = None
end_time_string = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><WeekdayHours><EndTime>')
try:
            match = re.match(r'(\d+):(\d+)', end_time_string)
if match:
end_time_hours = int(match.group(1))
end_time_minutes = int(match.group(2))
return_timedelta = timedelta(hours=end_time_hours, minutes=end_time_minutes)
except TypeError:
pass
return return_timedelta
def get_return_maximum(self):
return self.search_settings.get_integer_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><ReturnMaximum>')
def get_minimum_date(self):
return self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><MinimumDate>')
def get_maximum_date(self):
return self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><MaximumDate>')
def get_maximum_url_length(self):
return self.search_settings.get_integer_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><MaximumURLLength>')
def get_maximum_tries(self):
return self.search_settings.get_integer_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><MaximumTries>')
def get_eutils_use_history(self):
eutils_use_history = False
eutils_use_history_case = None
try:
eutils_use_history_case = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><EUtilsUseHistory>').strip().upper()
        except (TypeError, AttributeError):
pass
if eutils_use_history_case == 'YES':
eutils_use_history = True
elif eutils_use_history_case == 'NO':
eutils_use_history = False
elif eutils_use_history_case == 'TRUE':
eutils_use_history = True
elif eutils_use_history_case == 'FALSE':
eutils_use_history = False
elif eutils_use_history_case == '1':
eutils_use_history = True
elif eutils_use_history_case == '0':
eutils_use_history = False
else:
eutils_use_history = False
return eutils_use_history
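    # Each entry in search_strategies_array corresponds to one bit
    # (2**index) of the returned value, so the result is a bitmask of the
    # strategies enabled in the configuration file.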
def get_search_strategies(self):
PERSON_BY_PERSON = 2**0
PERSON_ORGANIZATION = 2**1
PERSON_GRANT = 2**2
PERSON_COAUTHOR = 2**3
CTSA_GRANT = 2**4
PMCID_CITE_BY_PMCID = 2**5
PUBMED_ID_CITE_BY_PUBMED_ID = 2**6
PUBMED_ID_NEIGHBOR_PUBMED_ID = 2**7
search_strategies_array = [
'PersonByPerson',
'PersonOrganization',
'PersonGrant',
'PersonCoauthor',
'CTSAGrant',
'PMCIDCiteByPMCID',
'PubMedIDCiteByPubMedID',
'PubMedIDNeighborPubMedID']
search_strategies = 0
for strategy_index in range(0, len(search_strategies_array), 1):
search_strategy_case = None
try:
search_strategy_case = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><SearchStrategies><' + search_strategies_array[strategy_index] + '>').strip().upper()
            except (TypeError, AttributeError):
pass
if search_strategy_case == 'YES':
search_strategies |= (2**strategy_index)
elif search_strategy_case == 'NO':
search_strategies &= ~(2**strategy_index)
elif search_strategy_case == 'TRUE':
search_strategies |= (2**strategy_index)
elif search_strategy_case == 'FALSE':
search_strategies &= ~(2**strategy_index)
elif search_strategy_case == '1':
search_strategies |= (2**strategy_index)
elif search_strategy_case == '0':
search_strategies &= ~(2**strategy_index)
else:
search_strategies &= ~(2**strategy_index)
return search_strategies
def get_timeout(self):
return self.search_settings.get_integer_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><Timeout>')
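# Illustrative sketch only, not part of the original module: the repeated
# YES/NO/TRUE/FALSE/1/0 chains in the settings getters above could be collapsed
# into a single helper along these lines (the function name is hypothetical).
def _parse_boolean_setting(text_in, default=False):
    if text_in is None:
        return default
    value_case = text_in.strip().upper()
    if value_case in ('YES', 'TRUE', '1'):
        return True
    if value_case in ('NO', 'FALSE', '0'):
        return False
    return default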
class ClassifyMeSHTermsSettings:
    '''
    Reads the MeSH term classification settings from pubmed_conf.xml via TextXMLParser.
    '''
settings_filename = os.path.abspath('pubmed_conf.xml')
search_settings = None
def __init__(self):
'''
Constructor
'''
self.read_settings()
def read_settings(self):
self.search_settings = TextXMLParser()
file_in = open(self.settings_filename, 'rb')
        self.search_settings.parse_xml_file(file_in)
file_in.close()
def get_descriptors_file_name(self):
return self.search_settings.get_string_cdata('<BiblioAnalysisSettings><ClassifyMeSHTermsSettings><MeSHDescriptorsFileName>')
def get_qualifiers_file_name(self):
return self.search_settings.get_string_cdata('<BiblioAnalysisSettings><ClassifyMeSHTermsSettings><MeSHQualifiersFileName>')
def get_reset_database_tables(self):
reset_database_tables = False
reset_database_tables_case = None
try:
reset_database_tables_case = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><ClassifyMeSHTermsSettings><ResetDatabaseTables>').strip().upper()
        except (TypeError, AttributeError):
pass
if reset_database_tables_case == 'YES':
reset_database_tables = True
elif reset_database_tables_case == 'NO':
reset_database_tables = False
elif reset_database_tables_case == 'TRUE':
reset_database_tables = True
elif reset_database_tables_case == 'FALSE':
reset_database_tables = False
elif reset_database_tables_case == '1':
reset_database_tables = True
elif reset_database_tables_case == '0':
reset_database_tables = False
else:
reset_database_tables = False
return reset_database_tables
|
gpl-2.0
| 2,402,271,350,444,328,000 | 39.259502 | 209 | 0.521388 | false | 4.804191 | false | false | false |
grow/grow
|
grow/cache/object_cache.py
|
1
|
2129
|
"""
Cache for storing and retrieving data in a pod.
Supports arbitrary data based on a cache key.
The contents of the cache should be raw and not internationalized as it will
be shared between locales.
"""
import re
FILE_OBJECT_CACHE = 'objectcache.json'
FILE_OBJECT_SUB_CACHE = 'objectcache.{}.json'
class ObjectCache(object):
"""Object cache for caching arbitrary data in a pod."""
def __contains__(self, key):
return key in self._cache
def __init__(self):
self._cache = {}
self._is_dirty = False
self.reset()
def add(self, key, value):
"""Add a new item to the cache or overwrite an existing value."""
if not self._is_dirty and (key not in self._cache or self._cache[key] != value):
self._is_dirty = True
self._cache[key] = value
def add_all(self, key_to_cached):
"""Update the cache with a preexisting set of data."""
for key, value in key_to_cached.items():
self.add(key, value)
def export(self):
"""Returns the raw cache data."""
return self._cache
def get(self, key):
"""Retrieve the value from the cache."""
return self._cache.get(key, None)
@property
def is_dirty(self):
"""Have the contents of the object cache been modified?"""
return self._is_dirty
def mark_clean(self):
"""Mark that the object cache is clean."""
self._is_dirty = False
def remove(self, key):
"""Removes a single element from the cache."""
self._is_dirty = True
return self._cache.pop(key, None)
def reset(self):
"""Reset the internal cache object."""
self._cache = {}
self._is_dirty = False
def search(self, pattern):
"""Search through the cache and return all the matching elements."""
if type(pattern) is not type(re.compile('.')):
pattern = re.compile(pattern)
results = {}
for key, value in self._cache.items():
if pattern.search(key) is not None:
results[key] = value
return results
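# Illustrative sketch only, not part of the original module: basic ObjectCache
# usage. The cache keys and values below are made up for the example.
def _demo_object_cache():
    cache = ObjectCache()
    cache.add('collection:/content/pages/intro.yaml', {'title': 'Intro'})
    cache.add('collection:/content/pages/about.yaml', {'title': 'About'})
    assert cache.is_dirty  # add() marks the cache dirty on new or changed keys.
    intro = cache.get('collection:/content/pages/intro.yaml')
    pages = cache.search(r'^collection:/content/pages/')  # matches both entries
    cache.mark_clean()
    return intro, pages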
|
mit
| 8,474,668,111,042,571,000 | 26.649351 | 88 | 0.592767 | false | 4.110039 | false | false | false |
rrader/Pomodoro
|
pomodoro/mainframe.py
|
1
|
4124
|
#!/usr/bin/env python2.6
# -*- coding: utf-8 -*-
# mainframe.py
# Pomodoro
#
# Created by Roman Rader on 22.06.11.
# New BSD License 2011 Antigluk https://github.com/antigluk/Pomodoro
"""
Contains main frame of application.
"""
import wx
from state import PomodoroStateProxy as PomodoroState
from NotificationCenter.NotificationCenter import NotificationCenter
import logging
logging.getLogger('Pomodoro')
class MainFrameController(wx.Frame):
"""Main frame of Pomodoro"""
def __init__(self):
wx.Frame.__init__(
self,
None,
-1,
'Pomodoro it!',
style=wx.BORDER_DEFAULT | wx.STAY_ON_TOP,
size=(220, 120),
)
state = PomodoroState()
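        # Maps each Pomodoro state to the label ('bs') shown on the
        # start/cancel button in updateUI().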
self.__state_dict = {
state.StateNoState: {'bs': '...'},
state.StateInPomodoro: {'bs': u"Отменить..."},
state.StateInRest: {'bs': u"Отдыхайте!"},
state.StateWaitingPomodoro: {'bs': u"Начать помидору"},
state.StateWaitingRest: {'bs': u"Начать отдых"},
state.StatePomodoroKilled: {'bs': u"Начать помидору"},
}
self.buildFrame()
self.updateUI()
self.makeMenu()
self.Show(False)
NotificationCenter().addObserver(self,self.onDBUpdate,"dbUpdated")
NotificationCenter().addObserver(self,self.onUpdateUI,"updateUI")
def buildFrame(self):
self.panel = wx.Panel(self)
self.txt = wx.StaticText(self.panel, pos=(10, 10),
label='Pomodoro!')
self.times_l = wx.StaticText(self.panel, pos=(120, 10),
label=u"0 помидор")
self.timer_ctrl = wx.TextCtrl(self.panel, pos=(10, 30),
size=(200, -1), style=wx.TE_READONLY | wx.TE_CENTER)
self.start_button = wx.Button(self.panel, pos=(20, 70), label=''
, size=(170, -1))
self.start_button.Bind(wx.EVT_BUTTON, self.bClick)
def onUpdateUI(self, event):
self.updateUI()
def updateUI(self):
        #TODO: check whether the window is visible; skip the update otherwise
#TODO: remove this ugly method
state = PomodoroState()
self.timer_ctrl.SetValue(state.text)
self.start_button.SetLabel(self.__state_dict[state.active]['bs'])
self.txt.SetLabel(state.caption)
self.times_l.SetLabel(u"%d помидор" % state.GetTodayCount())
def bClick(self, m):
logging.debug("Toggle state called from menu")
self.controller.toggleState()
def onExit(self,m):
logging.debug("Quit called from menu")
self.controller.quit()
def makeMenu(self):
self.menuBar = wx.MenuBar()
self.filemenu = wx.Menu()
self.pomodmenu = wx.Menu()
item = self.filemenu.Append(wx.ID_ANY, "Hide")
self.Bind(wx.EVT_MENU, self.hideFrame, item)
item = self.filemenu.Append(wx.ID_ANY, "Toggle pomodoro")
self.Bind(wx.EVT_MENU, self.bClick, item)
self.filemenu.AppendSeparator()
item = self.filemenu.Append(wx.ID_EXIT, "&Quit", "quit")
self.Bind(wx.EVT_MENU, self.onExit, id=wx.ID_EXIT)
item = self.pomodmenu.Append(wx.ID_ANY, "All", "List of pomodoros")
self.Bind(wx.EVT_MENU, self.showListOfPomodoros, item)
item = self.pomodmenu.Append(wx.ID_ANY, "Statistics", "Statistics")
self.Bind(wx.EVT_MENU, self.showStatistics, item)
self.menuBar.Append(self.filemenu, "&File")
self.menuBar.Append(self.pomodmenu, "&Pomodors")
self.SetMenuBar(self.menuBar)
def onDBUpdate(self, obj):
pass
def hideFrame(self, m):
logging.debug("Hide frame called from menu")
self.Show(False)
def showListOfPomodoros(self, m):
logging.debug("Show list of pomodors called from menu")
self.controller.showListOfPomodoros()
def showStatistics(self, m):
logging.debug("Show statistics of pomodors called from menu")
self.controller.showStatistics()
|
bsd-3-clause
| -2,721,271,360,558,041,600 | 32.764706 | 75 | 0.608912 | false | 3.193164 | false | false | false |
ace-han/onedegree
|
admin/account/api/v1/fields.py
|
1
|
1049
|
from rest_framework import serializers
from rest_framework.relations import RelatedField
class TaggedItemRelatedField(serializers.PrimaryKeyRelatedField):
"""
A custom field to use for the `tagged_object` generic relationship.
"""
def __init__(self, **kwargs):
self.pk_field = kwargs.pop('pk_field', None)
kwargs.pop('many', None)
kwargs.pop('allow_empty', None)
self.queryset = kwargs.pop('queryset', self.queryset)
super(RelatedField, self).__init__(**kwargs)
def to_internal_value(self, data):
value = serializers.PrimaryKeyRelatedField.to_internal_value(self, data)
# self.root.instance => Profile instance
# relationship = get_attribute(instance, self.source_attrs)
# relationship.set(*value)
return value
# def to_representation(self, value):
# """
# Serialize tagged objects to a simple textual representation.
# """
# raise Exception('Unexpected type of tagged object')
|
bsd-3-clause
| 4,808,190,821,871,013,000 | 38.423077 | 80 | 0.638704 | false | 4.212851 | false | false | false |
gjhiggins/electrum
|
lib/wallet.py
|
1
|
75104
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import hashlib
import ast
import threading
import random
import time
import json
import copy
from operator import itemgetter
from util import NotEnoughFunds, PrintError, profiler
from bitcoin import *
from account import *
from version import *
from transaction import Transaction
from plugins import run_hook
import bitcoin
from synchronizer import Synchronizer
from mnemonic import Mnemonic
import paymentrequest
# internal ID for imported account
IMPORTED_ACCOUNT = '/x'
class WalletStorage(PrintError):
def __init__(self, path):
self.lock = threading.RLock()
self.data = {}
self.path = path
self.file_exists = False
self.modified = False
self.print_error("wallet path", self.path)
if self.path:
self.read(self.path)
def read(self, path):
"""Read the contents of the wallet file."""
try:
with open(self.path, "r") as f:
data = f.read()
except IOError:
return
try:
self.data = json.loads(data)
except:
try:
d = ast.literal_eval(data) #parse raw data from reading wallet file
except Exception as e:
raise IOError("Cannot read wallet file '%s'" % self.path)
self.data = {}
# In old versions of Electrum labels were latin1 encoded, this fixes breakage.
labels = d.get('labels', {})
for i, label in labels.items():
try:
unicode(label)
except UnicodeDecodeError:
d['labels'][i] = unicode(label.decode('latin1'))
for key, value in d.items():
try:
json.dumps(key)
json.dumps(value)
except:
self.print_error('Failed to convert label to json format', key)
continue
self.data[key] = value
self.file_exists = True
def get(self, key, default=None):
with self.lock:
v = self.data.get(key)
if v is None:
v = default
else:
v = copy.deepcopy(v)
return v
def put(self, key, value, save = True):
try:
json.dumps(key)
json.dumps(value)
except:
self.print_error("json error: cannot save", key)
return
with self.lock:
if value is not None:
if self.data.get(key) != value:
self.modified = True
self.data[key] = copy.deepcopy(value)
elif key in self.data:
self.modified = True
self.data.pop(key)
if save:
self.write()
def write(self):
assert not threading.currentThread().isDaemon()
if not self.modified:
return
with self.lock:
s = json.dumps(self.data, indent=4, sort_keys=True)
temp_path = "%s.tmp.%s" % (self.path, os.getpid())
with open(temp_path, "w") as f:
f.write(s)
f.flush()
os.fsync(f.fileno())
# perform atomic write on POSIX systems
try:
os.rename(temp_path, self.path)
except:
os.remove(self.path)
os.rename(temp_path, self.path)
if 'ANDROID_DATA' not in os.environ:
import stat
os.chmod(self.path,stat.S_IREAD | stat.S_IWRITE)
self.print_error("saved")
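# Illustrative sketch only, not part of the original module: WalletStorage as a
# JSON-backed key/value store. The wallet path and label below are hypothetical.
def _demo_wallet_storage():
    storage = WalletStorage('/tmp/demo_wallet')
    storage.put('labels', {'example-address': 'donations'}, False)  # defer write
    storage.put('use_encryption', False)  # save=True flushes the file atomically
    return storage.get('labels')  # get() returns a deep copy of the stored value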
class Abstract_Wallet(PrintError):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
def __init__(self, storage):
self.storage = storage
self.network = None
self.electrum_version = ELECTRUM_VERSION
self.gap_limit_for_change = 6 # constant
# saved fields
self.seed_version = storage.get('seed_version', NEW_SEED_VERSION)
self.use_change = storage.get('use_change',True)
self.use_encryption = storage.get('use_encryption', False)
self.seed = storage.get('seed', '') # encrypted
self.labels = storage.get('labels', {})
self.frozen_addresses = set(storage.get('frozen_addresses',[]))
self.stored_height = storage.get('stored_height', 0) # last known height (for offline mode)
self.history = storage.get('addr_history',{}) # address -> list(txid, height)
# This attribute is set when wallet.start_threads is called.
self.synchronizer = None
# imported_keys is deprecated. The GUI should call convert_imported_keys
self.imported_keys = self.storage.get('imported_keys',{})
self.load_accounts()
self.load_transactions()
self.build_reverse_history()
# load requests
self.receive_requests = self.storage.get('payment_requests', {})
# spv
self.verifier = None
# Transactions pending verification. A map from tx hash to transaction
# height. Access is not contended so no lock is needed.
self.unverified_tx = {}
# Verified transactions. Each value is a (height, timestamp, block_pos) tuple. Access with self.lock.
self.verified_tx = storage.get('verified_tx3',{})
# there is a difference between wallet.up_to_date and interface.is_up_to_date()
# interface.is_up_to_date() returns true when all requests have been answered and processed
# wallet.up_to_date is true when the wallet is synchronized (stronger requirement)
self.up_to_date = False
self.lock = threading.Lock()
self.transaction_lock = threading.Lock()
self.tx_event = threading.Event()
self.check_history()
# save wallet type the first time
if self.storage.get('wallet_type') is None:
self.storage.put('wallet_type', self.wallet_type, True)
def diagnostic_name(self):
return self.basename()
@profiler
def load_transactions(self):
self.txi = self.storage.get('txi', {})
self.txo = self.storage.get('txo', {})
self.pruned_txo = self.storage.get('pruned_txo', {})
tx_list = self.storage.get('transactions', {})
self.transactions = {}
for tx_hash, raw in tx_list.items():
tx = Transaction(raw)
self.transactions[tx_hash] = tx
if self.txi.get(tx_hash) is None and self.txo.get(tx_hash) is None and (tx_hash not in self.pruned_txo.values()):
self.print_error("removing unreferenced tx", tx_hash)
self.transactions.pop(tx_hash)
@profiler
def save_transactions(self):
with self.transaction_lock:
tx = {}
for k,v in self.transactions.items():
tx[k] = str(v)
# Flush storage only with the last put
self.storage.put('transactions', tx, False)
self.storage.put('txi', self.txi, False)
self.storage.put('txo', self.txo, False)
self.storage.put('pruned_txo', self.pruned_txo, True)
def clear_history(self):
with self.transaction_lock:
self.txi = {}
self.txo = {}
self.pruned_txo = {}
self.save_transactions()
with self.lock:
self.history = {}
self.tx_addr_hist = {}
self.storage.put('addr_history', self.history, True)
@profiler
def build_reverse_history(self):
self.tx_addr_hist = {}
for addr, hist in self.history.items():
for tx_hash, h in hist:
s = self.tx_addr_hist.get(tx_hash, set())
s.add(addr)
self.tx_addr_hist[tx_hash] = s
@profiler
def check_history(self):
save = False
for addr, hist in self.history.items():
if not self.is_mine(addr):
self.history.pop(addr)
save = True
continue
for tx_hash, tx_height in hist:
if tx_hash in self.pruned_txo.values() or self.txi.get(tx_hash) or self.txo.get(tx_hash):
continue
tx = self.transactions.get(tx_hash)
if tx is not None:
tx.deserialize()
self.add_transaction(tx_hash, tx)
if save:
self.storage.put('addr_history', self.history, True)
# wizard action
def get_action(self):
pass
def basename(self):
return os.path.basename(self.storage.path)
def convert_imported_keys(self, password):
for k, v in self.imported_keys.items():
sec = pw_decode(v, password)
pubkey = public_key_from_private_key(sec)
address = public_key_to_bc_address(pubkey.decode('hex'))
if address != k:
raise InvalidPassword()
self.import_key(sec, password)
self.imported_keys.pop(k)
self.storage.put('imported_keys', self.imported_keys)
def load_accounts(self):
self.accounts = {}
d = self.storage.get('accounts', {})
for k, v in d.items():
if self.wallet_type == 'old' and k in [0, '0']:
v['mpk'] = self.storage.get('master_public_key')
self.accounts['0'] = OldAccount(v)
elif v.get('imported'):
self.accounts[k] = ImportedAccount(v)
elif v.get('xpub'):
self.accounts[k] = BIP32_Account(v)
elif v.get('pending'):
try:
self.accounts[k] = PendingAccount(v)
except:
pass
else:
self.print_error("cannot load account", v)
def synchronize(self):
pass
def can_create_accounts(self):
return False
def set_up_to_date(self,b):
with self.lock: self.up_to_date = b
def is_up_to_date(self):
with self.lock: return self.up_to_date
def update(self):
self.up_to_date = False
while not self.is_up_to_date():
time.sleep(0.1)
def is_imported(self, addr):
account = self.accounts.get(IMPORTED_ACCOUNT)
if account:
return addr in account.get_addresses(0)
else:
return False
def has_imported_keys(self):
account = self.accounts.get(IMPORTED_ACCOUNT)
return account is not None
def import_key(self, sec, password):
assert self.can_import(), 'This wallet cannot import private keys'
try:
pubkey = public_key_from_private_key(sec)
address = public_key_to_bc_address(pubkey.decode('hex'))
except Exception:
raise Exception('Invalid private key')
if self.is_mine(address):
raise Exception('Address already in wallet')
if self.accounts.get(IMPORTED_ACCOUNT) is None:
self.accounts[IMPORTED_ACCOUNT] = ImportedAccount({'imported':{}})
self.accounts[IMPORTED_ACCOUNT].add(address, pubkey, sec, password)
self.save_accounts()
# force resynchronization, because we need to re-run add_transaction
if address in self.history:
self.history.pop(address)
if self.synchronizer:
self.synchronizer.add(address)
return address
def delete_imported_key(self, addr):
account = self.accounts[IMPORTED_ACCOUNT]
account.remove(addr)
if not account.get_addresses(0):
self.accounts.pop(IMPORTED_ACCOUNT)
self.save_accounts()
def set_label(self, name, text = None):
changed = False
old_text = self.labels.get(name)
if text:
if old_text != text:
self.labels[name] = text
changed = True
else:
if old_text:
self.labels.pop(name)
changed = True
if changed:
run_hook('set_label', self, name, text)
self.storage.put('labels', self.labels, True)
return changed
def addresses(self, include_change = True):
return list(addr for acc in self.accounts for addr in self.get_account_addresses(acc, include_change))
def is_mine(self, address):
return address in self.addresses(True)
def is_change(self, address):
if not self.is_mine(address): return False
acct, s = self.get_address_index(address)
if s is None: return False
return s[0] == 1
def get_address_index(self, address):
for acc_id in self.accounts:
for for_change in [0,1]:
addresses = self.accounts[acc_id].get_addresses(for_change)
if address in addresses:
return acc_id, (for_change, addresses.index(address))
raise Exception("Address not found", address)
def get_private_key(self, address, password):
if self.is_watching_only():
return []
account_id, sequence = self.get_address_index(address)
return self.accounts[account_id].get_private_key(sequence, self, password)
def get_public_keys(self, address):
account_id, sequence = self.get_address_index(address)
return self.accounts[account_id].get_pubkeys(*sequence)
def sign_message(self, address, message, password):
keys = self.get_private_key(address, password)
assert len(keys) == 1
sec = keys[0]
key = regenerate_key(sec)
compressed = is_compressed(sec)
return key.sign_message(message, compressed, address)
def decrypt_message(self, pubkey, message, password):
address = public_key_to_bc_address(pubkey.decode('hex'))
keys = self.get_private_key(address, password)
secret = keys[0]
ec = regenerate_key(secret)
decrypted = ec.decrypt_message(message)
return decrypted
def add_unverified_tx(self, tx_hash, tx_height):
# Only add if confirmed and not verified
if tx_height > 0 and tx_hash not in self.verified_tx:
self.unverified_tx[tx_hash] = tx_height
def add_verified_tx(self, tx_hash, info):
# Remove from the unverified map and add to the verified map and
self.unverified_tx.pop(tx_hash, None)
with self.lock:
self.verified_tx[tx_hash] = info # (tx_height, timestamp, pos)
self.storage.put('verified_tx3', self.verified_tx, True)
conf, timestamp = self.get_confirmations(tx_hash)
self.network.trigger_callback('verified', (tx_hash, conf, timestamp))
def get_unverified_txs(self):
'''Returns a map from tx hash to transaction height'''
return self.unverified_tx
def undo_verifications(self, height):
'''Used by the verifier when a reorg has happened'''
txs = []
with self.lock:
            for tx_hash, item in self.verified_tx.items():
tx_height, timestamp, pos = item
if tx_height >= height:
self.verified_tx.pop(tx_hash, None)
txs.append(tx_hash)
return txs
def get_local_height(self):
""" return last known height if we are offline """
return self.network.get_local_height() if self.network else self.stored_height
def get_confirmations(self, tx):
""" return the number of confirmations of a monitored transaction. """
with self.lock:
if tx in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx]
conf = (self.get_local_height() - height + 1)
if conf <= 0: timestamp = None
elif tx in self.unverified_tx:
conf = -1
timestamp = None
else:
conf = 0
timestamp = None
return conf, timestamp
def get_txpos(self, tx_hash):
"return position, even if the tx is unverified"
with self.lock:
x = self.verified_tx.get(tx_hash)
y = self.unverified_tx.get(tx_hash)
if x:
height, timestamp, pos = x
return height, pos
elif y:
return y, 0
else:
return 1e12, 0
def is_found(self):
return self.history.values() != [[]] * len(self.history)
def get_num_tx(self, address):
""" return number of transactions where address is involved """
return len(self.history.get(address, []))
def get_tx_delta(self, tx_hash, address):
"effect of tx on address"
# pruned
if tx_hash in self.pruned_txo.values():
return None
delta = 0
# substract the value of coins sent from address
d = self.txi.get(tx_hash, {}).get(address, [])
for n, v in d:
delta -= v
# add the value of the coins received at address
d = self.txo.get(tx_hash, {}).get(address, [])
for n, v, cb in d:
delta += v
return delta
def get_wallet_delta(self, tx):
""" effect of tx on wallet """
addresses = self.addresses(True)
is_relevant = False
is_send = False
is_pruned = False
is_partial = False
v_in = v_out = v_out_mine = 0
for item in tx.inputs:
addr = item.get('address')
if addr in addresses:
is_send = True
is_relevant = True
d = self.txo.get(item['prevout_hash'], {}).get(addr, [])
for n, v, cb in d:
if n == item['prevout_n']:
value = v
break
else:
value = None
if value is None:
is_pruned = True
else:
v_in += value
else:
is_partial = True
if not is_send:
is_partial = False
for addr, value in tx.get_outputs():
v_out += value
if addr in addresses:
v_out_mine += value
is_relevant = True
if is_pruned:
# some inputs are mine:
fee = None
if is_send:
v = v_out_mine - v_out
else:
# no input is mine
v = v_out_mine
else:
v = v_out_mine - v_in
if is_partial:
# some inputs are mine, but not all
fee = None
is_send = v < 0
else:
# all inputs are mine
fee = v_out - v_in
return is_relevant, is_send, v, fee
def get_addr_io(self, address):
h = self.history.get(address, [])
received = {}
sent = {}
for tx_hash, height in h:
l = self.txo.get(tx_hash, {}).get(address, [])
for n, v, is_cb in l:
received[tx_hash + ':%d'%n] = (height, v, is_cb)
for tx_hash, height in h:
l = self.txi.get(tx_hash, {}).get(address, [])
for txi, v in l:
sent[txi] = height
return received, sent
def get_addr_utxo(self, address):
coins, spent = self.get_addr_io(address)
for txi in spent:
coins.pop(txi)
return coins
# return the total amount ever received by an address
def get_addr_received(self, address):
received, sent = self.get_addr_io(address)
return sum([v for height, v, is_cb in received.values()])
# return the balance of a vcoin address: confirmed and matured, unconfirmed, unmatured
def get_addr_balance(self, address):
received, sent = self.get_addr_io(address)
c = u = x = 0
for txo, (tx_height, v, is_cb) in received.items():
if is_cb and tx_height + COINBASE_MATURITY > self.get_local_height():
x += v
elif tx_height > 0:
c += v
else:
u += v
if txo in sent:
if sent[txo] > 0:
c -= v
else:
u -= v
return c, u, x
def get_spendable_coins(self, domain = None, exclude_frozen = True):
coins = []
if domain is None:
domain = self.addresses(True)
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
c = self.get_addr_utxo(addr)
for txo, v in c.items():
tx_height, value, is_cb = v
if is_cb and tx_height + COINBASE_MATURITY > self.get_local_height():
continue
prevout_hash, prevout_n = txo.split(':')
output = {
'address':addr,
'value':value,
'prevout_n':int(prevout_n),
'prevout_hash':prevout_hash,
'height':tx_height,
'coinbase':is_cb
}
coins.append((tx_height, output))
continue
# sort by age
if coins:
coins = sorted(coins)
if coins[-1][0] != 0:
while coins[0][0] == 0:
coins = coins[1:] + [ coins[0] ]
return [value for height, value in coins]
def get_account_name(self, k):
return self.labels.get(k, self.accounts[k].get_name(k))
def get_account_names(self):
account_names = {}
for k in self.accounts.keys():
account_names[k] = self.get_account_name(k)
return account_names
def get_account_addresses(self, acc_id, include_change=True):
if acc_id is None:
addr_list = self.addresses(include_change)
elif acc_id in self.accounts:
acc = self.accounts[acc_id]
addr_list = acc.get_addresses(0)
if include_change:
addr_list += acc.get_addresses(1)
return addr_list
def get_account_from_address(self, addr):
"Returns the account that contains this address, or None"
for acc_id in self.accounts: # similar to get_address_index but simpler
if addr in self.get_account_addresses(acc_id):
return acc_id
return None
def get_account_balance(self, account):
return self.get_balance(self.get_account_addresses(account))
def get_frozen_balance(self):
return self.get_balance(self.frozen_addresses)
def get_balance(self, domain=None):
if domain is None:
domain = self.addresses(True)
cc = uu = xx = 0
for addr in domain:
c, u, x = self.get_addr_balance(addr)
cc += c
uu += u
xx += x
return cc, uu, xx
def get_address_history(self, address):
with self.lock:
return self.history.get(address, [])
def get_status(self, h):
if not h:
return None
status = ''
for tx_hash, height in h:
status += tx_hash + ':%d:' % height
return hashlib.sha256( status ).digest().encode('hex')
def find_pay_to_pubkey_address(self, prevout_hash, prevout_n):
dd = self.txo.get(prevout_hash, {})
for addr, l in dd.items():
for n, v, is_cb in l:
if n == prevout_n:
self.print_error("found pay-to-pubkey address:", addr)
return addr
def add_transaction(self, tx_hash, tx):
is_coinbase = tx.inputs[0].get('is_coinbase') == True
with self.transaction_lock:
# add inputs
self.txi[tx_hash] = d = {}
for txi in tx.inputs:
addr = txi.get('address')
if not txi.get('is_coinbase'):
prevout_hash = txi['prevout_hash']
prevout_n = txi['prevout_n']
ser = prevout_hash + ':%d'%prevout_n
if addr == "(pubkey)":
addr = self.find_pay_to_pubkey_address(prevout_hash, prevout_n)
# find value from prev output
if addr and self.is_mine(addr):
dd = self.txo.get(prevout_hash, {})
for n, v, is_cb in dd.get(addr, []):
if n == prevout_n:
if d.get(addr) is None:
d[addr] = []
d[addr].append((ser, v))
break
else:
self.pruned_txo[ser] = tx_hash
# add outputs
self.txo[tx_hash] = d = {}
for n, txo in enumerate(tx.outputs):
ser = tx_hash + ':%d'%n
_type, x, v = txo
if _type == 'address':
addr = x
elif _type == 'pubkey':
addr = public_key_to_bc_address(x.decode('hex'))
else:
addr = None
if addr and self.is_mine(addr):
if d.get(addr) is None:
d[addr] = []
d[addr].append((n, v, is_coinbase))
# give v to txi that spends me
next_tx = self.pruned_txo.get(ser)
if next_tx is not None:
self.pruned_txo.pop(ser)
dd = self.txi.get(next_tx, {})
if dd.get(addr) is None:
dd[addr] = []
dd[addr].append((ser, v))
# save
self.transactions[tx_hash] = tx
def remove_transaction(self, tx_hash):
with self.transaction_lock:
self.print_error("removing tx from history", tx_hash)
#tx = self.transactions.pop(tx_hash)
for ser, hh in self.pruned_txo.items():
if hh == tx_hash:
self.pruned_txo.pop(ser)
# add tx to pruned_txo, and undo the txi addition
for next_tx, dd in self.txi.items():
for addr, l in dd.items():
ll = l[:]
for item in ll:
ser, v = item
prev_hash, prev_n = ser.split(':')
if prev_hash == tx_hash:
l.remove(item)
self.pruned_txo[ser] = next_tx
if l == []:
dd.pop(addr)
else:
dd[addr] = l
self.txi.pop(tx_hash)
self.txo.pop(tx_hash)
def receive_tx_callback(self, tx_hash, tx, tx_height):
self.add_transaction(tx_hash, tx)
self.add_unverified_tx(tx_hash, tx_height)
def receive_history_callback(self, addr, hist):
with self.lock:
old_hist = self.history.get(addr, [])
for tx_hash, height in old_hist:
if (tx_hash, height) not in hist:
# remove tx if it's not referenced in histories
self.tx_addr_hist[tx_hash].remove(addr)
if not self.tx_addr_hist[tx_hash]:
self.remove_transaction(tx_hash)
self.history[addr] = hist
self.storage.put('addr_history', self.history, True)
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# add reference in tx_addr_hist
s = self.tx_addr_hist.get(tx_hash, set())
s.add(addr)
self.tx_addr_hist[tx_hash] = s
# if addr is new, we have to recompute txi and txo
tx = self.transactions.get(tx_hash)
if tx is not None and self.txi.get(tx_hash, {}).get(addr) is None and self.txo.get(tx_hash, {}).get(addr) is None:
tx.deserialize()
self.add_transaction(tx_hash, tx)
def get_history(self, domain=None):
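        # Builds an oldest-first list of (tx_hash, confirmations, delta,
        # timestamp, balance) tuples, where balance is the running wallet
        # balance immediately after that transaction.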
from collections import defaultdict
# get domain
if domain is None:
domain = self.get_account_addresses(None)
# 1. Get the history of each address in the domain, maintain the
# delta of a tx as the sum of its deltas on domain addresses
tx_deltas = defaultdict(int)
for addr in domain:
h = self.get_address_history(addr)
for tx_hash, height in h:
delta = self.get_tx_delta(tx_hash, addr)
if delta is None or tx_deltas[tx_hash] is None:
tx_deltas[tx_hash] = None
else:
tx_deltas[tx_hash] += delta
# 2. create sorted history
history = []
for tx_hash, delta in tx_deltas.items():
conf, timestamp = self.get_confirmations(tx_hash)
history.append((tx_hash, conf, delta, timestamp))
history.sort(key = lambda x: self.get_txpos(x[0]))
history.reverse()
# 3. add balance
c, u, x = self.get_balance(domain)
balance = c + u + x
h2 = []
for item in history:
tx_hash, conf, delta, timestamp = item
h2.append((tx_hash, conf, delta, timestamp, balance))
if balance is None or delta is None:
balance = None
else:
balance -= delta
h2.reverse()
# fixme: this may happen if history is incomplete
if balance not in [None, 0]:
self.print_error("Error: history not synchronized")
return []
return h2
def get_label(self, tx_hash):
label = self.labels.get(tx_hash)
is_default = (label == '') or (label is None)
if is_default:
label = self.get_default_label(tx_hash)
return label, is_default
def get_default_label(self, tx_hash):
if self.txi.get(tx_hash) == {}:
d = self.txo.get(tx_hash, {})
labels = []
for addr in d.keys():
label = self.labels.get(addr)
if label:
labels.append(label)
return ', '.join(labels)
return ''
def fee_per_kb(self, config):
b = config.get('dynamic_fees')
f = config.get('fee_factor', 50)
F = config.get('fee_per_kb', bitcoin.RECOMMENDED_FEE)
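        # With dynamic fees enabled and a network estimate available, scale the
        # network fee by (50 + fee_factor) percent and cap it at the configured
        # fee_per_kb; otherwise fall back to the static fee_per_kb.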
return min(F, self.network.fee*(50 + f)/100) if b and self.network and self.network.fee else F
def get_tx_fee(self, tx):
# this method can be overloaded
return tx.get_fee()
@profiler
def estimated_fee(self, tx, fee_per_kb):
estimated_size = len(tx.serialize(-1))/2
fee = int(fee_per_kb * estimated_size / 1000.)
if fee < MIN_RELAY_TX_FEE: # and tx.requires_fee(self):
fee = MIN_RELAY_TX_FEE
return fee
def make_unsigned_transaction(self, coins, outputs, config, fixed_fee=None, change_addr=None):
# check outputs
for type, data, value in outputs:
if type == 'address':
assert is_address(data), "Address " + data + " is invalid!"
fee_per_kb = self.fee_per_kb(config)
amount = sum(map(lambda x:x[2], outputs))
total = 0
inputs = []
tx = Transaction.from_io(inputs, outputs)
fee = fixed_fee if fixed_fee is not None else 0
# add inputs, sorted by age
for item in coins:
v = item.get('value')
total += v
self.add_input_info(item)
tx.add_input(item)
# no need to estimate fee until we have reached desired amount
if total < amount + fee:
continue
fee = fixed_fee if fixed_fee is not None else self.estimated_fee(tx, fee_per_kb)
if total >= amount + fee:
break
else:
raise NotEnoughFunds()
# remove unneeded inputs.
removed = False
for item in sorted(tx.inputs, key=itemgetter('value')):
v = item.get('value')
if total - v >= amount + fee:
tx.inputs.remove(item)
total -= v
removed = True
continue
else:
break
if removed:
fee = fixed_fee if fixed_fee is not None else self.estimated_fee(tx, fee_per_kb)
for item in sorted(tx.inputs, key=itemgetter('value')):
v = item.get('value')
if total - v >= amount + fee:
tx.inputs.remove(item)
total -= v
fee = fixed_fee if fixed_fee is not None else self.estimated_fee(tx, fee_per_kb)
continue
break
self.print_error("using %d inputs"%len(tx.inputs))
# change address
if not change_addr:
# send change to one of the accounts involved in the tx
address = inputs[0].get('address')
account, _ = self.get_address_index(address)
if self.use_change and self.accounts[account].has_change():
# New change addresses are created only after a few confirmations.
# Choose an unused change address if any, otherwise take one at random
change_addrs = self.accounts[account].get_addresses(1)[-self.gap_limit_for_change:]
for change_addr in change_addrs:
if self.get_num_tx(change_addr) == 0:
break
else:
change_addr = random.choice(change_addrs)
else:
change_addr = address
# if change is above dust threshold, add a change output.
change_amount = total - ( amount + fee )
if fixed_fee is not None and change_amount > 0:
tx.outputs.append(('address', change_addr, change_amount))
elif change_amount > DUST_THRESHOLD:
tx.outputs.append(('address', change_addr, change_amount))
# recompute fee including change output
fee = self.estimated_fee(tx, fee_per_kb)
# remove change output
tx.outputs.pop()
# if change is still above dust threshold, re-add change output.
change_amount = total - ( amount + fee )
if change_amount > DUST_THRESHOLD:
tx.outputs.append(('address', change_addr, change_amount))
self.print_error('change', change_amount)
else:
self.print_error('not keeping dust', change_amount)
else:
self.print_error('not keeping dust', change_amount)
# Sort the inputs and outputs deterministically
tx.BIP_LI01_sort()
run_hook('make_unsigned_transaction', self, tx)
return tx
def mktx(self, outputs, password, config, fee=None, change_addr=None, domain=None):
coins = self.get_spendable_coins(domain)
tx = self.make_unsigned_transaction(coins, outputs, config, fee, change_addr)
self.sign_transaction(tx, password)
return tx
def add_input_info(self, txin):
address = txin['address']
account_id, sequence = self.get_address_index(address)
account = self.accounts[account_id]
redeemScript = account.redeem_script(*sequence)
pubkeys = account.get_pubkeys(*sequence)
x_pubkeys = account.get_xpubkeys(*sequence)
# sort pubkeys and x_pubkeys, using the order of pubkeys
pubkeys, x_pubkeys = zip( *sorted(zip(pubkeys, x_pubkeys)))
txin['pubkeys'] = list(pubkeys)
txin['x_pubkeys'] = list(x_pubkeys)
txin['signatures'] = [None] * len(pubkeys)
if redeemScript:
txin['redeemScript'] = redeemScript
txin['num_sig'] = account.m
else:
txin['redeemPubkey'] = account.get_pubkey(*sequence)
txin['num_sig'] = 1
def sign_transaction(self, tx, password):
if self.is_watching_only():
return
# Raise if password is not correct.
self.check_password(password)
# Add derivation for utxo in wallets
for i, addr in self.utxo_can_sign(tx):
txin = tx.inputs[i]
txin['address'] = addr
self.add_input_info(txin)
# Add private keys
keypairs = {}
for x in self.xkeys_can_sign(tx):
sec = self.get_private_key_from_xpubkey(x, password)
if sec:
keypairs[x] = sec
# Sign
if keypairs:
tx.sign(keypairs)
# Run hook, and raise if error
tx.error = None
run_hook('sign_transaction', self, tx, password)
if tx.error:
raise BaseException(tx.error)
def sendtx(self, tx):
# synchronous
h = self.send_tx(tx)
self.tx_event.wait()
return self.receive_tx(h, tx)
def send_tx(self, tx):
# asynchronous
self.tx_event.clear()
self.network.send([('blockchain.transaction.broadcast', [str(tx)])], self.on_broadcast)
return tx.hash()
def on_broadcast(self, r):
self.tx_result = r.get('result')
self.tx_event.set()
def receive_tx(self, tx_hash, tx):
out = self.tx_result
if out != tx_hash:
return False, "error: " + out
run_hook('receive_tx', tx, self)
return True, out
def update_password(self, old_password, new_password):
if new_password == '':
new_password = None
if self.has_seed():
decoded = self.get_seed(old_password)
self.seed = pw_encode( decoded, new_password)
self.storage.put('seed', self.seed, True)
imported_account = self.accounts.get(IMPORTED_ACCOUNT)
if imported_account:
imported_account.update_password(old_password, new_password)
self.save_accounts()
if hasattr(self, 'master_private_keys'):
for k, v in self.master_private_keys.items():
b = pw_decode(v, old_password)
c = pw_encode(b, new_password)
self.master_private_keys[k] = c
self.storage.put('master_private_keys', self.master_private_keys, True)
self.use_encryption = (new_password != None)
self.storage.put('use_encryption', self.use_encryption,True)
def is_frozen(self, addr):
return addr in self.frozen_addresses
def set_frozen_state(self, addrs, freeze):
'''Set frozen state of the addresses to FREEZE, True or False'''
if all(self.is_mine(addr) for addr in addrs):
if freeze:
self.frozen_addresses |= set(addrs)
else:
self.frozen_addresses -= set(addrs)
self.storage.put('frozen_addresses', list(self.frozen_addresses), True)
return True
return False
def prepare_for_verifier(self):
# review transactions that are in the history
for addr, hist in self.history.items():
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx (tx_hash, tx_height)
# if we are on a pruning server, remove unverified transactions
with self.lock:
vr = self.verified_tx.keys() + self.unverified_tx.keys()
for tx_hash in self.transactions.keys():
if tx_hash not in vr:
self.print_error("removing transaction", tx_hash)
self.transactions.pop(tx_hash)
def start_threads(self, network):
from verifier import SPV
self.network = network
if self.network is not None:
self.prepare_for_verifier()
self.verifier = SPV(self.network, self)
self.synchronizer = Synchronizer(self, network)
network.add_jobs([self.verifier, self.synchronizer])
else:
self.verifier = None
self.synchronizer = None
def stop_threads(self):
if self.network:
self.network.remove_jobs([self.synchronizer, self.verifier])
self.synchronizer = None
self.verifier = None
self.storage.put('stored_height', self.get_local_height(), True)
def restore(self, cb):
pass
def get_accounts(self):
return self.accounts
def add_account(self, account_id, account):
self.accounts[account_id] = account
self.save_accounts()
def save_accounts(self):
d = {}
for k, v in self.accounts.items():
d[k] = v.dump()
self.storage.put('accounts', d, True)
def can_import(self):
return not self.is_watching_only()
def can_export(self):
return not self.is_watching_only()
def is_used(self, address):
h = self.history.get(address,[])
c, u, x = self.get_addr_balance(address)
return len(h) > 0 and c + u + x == 0
def is_empty(self, address):
c, u, x = self.get_addr_balance(address)
return c+u+x == 0
def address_is_old(self, address, age_limit=2):
age = -1
h = self.history.get(address, [])
for tx_hash, tx_height in h:
if tx_height == 0:
tx_age = 0
else:
tx_age = self.get_local_height() - tx_height + 1
if tx_age > age:
age = tx_age
return age > age_limit
def can_sign(self, tx):
if self.is_watching_only():
return False
if tx.is_complete():
return False
if self.xkeys_can_sign(tx):
return True
if self.utxo_can_sign(tx):
return True
return False
def utxo_can_sign(self, tx):
out = set()
coins = self.get_spendable_coins()
for i in tx.inputs_without_script():
txin = tx.inputs[i]
for item in coins:
if txin.get('prevout_hash') == item.get('prevout_hash') and txin.get('prevout_n') == item.get('prevout_n'):
out.add((i, item.get('address')))
return out
def xkeys_can_sign(self, tx):
out = set()
for x in tx.inputs_to_sign():
if self.can_sign_xpubkey(x):
out.add(x)
return out
def get_private_key_from_xpubkey(self, x_pubkey, password):
if x_pubkey[0:2] in ['02','03','04']:
addr = bitcoin.public_key_to_bc_address(x_pubkey.decode('hex'))
if self.is_mine(addr):
return self.get_private_key(addr, password)[0]
elif x_pubkey[0:2] == 'ff':
xpub, sequence = BIP32_Account.parse_xpubkey(x_pubkey)
for k, v in self.master_public_keys.items():
if v == xpub:
xprv = self.get_master_private_key(k, password)
if xprv:
_, _, _, c, k = deserialize_xkey(xprv)
return bip32_private_key(sequence, k, c)
elif x_pubkey[0:2] == 'fe':
xpub, sequence = OldAccount.parse_xpubkey(x_pubkey)
for k, account in self.accounts.items():
if xpub in account.get_master_pubkeys():
pk = account.get_private_key(sequence, self, password)
return pk[0]
elif x_pubkey[0:2] == 'fd':
addrtype = ord(x_pubkey[2:4].decode('hex'))
addr = hash_160_to_bc_address(x_pubkey[4:].decode('hex'), addrtype)
if self.is_mine(addr):
return self.get_private_key(addr, password)[0]
else:
raise BaseException("z")
def can_sign_xpubkey(self, x_pubkey):
if x_pubkey[0:2] in ['02','03','04']:
addr = bitcoin.public_key_to_bc_address(x_pubkey.decode('hex'))
return self.is_mine(addr)
elif x_pubkey[0:2] == 'ff':
if not isinstance(self, BIP32_Wallet): return False
xpub, sequence = BIP32_Account.parse_xpubkey(x_pubkey)
return xpub in [ self.master_public_keys[k] for k in self.master_private_keys.keys() ]
elif x_pubkey[0:2] == 'fe':
if not isinstance(self, OldWallet): return False
xpub, sequence = OldAccount.parse_xpubkey(x_pubkey)
return xpub == self.get_master_public_key()
elif x_pubkey[0:2] == 'fd':
addrtype = ord(x_pubkey[2:4].decode('hex'))
addr = hash_160_to_bc_address(x_pubkey[4:].decode('hex'), addrtype)
return self.is_mine(addr)
else:
raise BaseException("z")
def is_watching_only(self):
        return False
def can_change_password(self):
return not self.is_watching_only()
def get_unused_address(self, account):
# fixme: use slots from expired requests
domain = self.get_account_addresses(account, include_change=False)
for addr in domain:
if not self.history.get(addr) and addr not in self.receive_requests.keys():
return addr
def get_payment_request(self, addr, config):
import util
r = self.receive_requests.get(addr)
if not r:
return
out = copy.copy(r)
out['URI'] = 'vcoin:' + addr + '?amount=' + util.format_satoshis(out.get('amount'))
out['status'] = self.get_request_status(addr)
# check if bip70 file exists
rdir = config.get('requests_dir')
if rdir:
key = out.get('id', addr)
path = os.path.join(rdir, key)
if os.path.exists(path):
baseurl = 'file://' + rdir
rewrite = config.get('url_rewrite')
if rewrite:
baseurl = baseurl.replace(*rewrite)
out['request_url'] = os.path.join(baseurl, key)
out['URI'] += '&r=' + out['request_url']
out['index_url'] = os.path.join(baseurl, 'index.html') + '?id=' + key
return out
def get_request_status(self, key):
from paymentrequest import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
r = self.receive_requests[key]
address = r['address']
amount = r.get('amount')
timestamp = r.get('time', 0)
if timestamp and type(timestamp) != int:
timestamp = 0
expiration = r.get('exp')
if expiration and type(expiration) != int:
expiration = 0
if amount:
if self.up_to_date:
paid = amount <= self.get_addr_received(address)
status = PR_PAID if paid else PR_UNPAID
if status == PR_UNPAID and expiration is not None and time.time() > timestamp + expiration:
status = PR_EXPIRED
else:
status = PR_UNKNOWN
else:
status = PR_UNKNOWN
return status
def make_payment_request(self, addr, amount, message, expiration):
timestamp = int(time.time())
_id = Hash(addr + "%d"%timestamp).encode('hex')[0:10]
r = {'time':timestamp, 'amount':amount, 'exp':expiration, 'address':addr, 'memo':message, 'id':_id}
return r
def sign_payment_request(self, key, alias, alias_addr, password):
req = self.receive_requests.get(key)
alias_privkey = self.get_private_key(alias_addr, password)[0]
pr = paymentrequest.make_unsigned_request(req)
paymentrequest.sign_request_with_alias(pr, alias, alias_privkey)
req['name'] = pr.pki_data
req['sig'] = pr.signature.encode('hex')
self.receive_requests[key] = req
self.storage.put('payment_requests', self.receive_requests)
def add_payment_request(self, req, config):
import os
addr = req['address']
amount = req.get('amount')
message = req.get('memo')
self.receive_requests[addr] = req
self.storage.put('payment_requests', self.receive_requests)
self.set_label(addr, message) # should be a default label
rdir = config.get('requests_dir')
if rdir and amount is not None:
key = req.get('id', addr)
pr = paymentrequest.make_request(config, req)
path = os.path.join(rdir, key)
with open(path, 'w') as f:
f.write(pr.SerializeToString())
# reload
req = self.get_payment_request(addr, config)
with open(os.path.join(rdir, key + '.json'), 'w') as f:
f.write(json.dumps(req))
return req
def remove_payment_request(self, addr, config):
if addr not in self.receive_requests:
return False
r = self.receive_requests.pop(addr)
rdir = config.get('requests_dir')
if rdir:
key = r.get('id', addr)
for s in ['.json', '']:
n = os.path.join(rdir, key + s)
if os.path.exists(n):
os.unlink(n)
self.storage.put('payment_requests', self.receive_requests)
return True
def get_sorted_requests(self, config):
return sorted(map(lambda x: self.get_payment_request(x, config), self.receive_requests.keys()), key=lambda x: x.get('time', 0))
class Imported_Wallet(Abstract_Wallet):
wallet_type = 'imported'
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
a = self.accounts.get(IMPORTED_ACCOUNT)
if not a:
self.accounts[IMPORTED_ACCOUNT] = ImportedAccount({'imported':{}})
def is_watching_only(self):
acc = self.accounts[IMPORTED_ACCOUNT]
n = acc.keypairs.values()
return len(n) > 0 and n == [[None, None]] * len(n)
def has_seed(self):
return False
def is_deterministic(self):
return False
def check_password(self, password):
self.accounts[IMPORTED_ACCOUNT].get_private_key((0,0), self, password)
def is_used(self, address):
return False
def get_master_public_keys(self):
return {}
def is_beyond_limit(self, address, account, is_change):
return False
class Deterministic_Wallet(Abstract_Wallet):
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
def has_seed(self):
return self.seed != ''
def is_deterministic(self):
return True
def is_watching_only(self):
return not self.has_seed()
def add_seed(self, seed, password):
if self.seed:
raise Exception("a seed exists")
self.seed_version, self.seed = self.format_seed(seed)
if password:
self.seed = pw_encode( self.seed, password)
self.use_encryption = True
else:
self.use_encryption = False
self.storage.put('seed', self.seed, False)
self.storage.put('seed_version', self.seed_version, False)
self.storage.put('use_encryption', self.use_encryption,True)
def get_seed(self, password):
return pw_decode(self.seed, password)
def get_mnemonic(self, password):
return self.get_seed(password)
def change_gap_limit(self, value):
assert isinstance(value, int), 'gap limit must be of type int, not of %s'%type(value)
if value >= self.gap_limit:
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit, True)
return True
elif value >= self.min_acceptable_gap():
for key, account in self.accounts.items():
addresses = account.get_addresses(False)
k = self.num_unused_trailing_addresses(addresses)
n = len(addresses) - k + value
account.receiving_pubkeys = account.receiving_pubkeys[0:n]
account.receiving_addresses = account.receiving_addresses[0:n]
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit, True)
self.save_accounts()
return True
else:
return False
def num_unused_trailing_addresses(self, addresses):
k = 0
for a in addresses[::-1]:
if self.history.get(a):break
k = k + 1
return k
def min_acceptable_gap(self):
# fixme: this assumes wallet is synchronized
n = 0
nmax = 0
for account in self.accounts.values():
addresses = account.get_addresses(0)
k = self.num_unused_trailing_addresses(addresses)
for a in addresses[0:-k]:
if self.history.get(a):
n = 0
else:
n += 1
if n > nmax: nmax = n
return nmax + 1
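    # Worked example (illustrative, not part of the original code): if the longest run
    # of consecutive unused addresses between used ones in any account is 4, this
    # returns 5, so change_gap_limit() will not lower the gap limit below 5; trailing
    # unused addresses are already excluded via num_unused_trailing_addresses().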
def default_account(self):
return self.accounts['0']
def create_new_address(self, account=None, for_change=0):
if account is None:
account = self.default_account()
address = account.create_new_address(for_change)
self.add_address(address)
return address
def add_address(self, address):
if address not in self.history:
self.history[address] = []
if self.synchronizer:
self.synchronizer.add(address)
self.save_accounts()
def synchronize(self):
with self.lock:
for account in self.accounts.values():
account.synchronize(self)
def restore(self, callback):
from i18n import _
def wait_for_wallet():
self.set_up_to_date(False)
while not self.is_up_to_date():
msg = "%s\n%s %d"%(
_("Please wait..."),
_("Addresses generated:"),
len(self.addresses(True)))
apply(callback, (msg,))
time.sleep(0.1)
def wait_for_network():
while not self.network.is_connected():
msg = "%s \n" % (_("Connecting..."))
apply(callback, (msg,))
time.sleep(0.1)
# wait until we are connected, because the user might have selected another server
if self.network:
wait_for_network()
wait_for_wallet()
else:
self.synchronize()
def is_beyond_limit(self, address, account, is_change):
if type(account) == ImportedAccount:
return False
addr_list = account.get_addresses(is_change)
i = addr_list.index(address)
prev_addresses = addr_list[:max(0, i)]
limit = self.gap_limit_for_change if is_change else self.gap_limit
if len(prev_addresses) < limit:
return False
prev_addresses = prev_addresses[max(0, i - limit):]
for addr in prev_addresses:
if self.history.get(addr):
return False
return True
def get_action(self):
if not self.get_master_public_key():
return 'create_seed'
if not self.accounts:
return 'create_accounts'
def get_master_public_keys(self):
out = {}
for k, account in self.accounts.items():
if type(account) == ImportedAccount:
continue
name = self.get_account_name(k)
mpk_text = '\n\n'.join(account.get_master_pubkeys())
out[name] = mpk_text
return out
class BIP32_Wallet(Deterministic_Wallet):
# abstract class, bip32 logic
root_name = 'x/'
def __init__(self, storage):
Deterministic_Wallet.__init__(self, storage)
self.master_public_keys = storage.get('master_public_keys', {})
self.master_private_keys = storage.get('master_private_keys', {})
self.gap_limit = storage.get('gap_limit', 20)
def is_watching_only(self):
return not bool(self.master_private_keys)
def can_import(self):
return False
def get_master_public_key(self):
return self.master_public_keys.get(self.root_name)
def get_master_private_key(self, account, password):
k = self.master_private_keys.get(account)
if not k: return
xprv = pw_decode(k, password)
try:
deserialize_xkey(xprv)
except:
raise InvalidPassword()
return xprv
def check_password(self, password):
xpriv = self.get_master_private_key(self.root_name, password)
xpub = self.master_public_keys[self.root_name]
if deserialize_xkey(xpriv)[3] != deserialize_xkey(xpub)[3]:
raise InvalidPassword()
def add_master_public_key(self, name, xpub):
if xpub in self.master_public_keys.values():
raise BaseException('Duplicate master public key')
self.master_public_keys[name] = xpub
self.storage.put('master_public_keys', self.master_public_keys, True)
def add_master_private_key(self, name, xpriv, password):
self.master_private_keys[name] = pw_encode(xpriv, password)
self.storage.put('master_private_keys', self.master_private_keys, True)
def derive_xkeys(self, root, derivation, password):
x = self.master_private_keys[root]
root_xprv = pw_decode(x, password)
xprv, xpub = bip32_private_derivation(root_xprv, root, derivation)
return xpub, xprv
def create_master_keys(self, password):
seed = self.get_seed(password)
self.add_cosigner_seed(seed, self.root_name, password)
def add_cosigner_seed(self, seed, name, password, passphrase=''):
# we don't store the seed, only the master xpriv
xprv, xpub = bip32_root(self.mnemonic_to_seed(seed, passphrase))
xprv, xpub = bip32_private_derivation(xprv, "m/", self.root_derivation)
self.add_master_public_key(name, xpub)
self.add_master_private_key(name, xprv, password)
def add_cosigner_xpub(self, seed, name):
# store only master xpub
xprv, xpub = bip32_root(self.mnemonic_to_seed(seed,''))
xprv, xpub = bip32_private_derivation(xprv, "m/", self.root_derivation)
self.add_master_public_key(name, xpub)
def mnemonic_to_seed(self, seed, password):
return Mnemonic.mnemonic_to_seed(seed, password)
def make_seed(self, lang=None):
return Mnemonic(lang).make_seed()
def format_seed(self, seed):
return NEW_SEED_VERSION, ' '.join(seed.split())
class BIP32_Simple_Wallet(BIP32_Wallet):
# Wallet with a single BIP32 account, no seed
# gap limit 20
wallet_type = 'xpub'
def create_xprv_wallet(self, xprv, password):
xpub = bitcoin.xpub_from_xprv(xprv)
account = BIP32_Account({'xpub':xpub})
self.storage.put('seed_version', self.seed_version, True)
self.add_master_private_key(self.root_name, xprv, password)
self.add_master_public_key(self.root_name, xpub)
self.add_account('0', account)
self.use_encryption = (password != None)
self.storage.put('use_encryption', self.use_encryption,True)
def create_xpub_wallet(self, xpub):
account = BIP32_Account({'xpub':xpub})
self.storage.put('seed_version', self.seed_version, True)
self.add_master_public_key(self.root_name, xpub)
self.add_account('0', account)
class BIP32_HD_Wallet(BIP32_Wallet):
# wallet that can create accounts
def __init__(self, storage):
self.next_account = storage.get('next_account2', None)
BIP32_Wallet.__init__(self, storage)
def can_create_accounts(self):
return self.root_name in self.master_private_keys.keys()
def addresses(self, b=True):
l = BIP32_Wallet.addresses(self, b)
if self.next_account:
_, _, _, next_address = self.next_account
if next_address not in l:
l.append(next_address)
return l
def get_address_index(self, address):
if self.next_account:
next_id, next_xpub, next_pubkey, next_address = self.next_account
if address == next_address:
return next_id, (0,0)
return BIP32_Wallet.get_address_index(self, address)
def num_accounts(self):
keys = []
for k, v in self.accounts.items():
if type(v) != BIP32_Account:
continue
keys.append(k)
i = 0
while True:
account_id = '%d'%i
if account_id not in keys:
break
i += 1
return i
def get_next_account(self, password):
account_id = '%d'%self.num_accounts()
derivation = self.root_name + "%d'"%int(account_id)
xpub, xprv = self.derive_xkeys(self.root_name, derivation, password)
self.add_master_public_key(derivation, xpub)
if xprv:
self.add_master_private_key(derivation, xprv, password)
account = BIP32_Account({'xpub':xpub})
addr, pubkey = account.first_address()
self.add_address(addr)
return account_id, xpub, pubkey, addr
def create_main_account(self, password):
# First check the password is valid (this raises if it isn't).
self.check_password(password)
assert self.num_accounts() == 0
self.create_account('Main account', password)
def create_account(self, name, password):
account_id, xpub, _, _ = self.get_next_account(password)
account = BIP32_Account({'xpub':xpub})
self.add_account(account_id, account)
self.set_label(account_id, name)
# add address of the next account
self.next_account = self.get_next_account(password)
self.storage.put('next_account2', self.next_account)
def account_is_pending(self, k):
return type(self.accounts.get(k)) == PendingAccount
def delete_pending_account(self, k):
assert type(self.accounts.get(k)) == PendingAccount
self.accounts.pop(k)
self.save_accounts()
def create_pending_account(self, name, password):
if self.next_account is None:
self.next_account = self.get_next_account(password)
self.storage.put('next_account2', self.next_account)
next_id, next_xpub, next_pubkey, next_address = self.next_account
if name:
self.set_label(next_id, name)
self.accounts[next_id] = PendingAccount({'pending':True, 'address':next_address, 'pubkey':next_pubkey})
self.save_accounts()
def synchronize(self):
# synchronize existing accounts
BIP32_Wallet.synchronize(self)
if self.next_account is None and not self.use_encryption:
try:
self.next_account = self.get_next_account(None)
self.storage.put('next_account2', self.next_account)
except:
self.print_error('cannot get next account')
# check pending account
if self.next_account is not None:
next_id, next_xpub, next_pubkey, next_address = self.next_account
if self.address_is_old(next_address):
self.print_error("creating account", next_id)
self.add_account(next_id, BIP32_Account({'xpub':next_xpub}))
# here the user should get a notification
self.next_account = None
self.storage.put('next_account2', self.next_account)
elif self.history.get(next_address, []):
if next_id not in self.accounts:
self.print_error("create pending account", next_id)
self.accounts[next_id] = PendingAccount({'pending':True, 'address':next_address, 'pubkey':next_pubkey})
self.save_accounts()
class NewWallet(BIP32_Wallet, Mnemonic):
# Standard wallet
root_derivation = "m/"
wallet_type = 'standard'
def create_main_account(self, password):
xpub = self.master_public_keys.get("x/")
account = BIP32_Account({'xpub':xpub})
self.add_account('0', account)
class Multisig_Wallet(BIP32_Wallet, Mnemonic):
# generic m of n
root_name = "x1/"
root_derivation = "m/"
def __init__(self, storage):
BIP32_Wallet.__init__(self, storage)
self.wallet_type = storage.get('wallet_type')
m = re.match('(\d+)of(\d+)', self.wallet_type)
self.m = int(m.group(1))
self.n = int(m.group(2))
def load_accounts(self):
self.accounts = {}
d = self.storage.get('accounts', {})
v = d.get('0')
if v:
if v.get('xpub3'):
v['xpubs'] = [v['xpub'], v['xpub2'], v['xpub3']]
elif v.get('xpub2'):
v['xpubs'] = [v['xpub'], v['xpub2']]
self.accounts = {'0': Multisig_Account(v)}
def create_main_account(self, password):
account = Multisig_Account({'xpubs': self.master_public_keys.values(), 'm': self.m})
self.add_account('0', account)
def get_master_public_keys(self):
return self.master_public_keys
def get_action(self):
for i in range(self.n):
if self.master_public_keys.get("x%d/"%(i+1)) is None:
return 'create_seed' if i == 0 else 'add_cosigners'
if not self.accounts:
return 'create_accounts'
class OldWallet(Deterministic_Wallet):
wallet_type = 'old'
def __init__(self, storage):
Deterministic_Wallet.__init__(self, storage)
self.gap_limit = storage.get('gap_limit', 5)
def make_seed(self):
import old_mnemonic
seed = random_seed(128)
return ' '.join(old_mnemonic.mn_encode(seed))
def format_seed(self, seed):
import old_mnemonic
# see if seed was entered as hex
seed = seed.strip()
try:
assert seed
seed.decode('hex')
return OLD_SEED_VERSION, str(seed)
except Exception:
pass
words = seed.split()
seed = old_mnemonic.mn_decode(words)
if not seed:
raise Exception("Invalid seed")
return OLD_SEED_VERSION, seed
def create_master_keys(self, password):
seed = self.get_seed(password)
mpk = OldAccount.mpk_from_seed(seed)
self.storage.put('master_public_key', mpk, True)
def get_master_public_key(self):
return self.storage.get("master_public_key")
def get_master_public_keys(self):
return {'Main Account':self.get_master_public_key()}
def create_main_account(self, password):
mpk = self.storage.get("master_public_key")
self.create_account(mpk)
def create_account(self, mpk):
self.accounts['0'] = OldAccount({'mpk':mpk, 0:[], 1:[]})
self.save_accounts()
def create_watching_only_wallet(self, mpk):
self.seed_version = OLD_SEED_VERSION
self.storage.put('seed_version', self.seed_version, False)
self.storage.put('master_public_key', mpk, True)
self.create_account(mpk)
def get_seed(self, password):
seed = pw_decode(self.seed, password).encode('utf8')
return seed
def check_password(self, password):
seed = self.get_seed(password)
self.accounts['0'].check_seed(seed)
def get_mnemonic(self, password):
import old_mnemonic
s = self.get_seed(password)
return ' '.join(old_mnemonic.mn_encode(s))
wallet_types = [
# category type description constructor
('standard', 'old', ("Old wallet"), OldWallet),
('standard', 'xpub', ("BIP32 Import"), BIP32_Simple_Wallet),
('standard', 'standard', ("Standard wallet"), NewWallet),
('standard', 'imported', ("Imported wallet"), Imported_Wallet),
('multisig', '2of2', ("Multisig wallet (2 of 2)"), Multisig_Wallet),
('multisig', '2of3', ("Multisig wallet (2 of 3)"), Multisig_Wallet)
]
# former WalletFactory
class Wallet(object):
"""The main wallet "entry point".
This class is actually a factory that will return a wallet of the correct
type when passed a WalletStorage instance."""
def __new__(self, storage):
seed_version = storage.get('seed_version')
if not seed_version:
seed_version = OLD_SEED_VERSION if len(storage.get('master_public_key','')) == 128 else NEW_SEED_VERSION
if seed_version not in [OLD_SEED_VERSION, NEW_SEED_VERSION]:
msg = "Your wallet has an unsupported seed version."
msg += '\n\nWallet file: %s' % os.path.abspath(storage.path)
if seed_version in [5, 7, 8, 9, 10]:
msg += "\n\nTo open this wallet, try 'git checkout seed_v%d'"%seed_version
if seed_version == 6:
# version 1.9.8 created v6 wallets when an incorrect seed was entered in the restore dialog
msg += '\n\nThis file was created because of a bug in version 1.9.8.'
if storage.get('master_public_keys') is None and storage.get('master_private_keys') is None and storage.get('imported_keys') is None:
# pbkdf2 was not included with the binaries, and wallet creation aborted.
msg += "\nIt does not contain any keys, and can safely be removed."
else:
# creation was complete if electrum was run from source
msg += "\nPlease open this file with Electrum 1.9.8, and move your coins to a new wallet."
raise BaseException(msg)
wallet_type = storage.get('wallet_type')
if wallet_type:
for cat, t, name, loader in wallet_types:
if t == wallet_type:
if cat in ['hardware', 'twofactor']:
WalletClass = lambda storage: apply(loader().constructor, (storage,))
else:
WalletClass = loader
break
else:
if re.match('(\d+)of(\d+)', wallet_type):
WalletClass = Multisig_Wallet
else:
raise BaseException('unknown wallet type', wallet_type)
else:
if seed_version == OLD_SEED_VERSION:
WalletClass = OldWallet
else:
WalletClass = NewWallet
return WalletClass(storage)
@classmethod
def is_seed(self, seed):
if not seed:
return False
elif is_old_seed(seed):
return True
elif is_new_seed(seed):
return True
else:
return False
@classmethod
def is_old_mpk(self, mpk):
try:
int(mpk, 16)
assert len(mpk) == 128
return True
except:
return False
@classmethod
def is_xpub(self, text):
try:
assert text[0:4] == 'xpub'
deserialize_xkey(text)
return True
except:
return False
@classmethod
def is_xprv(self, text):
try:
assert text[0:4] == 'xprv'
deserialize_xkey(text)
return True
except:
return False
@classmethod
def is_address(self, text):
if not text:
return False
for x in text.split():
if not bitcoin.is_address(x):
return False
return True
@classmethod
def is_private_key(self, text):
if not text:
return False
for x in text.split():
if not bitcoin.is_private_key(x):
return False
return True
@classmethod
def from_seed(self, seed, password, storage):
if is_old_seed(seed):
klass = OldWallet
elif is_new_seed(seed):
klass = NewWallet
w = klass(storage)
w.add_seed(seed, password)
w.create_master_keys(password)
w.create_main_account(password)
return w
@classmethod
def from_address(self, text, storage):
w = Imported_Wallet(storage)
for x in text.split():
w.accounts[IMPORTED_ACCOUNT].add(x, None, None, None)
w.save_accounts()
return w
@classmethod
def from_private_key(self, text, password, storage):
w = Imported_Wallet(storage)
w.update_password(None, password)
for x in text.split():
w.import_key(x, password)
return w
@classmethod
def from_old_mpk(self, mpk, storage):
w = OldWallet(storage)
w.seed = ''
w.create_watching_only_wallet(mpk)
return w
@classmethod
def from_xpub(self, xpub, storage):
w = BIP32_Simple_Wallet(storage)
w.create_xpub_wallet(xpub)
return w
@classmethod
def from_xprv(self, xprv, password, storage):
w = BIP32_Simple_Wallet(storage)
w.create_xprv_wallet(xprv, password)
return w
@classmethod
def from_multisig(klass, key_list, password, storage, wallet_type):
storage.put('wallet_type', wallet_type, True)
self = Multisig_Wallet(storage)
key_list = sorted(key_list, key = lambda x: klass.is_xpub(x))
for i, text in enumerate(key_list):
assert klass.is_seed(text) or klass.is_xprv(text) or klass.is_xpub(text)
name = "x%d/"%(i+1)
if klass.is_xprv(text):
xpub = bitcoin.xpub_from_xprv(text)
self.add_master_public_key(name, xpub)
self.add_master_private_key(name, text, password)
elif klass.is_xpub(text):
self.add_master_public_key(name, text)
elif klass.is_seed(text):
if name == 'x1/':
self.add_seed(text, password)
self.create_master_keys(password)
else:
self.add_cosigner_seed(text, name, password)
self.use_encryption = (password != None)
self.storage.put('use_encryption', self.use_encryption, True)
self.create_main_account(password)
return self
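# Illustrative usage of the factory above (not part of the original module); the
# storage path, seed text and password are hypothetical placeholders.
#
#   storage = WalletStorage('/path/to/wallet_file')
#   if storage.get('seed_version'):              # existing wallet: factory picks the class
#       wallet = Wallet(storage)
#   elif Wallet.is_seed(text):                   # restore from a seed phrase
#       wallet = Wallet.from_seed(text, password, storage)
#   elif Wallet.is_xpub(text):                   # watching-only wallet from an xpub
#       wallet = Wallet.from_xpub(text, storage)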
|
gpl-3.0
| 9,193,974,612,486,709,000 | 35.142445 | 149 | 0.551622 | false | 3.863772 | false | false | false |
nextgis/nextgisweb_compulink
|
nextgisweb_compulink/compulink_admin/__init__.py
|
1
|
1255
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from os import path
from nextgisweb.component import Component, require
from .model import Base, PROJECT_STATUS_PROJECT
from .ident import COMP_ID
from nextgisweb_compulink.init_data.command import DBInit
from .view import get_regions_from_resource, get_districts_from_resource, get_project_statuses
BASE_PATH = path.abspath(path.dirname(__file__))
class CompulinkAdminComponent(Component):
identity = COMP_ID
metadata = Base.metadata
@require('lookup_table')
def initialize(self):
pass
@require('lookup_table')
def initialize_db(self):
return
#TODO: fake! Need refactoring
args = self
args.action = 'all'
args.force = False
DBInit.execute(args, make_transaction=False)
@require('lookup_table')
def setup_pyramid(self, config):
from . import view
view.setup_pyramid(self, config)
def client_settings(self, request):
return dict(
regions_dict=get_regions_from_resource(sort=True),
districts_dict=get_districts_from_resource(sort=True),
statuses_dict=get_project_statuses(),
def_status=PROJECT_STATUS_PROJECT
)
|
gpl-2.0
| 5,916,443,532,301,515,000 | 27.522727 | 94 | 0.667729 | false | 3.83792 | false | false | false |
techtonik/readthedocs.org
|
readthedocs/projects/tasks.py
|
1
|
31592
|
"""Tasks related to projects
This includes fetching repository code, cleaning ``conf.py`` files, and
rebuilding documentation.
"""
import os
import shutil
import json
import logging
import socket
import requests
import hashlib
from collections import defaultdict
from celery import task, Task
from djcelery import celery as celery_app
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from readthedocs.builds.constants import (LATEST,
BUILD_STATE_CLONING,
BUILD_STATE_INSTALLING,
BUILD_STATE_BUILDING)
from readthedocs.builds.models import Build, Version
from readthedocs.core.utils import send_email, run_on_app_servers
from readthedocs.cdn.purge import purge
from readthedocs.doc_builder.loader import get_builder_class
from readthedocs.doc_builder.environments import (LocalEnvironment,
DockerEnvironment)
from readthedocs.doc_builder.exceptions import BuildEnvironmentError
from readthedocs.projects.exceptions import ProjectImportError
from readthedocs.projects.models import ImportedFile, Project
from readthedocs.projects.utils import make_api_version, make_api_project, symlink
from readthedocs.projects.constants import LOG_TEMPLATE
from readthedocs.privacy.loader import Syncer
from readthedocs.search.parse_json import process_all_json_files
from readthedocs.search.utils import process_mkdocs_json
from readthedocs.restapi.utils import index_search_request
from readthedocs.vcs_support import utils as vcs_support_utils
from readthedocs.api.client import api as api_v1
from readthedocs.restapi.client import api as api_v2
from readthedocs.projects.signals import before_vcs, after_vcs, before_build, after_build
from readthedocs.core.resolver import resolve_path
log = logging.getLogger(__name__)
HTML_ONLY = getattr(settings, 'HTML_ONLY_PROJECTS', ())
class UpdateDocsTask(Task):
"""
The main entry point for updating documentation.
It handles all of the logic around whether a project is imported or we
created it. Then it will build the html docs and other requested parts.
`pk`
Primary key of the project to update
`record`
Whether or not to keep a record of the update in the database. Useful
for preventing changes visible to the end-user when running commands
from the shell, for example.
"""
max_retries = 5
default_retry_delay = (7 * 60)
name = 'update_docs'
def __init__(self, build_env=None, force=False, search=True, localmedia=True,
build=None, project=None, version=None):
self.build_env = build_env
self.build_force = force
self.build_search = search
self.build_localmedia = localmedia
self.build = {}
if build is not None:
self.build = build
self.version = {}
if version is not None:
self.version = version
self.project = {}
if project is not None:
self.project = project
def run(self, pk, version_pk=None, build_pk=None, record=True, docker=False,
search=True, force=False, localmedia=True, **kwargs):
env_cls = LocalEnvironment
if docker or settings.DOCKER_ENABLE:
env_cls = DockerEnvironment
self.project = self.get_project(pk)
self.version = self.get_version(self.project, version_pk)
self.build = self.get_build(build_pk)
self.build_search = search
self.build_localmedia = localmedia
self.build_force = force
self.build_env = env_cls(project=self.project, version=self.version,
build=self.build, record=record)
with self.build_env:
if self.project.skip:
raise BuildEnvironmentError(
_('Builds for this project are temporarily disabled'))
try:
self.setup_vcs()
except vcs_support_utils.LockTimeout, e:
self.retry(exc=e, throw=False)
raise BuildEnvironmentError(
'Version locked, retrying in 5 minutes.',
status_code=423
)
if self.project.documentation_type == 'auto':
self.update_documentation_type()
self.setup_environment()
# TODO the build object should have an idea of these states, extend
# the model to include an idea of these outcomes
outcomes = self.build_docs()
build_id = self.build.get('id')
# Web Server Tasks
if build_id:
finish_build.delay(
version_pk=self.version.pk,
build_pk=build_id,
hostname=socket.gethostname(),
html=outcomes['html'],
search=outcomes['search'],
localmedia=outcomes['localmedia'],
pdf=outcomes['pdf'],
epub=outcomes['epub'],
)
if self.build_env.failed:
self.send_notifications()
@staticmethod
def get_project(project_pk):
"""Get project from API"""
project_data = api_v1.project(project_pk).get()
project = make_api_project(project_data)
return project
@staticmethod
def get_version(project, version_pk):
"""Ensure we're using a sane version"""
if version_pk:
version_data = api_v1.version(version_pk).get()
else:
version_data = (api_v1
.version(project.slug)
.get(slug=LATEST)['objects'][0])
return make_api_version(version_data)
@staticmethod
def get_build(build_pk):
"""
Retrieve build object from API
:param build_pk: Build primary key
"""
build = {}
if build_pk:
build = api_v2.build(build_pk).get()
return dict((key, val) for (key, val) in build.items()
if key not in ['project', 'version', 'resource_uri',
'absolute_uri'])
def update_documentation_type(self):
"""
Force Sphinx for 'auto' documentation type
This used to determine the type and automatically set the documentation
type to Sphinx for rST and Mkdocs for markdown. It now just forces
Sphinx, due to markdown support.
"""
ret = 'sphinx'
project_data = api_v2.project(self.project.pk).get()
project_data['documentation_type'] = ret
api_v2.project(self.project.pk).put(project_data)
self.project.documentation_type = ret
def setup_vcs(self):
"""
Update the checkout of the repo to make sure it's the latest.
This also syncs versions in the DB.
:param build_env: Build environment
"""
self.build_env.update_build(state=BUILD_STATE_CLONING)
log.info(LOG_TEMPLATE
.format(project=self.project.slug,
version=self.version.slug,
msg='Updating docs from VCS'))
try:
update_imported_docs(self.version.pk)
commit = self.project.vcs_repo(self.version.slug).commit
if commit:
self.build['commit'] = commit
except ProjectImportError:
raise BuildEnvironmentError('Failed to import project',
status_code=404)
def setup_environment(self):
"""
Build the virtualenv and install the project into it.
Always build projects with a virtualenv.
:param build_env: Build environment to pass commands and execution through.
"""
build_dir = os.path.join(
self.project.venv_path(version=self.version.slug),
'build')
self.build_env.update_build(state=BUILD_STATE_INSTALLING)
if os.path.exists(build_dir):
log.info(LOG_TEMPLATE
.format(project=self.project.slug,
version=self.version.slug,
msg='Removing existing build directory'))
shutil.rmtree(build_dir)
site_packages = '--no-site-packages'
if self.project.use_system_packages:
site_packages = '--system-site-packages'
self.build_env.run(
self.project.python_interpreter,
'-mvirtualenv',
site_packages,
self.project.venv_path(version=self.version.slug)
)
# Install requirements
requirements = [
'sphinx==1.3.1',
'Pygments==2.0.2',
'virtualenv==13.1.0',
'setuptools==18.0.1',
'docutils==0.11',
'mkdocs==0.14.0',
'mock==1.0.1',
'pillow==2.6.1',
'readthedocs-sphinx-ext==0.5.4',
'sphinx-rtd-theme==0.1.9',
'alabaster>=0.7,<0.8,!=0.7.5',
'recommonmark==0.1.1',
]
cmd = [
'python',
self.project.venv_bin(version=self.version.slug, filename='pip'),
'install',
'--use-wheel',
'-U',
]
if self.project.use_system_packages:
# Other code expects sphinx-build to be installed inside the
# virtualenv. Using the -I option makes sure it gets installed
# even if it is already installed system-wide (and
# --system-site-packages is used)
cmd.append('-I')
cmd.extend(requirements)
self.build_env.run(
*cmd,
bin_path=self.project.venv_bin(version=self.version.slug)
)
# Handle requirements
requirements_file_path = self.project.requirements_file
checkout_path = self.project.checkout_path(self.version.slug)
if not requirements_file_path:
builder_class = get_builder_class(self.project.documentation_type)
docs_dir = (builder_class(self.build_env)
.docs_dir())
for path in [docs_dir, '']:
for req_file in ['pip_requirements.txt', 'requirements.txt']:
test_path = os.path.join(checkout_path, path, req_file)
if os.path.exists(test_path):
requirements_file_path = test_path
break
if requirements_file_path:
self.build_env.run(
'python',
self.project.venv_bin(version=self.version.slug, filename='pip'),
'install',
'--exists-action=w',
'-r{0}'.format(requirements_file_path),
cwd=checkout_path,
bin_path=self.project.venv_bin(version=self.version.slug)
)
# Handle setup.py
checkout_path = self.project.checkout_path(self.version.slug)
setup_path = os.path.join(checkout_path, 'setup.py')
if os.path.isfile(setup_path) and self.project.use_virtualenv:
if getattr(settings, 'USE_PIP_INSTALL', False):
self.build_env.run(
'python',
self.project.venv_bin(version=self.version.slug, filename='pip'),
'install',
'--ignore-installed',
'.',
cwd=checkout_path,
bin_path=self.project.venv_bin(version=self.version.slug)
)
else:
self.build_env.run(
'python',
'setup.py',
'install',
'--force',
cwd=checkout_path,
bin_path=self.project.venv_bin(version=self.version.slug)
)
def build_docs(self):
"""Wrapper to all build functions
Executes the necessary builds for this task and returns whether the
build was successful or not.
:returns: Build outcomes with keys for html, search, localmedia, pdf,
and epub
:rtype: dict
"""
self.build_env.update_build(state=BUILD_STATE_BUILDING)
before_build.send(sender=self.version)
outcomes = defaultdict(lambda: False)
with self.project.repo_nonblockinglock(
version=self.version,
max_lock_age=getattr(settings, 'REPO_LOCK_SECONDS', 30)):
outcomes['html'] = self.build_docs_html()
outcomes['search'] = self.build_docs_search()
outcomes['localmedia'] = self.build_docs_localmedia()
outcomes['pdf'] = self.build_docs_pdf()
outcomes['epub'] = self.build_docs_epub()
after_build.send(sender=self.version)
return outcomes
def build_docs_html(self):
"""Build HTML docs"""
html_builder = get_builder_class(self.project.documentation_type)(
self.build_env
)
if self.build_force:
html_builder.force()
html_builder.append_conf()
success = html_builder.build()
if success:
html_builder.move()
# Gracefully attempt to move files via task on web workers.
try:
move_files.delay(
version_pk=self.version.pk,
html=True,
hostname=socket.gethostname(),
)
except socket.error:
# TODO do something here
pass
return success
def build_docs_search(self):
"""Build search data with separate build"""
if self.build_search:
if self.project.is_type_mkdocs:
return self.build_docs_class('mkdocs_json')
if self.project.is_type_sphinx:
return self.build_docs_class('sphinx_search')
return False
def build_docs_localmedia(self):
"""Get local media files with separate build"""
if self.build_localmedia:
if self.project.is_type_sphinx:
return self.build_docs_class('sphinx_singlehtmllocalmedia')
return False
def build_docs_pdf(self):
"""Build PDF docs"""
if (self.project.slug in HTML_ONLY or
not self.project.is_type_sphinx or
not self.project.enable_pdf_build):
return False
return self.build_docs_class('sphinx_pdf')
def build_docs_epub(self):
"""Build ePub docs"""
if (self.project.slug in HTML_ONLY or
not self.project.is_type_sphinx or
not self.project.enable_epub_build):
return False
return self.build_docs_class('sphinx_epub')
def build_docs_class(self, builder_class):
"""Build docs with additional doc backends
These steps are not necessarily required for the build to halt, so we
only raise a warning exception here. A hard error will halt the build
process.
"""
builder = get_builder_class(builder_class)(self.build_env)
success = builder.build()
builder.move()
return success
def send_notifications(self):
"""Send notifications on build failure"""
send_notifications.delay(self.version.pk, build_pk=self.build['id'])
update_docs = celery_app.tasks[UpdateDocsTask.name]
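# Illustrative call site (not part of the original module): the task registered above
# is normally enqueued from a web view or signal handler; the primary keys below are
# hypothetical.
#
#   update_docs.delay(pk=project.pk, version_pk=version.pk,
#                     build_pk=build.pk, record=True, force=False)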
@task()
def update_imported_docs(version_pk):
"""
Check out or update the given project's repository
:param version_pk: Version id to update
"""
version_data = api_v1.version(version_pk).get()
version = make_api_version(version_data)
project = version.project
ret_dict = {}
# Make Dirs
if not os.path.exists(project.doc_path):
os.makedirs(project.doc_path)
if not project.vcs_repo():
raise ProjectImportError(("Repo type '{0}' unknown".format(project.repo_type)))
with project.repo_nonblockinglock(
version=version,
max_lock_age=getattr(settings, 'REPO_LOCK_SECONDS', 30)):
# Get the actual code on disk
try:
before_vcs.send(sender=version)
if version:
log.info(
LOG_TEMPLATE.format(
project=project.slug,
version=version.slug,
msg='Checking out version {slug}: {identifier}'.format(
slug=version.slug,
identifier=version.identifier
)
)
)
version_slug = version.slug
version_repo = project.vcs_repo(version_slug)
ret_dict['checkout'] = version_repo.checkout(version.identifier)
else:
# Does this ever get called?
log.info(LOG_TEMPLATE.format(
project=project.slug, version=version.slug, msg='Updating to latest revision'))
version_slug = LATEST
version_repo = project.vcs_repo(version_slug)
ret_dict['checkout'] = version_repo.update()
except Exception:
raise
finally:
after_vcs.send(sender=version)
# Update tags/version
version_post_data = {'repo': version_repo.repo_url}
if version_repo.supports_tags:
version_post_data['tags'] = [
{'identifier': v.identifier,
'verbose_name': v.verbose_name,
} for v in version_repo.tags
]
if version_repo.supports_branches:
version_post_data['branches'] = [
{'identifier': v.identifier,
'verbose_name': v.verbose_name,
} for v in version_repo.branches
]
try:
api_v2.project(project.pk).sync_versions.post(version_post_data)
except Exception, e:
print "Sync Versions Exception: %s" % e.message
return ret_dict
# Web tasks
@task(queue='web')
def finish_build(version_pk, build_pk, hostname=None, html=False,
localmedia=False, search=False, pdf=False, epub=False):
"""Build Finished, do house keeping bits"""
version = Version.objects.get(pk=version_pk)
build = Build.objects.get(pk=build_pk)
if html:
version.active = True
version.built = True
version.save()
if not pdf:
clear_pdf_artifacts(version)
if not epub:
clear_epub_artifacts(version)
move_files(
version_pk=version_pk,
hostname=hostname,
html=html,
localmedia=localmedia,
search=search,
pdf=pdf,
epub=epub,
)
symlink(project=version.project)
# Delayed tasks
update_static_metadata.delay(version.project.pk)
fileify.delay(version.pk, commit=build.commit)
update_search.delay(version.pk, commit=build.commit)
@task(queue='web')
def move_files(version_pk, hostname, html=False, localmedia=False, search=False,
pdf=False, epub=False):
"""Task to move built documentation to web servers
:param version_pk: Version id to sync files for
:param hostname: Hostname to sync to
:param html: Sync HTML
:type html: bool
:param localmedia: Sync local media files
:type localmedia: bool
:param search: Sync search files
:type search: bool
:param pdf: Sync PDF files
:type pdf: bool
:param epub: Sync ePub files
:type epub: bool
"""
version = Version.objects.get(pk=version_pk)
if html:
from_path = version.project.artifact_path(
version=version.slug, type_=version.project.documentation_type)
target = version.project.rtd_build_path(version.slug)
Syncer.copy(from_path, target, host=hostname)
if 'sphinx' in version.project.documentation_type:
if localmedia:
from_path = version.project.artifact_path(
version=version.slug, type_='sphinx_localmedia')
to_path = version.project.get_production_media_path(
type_='htmlzip', version_slug=version.slug, include_file=False)
Syncer.copy(from_path, to_path, host=hostname)
if search:
from_path = version.project.artifact_path(
version=version.slug, type_='sphinx_search')
to_path = version.project.get_production_media_path(
type_='json', version_slug=version.slug, include_file=False)
Syncer.copy(from_path, to_path, host=hostname)
    # Always move PDFs because the return code lies.
if pdf:
from_path = version.project.artifact_path(version=version.slug,
type_='sphinx_pdf')
to_path = version.project.get_production_media_path(
type_='pdf', version_slug=version.slug, include_file=False)
Syncer.copy(from_path, to_path, host=hostname)
if epub:
from_path = version.project.artifact_path(version=version.slug,
type_='sphinx_epub')
to_path = version.project.get_production_media_path(
type_='epub', version_slug=version.slug, include_file=False)
Syncer.copy(from_path, to_path, host=hostname)
if 'mkdocs' in version.project.documentation_type:
if search:
from_path = version.project.artifact_path(version=version.slug,
type_='mkdocs_json')
to_path = version.project.get_production_media_path(
type_='json', version_slug=version.slug, include_file=False)
Syncer.copy(from_path, to_path, host=hostname)
@task(queue='web')
def update_search(version_pk, commit, delete_non_commit_files=True):
"""Task to update search indexes
:param version_pk: Version id to update
:param commit: Commit that updated index
:param delete_non_commit_files: Delete files not in commit from index
"""
version = Version.objects.get(pk=version_pk)
if version.project.is_type_sphinx:
page_list = process_all_json_files(version, build_dir=False)
elif version.project.is_type_mkdocs:
page_list = process_mkdocs_json(version, build_dir=False)
else:
log.error('Unknown documentation type: %s',
version.project.documentation_type)
return
log_msg = ' '.join([page['path'] for page in page_list])
log.info("(Search Index) Sending Data: %s [%s]", version.project.slug,
log_msg)
index_search_request(
version=version,
page_list=page_list,
commit=commit,
project_scale=0,
page_scale=0,
# Don't index sections to speed up indexing.
# They aren't currently exposed anywhere.
section=False,
delete=delete_non_commit_files,
)
@task(queue='web')
def fileify(version_pk, commit):
"""
Create ImportedFile objects for all of a version's files.
This is a prereq for indexing the docs for search.
It also causes celery-haystack to kick off an index of the file.
"""
version = Version.objects.get(pk=version_pk)
project = version.project
if not project.cdn_enabled:
return
if not commit:
log.info(LOG_TEMPLATE
.format(project=project.slug, version=version.slug,
msg=('Imported File not being built because no commit '
'information')))
path = project.rtd_build_path(version.slug)
if path:
log.info(LOG_TEMPLATE
.format(project=version.project.slug, version=version.slug,
msg='Creating ImportedFiles'))
_manage_imported_files(version, path, commit)
else:
log.info(LOG_TEMPLATE
.format(project=project.slug, version=version.slug,
msg='No ImportedFile files'))
def _manage_imported_files(version, path, commit):
"""Update imported files for version
:param version: Version instance
:param path: Path to search
:param commit: Commit that updated path
"""
changed_files = set()
for root, __, filenames in os.walk(path):
for filename in filenames:
dirpath = os.path.join(root.replace(path, '').lstrip('/'),
filename.lstrip('/'))
full_path = os.path.join(root, filename)
md5 = hashlib.md5(open(full_path, 'rb').read()).hexdigest()
try:
obj, __ = ImportedFile.objects.get_or_create(
project=version.project,
version=version,
path=dirpath,
name=filename,
)
except ImportedFile.MultipleObjectsReturned:
log.exception('Error creating ImportedFile')
continue
if obj.md5 != md5:
obj.md5 = md5
changed_files.add(dirpath)
if obj.commit != commit:
obj.commit = commit
obj.save()
# Delete ImportedFiles from previous versions
ImportedFile.objects.filter(project=version.project,
version=version
).exclude(commit=commit).delete()
# Purge Cache
changed_files = [resolve_path(
version.project, filename=file, version_slug=version.slug,
) for file in changed_files]
cdn_ids = getattr(settings, 'CDN_IDS', None)
if cdn_ids:
if version.project.slug in cdn_ids:
purge(cdn_ids[version.project.slug], changed_files)
@task(queue='web')
def send_notifications(version_pk, build_pk):
version = Version.objects.get(pk=version_pk)
build = Build.objects.get(pk=build_pk)
for hook in version.project.webhook_notifications.all():
webhook_notification(version, build, hook.url)
for email in version.project.emailhook_notifications.all().values_list('email', flat=True):
email_notification(version, build, email)
def email_notification(version, build, email):
"""Send email notifications for build failure
:param version: :py:cls:`Version` instance that failed
:param build: :py:cls:`Build` instance that failed
:param email: Email recipient address
"""
log.debug(LOG_TEMPLATE.format(project=version.project.slug, version=version.slug,
msg='sending email to: %s' % email))
context = {'version': version,
'project': version.project,
'build': build,
'build_url': 'https://{0}{1}'.format(
getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org'),
build.get_absolute_url()),
'unsub_url': 'https://{0}{1}'.format(
getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org'),
reverse('projects_notifications', args=[version.project.slug])),
}
if build.commit:
title = _('Failed: {project.name} ({commit})').format(commit=build.commit[:8], **context)
else:
title = _('Failed: {project.name} ({version.verbose_name})').format(**context)
send_email(
email,
title,
template='projects/email/build_failed.txt',
template_html='projects/email/build_failed.html',
context=context
)
def webhook_notification(version, build, hook_url):
"""Send webhook notification for project webhook
:param version: Version instance to send hook for
:param build: Build instance that failed
:param hook_url: Hook URL to send to
"""
project = version.project
data = json.dumps({
'name': project.name,
'slug': project.slug,
'build': {
'id': build.id,
'success': build.success,
'date': build.date.strftime('%Y-%m-%d %H:%M:%S'),
}
})
log.debug(LOG_TEMPLATE
.format(project=project.slug, version='',
msg='sending notification to: %s' % hook_url))
requests.post(hook_url, data=data)
@task(queue='web')
def update_static_metadata(project_pk, path=None):
"""Update static metadata JSON file
Metadata settings include the following project settings:
version
The default version for the project, default: `latest`
language
The default language for the project, default: `en`
languages
List of languages built by linked translation projects.
"""
project = Project.objects.get(pk=project_pk)
if not path:
path = project.static_metadata_path()
log.info(LOG_TEMPLATE.format(
project=project.slug,
version='',
msg='Updating static metadata',
))
translations = [trans.language for trans in project.translations.all()]
languages = set(translations)
# Convert to JSON safe types
metadata = {
'version': project.default_version,
'language': project.language,
'languages': list(languages),
'single_version': project.single_version,
}
try:
fh = open(path, 'w+')
json.dump(metadata, fh)
fh.close()
Syncer.copy(path, path, host=socket.gethostname(), file=True)
except (AttributeError, IOError) as e:
log.debug(LOG_TEMPLATE.format(
project=project.slug,
version='',
msg='Cannot write to metadata.json: {0}'.format(e)
))
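# For illustration (not part of the original module): for a project whose default
# version is `latest`, default language `en`, and a single linked French translation,
# the file written above contains roughly
#   {"version": "latest", "language": "en", "languages": ["fr"], "single_version": false}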
# Random Tasks
@task()
def remove_dir(path):
"""
Remove a directory on the build/celery server.
This is mainly a wrapper around shutil.rmtree so that app servers
can kill things on the build server.
"""
log.info("Removing %s", path)
shutil.rmtree(path)
@task(queue='web')
def clear_artifacts(version_pk):
"""Remove artifacts from the web servers"""
version = Version.objects.get(pk=version_pk)
clear_pdf_artifacts(version)
clear_epub_artifacts(version)
clear_htmlzip_artifacts(version)
clear_html_artifacts(version)
def clear_pdf_artifacts(version):
run_on_app_servers('rm -rf %s'
% version.project.get_production_media_path(
type_='pdf', version_slug=version.slug))
def clear_epub_artifacts(version):
run_on_app_servers('rm -rf %s'
% version.project.get_production_media_path(
type_='epub', version_slug=version.slug))
def clear_htmlzip_artifacts(version):
run_on_app_servers('rm -rf %s'
% version.project.get_production_media_path(
type_='htmlzip', version_slug=version.slug))
def clear_html_artifacts(version):
run_on_app_servers('rm -rf %s' % version.project.rtd_build_path(version=version.slug))
@task(queue='web')
def remove_path_from_web(path):
"""
Remove the given path from the web servers file system.
"""
    # Sanity check for spaces in the path since spaces would result in
# deleting unpredictable paths with "rm -rf".
assert ' ' not in path, "No spaces allowed in path"
# TODO: We need some proper escaping here for the given path.
run_on_app_servers('rm -rf {path}'.format(path=path))
|
mit
| 5,030,569,804,307,383,000 | 34.576577 | 99 | 0.584167 | false | 4.218454 | false | false | false |
XiaoJianfeng/ibl
|
util/txt2xlsx.py
|
1
|
1634
|
#!/usr/bin/env python
import sys
import argparse
import csv
import xlsxwriter
''' convert txt file[s] to .xlsx file
usage: $0 [txt1 txt2 txt...] xlsx_file
multiple txts will be added as separate excel sheet
'''
#-----------------------------------------------------------------------------
def correct_data_type(v):
""" convert v to int if possible, else float if possible, else just return as it is"""
try:
return int(v)
except:
try:
return float(v)
except:
return v
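# Illustrative behaviour of the helper above (not part of the original script):
#   correct_data_type("42") -> 42
#   correct_data_type("3.14") -> 3.14
#   correct_data_type("n/a") -> "n/a"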
def txt2xlsx(f_out, *f_in, **kwds):
sep = kwds['sep'] if ('sep' in kwds) else '\t'
workbook = xlsxwriter.Workbook(f_out)
for n, fn in enumerate(f_in):
fobj_in = sys.stdin if fn == '-' else open(fn, 'r')
f = csv.reader(fobj_in, delimiter=sep)
short_fn = "Sheet {}".format(n+1)
worksheet = workbook.add_worksheet(short_fn)
for i, row in enumerate(f):
worksheet.write_row(i, 0, map(correct_data_type, row))
sys.stderr.write("{num} lines in total for {name}\n".format(num=i+1, name=fn))
workbook.close()
#-----------------------------------------------------------------------------
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='convert txt to excel xlsx')
parser.add_argument('-d', '--delimiter', default='\t', help="delimiter for the txt file")
parser.add_argument('input', nargs='*', default=['-'], help="input file[s], blank for stdin")
parser.add_argument('output', help="output file")
args = parser.parse_args()
txt2xlsx(args.output, *args.input, sep=args.delimiter)
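# Example invocations (illustrative; file names are hypothetical):
#   python txt2xlsx.py table1.txt table2.txt combined.xlsx   # one sheet per input file
#   cat table.csv | python txt2xlsx.py -d ',' - out.xlsx     # read stdin with a comma delimiter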
|
lgpl-3.0
| -6,901,755,302,760,224,000 | 32.346939 | 97 | 0.55814 | false | 3.62306 | false | false | false |
eResearchSA/reporting-producers
|
reporting/utilities.py
|
1
|
3059
|
#!/usr/bin/env python
# pylint: disable=broad-except
import logging
import sys
import random
import string
import socket
import datetime
import platform
import time
import os
from reporting.exceptions import PluginInitialisationError
global_vars=None
def set_global(vars):
global global_vars
global_vars = vars
def getLogger(name):
"""Get logging.Logger instance with logger name convention
"""
if "." in name:
name = "producer.%s" % name.rpartition(".")[-1]
return logging.getLogger(name)
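# Illustrative (not part of the original module): getLogger('reporting.plugins.cpu')
# returns the logger named 'producer.cpu'; a name without a dot is used unchanged.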
log = getLogger(__name__)
def excepthook(exc_type, exc_value, exc_traceback):
"""Except hook used to log unhandled exceptions to log
"""
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
getLogger("producer").critical(
"Unhandled exception in reporting producer:", exc_info=(exc_type, exc_value, exc_traceback))
#return sys.__excepthook__(exctype, value, traceback)
def get_log_level(verbose):
if verbose <= 0:
return logging.ERROR
elif verbose == 1:
return logging.WARNING
elif verbose == 2:
return logging.INFO
return logging.DEBUG
def get_hostname():
"""Get the host name of a producer"""
global global_vars
if global_vars is not None and 'hostname' in global_vars:
return global_vars['hostname']
try:
return socket.getfqdn()
except:
return platform.node()
def list_to_dict(d, l, value):
if len(l) == 1:
d[l[0]] = value
else:
if l[0] not in d:
d[l[0]] = {}
list_to_dict(d[l[0]], l[1:], value)
def formatExceptionInfo():
""" Consistently format exception information """
cla, exc = sys.exc_info()[:2]
return (cla.__name__, str(exc))
def init_message():
return {'timestamp': int(time.time()), 'hostname': get_hostname()}
def init_object(class_name, *args, **kwargs):
mod_name = '.'.join(class_name.split('.')[:-1])
class_name = class_name.split('.')[-1]
log.debug("Loading plugin %s %s"%(mod_name, class_name))
try:
mod = __import__(mod_name, globals(), locals(), [class_name])
except SyntaxError as e:
raise PluginInitialisationError(
"Plugin %s (%s) contains a syntax error at line %s" %
(class_name, e.filename, e.lineno))
except ImportError as e:
log.exception(e)
raise PluginInitialisationError(
"Failed to import plugin %s: %s" %
(class_name, e[0]))
klass = getattr(mod, class_name, None)
if not klass:
raise PluginInitialisationError(
'Plugin class %s does not exist' % class_name)
try:
return klass(*args, **kwargs)
except Exception as exc:
raise PluginInitialisationError(
"Failed to load plugin %s with "
"the following error: %s - %s" %
(class_name, exc.__class__.__name__, exc.message))
def touch(fname, times=None):
with open(fname, 'a'):
os.utime(fname, times)
|
apache-2.0
| -4,377,217,827,614,726,000 | 28.142857 | 100 | 0.61883 | false | 3.739609 | false | false | false |
nateGeorge/IDmyDog
|
process_ims/other/2d_haralick_map.py
|
1
|
3493
|
from __future__ import print_function
import pandas as pd
import pickle as pk
import cv2
import os
import re
import progressbar
import imutils
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from mahotas.features import haralick
import json
from sklearn.decomposition import PCA
plt.style.use('seaborn-dark')
def get_fg_bg_rects(fg):
b, g, r, a = cv2.split(fg)
h, w = fg.shape[:2]
h -= 1
w -= 1 # to avoid indexing problems
rectDims = [10, 10] # h, w of rectangles
hRects = h / rectDims[0]
wRects = w / rectDims[1]
fgRects = []
bgRects = []
for i in range(wRects):
for j in range(hRects):
pt1 = (i * rectDims[0], j * rectDims[1])
pt2 = ((i + 1) * rectDims[0], (j + 1) * rectDims[1])
# alpha is 255 over the part of the dog
if a[pt1[1], pt1[0]] == 255 and a[pt2[1], pt2[0]] == 255:
fgRects.append([pt1, pt2])
#cv2.rectangle(fgcp, pt1, pt2, [0, 0, 255], 2) # for debugging
elif a[pt1[1], pt1[0]] == 0 and a[pt2[1], pt2[0]] == 0:
bgRects.append([pt1, pt2])
#cv2.rectangle(bgcp, pt1, pt2, [0, 0, 255], 2)
return fgRects, bgRects
def get_avg_hara(im, rects):
# returns the haralick texture averaged over all rectangles in an image
if len(rects)==0:
return None
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
hara = 0
for r in rects:
# slice images as: img[y0:y1, x0:x1]
hara += haralick(im[r[0][1]:r[1][1], r[0][0]:r[1][0]]).mean(0)
hara /= (len(rects))
return hara
def make_hara_map(im, rects):
# draws heatmap of haralick texture PCA dim1 variance
if len(rects)==0:
return None
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
hara = []
for r in rects:
# slice images as: img[y0:y1, x0:x1]
hara.append(pcaFG.transform(haralick(im[r[0][1]:r[1][1], r[0][0]:r[1][0]]).mean(0).reshape(1, -1)))
hara = np.array(hara)
haraMean = np.mean(hara, axis=0)
haraStd = np.std(hara, axis=0)
haraMins = np.min(hara, axis=0)
haraMaxs = np.max(hara, axis=0)
norm = (haraMaxs-haraMins)
copy = im.copy()
copy = cv2.cvtColor(copy, cv2.COLOR_BGRA2RGBA)
im = cv2.cvtColor(im, cv2.COLOR_BGRA2RGBA)
for i in range(hara.shape[0]):
brightScale = 255*(hara[i] - haraMins)/norm
bright = brightScale[0][0]
r = rects[i]
cv2.rectangle(copy, r[0], r[1], [0, bright, 0, 255], -1)
f, axarr = plt.subplots(2, 1)
axarr[0].imshow(copy)
axarr[1].imshow(im)
plt.show()
# load configuration
with open('../../config.json', 'rb') as f:
config = json.load(f)
mainImPath = config['image_dir']
pDir = config['pickle_dir']
pcaFG = pk.load(open(pDir + 'pcaFG.pk', 'rb'))
bb = pk.load(open(pDir + 'pDogs-bounding-boxes-clean.pd.pk', 'rb'))
bb.dropna(inplace=True)
# do something like sorted(bb.breed.unique().tolist())[50:] to check another breed
for breed in sorted(bb.breed.unique().tolist())[50:]:
print('breed:', breed)
cropDir = mainImPath + breed + '/grabcut/'
fgDir = cropDir + 'fg/'
fgFiles = os.listdir(fgDir)
for fi in fgFiles:
try:
fg = cv2.imread(fgDir + fi, -1) # -1 tells it to load alpha channel
except Exception as err:
print('exception:', err)
continue
fgRects, bgRects = get_fg_bg_rects(fg)
make_hara_map(fg, fgRects)
|
mit
| -3,552,087,796,513,353,000 | 30.196429 | 107 | 0.583453 | false | 2.756906 | false | false | false |
GrognardsFromHell/TemplePlus
|
tpdatasrc/tpgamefiles/rules/d20_combat/to_hit_processing.py
|
1
|
12638
|
from templeplus.pymod import PythonModifier
from toee import *
import tpdp
import logbook
import roll_history
debug_enabled = False
def debug_print(*args):
if debug_enabled:
for arg in args:
print arg,
return
def handle_sanctuary(to_hit_eo, d20a):
tgt = d20a.target
if tgt == OBJ_HANDLE_NULL or not tgt.is_critter():
return
if d20a.query_can_be_affected_action_perform(tgt):
return
flags = to_hit_eo.attack_packet.get_flags()
if flags & D20CAF_CRITICAL:
flags &= ~D20CAF_CRITICAL
if flags & D20CAF_HIT:
flags &= ~D20CAF_HIT
to_hit_eo.bonus_list.add_zeroed(262) # Action lost due to Sanctuary
to_hit_eo.attack_packet.set_flags(flags)
return
def add_percent_chance_history_stub():
return
def mirror_image_attack_roll(d20a, spell_id):
performer = d20a.performer
target = d20a.target
#Target AC
mi_ac_evt_obj = tpdp.EventObjAttack()
mi_ac_evt_obj.attack_packet.attacker = performer
mi_ac_evt_obj.attack_packet.target = target
flags = d20a.flags
flags |= D20CAF_TOUCH_ATTACK
mi_ac_evt_obj.attack_packet.set_flags(flags)
mi_ac_evt_obj.attack_packet.action_type = d20a.action_type
mi_ac_evt_obj.dispatch(target, OBJ_HANDLE_NULL, ET_OnGetAC, EK_NONE)
tgt_ac = mi_ac_evt_obj.bonus_list.get_sum()
#Performer to Hit Bonus
to_hit = tpdp.EventObjAttack()
to_hit.dispatch(performer, OBJ_HANDLE_NULL, ET_OnToHitBonus2, EK_NONE)
dc = 20
to_hit_dice = dice_new("1d{}".format(dc))
to_hit_roll = to_hit_dice.roll()
to_hit_bonus = to_hit.bonus_list.get_sum()
spell_enum = tpdp.SpellPacket(spell_id).spell_enum
spell_name = game.get_spell_mesline(spell_enum)
roll_id = tpdp.create_history_dc_roll(performer, tgt_ac, to_hit_dice, to_hit_roll, spell_name, to_hit.bonus_list)
result = to_hit_roll - dc + to_hit_bonus
d20a.roll_id_0 = roll_id
return result
def hitMirrorImage(d20a, numberOfMirrorImages):
#Check if real target was hit
#A roll of 1 indicates hit on real target
mirrorDice = dice_new("1d{}".format(numberOfMirrorImages+1) )
mirrorRoll = mirrorDice.roll()
if mirrorRoll == 1:
return False
performer = d20a.performer
target = d20a.target
#Get spell_id and spellName
spell_id = target.d20_query_get_data(Q_Critter_Has_Mirror_Image,0)
roll_result = mirror_image_attack_roll(d20a, spell_id)
if roll_result >= 0:
target.d20_send_signal(S_Spell_Mirror_Image_Struck, spell_id, 0)
target.float_mesfile_line('mes\\combat.mes', 109)
game.create_history_from_pattern(10, performer, target)
return True
else:
#I am unsure how misses are actually handled in this version
return False
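# Worked odds for the roll above (illustrative): with 4 mirror images active, the 1d5
# roll lands on the real target only on a 1, i.e. a 20% chance; a hit on an image fires
# S_Spell_Mirror_Image_Struck so the spell condition can remove that image.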
def getDefenderConcealment(d20a):
target = d20a.target
defenderConcealment = tpdp.EventObjAttack()
defenderConcealment.attack_packet.set_flags(d20a.flags)
defenderConcealment.attack_packet.target = target
defenderConcealment.attack_packet.attacker = d20a.performer
return defenderConcealment.dispatch(target, OBJ_HANDLE_NULL, ET_OnGetDefenderConcealmentMissChance, EK_NONE)
def getAttackerConcealment(performer):
performerConcealment = tpdp.EventObjAttack()
performerConcealment.dispatch(performer, OBJ_HANDLE_NULL, ET_OnGetAttackerConcealmentMissChance, EK_NONE)
return performerConcealment.bonus_list.get_highest()
def getSuppressConcealment(performer, target):
#suppressingConditions can be easily expanded with new conditions if necessary
suppressingConditions = [tpdp.get_condition_ref("sp-True Strike"), tpdp.get_condition_ref("Weapon Seeking")]
if any(performer.d20_query_with_data(Q_Critter_Has_Condition, conRef, 0) for conRef in suppressingConditions):
return True
elif performer.can_blindsee(target):
return True
elif performer.d20_query("Ignore Concealment"): #Example for Arcane Archer; not implemented in AA
return True
return False
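# Illustrative note (not part of the original file): another concealment-suppressing
# effect could be added by appending a further condition reference, e.g.
#     suppressingConditions.append(tpdp.get_condition_ref("sp-Some Condition"))
# where "sp-Some Condition" is only a placeholder name, not a real condition.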
def rollConcealment(concealmentMissChance):
concealmentDice = dice_new("1d100")
concealmentDiceRoll = concealmentDice.roll()
if concealmentDiceRoll > concealmentMissChance:
return True, concealmentDiceRoll
return False, concealmentDiceRoll
def toHitResult(performerToHit, targetAc):
toHitDice = dice_new("1d20")
toHitRoll = toHitDice.roll()
if toHitRoll == 1:
return False, toHitRoll
elif toHitRoll == 20:
return True, toHitRoll
elif toHitRoll + performerToHit >= targetAc:
return True, toHitRoll
return False, toHitRoll
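# Illustrative note (not part of the original file): toHitResult follows the standard
# d20 rule - a natural 1 always misses, a natural 20 always hits, and otherwise the
# attack lands when roll + bonus >= target AC. For example, with a +5 bonus against
# AC 18, only raw rolls of 13 or higher (or a natural 20) hit.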
def to_hit_processing(d20a):
performer = d20a.performer #auto performer = d20a.d20APerformer;
d20Data = d20a.data1 #auto d20Data = d20a.data1;
target = d20a.target #auto tgt = d20a.d20ATarget;
if not target:
return
#Mirror Image
numberOfMirrorImages = target.d20_query(Q_Critter_Has_Mirror_Image)
if numberOfMirrorImages:
if hitMirrorImage(d20a, numberOfMirrorImages):
return
#Concealment
debug_print("Concealment")
targetConcealment = getDefenderConcealment(d20a)
performerCanSuppressConcealment = getSuppressConcealment(performer, target)
if performerCanSuppressConcealment:
targetConcealment = 0
concealmentMissChance = max(targetConcealment, getAttackerConcealment(performer))
if concealmentMissChance > 0:
is_success, miss_chance_roll = rollConcealment(concealmentMissChance)
if is_success:
roll_id = roll_history.add_percent_chance_roll(performer, target, concealmentMissChance, 60, miss_chance_roll, 194, 193)
d20a.roll_id_1 = roll_id
else: # concealment miss
roll_id = roll_history.add_percent_chance_roll(performer, target, concealmentMissChance, 60, miss_chance_roll, 195, 193)
d20a.roll_id_1 = roll_id
# Blind fight - give second chance
if not performer.has_feat(feat_blind_fight):
return
is_success, miss_chance_roll = rollConcealment(concealmentMissChance)
if not is_success:
roll_id = roll_history.add_percent_chance_roll(performer, target, concealmentMissChance, 61, miss_chance_roll, 195, 193)
return
roll_id = roll_history.add_percent_chance_roll(performer, target, concealmentMissChance, 61, miss_chance_roll, 194, 193)
d20a.roll_id_2 = roll_id
#ToHitBonus Actions
debug_print("To Hit")
to_hit_eo = tpdp.EventObjAttack()
to_hit_eo.attack_packet.set_flags(d20a.flags)
to_hit_eo.attack_packet.target = target
to_hit_eo.attack_packet.action_type = d20a.action_type #dispIoToHitBon.attackPacket.d20ActnType = d20a.action_type
to_hit_eo.attack_packet.attacker = performer
to_hit_eo.attack_packet.event_key = d20Data #dispIoToHitBon.attackPacket.dispKey = d20Data
unarmed = OBJ_HANDLE_NULL
if to_hit_eo.attack_packet.get_flags() & D20CAF_TOUCH_ATTACK:
to_hit_eo.attack_packet.set_weapon_used(unarmed)
elif to_hit_eo.attack_packet.get_flags() & D20CAF_SECONDARY_WEAPON:
offhandItem = performer.item_worn_at(item_wear_weapon_secondary)
if offhandItem.type != obj_t_weapon:
to_hit_eo.attack_packet.set_weapon_used(unarmed)
else:
to_hit_eo.attack_packet.set_weapon_used(offhandItem)
else:
mainhandItem = performer.item_worn_at(item_wear_weapon_primary)
if mainhandItem.type != obj_t_weapon:
to_hit_eo.attack_packet.set_weapon_used(unarmed)
else:
to_hit_eo.attack_packet.set_weapon_used(mainhandItem)
to_hit_eo.attack_packet.ammo_item = performer.get_ammo_used()
flags = to_hit_eo.attack_packet.get_flags()
flags |= D20CAF_FINAL_ATTACK_ROLL
to_hit_eo.attack_packet.set_flags(flags)
to_hit_eo.dispatch(performer, OBJ_HANDLE_NULL, ET_OnGetBucklerAcPenalty , EK_NONE)
to_hit_eo.dispatch(performer, OBJ_HANDLE_NULL, ET_OnToHitBonus2, EK_NONE) # // note: the "Global" condition has ToHitBonus2 hook that dispatches the ToHitBonusBase
to_hit_bon_final = to_hit_eo.dispatch(performer, OBJ_HANDLE_NULL, ET_OnToHitBonusFromDefenderCondition, EK_NONE)
#targetAc Actions
debug_print("Target AC")
target_ac_eo = to_hit_eo.__copy__()
target_ac_eo.bonus_list.reset()
target_ac_eo.dispatch(target, OBJ_HANDLE_NULL, ET_OnGetAC, EK_NONE)
tgt_ac_final = target_ac_eo.dispatch(target, OBJ_HANDLE_NULL, ET_OnGetAcModifierFromAttacker, EK_NONE)
#Check if attacks hits
attackDidHit, toHitRoll = toHitResult(to_hit_bon_final, tgt_ac_final)
critAlwaysCheat = cheats.critical #Note: changed behavior from vanilla (this used to toggle the property)
#Check for special hit conditions
if not attackDidHit:
if to_hit_eo.attack_packet.get_flags() & D20CAF_ALWAYS_HIT:
attackDidHit = True
elif critAlwaysCheat:
attackDidHit = True
else:
#Reroll Check
if performer.d20_query(Q_RerollAttack):
tpdp.create_history_attack_roll(performer, target, toHitRoll, to_hit_eo.bonus_list, target_ac_eo.bonus_list, to_hit_eo.attack_packet.get_flags() )
rerollDidHit, toHitRoll = toHitResult(to_hit_bon_final, tgt_ac_final)
flags = to_hit_eo.attack_packet.get_flags()
flags |= D20CAF_REROLL
to_hit_eo.attack_packet.set_flags(flags)
if not rerollDidHit:
logbook.inc_misses(performer)
else:
attackDidHit = True
if not attackDidHit:
debug_print("Missed")
roll_id = tpdp.create_history_attack_roll(performer, target, toHitRoll, to_hit_eo.bonus_list, target_ac_eo.bonus_list, to_hit_eo.attack_packet.get_flags() )
d20a.roll_id_0 = roll_id
return
#We have a hit sir!
debug_print("Scored a hit")
flags = to_hit_eo.attack_packet.get_flags()
flags |= D20CAF_HIT
to_hit_eo.attack_packet.set_flags(flags)
logbook.inc_hits(performer)
#Check if attack was a critical hit
performerCritRange = to_hit_eo.__copy__()
performerCritRange.bonus_list.reset()
critRange = 21 - performerCritRange.dispatch(performer, OBJ_HANDLE_NULL, ET_OnGetCriticalHitRange, EK_NONE)
if target.d20_query(Q_Critter_Is_Immune_Critical_Hits):
isCritical = False
elif toHitRoll == 20:
isCritical = True
elif toHitRoll >= critRange:
isCritical = True
elif critAlwaysCheat:
isCritical = True
else:
isCritical = False
#Check to Confirm Critical Hit
crit_hit_roll = -1
if isCritical:
debug_print("Confirm critical:")
to_hit_bon_final += to_hit_eo.dispatch(performer, OBJ_HANDLE_NULL, ET_OnConfirmCriticalBonus, EK_NONE)
critConfirmed, crit_hit_roll = toHitResult(to_hit_bon_final, tgt_ac_final)
#Check for special confirm conditions
if not critConfirmed:
if performer.d20_query("Always Confirm Criticals"):
critConfirmed = True
elif critAlwaysCheat:
critConfirmed = True
else:
if performer.d20_query(Q_RerollCritical):
tpdp.create_history_attack_roll(performer, target, toHitRoll, to_hit_eo.bonus_list, target_ac_eo.bonus_list, to_hit_eo.attack_packet.get_flags(), crit_hit_roll )
critConfirmed, crit_hit_roll = toHitResult(to_hit_bon_final, tgt_ac_final)
#no reroll flag seems to be added in original code
if critConfirmed:
debug_print("Crit confirm")
flags = to_hit_eo.attack_packet.get_flags()
flags |= D20CAF_CRITICAL
to_hit_eo.attack_packet.set_flags(flags)
#Deflect Arrows
#Unsure why this is done after the critical confirmation;
#if it were done before, the history window for a normal attack
#could be created earlier
#dispIoToHitBon.Dispatch(dispIoToHitBon.attackPacket.victim, objHndl::null, dispTypeDeflectArrows, DK_NONE)
#unsure why the original does not simply use tgt; copying that behavior here
to_hit_eo.dispatch(to_hit_eo.attack_packet.target, OBJ_HANDLE_NULL, ET_OnDeflectArrows, EK_NONE)
handle_sanctuary(to_hit_eo, d20a)
#Set flags
debug_print("Final")
d20a.flags = to_hit_eo.attack_packet.get_flags()
roll_id = tpdp.create_history_attack_roll(performer, target, toHitRoll, to_hit_eo.bonus_list, target_ac_eo.bonus_list, to_hit_eo.attack_packet.get_flags(), crit_hit_roll )
d20a.roll_id_0 = roll_id
return
|
mit
| -2,265,534,329,201,253,600 | 40.032468 | 182 | 0.677006 | false | 3.051183 | false | false | false |
openbermuda/karmapi
|
karmapi/nzquake.py
|
1
|
1633
|
""" The data is available from Geonet, the official source of New
Zealand earthquake hazard data:
http://wfs.geonet.org.nz/geonet/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=geonet:quake_search_v1&outputFormat=csv
Geonet Data policy
==================
All data and images are made available free of charge through the
GeoNet project to facilitate research into hazards and assessment of
risk. GeoNet is sponsored by the New Zealand Government through its
agencies: Earthquake Commission (EQC), GNS Science and Land
Information New Zealand (LINZ). The use of data or images is subject
to the following conditions:
Users are requested to acknowledge the GeoNet project sponsors as the
source of the data or images. (Suggested text: We acknowledge the New
Zealand GeoNet project and its sponsors EQC, GNS Science and LINZ, for
providing data/images used in this study.)
The GeoNet project sponsors accept no liability for any loss or
damage, direct or indirect, resulting from the use of the data or
images provided. The GeoNet project sponsors do not make any
representation in respect of the information's accuracy, completeness
or fitness for any particular purpose.
"""
URL = "http://wfs.geonet.org.nz/geonet/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=geonet:quake_search_v1&outputFormat=csv"
from pathlib import Path
import requests
import karmapi
import pandas
def get(path):
path = Path(path)
r = requests.get(URL)
path.write_bytes(r.content)
def datefix(df):
tt = df.origintime.apply(lambda x: x[:19])
df.index = pandas.to_datetime(tt)
return df
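# Illustrative usage sketch (not part of the original module; assumes network access,
# that pandas can parse the downloaded CSV, and 'quakes.csv' is just an example name):
#
#     get('quakes.csv')
#     df = datefix(pandas.read_csv('quakes.csv'))
#     print(df.index.min(), df.index.max())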
|
gpl-3.0
| -3,375,490,667,483,754,500 | 29.240741 | 137 | 0.764238 | false | 3.346311 | false | false | false |
hexforge/pulp_db
|
utils/decoder/hkseomd_decoder.py
|
1
|
7007
|
import sys
import struct
import pprint
from rosetta.common import get_spec, decode, build_standard_msg_parser, pb_parser
def build_msg_parsers(spec):
decoders = {}
endian = spec['endian']
decoders['header'] = build_standard_msg_parser(spec['header'])
decoders[100] = build_standard_msg_parser(spec['100.sequence_reset'])
decoders[101] = build_standard_msg_parser(spec['101.logon'])
decoders[102] = build_standard_msg_parser(spec['102.logon_responce'])
decoders[201] = build_standard_msg_parser(spec['201.retransmission_request'])
decoders[202] = build_standard_msg_parser(spec['202.retransmission_responce'])
decoders[203] = build_standard_msg_parser(spec['203.refresh_complete'])
decoders[10] = build_standard_msg_parser(spec['10.market_definition'])
decoders[14] = build_standard_msg_parser(spec['14.currency_rate'])
decoders[20] = build_standard_msg_parser(spec['20.trading_session_status'])
decoders[21] = build_standard_msg_parser(spec['21.security_status'])
decoders[30] = build_standard_msg_parser(spec['30.add_order'])
decoders[31] = build_standard_msg_parser(spec['31.modify_order'])
decoders[32] = build_standard_msg_parser(spec['32.delete_order'])
decoders[33] = build_standard_msg_parser(spec['33.add_odd_lot_order'])
decoders[34] = build_standard_msg_parser(spec['34.delete_odd_lot_order'])
decoders[51] = build_standard_msg_parser(spec['51.trade_cancel'])
decoders[52] = build_standard_msg_parser(spec['52.trade_ticker'])
decoders[62] = build_standard_msg_parser(spec['62.closing_price'])
decoders[40] = build_standard_msg_parser(spec['40.nominal_price'])
decoders[41] = build_standard_msg_parser(spec['41.indicative_equilibrium_price'])
decoders[60] = build_standard_msg_parser(spec['60.statistics'])
decoders[61] = build_standard_msg_parser(spec['61.market_turnover'])
decoders[44] = build_standard_msg_parser(spec['44.yield'])
decoders[70] = build_standard_msg_parser(spec['70.index_definition'])
decoders[71] = build_standard_msg_parser(spec['71.index_data'])
decoders[55] = build_standard_msg_parser(spec['55.top_of_book'])
decoders[42] = build_standard_msg_parser(spec['42.estimated_average_settlement_price'])
decoders[50] = build_standard_msg_parser(spec['50.Trade'])
sec_1 = build_standard_msg_parser(spec['11.security_definition'])
sec_2 = build_standard_msg_parser(spec['11.sub.security_definition'])
def decoder_11(data, index):
msg, index = sec_1(data, index)
submessages = []
msg['submessages'] = submessages
for _ in range(msg['NoUnderlyingSecurities']):
msg, index = sec_2(data, index)
submessages.append(msg)
return msg, index
decoders[11] = decoder_11
liq_1 = build_standard_msg_parser(spec['13.liquidity_provider'])
liq_2 = build_standard_msg_parser(spec['13.sub.liquidity_provider'])
def decoder_13(data, index):
msg, index = liq_1(data, index)
submessages = []
msg['submessages'] = submessages
for _ in range(msg['NoLiquidityProviders']):
msg, index = liq_2(data, index)
submessages.append(msg)
return msg, index
decoders[13] = decoder_13
agg_1 = build_standard_msg_parser(spec['53.aggregate_order_book_update'])
agg_2 = build_standard_msg_parser(spec['53.sub.aggregate_order_book_update_spec2'])
def decoder_53(data, index):
msg, index = agg_1(data, index)
submessages = []
msg['submessages'] = submessages
for _ in range(msg['NoEntries']):
msg, index = agg_2(data, index)
submessages.append(msg)
return msg, index
decoders[53] = decoder_53
bro_1 = build_standard_msg_parser(spec['54.broker_queue'])
bro_2 = build_standard_msg_parser(spec['54.sub.broker_queue'])
def decoder_54(data, index):
msg, index = bro_1(data, index)
submessages = []
msg['submessages'] = submessages
for _ in range(msg['ItemCount']):
msg, index = bro_2(data, index)
submessages.append(msg)
return msg, index
decoders[54] = decoder_54
news = build_standard_msg_parser(spec['22.news'])
news1 = build_standard_msg_parser(spec['22.sub1.news'])
news2 = build_standard_msg_parser(spec['22.sub2.news'])
news3 = build_standard_msg_parser(spec['22.sub3.news'])
news4 = build_standard_msg_parser(spec['22.sub4.news'])
news5 = build_standard_msg_parser(spec['22.sub5.news'])
news6 = build_standard_msg_parser(spec['22.sub6.news'])
news7 = build_standard_msg_parser(spec['22.sub7.news'])
news8 = build_standard_msg_parser(spec['22.sub8.news'])
def decoder_22(data, index):
msg, index = news(data, index)
n_msg, index = news1(data, index)
msg.update(n_msg)
market_codes = []
msg['market_codes'] = market_codes
for _ in range(n_msg['NoMarketCodes']):
msg, index = news2(data, index)
market_codes.append(msg)
n_msg, index = news3(data, index)
n_msg, index = news4(data, index)
sec_codes = []
msg['sec_codes'] = sec_codes
for _ in range(n_msg['NoSecurityCodes']):
msg, index = news5(data, index)
sec_codes.append(msg)
n_msg, index = news6(data, index)
n_msg, index = news7(data, index)
news_lines = []
msg['news_lines'] = news_lines
for _ in range(n_msg['NoNewsLines']):
msg, index = news8(data, index)
news_lines.append(msg)
return msg, index
decoders[22] = decoder_22
return decoders
def decode_playback(decoders, spec, playback):
header_decoder = decoders['header']
endian = spec['endian']
header_format = spec['header']['format']
header_size = struct.calcsize(header_format)
msg_num = 0
for data in pb_parser(playback):
if not data:
break
msg_num += 1
msg = []
# HEADER
index = 0
try:
decoded_header, index = header_decoder(data, index)
except struct.error:
raise
msg.append(decoded_header)
# SUBMSGS
number_of_submsgs = decoded_header['MsgCount']
for _ in range(number_of_submsgs):
try:
size = struct.unpack(endian+'H', data[index:index+2])[0]
typ = struct.unpack(endian+'H', data[index+2:index+4])[0]
sub_msg, index = decoders[typ](data, index)
except (struct.error, KeyError):
import pdb
pdb.set_trace()
raise
msg.append(sub_msg)
yield msg
print(msg_num)
def main():
spec = get_spec('hkseomd.ini')
decoders = build_msg_parsers(spec)
for msg in decode_playback(decoders, spec, sys.argv[1]):
#pass
pprint.pprint(msg)
if __name__ == '__main__':
main()
|
apache-2.0
| 6,615,086,416,629,237,000 | 39.04 | 91 | 0.62095 | false | 3.338256 | false | false | false |
nathanbjenx/cairis
|
cairis/bin/cairisd.py
|
1
|
1541
|
#!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
__author__ = 'Shamal Faily'
import os
import sys
from cairis.daemon import create_app, db
from cairis.daemon.CairisHTTPError import CairisHTTPError
from flask_script import Manager, Server, Command
app = create_app()
manager = Manager(app)
manager.add_command('runserver', Server(host='0.0.0.0', port=7071))
@app.after_request
def apply_caching(response):
response.headers["X-Frame-Options"] = "SAMEORIGIN"
return response
class TestClient(Command):
def run(self):
app.test_client()
manager.add_command('testclient', TestClient())
def main(args):
manager.run()
if __name__ == '__main__':
try:
main(sys.argv)
except CairisHTTPError as e:
print('Fatal CAIRIS error: ' + str(e))
sys.exit(-1)
|
apache-2.0
| -5,118,065,886,492,460,000 | 29.215686 | 67 | 0.73329 | false | 3.550691 | false | false | false |
ednapiranha/snapshots-from-here
|
snapshots/snappy.py
|
1
|
11535
|
# -*- coding: utf-8 -*-
import base64
import os
import random
import time
from auto_tagify import AutoTagify
from boto.s3.key import Key
from PIL import Image
from pymongo import DESCENDING
from pymongo.objectid import ObjectId
import settings
CONTENT_TYPE = 'image/jpeg'
ATAG = AutoTagify()
ATAG.link = "/tag"
RECENT_LIMIT = 12
class Snappy(object):
"""All the snapshot functionality"""
def __init__(self):
self.token = ''
self.env = 'dev'
self.db = settings.DATABASE
def set_environment(self, env='dev'):
if env == 'test':
self.env = env
self.db = settings.TEST_DATABASE
def get_or_create_email(self, email):
"""Find the email address in the system
or create it if it doesn't exist.
"""
email = email.lower().strip()
if not self.db.users.find_one({"email":email}):
self.db.users.update({"email":email},
{"$set":{"token":self._generate_token(email)}},
upsert=True)
emailer = self.db.users.find_one({"email":email})
self.token = emailer['token']
return emailer
def get_user_by_id(self, id):
"""Find a user by id."""
return self.db.users.find_one({"_id":ObjectId(id)})
def get_user_by_token(self, sender_token):
"""Find a user by token."""
return self.db.users.find_one({"token":sender_token})
def update_profile(self, email, **kwargs):
"""Update profile information."""
profile = {}
for key in kwargs:
profile[key] = str(kwargs[key]).strip()
self.db.users.update({"email":email},
{"$set":profile})
def _generate_token(self, email):
"""Generate a token based on the timestamp and the user's
email address.
"""
random_int = str(random.randrange(100, 10000))
token_string = '%s%s%s' % (random_int,
email,
str(int(time.time())))
return base64.b64encode(token_string)
def upload(self, description, filename, sender_token):
"""Upload the image to the user's account. Also, autotag the
description.
"""
image_full_path = os.path.join('tmp/', filename + '_original')
image_full_path_medium = os.path.join('tmp/', filename + '_medium')
image_full_path_thumb = os.path.join('tmp/', filename + '_thumb')
aws_key = Key(settings.BUCKET)
aws_key.key = filename + '_original.jpg'
aws_key.set_contents_from_filename(image_full_path,
headers={'Content-Type': CONTENT_TYPE})
image_full_path_original = '%s%s_original.jpg' % (settings.IMAGE_URL,
filename)
aws_key.key = filename + '_thumb.jpg'
aws_key.set_contents_from_filename(image_full_path_thumb,
headers={'Content-Type': CONTENT_TYPE})
image_full_path_thumb = '%s%s_thumb.jpg' % (settings.IMAGE_URL, filename)
aws_key.key = filename + '_medium.jpg'
aws_key.set_contents_from_filename(image_full_path_medium,
headers={'Content-Type': CONTENT_TYPE})
image_full_path_medium = '%s%s_medium.jpg' % (settings.IMAGE_URL,
filename)
ATAG.text = description
tagged_description = ATAG.generate()
self.db.photos.update({"image_filename":filename},
{"$set":{"description":description,
"tagged_description":tagged_description,
"tags":ATAG.tag_list(),
"image_original":image_full_path_original,
"image_thumb":image_full_path_thumb,
"image_medium":image_full_path_medium,
"token":sender_token,
"created_at":int(time.time())}},
upsert=True)
ATAG.text = ''
return self.db.photos.find_one({"image_filename":filename})
def get_email(self, sender_token):
"""Get the user's email by their token."""
return self.db.users.find_one({"token":sender_token})['email']
def update_description(self, image_id, description):
"""Update the description for the image."""
ATAG.text = description
tagged_description = ATAG.generate()
self.db.photos.update({"_id":ObjectId(image_id)},
{"$set":{"description":description,
"tagged_description":tagged_description,
"tags":ATAG.tag_list()}})
ATAG.text = ''
def get_recent(self, page=0, nav='next'):
"""Get all recently uploaded images. Navigation defaults at the next
image created (descending). If navigation is set to 'prev', we go in the
reverse direction.
"""
photos = self.db.photos.find().sort("created_at", DESCENDING)
page = self._set_page(photos, page, nav)
try:
return photos.skip(page*1).limit(1)[0]
except IndexError:
return self.db.photos.find().sort("created_at").limit(1)[0]
def get_recent_by_user(self, sender_token, page=0, nav='next'):
"""Get all recently uploaded images by a user. Navigation defaults at the
next image created (descending). If navigation is set to 'prev', we go in
the reverse direction.
"""
photos = self.db.photos.find({"token":sender_token}).sort("created_at", DESCENDING)
page = self._set_page(photos, page, nav)
try:
return photos.skip(page*1).limit(1)[0]
except IndexError:
return self.db.photos.find().sort("created_at").limit(1)[0]
def get_recent_tag(self, tag=None, page=0, nav='next'):
"""Get all recently uploaded images matching this tag. Navigation
defaults to the next image created (descending). If navigation is set to
'prev', we go in the reverse direction.
"""
photos = self.db.photos.find({"tags":tag}).sort("created_at", DESCENDING)
page = self._set_page(photos, page, nav)
try:
return photos.skip(page*1).limit(1)[0]
except IndexError:
return self.db.photos.find().sort("created_at").limit(1)[0]
def get_photo_count(self, tag=None):
"""Get the total number of photos. If a tag is specified,
get the total number with that tag.
"""
if tag:
return self.db.photos.find({"tags":tag}).count() - 1
else:
return self.db.photos.count() - 1
def get_photo_count_by_user(self, sender_token):
"""Get the total number of photos for a user.
"""
return self.db.photos.find({"token":sender_token}).count() - 1
def get_image(self, image_id):
"""Return the image matching the given id."""
return self.db.photos.find_one({"_id":ObjectId(image_id)})
def get_latest_snapshots(self, sender_token):
"""Get the last 12 images from this user."""
return self.db.photos.find({"token":
sender_token}).sort("created_at", DESCENDING).limit(RECENT_LIMIT)
def get_latest_favorites(self, sender_token):
"""Get the last 12 favorites from this user."""
favorites = self.db.favorites.find({"token":
sender_token}).sort("created_at", DESCENDING).limit(RECENT_LIMIT)
photos = []
for favorite in favorites:
photos.append(self.db.photos.find_one({"_id": favorite['image_id']}))
return photos
def get_image_by_user(self, image_id, sender_token):
"""Return an image matching the given id and user."""
return self.db.photos.find_one({"_id":ObjectId(image_id),
"token":sender_token})
def delete_image(self, image_id, sender_token):
"""Delete the image matching the given id and user."""
photo = self.db.photos.find_one({"_id":ObjectId(image_id),
"token":sender_token})
settings.BUCKET.delete_keys((photo['image_filename'] + '_thumb.jpg',
photo['image_filename'] + '_medium.jpg',
photo['image_filename'] + '_original.jpg'))
self.db.photos.remove({"_id":ObjectId(image_id)})
self.db.comments.remove({"image_id":ObjectId(image_id)})
self.db.favorites.remove({"image_id":ObjectId(image_id)})
def favorited(self, image_id, sender_token):
"""Toggled favorite/unfavorite of an image."""
photo = self.db.favorites.find_one({"image_id":ObjectId(image_id),
"token":sender_token})
if photo is None:
# favorite
self.db.favorites.update({"image_id":ObjectId(image_id)},
{"$set":{"token":sender_token,
"created_at":int(time.time())}},
upsert=True)
return True
else:
# unfavorite
self.db.favorites.remove({"_id":ObjectId(photo['_id'])})
return False
def is_favorited(self, image_id, sender_token):
"""Check to see if an image was favorited."""
photo = self.db.favorites.find_one({"image_id":ObjectId(image_id),
"token":sender_token})
if photo is None:
return False
return True
def add_comment(self, image_id, sender_token, description):
"""Add a comment."""
if len(description.strip()) < 1:
return False
else:
user = self.db.users.find_one({"token":sender_token})
comment = self.db.comments.save({"image_id":ObjectId(image_id),
"token":sender_token,
"email":user['email'],
"full_name":user['full_name'],
"description":description,
"created_at":int(time.time())})
return self.db.comments.find_one({"_id":ObjectId(comment)})
def get_comments(self, image_id):
"""Get all comments for this image."""
return self.db.comments.find({"image_id":ObjectId(
image_id)}).sort("created_at", DESCENDING)
def delete_comment(self, comment_id, sender_token):
"""Delete a comment that you wrote."""
self.db.comments.remove({"_id":ObjectId(comment_id),
"token":sender_token})
def _set_page(self, photos, page, nav):
"""Set the page and nav values."""
page = int(page)
if nav == 'next' and photos.count() > 1:
if page > photos.count() - 1:
page = photos.count() - 1
elif nav == 'prev':
if page < 0:
page = 0
else:
page = 0
return int(page)
|
bsd-3-clause
| 7,200,109,346,975,077,000 | 40.642599 | 91 | 0.525704 | false | 4.158255 | false | false | false |
Mikescher/Project-Euler_Befunge
|
compiled/Python2/Euler_Problem-046.py
|
1
|
4454
|
#!/usr/bin/env python2
# transpiled with BefunCompile v1.3.0 (c) 2017
import sys
import zlib, base64
_g = ("AR+LCAAAAAAABADt2k1Lw0AQBuC/st20l4S4k020zVAWL5716CGkRS0LoriI7qk/3lk/oNraFooo8j6wgelMs5uZ5taYJSotY9TF48Pt4vpJnT3fLR5VmeKru8W9ak7U"
+ "H5cBAAAAAAAAAAAAAAAAAAAAAAAA/AO/+V88Z3cUxNViii5GFagtPLW55UDjsvM0TiFPf/Kcn84Risp4qrqRXLjT53qqhvHbb75ZPV17GsIkU9E0HxnHLfliTH4+UEun"
+ "b7TNuaKgrWZLIW8o2JrCrj3eOJJSVR/LZVP2mLwtWLIsO/hRJxdTFV4rXWb9rHdVSvpykMXZesVASlK6K61h6o/G5Mg7TgeXjOMlm0bGY6wMZBoDTfJJnjOnNQlVFSpi"
+ "uc1ek5r12dxT7anZWN7v1YvXJ95d0rteOZmANS2FxkgwlIjzUpoQldOXupZwpRlBYier4EaaMX+/jVQP06fSQhnBWvOWm7Ye7jra3s+51ZaW919+zkqnV2nLvgfO40BT"
+ "9QKcPtfBiCwAAA==")
g = base64.b64decode(_g)[1:]
for i in range(ord(base64.b64decode(_g)[0])):
g = zlib.decompress(g, 16+zlib.MAX_WBITS)
g=list(map(ord, g))
def gr(x,y):
if(x>=0 and y>=0 and x<200 and y<57):
return g[y*200 + x];
return 0;
def gw(x,y,v):
if(x>=0 and y>=0 and x<200 and y<57):
g[y*200 + x]=v;
def td(a,b):
return ((0)if(b==0)else(a//b))
def tm(a,b):
return ((0)if(b==0)else(a%b))
s=[]
def sp():
global s
if (len(s) == 0):
return 0
return s.pop()
def sa(v):
global s
s.append(v)
def sr():
global s
if (len(s) == 0):
return 0
return s[-1]
def _0():
gw(1,0,200)
gw(2,0,50)
gw(4,0,10000)
gw(3,0,2)
return 1
def _1():
gw(0,1,32)
gw(1,1,32)
gw(8,0,1073741824)
gw(tm(gr(3,0),gr(1,0)),(td(gr(3,0),gr(1,0)))+1,88)
sa(gr(3,0)+gr(3,0))
sa((1)if((gr(3,0)+gr(3,0))<gr(4,0))else(0))
return 2
def _2():
return (25)if(sp()!=0)else(3)
def _3():
sp();
return 4
def _4():
global t0
sa(gr(3,0)+1)
sa(gr(3,0)+1)
gw(3,0,gr(3,0)+1)
sa(tm(sp(),gr(1,0)))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(td(sp(),gr(1,0)))
sa(sp()+1)
v0=sp()
t0=gr(sp(),v0)
t0=t0-32
return 5
def _5():
global t0
return (6)if((t0)!=0)else(4)
def _6():
return (1)if(gr(4,0)>gr(3,0))else(7)
def _7():
gw(3,0,0)
gw(5,0,3)
return 8
def _8():
global t0
sa(gr(5,0)+2)
sa(gr(5,0)+2)
sa(gr(5,0)+2)
gw(5,0,gr(5,0)+2)
sa(tm(sp(),gr(1,0)))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(td(sp(),gr(1,0)))
sa(sp()+1)
v0=sp()
t0=gr(sp(),v0)
t0=t0-32
return (9)if((t0)!=0)else(10)
def _9():
sa(79)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(tm(sr(),gr(1,0)))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(td(sp(),gr(1,0)))
sa(sp()+1)
v0=sp()
v1=sp()
gw(v1,v0,sp())
return 8
def _10():
sp();
sa(3)
sa(3-gr(5,0))
return 11
def _11():
return (12)if(sp()!=0)else(24)
def _12():
global t0
sa(sr());
sa(tm(sr(),gr(1,0)))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(td(sp(),gr(1,0)))
sa(sp()+1)
v0=sp()
t0=gr(sp(),v0)
t0=t0-32
return (14)if((t0)!=0)else(13)
def _13():
sa(sp()+1)
sa(sr()-gr(5,0))
return 11
def _14():
global t0
t0=gr(5,0)
gw(9,0,0)
sa(sr());
sa(t0)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
v0=sp()
sa(sp()-v0)
sa(sp()/2);
sa(sr());
gw(7,0,sp())
sa(gr(8,0))
sa((1)if(gr(8,0)>gr(7,0))else(0))
return 15
def _15():
return (23)if(sp()!=0)else(16)
def _16():
sa(sr());
return 17
def _17():
return (20)if(sp()!=0)else(18)
def _18():
sp();
sa(sp()-(gr(9,0)*gr(9,0)))
return (13)if(sp()!=0)else(19)
def _19():
sp();
return 8
def _20():
return (21)if((sr()+gr(9,0))>gr(7,0))else(22)
def _21():
gw(9,0,gr(9,0)/2)
sa(sp()/4);
sa(sr());
return 17
def _22():
global t0
global t1
global t2
t0=sr()+gr(9,0)
t1=gr(7,0)
t2=t1-t0
gw(7,0,t2)
gw(9,0,(sr()*2)+gr(9,0))
gw(9,0,gr(9,0)/2)
sa(sp()/4);
return 16
def _23():
sa(sp()/4);
sa((1)if(sr()>gr(7,0))else(0))
return 15
def _24():
sys.stdout.write(str(sp())+" ")
sys.stdout.flush()
return 26
def _25():
sa(sr());
sa(32)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(tm(sr(),gr(1,0)))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(td(sp(),gr(1,0)))
sa(sp()+1)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sp()+gr(3,0))
sa((1)if(sr()<gr(4,0))else(0))
return 2
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,_21,_22,_23,_24,_25]
c=0
while c<26:
c=m[c]()
|
mit
| -5,779,677,972,069,626,000 | 17.032389 | 136 | 0.504939 | false | 1.984848 | false | false | false |
anantag/twitterAPIHack
|
twitterstream.py
|
1
|
1840
|
import oauth2 as oauth
import urllib2 as urllib
# See Assignment 1 instructions or README for how to get these credentials
access_token_key = "55773725-mKHWq6Fyj2TmqR6xPiBamIw2EYb4B4O95CWXYJJZW"
access_token_secret = "OAw65RNmhHsXTyFIHSZod39nFRwkTdStfTmn5YB0oM"
consumer_key = "N1o286mxPcWFofmwhZqOow"
consumer_secret = "urHTpu960jcRwOhhH9GYIg3iL7l2M58vZn0V57qgfJE"
_debug = 0
oauth_token = oauth.Token(key=access_token_key, secret=access_token_secret)
oauth_consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)
signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()
http_method = "GET"
http_handler = urllib.HTTPHandler(debuglevel=_debug)
https_handler = urllib.HTTPSHandler(debuglevel=_debug)
'''
Construct, sign, and open a twitter request
using the hard-coded credentials above.
'''
def twitterreq(url, method, parameters):
req = oauth.Request.from_consumer_and_token(oauth_consumer,
token=oauth_token,
http_method=http_method,
http_url=url,
parameters=parameters)
req.sign_request(signature_method_hmac_sha1, oauth_consumer, oauth_token)
headers = req.to_header()
if http_method == "POST":
encoded_post_data = req.to_postdata()
else:
encoded_post_data = None
url = req.to_url()
opener = urllib.OpenerDirector()
opener.add_handler(http_handler)
opener.add_handler(https_handler)
response = opener.open(url, encoded_post_data)
return response
def fetchsamples():
url = "https://stream.twitter.com/1/statuses/sample.json"
parameters = []
response = twitterreq(url, "GET", parameters)
for line in response:
print line.strip()
if __name__ == '__main__':
fetchsamples()
|
gpl-2.0
| -1,027,548,437,362,856,700 | 29.163934 | 78 | 0.677174 | false | 3.274021 | false | false | false |
diorcety/translate
|
translate/convert/ical2po.py
|
1
|
4592
|
# -*- coding: utf-8 -*-
#
# Copyright 2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert iCalendar files to Gettext PO localization files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/ical2po.html
for examples and usage instructions.
"""
from translate.convert import convert
from translate.storage import ical, po
class ical2po(object):
"""Convert one or two iCalendar files to a single PO file."""
SourceStoreClass = ical.icalfile
TargetStoreClass = po.pofile
TargetUnitClass = po.pounit
def __init__(self, input_file, output_file, template_file=None,
blank_msgstr=False, duplicate_style="msgctxt"):
"""Initialize the converter."""
self.blank_msgstr = blank_msgstr
self.duplicate_style = duplicate_style
self.extraction_msg = None
self.output_file = output_file
self.source_store = self.SourceStoreClass(input_file)
self.target_store = self.TargetStoreClass()
self.template_store = None
if template_file is not None:
self.template_store = self.SourceStoreClass(template_file)
def convert_unit(self, unit):
"""Convert a source format unit to a target format unit."""
target_unit = self.TargetUnitClass(encoding="UTF-8")
target_unit.addlocation("".join(unit.getlocations()))
target_unit.addnote(unit.getnotes("developer"), "developer")
target_unit.source = unit.source
target_unit.target = ""
return target_unit
def convert_store(self):
"""Convert a single source format file to a target format file."""
self.extraction_msg = "extracted from %s" % self.source_store.filename
for source_unit in self.source_store.units:
self.target_store.addunit(self.convert_unit(source_unit))
def merge_stores(self):
"""Convert two source format files to a target format file."""
self.extraction_msg = ("extracted from %s, %s" %
(self.template_store.filename,
self.source_store.filename))
self.source_store.makeindex()
for template_unit in self.template_store.units:
target_unit = self.convert_unit(template_unit)
template_unit_name = "".join(template_unit.getlocations())
add_translation = (
not self.blank_msgstr and
template_unit_name in self.source_store.locationindex)
if add_translation:
source_unit = self.source_store.locationindex[template_unit_name]
target_unit.target = source_unit.source
self.target_store.addunit(target_unit)
def run(self):
"""Run the converter."""
if self.template_store is None:
self.convert_store()
else:
self.merge_stores()
if self.extraction_msg:
self.target_store.header().addnote(self.extraction_msg,
"developer")
self.target_store.removeduplicates(self.duplicate_style)
if self.target_store.isempty():
return 0
self.target_store.serialize(self.output_file)
return 1
def run_converter(input_file, output_file, template_file=None, pot=False,
duplicatestyle="msgctxt"):
"""Wrapper around converter."""
return ical2po(input_file, output_file, template_file, blank_msgstr=pot,
duplicate_style=duplicatestyle).run()
formats = {
"ics": ("po", run_converter),
("ics", "ics"): ("po", run_converter),
}
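# Illustrative usage sketch (not part of the original module; "example.ics" and
# "example.po" are placeholder file names - run_converter() expects open file objects):
#
#     with open("example.ics", "rb") as inp, open("example.po", "wb") as out:
#         run_converter(inp, out, pot=False)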
def main(argv=None):
parser = convert.ConvertOptionParser(formats, usetemplates=True,
usepots=True, description=__doc__)
parser.add_duplicates_option()
parser.passthrough.append("pot")
parser.run(argv)
if __name__ == '__main__':
main()
|
gpl-2.0
| 6,030,933,143,855,780,000 | 34.596899 | 94 | 0.640679 | false | 4.133213 | false | false | false |
ptphp/PyLib
|
src/webpy1/src/borough/parseBody.py
|
1
|
4768
|
# coding=gbk
import re
import string,urlparse
import os.path as osp
nums = string.digits
# Remove extra whitespace from the HTML code
def clearBlank(html):
if not html or html == None : return ;
html = re.sub('\r|\n|\t','',html)
html = html.replace(' ','').replace(' ','').replace('\'','"')
return html
def clearInfo(html):
if not html or html == None : return ;
html = re.sub('´òµç»°¸øÎÒʱ£¬ÇëÒ»¶¨ËµÃ÷ÔÚ.*?Íø¿´µ½µÄ£¬Ð»Ð»£¡|·¢²¼ÈÕÆÚ£º.*?<br />|<a .*?>|\[ºô½Ð\]|</a>|<p .*?>','',html).replace('°ÙÐÕ','¿ìËÙ×âÁÞÍø')
return html
# HTML code extraction function
def rects(html,regx,cls=''):
if not html or html == None or len(html)==0 : return ;
# Extract using a regular expression
if regx[:1]==chr(40) and regx[-1:]==chr(41) :
reHTML = re.search(regx,html,re.I)
if reHTML == None : return
reHTML = reHTML.group()
intRegx = re.search(regx,reHTML,re.I)
R = reHTML[intRegx]
# Extract using a string match
else :
# Get the position of the string
pattern =re.compile(regx.lower())
intRegx=pattern.findall(html.lower())
# If the start string cannot be found, return None immediately
if not intRegx : return
R = intRegx
# Clean up the content
if cls:
RC = []
for item in R:
RC.append(resub(item,cls))
return RC
else:
return R
def rect(html,regx,cls=''):
#regx = regx.encode('utf-8')
if not html or html == None or len(html)==0 : return ;
# Extract using a regular expression
if regx[:1]==chr(40) and regx[-1:]==chr(41) :
reHTML = re.search(regx,html,re.I)
if reHTML == None : return
reHTML = reHTML.group()
intRegx = re.search(regx,reHTML,re.I)
R = reHTML[intRegx]
# Extract using a string match
else :
# Get the position of the string
pattern =re.compile(regx.lower())
intRegx=pattern.findall(html)
# If the start string cannot be found, return None immediately
if not intRegx : return
R = intRegx[0]
if cls:
R = resub(R,cls)
# Return the extracted characters
return R
# Regex-based removal
def resub(html,regexs):
if not regexs: return html
html =re.sub(regexs,'',html)
return html
def rereplace(html,regexs):
if not regexs: return html
html =html.repalce(regexs,'')
return html
#Build the phone-page redirect URL
def telPageReplace(url):
telUrl=url.split('/')
finalUrl="phone_%s" % telUrl[len(telUrl)-1]
return url.replace(telUrl[len(telUrl)-1],finalUrl)
#Check whether a string is all digits
def check(a):
if type(a) is not str:
return False
else:
for i in a:
if i not in nums:
return False
return True
#Parse the phone number digits
def parseNum(a):
strs=''
if type(a) is not str:
return 0
else:
for i in a:
if i in nums or i == '.':
strs +=i
return strs
def reTel(str,regx):
#regx = '((13[0-9]|15[0-9]|18[89])\\d{8})'
p = re.compile(regx)
#print p
if p.findall(str):
return p.findall(str)[0]
else:
regx = '((13[0-9]|15[0-9]|18[89])\d{8})'
#regx = '(13[0-9]|15[0-9]|18[89])\d{8}'
res = re.search(regx,str).group()
if res:
return res
else:
return ''
def matchURL(tag,url):
print tag
print url
urls = re.findall('(.*)(src|href)=(.+?)( |/>|>).*|(.*)url\(([^\)]+)\)',tag,re.I)
if not urls :
return tag
else :
if urls[0][5] == '' :
urlQuote = urls[0][2]
else:
urlQuote = urls[0][5]
if len(urlQuote) > 0 :
cUrl = re.sub('''['"]''','',urlQuote)
else :
return tag
urls = urlparse.urlparse(url); scheme = urls[0];
if scheme!='' : scheme+='://'
host = urls[1]; host = scheme + host
if len(host)==0 : return tag
path = osp.dirname(urls[2]);
if path=='/' : path = '';
if cUrl.find("#")!=-1 : cUrl = cUrl[:cUrl.find("#")]
# Determine the URL type
if re.search('''^(http|https|ftp):(//|\\\\)(([\w/\\\+\-~`@:%])+\.)+([\w/\\\.\=\?\+\-~`@':!%#]|(&)|&)+''',cUrl,re.I) != None :
# Skip URLs that already start with http
return tag
elif cUrl[:1] == '/' :
# Absolute path
cUrl = host + cUrl
elif cUrl[:3]=='../' :
# Relative path
while cUrl[:3]=='../' :
cUrl = cUrl[3:]
if len(path) > 0 :
path = osp.dirname(path)
elif cUrl[:2]=='./' :
cUrl = host + path + cUrl[1:]
elif cUrl.lower()[:7]=='mailto:' or cUrl.lower()[:11]=='javascript:' :
return tag
else :
cUrl = host + path + '/' + cUrl
R = tag.replace(urlQuote,'"' + cUrl + '"')
return R
def urlencode(str) :
str=str.decode('utf-8').encode('utf-8')
reprStr = repr(str).replace(r'\x', '%')
return reprStr[1:-1]
|
apache-2.0
| -9,106,232,021,613,570,000 | 27.556886 | 153 | 0.490352 | false | 2.26186 | false | false | false |
tsherwen/AC_tools
|
Scripts/2D_GEOSChem_slice_subregion_plotter_example.py
|
1
|
2934
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Plotter for 2D slices of GEOS-Chem output NetCDF files.
NOTES
---
- This is set up for Cly, but many other options (plot/species) are available
by just updating the passed variables/plotting function called.
"""
import AC_tools as AC
import numpy as np
import matplotlib.pyplot as plt
def main():
"""
Basic plotter of NetCDF files using AC_tools
"""
# --- Local settings hardwired here...
fam = 'Cly' # Family to plot
# print species in family for reference...
print((AC.GC_var(fam)))
# --- Get working directory etc from command line (as a dictionary object)
# (1st argument is fil directory with folder, 2nd is filename)
Var_rc = AC.get_default_variable_dict()
# Get details on extracted data (inc. resolution)
Data_rc = AC.get_shared_data_as_dict(Var_rc=Var_rc)
# --- extract data and units of data for family/species...
arr, units = AC.fam_data_extractor(wd=Var_rc['wd'], fam=fam,
res=Data_rc['res'], rtn_units=True, annual_mean=False)
# --- Process data (add and extra processing of data here... )
# take average over time
print((arr.shape))
arr = arr.mean(axis=-1)
# Select surface values
print((arr.shape))
arr = arr[..., 0]
# convert to pptv
arr = arr*1E12
units = 'pptv'
# --- Plot up data...
print((arr.shape))
# - Plot a (very) simple plot ...
# AC.map_plot( arr.T, res=Data_rc['res'] )
# - plot a slightly better plot...
# (loads of options here - just type help(AC.plot_spatial_figure) in ipython)
# set range for data...
fixcb = np.array([0., 100.])
# number of ticks on colorbar (make sure the fixcb range divides by this)
nticks = 6
interval = (1/3.) # number of lat/lon labels... (x*15 degrees... )
# set limits of plot
lat_min = 5.
lat_max = 75.
lon_min = -30.
lon_max = 60.
left_cb_pos = 0.85 # set X (fractional) position
axis_titles = True # add labels for lat and lon
# title for plot
title = "Plot of annual average {}".format(fam)
# save as pdf (just set to True) or show?
# figsize = (7,5) # figsize to use? (e.g. square or rectangular plot)
# call plotter...
AC.plot_spatial_figure(arr, res=Data_rc['res'], units=units, fixcb=fixcb,
lat_min=lat_min, lat_max=lat_max, lon_min=lon_min, lon_max=lon_max,
axis_titles=axis_titles, left_cb_pos=left_cb_pos,
nticks=nticks, interval=interval, title=title, show=False)
# are the spacings right? - if not, just update the values below
bottom = 0.1
top = 0.9
left = 0.1
right = 0.9
fig = plt.gcf()
fig.subplots_adjust(bottom=bottom, top=top, left=left, right=right)
# show and save as PDF?
plt.savefig('pete_plot.png')
AC.show_plot()
if __name__ == "__main__":
main()
|
mit
| 3,210,553,677,345,683,500 | 31.6 | 94 | 0.599864 | false | 3.300337 | false | false | false |
takluyver/xray
|
xray/groupby.py
|
1
|
12796
|
import itertools
from common import ImplementsReduce
from ops import inject_reduce_methods
import variable
import dataset
import numpy as np
def unique_value_groups(ar):
"""Group an array by its unique values.
Parameters
----------
ar : array-like
Input array. This will be flattened if it is not already 1-D.
Returns
-------
values : np.ndarray
Sorted, unique values as returned by `np.unique`.
indices : list of lists of int
Each element provides the integer indices in `ar` with values given by
the corresponding value in `unique_values`.
"""
values, inverse = np.unique(ar, return_inverse=True)
groups = [[] for _ in range(len(values))]
for n, g in enumerate(inverse):
groups[g].append(n)
return values, groups
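# Illustrative example (not part of the original module): for
# np.array(['b', 'a', 'b']) this returns the sorted unique values
# array(['a', 'b']) together with the index groups [[1], [0, 2]].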
def peek_at(iterable):
"""Returns the first value from iterable, as well as a new iterable with
the same content as the original iterable
"""
gen = iter(iterable)
peek = gen.next()
return peek, itertools.chain([peek], gen)
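# Illustrative example (not part of the original module): peek at an iterator
# without losing its first element.
#
#     first, rest = peek_at(x for x in [1, 2, 3])
#     assert first == 1 and list(rest) == [1, 2, 3]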
class GroupBy(object):
"""A object that implements the split-apply-combine pattern.
Modeled after `pandas.GroupBy`. The `GroupBy` object can be iterated over
(unique_value, grouped_array) pairs, but the main way to interact with a
groupby object are with the `apply` or `reduce` methods. You can also
directly call numpy methods like `mean` or `std`.
You should create a GroupBy object by using the `DataArray.groupby` or
`Dataset.groupby` methods.
See Also
--------
XArray.groupby
DataArray.groupby
"""
def __init__(self, obj, group_coord, squeeze=True):
"""Create a GroupBy object
Parameters
----------
obj : Dataset or DataArray
Object to group.
group_coord : DataArray
1-dimensional array with the group values.
squeeze : boolean, optional
If "group" is a coordinate of object, `squeeze` controls whether
the subarrays have a dimension of length 1 along that coordinate or
if the dimension is squeezed out.
"""
if group_coord.ndim != 1:
# TODO: remove this limitation?
raise ValueError('`group_coord` must be 1 dimensional')
self.obj = obj
self.group_coord = group_coord
self.group_dim, = group_coord.dimensions
expected_size = dataset.as_dataset(obj).dimensions[self.group_dim]
if group_coord.size != expected_size:
raise ValueError('the group variable\'s length does not '
'match the length of this variable along its '
'dimension')
if group_coord.name in obj.dimensions:
# assume that group_coord already has sorted, unique values
if group_coord.dimensions != (group_coord.name,):
raise ValueError('`group_coord` is required to be a '
'coordinate variable if `group_coord.name` '
'is a dimension in `obj`')
group_indices = np.arange(group_coord.size)
if not squeeze:
# group_indices = group_indices.reshape(-1, 1)
# use slices to do views instead of fancy indexing
group_indices = [slice(i, i + 1) for i in group_indices]
unique_coord = group_coord
else:
# look through group_coord to find the unique values
unique_values, group_indices = unique_value_groups(group_coord)
# TODO: switch this to using the new DataArray constructor when we
# get around to writing it:
# unique_coord = xary.DataArray(unique_values, name=group_coord.name)
variables = {group_coord.name: (group_coord.name, unique_values)}
unique_coord = dataset.Dataset(variables)[group_coord.name]
self.group_indices = group_indices
self.unique_coord = unique_coord
self._groups = None
@property
def groups(self):
# provided to mimic pandas.groupby
if self._groups is None:
self._groups = dict(zip(self.unique_coord.values,
self.group_indices))
return self._groups
def __len__(self):
return self.unique_coord.size
def __iter__(self):
return itertools.izip(self.unique_coord.values, self._iter_grouped())
def _iter_grouped(self):
"""Iterate over each element in this group"""
for indices in self.group_indices:
yield self.obj.indexed(**{self.group_dim: indices})
def _infer_concat_args(self, applied_example):
if self.group_dim in applied_example.dimensions:
concat_dim = self.group_coord
indexers = self.group_indices
else:
concat_dim = self.unique_coord
indexers = np.arange(self.unique_coord.size)
return concat_dim, indexers
@property
def _combine(self):
return type(self.obj).concat
class ArrayGroupBy(GroupBy, ImplementsReduce):
"""GroupBy object specialized to grouping DataArray objects
"""
def _iter_grouped_shortcut(self):
"""Fast version of `_iter_grouped` that yields XArrays without metadata
"""
array = variable.as_variable(self.obj)
# build the new dimensions
if isinstance(self.group_indices[0], int):
# group_dim is squeezed out
dims = tuple(d for d in array.dimensions if d != self.group_dim)
else:
dims = array.dimensions
# slice the data and build the new Arrays directly
indexer = [slice(None)] * array.ndim
group_axis = array.get_axis_num(self.group_dim)
for indices in self.group_indices:
indexer[group_axis] = indices
data = array.values[tuple(indexer)]
yield variable.Variable(dims, data)
def _combine_shortcut(self, applied, concat_dim, indexers):
stacked = variable.Variable.concat(
applied, concat_dim, indexers, shortcut=True)
stacked.attrs.update(self.obj.attrs)
name = self.obj.name
ds = self.obj.dataset.unselect(name)
ds[concat_dim.name] = concat_dim
# remove extraneous dimensions
for dim in self.obj.dimensions:
if dim not in stacked.dimensions:
del ds[dim]
ds[name] = stacked
return ds[name]
def _restore_dim_order(self, stacked, concat_dim):
def lookup_order(dimension):
if dimension == self.group_coord.name:
dimension, = concat_dim.dimensions
if dimension in self.obj.dimensions:
axis = self.obj.get_axis_num(dimension)
else:
axis = 1e6 # some arbitrarily high value
return axis
new_order = sorted(stacked.dimensions, key=lookup_order)
return stacked.transpose(*new_order)
def apply(self, func, shortcut=False, **kwargs):
"""Apply a function over each array in the group and concatenate them
together into a new array.
`func` is called like `func(ar, *args, **kwargs)` for each array `ar`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the array. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped array after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each array.
shortcut : bool, optional
Whether or not to shortcut evaluation under the assumptions that:
(1) The action of `func` does not depend on any of the array
metadata (attributes, indices or other contained arrays) but
only on the data and dimensions.
(2) The action of `func` creates arrays with homogeneous metadata,
that is, with the same dimensions and attributes.
If these conditions are satisfied `shortcut` provides significant
speedup. This should be the case for many common groupby operations
(e.g., applying numpy ufuncs).
**kwargs
Used to call `func(ar, **kwargs)` for each array `ar`.
Returns
-------
applied : DataArray
The result of splitting, applying and combining this array.
"""
if shortcut:
grouped = self._iter_grouped_shortcut()
else:
grouped = self._iter_grouped()
applied = (func(arr, **kwargs) for arr in grouped)
# peek at applied to determine which coordinate to stack over
applied_example, applied = peek_at(applied)
concat_dim, indexers = self._infer_concat_args(applied_example)
if shortcut:
combined = self._combine_shortcut(applied, concat_dim, indexers)
else:
combined = self._combine(applied, concat_dim, indexers)
reordered = self._restore_dim_order(combined, concat_dim)
return reordered
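# Illustrative usage sketch (not part of the original module; `arr` is an assumed
# DataArray with a 1-dimensional 'label' coordinate):
#
#     means = arr.groupby('label').apply(lambda x: x.mean())
#
# Whether the results are stacked over 'label' or over the original dimension
# follows the two rules described in the docstring above.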
def reduce(self, func, dimension=None, axis=None, shortcut=True,
**kwargs):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of collapsing an
np.ndarray over an integer valued axis.
dimension : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dimension'
and 'axis' arguments can be supplied. If neither is supplied, then
`func` is calculated over all dimensions for each group item.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
def reduce_array(ar):
return ar.reduce(func, dimension, axis, **kwargs)
return self.apply(reduce_array, shortcut=shortcut)
_reduce_method_docstring = \
"""Reduce the items in this group by applying `{name}` along some
dimension(s).
Parameters
----------
dimension : str or sequence of str, optional
Dimension(s) over which to apply `{name}`.
axis : int or sequence of int, optional
Axis(es) over which to apply `{name}`. Only one of the 'dimension'
and 'axis' arguments can be supplied. If neither is supplied, then
`{name}` is calculated over all dimensions for each group item.
**kwargs : dict
Additional keyword arguments passed on to `{name}`.
Returns
-------
reduced : {cls}
New {cls} object with `{name}` applied to its data and the
indicated dimension(s) removed.
"""
inject_reduce_methods(ArrayGroupBy)
class DatasetGroupBy(GroupBy):
def apply(self, func, **kwargs):
"""Apply a function over each Dataset in the group and concatenate them
together into a new Dataset.
`func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the datasets. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped item after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each sub-dataset.
**kwargs
Used to call `func(ds, **kwargs)` for each sub-dataset `ds`.
Returns
-------
applied : Dataset
The result of splitting, applying and combining this dataset.
"""
applied = [func(ds, **kwargs) for ds in self._iter_grouped()]
concat_dim, indexers = self._infer_concat_args(applied[0])
combined = self._combine(applied, concat_dim, indexers)
return combined
|
apache-2.0
| -8,291,218,335,638,136,000 | 37.197015 | 81 | 0.604408 | false | 4.500879 | false | false | false |
BirkbeckCTP/janeway
|
src/submission/urls.py
|
1
|
1920
|
__copyright__ = "Copyright 2017 Birkbeck, University of London"
__author__ = "Martin Paul Eve & Andy Byers"
__license__ = "AGPL v3"
__maintainer__ = "Birkbeck Centre for Technology and Publishing"
from django.conf.urls import url
from submission import views
urlpatterns = [
url(r'^start/$', views.start, name='submission_start'),
url(r'^(?P<type>[-\w.]+)/start/$', views.start, name='submission_start'),
url(r'^(?P<article_id>\d+)/info/$', views.submit_info, name='submit_info'),
url(r'^(?P<article_id>\d+)/authors/$', views.submit_authors, name='submit_authors'),
url(r'^(?P<article_id>\d+)/authors/(?P<author_id>\d+)/delete/$', views.delete_author, name='delete_author'),
url(r'^(?P<article_id>\d+)/funders/(?P<funder_id>\d+)/delete/$', views.delete_funder, name='delete_funder'),
url(r'^(?P<article_id>\d+)/files/$', views.submit_files, name='submit_files'),
url(r'^(?P<article_id>\d+)/funding/$', views.submit_funding, name='submit_funding'),
url(r'^submissions/$', views.submit_submissions, name='submission_submissions'),
url(r'^(?P<article_id>\d+)/review/$', views.submit_review, name='submit_review'),
url(r'^manager/article/settings/article/(?P<article_id>\d+)/publishernotes/order/$', views.publisher_notes_order,
name='submission_article_publisher_notes_order'),
url(r'^manager/configurator/$', views.configurator, name='submission_configurator'),
url(r'^manager/additional_fields/$', views.fields, name='submission_fields'),
url(r'^manager/additional_fields/(?P<field_id>\d+)/$', views.fields, name='submission_fields_id'),
url(r'^manager/licences/$', views.licenses, name='submission_licenses'),
url(r'^manager/licences/(?P<license_pk>\d+)/delete/',
views.delete_license,
name='submission_delete_license'),
url(r'^manager/licences/(?P<license_pk>\d+)/', views.licenses, name='submission_licenses_id'),
]
|
agpl-3.0
| 7,696,528,079,549,939,000 | 49.526316 | 117 | 0.665104 | false | 3.327556 | false | false | false |
dsoprea/RandomUtility
|
python/ssl_sign.py
|
1
|
3373
|
#!/usr/bin/env python2.7
import os.path
import argparse
import datetime
import hashlib
import random
import time
import M2Crypto.X509
import M2Crypto.ASN1
import M2Crypto.RSA
import M2Crypto.EVP
_OUTPUT_PATH = 'output'
_CA_PASSPHRASE = 'test'
_CA_KEY_PEM_FILENAME = 'output/ca.key.pem'
_CA_CRT_PEM_FILENAME = 'output/ca.crt.pem'
_SERIAL_NUMBER_GENERATOR_CB = lambda: \
hashlib.sha1(str(time.time()) + str(random.random())).\
hexdigest()
def pem_private_to_rsa(private_key_pem, passphrase=None):
def passphrase_cb(*args):
return passphrase
rsa = M2Crypto.RSA.load_key_string(
private_key_pem,
callback=passphrase_cb)
return rsa
def pem_csr_to_csr(csr_pem):
return M2Crypto.X509.load_request_string(csr_pem)
def pem_certificate_to_x509(cert_pem):
return M2Crypto.X509.load_cert_string(cert_pem)
def new_cert(ca_private_key_pem, csr_pem, validity_td, issuer_name, bits=2048,
is_ca=False, passphrase=None):
ca_rsa = pem_private_to_rsa(
ca_private_key_pem,
passphrase=passphrase)
def callback(*args):
pass
csr = pem_csr_to_csr(csr_pem)
public_key = csr.get_pubkey()
name = csr.get_subject()
cert = M2Crypto.X509.X509()
sn_hexstring = _SERIAL_NUMBER_GENERATOR_CB()
sn = int(sn_hexstring, 16)
cert.set_serial_number(sn)
cert.set_subject(name)
now_epoch = long(time.time())
notBefore = M2Crypto.ASN1.ASN1_UTCTIME()
notBefore.set_time(now_epoch)
notAfter = M2Crypto.ASN1.ASN1_UTCTIME()
notAfter.set_time(now_epoch + long(validity_td.total_seconds()))
cert.set_not_before(notBefore)
cert.set_not_after(notAfter)
cert.set_issuer(issuer_name)
cert.set_pubkey(public_key)
    # Honour the is_ca argument instead of hard-coding a non-CA certificate.
    ext = M2Crypto.X509.new_extension('basicConstraints', 'CA:TRUE' if is_ca else 'CA:FALSE')
cert.add_ext(ext)
pkey = M2Crypto.EVP.PKey()
pkey.assign_rsa(ca_rsa)
cert.sign(pkey, 'sha1')
cert_pem = cert.as_pem()
return cert_pem
def sign(ca_key_filepath, ca_crt_filepath, csr_filepath, passphrase=None):
with open(ca_crt_filepath) as f:
ca_cert_pem = f.read()
with open(ca_key_filepath) as f:
ca_private_key_pem = f.read()
ca_cert = pem_certificate_to_x509(ca_cert_pem)
issuer_name = ca_cert.get_issuer()
with open(csr_filepath) as f:
csr_pem = f.read()
validity_td = datetime.timedelta(days=400)
return new_cert(
ca_private_key_pem,
csr_pem,
validity_td,
issuer_name,
passphrase=passphrase)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Sign a CSR')
parser.add_argument('ca_key_filepath',
help='File-path of CA PEM private-key')
parser.add_argument('ca_crt_filepath',
help='File-path of CA PEM certificate')
parser.add_argument('csr_filepath',
help='File-path of PEM CSR')
parser.add_argument('-p', '--passphrase',
help='CA passphrase')
args = parser.parse_args()
crt_pem = sign(
args.ca_key_filepath,
args.ca_crt_filepath,
args.csr_filepath,
passphrase=args.passphrase)
print(crt_pem)
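# Example invocation (a sketch; the CA file names follow the _CA_*_FILENAME
# defaults above, 'request.csr.pem' is a hypothetical CSR created elsewhere,
# and 'test' matches _CA_PASSPHRASE):
#
#   ./ssl_sign.py output/ca.key.pem output/ca.crt.pem request.csr.pem -p test
#
# The signed certificate PEM is printed to stdout.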
|
gpl-2.0
| 3,708,214,889,509,246,500 | 24.360902 | 87 | 0.607471 | false | 3.265247 | false | false | false |
jjmiranda/edx-platform
|
openedx/core/djangoapps/user_api/views.py
|
1
|
40311
|
"""HTTP end-points for the User API. """
import copy
from opaque_keys import InvalidKeyError
from django.conf import settings
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured, NON_FIELD_ERRORS, ValidationError
from django.utils.translation import ugettext as _
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import ensure_csrf_cookie, csrf_protect, csrf_exempt
from opaque_keys.edx import locator
from rest_framework import authentication
from rest_framework import filters
from rest_framework import generics
from rest_framework import status
from rest_framework import viewsets
from rest_framework.views import APIView
from rest_framework.exceptions import ParseError
from django_countries import countries
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from openedx.core.lib.api.permissions import ApiKeyHeaderPermission
import third_party_auth
from django_comment_common.models import Role
from edxmako.shortcuts import marketing_link
from student.forms import get_registration_extension_form
from student.views import create_account_with_params
from student.cookies import set_logged_in_cookies
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.lib.api.authentication import SessionAuthenticationAllowInactiveUser
from util.json_request import JsonResponse
from .preferences.api import get_country_time_zones, update_email_opt_in
from .helpers import FormDescription, shim_student_view, require_post_params
from .models import UserPreference, UserProfile
from .accounts import (
NAME_MAX_LENGTH, EMAIL_MIN_LENGTH, EMAIL_MAX_LENGTH, PASSWORD_MIN_LENGTH, PASSWORD_MAX_LENGTH,
USERNAME_MIN_LENGTH, USERNAME_MAX_LENGTH
)
from .accounts.api import check_account_exists
from .serializers import CountryTimeZoneSerializer, UserSerializer, UserPreferenceSerializer
class LoginSessionView(APIView):
"""HTTP end-points for logging in users. """
# This end-point is available to anonymous users,
# so do not require authentication.
authentication_classes = []
@method_decorator(ensure_csrf_cookie)
def get(self, request):
"""Return a description of the login form.
This decouples clients from the API definition:
if the API decides to modify the form, clients won't need
to be updated.
See `user_api.helpers.FormDescription` for examples
of the JSON-encoded form description.
Returns:
HttpResponse
"""
form_desc = FormDescription("post", reverse("user_api_login_session"))
# Translators: This label appears above a field on the login form
# meant to hold the user's email address.
email_label = _(u"Email")
# Translators: This example email address is used as a placeholder in
# a field on the login form meant to hold the user's email address.
email_placeholder = _(u"[email protected]")
# Translators: These instructions appear on the login form, immediately
# below a field meant to hold the user's email address.
email_instructions = _("The email address you used to register with {platform_name}").format(
platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
)
form_desc.add_field(
"email",
field_type="email",
label=email_label,
placeholder=email_placeholder,
instructions=email_instructions,
restrictions={
"min_length": EMAIL_MIN_LENGTH,
"max_length": EMAIL_MAX_LENGTH,
}
)
# Translators: This label appears above a field on the login form
# meant to hold the user's password.
password_label = _(u"Password")
form_desc.add_field(
"password",
label=password_label,
field_type="password",
restrictions={
"min_length": PASSWORD_MIN_LENGTH,
"max_length": PASSWORD_MAX_LENGTH,
}
)
form_desc.add_field(
"remember",
field_type="checkbox",
label=_("Remember me"),
default=False,
required=False,
)
return HttpResponse(form_desc.to_json(), content_type="application/json")
@method_decorator(require_post_params(["email", "password"]))
@method_decorator(csrf_protect)
def post(self, request):
"""Log in a user.
You must send all required form fields with the request.
You can optionally send an `analytics` param with a JSON-encoded
object with additional info to include in the login analytics event.
Currently, the only supported field is "enroll_course_id" to indicate
that the user logged in while enrolling in a particular course.
Arguments:
request (HttpRequest)
Returns:
HttpResponse: 200 on success
HttpResponse: 400 if the request is not valid.
HttpResponse: 403 if authentication failed.
403 with content "third-party-auth" if the user
has successfully authenticated with a third party provider
but does not have a linked account.
HttpResponse: 302 if redirecting to another page.
Example Usage:
POST /user_api/v1/login_session
with POST params `email`, `password`, and `remember`.
200 OK
"""
# For the initial implementation, shim the existing login view
# from the student Django app.
from student.views import login_user
return shim_student_view(login_user, check_logged_in=True)(request)
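    # Client-side sketch of the flow documented above (assumes the `requests`
    # package and a locally served LMS; the path comes from the docstring's
    # example usage, everything else is illustrative):
    #
    #   import requests
    #   session = requests.Session()
    #   session.get("http://localhost:8000/user_api/v1/login_session")  # obtain CSRF cookie
    #   session.post(
    #       "http://localhost:8000/user_api/v1/login_session",
    #       data={"email": "[email protected]", "password": "secret", "remember": True},
    #       headers={"X-CSRFToken": session.cookies.get("csrftoken")},
    #   )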
class RegistrationView(APIView):
"""HTTP end-points for creating a new user. """
DEFAULT_FIELDS = ["email", "name", "username", "password"]
EXTRA_FIELDS = [
"first_name",
"last_name",
"city",
"state",
"country",
"gender",
"year_of_birth",
"level_of_education",
"company",
"title",
"mailing_address",
"goals",
"honor_code",
"terms_of_service",
]
# This end-point is available to anonymous users,
# so do not require authentication.
authentication_classes = []
def _is_field_visible(self, field_name):
"""Check whether a field is visible based on Django settings. """
return self._extra_fields_setting.get(field_name) in ["required", "optional"]
def _is_field_required(self, field_name):
"""Check whether a field is required based on Django settings. """
return self._extra_fields_setting.get(field_name) == "required"
def __init__(self, *args, **kwargs):
super(RegistrationView, self).__init__(*args, **kwargs)
# Backwards compatibility: Honor code is required by default, unless
# explicitly set to "optional" in Django settings.
self._extra_fields_setting = copy.deepcopy(configuration_helpers.get_value('REGISTRATION_EXTRA_FIELDS'))
if not self._extra_fields_setting:
self._extra_fields_setting = copy.deepcopy(settings.REGISTRATION_EXTRA_FIELDS)
self._extra_fields_setting["honor_code"] = self._extra_fields_setting.get("honor_code", "required")
# Check that the setting is configured correctly
for field_name in self.EXTRA_FIELDS:
if self._extra_fields_setting.get(field_name, "hidden") not in ["required", "optional", "hidden"]:
msg = u"Setting REGISTRATION_EXTRA_FIELDS values must be either required, optional, or hidden."
raise ImproperlyConfigured(msg)
# Map field names to the instance method used to add the field to the form
self.field_handlers = {}
for field_name in self.DEFAULT_FIELDS + self.EXTRA_FIELDS:
handler = getattr(self, "_add_{field_name}_field".format(field_name=field_name))
self.field_handlers[field_name] = handler
@method_decorator(ensure_csrf_cookie)
def get(self, request):
"""Return a description of the registration form.
This decouples clients from the API definition:
if the API decides to modify the form, clients won't need
to be updated.
This is especially important for the registration form,
since different edx-platform installations might
collect different demographic information.
See `user_api.helpers.FormDescription` for examples
of the JSON-encoded form description.
Arguments:
request (HttpRequest)
Returns:
HttpResponse
"""
form_desc = FormDescription("post", reverse("user_api_registration"))
self._apply_third_party_auth_overrides(request, form_desc)
# Default fields are always required
for field_name in self.DEFAULT_FIELDS:
self.field_handlers[field_name](form_desc, required=True)
# Custom form fields can be added via the form set in settings.REGISTRATION_EXTENSION_FORM
custom_form = get_registration_extension_form()
if custom_form:
for field_name, field in custom_form.fields.items():
restrictions = {}
if getattr(field, 'max_length', None):
restrictions['max_length'] = field.max_length
if getattr(field, 'min_length', None):
restrictions['min_length'] = field.min_length
field_options = getattr(
getattr(custom_form, 'Meta', None), 'serialization_options', {}
).get(field_name, {})
field_type = field_options.get('field_type', FormDescription.FIELD_TYPE_MAP.get(field.__class__))
if not field_type:
raise ImproperlyConfigured(
"Field type '{}' not recognized for registration extension field '{}'.".format(
field_type,
field_name
)
)
form_desc.add_field(
field_name, label=field.label,
default=field_options.get('default'),
field_type=field_options.get('field_type', FormDescription.FIELD_TYPE_MAP.get(field.__class__)),
placeholder=field.initial, instructions=field.help_text, required=field.required,
restrictions=restrictions,
options=getattr(field, 'choices', None), error_messages=field.error_messages,
include_default_option=field_options.get('include_default_option'),
)
# Extra fields configured in Django settings
# may be required, optional, or hidden
for field_name in self.EXTRA_FIELDS:
if self._is_field_visible(field_name):
self.field_handlers[field_name](
form_desc,
required=self._is_field_required(field_name)
)
return HttpResponse(form_desc.to_json(), content_type="application/json")
@method_decorator(csrf_exempt)
def post(self, request):
"""Create the user's account.
You must send all required form fields with the request.
You can optionally send a "course_id" param to indicate in analytics
events that the user registered while enrolling in a particular course.
Arguments:
request (HTTPRequest)
Returns:
HttpResponse: 200 on success
HttpResponse: 400 if the request is not valid.
HttpResponse: 409 if an account with the given username or email
address already exists
"""
data = request.POST.copy()
email = data.get('email')
username = data.get('username')
# Handle duplicate email/username
conflicts = check_account_exists(email=email, username=username)
if conflicts:
conflict_messages = {
"email": _(
# Translators: This message is shown to users who attempt to create a new
# account using an email address associated with an existing account.
u"It looks like {email_address} belongs to an existing account. "
u"Try again with a different email address."
).format(email_address=email),
"username": _(
# Translators: This message is shown to users who attempt to create a new
# account using a username associated with an existing account.
u"It looks like {username} belongs to an existing account. "
u"Try again with a different username."
).format(username=username),
}
errors = {
field: [{"user_message": conflict_messages[field]}]
for field in conflicts
}
return JsonResponse(errors, status=409)
# Backwards compatibility: the student view expects both
# terms of service and honor code values. Since we're combining
# these into a single checkbox, the only value we may get
# from the new view is "honor_code".
# Longer term, we will need to make this more flexible to support
# open source installations that may have separate checkboxes
# for TOS, privacy policy, etc.
if data.get("honor_code") and "terms_of_service" not in data:
data["terms_of_service"] = data["honor_code"]
try:
user = create_account_with_params(request, data)
except ValidationError as err:
# Should only get non-field errors from this function
assert NON_FIELD_ERRORS not in err.message_dict
# Only return first error for each field
errors = {
field: [{"user_message": error} for error in error_list]
for field, error_list in err.message_dict.items()
}
return JsonResponse(errors, status=400)
response = JsonResponse({"success": True})
set_logged_in_cookies(request, response, user)
return response
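    # Shape of the 409 payload produced above when the email is already in
    # use (illustrative; the human-readable text comes from the translated
    # conflict_messages):
    #
    #   {
    #       "email": [
    #           {"user_message": "It looks like <address> belongs to an existing account. ..."}
    #       ]
    #   }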
def _add_email_field(self, form_desc, required=True):
"""Add an email field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's email address.
email_label = _(u"Email")
# Translators: This example email address is used as a placeholder in
# a field on the registration form meant to hold the user's email address.
email_placeholder = _(u"[email protected]")
form_desc.add_field(
"email",
field_type="email",
label=email_label,
placeholder=email_placeholder,
restrictions={
"min_length": EMAIL_MIN_LENGTH,
"max_length": EMAIL_MAX_LENGTH,
},
required=required
)
def _add_name_field(self, form_desc, required=True):
"""Add a name field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's full name.
name_label = _(u"Full name")
# Translators: This example name is used as a placeholder in
# a field on the registration form meant to hold the user's name.
name_placeholder = _(u"Jane Doe")
# Translators: These instructions appear on the registration form, immediately
# below a field meant to hold the user's full name.
name_instructions = _(u"Your legal name, used for any certificates you earn.")
form_desc.add_field(
"name",
label=name_label,
placeholder=name_placeholder,
instructions=name_instructions,
restrictions={
"max_length": NAME_MAX_LENGTH,
},
required=required
)
def _add_username_field(self, form_desc, required=True):
"""Add a username field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's public username.
username_label = _(u"Public username")
username_instructions = _(
# Translators: These instructions appear on the registration form, immediately
# below a field meant to hold the user's public username.
u"The name that will identify you in your courses - "
u"{bold_start}(cannot be changed later){bold_end}"
).format(bold_start=u'<strong>', bold_end=u'</strong>')
# Translators: This example username is used as a placeholder in
# a field on the registration form meant to hold the user's username.
username_placeholder = _(u"JaneDoe")
form_desc.add_field(
"username",
label=username_label,
instructions=username_instructions,
placeholder=username_placeholder,
restrictions={
"min_length": USERNAME_MIN_LENGTH,
"max_length": USERNAME_MAX_LENGTH,
},
required=required
)
def _add_password_field(self, form_desc, required=True):
"""Add a password field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's password.
password_label = _(u"Password")
form_desc.add_field(
"password",
label=password_label,
field_type="password",
restrictions={
"min_length": PASSWORD_MIN_LENGTH,
"max_length": PASSWORD_MAX_LENGTH,
},
required=required
)
def _add_level_of_education_field(self, form_desc, required=True):
"""Add a level of education field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the user's highest completed level of education.
education_level_label = _(u"Highest level of education completed")
# The labels are marked for translation in UserProfile model definition.
options = [(name, _(label)) for name, label in UserProfile.LEVEL_OF_EDUCATION_CHOICES] # pylint: disable=translation-of-non-string
form_desc.add_field(
"level_of_education",
label=education_level_label,
field_type="select",
options=options,
include_default_option=True,
required=required
)
def _add_gender_field(self, form_desc, required=True):
"""Add a gender field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the user's gender.
gender_label = _(u"Gender")
# The labels are marked for translation in UserProfile model definition.
options = [(name, _(label)) for name, label in UserProfile.GENDER_CHOICES] # pylint: disable=translation-of-non-string
form_desc.add_field(
"gender",
label=gender_label,
field_type="select",
options=options,
include_default_option=True,
required=required
)
def _add_year_of_birth_field(self, form_desc, required=True):
"""Add a year of birth field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the user's year of birth.
yob_label = _(u"Year of birth")
options = [(unicode(year), unicode(year)) for year in UserProfile.VALID_YEARS]
form_desc.add_field(
"year_of_birth",
label=yob_label,
field_type="select",
options=options,
include_default_option=True,
required=required
)
def _add_mailing_address_field(self, form_desc, required=True):
"""Add a mailing address field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's mailing address.
mailing_address_label = _(u"Mailing address")
form_desc.add_field(
"mailing_address",
label=mailing_address_label,
field_type="textarea",
required=required
)
def _add_goals_field(self, form_desc, required=True):
"""Add a goals field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This phrase appears above a field on the registration form
# meant to hold the user's reasons for registering with edX.
goals_label = _(u"Tell us why you're interested in {platform_name}").format(
platform_name=configuration_helpers.get_value("PLATFORM_NAME", settings.PLATFORM_NAME)
)
form_desc.add_field(
"goals",
label=goals_label,
field_type="textarea",
required=required
)
def _add_city_field(self, form_desc, required=True):
"""Add a city field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# which allows the user to input the city in which they live.
city_label = _(u"City")
form_desc.add_field(
"city",
label=city_label,
required=required
)
def _add_state_field(self, form_desc, required=False):
"""Add a State/Province/Region field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to False
"""
# Translators: This label appears above a field on the registration form
# which allows the user to input the State/Province/Region in which they live.
state_label = _(u"State/Province/Region")
form_desc.add_field(
"state",
label=state_label,
required=required
)
def _add_company_field(self, form_desc, required=False):
"""Add a Company field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to False
"""
# Translators: This label appears above a field on the registration form
# which allows the user to input the Company
company_label = _(u"Company")
form_desc.add_field(
"company",
label=company_label,
required=required
)
def _add_title_field(self, form_desc, required=False):
"""Add a Title field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to False
"""
# Translators: This label appears above a field on the registration form
# which allows the user to input the Title
title_label = _(u"Title")
form_desc.add_field(
"title",
label=title_label,
required=required
)
def _add_first_name_field(self, form_desc, required=False):
"""Add a First Name field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to False
"""
# Translators: This label appears above a field on the registration form
# which allows the user to input the First Name
first_name_label = _(u"First Name")
form_desc.add_field(
"first_name",
label=first_name_label,
required=required
)
def _add_last_name_field(self, form_desc, required=False):
"""Add a Last Name field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to False
"""
# Translators: This label appears above a field on the registration form
        # which allows the user to input the Last Name
last_name_label = _(u"Last Name")
form_desc.add_field(
"last_name",
label=last_name_label,
required=required
)
def _add_country_field(self, form_desc, required=True):
"""Add a country field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the country in which the user lives.
country_label = _(u"Country")
error_msg = _(u"Please select your Country.")
form_desc.add_field(
"country",
label=country_label,
field_type="select",
options=list(countries),
include_default_option=True,
required=required,
error_messages={
"required": error_msg
}
)
def _add_honor_code_field(self, form_desc, required=True):
"""Add an honor code field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Separate terms of service and honor code checkboxes
if self._is_field_visible("terms_of_service"):
terms_label = _(u"Honor Code")
terms_link = marketing_link("HONOR")
terms_text = _(u"Review the Honor Code")
# Combine terms of service and honor code checkboxes
else:
# Translators: This is a legal document users must agree to
# in order to register a new account.
terms_label = _(u"Terms of Service and Honor Code")
terms_link = marketing_link("HONOR")
terms_text = _(u"Review the Terms of Service and Honor Code")
# Translators: "Terms of Service" is a legal document users must agree to
# in order to register a new account.
label = _(u"I agree to the {platform_name} {terms_of_service}").format(
platform_name=configuration_helpers.get_value("PLATFORM_NAME", settings.PLATFORM_NAME),
terms_of_service=terms_label
)
# Translators: "Terms of Service" is a legal document users must agree to
# in order to register a new account.
error_msg = _(u"You must agree to the {platform_name} {terms_of_service}").format(
platform_name=configuration_helpers.get_value("PLATFORM_NAME", settings.PLATFORM_NAME),
terms_of_service=terms_label
)
form_desc.add_field(
"honor_code",
label=label,
field_type="checkbox",
default=False,
required=required,
error_messages={
"required": error_msg
},
supplementalLink=terms_link,
supplementalText=terms_text
)
def _add_terms_of_service_field(self, form_desc, required=True):
"""Add a terms of service field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This is a legal document users must agree to
# in order to register a new account.
terms_label = _(u"Terms of Service")
terms_link = marketing_link("TIS")
terms_text = _(u"Review the Terms of Service")
# Translators: "Terms of service" is a legal document users must agree to
# in order to register a new account.
label = _(u"I agree to the {platform_name} {terms_of_service}").format(
platform_name=configuration_helpers.get_value("PLATFORM_NAME", settings.PLATFORM_NAME),
terms_of_service=terms_label
)
# Translators: "Terms of service" is a legal document users must agree to
# in order to register a new account.
error_msg = _(u"You must agree to the {platform_name} {terms_of_service}").format(
platform_name=configuration_helpers.get_value("PLATFORM_NAME", settings.PLATFORM_NAME),
terms_of_service=terms_label
)
form_desc.add_field(
"terms_of_service",
label=label,
field_type="checkbox",
default=False,
required=required,
error_messages={
"required": error_msg
},
supplementalLink=terms_link,
supplementalText=terms_text
)
def _apply_third_party_auth_overrides(self, request, form_desc):
"""Modify the registration form if the user has authenticated with a third-party provider.
If a user has successfully authenticated with a third-party provider,
but does not yet have an account with EdX, we want to fill in
the registration form with any info that we get from the
provider.
This will also hide the password field, since we assign users a default
(random) password on the assumption that they will be using
third-party auth to log in.
Arguments:
request (HttpRequest): The request for the registration form, used
to determine if the user has successfully authenticated
with a third-party provider.
form_desc (FormDescription): The registration form description
"""
if third_party_auth.is_enabled():
running_pipeline = third_party_auth.pipeline.get(request)
if running_pipeline:
current_provider = third_party_auth.provider.Registry.get_from_pipeline(running_pipeline)
if current_provider:
# Override username / email / full name
field_overrides = current_provider.get_register_form_data(
running_pipeline.get('kwargs')
)
for field_name in self.DEFAULT_FIELDS:
if field_name in field_overrides:
form_desc.override_field_properties(
field_name, default=field_overrides[field_name]
)
# Hide the password field
form_desc.override_field_properties(
"password",
default="",
field_type="hidden",
required=False,
label="",
instructions="",
restrictions={}
)
class PasswordResetView(APIView):
"""HTTP end-point for GETting a description of the password reset form. """
# This end-point is available to anonymous users,
# so do not require authentication.
authentication_classes = []
@method_decorator(ensure_csrf_cookie)
def get(self, request):
"""Return a description of the password reset form.
This decouples clients from the API definition:
if the API decides to modify the form, clients won't need
to be updated.
See `user_api.helpers.FormDescription` for examples
of the JSON-encoded form description.
Returns:
HttpResponse
"""
form_desc = FormDescription("post", reverse("password_change_request"))
# Translators: This label appears above a field on the password reset
# form meant to hold the user's email address.
email_label = _(u"Email")
# Translators: This example email address is used as a placeholder in
# a field on the password reset form meant to hold the user's email address.
email_placeholder = _(u"[email protected]")
# Translators: These instructions appear on the password reset form,
# immediately below a field meant to hold the user's email address.
email_instructions = _(u"The email address you used to register with {platform_name}").format(
platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
)
form_desc.add_field(
"email",
field_type="email",
label=email_label,
placeholder=email_placeholder,
instructions=email_instructions,
restrictions={
"min_length": EMAIL_MIN_LENGTH,
"max_length": EMAIL_MAX_LENGTH,
}
)
return HttpResponse(form_desc.to_json(), content_type="application/json")
class UserViewSet(viewsets.ReadOnlyModelViewSet):
"""
DRF class for interacting with the User ORM object
"""
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (ApiKeyHeaderPermission,)
queryset = User.objects.all().prefetch_related("preferences")
serializer_class = UserSerializer
paginate_by = 10
paginate_by_param = "page_size"
class ForumRoleUsersListView(generics.ListAPIView):
"""
Forum roles are represented by a list of user dicts
"""
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (ApiKeyHeaderPermission,)
serializer_class = UserSerializer
paginate_by = 10
paginate_by_param = "page_size"
def get_queryset(self):
"""
Return a list of users with the specified role/course pair
"""
name = self.kwargs['name']
course_id_string = self.request.query_params.get('course_id')
if not course_id_string:
raise ParseError('course_id must be specified')
course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id_string)
role = Role.objects.get_or_create(course_id=course_id, name=name)[0]
users = role.users.all()
return users
class UserPreferenceViewSet(viewsets.ReadOnlyModelViewSet):
"""
DRF class for interacting with the UserPreference ORM
"""
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (ApiKeyHeaderPermission,)
queryset = UserPreference.objects.all()
filter_backends = (filters.DjangoFilterBackend,)
filter_fields = ("key", "user")
serializer_class = UserPreferenceSerializer
paginate_by = 10
paginate_by_param = "page_size"
class PreferenceUsersListView(generics.ListAPIView):
"""
DRF class for listing a user's preferences
"""
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (ApiKeyHeaderPermission,)
serializer_class = UserSerializer
paginate_by = 10
paginate_by_param = "page_size"
def get_queryset(self):
return User.objects.filter(preferences__key=self.kwargs["pref_key"]).prefetch_related("preferences")
class UpdateEmailOptInPreference(APIView):
"""View for updating the email opt in preference. """
authentication_classes = (SessionAuthenticationAllowInactiveUser,)
@method_decorator(require_post_params(["course_id", "email_opt_in"]))
@method_decorator(ensure_csrf_cookie)
def post(self, request):
""" Post function for updating the email opt in preference.
Allows the modification or creation of the email opt in preference at an
organizational level.
Args:
request (Request): The request should contain the following POST parameters:
* course_id: The slash separated course ID. Used to determine the organization
for this preference setting.
* email_opt_in: "True" or "False" to determine if the user is opting in for emails from
this organization. If the string does not match "True" (case insensitive) it will
assume False.
"""
course_id = request.data['course_id']
try:
org = locator.CourseLocator.from_string(course_id).org
except InvalidKeyError:
return HttpResponse(
status=400,
content="No course '{course_id}' found".format(course_id=course_id),
content_type="text/plain"
)
# Only check for true. All other values are False.
email_opt_in = request.data['email_opt_in'].lower() == 'true'
update_email_opt_in(request.user, org, email_opt_in)
return HttpResponse(status=status.HTTP_200_OK)
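    # Example form-encoded POST body accepted above (the course id is
    # illustrative; anything other than a case-insensitive "True" opts out):
    #
    #   course_id=course-v1:OrgX+DemoX+2016&email_opt_in=True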
class CountryTimeZoneListView(generics.ListAPIView):
"""
**Use Cases**
Retrieves a list of all time zones, by default, or common time zones for country, if given
The country is passed in as its ISO 3166-1 Alpha-2 country code as an
optional 'country_code' argument. The country code is also case-insensitive.
**Example Requests**
GET /user_api/v1/preferences/time_zones/
GET /user_api/v1/preferences/time_zones/?country_code=FR
**Example GET Response**
If the request is successful, an HTTP 200 "OK" response is returned along with a
list of time zone dictionaries for all time zones or just for time zones commonly
used in a country, if given.
Each time zone dictionary contains the following values.
* time_zone: The name of the time zone.
* description: The display version of the time zone
"""
serializer_class = CountryTimeZoneSerializer
paginator = None
def get_queryset(self):
country_code = self.request.GET.get('country_code', None)
return get_country_time_zones(country_code)
|
agpl-3.0
| -1,252,888,636,332,038,100 | 36.5685 | 139 | 0.614324 | false | 4.655925 | false | false | false |
AVSystem/avs_commons
|
tools/rbtree_fuzz_gen_full_tree.py
|
1
|
1242
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2021 AVSystem <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import functools
import struct
nodes = int(sys.argv[1]) if len(sys.argv) > 1 else 1
def binsearch_depth(nums, num):
if not nums:
raise ValueError('should never happen')
at = len(nums) // 2
if nums[at] == num:
return 1
elif num < nums[at]:
return 1 + binsearch_depth(nums[:at], num)
else:
return 1 + binsearch_depth(nums[at+1:], num)
values = list(range(1, nodes+1))
ordered_values = sorted(values, key=functools.partial(binsearch_depth, values))
for num in ordered_values:
sys.stdout.buffer.write(struct.pack('=BI', 0, num))
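# For example, with `nodes = 7` the values 1..7 are written in the order
# 4, 2, 6, 1, 3, 5, 7 -- level by level of a balanced search tree -- each as
# a '=BI' record: a zero opcode byte followed by the value as a 32-bit uint.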
|
apache-2.0
| -2,947,104,314,626,366,000 | 30.846154 | 79 | 0.697262 | false | 3.518414 | false | false | false |
millen1m/flask-restplus-server-example
|
app/modules/auth/views.py
|
1
|
4794
|
# coding: utf-8
"""
OAuth2 provider setup.
It is based on the code from the example:
https://github.com/lepture/example-oauth2-server
More details are available here:
* http://flask-oauthlib.readthedocs.org/en/latest/oauth2.html
* http://lepture.com/en/2013/create-oauth-server
"""
from flask import Blueprint, request, render_template, jsonify, session, redirect
from flask_login import current_user
import flask_login
import webargs
from werkzeug import exceptions as http_exceptions
from werkzeug import security
from app.extensions import db, api, oauth2, login_manager
from app.modules.users.models import User
from . import parameters
from .models import OAuth2Client
import logging
log = logging.getLogger('flask_oauthlib')
login_manager.login_view = "auth.login"
auth_blueprint = Blueprint('auth', __name__, url_prefix='/auth') # pylint: disable=invalid-name
def get_current_user():
if 'id' in session:
uid = session['id']
return User.query.get(uid)
else:
return User.query.get(1)
@auth_blueprint.route('/login', methods=['GET', 'POST'])
def login(*args, **kwargs):
if request.method == 'GET': # Note: it is critical to not have the action parameter on the form
return '''
Please log in to access your account
<form method='POST'>
<input type='text' name='email' id='email' placeholder='email'></input>
<input type='password' name='pw' id='pw' placeholder='password'></input>
<input type='submit' name='submit'></input>
</form>
'''
    email = request.form['email']
    user = None
    if request.form['pw']:
        # Verify the supplied password; a bare primary-key lookup by email
        # would let any known address log in regardless of password.
        user = User.find_with_password(email, request.form['pw'])
    if user is None:
        return 'Bad login'
    flask_login.login_user(user)
    next = request.args.get("next")
    if next is None:
        next = 'auth/protected'
    return redirect(next)
@auth_blueprint.route('/logout', methods=['GET', 'POST'])
@flask_login.login_required
def logout(*args, **kwargs):
flask_login.logout_user()
return '''
<h1>You have successfully logged out</h1>
Would you like to log in again?
<form method='POST' action='login'>
<input type='text' name='email' id='email' placeholder='email'></input>
<input type='password' name='pw' id='pw' placeholder='password'></input>
<input type='submit' name='login'></input>
</form>
'''
@auth_blueprint.route('/protected')
@flask_login.login_required
def protected():
return 'Logged in as: ' + flask_login.current_user.username
@auth_blueprint.route('/oauth2/token', methods=['GET', 'POST'])
@oauth2.token_handler
def access_token(*args, **kwargs):
# pylint: disable=unused-argument
"""
This endpoint is for exchanging/refreshing an access token.
Returns:
response (dict): a dictionary or None as the extra credentials for
creating the token response.
"""
log.debug("requested token")
return None
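# Client-side sketch of exchanging credentials for a token at the endpoint
# above (resource-owner password grant via the usergetter below; host, port
# and the client_id value are assumptions, not defined in this module):
#
#   import requests
#   resp = requests.post(
#       "http://localhost:5000/auth/oauth2/token",
#       data={
#           "grant_type": "password",
#           "username": "jane",
#           "password": "secret",
#           "client_id": "my-client-id",
#       },
#   )
#   token = resp.json()["access_token"]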
@auth_blueprint.route('/oauth2/revoke', methods=['POST'])
@oauth2.revoke_handler
def revoke_token():
"""
This endpoint allows a user to revoke their access token.
"""
pass
@auth_blueprint.route('/oauth2/errors', methods=['POST'])
def error_message():
"""
This endpoint allows a user to revoke their access token.
"""
log.debug("Error")
pass
@oauth2.usergetter
def get_user(username, password, *args, **kwargs):
    user = User.query.filter_by(username=username).first()
    log.debug("Running user getter")
    # Guard against unknown usernames before checking the password.
    if user is not None and user.check_password(password):
        return user
    return None
@login_manager.user_loader
def load_user(user_id):
return User.query.get(user_id)
@auth_blueprint.route('/oauth2/authorize', methods=['GET', 'POST'])
@flask_login.login_required
@oauth2.authorize_handler
def authorize(*args, **kwargs):
# pylint: disable=unused-argument
"""
This endpoint asks user if he grants access to his data to the requesting
application.
"""
log.debug("requested authorization")
if not current_user.is_authenticated:
        log.debug("NOT AUTHENTICATED")
return api.abort(code=http_exceptions.Unauthorized.code)
if request.method == 'GET':
client_id = kwargs.get('client_id')
log.debug("render authorizer")
oauth2_client = OAuth2Client.query.filter_by(client_id=client_id).first()
kwargs['client'] = oauth2_client
kwargs['user'] = current_user
# TODO: improve template design
return render_template('authorize.html', **kwargs)
confirm = request.form.get('confirm', 'no')
return confirm == 'yes'
|
mit
| -8,786,363,367,552,155,000 | 28.231707 | 100 | 0.645807 | false | 3.81992 | false | false | false |
datacommonsorg/website
|
server/tests/i18n_test.py
|
1
|
1720
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from flask import g
from unittest.mock import patch
from main import app
class TestHlParamSelection(unittest.TestCase):
def test_no_hl(self):
with app.test_client() as c:
c.get('/')
assert (g.locale == 'en')
assert (g.locale_choices == ['en'])
def test_default_hl(self):
with app.test_client() as c:
c.get('/?hl=en')
assert (g.locale == 'en')
assert (g.locale_choices == ['en'])
def test_simple_hl(self):
with app.test_client() as c:
c.get('/?hl=ru')
assert (g.locale == 'ru')
assert (g.locale_choices == ['ru', 'en'])
@patch('lib.i18n.AVAILABLE_LANGUAGES', ['en', 'pt-br', 'pt'])
def test_complex_hl(self):
with app.test_client() as c:
c.get('/?hl=pt-BR')
assert (g.locale == 'pt-br')
assert (g.locale_choices == ['pt-br', 'pt', 'en'])
def test_fallback_hl(self):
with app.test_client() as c:
c.get('/?hl=foobar')
assert (g.locale == 'en')
assert (g.locale_choices == ['en'])
|
apache-2.0
| 1,607,169,839,682,063,000 | 31.471698 | 74 | 0.594767 | false | 3.651805 | true | false | false |
shoopio/shoop
|
shuup/admin/dashboard/utils.py
|
2
|
1372
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import time
from heapq import heappop, heappush
from itertools import islice
from django.utils.timezone import now
from shuup.admin.module_registry import get_modules
def get_activity(request, n_entries=30, cutoff_hours=10):
"""
Get Activity objects from all modules as a list in latest-first order.
:param request: Request context
:type request: django.http.request.HttpRequest
:param n_entries: Number of entries to return in total.
:type n_entries: int
    :param cutoff_hours: Compute the cutoff datetime so that the oldest entry is at most this many hours old
:type cutoff_hours: float
:return: List of Activity objects
:rtype: list[Activity]
"""
cutoff_dt = now() - datetime.timedelta(hours=cutoff_hours)
activities = []
for module in get_modules():
for activity in islice(module.get_activity(request, cutoff=cutoff_dt), n_entries):
heappush(activities, (-time.mktime(activity.datetime.timetuple()), activity))
out = []
while activities and len(out) < n_entries:
out.append(heappop(activities)[1])
return out
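# Usage sketch (illustrative numbers; `request` is an ordinary HttpRequest):
#
#   latest = get_activity(request, n_entries=10, cutoff_hours=24)
#   for activity in latest:
#       print(activity.datetime, activity)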
|
agpl-3.0
| 5,926,010,667,520,457,000 | 33.3 | 97 | 0.708455 | false | 3.790055 | false | false | false |
cheminfo/RDKitjs
|
old/src/similarityMap_basic_functions.py
|
1
|
3270
|
# Imports reconstructed for this fragment (assumptions inferred from the
# usage below; the original file ships without them):
import math
import numpy
import numpy as np
from matplotlib import cm
from rdkit.Chem import Draw
from rdkit.Chem.Draw.MolDrawing import MolDrawing, DrawingOptions
from rdkit.six import iteritems
def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0, mux=0.0, muy=0.0, sigmaxy=0.0):
Xmu = X-mux
Ymu = Y-muy
rho = sigmaxy/(sigmax*sigmay)
z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
return np.exp(-z/(2*(1-rho**2))) / denom
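# Sanity-check sketch for the helper above (it mirrors the retired
# matplotlib.mlab.bivariate_normal signature): an uncorrelated unit-variance
# Gaussian centred at the origin peaks at 1/(2*pi).
#
#   X, Y = np.meshgrid(np.linspace(-1, 1, 3), np.linspace(-1, 1, 3))
#   Z = bivariate_normal(X, Y)  # sigmax=sigmay=1, mux=muy=0
#   assert abs(Z[1, 1] - 1.0 / (2 * np.pi)) < 1e-12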
def MolToMPL(mol,size=(300,300),kekulize=True, wedgeBonds=True, imageType=None, fitImage=False, options=None, **kwargs):
if not mol:
raise ValueError('Null molecule provided')
from rdkit.Chem.Draw.mplCanvas import Canvas
canvas = Canvas(size)
if options is None:
options = DrawingOptions()
options.bgColor=None
if fitImage:
        options.dotsPerAngstrom = int(min(size) / 10)
options.wedgeDashedBonds=wedgeBonds
drawer = MolDrawing(canvas=canvas, drawingOptions=options)
omol=mol
if kekulize:
from rdkit import Chem
mol = Chem.Mol(mol.ToBinary())
Chem.Kekulize(mol)
if not mol.GetNumConformers():
from rdkit.Chem import AllChem
AllChem.Compute2DCoords(mol)
drawer.AddMol(mol,**kwargs)
omol._atomPs=drawer.atomPs[mol]
for k,v in iteritems(omol._atomPs):
omol._atomPs[k]=canvas.rescalePt(v)
canvas._figure.set_size_inches(float(size[0])/100,float(size[1])/100)
return canvas._figure
def calcAtomGaussians(mol,a=0.03,step=0.02,weights=None):
import numpy
from matplotlib import mlab
x = numpy.arange(0,1,step)
y = numpy.arange(0,1,step)
X,Y = numpy.meshgrid(x,y)
if weights is None:
weights=[1.]*mol.GetNumAtoms()
Z = mlab.bivariate_normal(X,Y,a,a,mol._atomPs[0][0], mol._atomPs[0][1])*weights[0] # this is not bivariate case ... only univariate no mixtures #matplotlib.mlab.bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0, mux=0.0, muy=0.0, sigmaxy=0.0)
for i in range(1,mol.GetNumAtoms()):
Zp = mlab.bivariate_normal(X,Y,a,a,mol._atomPs[i][0], mol._atomPs[i][1])
Z += Zp*weights[i]
return X,Y,Z
def GetSimilarityMapFromWeights(mol, weights, colorMap=cm.PiYG, scale=-1, size=(250, 250), sigma=None, #@UndefinedVariable #pylint: disable=E1101
coordScale=1.5, step=0.01, colors='k', contourLines=10, alpha=0.5, **kwargs):
if mol.GetNumAtoms() < 2: raise ValueError("too few atoms")
fig = Draw.MolToMPL(mol, coordScale=coordScale, size=size, **kwargs)
if sigma is None:
if mol.GetNumBonds() > 0:
bond = mol.GetBondWithIdx(0)
idx1 = bond.GetBeginAtomIdx()
idx2 = bond.GetEndAtomIdx()
sigma = 0.3 * math.sqrt(sum([(mol._atomPs[idx1][i]-mol._atomPs[idx2][i])**2 for i in range(2)]))
else:
sigma = 0.3 * math.sqrt(sum([(mol._atomPs[0][i]-mol._atomPs[1][i])**2 for i in range(2)]))
sigma = round(sigma, 2)
x, y, z = Draw.calcAtomGaussians(mol, sigma, weights=weights, step=step)
# scaling
if scale <= 0.0: maxScale = max(math.fabs(numpy.min(z)), math.fabs(numpy.max(z)))
else: maxScale = scale
# coloring
fig.axes[0].imshow(z, cmap=colorMap, interpolation='bilinear', origin='lower', extent=(0,1,0,1), vmin=-maxScale, vmax=maxScale)
# contour lines
# only draw them when at least one weight is not zero
if len([w for w in weights if w != 0.0]):
fig.axes[0].contour(x, y, z, contourLines, colors=colors, alpha=alpha, **kwargs)
return fig
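# Usage sketch (assumes RDKit is installed; molecule and weights are
# arbitrary illustrative values, one weight per heavy atom):
#
#   from rdkit import Chem
#   mol = Chem.MolFromSmiles('c1ccccc1O')           # phenol, 7 heavy atoms
#   weights = [0.1, 0.2, -0.3, 0.4, 0.0, 0.1, 0.5]
#   fig = GetSimilarityMapFromWeights(mol, weights)
#   fig.savefig('similarity_map.png', bbox_inches='tight')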
|
bsd-3-clause
| -2,635,108,952,803,476,000 | 42.026316 | 240 | 0.666667 | false | 2.69802 | false | false | false |
jason-ni/eventlet-raft
|
eventlet_raft/server.py
|
1
|
3811
|
import sys
import eventlet
from eventlet import event
import logging
import msgpack
from .settings import BUF_LEN
LOG = logging.getLogger('Server')
class Server(object):
exit_event = event.Event()
def __init__(self, conf):
super(Server, self).__init__()
self._node_listen_ip = conf.get('server', 'node_listen_ip')
self._node_listen_port = int(conf.get('server', 'node_listen_port'))
self._node_listen_sock = None
self._client_listen_ip = conf.get('server', 'client_listen_ip')
self._client_listen_port = int(conf.get('server', 'client_listen_port'))
self._client_listen_sock = None
self._threads = []
def _handle_node_sock(self, node_sock):
LOG.debug("Get a node socket")
unpacker = msgpack.Unpacker()
while True:
try:
chunk = node_sock.recv(BUF_LEN)
if not chunk:
break
unpacker.feed(chunk)
for unpacked_msg in unpacker:
self._on_handle_node_msg(unpacked_msg)
except Exception as e:
LOG.exception("node sock error: %s" % str(e))
break
def _on_handle_node_msg(self, msg):
pass
def _handle_client_sock(self, client_sock):
LOG.info("Get a client socket")
unpacker = msgpack.Unpacker()
while True:
try:
chunk = client_sock.recv(BUF_LEN)
if not chunk:
break
unpacker.feed(chunk)
for unpacked_msg in unpacker:
LOG.info(unpacked_msg)
self._on_handle_client_msg(client_sock, unpacked_msg)
except Exception as e:
LOG.exception("client sock error: %s" % str(e))
break
def _on_handle_client_msg(self, msg):
pass
def _on_node_connect(self, node_sock, address):
pass
def _handle_node_accept(self):
while True:
node_sock, address = self._node_listen_sock.accept()
self._on_node_connect(node_sock, address)
self._threads.append(
eventlet.spawn(self._handle_node_sock, node_sock)
)
def _on_client_connect(self, client_sock, address):
pass
def _handle_client_accept(self):
while True:
client_sock, address = self._client_listen_sock.accept()
self._on_client_connect(client_sock, address)
self._threads.append(
eventlet.spawn(self._handle_client_sock, client_sock)
)
def _on_start(self):
pass
def start(self):
self._node_listen_sock = eventlet.listen(
(self._node_listen_ip, self._node_listen_port)
)
self._threads.append(eventlet.spawn(self._handle_node_accept))
self._client_listen_sock = eventlet.listen(
(self._client_listen_ip, self._client_listen_port)
)
self._threads.append(eventlet.spawn(self._handle_client_accept))
self._on_start()
def _shutdown(self):
LOG.debug("Exiting...")
self._on_exit()
for thread in self._threads:
if thread:
thread.kill()
else:
LOG.debug("--- none thread")
sys.exit(0)
def _on_exit(self):
pass
def wait(self):
LOG.debug("Waiting for msg to exit")
self.exit_event.wait()
LOG.debug("Received exit event")
self._shutdown()
def main():
from util import config_log
from conf import set_conf
set_conf('test.conf')
from .conf import CONF
config_log()
server = Server(CONF)
server.start()
server.wait()
if __name__ == '__main__':
main()
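# Client-side sketch for exercising the server above: frames are plain
# msgpack blobs fed into an Unpacker on the receiving side. The payload
# schema is left open here (_on_handle_client_msg is a no-op in this base
# class), so the dict, host and port below are purely illustrative.
#
#   import socket
#   import msgpack
#   sock = socket.create_connection(('127.0.0.1', 4000))
#   sock.sendall(msgpack.packb({'op': 'ping'}))
#   sock.close()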
|
apache-2.0
| 5,002,739,255,207,698,000 | 28.091603 | 80 | 0.547625 | false | 3.95332 | false | false | false |
jpadilla/django-extensions
|
django_extensions/templatetags/highlighting.py
|
1
|
3357
|
# coding=utf-8
"""
Similar to syntax_color.py but this is intended more for being able to
copy+paste actual code into your Django templates without needing to
escape or anything crazy.
http://lobstertech.com/2008/aug/30/django_syntax_highlight_template_tag/
Example:
{% load highlighting %}
<style>
@import url("http://lobstertech.com/media/css/highlight.css");
.highlight { background: #f8f8f8; }
.highlight { font-size: 11px; margin: 1em; border: 1px solid #ccc;
border-left: 3px solid #F90; padding: 0; }
.highlight pre { padding: 1em; overflow: auto; line-height: 120%; margin: 0; }
.predesc { margin: 1.5em 1.5em -2.5em 1em; text-align: right;
font: bold 12px Tahoma, Arial, sans-serif;
letter-spacing: 1px; color: #333; }
</style>
<h2>check out this code</h2>
{% highlight 'python' 'Excerpt: blah.py' %}
def need_food(self):
print("Love is <colder> than &death&")
{% endhighlight %}
"""
import django
from django import template
from django.template import (
Context, Node, Template, TemplateSyntaxError, Variable,
)
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
try:
from pygments import highlight as pyghighlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
HAS_PYGMENTS = True
except ImportError:
HAS_PYGMENTS = False
register = template.Library()
@stringfilter
def parse_template(value):
return mark_safe(Template(value).render(Context()))
parse_template.is_safe = True
if django.get_version() >= "1.4":
register.filter(parse_template, is_safe=True)
else:
parse_template.is_safe = True
register.filter(parse_template)
class CodeNode(Node):
def __init__(self, language, nodelist, name=''):
self.language = Variable(language)
self.nodelist = nodelist
if name:
self.name = Variable(name)
else:
self.name = None
def render(self, context):
code = self.nodelist.render(context).strip()
lexer = get_lexer_by_name(self.language.resolve(context))
formatter = HtmlFormatter(linenos=False)
html = ""
if self.name:
name = self.name.resolve(context)
html = '<div class="predesc"><span>%s</span></div>' % name
return html + pyghighlight(code, lexer, formatter)
@register.tag
def highlight(parser, token):
"""
Allows you to put a highlighted source code <pre> block in your code.
This takes two arguments, the language and a little explaination message
that will be generated before the code. The second argument is optional.
Your code will be fed through pygments so you can use any language it
supports.
Usage::
{% load highlighting %}
{% highlight 'python' 'Excerpt: blah.py' %}
def need_food(self):
print("Love is colder than death")
{% endhighlight %}
"""
if not HAS_PYGMENTS:
raise ImportError("Please install 'pygments' library to use highlighting.")
nodelist = parser.parse(('endhighlight',))
parser.delete_first_token()
bits = token.split_contents()[1:]
if len(bits) < 1:
raise TemplateSyntaxError("'highlight' statement requires an argument")
return CodeNode(bits[0], nodelist, *bits[1:])
|
mit
| 1,858,541,039,714,672,400 | 29.518182 | 83 | 0.670241 | false | 3.705298 | false | false | false |
sjdv1982/seamless
|
seamless/core/status.py
|
1
|
10043
|
class SeamlessInvalidValueError(ValueError):
def __str__(self):
s = type(self).__name__
if len(self.args):
s += ":" + " ".join([str(a) for a in self.args])
return s
class SeamlessUndefinedError(ValueError):
def __str__(self):
s = type(self).__name__
if len(self.args):
s += ":" + " ".join([str(a) for a in self.args])
return s
import json
from enum import Enum
class MyEnum(Enum):
def __lt__(self, other):
if other is None:
return False
return self.value < other.value
def __eq__(self, other):
if other is None:
return False
return self.value == other.value
StatusEnum = MyEnum("StatusEnum", (
"OK",
"PENDING",
"SUB",
"VOID",
))
StatusReasonEnum = MyEnum("StatusReasonEnum",(
"UNCONNECTED", # only for workers
# and cells connected from undefined macropaths
"UNDEFINED", # only for cells
"INVALID", # invalid value; worker or cell
"ERROR", # error in execution; only for workers
"UPSTREAM", # worker or cell
"EXECUTING" # only for workers, only for pending
))
class WorkerStatus:
def __init__(self,
status,
reason=None,
pins=None,
preliminary=False,
progress=0.0
):
self.status = status
self.reason = reason
self.pins = pins
self.preliminary = preliminary
self.progress = progress
def __getitem__(self, index):
if index == 0:
return self.status
if index == 1:
return self.reason
raise IndexError(index)
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def status_cell(cell):
if cell._checksum is not None:
return StatusEnum.OK, None, cell._prelim
if not cell._void:
return StatusEnum.PENDING, None, None
return StatusEnum.VOID, cell._status_reason, None
def status_accessor(accessor):
if accessor is None:
return StatusEnum.VOID, StatusReasonEnum.UNCONNECTED, None
if accessor._checksum is not None:
return StatusEnum.OK, None, accessor._prelim
if not accessor._void:
return StatusEnum.PENDING, None, None
return StatusEnum.VOID, accessor._status_reason, None
def status_transformer(transformer):
prelim = transformer.preliminary
checksum = transformer._checksum
if checksum is not None and not prelim:
return WorkerStatus(StatusEnum.OK)
manager = transformer._get_manager()
tcache = manager.cachemanager.transformation_cache
livegraph = manager.livegraph
pins = None
if not transformer._void:
status = StatusEnum.PENDING
reason = StatusReasonEnum.UPSTREAM
tf_checksum = tcache.transformer_to_transformations.get(transformer)
if tf_checksum is not None:
if tf_checksum in tcache.transformation_jobs:
reason = StatusReasonEnum.EXECUTING
if reason == StatusReasonEnum.UPSTREAM:
if checksum is not None:
assert prelim
return WorkerStatus(StatusEnum.OK, preliminary=True)
else:
status = StatusEnum.VOID
reason = transformer._status_reason
upstreams = livegraph.transformer_to_upstream.get(transformer)
downstreams = livegraph.transformer_to_downstream.get(transformer)
pins = []
if reason == StatusReasonEnum.UNCONNECTED:
pins = []
if upstreams is not None:
for pinname, accessor in upstreams.items():
if pinname == "META":
continue
if accessor is None:
pins.append(pinname)
if downstreams is not None:
if not len(downstreams):
outp = transformer._output_name
assert outp is not None
pins.append(outp)
elif reason == StatusReasonEnum.UPSTREAM:
pins = {}
if upstreams is not None:
for pinname, accessor in upstreams.items():
astatus = status_accessor(accessor)
if astatus[0] == StatusEnum.OK:
continue
pins[pinname] = astatus
return WorkerStatus(
status, reason, pins,
preliminary = transformer.preliminary,
progress = transformer._progress
)
def status_reactor(reactor):
manager = reactor._get_manager()
cachemanager = manager.cachemanager
livegraph = manager.livegraph
if reactor._pending:
return WorkerStatus(StatusEnum.PENDING)
elif not reactor._void:
return WorkerStatus(StatusEnum.OK)
rtreactor = livegraph.rtreactors[reactor]
status = StatusEnum.VOID
reason = reactor._status_reason
upstreams = livegraph.reactor_to_upstream[reactor]
pins = None
if reason == StatusReasonEnum.UNCONNECTED:
pins = []
for pinname, accessor in upstreams.items():
if accessor is None:
pins.append(pinname)
elif reason == StatusReasonEnum.UPSTREAM:
pins = {}
for pinname, accessor in upstreams.items():
astatus = status_accessor(accessor)
if astatus[0] == StatusEnum.OK:
continue
pins[pinname] = astatus
for pinname in rtreactor.editpins:
cell = livegraph.editpin_to_cell[reactor][pinname]
astatus = status_accessor(cell)
if astatus[0] == StatusEnum.OK:
continue
pins[pinname] = astatus
return WorkerStatus(
status, reason, pins
)
def status_macro(macro):
if macro._gen_context is not None:
assert not macro._void
gen_status = macro._gen_context._get_status()
if format_context_status(gen_status) != "OK":
return WorkerStatus(
StatusEnum.SUB, None, gen_status
)
return WorkerStatus(StatusEnum.OK)
manager = macro._get_manager()
livegraph = manager.livegraph
pins = None
if not macro._void:
status = StatusEnum.PENDING
reason = StatusReasonEnum.UPSTREAM
else:
status = StatusEnum.VOID
reason = macro._status_reason
upstreams = livegraph.macro_to_upstream[macro]
if reason == StatusReasonEnum.UNCONNECTED:
pins = []
for pinname, accessor in upstreams.items():
if accessor is None:
pins.append(pinname)
elif reason == StatusReasonEnum.UPSTREAM:
pins = {}
for pinname, accessor in upstreams.items():
astatus = status_accessor(accessor)
if astatus[0] == StatusEnum.OK:
continue
pins[pinname] = astatus
return WorkerStatus(status, reason, pins)
def format_status(stat):
status, reason, prelim = stat
if status == StatusEnum.OK:
if prelim:
return "preliminary"
else:
return "OK"
elif status == StatusEnum.PENDING:
return "pending"
else:
if reason is None:
return "void"
else:
return reason.name.lower()
def format_worker_status(stat, as_child=False):
status, reason, pins = (
stat.status, stat.reason, stat.pins
)
if status == StatusEnum.OK:
if stat.preliminary:
return "preliminary"
return "OK"
elif status == StatusEnum.PENDING:
if reason == StatusReasonEnum.EXECUTING:
progress = stat.progress
if progress is not None and progress > 0:
return "executing, %.1f %%" % progress
else:
return "executing"
else:
return "pending"
elif status == StatusEnum.SUB:
sub = pins
ctx_status = format_context_status(sub)
ctx_statustxt = json.dumps(ctx_status, indent=2, sort_keys=True)
return ("macro ctx =>", ctx_status)
else:
if reason == StatusReasonEnum.UNCONNECTED:
result = "unconnected => "
result += ", ".join(pins)
elif reason == StatusReasonEnum.UPSTREAM:
result = reason.name.lower() + " => "
pinresult = []
for pinname, pstatus in pins.items():
if as_child:
pinresult.append(pinname)
else:
pinresult.append(pinname + " " + format_status(pstatus))
result += ", ".join(pinresult)
else:
result = reason.name.lower()
return result
def format_context_status(stat):
from .worker import Worker
from .cell import Cell
from .context import Context
result = {}
for childname, value in stat.items():
if isinstance(value, (str, dict)):
if value == "Status: OK":
continue
result[childname] = value
continue
child, childstat = value
if not isinstance(child, Context):
if childstat[0] == StatusEnum.VOID:
if childstat[1] == StatusReasonEnum.UPSTREAM:
continue
if childstat[0] == StatusEnum.PENDING:
if isinstance(child, Worker):
if childstat.reason != StatusReasonEnum.EXECUTING:
continue
else:
continue
if isinstance(child, Worker):
childresult = format_worker_status(childstat, as_child=True)
elif isinstance(child, Cell):
childresult = format_status(childstat)
elif isinstance(child, Context):
childresult = format_context_status(childstat)
else:
continue
if childresult == "OK":
continue
result[childname] = childresult
if not len(result):
result = "OK"
return result
|
mit
| 4,051,852,312,388,161,000 | 32.476667 | 76 | 0.572339 | false | 4.364624 | false | false | false |
sjorek/geoanonymizer.py
|
geoanonymizer/spatial/projection.py
|
1
|
2642
|
# -*- coding: utf-8 -*-
u"""
Functions dealing with geodesic projection systems.
WGS84 (EPSG 4326) projection system
“OpenStreetMap uses the WGS84 spatial reference system used by the
Global Positioning System (GPS). It uses geographic coordinates
between -180° and 180° longitude and -90° and 90° latitude. So
this is the "native" OSM format.
This is the right choice for you if you need geographical coordinates
or want to transform the coordinates into some other spatial reference
system or projection.”
-- from `Projections/Spatial reference systems: WGS84 (EPSG 4326)
<http://openstreetmapdata.com/info/projections#wgs84>`_
Mercator (EPSG 3857) projection system
“Most tiled web maps (such as the standard OSM maps and Google Maps)
use this Mercator projection.
The map area of such maps is a square with x and y coordiates both
between -20,037,508.34 and 20,037,508.34 meters. As a result data
north of about 85.1° and south of about -85.1° latitude can not be
shown and has been cut off. …
This is the right choice for you if you are creating tiled web maps.”
-- from `Projections/Spatial reference systems: Mercator (EPSG 3857)
<http://openstreetmapdata.com/info/projections#mercator>`_
Hint: Apple™ iOS or Google™ Android tracked coordinates use WGS84 (EPSG 4326)
projection and nearly all geomap-services, like google-maps, return this too,
although they're utilizing Mercator (EPSG 3857) projection internally.
"""
import math
def _generate_epsg_4326_to_epsg_3857_converter():
factor1 = 20037508.34 / 180
factor2 = math.pi / 360
factor3 = math.pi / 180
def convert_epsg_4326_to_epsg_3857(latitude, longitude):
"""
Convert WGS84 (EPSG 4326) to Mercator (EPSG 3857) projection.
"""
x = longitude * factor1
y = (math.log(math.tan((90 + latitude) * factor2)) / factor3) * factor1
return x, y
return convert_epsg_4326_to_epsg_3857
convert_gps_to_map_coordinates = _generate_epsg_4326_to_epsg_3857_converter()
def _generate_epsg_3857_to_epsg_4326_converter():
factor1 = 180 / 20037508.34
factor2 = 360 / math.pi
factor3 = math.pi / 20037508.34
def convert_epsg_3857_to_epsg_4326(x, y):
"""
Convert Mercator (EPSG 3857) to WGS84 (EPSG 4326) projection.
"""
longitude = x * factor1
latitude = factor2 * math.atan(math.exp(y * factor3)) - 90
return latitude, longitude
return convert_epsg_3857_to_epsg_4326
convert_map_to_gps_coordinates = _generate_epsg_3857_to_epsg_4326_converter()
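# Illustrative usage sketch (not part of the original module): round-trip an
# arbitrary coordinate pair through the two converters defined above. The
# sample latitude/longitude values are assumptions chosen only for the demo.
if __name__ == '__main__':
    lat, lon = 52.520008, 13.404954
    x, y = convert_gps_to_map_coordinates(lat, lon)
    lat2, lon2 = convert_map_to_gps_coordinates(x, y)
    # The round trip should reproduce the inputs up to floating point error.
    print("EPSG:3857  x=%.2f y=%.2f" % (x, y))
    print("EPSG:4326  lat=%.6f lon=%.6f" % (lat2, lon2))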
|
mit
| -186,810,838,438,010,940 | 33.051948 | 79 | 0.692601 | false | 3.361538 | false | false | false |
alejandrorosas/ardupilot
|
Tools/autotest/pysim/multicopter.py
|
1
|
6720
|
#!/usr/bin/env python
from aircraft import Aircraft
import util, time, math
from math import degrees, radians
from rotmat import Vector3, Matrix3
class Motor(object):
def __init__(self, angle, clockwise, servo):
self.angle = angle # angle in degrees from front
self.clockwise = clockwise # clockwise == true, anti-clockwise == false
self.servo = servo # what servo output drives this motor
def build_motors(frame):
'''build a motors list given a frame type'''
frame = frame.lower()
if frame in [ 'quad', '+', 'x' ]:
motors = [
Motor(90, False, 1),
Motor(270, False, 2),
Motor(0, True, 3),
Motor(180, True, 4),
]
if frame in [ 'x', 'quadx' ]:
for i in range(4):
motors[i].angle -= 45.0
elif frame in ["y6"]:
motors = [
Motor(60, False, 1),
Motor(60, True, 7),
Motor(180, True, 4),
Motor(180, False, 8),
Motor(-60, True, 2),
Motor(-60, False, 3),
]
elif frame in ["hexa", "hexa+"]:
motors = [
Motor(0, True, 1),
Motor(60, False, 4),
Motor(120, True, 8),
Motor(180, False, 2),
Motor(240, True, 3),
Motor(300, False, 7),
]
elif frame in ["hexax"]:
motors = [
Motor(30, False, 7),
Motor(90, True, 1),
Motor(150, False, 4),
Motor(210, True, 8),
Motor(270, False, 2),
Motor(330, True, 3),
]
elif frame in ["octa", "octa+", "octax" ]:
motors = [
Motor(0, True, 1),
Motor(180, True, 2),
Motor(45, False, 3),
Motor(135, False, 4),
Motor(-45, False, 5),
Motor(-135, False, 6),
Motor(270, True, 7),
Motor(90, True, 8),
]
if frame == 'octax':
for i in range(8):
motors[i].angle += 22.5
elif frame in ["octa-quad"]:
motors = [
Motor( 45, False, 1),
Motor( -45, True, 2),
Motor(-135, False, 3),
Motor( 135, True, 4),
Motor( -45, False, 5),
Motor( 45, True, 6),
Motor( 135, False, 7),
Motor(-135, True, 8),
]
else:
raise RuntimeError("Unknown multicopter frame type '%s'" % frame)
return motors
class MultiCopter(Aircraft):
'''a MultiCopter'''
def __init__(self, frame='+',
hover_throttle=0.45,
terminal_velocity=15.0,
frame_height=0.1,
mass=1.5):
Aircraft.__init__(self)
self.motors = build_motors(frame)
self.motor_speed = [ 0.0 ] * len(self.motors)
self.mass = mass # Kg
self.hover_throttle = hover_throttle
self.terminal_velocity = terminal_velocity
self.terminal_rotation_rate = 4*radians(360.0)
self.frame_height = frame_height
# scaling from total motor power to Newtons. Allows the copter
# to hover against gravity when each motor is at hover_throttle
self.thrust_scale = (self.mass * self.gravity) / (len(self.motors) * self.hover_throttle)
self.last_time = time.time()
def update(self, servos):
for i in range(0, len(self.motors)):
servo = servos[self.motors[i].servo-1]
if servo <= 0.0:
self.motor_speed[i] = 0
else:
self.motor_speed[i] = servo
m = self.motor_speed
# how much time has passed?
t = time.time()
delta_time = t - self.last_time
self.last_time = t
# rotational acceleration, in rad/s/s, in body frame
rot_accel = Vector3(0,0,0)
thrust = 0.0
for i in range(len(self.motors)):
rot_accel.x += -radians(5000.0) * math.sin(radians(self.motors[i].angle)) * m[i]
rot_accel.y += radians(5000.0) * math.cos(radians(self.motors[i].angle)) * m[i]
if self.motors[i].clockwise:
rot_accel.z -= m[i] * radians(400.0)
else:
rot_accel.z += m[i] * radians(400.0)
thrust += m[i] * self.thrust_scale # newtons
# rotational air resistance
rot_accel.x -= self.gyro.x * radians(5000.0) / self.terminal_rotation_rate
rot_accel.y -= self.gyro.y * radians(5000.0) / self.terminal_rotation_rate
rot_accel.z -= self.gyro.z * radians(400.0) / self.terminal_rotation_rate
# update rotational rates in body frame
self.gyro += rot_accel * delta_time
# update attitude
self.dcm.rotate(self.gyro * delta_time)
self.dcm.normalize()
# air resistance
air_resistance = - self.velocity * (self.gravity/self.terminal_velocity)
accel_body = Vector3(0, 0, -thrust / self.mass)
accel_earth = self.dcm * accel_body
accel_earth += Vector3(0, 0, self.gravity)
accel_earth += air_resistance
# add in some wind (turn force into accel by dividing by mass).
# NOTE: disable this drag correction until we work out
# why it is blowing up
# accel_earth += self.wind.drag(self.velocity) / self.mass
# if we're on the ground, then our vertical acceleration is limited
# to zero. This effectively adds the force of the ground on the aircraft
if self.on_ground() and accel_earth.z > 0:
accel_earth.z = 0
# work out acceleration as seen by the accelerometers. It sees the kinematic
# acceleration (ie. real movement), plus gravity
self.accel_body = self.dcm.transposed() * (accel_earth + Vector3(0, 0, -self.gravity))
# new velocity vector
self.velocity += accel_earth * delta_time
# new position vector
old_position = self.position.copy()
self.position += self.velocity * delta_time
# constrain height to the ground
if self.on_ground():
if not self.on_ground(old_position):
print("Hit ground at %f m/s" % (self.velocity.z))
self.velocity = Vector3(0, 0, 0)
# zero roll/pitch, but keep yaw
(r, p, y) = self.dcm.to_euler()
self.dcm.from_euler(0, 0, y)
self.position = Vector3(self.position.x, self.position.y,
-(self.ground_level + self.frame_height - self.home_altitude))
# update lat/lon/altitude
self.update_position(delta_time)
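# Illustrative sketch (not part of the original file): build a quad frame and
# step the simulation once with all four motors at half throttle. This assumes
# the sibling modules imported above (aircraft, rotmat) are available on the
# path; the servo values are arbitrary demo inputs in the 0..1 range.
if __name__ == '__main__':
    sim = MultiCopter(frame='quad')
    sim.update(servos=[0.5, 0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
    print("velocity after one step: %s" % sim.velocity)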
|
gpl-3.0
| 795,064,505,372,224,100 | 34.183246 | 98 | 0.524554 | false | 3.38539 | false | false | false |
Hanaasagi/Ushio
|
initdb.py
|
1
|
1961
|
#!/usr/bin/python
# -*-coding:UTF-8-*-
import sys
import time
import yaml
import pymongo
from hashlib import md5
zone_map = {
    'Anime': 'Some things about anime',
    'Music': 'Some things about music',
    'Light novels': 'Some things about light novels'
}
def create_admin(db, setting):
    email = raw_input('Please enter a valid email address: ')
    username = raw_input('Please enter the admin username: ')
    password = raw_input('Please enter the admin password: ')
hash_object = md5(password + setting['salt'])
password = hash_object.hexdigest()
user = {
'username': username,
'password': password,
'money': setting['init_money'],
'register_time': time.time(),
'favorite': [],
'email': email,
'level': 0,
'qq': '',
'website': '',
'address': '',
        'signal': u'This person is too lazy to leave anything here',
'openemail': 1,
'openfavorite': 1,
'openqq': 1,
'following': [],
'follower': [],
'allowemail': 1,
'logintime': None,
'loginip': None
}
db.user.insert(user)
def create_zone():
for name, desc in zone_map.items():
data = {
'name': name,
'description': desc,
'nums': 0
}
db.zone.insert(data)
if __name__ == '__main__':
try:
with open('setting.yaml', 'r') as f:
setting = yaml.load(f)
except:
print 'can not load setting file'
sys.exit(0)
client = pymongo.MongoClient(setting['database']['address'])
db = client[setting['database']['db']]
    isdo = raw_input('Create an admin account? (Y/n): ')
    if isdo in ('Y', 'y'):
        create_admin(db, setting['global'])
    else:
        print 'Nothing was done'
    isdo = raw_input('Initialize the board zones? (Y/n): ')
    if isdo in ('Y', 'y'):
        create_zone()
    else:
        print 'Nothing was done'
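# Illustrative sketch of the expected setting.yaml (not part of the original
# file), inferred from the keys read above; all values are placeholders:
#
#   database:
#     address: mongodb://localhost:27017
#     db: ushio
#   global:
#     salt: some-random-string
#     init_money: 100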
|
mit
| 6,451,959,847,060,826,000 | 21.766234 | 64 | 0.509983 | false | 2.713622 | false | false | false |
brumar/WPsolving
|
lib/postEvaluation.py
|
1
|
4325
|
'''
Created on 25 July 2014
@author: Nevrose
'''
import csv
class weightEvaluator():
def __init__(self):
self.datas={}
def prepareStructure(self,problemBank):
for problemName in problemBank.dicPbm.iterkeys():
problem=problemBank.dicPbm[problemName]
for indexInfo in range(len(problem.text.textInformations)):
textInfo=problem.text.textInformations[indexInfo]
for indexRepresentation in range(len(textInfo.representations)):
rep=textInfo.representations[indexRepresentation]
if(problemName not in self.datas.keys()):
self.datas[problemName]={}
if(indexInfo not in self.datas[problemName].keys()):
self.datas[problemName][indexInfo]={}
if(indexRepresentation not in self.datas[problemName][indexInfo].keys()):
self.datas[problemName][indexInfo][indexRepresentation]={}
self.datas[problemName][indexInfo][indexRepresentation]={"representation":rep,"occurences":0,"verbalDescription":"","weight":0}
def bindConfrontationToPathsDatas(self,confrontationDic, dDic):
#self.dicPbmSetFormulaPlannedObserved[pbm][setName][formula]=[planned,observationsCount]
dic=confrontationDic.dicPbmSetFormulaPlannedObserved
for problem in dic.iterkeys():
for set in dic[problem].iterkeys():
for formula in dic[problem][set]:
if dic[problem][set][formula][0]==True:
numberOfObservations=dic[problem][set][formula][1]
#logging.info("found")
congruentLines=(len(dDic[problem][formula]))
for pathLine in dDic[problem][formula]:
path=pathLine["path"]
numberOfRepresentationUsed=len(path.interpretationsList)
for interpIndex in range(numberOfRepresentationUsed):
verbalDescription=path.interpretationsList[interpIndex]
textIndex=path.richInterpretationsList[interpIndex].indexTextInformation
repIndex=path.richInterpretationsList[interpIndex].indexSelectedRepresentation
#logging.info(verbalDescription,textIndex,repIndex)
self.datas[problem][textIndex][repIndex]["occurences"]+=float(numberOfObservations)/congruentLines
#logging.info(self.datas[problem][textIndex][repIndex]["occurences"])
if( self.datas[problem][textIndex][repIndex]["verbalDescription"]==""):
self.datas[problem][textIndex][repIndex]["verbalDescription"]=verbalDescription
def normaliseWeightByPbm(self):
for pbm in self.datas:
suma=0
for info in self.datas[pbm]:
for rep in self.datas[pbm][info]:
suma+=self.datas[pbm][info][rep]["occurences"]
for info in self.datas[pbm]:
for rep in self.datas[pbm][info]:
if(self.datas[pbm][info][rep]["verbalDescription"]!=""):
self.datas[pbm][info][rep]["weight"]=float(self.datas[pbm][info][rep]["occurences"])/suma
#=======================================================
# logging.info(pbm)
# logging.info(self.datas[pbm][info][rep]["verbalDescription"])
# logging.info(self.datas[pbm][info][rep]["weight"])
#=======================================================
    def printCSV(self, csvFile="datasWeight.csv"):
        with open(csvFile, 'wb') as csvfile:
            writer = csv.writer(csvfile, delimiter=';', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            for pbm in self.datas:
                for info in self.datas[pbm]:
                    for rep in self.datas[pbm][info]:
                        writer.writerow([pbm,
                                         self.datas[pbm][info][rep]["verbalDescription"],
                                         self.datas[pbm][info][rep]["weight"],
                                         self.datas[pbm][info][rep]["occurences"]])
|
mit
| -129,583,702,923,696,180 | 56.666667 | 176 | 0.551676 | false | 4.449588 | false | false | false |
cedrick-f/pyVot
|
src/PyVot.py
|
1
|
5947
|
#!/usr/bin/env python
# -*- coding: ISO-8859-1 -*-
##This file is part of PyVot
#############################################################################
#############################################################################
## ##
## PyVot ##
## ##
#############################################################################
#############################################################################
## Copyright (C) 2006-2009 Cédrick FAURY
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import wx
import Icones
import sys, os, getpass
##import psyco
##psyco.log()
##psyco.full()
import globdef
from globdef import *
#import sys, os, time, traceback, types
import FenPrincipale
#import wx.aui
#import wx.html
#import images
# For debugging
##wx.Trap();
##print "wx.VERSION_STRING = %s (%s)" % (wx.VERSION_STRING, wx.USE_UNICODE and 'unicode' or 'ansi')
##print "pid:", os.getpid()
##raw_input("Press Enter...")
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
class MySplashScreen(wx.SplashScreen):
def __init__(self):
bmp = Icones.getLogoSplashBitmap()
wx.SplashScreen.__init__(self, bmp,
wx.SPLASH_CENTRE_ON_SCREEN | wx.SPLASH_TIMEOUT,
5000, None, -1,
style = wx.BORDER_NONE|wx.FRAME_NO_TASKBAR)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.fc = wx.FutureCall(2000, self.ShowMain)
def OnClose(self, evt):
# Make sure the default handler runs too so this window gets
# destroyed
evt.Skip()
self.Hide()
# if the timer is still running then go ahead and show the
# main frame now
if self.fc.IsRunning():
self.fc.Stop()
self.ShowMain()
def ShowMain(self):
NomFichier = None
        if len(sys.argv)>1: # a parameter was passed on the command line
            parametre=sys.argv[1]
            # check that the file passed as a parameter exists
if os.path.isfile(parametre):
NomFichier = parametre
frame = FenPrincipale.wxPyVot(None, "PyVot", NomFichier)
frame.Show()
if self.fc.IsRunning():
self.Raise()
# wx.CallAfter(frame.ShowTip)
#---------------------------------------------------------------------------
class PyVotApp(wx.App):
def OnInit(self):
"""
Create and show the splash screen. It will then create and show
the main frame when it is time to do so.
"""
self.version = VERSION
# try:
self.auteur = unicode(getpass.getuser(),'cp1252')
# except:
# self.auteur = ""
wx.SystemOptions.SetOptionInt("mac.window-plain-transition", 1)
self.SetAppName("PyVot")
# For debugging
#self.SetAssertMode(wx.PYAPP_ASSERT_DIALOG)
# Normally when using a SplashScreen you would create it, show
        # it and then continue on with the application's
# initialization, finally creating and showing the main
# application window(s). In this case we have nothing else to
# do so we'll delay showing the main frame until later (see
# ShowMain above) so the users can see the SplashScreen effect.
splash = MySplashScreen()
splash.Show()
return True
#---------------------------------------------------------------------------
def main():
## try:
# demoPath = os.path.dirname(__file__)
# os.chdir(demoPath)
# print demoPath
# except:
# pass
app = PyVotApp(False)
# wx.Log.SetActiveTarget( LogPrintStackStderr() )
app.MainLoop()
# def PyVotRunning():
# #
# # This function tests whether PyVot.exe is already running, in which case we stop everything.
# #
# if not HAVE_WMI:
# return False
# else:
# nb_instances=0
# try:
# controler=wmi.WMI()
# for elem in controler.Win32_Process():
# if "PyVot.exe"==elem.Caption:
# nb_instances=nb_instances+1
# if nb_instances>=2:
# sys.exit(0)
# except:
# pass
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# from customLogTarget import *
if __name__ == '__main__':
# __name__ = 'Main'
#
    # Test whether PyVot is already running
#
# PyVotRunning()
#
    # Improve processing speed by using psyco
#
# if USE_PSYCO:
# try:
# import psyco
# HAVE_PSYCO=True
# except ImportError:
# HAVE_PSYCO=False
# if HAVE_PSYCO:
# print "Psyco !!!!!"
# psyco.full()
main()
#----------------------------------------------------------------------------
|
gpl-3.0
| -7,407,038,307,259,723,000 | 30.802139 | 99 | 0.478056 | false | 4.101379 | false | false | false |
holocronweaver/wanikani2anki
|
app/widgets.py
|
1
|
2595
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
from kivy.base import EventLoop
from kivy.graphics import Color, Rectangle
from kivy.uix.button import Button
from kivy.uix.behaviors.togglebutton import ToggleButtonBehavior
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.widget import Widget
class WKToggleButton(ToggleButton):
def on_state(self, widget, value):
"""Change solid color based on button state.
Unfortunately not implemented in default Kivy."""
if value == 'down':
self.background_color = self.background_color_down
self.color = self.color_down
else:
self.background_color = self.background_color_normal
self.color = self.color_normal
class ErrorLabel(Label):
"""Label widget which only shows itself when an error label is set."""
_error = False
@property
def error(self):
return self._error
@error.setter
def error(self, value):
self._error = value
if self._error:
self.text = self._error
with self.canvas.before:
# Border.
Color(rgba=self.border_color)
Rectangle(pos=self.pos, size=self.size)
# Background.
Color(rgba=self.background_color)
Rectangle(
pos=[int(self.pos[i] + self.border_margin)
for i in range(2)],
size=[self.size[i] - 2 * self.border_margin
for i in range(2)])
else:
self.text = ''
self.canvas.before.clear()
class TextInputPlus(TextInput):
"""Supports right-click context menus and max characters."""
use_bubble = True
max_char = None
def on_text(self, instance, value):
if self.max_char and len(value) > self.max_char:
self.text = value[:self.max_char]
def on_touch_down(self, touch):
super().on_touch_down(touch)
if touch.button == 'right':
pos = touch.pos
if self.collide_point(pos[0], pos[1]):
self._show_cut_copy_paste(
pos, EventLoop.window, mode='paste')
def paste(self):
super().paste()
if not self.multiline:
# Remove extraneous newlines.
self.text = self.text.rstrip()
|
mpl-2.0
| 5,325,115,064,332,014,000 | 33.144737 | 74 | 0.597303 | false | 4.073783 | false | false | false |
EricssonResearch/calvin-base
|
calvin/actorstore/systemactors/net/UDPSender.py
|
1
|
3461
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, manage, condition, stateguard, calvinlib, calvinsys
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class UDPSender(Actor):
"""
    Send all incoming tokens to the given address/port over UDP
    Control port takes control commands of the form (uri only applicable for connect):
{
"command" : "connect"/"disconnect",
"uri": "udp://<address>:<port>"
}
Input:
data_in : Each received token will be sent to address set via control port
control_in : Control port
"""
@manage(['address', 'port'])
def init(self):
self.address = None
self.port = None
self.sender = None
self.setup()
def connect(self):
if self.sender:
calvinsys.close(self.sender)
self.sender = calvinsys.open(self, "network.socketclient", address=self.address, port=self.port, connection_type="UDP")
def will_migrate(self):
if self.sender:
calvinsys.close(self.sender)
def did_migrate(self):
self.setup()
if self.address is not None:
self.connect()
def setup(self):
self.regexp = calvinlib.use('regexp')
@stateguard(lambda self: self.sender and calvinsys.can_write(self.sender))
@condition(action_input=['data_in'])
def send(self, token):
calvinsys.write(self.sender, token)
# URI parsing - 0: protocol, 1: host, 2: port
URI_REGEXP = r'([^:]+)://([^/:]*):([0-9]+)'
def parse_uri(self, uri):
status = False
try:
parsed_uri = self.regexp.findall(self.URI_REGEXP, uri)[0]
protocol = parsed_uri[0]
if protocol != 'udp':
_log.warn("Protocol '%s' not supported, assuming udp" % (protocol,))
self.address = parsed_uri[1]
self.port = int(parsed_uri[2])
status = True
except:
_log.warn("malformed or erroneous control uri '%s'" % (uri,))
self.address = None
self.port = None
return status
@condition(action_input=['control_in'])
def control(self, control):
cmd = control.get('command', '')
if cmd == 'connect' and self.sender is None:
self._new_connection(control)
elif cmd == 'disconnect' and self.sender is not None:
self._close_connection()
def _new_connection(self, control):
if self.parse_uri(control.get('uri', '')):
self.connect()
def _close_connection(self):
calvinsys.close(self.sender)
self.sender = None
action_priority = (control, send)
requires = ['network.socketclient', 'regexp']
test_set = [
{
'input': {'data_in': [],
'control_in': []}
}
]
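# Illustrative control tokens (a sketch, not part of the original actor); the
# address and port below are arbitrary assumptions:
#   {"command": "connect", "uri": "udp://127.0.0.1:5005"}
#   {"command": "disconnect"}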
|
apache-2.0
| 5,605,926,367,011,575,000 | 29.359649 | 127 | 0.59896 | false | 3.8757 | false | false | false |
BrunoCaimar/ArcREST
|
src/arcresthelper/publishingtools.py
|
1
|
133645
|
from __future__ import print_function
from __future__ import absolute_import
from .securityhandlerhelper import securityhandlerhelper
import re as re
dateTimeFormat = '%Y-%m-%d %H:%M'
import arcrest
from . import featureservicetools as featureservicetools
from arcrest.hostedservice import AdminFeatureService
import datetime, time
import json
import os
import arcresthelper.common as common
import gc
import sys
from .packages.six.moves import urllib_parse as urlparse
try:
import pyparsing
pyparsingInstall = True
from arcresthelper import select_parser
except:
pyparsingInstall = False
import inspect
def lineno():
"""Returns the current line number in our program."""
return inspect.currentframe().f_back.f_lineno
#----------------------------------------------------------------------
def trace():
"""Determines information about where an error was thrown.
Returns:
tuple: line number, filename, error message
Examples:
>>> try:
... 1/0
... except:
... print("Error on '{}'\\nin file '{}'\\nwith error '{}'".format(*trace()))
...
Error on 'line 1234'
in file 'C:\\foo\\baz.py'
with error 'ZeroDivisionError: integer division or modulo by zero'
"""
import traceback, inspect, sys
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
filename = inspect.getfile(inspect.currentframe())
# script name + line number
line = tbinfo.split(", ")[1]
# Get Python syntax error
#
synerror = traceback.format_exc().splitlines()[-1]
return line, filename, synerror
class publishingtools(securityhandlerhelper):
#----------------------------------------------------------------------
def getItemID(self, userContent, title=None, name=None, itemType=None):
"""Gets the ID of an item by a combination of title, name, and type.
Args:
userContent (list): A list of user content.
title (str): The title of the item. Defaults to ``None``.
name (str): The name of the item. Defaults to ``None``.
itemType (str): The type of the item. Defaults to ``None``.
Returns:
str: The item's ID. If the item does not exist, ``None``.
Raises:
AttributeError: If both ``title`` and ``name`` are not specified (``None``).
See Also:
:py:func:`getItem`
"""
itemID = None
if name is None and title is None:
raise AttributeError('Name or Title needs to be specified')
for item in userContent:
if title is None and name is not None:
if item.name == name and (itemType is None or item.type == itemType):
return item.id
elif title is not None and name is None:
if item.title == title and (itemType is None or item.type == itemType):
return item.id
else:
if item.name == name and item.title == title and (itemType is None or item.type == itemType):
return item.id
return None
#----------------------------------------------------------------------
def getItem(self, userContent, title=None, name=None, itemType=None):
"""Gets an item by a combination of title, name, and type.
Args:
userContent (list): A list of user content.
title (str): The title of the item. Defaults to ``None``.
name (str): The name of the item. Defaults to ``None``.
itemType (str): The type of the item. Defaults to ``None``.
Returns:
str: The item's ID. If the item does not exist, ``None``.
Raises:
AttributeError: If both ``title`` and ``name`` are not specified (``None``).
See Also:
:py:func:`getItemID`
"""
itemID = None
if name is None and title is None:
raise AttributeError('Name or Title needs to be specified')
for item in userContent:
if title is None and name is not None:
if item.name == name and (itemType is None or item.type == itemType):
return item
elif title is not None and name is None:
if item.title == title and (itemType is None or item.type == itemType):
return item
else:
if item.name == name and item.title == title and (itemType is None or item.type == itemType):
return item
return None
#----------------------------------------------------------------------
def folderExist(self, name, folders):
"""Determines if a folder exists, case insensitively.
Args:
name (str): The name of the folder to check.
folders (list): A list of folder dicts to check against. The dicts must contain
the key:value pair ``title``.
Returns:
            bool or None: ``True`` if the folder exists in the list; ``None`` if it does not,
                or ``False`` when ``name`` is empty or ``None``.
"""
if name is not None and name != '':
folderID = None
for folder in folders:
if folder['title'].lower() == name.lower():
return True
del folders
return folderID
else:
return False
#----------------------------------------------------------------------
def publishItems(self, items_info):
"""Publishes a list of items.
Args:
items_info (list): A list of JSON configuration items to publish.
Returns:
list: A list of results from :py:meth:`arcrest.manageorg._content.User.addItem`.
"""
if self.securityhandler is None:
print ("Security handler required")
return
itemInfo = None
item_results = None
item_info = None
admin = None
try:
admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
item_results = []
for item_info in items_info:
if 'ReplaceTag' in item_info:
itemInfo = {"ReplaceTag":item_info['ReplaceTag'] }
else:
itemInfo = {"ReplaceTag":"{FeatureService}" }
itemInfo['ItemInfo'] = self._publishItems(config=item_info)
if itemInfo['ItemInfo'] is not None and 'name' in itemInfo['ItemInfo']:
print ("%s created" % itemInfo['ItemInfo']['name'])
item_results.append(itemInfo)
else:
print (str(itemInfo['ItemInfo']))
return item_results
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "publishItems",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
itemInfo = None
item_results = None
item_info = None
admin = None
del itemInfo
del item_results
del item_info
del admin
gc.collect()
#----------------------------------------------------------------------
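    # Sketch of a single ``items_info`` entry (illustrative only, not from the
    # original source). The keys mirror what ``_publishItems`` below reads; the
    # sample values are placeholders:
    #   {"ReplaceTag": "{FeatureService}",
    #    "Title": "My Item {DATE}",          # {DATE}/{Date} expands to a timestamp
    #    "Description": "...", "Summary": "...", "Tags": "...",
    #    "ShareEveryone": false, "ShareOrg": false, "Groups": "",
    #    "Folder": "...", "Thumbnail": "...",
    #    "Type": "Feature Service", "typeKeywords": ["..."],
    #    "Data": "...",                       # optional: local file to upload
    #    "Url": "...",                        # optional: service URL to register
    #    "DateTimeFormat": "%Y-%m-%d %H:%M",  # optional: overrides the default
    #    "SkipIfExist": "false"}              # optional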
def _publishItems(self, config):
name = None
tags = None
description = None
extent = None
admin = None
adminusercontent = None
itemData = None
itemId = None
datestring = None
snippet = None
everyone = None
org = None
groupNames = None
folderName = None
thumbnail = None
itemType = None
itemParams = None
content = None
userInfo = None
userCommunity = None
results = None
folderName = None
folderId = None
res = None
sea = None
group_ids = None
shareResults = None
updateParams = None
url = None
resultItem = {}
try:
name = ''
tags = ''
description = ''
extent = ''
webmap_data = ''
if 'Data' in config:
itemData = config['Data']
if 'Url' in config:
url = config['Url']
name = config['Title']
if 'DateTimeFormat' in config:
loc_df = config['DateTimeFormat']
else:
loc_df = dateTimeFormat
datestring = datetime.datetime.now().strftime(loc_df)
name = name.replace('{DATE}',datestring)
name = name.replace('{Date}',datestring)
description = config['Description']
tags = config['Tags']
snippet = config['Summary']
everyone = config['ShareEveryone']
org = config['ShareOrg']
groupNames = config['Groups'] #Groups are by ID. Multiple groups comma separated
folderName = config['Folder']
thumbnail = config['Thumbnail']
itemType = config['Type']
typeKeywords = config['typeKeywords']
skipIfExist = False
if 'SkipIfExist' in config:
skipIfExist = config['SkipIfExist']
if str(skipIfExist).lower() == 'true':
skipIfExist = True
itemParams = arcrest.manageorg.ItemParameter()
itemParams.title = name
itemParams.thumbnail = thumbnail
itemParams.type = itemType
itemParams.overwrite = True
itemParams.snippet = snippet
itemParams.description = description
itemParams.extent = extent
itemParams.tags = tags
itemParams.typeKeywords = ",".join(typeKeywords)
admin = arcrest.manageorg.Administration(securityHandler=self.securityhandler)
content = admin.content
userInfo = content.users.user()
userCommunity = admin.community
if folderName is not None and folderName != "":
if self.folderExist(name=folderName,folders=userInfo.folders) is None:
res = userInfo.createFolder(name=folderName)
userInfo.currentFolder = folderName
if 'id' in userInfo.currentFolder:
folderId = userInfo.currentFolder['id']
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title=name, itemType=itemType,searchorg=False)
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == itemType:
if 'name' in res and res['name'] == name:
itemId = res['id']
break
if 'title' in res and res['title'] == name:
itemId = res['id']
break
if not itemId is None:
item = content.getItem(itemId).userItem
if skipIfExist == True:
resultItem['itemId'] = item.id
resultItem['url'] = item.item._curl + "/data"
resultItem['folderId'] = folderId
resultItem['name'] = name
return resultItem
results = item.updateItem(itemParameters=itemParams,
data=itemData,serviceUrl=url)
if 'error' in results:
return results
if item.ownerFolder != folderId:
if folderId is None:
folderId = "/"
moveRes = userInfo.moveItems(items=item.id,folder=folderId)
else:
try:
item = userInfo.addItem(itemParameters=itemParams,
overwrite=True,
url=url,
relationshipType=None,
originItemId=None,
destinationItemId=None,
serviceProxyParams=None,
metadata=None,
filePath=itemData)
#updateParams = arcrest.manageorg.ItemParameter()
#updateParams.title = name
#updateResults = item.updateItem(itemParameters=updateParams)
except Exception as e:
print (e)
if item is None:
return "Item could not be added"
group_ids = userCommunity.getGroupIDs(groupNames=groupNames)
shareResults = userInfo.shareItems(items=item.id,
groups=','.join(group_ids),
everyone=everyone,
org=org)
resultItem['itemId'] = item.id
resultItem['url'] = item.item._curl + "/data"
resultItem['folderId'] = folderId
resultItem['name'] = name
return resultItem
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "_publishItems",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
name = None
tags = None
description = None
extent = None
admin = None
adminusercontent = None
itemData = None
datestring = None
snippet = None
everyone = None
org = None
groupNames = None
itemId = None
thumbnail = None
itemType = None
itemParams = None
content = None
userInfo = None
userCommunity = None
results = None
folderName = None
folderId = None
res = None
sea = None
group_ids = None
shareResults = None
updateParams = None
del name
del tags
del description
del extent
del admin
del adminusercontent
del itemData
del datestring
del snippet
del everyone
del org
del groupNames
del itemId
del thumbnail
del itemType
del itemParams
del content
del userInfo
del userCommunity
del results
del folderName
del folderId
del res
del sea
del group_ids
del shareResults
del updateParams
gc.collect()
#----------------------------------------------------------------------
def publishMap(self, maps_info, fsInfo=None, itInfo=None):
"""Publishes a list of maps.
Args:
            maps_info (list): A list of JSON configuration maps to publish.
            fsInfo (list): Optional results from :py:meth:`publishFsFromMXD`, used to
                resolve ``ReplaceInfo`` entries of type ``Layer``. Defaults to ``None``.
            itInfo (list): Optional results from :py:meth:`publishItems`, used to
                resolve ``ReplaceInfo`` entries of type ``Global``. Defaults to ``None``.
Returns:
list: A list of results from :py:meth:`arcrest.manageorg._content.UserItem.updateItem`.
"""
if self.securityhandler is None:
print ("Security handler required")
return
itemInfo = None
itemId = None
map_results = None
replaceInfo = None
replaceItem = None
map_info = None
admin = None
try:
admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
map_results = []
for map_info in maps_info:
itemInfo = {}
if 'ReplaceInfo' in map_info:
replaceInfo = map_info['ReplaceInfo']
else:
replaceInfo = None
if replaceInfo != None:
for replaceItem in replaceInfo:
if replaceItem['ReplaceType'] == 'Layer':
if fsInfo is not None:
for fs in fsInfo:
if fs is not None and replaceItem['ReplaceString'] == fs['ReplaceTag']:
replaceItem['ReplaceString'] = fs['FSInfo']['url']
replaceItem['ItemID'] = fs['FSInfo']['itemId']
replaceItem['ItemFolder'] = fs['FSInfo']['folderId']
if 'convertCase' in fs['FSInfo']:
replaceItem['convertCase'] = fs['FSInfo']['convertCase']
elif 'ItemID' in replaceItem:
                                if 'ItemFolder' not in replaceItem:
itemId = replaceItem['ItemID']
itemInfo = admin.content.getItem(itemId=itemId)
if itemInfo.owner:
if itemInfo.owner == self._securityHandler.username and itemInfo.ownerFolder:
replaceItem['ItemFolder'] = itemInfo.ownerFolder
else:
replaceItem['ItemFolder'] = None
elif replaceItem['ReplaceType'] == 'Global':
if itInfo is not None:
for itm in itInfo:
if itm is not None:
if replaceItem['ReplaceString'] == itm['ReplaceTag']:
if 'ItemInfo' in itm:
if 'url' in itm['ItemInfo']:
replaceItem['ReplaceString'] = itm['ItemInfo']['url']
if 'ReplaceTag' in map_info:
itemInfo = {"ReplaceTag":map_info['ReplaceTag'] }
else:
itemInfo = {"ReplaceTag":"{WebMap}" }
itemInfo['MapInfo'] = self._publishMap(config=map_info,
replaceInfo=replaceInfo)
map_results.append(itemInfo)
print ("%s webmap created" % itemInfo['MapInfo']['Name'])
return map_results
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "publishMap",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
itemInfo = None
itemId = None
replaceInfo = None
replaceItem = None
map_info = None
admin = None
del itemInfo
del itemId
del replaceInfo
del replaceItem
del map_info
del admin
gc.collect()
#----------------------------------------------------------------------
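    # Sketch of a single ``maps_info`` entry (illustrative only, not from the
    # original source). ``_publishMap`` below reads these keys; the values shown
    # are placeholders:
    #   {"ReplaceTag": "{WebMap}",
    #    "ItemJSON": "./maps/operations.json",   # web map JSON file, or a list of files
    #    "Title": "Operations Map {DATE}", "Description": "...", "Summary": "...",
    #    "Tags": "...", "Extent": "...",
    #    "ShareEveryone": false, "ShareOrg": false, "Groups": "",
    #    "Folder": "...", "Thumbnail": "...", "Type": "Web Map", "typeKeywords": ["..."],
    #    "ReplaceInfo": [{"ReplaceType": "Layer",
    #                     "SearchString": "http://.../FeatureServer",  # text found in the map JSON
    #                     "ReplaceString": "{FeatureService}"},        # tag resolved via fsInfo
    #                    {"ReplaceType": "Global",
    #                     "SearchString": "{SomeTag}",
    #                     "ReplaceString": "..."}]}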
def _publishMap(self, config, replaceInfo=None, operationalLayers=None, tableLayers=None):
name = None
tags = None
description = None
extent = None
webmap_data = None
itemJson = None
update_service = None
admin = None
adminusercontent = None
resultMap = None
json_data = None
replaceItem = None
opLayers = None
opLayer = None
layers = None
item = None
response = None
layerIdx = None
updatedLayer = None
updated = None
text = None
itemParams = None
updateResults = None
loc_df = None
datestring = None
snippet = None
everyone = None
org = None
groupNames = None
folderName = None
thumbnail = None
itemType = None
typeKeywords = None
userCommunity = None
userContent = None
folderId = None
res = None
folderContent = None
itemId = None
group_ids = None
shareResults = None
updateParams = None
try:
name = ''
tags = ''
description = ''
extent = ''
webmap_data = None
mapJson = config['ItemJSON']
if isinstance(mapJson,list):
webmap_data = []
for jsonItem in mapJson:
#if os.path.exists(jsonItem) == False:
#return {"Results":{"error": "%s does not exist" % jsonItem}}
#if webmap_data is None:
#try:
#with open(jsonItem) as webMapInfo:
#webmap_data = json.load(webMapInfo)
#except:
#raise ValueError("%s is not a valid JSON File" % jsonItem)
#else:
try:
with open(jsonItem) as webMapInfo:
webmap_data.append(json.load(webMapInfo))
except:
raise ValueError("%s is not a valid JSON File" % jsonItem)
webmap_data = common.merge_dicts(webmap_data)
else:
if os.path.exists(mapJson) == False:
return {"Results":{"error": "%s does not exist" % mapJson}}
try:
with open(mapJson) as webMapInfo:
webmap_data = json.load(webMapInfo)
except:
raise ValueError("%s is not a valid JSON File" % mapJson)
update_service = 'FALSE'
resultMap = {'Layers':[],'Tables':[],'Results':{}}
if webmap_data is not None:
layersInfo= {}
if operationalLayers:
webmap_data['operationalLayers'] = operationalLayers
if tableLayers:
webmap_data['tables'] = tableLayers
if replaceInfo:
for replaceItem in replaceInfo:
if replaceItem['ReplaceType'] == 'Global':
webmap_data = common.find_replace(webmap_data,replaceItem['SearchString'],replaceItem['ReplaceString'])
elif replaceItem['ReplaceType'] == 'Layer':
if 'tables' in webmap_data:
opLayers = webmap_data['tables']
for opLayer in opLayers:
layerInfo= {}
if replaceItem['SearchString'] in opLayer['url']:
opLayer['url'] = opLayer['url'].replace(replaceItem['SearchString'],replaceItem['ReplaceString'])
if 'ItemID' in replaceItem:
opLayer['itemId'] = replaceItem['ItemID']
else:
opLayer['itemId'] = None
#opLayer['itemId'] = get_guid()
if 'convertCase' in replaceItem:
if replaceItem['convertCase'] == 'lower':
layerInfo = {}
layerInfo['convertCase'] = replaceItem['convertCase']
layerInfo['fields'] = []
if 'layerDefinition' in opLayer:
if 'drawingInfo' in opLayer["layerDefinition"]:
if 'renderer' in opLayer["layerDefinition"]['drawingInfo']:
if 'field1' in opLayer["layerDefinition"]['drawingInfo']['renderer']:
opLayer["layerDefinition"]['drawingInfo']['renderer']['field1'] = opLayer["layerDefinition"]['drawingInfo']['renderer']['field1'].lower()
if 'labelingInfo' in opLayer["layerDefinition"]['drawingInfo']:
lblInfos = opLayer["layerDefinition"]['drawingInfo']['labelingInfo']
if len(lblInfos) > 0:
for lblInfo in lblInfos:
if 'labelExpression' in lblInfo:
result = re.findall(r"\[.*\]", lblInfo['labelExpression'])
if len(result)>0:
for res in result:
lblInfo['labelExpression'] = str(lblInfo['labelExpression']).replace(res,str(res).lower())
if 'labelExpressionInfo' in lblInfo:
if 'value' in lblInfo['labelExpressionInfo']:
result = re.findall(r"{.*}", lblInfo['labelExpressionInfo']['value'])
if len(result)>0:
for res in result:
lblInfo['labelExpressionInfo']['value'] = str(lblInfo['labelExpressionInfo']['value']).replace(res,str(res).lower())
if 'popupInfo' in opLayer:
if 'mediaInfos' in opLayer['popupInfo'] and not opLayer['popupInfo']['mediaInfos'] is None:
for chart in opLayer['popupInfo']['mediaInfos']:
if 'value' in chart:
if 'normalizeField' in chart and not chart['normalizeField'] is None:
chart['normalizeField'] = chart['normalizeField'].lower()
if 'fields' in chart['value']:
for i in range(len(chart['value']['fields'])):
chart['value']['fields'][i] = str(chart['value']['fields'][i]).lower()
if 'fieldInfos' in opLayer['popupInfo']:
for field in opLayer['popupInfo']['fieldInfos']:
newFld = str(field['fieldName']).lower()
if 'description' in opLayer['popupInfo']:
opLayer['popupInfo']['description'] = common.find_replace(obj = opLayer['popupInfo']['description'],
find = "{" + field['fieldName'] + "}",
replace = "{" + newFld + "}")
layerInfo['fields'].append({"PublishName":field['fieldName'],
'ConvertName':newFld})
field['fieldName'] = newFld
layersInfo[opLayer['id']] = layerInfo
opLayers = webmap_data['operationalLayers']
for opLayer in opLayers:
layerInfo= {}
if replaceItem['SearchString'] in opLayer['url']:
opLayer['url'] = opLayer['url'].replace(replaceItem['SearchString'],replaceItem['ReplaceString'])
if 'ItemID' in replaceItem:
opLayer['itemId'] = replaceItem['ItemID']
else:
opLayer['itemId'] = None
#opLayer['itemId'] = get_guid()
if 'convertCase' in replaceItem:
if replaceItem['convertCase'] == 'lower':
layerInfo = {}
layerInfo['convertCase'] = replaceItem['convertCase']
layerInfo['fields'] = []
if 'layerDefinition' in opLayer:
if 'drawingInfo' in opLayer["layerDefinition"]:
if 'renderer' in opLayer["layerDefinition"]['drawingInfo']:
if 'field1' in opLayer["layerDefinition"]['drawingInfo']['renderer']:
opLayer["layerDefinition"]['drawingInfo']['renderer']['field1'] = opLayer["layerDefinition"]['drawingInfo']['renderer']['field1'].lower()
if 'labelingInfo' in opLayer["layerDefinition"]['drawingInfo']:
lblInfos = opLayer["layerDefinition"]['drawingInfo']['labelingInfo']
if len(lblInfos) > 0:
for lblInfo in lblInfos:
if 'labelExpression' in lblInfo:
result = re.findall(r"\[.*\]", lblInfo['labelExpression'])
if len(result)>0:
for res in result:
lblInfo['labelExpression'] = str(lblInfo['labelExpression']).replace(res,str(res).lower())
if 'labelExpressionInfo' in lblInfo:
if 'value' in lblInfo['labelExpressionInfo']:
result = re.findall(r"{.*}", lblInfo['labelExpressionInfo']['value'])
if len(result)>0:
for res in result:
lblInfo['labelExpressionInfo']['value'] = str(lblInfo['labelExpressionInfo']['value']).replace(res,str(res).lower())
if 'popupInfo' in opLayer:
if 'mediaInfos' in opLayer['popupInfo'] and not opLayer['popupInfo']['mediaInfos'] is None:
for k in range(len(opLayer['popupInfo']['mediaInfos'])):
chart = opLayer['popupInfo']['mediaInfos'][k]
if 'value' in chart:
if 'normalizeField' in chart and not chart['normalizeField'] is None:
chart['normalizeField'] = chart['normalizeField'].lower()
if 'fields' in chart['value']:
for i in range(len(chart['value']['fields'])):
chart['value']['fields'][i] = str(chart['value']['fields'][i]).lower()
opLayer['popupInfo']['mediaInfos'][k] = chart
if 'fieldInfos' in opLayer['popupInfo']:
for field in opLayer['popupInfo']['fieldInfos']:
newFld = str(field['fieldName']).lower()
if 'description' in opLayer['popupInfo']:
opLayer['popupInfo']['description'] = common.find_replace(obj = opLayer['popupInfo']['description'],
find = "{" + field['fieldName'] + "}",
replace = "{" + newFld + "}")
layerInfo['fields'].append({"PublishName":field['fieldName'],
'ConvertName':newFld})
field['fieldName'] = newFld
layersInfo[opLayer['id']] = layerInfo
opLayers = webmap_data['operationalLayers']
resultMap['Layers'] = {}
for opLayer in opLayers:
currentID = opLayer['id']
#if 'url' in opLayer:
#opLayer['id'] = common.getLayerName(url=opLayer['url']) + "_" + str(common.random_int_generator(maxrange = 9999))
if 'applicationProperties' in webmap_data:
if 'editing' in webmap_data['applicationProperties'] and \
not webmap_data['applicationProperties']['editing'] is None:
if 'locationTracking' in webmap_data['applicationProperties']['editing'] and \
not webmap_data['applicationProperties']['editing']['locationTracking'] is None:
if 'info' in webmap_data['applicationProperties']['editing']['locationTracking'] and \
not webmap_data['applicationProperties']['editing']['locationTracking']['info'] is None:
if 'layerId' in webmap_data['applicationProperties']['editing']['locationTracking']['info']:
if webmap_data['applicationProperties']['editing']['locationTracking']['info']['layerId'] == currentID:
webmap_data['applicationProperties']['editing']['locationTracking']['info']['layerId'] = opLayer['id']
if 'viewing' in webmap_data['applicationProperties'] and \
not webmap_data['applicationProperties']['viewing'] is None:
if 'search' in webmap_data['applicationProperties']['viewing'] and \
not webmap_data['applicationProperties']['viewing']['search'] is None:
if 'layers' in webmap_data['applicationProperties']['viewing']['search'] and \
not webmap_data['applicationProperties']['viewing']['search']['layers'] is None:
for k in range(len(webmap_data['applicationProperties']['viewing']['search']['layers'])):
searchlayer = webmap_data['applicationProperties']['viewing']['search']['layers'][k]
if searchlayer['id'] == currentID:
searchlayer['id'] = opLayer['id']
if 'fields' in searchlayer and \
not searchlayer['fields'] is None:
for i in range(len(searchlayer['fields'])):
searchlayer['fields'][i]['Name'] = str(searchlayer['fields'][i]['Name']).lower()
if 'field' in searchlayer and \
not searchlayer['field'] is None:
searchlayer['field']['name'] = searchlayer['field']['name'].lower()
webmap_data['applicationProperties']['viewing']['search']['layers'][k] = searchlayer
if 'applicationProperties' in webmap_data:
webmap_data['applicationProperties'] = common.find_replace(webmap_data['applicationProperties'], currentID, opLayer['id'])
resultLayer = {"Name":opLayer['title'],
"ID":opLayer['id']
}
if currentID in layersInfo:
resultLayer['FieldInfo'] = layersInfo[currentID]
resultMap['Layers'][currentID] = resultLayer
if 'tables' in webmap_data:
opLayers = webmap_data['tables']
for opLayer in opLayers:
currentID = opLayer['id']
#opLayer['id'] = common.getLayerName(url=opLayer['url']) + "_" + str(common.random_int_generator(maxrange = 9999))
if 'applicationProperties' in webmap_data:
if 'editing' in webmap_data['applicationProperties'] and \
not webmap_data['applicationProperties']['editing'] is None:
if 'locationTracking' in webmap_data['applicationProperties']['editing'] and \
not webmap_data['applicationProperties']['editing']['locationTracking'] is None:
if 'info' in webmap_data['applicationProperties']['editing']['locationTracking'] and \
not webmap_data['applicationProperties']['editing']['locationTracking']['info'] is None:
if 'layerId' in webmap_data['applicationProperties']['editing']['locationTracking']['info']:
if webmap_data['applicationProperties']['editing']['locationTracking']['info']['layerId'] == currentID:
webmap_data['applicationProperties']['editing']['locationTracking']['info']['layerId'] = opLayer['id']
if 'viewing' in webmap_data['applicationProperties'] and \
not webmap_data['applicationProperties']['viewing'] is None:
if 'search' in webmap_data['applicationProperties']['viewing'] and \
not webmap_data['applicationProperties']['viewing']['search'] is None:
if 'layers' in webmap_data['applicationProperties']['viewing']['search'] and \
not webmap_data['applicationProperties']['viewing']['search']['layers'] is None:
for k in range(len(webmap_data['applicationProperties']['viewing']['search']['layers'])):
searchlayer = webmap_data['applicationProperties']['viewing']['search']['layers'][k]
if searchlayer['id'] == currentID:
searchlayer['id'] = opLayer['id']
if 'fields' in searchlayer and \
not searchlayer['fields'] is None:
for i in range(len(searchlayer['fields'])):
searchlayer['fields'][i]['Name'] = str(searchlayer['fields'][i]['Name']).lower()
if 'field' in searchlayer and \
not searchlayer['field'] is None:
searchlayer['field']['name'] = searchlayer['field']['name'].lower()
webmap_data['applicationProperties']['viewing']['search']['layers'][k] = searchlayer
if 'applicationProperties' in webmap_data:
webmap_data['applicationProperties'] = common.find_replace(webmap_data['applicationProperties'], currentID, opLayer['id'])
resultMap['Tables'].append({"Name":opLayer['title'],"ID":opLayer['id']})
name = config['Title']
if 'DateTimeFormat' in config:
loc_df = config['DateTimeFormat']
else:
loc_df = dateTimeFormat
datestring = datetime.datetime.now().strftime(loc_df)
name = name.replace('{DATE}',datestring)
name = name.replace('{Date}',datestring)
description = config['Description']
tags = config['Tags']
snippet = config['Summary']
extent = config['Extent']
everyone = config['ShareEveryone']
org = config['ShareOrg']
groupNames = config['Groups'] #Groups are by ID. Multiple groups comma separated
folderName = config['Folder']
thumbnail = config['Thumbnail']
itemType = config['Type']
typeKeywords = config['typeKeywords']
if webmap_data is None:
return None
itemParams = arcrest.manageorg.ItemParameter()
itemParams.title = name
itemParams.thumbnail = thumbnail
itemParams.type = "Web Map"
itemParams.overwrite = True
itemParams.snippet = snippet
itemParams.description = description
itemParams.extent = extent
itemParams.tags = tags
itemParams.typeKeywords = ",".join(typeKeywords)
admin = arcrest.manageorg.Administration(securityHandler=self.securityhandler)
content = admin.content
userInfo = content.users.user()
userCommunity = admin.community
if folderName is not None and folderName != "":
if self.folderExist(name=folderName,folders=userInfo.folders) is None:
res = userInfo.createFolder(name=folderName)
userInfo.currentFolder = folderName
if 'id' in userInfo.currentFolder:
folderId = userInfo.currentFolder['id']
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title=name, itemType=itemType,searchorg=False)
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == itemType:
if 'name' in res and res['name'] == name:
itemId = res['id']
break
if 'title' in res and res['title'] == name:
itemId = res['id']
break
if not itemId is None:
item = content.getItem(itemId).userItem
results = item.updateItem(itemParameters=itemParams,
text=json.dumps(webmap_data))
if 'error' in results:
return results
if item.ownerFolder != folderId:
if folderId is None:
folderId = "/"
moveRes = userInfo.moveItems(items=item.id,folder=folderId)
else:
try:
item = userInfo.addItem(itemParameters=itemParams,
overwrite=True,
url=None,
relationshipType=None,
originItemId=None,
destinationItemId=None,
serviceProxyParams=None,
metadata=None,
text=json.dumps(webmap_data))
except Exception as e:
print (e)
if item is None:
return "Item could not be added"
group_ids = userCommunity.getGroupIDs(groupNames=groupNames)
shareResults = userInfo.shareItems(items=item.id,
groups=','.join(group_ids),
everyone=everyone,
org=org)
updateParams = arcrest.manageorg.ItemParameter()
updateParams.title = name
updateResults = item.updateItem(itemParameters=updateParams)
resultMap['Results']['itemId'] = item.id
resultMap['folderId'] = folderId
resultMap['Name'] = name
return resultMap
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "_publishMap",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
name = None
tags = None
description = None
extent = None
webmap_data = None
itemJson = None
update_service = None
admin = None
adminusercontent = None
resultMap = None
json_data = None
replaceItem = None
opLayers = None
opLayer = None
layers = None
item = None
response = None
layerIdx = None
updatedLayer = None
updated = None
text = None
itemParams = None
updateResults = None
loc_df = None
datestring = None
snippet = None
everyone = None
org = None
groupNames = None
folderName = None
thumbnail = None
itemType = None
typeKeywords = None
userCommunity = None
userContent = None
folderId = None
res = None
folderContent = None
itemId = None
group_ids = None
shareResults = None
updateParams = None
del name
del tags
del description
del extent
del webmap_data
del itemJson
del update_service
del admin
del adminusercontent
del resultMap
del json_data
del replaceItem
del opLayers
del opLayer
del layers
del item
del response
del layerIdx
del updatedLayer
del updated
del text
del itemParams
del updateResults
del loc_df
del datestring
del snippet
del everyone
del org
del groupNames
del folderName
del thumbnail
del itemType
del typeKeywords
del userCommunity
del userContent
del folderId
del res
del folderContent
del itemId
del group_ids
del shareResults
del updateParams
gc.collect()
#----------------------------------------------------------------------
def publishCombinedWebMap(self, maps_info, webmaps):
"""Publishes a combination of web maps.
Args:
            maps_info (list): A list of JSON configuration objects for the combined web maps to publish.
            webmaps (list): A list of web map item IDs whose operational layers and tables are merged into each new map.
Returns:
list: A list of results from :py:meth:`arcrest.manageorg._content.UserItem.updateItem`.
"""
if self.securityhandler is None:
print ("Security handler required")
return
admin = None
map_results = None
map_info = None
operationalLayers = None
tableLayers = None
item = None
response = None
opLays = None
operationalLayers = None
tblLays = None
tblLayer = None
itemInfo = None
try:
admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
map_results = []
for map_info in maps_info:
operationalLayers = []
tableLayers = []
for webmap in webmaps:
item = admin.content.getItem(itemId=webmap)
response = item.itemData()
if 'operationalLayers' in response:
opLays = []
for opLayer in response['operationalLayers']:
opLays.append(opLayer)
opLays.extend(operationalLayers)
operationalLayers = opLays
if 'tables' in response:
tblLays = []
for tblLayer in response['tables']:
tblLays.append(tblLayer)
tblLays.extend(tableLayers)
tableLayers = tblLays
if 'ReplaceTag' in map_info:
itemInfo = {"ReplaceTag":map_info['ReplaceTag'] }
else:
itemInfo = {"ReplaceTag":"{WebMap}" }
itemInfo['MapInfo'] = self._publishMap(config=map_info,
replaceInfo=None,
operationalLayers=operationalLayers,
tableLayers=tableLayers)
map_results.append(itemInfo)
if not itemInfo is None:
if not 'error' in itemInfo['MapInfo']['Results']:
print ("%s webmap created" % itemInfo['MapInfo']['Name'])
else:
print (str(itemInfo['MapInfo']['Results']))
else:
print ("Map not created")
return map_results
except Exception as e:
line, filename, synerror = trace()
            raise common.ArcRestHelperError({
                "function": "publishCombinedWebMap",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
admin = None
map_info = None
tableLayers = None
item = None
response = None
opLays = None
operationalLayers = None
tblLays = None
tblLayer = None
itemInfo = None
del admin
del map_info
del tableLayers
del item
del response
del opLays
del operationalLayers
del tblLays
del tblLayer
del itemInfo
gc.collect()
#----------------------------------------------------------------------
    def publishFsFromMXD(self, fs_config):
        """Publishes the layers in an MXD to a feature service.
Args:
fs_config (list): A list of JSON configuration feature service details to publish.
Returns:
            list: A list of result objects (one per feature service configuration).
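        Example:
            A minimal usage sketch; the keys mirror the ones read by the private
            helper below and the values are placeholder assumptions::
                res = publisher.publishFsFromMXD(fs_config=[{
                    "Mxd": "C:/data/roads.mxd", "Title": "Roads {DATE}",
                    "ShareEveryone": False, "ShareOrg": True, "Groups": "",
                    "Folder": "Demo", "Thumbnail": None}])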
"""
fs = None
res = None
resItm = None
if self.securityhandler is None:
print ("Security handler required")
return
if self.securityhandler.is_portal:
url = self.securityhandler.org_url
else:
url = 'http://www.arcgis.com'
try:
res = []
if isinstance(fs_config, list):
for fs in fs_config:
if 'ReplaceTag' in fs:
resItm = {"ReplaceTag":fs['ReplaceTag'] }
else:
resItm = {"ReplaceTag":"{FeatureService}" }
resItm['FSInfo'] = self._publishFSFromMXD(config=fs, url=url)
if not resItm['FSInfo'] is None and 'url' in resItm['FSInfo']:
print ("%s created" % resItm['FSInfo']['url'])
res.append(resItm)
else:
print (str(resItm['FSInfo']))
else:
if 'ReplaceTag' in fs_config:
resItm = {"ReplaceTag":fs_config['ReplaceTag'] }
else:
resItm = {"ReplaceTag":"{FeatureService}" }
resItm['FSInfo'] = self._publishFSFromMXD(config=fs_config, url=url)
if 'url' in resItm['FSInfo']:
print ("%s created" % resItm['FSInfo']['url'])
res.append(resItm)
else:
print (str(resItm['FSInfo']))
return res
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "publishFsFromMXD",
"line": line,
"filename": filename,
"synerror": synerror,
}
)
finally:
resItm = None
fs = None
del resItm
del fs
gc.collect()
#----------------------------------------------------------------------
def publishFeatureCollections(self, configs):
"""Publishes feature collections to a feature service.
Args:
configs (list): A list of JSON configuration feature service details to publish.
Returns:
            list: A list of result objects (one per feature collection configuration).
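        Example:
            A minimal usage sketch; only configs that contain a ``Zip`` key are
            published (see the check below) and the values are placeholder assumptions::
                res = publisher.publishFeatureCollections(configs=[{
                    "Zip": "C:/data/parcels.zip", "Title": "Parcels {DATE}",
                    "Tags": "demo", "Summary": "", "Extent": "-180,-90,180,90",
                    "ShareEveryone": False, "ShareOrg": True, "Groups": "",
                    "Folder": "Demo", "Thumbnail": None, "typeKeywords": []}])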
"""
if self.securityhandler is None:
print ("Security handler required")
return
config = None
res = None
resItm = None
try:
res = []
if isinstance(configs, list):
for config in configs:
if 'ReplaceTag' in config:
resItm = {"ReplaceTag":config['ReplaceTag'] }
else:
resItm = {"ReplaceTag":"{FeatureService}" }
if 'Zip' in config:
resItm['FCInfo'] = self._publishFeatureCollection(config=config)
if not resItm['FCInfo'] is None and 'id' in resItm['FCInfo']:
print ("%s feature collection created" % resItm['FCInfo']['id'])
res.append(resItm)
else:
print (str(resItm['FCInfo']))
return res
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "publishFeatureCollections",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
resItm = None
config = None
del resItm
del config
gc.collect()
#----------------------------------------------------------------------
def _publishFSFromMXD(self, config, url='http://www.arcgis.com'):
mxd = None
q = None
everyone = None
org = None
groupNames = None
folderName = None
thumbnail = None
capabilities = None
maxRecordCount = None
loc_df = None
datestring = None
service_name = None
service_name_safe = None
sd_Info = None
admin = None
itemParams = None
adminusercontent = None
userCommunity = None
userContent = None
folderId = None
res = None
folderContent = None
itemId = None
resultSD = None
publishParameters = None
resultFS = None
delres = None
status = None
group_ids = None
shareResults = None
updateParams = None
enableEditTracking = None
adminFS = None
json_dict = None
enableResults = None
layer = None
layers = None
layUpdateResult = None
definition = None
try:
# Report settings
dataFile = None
if 'Mxd' in config:
dataFile = config['Mxd']
elif 'Zip' in config:
dataFile = config['Zip']
# Service settings
service_name = config['Title']
everyone = config['ShareEveryone']
org = config['ShareOrg']
groupNames = config['Groups'] #Groups are by ID. Multiple groups comma separated
if 'EnableEditTracking' in config:
print ("enableEditTracking parameter has been deprecated, please add a definition section to the config")
enableEditTracking = config['EnableEditTracking']
else:
#print ("Please add an EnableEditTracking parameter to your feature service section")
enableEditTracking = False
folderName = config['Folder']
thumbnail = config['Thumbnail']
if 'Capabilities' in config:
print ("Capabilities parameter has been deprecated, please add a definition section to the config")
capabilities = config['Capabilities']
if 'Definition' in config:
definition = config['Definition']
if 'capabilities' in definition:
capabilities = definition['capabilities']
if 'maxRecordCount' in config:
maxRecordCount = config["maxRecordCount"]
else:
maxRecordCount = '1000' # If not cast as a string, the MXDtoFeatureServiceDef method called below returns an error stating 'cannot serialize 1000 (type int)'
if 'DateTimeFormat' in config:
loc_df = config['DateTimeFormat']
else:
loc_df = dateTimeFormat
skipIfExist = False
if 'SkipIfExist' in config:
skipIfExist = config['SkipIfExist']
if str(skipIfExist).lower() == 'true':
skipIfExist = True
datestring = datetime.datetime.now().strftime(loc_df)
service_name = service_name.replace('{DATE}',datestring)
service_name = service_name.replace('{Date}',datestring)
service_name_safe = service_name.replace(' ','_')
service_name_safe = service_name_safe.replace(':','_')
service_name_safe = service_name_safe.replace('-','_')
if os.path.exists(path=dataFile) == False:
raise ValueError("data file does not exit")
extension = os.path.splitext(dataFile)[1]
admin = arcrest.manageorg.Administration(securityHandler=self.securityhandler)
hostingServers = admin.hostingServers()
if len(hostingServers) == 0:
return "No hosting servers can be found, if this is portal, update the settings to include a hosting server."
content = admin.content
userInfo = content.users.user()
userCommunity = admin.community
if folderName is not None and folderName != "":
if self.folderExist(name=folderName,folders=userInfo.folders) is None:
res = userInfo.createFolder(name=folderName)
userInfo.currentFolder = folderName
if 'id' in userInfo.currentFolder:
folderId = userInfo.currentFolder['id']
if skipIfExist == True:
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title=service_name, itemType='Feature Service',searchorg=False)
if 'total' in items:
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == 'Feature Service':
if 'name' in res and res['name'] == service_name:
itemId = res['id']
break
if 'title' in res and res['title'] == service_name:
itemId = res['id']
break
if itemId is not None:
defItem = content.getItem(itemId)
results = {
"url": defItem.url,
"folderId": folderId,
"itemId": defItem.id,
"convertCase": self._featureServiceFieldCase,
"messages":"Exist"
}
return results
else:
print ("Error searching organzation, {0}".format(items))
if (extension == ".mxd"):
dataFileType = "serviceDefinition"
searchType = "Service Definition"
sd_Info = arcrest.common.servicedef.MXDtoFeatureServiceDef(mxd_path=dataFile,
service_name=service_name_safe,
tags=None,
description=None,
folder_name=None,
capabilities=capabilities,
maxRecordCount=maxRecordCount,
server_type='MY_HOSTED_SERVICES',
url=url)
if sd_Info is not None:
publishParameters = arcrest.manageorg.PublishSDParameters(tags=sd_Info['tags'],
overwrite='true')
elif (extension == ".zip"):
dataFileType = "Shapefile"
searchType = "Shapefile"
sd_Info = {'servicedef':dataFile,'tags':config['Tags']}
description = ""
if 'Description' in config:
description = config['Description']
publishParameters = arcrest.manageorg.PublishShapefileParameter(name=service_name,
layerInfo={'capabilities':capabilities},
description=description)
if 'hasStaticData' in definition:
publishParameters.hasStaticData = definition['hasStaticData']
if sd_Info is None:
print ("Publishing SD or Zip not valid")
raise common.ArcRestHelperError({
"function": "_publishFsFromMXD",
"line": lineno(),
"filename": 'publishingtools.py',
"synerror": "Publishing SD or Zip not valid"
})
itemParams = arcrest.manageorg.ItemParameter()
#if isinstance(hostingServers[0],arcrest.manageags.administration.AGSAdministration):
#itemParams.title = service_name_safe
#else:
#itemParams.title = service_name
itemParams.title = service_name
itemParams.thumbnail = thumbnail
itemParams.type = searchType
itemParams.overwrite = True
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title=service_name, itemType=searchType,searchorg=False)
defItem = None
defItemID = None
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == searchType:
if 'name' in res and res['name'] == service_name:
defItemID = res['id']
break
if 'title' in res and res['title'] == service_name:
defItemID = res['id']
break
#itemId = items['results'][0]['id']
if not defItemID is None:
defItem = content.getItem(defItemID).userItem
resultSD = defItem.updateItem(itemParameters=itemParams,
data=sd_Info['servicedef'])
if 'error' in resultSD:
return resultSD
if defItem.ownerFolder != folderId:
if folderId is None:
folderId = "/"
moveRes = userInfo.moveItems(items=defItem.id,folder=folderId)
else:
try:
defItem = userInfo.addItem(itemParameters=itemParams,
filePath=sd_Info['servicedef'],
overwrite=True,
url=None,
text=None,
relationshipType=None,
originItemId=None,
destinationItemId=None,
serviceProxyParams=None,
metadata=None)
except Exception as e:
print (e)
if defItem is None:
return "Item could not be added "
try:
serviceItem = userInfo.publishItem(
fileType=dataFileType,
itemId=defItem.id,
publishParameters=publishParameters,
overwrite = True,
wait=True)
except Exception as e:
print ("Error publishing item: Error Details: {0}".format(str(e)))
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title =service_name, itemType='Feature Service',searchorg=False)
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == 'Feature Service':
if 'name' in res and res['name'] == service_name:
itemId = res['id']
break
if 'title' in res and res['title'] == service_name:
itemId = res['id']
break
if not itemId is None:
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title =service_name_safe, itemType='Feature Service',searchorg=False)
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == 'Feature Service':
if 'name' in res and res['name'] == service_name_safe:
itemId = res['id']
break
if 'title' in res and res['title'] == service_name_safe:
itemId = res['id']
break
if not itemId is None:
existingItem = admin.content.getItem(itemId = itemId).userItem
if existingItem.url is not None:
adminFS = AdminFeatureService(url=existingItem.url, securityHandler=self._securityHandler)
cap = str(adminFS.capabilities)
existingDef = {}
if 'Sync' in cap:
print ("Disabling Sync")
capItems = cap.split(',')
if 'Sync' in capItems:
capItems.remove('Sync')
existingDef['capabilities'] = ','.join(capItems)
enableResults = adminFS.updateDefinition(json_dict=existingDef)
if 'error' in enableResults:
delres = userInfo.deleteItems(items=existingItem.id)
if 'error' in delres:
print (delres)
return delres
print ("Delete successful")
else:
print ("Sync Disabled")
else:
print ("Attempting to delete")
delres = userInfo.deleteItems(items=existingItem.id)
if 'error' in delres:
print (delres)
return delres
print ("Delete successful")
adminFS = None
del adminFS
else:
print ("Attempting to delete")
delres = userInfo.deleteItems(items=existingItem.id)
if 'error' in delres:
print (delres)
return delres
print ("Delete successful")
else:
print ("Item exist and cannot be found, probably owned by another user.")
raise common.ArcRestHelperError({
"function": "_publishFsFromMXD",
"line": lineno(),
"filename": 'publishingtools.py',
"synerror": "Item exist and cannot be found, probably owned by another user."
}
)
try:
serviceItem = userInfo.publishItem(
fileType=dataFileType,
itemId=defItem.id,
overwrite = True,
publishParameters=publishParameters,
wait=True)
except Exception as e:
print ("Overwrite failed, deleting")
delres = userInfo.deleteItems(items=existingItem.id)
if 'error' in delres:
print (delres)
return delres
print ("Delete successful")
try:
serviceItem = userInfo.publishItem(
fileType=dataFileType,
itemId=defItem.id,
overwrite = True,
publishParameters=publishParameters,
wait=True)
except Exception as e:
return e
results = {
"url": serviceItem.url,
"folderId": folderId,
"itemId": serviceItem.id,
"convertCase": self._featureServiceFieldCase,
"messages":""
}
group_ids = userCommunity.getGroupIDs(groupNames=groupNames)
shareResults = userInfo.shareItems(items=serviceItem.id,
groups=','.join(group_ids),
everyone=everyone,
org=org)
updateParams = arcrest.manageorg.ItemParameter()
updateParams.title = service_name
updateResults = serviceItem.updateItem(itemParameters=updateParams)
adminFS = AdminFeatureService(url=serviceItem.url, securityHandler=self._securityHandler)
if enableEditTracking == True or str(enableEditTracking).upper() == 'TRUE':
json_dict = {'editorTrackingInfo':{}}
json_dict['editorTrackingInfo']['allowOthersToDelete'] = True
json_dict['editorTrackingInfo']['allowOthersToUpdate'] = True
json_dict['editorTrackingInfo']['enableEditorTracking'] = True
json_dict['editorTrackingInfo']['enableOwnershipAccessControl'] = False
enableResults = adminFS.updateDefinition(json_dict=json_dict)
if 'error' in enableResults:
results['messages'] += enableResults
json_dict = {'editFieldsInfo':{}}
json_dict['editFieldsInfo']['creationDateField'] = ""
json_dict['editFieldsInfo']['creatorField'] = ""
json_dict['editFieldsInfo']['editDateField'] = ""
json_dict['editFieldsInfo']['editorField'] = ""
layers = adminFS.layers
tables = adminFS.tables
for layer in layers:
if layer.canModifyLayer is None or layer.canModifyLayer == True:
if layer.editFieldsInfo is None:
layUpdateResult = layer.addToDefinition(json_dict=json_dict)
if 'error' in layUpdateResult:
layUpdateResult['error']['layerid'] = layer.id
results['messages'] += layUpdateResult['error']
if not tables is None:
for layer in tables:
if layer.canModifyLayer is None or layer.canModifyLayer == True:
if layer.editFieldsInfo is None:
layUpdateResult = layer.addToDefinition(json_dict=json_dict)
if 'error' in layUpdateResult:
layUpdateResult['error']['layerid'] = layer.id
results['messages'] += layUpdateResult['error']
if definition is not None:
enableResults = adminFS.updateDefinition(json_dict=definition)
if enableResults is not None and 'error' in enableResults:
results['messages'] = enableResults
else:
if 'editorTrackingInfo' in definition:
if 'enableEditorTracking' in definition['editorTrackingInfo']:
if definition['editorTrackingInfo']['enableEditorTracking'] == True:
json_dict = {'editFieldsInfo':{}}
json_dict['editFieldsInfo']['creationDateField'] = ""
json_dict['editFieldsInfo']['creatorField'] = ""
json_dict['editFieldsInfo']['editDateField'] = ""
json_dict['editFieldsInfo']['editorField'] = ""
layers = adminFS.layers
tables = adminFS.tables
for layer in layers:
if layer.canModifyLayer is None or layer.canModifyLayer == True:
if layer.editFieldsInfo is None:
layUpdateResult = layer.addToDefinition(json_dict=json_dict)
if 'error' in layUpdateResult:
layUpdateResult['error']['layerid'] = layer.id
results['messages'] = layUpdateResult['error']
if not tables is None:
for layer in tables:
if layer.canModifyLayer is None or layer.canModifyLayer == True:
if layer.editFieldsInfo is None:
layUpdateResult = layer.addToDefinition(json_dict=json_dict)
if 'error' in layUpdateResult:
layUpdateResult['error']['layerid'] = layer.id
results['messages'] = layUpdateResult['error']
return results
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "_publishFsFromMXD",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
definition = None
mxd = None
q = None
everyone = None
org = None
groupNames = None
folderName = None
thumbnail = None
capabilities = None
maxRecordCount = None
loc_df = None
datestring = None
service_name = None
service_name_safe = None
sd_Info = None
admin = None
itemParams = None
userCommunity = None
userContent = None
folderId = None
res = None
folderContent = None
itemId = None
resultSD = None
publishParameters = None
resultFS = None
delres = None
status = None
group_ids = None
shareResults = None
updateParams = None
enableEditTracking = None
adminFS = None
json_dict = None
enableResults = None
layer = None
layers = None
layUpdateResult = None
del definition
del layer
del layers
del layUpdateResult
del mxd
del q
del everyone
del org
del groupNames
del folderName
del thumbnail
del capabilities
del maxRecordCount
del loc_df
del datestring
del service_name
del service_name_safe
del sd_Info
del admin
del itemParams
del userCommunity
del userContent
del folderId
del res
del folderContent
del itemId
del resultSD
del publishParameters
del resultFS
del delres
del status
del group_ids
del shareResults
del updateParams
del enableEditTracking
del adminFS
del json_dict
del enableResults
gc.collect()
#----------------------------------------------------------------------
def _publishAppLogic(self, appDet, map_info=None, fsInfo=None):
itemInfo = None
replaceInfo = None
replaceItem = None
mapDet = None
lay = None
itemId = None
admin = None
try:
admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
itemInfo = {}
if 'ReplaceInfo' in appDet:
replaceInfo = appDet['ReplaceInfo']
else:
replaceInfo = None
if replaceInfo != None:
for replaceItem in replaceInfo:
if fsInfo is not None:
for fsDet in fsInfo:
if 'ReplaceTag' in fsDet:
if 'ReplaceString' in replaceItem:
if fsDet is not None and replaceItem['ReplaceString'] == fsDet['ReplaceTag'] and \
(replaceItem['ReplaceType'] == 'Service' or replaceItem['ReplaceType'] == 'Layer'):
replaceItem['ReplaceString'] = fsDet['FSInfo']['url']
replaceItem['ItemID'] = fsDet['FSInfo']['itemId']
replaceItem['ItemFolder'] = fsDet['FSInfo']['folderId']
if 'convertCase' in fsDet['FSInfo']:
replaceItem['convertCase'] = fsDet['FSInfo']['convertCase']
replaceItem['ReplaceType'] = "Global"
if map_info is not None:
for mapDet in map_info:
if 'ReplaceTag' in mapDet:
if 'ReplaceString' in replaceItem:
if mapDet is not None and replaceItem['ReplaceString'] == mapDet['ReplaceTag'] and \
replaceItem['ReplaceType'] == 'Map':
replaceItem['ItemID'] = mapDet['MapInfo']['Results']['itemId']
replaceItem['ItemFolder'] = mapDet['MapInfo']['folderId']
replaceItem['LayerInfo'] = mapDet['MapInfo']['Layers']
elif mapDet is not None and replaceItem['ReplaceType'] == 'Layer':
repInfo = replaceItem['ReplaceString'].split("|")
if len(repInfo) == 2:
if repInfo[0] == mapDet['ReplaceTag']:
for key,value in mapDet['MapInfo']['Layers'].items():
if value["Name"] == repInfo[1]:
replaceItem['ReplaceString'] = value["ID"]
if 'ItemID' in replaceItem:
                        if 'ItemFolder' not in replaceItem:
itemId = replaceItem['ItemID']
itemInfo = admin.content.getItem(itemId=itemId)
if itemInfo.owner == self._securityHandler.username and itemInfo.ownerFolder:
                                replaceItem['ItemFolder'] = itemInfo.ownerFolder
else:
replaceItem['ItemFolder'] = None
if 'ReplaceTag' in appDet:
itemInfo = {"ReplaceTag":appDet['ReplaceTag'] }
else:
itemInfo = {"ReplaceTag":"{App}" }
if appDet['Type'] == 'Web Mapping Application':
itemInfo['AppInfo'] = self._publishApp(config=appDet,
replaceInfo=replaceInfo)
elif appDet['Type'] == 'Operation View':
itemInfo['AppInfo'] = self._publishDashboard(config=appDet,
replaceInfo=replaceInfo)
else:
itemInfo['AppInfo'] = self._publishApp(config=appDet,
replaceInfo=replaceInfo)
if not itemInfo['AppInfo'] is None:
if not 'error' in itemInfo['AppInfo']['Results'] :
print ("%s app created" % itemInfo['AppInfo']['Name'])
else:
print (str(itemInfo['AppInfo']['Results']))
else:
print ("App was not created")
return itemInfo
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "_publishAppLogic",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
replaceInfo = None
replaceItem = None
mapDet = None
lay = None
itemId = None
admin = None
del admin
del replaceInfo
del replaceItem
del mapDet
del lay
del itemId
gc.collect()
#----------------------------------------------------------------------
def publishApp(self, app_info, map_info=None, fsInfo=None):
"""Publishes apps to AGOL/Portal
Args:
app_info (list): A list of JSON configuration apps to publish.
map_info (list): Defaults to ``None``.
fsInfo (list): Defaults to ``None``.
Returns:
            list: A list of result objects (one per app configuration).
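        Example:
            A minimal usage sketch; ``map_info`` and ``fsInfo`` are typically the
            result lists returned by the web map and feature service publish calls
            (an assumption based on how the replace tags are resolved)::
                app_results = publisher.publishApp(app_info=app_configs,
                                                   map_info=map_results,
                                                   fsInfo=fs_results)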
"""
if self.securityhandler is None:
print ("Security handler required")
return
appDet = None
try:
app_results = []
if isinstance(app_info, list):
for appDet in app_info:
app_results.append(self._publishAppLogic(appDet=appDet,map_info=map_info,fsInfo=fsInfo))
else:
app_results.append(self._publishAppLogic(appDet=app_info,map_info=map_info,fsInfo=fsInfo))
return app_results
except (common.ArcRestHelperError) as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "publishApp",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
appDet = None
del appDet
gc.collect()
#----------------------------------------------------------------------
def _publishApp(self, config, replaceInfo):
resultApp = None
name = None
tags = None
description = None
extent = None
itemJson = None
admin = None
json_data = None
itemData = None
replaceItem = None
loc_df = None
datestring = None
snippet = None
everyone = None
org = None
groupNames = None
folderName = None
url = None
thumbnail = None
itemType = None
typeKeywords = None
itemParams = None
userCommunity = None
userContent = None
res = None
folderId = None
folderContent = None
itemId = None
group_ids = None
shareResults = None
updateParams = None
url = None
updateResults = None
portal = None
try:
resultApp = {'Results':{}}
name = ''
tags = ''
description = ''
extent = ''
itemJson = config['ItemJSON']
if os.path.exists(itemJson) == False:
return {"Results":{"error": "%s does not exist" % itemJson} }
admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
portalself = admin.portals.portalSelf
if portalself.urlKey is None or portalself.customBaseUrl is None:
parsedURL = urlparse.urlparse(url=self._securityHandler.org_url, scheme='', allow_fragments=True)
orgURL = parsedURL.netloc + parsedURL.path
else:
orgURL = portalself.urlKey + '.' + portalself.customBaseUrl
content = admin.content
userInfo = content.users.user()
userCommunity = admin.community
folderName = config['Folder']
if folderName is not None and folderName != "":
if self.folderExist(name=folderName,folders=userInfo.folders) is None:
res = userInfo.createFolder(name=folderName)
userInfo.currentFolder = folderName
if 'id' in userInfo.currentFolder:
folderId = userInfo.currentFolder['id']
if os.path.exists(itemJson):
with open(itemJson) as json_data:
try:
itemData = json.load(json_data)
except:
raise ValueError("%s is not a valid JSON File" % itemJson)
for replaceItem in replaceInfo:
if replaceItem['ReplaceType'] == 'Map' and 'ItemID' in replaceItem:
if 'values' in itemData:
if 'webmap' in itemData['values']:
if itemData['values']['webmap'] == replaceItem['SearchString']:
itemData['values']['webmap'] = replaceItem['ItemID']
if 'folderId' in itemData:
itemData['folderId'] = replaceItem['ItemFolder']
if 'map' in itemData:
if 'itemId' in itemData['map']:
if itemData['map']['itemId'] == replaceItem['SearchString']:
itemData['map']['itemId'] = replaceItem['ItemID']
elif replaceItem['ReplaceType'] == 'Layer' and 'ReplaceString' in replaceItem:
itemData = common.find_replace(itemData,replaceItem['SearchString'],replaceItem['ReplaceString'])
elif replaceItem['ReplaceType'] == 'Folder':
if 'id' in userInfo.currentFolder:
folderID = userInfo.currentFolder['id']
else:
folderID = None
itemData = common.find_replace(itemData,replaceItem['SearchString'],folderID)
elif replaceItem['ReplaceType'] == 'Org':
itemData = common.find_replace(itemData,replaceItem['SearchString'],orgURL)
elif replaceItem['ReplaceType'] == 'GeoService':
if 'geometry' in portalself.helperServices:
if 'url' in portalself.helperServices["geometry"]:
itemData = common.find_replace(itemData,replaceItem['SearchString'],portalself.helperServices["geometry"]['url'])
elif replaceItem['ReplaceType'] == 'Global':
itemData = common.find_replace(itemData,replaceItem['SearchString'],replaceItem['ReplaceString'])
else:
print ("%s does not exist." % itemJson)
itemData = None
name = config['Title']
if 'DateTimeFormat' in config:
loc_df = config['DateTimeFormat']
else:
loc_df = dateTimeFormat
datestring = datetime.datetime.now().strftime(loc_df)
name = name.replace('{DATE}',datestring)
name = name.replace('{Date}',datestring)
description = config['Description']
tags = config['Tags']
snippet = config['Summary']
everyone = config['ShareEveryone']
org = config['ShareOrg']
groupNames = config['Groups'] #Groups are by ID. Multiple groups comma separated
url = config['Url']
thumbnail = config['Thumbnail']
itemType = config['Type']
typeKeywords = config['typeKeywords']
itemParams = arcrest.manageorg.ItemParameter()
itemParams.title = name
itemParams.thumbnail = thumbnail
itemParams.type = itemType
itemParams.overwrite = True
itemParams.description = description
itemParams.tags = tags
itemParams.snippet = snippet
itemParams.description = description
itemParams.typeKeywords = ",".join(typeKeywords)
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title=name,
itemType=
["Web Mapping Application",
"Application"],
searchorg=False)
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == itemType:
if 'name' in res and res['name'] == name:
itemId = res['id']
break
if 'title' in res and res['title'] == name:
itemId = res['id']
break
if not itemId is None:
item = content.getItem(itemId).userItem
results = item.updateItem(itemParameters=itemParams,
text=json.dumps(itemData))
if 'error' in results:
return results
if item.ownerFolder != folderId:
if folderId is None:
folderId = "/"
moveRes = userInfo.moveItems(items=item.id,folder=folderId)
else:
try:
item = userInfo.addItem(
itemParameters=itemParams,
overwrite=True,
relationshipType=None,
originItemId=None,
destinationItemId=None,
serviceProxyParams=None,
metadata=None,
text=json.dumps(itemData))
except Exception as e:
print (e)
if item is None:
return "App could not be added"
group_ids = userCommunity.getGroupIDs(groupNames=groupNames)
shareResults = userInfo.shareItems(items=item.id,
groups=','.join(group_ids),
everyone=everyone,
org=org)
updateParams = arcrest.manageorg.ItemParameter()
updateParams.title = name
url = url.replace("{AppID}",item.id)
url = url.replace("{OrgURL}",orgURL)
#if portalself.urlKey is None or portalself.customBaseUrl is None:
#parsedURL = urlparse.urlparse(url=self._securityHandler.org_url, scheme='', allow_fragments=True)
#else:
#url = url.replace("{OrgURL}", portalself.urlKey + '.' + portalself.customBaseUrl)
updateParams.url = url
updateResults = item.updateItem(itemParameters=updateParams)
resultApp['Results']['itemId'] = item.id
resultApp['folderId'] = folderId
resultApp['Name'] = name
return resultApp
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "_publishApp",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
name = None
tags = None
description = None
extent = None
itemJson = None
admin = None
adminusercontent = None
json_data = None
itemData = None
replaceItem = None
loc_df = None
datestring = None
snippet = None
everyone = None
org = None
groupNames = None
folderName = None
url = None
thumbnail = None
itemType = None
typeKeywords = None
itemParams = None
userCommunity = None
userContent = None
res = None
folderId = None
folderContent = None
itemId = None
group_ids = None
shareResults = None
updateParams = None
url = None
updateResults = None
portal = None
del name
del portal
del tags
del description
del extent
del itemJson
del admin
del adminusercontent
del json_data
del itemData
del replaceItem
del loc_df
del datestring
del snippet
del everyone
del org
del groupNames
del folderName
del url
del thumbnail
del itemType
del typeKeywords
del itemParams
del userCommunity
del userContent
del res
del folderId
del folderContent
del itemId
del group_ids
del shareResults
del updateParams
del updateResults
gc.collect()
#----------------------------------------------------------------------
def _publishDashboard(self, config, replaceInfo):
resultApp = None
tags = None
description = None
extent = None
itemJson = None
layerIDSwitch = None
admin = None
adminusercontent = None
json_data = None
itemData = None
replaceItem = None
item = None
response = None
layerNamesID = None
layerIDs = None
tableNamesID = None
tableIDs = None
opLayer = None
widget = None
widgets = None
mapTool = None
dataSource = None
configFileAsString = None
repl = None
name = None
loc_df = None
datestring = None
snippet = None
everyone = None
org = None
groupNames = None
folderName = None
thumbnail = None
itemType = None
typeKeywords = None
itemParams = None
adminusercontent = None
userCommunity = None
userContent = None
folderId = None
res = None
folderContent = None
itemId = None
group_ids = None
shareResults = None
updateParams = None
resultApp = None
updateResults = None
try:
resultApp = {'Results':{}}
tags = ''
description = ''
extent = ''
itemJson = config['ItemJSON']
if os.path.exists(itemJson) == False:
return {"Results":{"error": "%s does not exist" % itemJson} }
admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
content = admin.content
userInfo = content.users.user()
userCommunity = admin.community
folderName = config['Folder']
if folderName is not None and folderName != "":
if self.folderExist(name=folderName,folders=userInfo.folders) is None:
res = userInfo.createFolder(name=folderName)
userInfo.refresh()
userInfo.currentFolder = folderName
if 'id' in userInfo.currentFolder:
folderId = userInfo.currentFolder['id']
layerIDSwitch = []
if os.path.exists(itemJson):
with open(itemJson) as json_data:
try:
itemData = json.load(json_data)
except:
raise ValueError("%s is not a valid JSON File" % itemJson)
for replaceItem in replaceInfo:
if replaceItem['ReplaceType'] == 'Global':
itemData = common.find_replace(itemData,replaceItem['SearchString'],replaceItem['ReplaceString'])
elif replaceItem['ReplaceType'] == 'Map' and 'ItemID' in replaceItem:
item = admin.content.getItem(itemId=replaceItem['ItemID'])
response = item.itemData()
layerNamesID = {}
layerIDs =[]
tableNamesID = {}
tableIDs =[]
if 'operationalLayers' in response:
for opLayer in response['operationalLayers']:
#if 'LayerInfo' in replaceItem:
#for layers in replaceItem['LayerInfo']:
layerNamesID[opLayer['title']] = opLayer['id']
layerIDs.append(opLayer['id'])
if 'tables' in response:
for opLayer in response['tables']:
tableNamesID[opLayer['title']] = opLayer['id']
tableIDs.append(opLayer['id'])
widgets = itemData['widgets']
dataSourceIDToFields = {}
for widget in widgets:
if 'mapId' in widget:
if replaceItem['SearchString'] == widget['mapId']:
widget['mapId'] = replaceItem['ItemID']
if 'mapTools' in widget:
for mapTool in widget['mapTools']:
if 'layerIds' in mapTool:
mapTool['layerIds'] = layerIDs
if 'dataSources' in widget:
for dataSource in widget['dataSources']:
if 'layerId' in dataSource:
if 'LayerInfo' in replaceItem:
if dataSource['layerId'] in replaceItem['LayerInfo']:
layerIDSwitch.append({"OrigID":dataSource['layerId'],
"NewID":replaceItem['LayerInfo'][dataSource['layerId']]['ID']})
#'FieldInfo':replaceItem['LayerInfo'][dataSource['layerId']]['FieldInfo']})
#dataSourceIDToFields[dataSource['id']] = {'NewID': replaceItem['LayerInfo'][dataSource['layerId']]['ID'],
#'FieldInfo': replaceItem['LayerInfo'][dataSource['layerId']]['FieldInfo']}
dataSource['layerId'] = replaceItem['LayerInfo'][dataSource['layerId']]['ID']
elif dataSource['name'] in layerNamesID:
layerIDSwitch.append({"OrigID":dataSource['layerId'],"NewID":layerNamesID[dataSource['name']] })
dataSource['layerId'] = layerNamesID[dataSource['name']]
for dataSource in widget['dataSources']:
if 'filter' in dataSource:
if dataSource['parentDataSourceId'] in dataSourceIDToFields:
if 'whereClause' in dataSource['filter']:
whercla = str(dataSource['filter']['whereClause'])
if pyparsingInstall:
try:
selectResults = select_parser.select_stmt.parseString("select * from xyzzy where " + whercla)
whereElements = list(selectResults['where_expr'])
for h in range(len(whereElements)):
for field in dataSourceIDToFields[dataSource['parentDataSourceId']]['FieldInfo']['fields']:
if whereElements[h] == field['PublishName']:
whereElements[h] = field['ConvertName']
#whercla = whercla.replace(
#old=field['PublishName'],
#new=field['ConvertName'])
dataSource['filter']['whereClause'] = " ".join(whereElements)
                                                                except select_parser.ParseException as pe:
                                                                    for field in dataSourceIDToFields[dataSource['parentDataSourceId']]['FieldInfo']['fields']:
                                                                        if field['PublishName'] in whercla:
                                                                            whercla = whercla.replace(field['PublishName'], field['ConvertName'])
                                                                    dataSource['filter']['whereClause'] = whercla
                                                            else:
                                                                for field in dataSourceIDToFields[dataSource['parentDataSourceId']]['FieldInfo']['fields']:
                                                                    if field['PublishName'] in whercla:
                                                                        whercla = whercla.replace(field['PublishName'], field['ConvertName'])
                                                                dataSource['filter']['whereClause'] = whercla
configFileAsString = json.dumps(itemData)
for repl in layerIDSwitch:
                        configFileAsString = configFileAsString.replace(repl['OrigID'], repl['NewID'])
itemData = json.loads(configFileAsString)
name = config['Title']
if 'DateTimeFormat' in config:
loc_df = config['DateTimeFormat']
else:
loc_df = dateTimeFormat
datestring = datetime.datetime.now().strftime(loc_df)
name = name.replace('{DATE}',datestring)
name = name.replace('{Date}',datestring)
description = config['Description']
tags = config['Tags']
snippet = config['Summary']
everyone = config['ShareEveryone']
org = config['ShareOrg']
groupNames = config['Groups'] #Groups are by ID. Multiple groups comma separated
folderName = config['Folder']
thumbnail = config['Thumbnail']
itemType = config['Type']
typeKeywords = config['typeKeywords']
itemParams = arcrest.manageorg.ItemParameter()
itemParams.title = name
itemParams.thumbnail = thumbnail
itemParams.type = itemType
itemParams.overwrite = True
itemParams.description = description
itemParams.snippet = snippet
itemParams.typeKeywords = ",".join(typeKeywords)
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title=name, itemType=
["Web Mapping Application",
"Application",
"Operation View"],
searchorg=False)
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == itemType:
if 'name' in res and res['name'] == name:
itemId = res['id']
break
if 'title' in res and res['title'] == name:
itemId = res['id']
break
if not itemId is None:
item = content.getItem(itemId).userItem
results = item.updateItem(itemParameters=itemParams,
text=json.dumps(itemData))
if 'error' in results:
return results
if item.ownerFolder != folderId:
if folderId is None:
folderId = "/"
moveRes = userInfo.moveItems(items=item.id,folder=folderId)
else:
try:
item = userInfo.addItem(
itemParameters=itemParams,
relationshipType=None,
originItemId=None,
destinationItemId=None,
serviceProxyParams=None,
metadata=None,
text=json.dumps(itemData))
except Exception as e:
print (e)
if item is None:
return "Dashboard could not be added"
group_ids = userCommunity.getGroupIDs(groupNames=groupNames)
shareResults = userInfo.shareItems(items=item.id,
groups=','.join(group_ids),
everyone=everyone,
org=org)
updateParams = arcrest.manageorg.ItemParameter()
updateParams.title = name
updateResults = item.updateItem(itemParameters=updateParams)
resultApp['Results']['itemId'] = item.id
resultApp['folderId'] = folderId
resultApp['Name'] = name
return resultApp
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "_publishDashboard",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
tags = None
description = None
extent = None
itemJson = None
layerIDSwitch = None
admin = None
adminusercontent = None
json_data = None
itemData = None
replaceItem = None
item = None
response = None
layerNamesID = None
layerIDs = None
tableNamesID = None
tableIDs = None
opLayer = None
widget = None
widgets = None
mapTool = None
dataSource = None
configFileAsString = None
repl = None
name = None
loc_df = None
datestring = None
snippet = None
everyone = None
org = None
groupNames = None
folderName = None
thumbnail = None
itemType = None
typeKeywords = None
itemParams = None
adminusercontent = None
userCommunity = None
userContent = None
folderId = None
res = None
folderContent = None
itemId = None
group_ids = None
shareResults = None
updateParams = None
updateResults = None
del tags
del description
del extent
del itemJson
del layerIDSwitch
del admin
del json_data
del itemData
del replaceItem
del item
del response
del layerNamesID
del layerIDs
del tableNamesID
del tableIDs
del opLayer
del widget
del widgets
del mapTool
del dataSource
del configFileAsString
del repl
del name
del loc_df
del datestring
del snippet
del everyone
del org
del groupNames
del folderName
del thumbnail
del itemType
del typeKeywords
del itemParams
del adminusercontent
del userCommunity
del userContent
del folderId
del res
del folderContent
del itemId
del group_ids
del shareResults
del updateParams
del updateResults
gc.collect()
#----------------------------------------------------------------------
def updateFeatureService(self, efs_config):
"""Updates a feature service.
Args:
efs_config (list): A list of JSON configuration feature service details to update.
Returns:
            list: A list of result objects with the delete and add details.
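        Example:
            A minimal usage sketch; the keys mirror the checks below and the
            values are placeholder assumptions::
                res = publisher.updateFeatureService(efs_config=[{
                    "ItemId": "<feature service item id>", "LayerName": "Sites",
                    "FeatureClass": "C:/data/sites.gdb/Sites", "ChunkSize": 1000,
                    "DeleteInfo": {"Delete": "True", "DeleteSQL": "1=1"}}])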
"""
if self.securityhandler is None:
print ("Security handler required")
return
fsRes = None
fst = None
fURL = None
resItm= None
try:
fsRes = []
fst = featureservicetools.featureservicetools(securityinfo=self)
if isinstance(efs_config, list):
for ext_service in efs_config:
fURL = None
cs = 0
try:
if 'ChunkSize' in ext_service:
if common.is_number(ext_service['ChunkSize']):
cs = ext_service['ChunkSize']
except Exception as e:
pass
resItm={"DeleteDetails": None,"AddDetails":None}
if 'ItemId' in ext_service and 'LayerName' in ext_service:
fs = fst.GetFeatureService(itemId=ext_service['ItemId'],returnURLOnly=False)
if not fs is None:
fURL = fst.GetLayerFromFeatureService(fs=fs,layerName=ext_service['LayerName'],returnURLOnly=True)
if fURL is None and 'URL' in ext_service:
fURL = ext_service['URL']
if fURL is None:
print("Item and layer not found or URL not in config")
continue
if 'DeleteInfo' in ext_service:
if str(ext_service['DeleteInfo']['Delete']).upper() == "TRUE":
resItm['DeleteDetails'] = fst.DeleteFeaturesFromFeatureLayer(url=fURL, sql=ext_service['DeleteInfo']['DeleteSQL'],chunksize=cs)
if not 'error' in resItm['DeleteDetails'] :
print ("Delete Successful: %s" % fURL)
else:
print (str(resItm['DeleteDetails']))
resItm['AddDetails'] = fst.AddFeaturesToFeatureLayer(url=fURL, pathToFeatureClass = ext_service['FeatureClass'],chunksize=cs)
fsRes.append(resItm)
if not 'error' in resItm['AddDetails']:
print ("Add Successful: %s " % fURL)
else:
print (str(resItm['AddDetails']))
else:
resItm={"DeleteDetails": None,"AddDetails":None}
fURL = efs_config['URL']
cs = 0
try:
if 'ChunkSize' in efs_config:
if common.is_number(efs_config['ChunkSize']):
cs = efs_config['ChunkSize']
except Exception as e:
pass
if 'ItemId' in efs_config and 'LayerName' in efs_config:
fs = fst.GetFeatureService(itemId=efs_config['ItemId'],returnURLOnly=False)
if not fs is None:
fURL = fst.GetLayerFromFeatureService(fs=fs,layerName=efs_config['LayerName'],returnURLOnly=True)
if fURL is None and 'URL' in efs_config:
fURL = efs_config['URL']
if fURL is None:
print("Item and layer not found or URL not in config")
return None
if 'DeleteInfo' in efs_config:
if str(efs_config['DeleteInfo']['Delete']).upper() == "TRUE":
resItm['DeleteDetails'] = fst.DeleteFeaturesFromFeatureLayer(url=fURL, sql=efs_config['DeleteInfo']['DeleteSQL'],chunksize=cs)
if not 'error' in resItm['DeleteDetails'] :
print (" Delete Successful: %s" % fURL)
else:
print (" " + str(resItm['DeleteDetails']))
resItm['AddDetails'] = fst.AddFeaturesToFeatureLayer(url=fURL, pathToFeatureClass = efs_config['FeatureClass'],chunksize=cs)
fsRes.append(resItm)
if not 'error' in resItm['AddDetails']:
print (" Add Successful: %s " % fURL)
else:
print (" " + str(resItm['AddDetails']))
return fsRes
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "updateFeatureService",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
fst = None
fURL = None
resItm= None
del fst
del fURL
del resItm
gc.collect()
#----------------------------------------------------------------------
def _publishFeatureCollection(self, config):
try:
# Service settings
zipfile = config['Zip']
service_name = config['Title']
if 'DateTimeFormat' in config:
loc_df = config['DateTimeFormat']
else:
loc_df = dateTimeFormat
description = ""
if 'Description' in config:
description = config['Description']
tags = config['Tags']
snippet = config['Summary']
extent = config['Extent']
everyone = config['ShareEveryone']
org = config['ShareOrg']
groupNames = config['Groups'] #Groups are by ID. Multiple groups comma separated
folderName = config['Folder']
thumbnail = config['Thumbnail']
typeKeywords = config['typeKeywords']
datestring = datetime.datetime.now().strftime(loc_df)
service_name = service_name.replace('{DATE}',datestring)
service_name = service_name.replace('{Date}',datestring)
service_name_safe = service_name.replace(' ','_')
service_name_safe = service_name_safe.replace(':','_')
service_name_safe = service_name_safe.replace('-','_')
if os.path.exists(path=zipfile) == False:
raise ValueError("Zip does not exit")
admin = arcrest.manageorg.Administration(securityHandler=self.securityhandler)
content = admin.content
feature_content = content.FeatureContent
publishParameters = arcrest.manageorg.GenerateParameter(
name=service_name,maxRecordCount=4000
)
fcResults = feature_content.generate(publishParameters=publishParameters,
itemId=None,
filePath=zipfile,
fileType='shapefile')
if not 'featureCollection' in fcResults:
raise common.ArcRestHelperError({
"function": "_publishFeatureCollection",
"line": lineno(),
"filename": 'publishingtools.py',
"synerror": fcResults
})
if not 'layers' in fcResults['featureCollection']:
raise common.ArcRestHelperError({
"function": "_publishFeatureCollection",
"line": lineno(),
"filename": 'publishingtools.py',
"synerror": fcResults
})
fcJson = {'visibility':True,
'showLegend':True,
'opacity':1}
for layer in fcResults['featureCollection']['layers']:
oidFldName = ''
highOID = -1
popInfo = {'title':'',
'description':None,
'showAttachments': False,
'mediaInfo': [],
'fieldInfos': []
}
if 'layerDefinition' in layer:
extVal = extent.split(',')
layer['layerDefinition']['extent'] = {'type':'extent',
'xmin':extVal[0],
'ymin':extVal[1],
'xmax':extVal[2],
'ymax':extVal[3]
}
layer['layerDefinition']['spatialReference'] = {'wkid':102100}
if 'fields' in layer['layerDefinition']:
for field in layer['layerDefinition']['fields']:
fieldInfos = None
if field['type'] == 'esriFieldTypeOID':
oidFldName = field['name']
fieldInfos = {
'fieldName':field['name'],
'label':field['alias'],
'isEditable':False,
'tooltip':'',
'visible':False,
'format':None,
'stringFieldOption':'textbox'
}
elif field['type'] == 'esriFieldTypeInteger':
fieldInfos = {
'fieldName':field['name'],
'label':field['alias'],
'isEditable':True,
'tooltip':'',
'visible':True,
'format':{
'places':0,
'digitSeparator':True
},
'stringFieldOption':'textbox'
}
elif field['type'] == 'esriFieldTypeDouble':
fieldInfos = {
'fieldName':field['name'],
'label':field['alias'],
'isEditable':True,
'tooltip':'',
'visible':True,
'format':{
'places':2,
'digitSeparator':True
},
'stringFieldOption':'textbox'
}
elif field['type'] == 'esriFieldTypeString':
fieldInfos = {
'fieldName':field['name'],
'label':field['alias'],
'isEditable':True,
'tooltip':'',
'visible':True,
'format':None,
'stringFieldOption':'textbox'
}
else:
fieldInfos = {
'fieldName':field['name'],
'label':field['alias'],
'isEditable':True,
'tooltip':'',
'visible':True,
'format':None,
'stringFieldOption':'textbox'
}
if fieldInfos is not None:
popInfo['fieldInfos'].append(fieldInfos)
if 'featureSet' in layer:
if 'features' in layer['featureSet']:
for feature in layer['featureSet']['features']:
if 'attributes' in feature:
if feature['attributes'][oidFldName] > highOID:
                                    highOID = feature['attributes'][oidFldName]
layer['nextObjectId'] = highOID + 1
fcJson['layers'] = fcResults['featureCollection']['layers']
itemParams = arcrest.manageorg.ItemParameter()
itemParams.type = "Feature Collection"
itemParams.title = service_name
itemParams.thumbnail = thumbnail
itemParams.overwrite = True
itemParams.snippet = snippet
itemParams.description = description
itemParams.extent = extent
itemParams.tags = tags
itemParams.typeKeywords = ",".join(typeKeywords)
userInfo = content.users.user()
userCommunity = admin.community
if folderName is not None and folderName != "":
if self.folderExist(name=folderName,folders=userInfo.folders) is None:
res = userInfo.createFolder(name=folderName)
userInfo.currentFolder = folderName
if 'id' in userInfo.currentFolder:
folderId = userInfo.currentFolder['id']
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title=service_name, itemType='Feature Collection',searchorg=False)
itemId = None
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == 'Feature Collection':
if 'name' in res and res['name'] == service_name:
itemId = res['id']
break
if 'title' in res and res['title'] == service_name:
itemId = res['id']
break
if not itemId is None:
item = content.getItem(itemId).userItem
resultSD = item.updateItem(itemParameters=itemParams,
text=fcJson)
if item.ownerFolder != folderId:
if folderId is None:
folderId = "/"
moveRes = userInfo.moveItems(items=item.id,folder=folderId)
else:
resultSD = userInfo.addItem(itemParameters=itemParams,
overwrite=True,
url=None,
text= fcJson,
relationshipType=None,
originItemId=None,
destinationItemId=None,
serviceProxyParams=None,
metadata=None)
if 'error' in resultSD:
if not itemId is None:
print ("Attempting to delete")
delres=userInfo.deleteItems(items=itemId)
if 'error' in delres:
print (delres)
return delres
print ("Delete successful")
else:
print ("Item exist and cannot be found, probably owned by another user.")
raise common.ArcRestHelperError({
"function": "_publishFeatureCollection",
"line": lineno(),
"filename": 'publishingtools.py',
"synerror": "Item exist and cannot be found, probably owned by another user."
})
resultSD = userInfo.addItem(itemParameters=itemParams,
overwrite=True,
url=None,
text=fcResults['featureCollection'],
relationshipType=None,
originItemId=None,
destinationItemId=None,
serviceProxyParams=None,
metadata=None)
return resultSD
else:
return resultSD
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "_publishFeatureCollection",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
gc.collect()
|
apache-2.0
| -5,585,332,328,714,547,000 | 41.834936 | 201 | 0.440428 | false | 5.663644 | true | false | false |
AnhellO/DAS_Sistemas
|
Ene-Jun-2021/perez-sanchez-jose-jahir/Examen/Ejercicio4/chain_of_test.py
|
1
|
1251
|
import unittest
from chain_of import *
# Tests for the chain-of-responsibility cashier. Because each handler ends in a
# print rather than a return, handle() evaluates to None, so the assertions below
# compare against None just to make the tests pass. This is admittedly weak: the
# tests would be more meaningful if the printed output were captured (for example
# with unittest.mock), since running them does print what each call should produce.
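# A minimal sketch of how the printed output could be captured and asserted with
# unittest.mock instead of comparing against None (the expected substring below is
# a placeholder assumption, not taken from chain_of.py):
#
#   import io
#   from unittest.mock import patch
#
#   with patch('sys.stdout', new=io.StringIO()) as fake_out:
#       b50.handle(80)
#   self.assertIn('50', fake_out.getvalue())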
class CajeroTest(unittest.TestCase):
def test_cadena_correcta(self):
b50 = Cajero50ConcreteHandler()
b20 = Cajero20ConcreteHandler()
b10 = Cajero10ConcreteHandler()
b50.next_succesor(b20).next_succesor(b10)
self.assertEqual(b50.handle(80), None)
def test_cadena_incorrecta(self):
b50 = Cajero50ConcreteHandler()
b20 = Cajero20ConcreteHandler()
b10 = Cajero10ConcreteHandler()
b10.next_succesor(b20).next_succesor(b50)
self.assertEqual(b10.handle(135), None)
def test_cadena_20_primero(self):
b50 = Cajero50ConcreteHandler()
b20 = Cajero20ConcreteHandler()
b10 = Cajero10ConcreteHandler()
b20.next_succesor(b50).next_succesor(b10)
self.assertEqual(b20.handle(90), None)
if __name__ == "__main__":
unittest.main()
|
mit
| 4,310,769,567,808,078,300 | 42.172414 | 182 | 0.68745 | false | 3.11194 | true | false | false |
ktnyt/autosearch
|
autosearch/searcher.py
|
1
|
1549
|
from autosearch.form import Form
from autosearch.path import Path, PathFinder
from autosearch.utils import *
class Searcher(object):
def __init__(self, url, query):
self.url = url
top = addScore(parse(fetch(url)), query)
forms = []
paths = []
scores = []
# Find forms with text inputs
for form in top.find_all('form'):
if 'action' not in form.attrs:
continue
form['action'] = absolutify(url, form['action'])
for input in form.find_all('input'):
attrs = input.attrs
if 'type' not in attrs or attrs['type'] in ['text', 'search']:
forms.append(Form(form))
if not len(forms):
return
# Try each form
for form in forms:
result = form(query)
finder = PathFinder().fromDom(result, tag='a', attr='href')
path, score = finder.bestPath()
paths.append(path)
scores.append(score)
# Find best form
i = argmax(scores)
form, path = forms[i], paths[i]
self.form = form
self.path = path.stringify()
def __call__(self, query):
form = self.form
path = self.path
result = form(query)
finder = PathFinder().fromDom(result, tag='a', attr='href')
matches, scores = finder.matchPath(path, string=True)
return [absolutify(form.action, match.elements[-1]['href']) for match in matches], scores
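# A minimal usage sketch (the URL and query strings are placeholders):
#   searcher = Searcher("http://example.com", "first query")
#   links, scores = searcher("follow-up query")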
|
mit
| -2,114,310,529,578,682,600 | 29.98 | 97 | 0.53583 | false | 4.130667 | false | false | false |
PaulEcoffet/stonewallsgate
|
dunwallsgate/test/test_battle.py
|
1
|
5888
|
import unittest
from collections import Counter
import battle
import inventory
from character import Character
class TestBattle(unittest.TestCase):
def test_init_battle(self):
char1 = Character(None, maxhealth=100, initiative=1000)
char2 = Character(None, maxhealth=100, initiative=1)
battle1 = battle.Battle([char1], [char2])
self.assertListEqual(battle1.team1, [char1])
self.assertListEqual(battle1.team2, [char2])
self.assertIs(battle1.playing_char, char1)
def test_possible_target(self):
char1 = Character(None, maxhealth=100, initiative=1000)
char2 = Character(None, maxhealth=100, initiative=10)
char3 = Character(None, maxhealth=100, initiative=1)
battle1 = battle.Battle([char1], [char2, char3])
self.assertEqual(Counter(battle1.possible_targets_attack()),
Counter([char2, char3]))
battle1.end_turn()
self.assertEqual(Counter(battle1.possible_targets_attack()),
Counter([char1]))
def test_do_attack(self):
char1 = Character(None, maxhealth=100, initiative=1000, attack=2)
char2 = Character(None, maxhealth=100, initiative=1, defense=0)
battle1 = battle.Battle([char1], [char2])
battle1.do_attack(char2)
self.assertLessEqual(char2.health, char2.maxhealth)
battle1.end_turn()
self.assertRaises(battle.CantAttackException, battle1.do_attack,
char2)
def test_end_turn(self):
char1 = Character(None, maxhealth=100, initiative=1000, attack=2)
char2 = Character(None, maxhealth=100, initiative=1, defense=0)
battle1 = battle.Battle([char1], [char2])
battle1.end_turn()
self.assertIs(battle1.playing_char, char2)
def test_get_all_character(self):
char1 = Character(None, maxhealth=100, initiative=1000, attack=2)
char2 = Character(None, maxhealth=100, initiative=1, defense=0)
battle1 = battle.Battle([char1], [char2])
self.assertEqual(Counter((char1, char2)),
Counter(battle1.all_characters))
def test_get_current_playe_team(self):
char1 = Character(None, maxhealth=100, initiative=1000, attack=2)
char2 = Character(None, maxhealth=100, initiative=1, defense=0)
battle1 = battle.Battle([char1], [char2])
self.assertEqual(battle1.cur_player_team, 1)
battle1.end_turn()
self.assertEqual(battle1.cur_player_team, 2)
def test_win(self):
char1 = Character(None, maxhealth=10, initiative=1000)
char2 = Character(None, maxhealth=10, initiative=10)
char3 = Character(None, maxhealth=10, initiative=1)
battle1 = battle.Battle([char1], [char2, char3])
self.assertIsNone(battle1.winner)
battle1.do_run()
self.assertEqual(battle1.winner, 2)
battle2 = battle.Battle([char1], [char2, char3])
while char1.is_alive:
if battle2.cur_player_team == 2:
battle2.do_attack(char1)
battle2.end_turn()
self.assertEqual(battle2.winner, 2)
char1 = Character(None, maxhealth=10, initiative=1)
char2 = Character(None, maxhealth=10, initiative=1000)
char3 = Character(None, maxhealth=10, initiative=10)
battle3 = battle.Battle([char1], [char2, char3])
battle3.do_run()
self.assertEqual(battle3.winner, 1)
battle4 = battle.Battle([char1], [char2, char3])
while char2.is_alive or char3.is_alive:
if battle4.cur_player_team == 1:
battle4.do_attack(battle4.possible_targets_attack()[0])
battle4.end_turn()
self.assertEqual(battle4.winner, 1)
def test_can_attack(self):
char1 = Character(None, maxhealth=100, initiative=1000, attack=2)
char2 = Character(None, maxhealth=0, initiative=1, defense=0)
char3 = Character(None, maxhealth=10, initiative=1, defense=0)
battle1 = battle.Battle([char1], [char2, char3])
self.assertFalse(battle1.can_attack(char1))
self.assertFalse(battle1.can_attack(char2))
self.assertTrue(battle1.can_attack(char3))
def test_already_played(self):
char1 = Character(None, maxhealth=100, initiative=1000)
char2 = Character(None, maxhealth=100, initiative=1)
gun = inventory.create_item("gun")
ammo = inventory.create_item("gun_ammo", 20)
char1.inventory.add(gun)
char1.inventory.add(ammo)
battle1 = battle.Battle([char1], [char2])
battle1.do_attack(battle1.possible_targets_attack()[0])
self.assertRaises(battle.AlreadyPlayedException,
battle1.change_weapon, gun, ammo)
self.assertRaises(battle.AlreadyPlayedException,
battle1.do_attack,
battle1.possible_targets_attack()[0])
def test_change_weapon(self):
char1 = Character(None, maxhealth=100, initiative=1000)
gun = inventory.create_item("gun")
ammo = inventory.create_item("gun_ammo", 20)
char1.inventory.add(gun)
char1.inventory.add(ammo)
battle1 = battle.Battle([char1], [])
battle1.change_weapon(gun, ammo)
battle1.end_turn()
self.assertRaises(battle.CantChangeWeaponException,
battle1.change_weapon,
inventory.create_item("gun"), ammo)
self.assertRaises(battle.CantChangeWeaponException,
battle1.change_weapon,
gun, inventory.create_item("gun_ammo"))
self.assertRaises(inventory.IncompatibleAmmoException,
battle1.change_weapon,
gun, None)
battle1.change_weapon(char1.inventory.get_first("bare_hands"), None)
|
gpl-2.0
| -6,642,242,283,896,249,000 | 43.606061 | 76 | 0.625849 | false | 3.467609 | true | false | false |
ndaniel/fusioncatcher
|
bin/build_report_fusions_psl.py
|
1
|
28831
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
It produces a report with the summary of the fusion genes found. Also
FASTQ and FASTA files containing the supporting reads corresponding to each
fusion gene is generated.
Author: Daniel Nicorici, [email protected]
Copyright (c) 2009-2021 Daniel Nicorici
This file is part of FusionCatcher.
FusionCatcher is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
FusionCatcher is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with FusionCatcher (see file 'COPYING.txt'). If not, see
<http://www.gnu.org/licenses/>.
By default, FusionCatcher runs the BLAT aligner
<http://users.soe.ucsc.edu/~kent/src/>, but it also offers the option to disable
all of its scripts which make use of the BLAT aligner if you explicitly choose to do so.
BLAT's license does not allow it to be used for commercial activities. If BLAT's
license does not allow it to be used in your case, then you may still use
FusionCatcher by forcing it not to use the BLAT aligner, by specifying the option
'--skip-blat'. For more information regarding BLAT please see its license.
Please note that FusionCatcher does not require BLAT in order to find
candidate fusion genes!
This file runs/executes/uses BLAT.
"""
import sys
import os
import optparse
import gc
import string
import zipfile
import Bio.SeqIO
import datetime
import tempfile
import shutil
import gzip
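# An empty ZIP archive: the 'PK\x05\x06' end-of-central-directory record with
# all remaining fields zeroed.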
empty_zip_data = 'PK\x05\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
ttable = string.maketrans("ACGTN","TGCAA") # global
#ttable = string.maketrans("ACGTYRSWKMBDHV-","TGCARYSWMKVHDB-")
mapping_solexa2sanger = "".join([chr(0) for ascii in range(0, 59)]
+ [chr(33 + int(round(Bio.SeqIO.QualityIO.phred_quality_from_solexa(q)))) for q in range(-5, 62 + 1)]
+ [chr(0) for ascii in range(127, 256)])
mapping_illumina2sanger = "".join([chr(0) for ascii in range(0, 64)]
+ [chr(33 + q) for q in range(0, 62 + 1)]
+ [chr(0) for ascii in range(127, 256)])
def solexa2sanger(qual):
return qual.translate(mapping_solexa2sanger)
def illumina2sanger(qual):
return qual.translate(mapping_illumina2sanger)
def give_me_temp_filename(tmp_dir = None):
if tmp_dir and (not os.path.isdir(tmp_dir)) and (not os.path.islink(tmp_dir)):
os.makedirs(tmp_dir)
(ft,ft_name) = tempfile.mkstemp(dir = tmp_dir)
os.close(ft)
return ft_name
def myorder(a,b):
return (a,b) if a <= b else (b,a)
def dnaReverseComplement(seq):
seq = seq.upper()
seq = seq.translate(ttable)
return seq[::-1]
def reads_from_fastq_file(file_name, size_read_buffer = 10**8):
fid = None
if file_name == '-':
fid = sys.stdin
elif file_name.lower().endswith('.gz'):
fid = gzip.open(file_name,'r')
else:
fid = open(file_name,'r')
piece = [None,None,None,None]
ij = 0
while True:
gc.disable()
lines = fid.readlines(size_read_buffer)
gc.enable()
if not lines:
break
for line in lines:
ij = ij + 1
piece[ij-1] = line
if ij == 4:
bucket = (piece[0].rstrip('\r\n')[1:],
piece[1].rstrip('\r\n'),
piece[3].rstrip('\r\n'))
yield bucket
piece = [None,None,None,None]
ij = 0
fid.close()
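# reads_from_fastq_file yields (read_id, sequence, quality) tuples, e.g.
# (hypothetical file name):
#   for rid, seq, qual in reads_from_fastq_file('reads.fq.gz'):
#       ...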
def delete_file(some_file):
if os.path.isfile(some_file) or os.path.islink(some_file):
os.remove(some_file)
elif os.path.isdir(some_file):
shutil.rmtree(some_file)
def give_me_psl(fasta, twobit, blat_dir = None, tmp_dir = None, align_type = 'web'):
    # Given the query sequences as a list of FASTA-formatted strings, run BLAT
    # and return the PSL output as a list of strings.
fasta_file = give_me_temp_filename(tmp_dir = tmp_dir)
psl_file = give_me_temp_filename(tmp_dir = tmp_dir)
file(fasta_file,'w').writelines(fasta)
# web version of blat
# blat -stepSize=5 -repMatch=2253 -minScore=0 -minIdentity=0 database.2bit query.fa output.psl
    # from: http://genome.ucsc.edu/FAQ/FAQblat.html
#
    # other idea: ./blat -minIdentity=95 -fine -stepSize=1 -tileSize=6 -repMatch = 1000000
# from http://www.gene2drug.com/product/?p=671 by Sucheta Tripathy
# BLAT stands for Blast like Alignment Tool and was designed by Jim Kent.
# It is relatively easy to install the software and is an excellent way of
# mapping assembly files generated from abyss into reference genome for
# finding new transcripts and new intron exon junctions. BLAT has recently
# added few fine tuning options for short read mapping. Setting the stepSize
# and tileSize parameters for mapping reads of length n, where
    # n = 2 * stepSize + tileSize - 1. While tileSize can range from 6 to 15,
    # stepSize can be from 1 to tileSize. So, in other words, reads as short
    # as 7 bases can be mapped into reference [2 * 1 + 6 - 1 = 7]. Also other
    # commandline options can be used to make the mapping more sensitive such
    # as -fine and -repMatch = 1000000. -fastmap option and -ooc option should
    # be avoided for mapping short reads. In addition -minIdentity may be set
# to 95%.
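    # Worked example (assuming BLAT's default tileSize of 11): with the 'web'
    # settings below (stepSize=5) the shortest mappable read would be
    # n = 2 * 5 + 11 - 1 = 20 bases.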
_BT_ = ""
if blat_dir and blat_dir.strip():
_BT_ = blat_dir.rstrip("/")+"/"
cmd = None
if align_type == 'web':
cmd = [_BT_+'blat',
'-stepSize=5', # 5
'-repMatch=2253', # 2253
'-minScore=0', # 0
'-minIdentity=0', # 0
twobit,
fasta_file,
psl_file]
elif align_type == 'sensitive':
cmd = [_BT_+'blat',
'-stepSize=5', # 5
'-repMatch=2253', # 2253
'-minScore=0', # 0
'-minIdentity=95', # 0
'-fine',
twobit,
fasta_file,
psl_file]
else:
print "ERROR: Not known type of BLAT search!"
sys.exit(1)
cmd = ' '.join(cmd)
proc = os.system(cmd)
if proc:
print >>sys.stderr, "ERROR while executing '%s'" % (cmd,)
sys.exit(1)
psl = file(psl_file,'r').readlines()
# add chr to the column number 14 (index = 1) so that can be loaded into UCSC
# genome browser
chr_psl = []
for line in psl:
li = line.split('\t')
if li and len(li) > 14:
l = li[13]
li[13] = 'chr' + l if not l.startswith('chr') else l
if li[13] == 'chrMT':
li[13] = 'chrM'
chr_psl.append('\t'.join(li))
else:
chr_psl.append(line)
delete_file(fasta_file)
delete_file(psl_file)
return chr_psl
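# Usage sketch (hypothetical paths):
#   psl_lines = give_me_psl(fasta_records, '/data/genome.2bit', align_type='sensitive')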
def give_me_sam(fastq, anchor, bowtie2index, bowtie2_dir = None, tmp_dir = None, cpus = 1):
    # Given the reads as a list of FASTQ-formatted strings, run BOWTIE2 and
    # return the SAM output as a list of strings.
fastq_file = give_me_temp_filename(tmp_dir = tmp_dir)
sam_file = give_me_temp_filename(tmp_dir = tmp_dir)
file(fastq_file,'w').writelines(fastq)
_B2_ = ""
if bowtie2_dir and bowtie2_dir.strip():
_B2_ = bowtie2_dir.rstrip("/")+"/"
cmd = [_B2_+'bowtie2',
'-p',str(cpus),
'--local',
'-k','10',
'-L',str(anchor),
'-x',bowtie2index,
'-U',fastq_file,
'-S',sam_file]
sam = []
cmd = ' '.join(cmd)
proc = os.system(cmd)
if proc:
print >>sys.stderr, "WARNING: unable to execute: '%s'" % (cmd,)
else:
sam = file(sam_file,'r').readlines()
delete_file(fastq_file)
delete_file(sam_file)
return sam
def mycols(t):
return t[0:4] + t[5:11]
def strmycols(t):
return '\t'.join(mycols(t))
def mygenes(t):
    return (t[0], t[6])
def ordmygenes(t):
return myorder(mygenes(t))
def give_me_assembly(fasta, kmer = 31, velvet_dir = None, tmp_dir = None):
    # use Velvet to assemble the supporting reads
#
# velveth /tmp/velvet-unmapped-reads/ 17 -fasta -short myfasta.fa
# velvetg /tmp/velvet-unmapped-reads/
if fasta:
fasta_file = give_me_temp_filename(tmp_dir = tmp_dir)
ase_dir = give_me_temp_filename(tmp_dir = tmp_dir)
if os.path.isfile(ase_dir) or os.path.islink(ase_dir):
os.remove(ase_dir)
elif os.path.isdir(ase_dir):
            shutil.rmtree(ase_dir)
os.makedirs(ase_dir)
file(fasta_file,'w').writelines(fasta)
_VT_ = ""
if velvet_dir and velvet_dir.strip():
_VT_ = velvet_dir.rstrip("/")+"/"
cmd = [_VT_+'velveth',
ase_dir,
str(kmer),
'-fasta',
'-short',
fasta_file,
';',
_VT_+'velvetg',
ase_dir,
'>',
'/dev/null',
'2>&1'
]
cmd = ' '.join(cmd)
proc = os.system(cmd)
if proc:
print >>sys.stderr, "ERROR while executing '%s'" % (cmd,)
sys.exit(1)
else:
return []
ase = file(os.path.join(ase_dir,'contigs.fa'),'r').readlines()
delete_file(fasta_file)
shutil.rmtree(ase_dir)
return ase
################################################################################
################################################################################
################################################################################
if __name__ == '__main__':
#command line parsing
usage = "%prog [options]"
description = """It analyzes the mappings of reads on exon-exon junctions."""
version = "%prog 0.12 beta"
parser = optparse.OptionParser(usage = usage,
description = description,
version = version)
parser.add_option("--input_fastq",
action = "store",
type = "string",
dest = "input_fastq_filename",
help = """The input FASTQ file containing all the reads.""")
parser.add_option("--input_fusion_psl",
action = "store",
type = "string",
dest = "input_fusion_psl_filename",
help = """The input PSL file containing the candidate fusion genes.""")
parser.add_option("--input_candidate_fusion_genes_reads",
action = "store",
type = "string",
dest = "input_candidate_fusion_genes_reads_filename",
help = """The input list of candidate fusion genes and ids of the supporting reads, for example 'candidate_fusion-genes_not-filtered_supporting_paired-reads.txt'. This is processed even further.""")
parser.add_option("--input_unmapped_reads",
action = "store",
type = "string",
dest = "input_unmapped_reads_filename",
help = """The input list of ids of reads that are unmapped (that are mapping over the fusion junction).""")
parser.add_option("--output_super_summary",
action = "store",
type = "string",
dest = "output_super_summary_filename",
help = """The output super summary report for candidate fusion genes.""")
parser.add_option("--output_zip_fasta",
action = "store",
type = "string",
dest = "output_zip_fasta_filename",
help = """The ouput FASTQ file containing the reads which support each candidate fusion gene.""")
parser.add_option("--suporting_unique_reads",
action = "store",
type = "int",
dest = "supporting_unique_reads",
default = 1,
help = """The minimum number of unique reads which overlap over an exon-exon junction. Default is %default.""")
parser.add_option("--anchor2",
action = "store",
type = "int",
dest = "anchor2",
default = 40,
help = """For anchors longer (or equal) with this value it is enough to have only one supporting read. Default is '%default'.""")
parser.add_option("--input_genome_2bit",
action = "store",
type = "string",
dest = "input_genome_2bit",
help = """Path to the genome in 2bit format (generated with faToTwoBit) which will be used for aligning using BLAT the supporting reads and their alignment in PSL format is added to file specified with '--output_zip_fasta'.""")
parser.add_option("--input_genome_bowtie2",
action = "store",
type = "string",
dest = "input_genome_bowtie2",
help = """Path to the genome in BOWTIE2 index format which will be used for aligning using BOWTIE2 the supporting reads and their alignment in PSL format is added to file specified with '--output_zip_fasta'.""")
choices = ('web','sensitive')
parser.add_option("--psl_alignment_type",
action = "store",
type = "choice",
choices = choices,
dest = "psl_search_type",
default = "web",
help = "The type of BLAT alignment to be used for aligning "+
"the supporting reads when BLAT is chosen. The choices "+
"are ['"+"','".join(choices)+"']. "+
"Default is '%default'.")
parser.add_option("--blat-dir",
action = "store",
type = "string",
dest = "blat_directory",
help = """Path to Blat's executable.""")
parser.add_option("--sam_alignment",
action = "store",
type = "int",
dest = "sam_alignment",
default = 10,
help = """If set then a SAM file will be generated using BOWTIE2. Default is '%default'.""")
parser.add_option("--bowtie2-dir",
action = "store",
type = "string",
dest = "bowtie2_directory",
help = """Path to Bowtie2's executable.""")
parser.add_option("--mismatches",
action = "store",
type = "int",
dest = "mismatches",
default = 3,
help = """The minimum number of mismatches accepted in the alignment. Default is '%default'.""")
parser.add_option("--mismatches-gap",
action = "store",
type = "int",
dest = "mismatches_gap",
default = 7,
help = """The minimum number of mismatches accepted in the gap alignment. Default is '%default'.""")
parser.add_option("--junction",
action = "store_true",
dest = "junction",
default = False,
help = """If used then the junction sequence is added to the FASTA file with the supporting reads. Default is '%default'.""")
parser.add_option("--threads","-p",
action = "store",
type = "int",
dest = "processes",
default = 1,
help = "Number or processes to be used for running Bowtie2. "+
"Default is '%default'. ")
parser.add_option("--tmp_dir",'-t',
action = "store",
type = "string",
dest = "tmp_directory",
default = None,
help = "The directory which should be used as temporary directory. By default is the OS temporary directory.")
parser.add_option("--velvet",
action = "store_true",
dest = "velvet",
default = False,
help = """If used then the supporting reads from the FASTA file are assembled using VELVET. Default is '%default'.""")
parser.add_option("--velvet-dir",
action = "store",
type = "string",
dest = "velvet_directory",
help = """Path to Velvet's executable.""")
(options, args) = parser.parse_args()
# validate options
if not (options.input_fusion_psl_filename and
options.output_super_summary_filename
):
parser.print_help()
sys.exit(1)
#
# HEADER PSL file
#
#header = ['gene-5end', # 0
# 'gene-5end_symbol', # 1
# 'chromosome_gene-5end', # 2
# 'strand_gene-5end', # 3
# 'start_chromosome_part-1-of-read-mapped-gene-5end', # 4
# 'end_chromosome_part-1-read-mapped-gene-5end', # 5
# 'gene-3end', # 6
# 'gene-3end_symbol', # 7
# 'chromosome_gene-3end', # 8
# 'strand_gene-3end', # 9
# 'start_chromosome_part-2-of-read-mapped-gene-3end', # 10
# 'end_chromosome_part-2-read-mapped-gene-3end', # 11
# 'short_read', # 12
# 'mismatches', # 13
# 'length_short_read', # 14
# 'start_part-1-read_on_gene-5end', # 15
# 'end_part-1-read_on_gene-5end', # 16
# 'start_part-2-read_on_gene-3end', # 17
# 'end_part-2-read_on_gene-3end', # 18
# 'anchor_length', # 19
# 'fusion_sequence' # 20
# ]
unmapped_reads = set()
if options.input_unmapped_reads_filename:
print "Reading...",options.input_unmapped_reads_filename
unmapped_reads = set([line.rstrip('\r\n') for line in file(options.input_unmapped_reads_filename,'r').readlines()])
print "Reading...",options.input_fusion_psl_filename
data = [line.rstrip('\r\n').split('\t') for line in file(options.input_fusion_psl_filename,'r') if line.rstrip('\r\n')]
header = data.pop(0)
# filter for mismatches
#data = [line for line in data if int(line[13])<=options.mismatches]
dudu = []
for line in data:
if line[20].lower().find('*n') == -1:
if int(line[13])<=options.mismatches:
dudu.append(line)
elif int(line[13])<=options.mismatches_gap:
dudu.append(line)
# here I have gaps in alignment (i.e. IGH fusions) because there is "*NNNN"
#data = [line for line in data if int(line[13])<=options.mismatches] # ORIGINAL
data = dudu
# find unique reads
data_uniq = list(set(['\t'.join(line[:12]) for line in data]))
data_uniq = [line.split('\t') for line in data_uniq]
# find same splicing sites = remove cols 4 and 11
data_uniq = [strmycols(line) for line in data_uniq]
# counts the unique reads for unique splicing sites
data_dict = dict()
for line in data_uniq:
data_dict[line] = data_dict.get(line,0) + 1
# sort the counts
dd = sorted(data_dict.items(),key = lambda x: -x[1])
# filter those fusion with too few counts
#dd = [(k,v) for (k,v) in dd if v >= options.supporting_unique_reads]
dd = [(k,v) for (k,v) in dd if v >= 1] # in order to allow the use of options.anchor2
# find those reads and the fusion sequence for the unique fusion points
summary = []
summary_reads = []
summary.append("%s\tcounts\tlongest_anchor\tfusion_sequence\n"%(strmycols(header),))
summary_reads.append('header')
singles = set()
ggenes_e = list()
ggenes_s = list()
ggenes_p = list()
ggenes_e.append('header')
ggenes_s.append('header')
ggenes_p.append('header')
fast_gg = dict()
for (k,v) in dd:
if v >= options.supporting_unique_reads:
r = []
fs = None
gg = None
anchor_max = 0
for li in data:
if strmycols(li) == k:
r.append(li[12])
fs = li[20]
anchor = int(li[19])
if anchor > anchor_max:
anchor_max = anchor
gg_e = (li[0],li[6])
gg_s = (li[1],li[7])
gg_p = (li[5],li[10])
summary.append("%s\t%d\t%d\t%s\n"%(k,v,anchor_max,fs))
r = set(r)
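            # also collect the mate of each supporting read by flipping the /1 <-> /2 suffix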
rr = set(el[:-1] + '1' if el.endswith('/2') else el[:-1]+'2' for el in r)
summary_reads.append(list(r)+list(rr))
singles.update(r)
singles.update(rr)
ggenes_e.append(gg_e)
ggenes_s.append(gg_s)
ggenes_p.append(gg_p)
fast_gg[myorder(*gg_e)] = None
elif options.supporting_unique_reads > 1:
r = []
fs = None
gg = None
anchor_max = 0
for li in data:
if strmycols(li) == k:
r.append(li[12])
fs = li[20]
anchor = int(li[19])
if anchor > anchor_max:
anchor_max = anchor
gg_e = (li[0],li[6])
gg_s = (li[1],li[7])
gg_p = (li[5],li[10])
if anchor_max >= options.anchor2:
summary.append("%s\t%d\t%d\t%s\n"%(k,v,anchor_max,fs))
r = set(r)
rr = set(el[:-1] + '1' if el.endswith('/2') else el[:-1]+'2' for el in r)
summary_reads.append(list(r)+list(rr))
singles.update(r)
singles.update(rr)
ggenes_e.append(gg_e)
ggenes_s.append(gg_s)
ggenes_p.append(gg_p)
fast_gg[myorder(*gg_e)] = None
print "Writing the summary file...", options.output_super_summary_filename
file(options.output_super_summary_filename,'w').writelines(summary)
print "Reading...",options.input_candidate_fusion_genes_reads_filename
# 0 - Fusion_gene_symbol_1
# 1 - Fusion_gene_symbol_2
# 2 - Fusion_gene_1
# 3 - Fusion_gene_2
# 4 - Count_paired-end_reads
# 5 - Supporting_paired-read_ids ==> separated by commas, e.g. F1000018349733,F1000033997513,F1000046358541,F1000034322437,...
candidate_fusions_reads = [line.rstrip('\r\n').split('\t') for line in file(options.input_candidate_fusion_genes_reads_filename,'r').readlines()]
candidate_fusions_reads.pop(0) # remove the header
candidate_fusions_reads = dict([(myorder(el[2],el[3]),el[5].split(',')) for el in candidate_fusions_reads if fast_gg.has_key(myorder(el[2],el[3]))])
#
# for each candidate fusion genes build a FASTA file containing the reads which support it
#
print "Processing the supporting reads..."
fasta = dict()
fastq = dict()
pairs = dict()
for (k,v) in candidate_fusions_reads.items():
pairs[k] = []
for vv in v:
s1 = '%s/1' % (vv,)
s2 = '%s/2' % (vv,)
fasta[s1] = None
fasta[s2] = None
pairs[k].append(s1)
pairs[k].append(s2)
for k in singles:
fasta[k] = None
print "Scanning the FASTQ file...",options.input_fastq_filename
for a_read in reads_from_fastq_file(options.input_fastq_filename):
if fasta.has_key(a_read[0]):
ev = a_read[0]
w = a_read[1]
q = a_read[2]
if ev.endswith("/1"):
fasta[ev] = w
elif ev.endswith("/2"):
fasta[ev] = dnaReverseComplement(w)
fastq[ev] = (w,q)
    # create a ZIP archive containing a FASTA/FASTQ file for each candidate fusion gene
print "Writing the FASTA/FASTQ files containing the supporting reads...",options.output_zip_fasta_filename
archive = zipfile.ZipFile(options.output_zip_fasta_filename, 'w', zipfile.ZIP_STORED, allowZip64 = True)
for i in xrange(len(summary)):
if i == 0: # skip header
continue
# for each candidate fusion
#gg = "%s:%s_%s:%s_%s:%s" % (ggenes_s[i][0],ggenes_s[i][1],ggenes_e[i][0],ggenes_e[i][1],ggenes_p[i][0],ggenes_p[i][1])
gg = "%s--%s__%s--%s" % (ggenes_s[i][0],ggenes_s[i][1],ggenes_p[i][0],ggenes_p[i][1])
gg_e = myorder(ggenes_e[i][0],ggenes_e[i][1])
da = []
if options.junction:
u = summary[i].rstrip('\n').split('\t')
da = ['>JUNCTION__%s\n%s\n' % ('_'.join(u[:-1]),u[-1])]
# write the junction sequence
#archive.writestr("%s_junction.fa" % (gg,), da)
# PSL
#if options.input_genome_2bit:
# psl = give_me_psl(da,
# options.input_genome_2bit,
# tmp_dir = options.tmp_directory,
# align_type = options.blat_search_type)
# archive.writestr("%s_junction.psl" % (gg,), ''.join(psl))
# write the reads in FASTA file
#da = []
for v in sorted(summary_reads[i]):
da.append(">%s_supports_fusion_junction\n"%(v,))
da.append("%s\n"%(fasta[v],))
for v in sorted(pairs[gg_e]):
da.append(">%s_supports_fusion_pair\n"%(v,))
da.append("%s\n"%(fasta[v],))
archive.writestr("%s_reads.fa" % (gg,), ''.join(da))
# PSL
if options.input_genome_2bit:
psl = give_me_psl(da,
options.input_genome_2bit,
blat_dir = options.blat_directory,
tmp_dir = options.tmp_directory,
align_type = options.psl_search_type)
archive.writestr("%s_reads.psl" % (gg,), ''.join(psl))
# VELVET
if options.velvet:
ase = give_me_assembly(da,
17,
velvet_dir = options.velvet_directory,
tmp_dir = options.tmp_directory)
archive.writestr("%s_assembly.fa" % (gg,), ''.join(ase))
# write the reads in FASTQ file
da = []
for v in sorted(summary_reads[i]):
da.append("@%s_supports_fusion_junction%s\n"%(v[:-2],v[-2:]))
sq = fastq[v]
da.append("%s\n+\n%s\n"%(sq[0],sq[1]))
for v in sorted(pairs[gg_e]):
da.append("@%s_supports_fusion_pair%s\n"%(v[:-2],v[-2:]))
sq = fastq[v]
#da.append("%s\n+\n%s\n"%(sq[0],illumina2sanger(sq[1])))
da.append("%s\n+\n%s\n"%(sq[0],sq[1]))
archive.writestr("%s_reads.fq" % (gg,), ''.join(da))
if options.input_genome_bowtie2:
sam = give_me_sam(da,
options.sam_alignment,
options.input_genome_bowtie2,
bowtie2_dir = options.bowtie2_directory,
tmp_dir = options.tmp_directory,
cpus = options.processes)
archive.writestr("%s_reads.sam" % (gg,), ''.join(sam))
# Ensembl ids of genes
archive.writestr("%s_ensembl_ids.txt" % (gg,), '%s\n%s\n' % (ggenes_e[i][0],ggenes_e[i][1]))
archive.close()
|
gpl-3.0
| -3,517,396,408,604,192,000 | 38.635488 | 249 | 0.512997 | false | 3.702776 | false | false | false |
keithasaurus/django_fun_views
|
fun_views/views/generic/form/render.py
|
1
|
1144
|
from fun_views.patterns.form.render import form_render_pattern
from fun_views.views.utils import (get_context_base, make_base_view,
not_set_get_form_class,
not_set_get_template_name, prefer_func,
prefer_literal, render_response_base)
form_render_base = make_base_view(form_render_pattern)
def _init_form(req_data, form_class):
return form_class()
def form_render(template_name=None,
get_template_name=not_set_get_template_name,
form_class=None,
get_form_class=not_set_get_form_class,
init_form=_init_form,
form_context_name='form',
get_form_context_name=None,
get_context=get_context_base,
render_response=render_response_base):
return form_render_base(
prefer_literal(form_class, get_form_class),
init_form,
prefer_func(form_context_name, get_form_context_name),
get_context,
prefer_literal(template_name, get_template_name),
render_response
)
|
mit
| -5,728,591,379,269,749,000 | 35.903226 | 74 | 0.578671 | false | 3.851852 | false | false | false |
mumax/2
|
examples/stdproblem5-am01.py
|
1
|
1828
|
# Micromagnetics standard problem no. 5
# As proposed by M. Najafi et al., JAP 105, 113914 (2009).
# @author Mykola Dvornik
from mumax2 import *
from mumax2_geom import *
Nx = 32
Ny = 32
Nz = 1
setgridsize(Nx, Ny, Nz)
# physical size in meters
sizeX = 100e-9
sizeY = 100e-9
sizeZ = 10e-9
csX = (sizeX/Nx)
csY = (sizeY/Ny)
csZ = (sizeZ/Nz)
setcellsize(csX, csY, csZ)
# load modules
load('exchange6')
load('demag')
load('zeeman')
load('llg')
load('maxtorque')
load('solver/am12')
setv('m_maxabserror', 1e-4)
setv('m_maxrelerror', 1e-3)
setv('maxdt', 1e-10)
setv('mindt', 1e-17)
savegraph("graph.png")
setv('Msat', 800e3)
setv('Aex', 1.3e-11)
setv('alpha', 1.0)
setv('gamma', 2.211e5)
setv('dt', 1e-15)
setv('maxdt', 1e-12)
# Set an initial magnetisation which will relax into a vortex
mv = makearray(3, Nx, Ny, Nz)
for m in range(Nx):
for n in range(Ny):
for o in range(Nz):
xx = float(m) * csX - 50.0e-9
yy = 50.0e-9 - float(n) * csY
mv[0][m][n][o] = yy
mv[1][m][n][o] = xx
mv[2][m][n][o] = 40.0e-9
setarray('m', mv)
run_until_smaller('maxtorque', 1e-3 * gets('gamma') * 800e3)
load('zhang-li')
setv('alpha', 0.1)
setv('dt', 1e-15)
setv('t', 0)
tabulate(["t", "<m>"], "m.txt")
setv('xi',0.05)
setv('polarisation',1.0)
save("m","png",[])
save("m","vtk",[])
j = makearray(3, Nx, Ny, Nz)
for m in range(Nx):
for n in range(Ny):
for o in range(Nz):
j[0][m][n][o] = 1.0
j[1][m][n][o] = 0.0
j[2][m][n][o] = 0.0
setv('j', [1e12, 0, 0])
setmask('j', j)
#autosave("m", "png", [], 10e-12)
autosave("m", "gplot", [], 10e-12)
autotabulate(["t", "<m>"], "m.txt", 50e-12)
autotabulate(["t", "m_error"], "error.dat", 1e-13)
autotabulate(["t", "dt"], "dt.dat", 1e-13)
run(15.0e-9)
printstats()
|
gpl-3.0
| -2,265,707,924,753,422,800 | 17.464646 | 60 | 0.561269 | false | 2.138012 | false | false | false |
dseuss/pycsalgs
|
csalgs/lowrank/gradient.py
|
1
|
2641
|
# encoding: utf-8
import itertools as it
import numpy as np
import numpy.linalg as la
from scipy.sparse.linalg import svds
__all__ = ['adaptive_stepsize', 'iht_estimator', 'cgm_estimator']
def _vec(A):
newshape = A.shape[:-2]
newshape = newshape + (A.shape[-2] * A.shape[-1],)
return A.reshape(newshape)
def hard_threshold(mat, rank, retproj=False):
"""PU, PV ... projectors on left/right eigenspaces"""
U_full, s, Vstar_full = la.svd(mat)
U = U_full[:, :rank]
V = Vstar_full.T.conj()[:, :rank]
PU = U @ U.T.conj()
PV = V @ V.T.conj()
mat_projected = U @ np.diag(s[:rank]) @ V.conj().T
return (mat_projected, (PU, PV)) if retproj else mat_projected
def adaptive_stepsize(projection='row'):
"""@todo: Docstring for adaptive_stepsize.
:param projection: Possible values: 'row', 'col', 'rowcol', None
:returns: @todo
"""
assert projection in {'col', 'row', 'rowcol', None}
def stepsize(A, g, projectors):
PU, PV = projectors
if projection == 'col':
g = PU @ g
elif projection == 'row':
g = g @ PV
elif projection == 'rowcol':
g = PU @ g @ PV
return la.norm(g)**2 / la.norm(_vec(A) @ _vec(g))**2
return stepsize
def iht_estimator(A, y, rank, stepsize=adaptive_stepsize(), x_init=None):
x_hat = np.zeros(A.shape[1:]) if x_init is None else x_init
_, projectors = hard_threshold(np.tensordot(y, A, axes=(-1, 0)), rank,
retproj=True)
while True:
g = np.tensordot(y - (_vec(A) @ _vec(x_hat)), A, axes=(-1, 0))
mu = stepsize(A, g, projectors)
x_hat, projectors = hard_threshold(x_hat + mu * g, rank, retproj=True)
yield x_hat
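# Usage sketch (hypothetical problem sizes): take the IHT estimate after a
# fixed number of iterations.
#   import itertools as it
#   x_hat = next(it.islice(iht_estimator(A, y, rank=5), 100, None))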
def _expval(A, x):
return np.dot(A.reshape((len(A), -1)), x.ravel())
def _cgm_iterator(A, y, alpha, svds=svds, ret_gap=False):
x = np.zeros(A.shape[1:3], dtype=A.dtype)
for iteration in it.count():
z = _expval(A, x)
u, _, v = svds(np.tensordot(z - y, A, axes=(0, 0)), 1)
h = - alpha * u * v
eta = 2 / (iteration + 2)
x = (1 - eta) * x + eta * h
duality_gap = np.dot(z - _expval(A, h), z - y)
yield x, duality_gap
def cgm_estimator(A, y, alpha, relerr=1e-1, maxiter=int(1e6)):
"""@todo: Docstring for cgm_estimator.
"""
solution = _cgm_iterator(A, y, alpha, ret_gap=True)
for x, gap in it.islice(solution, maxiter):
if gap < relerr:
return x
raise ValueError("Did not find solution with error < {} in {} iterations"
.format(relerr, maxiter))
|
gpl-3.0
| 84,450,061,899,274,080 | 27.706522 | 78 | 0.558122 | false | 2.950838 | false | false | false |
cathyyul/sumo-0.18
|
tests/complex/traci/busySocket/runner.py
|
1
|
1168
|
#!/usr/bin/env python
import os, subprocess, sys, time, shutil
sumoHome = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', '..'))
sys.path.append(os.path.join(sumoHome, "tools"))
import traci
if sys.argv[1]=="sumo":
sumoBinary = os.environ.get("SUMO_BINARY", os.path.join(sumoHome, 'bin', 'sumo'))
addOption = ""
secondConfig = "sumo.sumocfg"
else:
sumoBinary = os.environ.get("GUISIM_BINARY", os.path.join(sumoHome, 'bin', 'sumo-gui'))
addOption = "-S -Q"
secondConfig = "sumo_log.sumocfg"
PORT = 8813
subprocess.Popen("%s -c sumo.sumocfg %s" % (sumoBinary, addOption), shell=True, stdout=sys.stdout, stderr=sys.stderr)
traci.init(PORT)
subprocess.Popen("%s -c %s %s" % (sumoBinary, secondConfig, addOption), shell=True, stdout=sys.stdout, stderr=sys.stderr)
time.sleep(10)
step = 0
while step <= 100:
traci.simulationStep()
vehs = traci.vehicle.getIDList()
if vehs.index("horiz")<0 or len(vehs)>1:
print "Something is false"
step += 1
traci.close()
sys.stdout.flush()
if os.path.exists("lastrun.stderr"):
f = open("lastrun.stderr")
shutil.copyfileobj(f, sys.stderr)
f.close()
|
gpl-3.0
| -7,274,768,031,797,202,000 | 32.371429 | 121 | 0.662671 | false | 2.774347 | false | false | false |
javimosch/cdnsviewer
|
savecss.py
|
1
|
1626
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# server.py: receive CSS and JS files from Chrome extension
# and save files locally
#
# Author: [email protected]
# 30.10.2011 - Created
try:
# python 2.x
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
except:
# python 3.x
from http.server import HTTPServer, BaseHTTPRequestHandler
class MyServer(BaseHTTPRequestHandler):
def do_POST(self):
hd = self.headers
# chrome sent data:
url = hd.get("X-origurl")
fpath = hd.get("X-filepath")
bodylen = int(hd['content-length'])
body = self.rfile.read(bodylen)
print (url, " ->", fpath, len(body))
reply = "OK"
# optional security: check that path is under given folder
ROOT = ""
if ROOT and not fpath.startswith(ROOT):
reply = "access denied: " + fpath
else:
# save file
try:
f = open(fpath, "wb")
f.write(body)
f.close()
except Exception as e:
print (e)
reply = "Server couldn't save "+fpath
# return reply
self.send_response(200)
self.end_headers()
self.wfile.write(reply.encode('utf-8'))
# optional security: chroot this script to a folder, run with
# "sudo python server.py"
# (remember to adjust your url mappings in the extension too)
# import os
# os.chroot("/Users/myusername/")
# start http server
server = HTTPServer(('localhost', 8080), MyServer)
print ("Server running in port 8080...")
server.serve_forever()
|
mit
| 2,339,234,633,292,802,600 | 25.655738 | 66 | 0.591021 | false | 3.880668 | false | false | false |
BotDevGroup/marvin
|
marvinbot/views.py
|
1
|
1385
|
import logging
from flask import (
request, session, g, redirect, url_for, abort, render_template,
flash, current_app, Blueprint
)
from flask_login import login_user, logout_user, current_user, login_required
from marvinbot.models import User
from marvinbot.forms import LoginForm
from marvinbot.utils.net import is_safe_url
log = logging.getLogger(__name__)
marvinbot = Blueprint('marvinbot', __name__, template_folder='templates')
@marvinbot.route('/')
@login_required
def home():
return render_template('index.html')
@marvinbot.route('/login', methods=["GET", "POST"])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.by_username(form.username.data)
if user and user.check_password(form.passwd.data):
login_user(user, remember=True)
flash('Successful login')
next_url = request.args.get('next')
# is_safe_url should check if the url is safe for redirects.
# See http://flask.pocoo.org/snippets/62/ for an example.
if not is_safe_url(request, next_url):
return abort(400)
return redirect(next_url or url_for(".home"))
return render_template("login.html", form=form)
@marvinbot.route('/logout', methods=["GET", "POST"])
@login_required
def logout():
logout_user()
return redirect(url_for('.login'))
|
mit
| -2,103,130,300,611,279,000 | 28.468085 | 77 | 0.658484 | false | 3.654354 | false | false | false |
rszki/Protein
|
PDBList.py
|
1
|
4456
|
# -*- coding: utf-8 -*-
'''
Created on 2012/12/14
A module collecting operations on my own PDB-related list files (e.g. linker lists).
The objects handled are basically Python lists.
The default separator is ","
ReadList
ToInt
ToStr
WriteList
DivideList
ToDetailNum
ToPDBNum
MakeLinkerTupple
@author: ryosuke
'''
def ReadList(filename, shold_ToInt=True, sep=","):
'''
Read my definition PDB list file (ex. linker list) and make list.
@param filename: is PDB list file name.
@param shold_ToInt: is whether you use ToInt function.
@param sep: is separator in PDB list.
@return: PDB list converted to list object
'''
with open(filename) as fp:
ls = fp.readlines()
ls = [x.splitlines() for x in ls]
ls = [x[0].split(sep) for x in ls]
if shold_ToInt:
return ToInt(ls)
return ls
def ToInt(PDB_ls):
'''
Convert str type number into int type number
@param PDB_ls: is made by ReadList()
@return: converted PDB list
'''
new_ls = []
for line in PDB_ls:
new_line = [line[0]]
for i in range(1,len(line)):
try:
elem_new = int(line[i])
except:
elem_new = line[i]
new_line += [elem_new]
new_ls.append(new_line)
return new_ls
def ToStr(PDB_ls):
'''
Convert int type number into str type number
@param PDB_ls: is made by ReadList()
@return converted PDB list
'''
new_ls = []
for line in PDB_ls:
new_line = []
for x in line:
new_line.append(str(x))
new_ls.append(new_line)
return new_ls
def WriteList(PDB_ls,output_file, sep = ",", shold_ToStr = True):
'''
Write PDB list to file
@param PDB_ls: is made by ReadList()
@param output_file: include directory
@param sep: is separator
@param shold_ToStr: is whether you use ToStr function
@return: is None
'''
if shold_ToStr:
PDB_ls = ToStr(PDB_ls)
with open(output_file,mode="w") as fp:
fp.writelines(sep.join(line)+"\n" for line in PDB_ls)
def DivdeList(PDB_ls, div_num, output_dir):
    '''
    Divide PDB_ls into div_num sublists (taking every div_num-th entry)
    and write each one to output_dir as "crosslist_<n>".
    @param PDB_ls: is made by ReadList()
    @param div_num: number of sublists to create
    @param output_dir: directory where the crosslist files are written
    '''
for i in range(div_num):
cross_ls = []
for j, x in enumerate(PDB_ls):
if j%div_num == i:
cross_ls.append(x)
filename = "{0}/crosslist_{1}".format(output_dir, i+1)
WriteList(cross_ls, filename)
def ToDetailNum(PDB_ls,Detail_dir):
"""
Convert linker pdb_number to detail line number.
@param PDB_ls: is made by ReadList()
@param Detail_dir: is need to use Detail module.
@return: Converted PDBlist
"""
import Detail
list_conv = []
for protein in PDB_ls:
pdbid = protein[0]
pdbnum = protein[1:]
newline = [pdbid]
list_detail = Detail.ReadList(pdbid, Detail_dir)
for line_detail in list_detail[:]:
if line_detail[8] in pdbnum:
newline.append(line_detail[2])
list_conv.append(newline)
return list_conv
def ToPDBNum(PDB_fasta_ls,Detail_dir):
"""
Convert linker fasta number to pdb number.
@param PDB_fasta_ls: is made by ToDetailNum()
@param Detail_dir: is need to use Detail module.
@return: Converted PDBlist
"""
import Detail
list_conv = []
for protein in PDB_fasta_ls:
pdbid = protein[0]
pdbnum = protein[1:]
newline = [pdbid]
list_detail = Detail.ReadList(pdbid, Detail_dir)
for line_detail in list_detail[:]:
if line_detail[2] in pdbnum:
newline.append(line_detail[8])
list_conv.append(newline)
return list_conv
def MakeLinkerTupple(linker_line):
"""
    Convert a linker line into a list of linker tuples
    @param linker_line: linker list's one line made by ReadList()
    @return: list of (start, end) tuples
"""
linkers = []
for i in range(1,len(linker_line),2):
linkers.append((linker_line[i],linker_line[i+1]))
return linkers
#test section
if __name__ == "__main__":
a = ReadList("/home/ryosuke/Dropbox/Task/test_list")
b = ToDetailNum(a,"/home/ryosuke/Dropbox/Task/")
print b
# WriteList(a,"test_list")
# print NotExistInDatabase("/home/ryosuke/db/FASTA/",a,"txt")
|
gpl-2.0
| -6,933,500,030,450,986,000 | 23.116667 | 69 | 0.571198 | false | 3.297872 | false | false | false |
noxdafox/pebble
|
pebble/common.py
|
1
|
5693
|
# This file is part of Pebble.
# Copyright (c) 2013-2021, Matteo Cafasso
# Pebble is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation,
# either version 3 of the License, or (at your option) any later version.
# Pebble is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with Pebble. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import os
import pickle
import signal
from threading import Thread
from traceback import format_exc
from concurrent.futures import Future
class ProcessExpired(OSError):
"""Raised when process dies unexpectedly."""
def __init__(self, msg, code=0):
super(ProcessExpired, self).__init__(msg)
self.exitcode = code
class PebbleFuture(Future):
# Same as base class, removed logline
def set_running_or_notify_cancel(self):
"""Mark the future as running or process any cancel notifications.
Should only be used by Executor implementations and unit tests.
If the future has been cancelled (cancel() was called and returned
True) then any threads waiting on the future completing (though calls
to as_completed() or wait()) are notified and False is returned.
If the future was not cancelled then it is put in the running state
(future calls to running() will return True) and True is returned.
This method should be called by Executor implementations before
executing the work associated with this future. If this method returns
False then the work should not be executed.
Returns:
False if the Future was cancelled, True otherwise.
Raises:
RuntimeError: if set_result() or set_exception() was called.
"""
with self._condition:
if self._state == CANCELLED:
self._state = CANCELLED_AND_NOTIFIED
for waiter in self._waiters:
waiter.add_cancelled(self)
return False
elif self._state == PENDING:
self._state = RUNNING
return True
else:
raise RuntimeError('Future in unexpected state')
class ProcessFuture(PebbleFuture):
def cancel(self):
"""Cancel the future.
Returns True if the future was cancelled, False otherwise. A future
cannot be cancelled if it has already completed.
"""
with self._condition:
if self._state == FINISHED:
return False
if self._state in (CANCELLED, CANCELLED_AND_NOTIFIED):
return True
self._state = CANCELLED
self._condition.notify_all()
self._invoke_callbacks()
return True
class RemoteTraceback(Exception):
"""Traceback wrapper for exceptions in remote process.
Exception.__cause__ requires a BaseException subclass.
"""
def __init__(self, traceback):
self.traceback = traceback
def __str__(self):
return self.traceback
class RemoteException(object):
"""Pickling wrapper for exceptions in remote process."""
def __init__(self, exception, traceback):
self.exception = exception
self.traceback = traceback
def __reduce__(self):
return rebuild_exception, (self.exception, self.traceback)
def rebuild_exception(exception, traceback):
exception.__cause__ = RemoteTraceback(traceback)
return exception
def launch_thread(name, function, daemon, *args, **kwargs):
thread = Thread(target=function, name=name, args=args, kwargs=kwargs)
thread.daemon = daemon
thread.start()
return thread
def launch_process(name, function, daemon, mp_context, *args, **kwargs):
process = mp_context.Process(
target=function, name=name, args=args, kwargs=kwargs)
process.daemon = daemon
process.start()
return process
def stop_process(process):
"""Does its best to stop the process."""
process.terminate()
process.join(3)
if process.is_alive() and os.name != 'nt':
try:
os.kill(process.pid, signal.SIGKILL)
process.join()
except OSError:
return
if process.is_alive():
raise RuntimeError("Unable to terminate PID %d" % os.getpid())
def execute(function, *args, **kwargs):
"""Runs the given function returning its results or exception."""
try:
return function(*args, **kwargs)
except Exception as error:
error.traceback = format_exc()
return error
def process_execute(function, *args, **kwargs):
"""Runs the given function returning its results or exception."""
try:
return function(*args, **kwargs)
except Exception as error:
error.traceback = format_exc()
return RemoteException(error, error.traceback)
def send_result(pipe, data):
"""Send result handling pickling and communication errors."""
try:
pipe.send(data)
except (pickle.PicklingError, TypeError) as error:
error.traceback = format_exc()
pipe.send(RemoteException(error, error.traceback))
SLEEP_UNIT = 0.1
# Borrowed from concurrent.futures
PENDING = 'PENDING'
RUNNING = 'RUNNING'
FINISHED = 'FINISHED'
CANCELLED = 'CANCELLED'
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
|
lgpl-3.0
| 3,995,260,078,226,853,400 | 28.194872 | 78 | 0.657825 | false | 4.423465 | false | false | false |
derwentx/WooGenerator
|
tests/test_syncupdate.py
|
1
|
2736
|
import traceback
import unittest
from pprint import pformat
from context import get_testdata, TESTS_DATA_DIR, woogenerator
from woogenerator.namespace.core import (MatchNamespace, ParserNamespace,
SettingsNamespaceProto,
UpdateNamespace)
from woogenerator.conf.parser import ArgumentParserCommon
from woogenerator.utils import Registrar, TimeUtils
class TestSyncUpdateAbstract(unittest.TestCase):
config_file = None
settings_namespace_class = SettingsNamespaceProto
argument_parser_class = ArgumentParserCommon
local_work_dir = TESTS_DATA_DIR
override_args = ''
debug = False
def setUp(self):
self.import_name = TimeUtils.get_ms_timestamp()
self.settings = self.settings_namespace_class()
self.settings.local_work_dir = self.local_work_dir
self.settings.local_live_config = None
self.settings.local_test_config = self.config_file
self.settings.init_settings(self.override_args)
# with open(yaml_path) as stream:
# config = yaml.load(stream)
# merge_mode = config.get('merge-mode', 'sync')
# master_name = config.get('master-name', 'MASTER')
# slave_name = config.get('slave-name', 'SLAVE')
# default_last_sync = config.get('default-last-sync')
#
# SyncUpdateUsr.set_globals(
# master_name, slave_name, merge_mode, default_last_sync)
Registrar.DEBUG_ERROR = False
Registrar.DEBUG_WARN = False
Registrar.DEBUG_MESSAGE = False
Registrar.DEBUG_PROGRESS = False
if self.debug:
# FieldGroup.perform_post = True
# FieldGroup.DEBUG_WARN = True
# FieldGroup.DEBUG_MESSAGE = True
# FieldGroup.DEBUG_ERROR = True
# SyncUpdateUsr.DEBUG_WARN = True
# SyncUpdateUsr.DEBUG_MESSAGE = True
# SyncUpdateUsr.DEBUG_ERROR = True
Registrar.DEBUG_ERROR = True
Registrar.DEBUG_WARN = True
Registrar.DEBUG_MESSAGE = True
Registrar.DEBUG_PROGRESS = True
Registrar.DEBUG_UPDATE = True
# Registrar.DEBUG_USR = True
# Registrar.DEBUG_CONTACT = True
# Registrar.DEBUG_NAME = True
# FieldGroup.DEBUG_CONTACT = True
# FieldGroup.enforce_mandatory_keys = False
def fail_syncupdate_assertion(self, exc, sync_update):
msg = "failed assertion: %s\n%s\n%s" % (
pformat(sync_update.sync_warnings.items()),
sync_update.tabulate(tablefmt='simple'),
traceback.format_exc(exc),
)
raise AssertionError(msg)
|
gpl-2.0
| 173,407,668,335,882,200 | 37.535211 | 73 | 0.617325 | false | 4.065379 | true | false | false |
clubit/edi-workflow
|
edi_tools/wizard/edi_wizard_archive_incoming.py
|
1
|
1327
|
from openerp.osv import osv
from openerp.tools.translate import _
from openerp import netsvc
class edi_tools_edi_wizard_archive_incoming(osv.TransientModel):
_name = 'edi.tools.edi.wizard.archive.incoming'
_description = 'Archive EDI Documents'
''' edi.tools.edi.wizard.archive.incoming:archive()
--------------------------------------------------
This method is used by the EDI wizard to push
multiple documents to the workflow "archived" state.
---------------------------------------------------- '''
def archive(self, cr, uid, ids, context=None):
# Get the selected documents
# --------------------------
ids = context.get('active_ids',[])
if not ids:
raise osv.except_osv(_('Warning!'), _("You did not provide any documents to archive!"))
# Push each document to archived
# ------------------------------
wf_service = netsvc.LocalService("workflow")
for document in self.pool.get('edi.tools.edi.document.incoming').browse(cr, uid, ids, context):
if document.state in ['new','ready','processed','in_error']:
wf_service.trg_validate(uid, 'edi.tools.edi.document.incoming', document.id, 'button_to_archived', cr)
return {'type': 'ir.actions.act_window_close'}
|
agpl-3.0
| 7,202,058,422,622,266,000 | 44.758621 | 118 | 0.562924 | false | 4.365132 | false | false | false |
jamesroutley/formation
|
test/test_parameter.py
|
1
|
2073
|
# -*- coding: utf-8 -*-
import pytest
import formation.parameter
from formation.parameter import Parameter
@pytest.mark.parametrize("parameter,expected_output", [
(
Parameter("A"),
"Parameter(title='A', param_type='String', **{})"
),
(
Parameter("A", "Number"),
"Parameter(title='A', param_type='Number', **{})"
),
(
Parameter("A", "Number", description="My description"),
"Parameter(title='A', param_type='Number', "
"**{'description': 'My description'})"
),
(
Parameter("A", "Number", description="My description"),
"Parameter(title='A', param_type='Number', **{'description': "
"'My description'})"
)
])
def test_repr(parameter, expected_output):
assert parameter.__repr__() == expected_output
@pytest.mark.parametrize("left,right,output", [
(Parameter("A"), Parameter("A"), True),
(Parameter("A"), Parameter("B"), False),
(Parameter("A"), 1, False),
(Parameter("A", default="a"), Parameter("A", default="a"), True)
])
def test_eq(left, right, output):
assert (left == right) == output
@pytest.mark.parametrize("snake,camel", [
("", ""),
("my_words", "MyWords"),
("word_1", "Word1"),
(" ", " "),
("1_word", "1Word")
])
def test_snake_to_camel(snake, camel):
output = formation.parameter._snake_to_camel(snake)
assert output == camel
def test_validate_kwargs_with_expected_keywords():
allowed_properties = [
"allowed_pattern",
"allowed_values",
"constraint_description",
"default",
"description",
"max_length",
"max_value",
"min_length",
"min_value",
"no_echo"
]
kwargs = {
property_name: "mock_value"
for property_name in allowed_properties
}
formation.parameter._validate_kwargs(kwargs)
def test_validate_kwargs_with_unexpected_keyword():
kwargs = {"unexpected_keyword": "mock_value"}
with pytest.raises(TypeError):
formation.parameter._validate_kwargs(kwargs)
|
apache-2.0
| 6,632,965,659,923,859,000 | 25.576923 | 70 | 0.581766 | false | 3.748644 | true | false | false |
samuelchen/code-snippets
|
python/logger.py
|
1
|
1906
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2013-12-12
@author: samuelchen
'''
import logging, logging.handlers
import os
import sys
class LogLevelFilter(object):
def __init__(self, level):
self.__level = level
def filter(self, logRecord):
return logRecord.levelno <= self.__level
def setLogPath(path='p2python.log'):
os.environ['P2PYTHON_LOG'] = path
fh = ch = eh = None
log_path = ''
def getLogger(name='P2Python'):
global fh, ch, eh, log_path
if not log_path and 'P2PYTHON_LOG' in os.environ:
log_path = os.environ['P2PYTHON_LOG']
else:
log_path = 'p2python.log'
setLogPath()
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter( \
"%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# file handler.
if not fh:
fh = logging.handlers.TimedRotatingFileHandler(log_path)
fh.suffix = "%Y%m%d.log"
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
# console handler
if not ch:
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
ch.addFilter(LogLevelFilter(logging.WARN))
ch.setFormatter(formatter)
# stderr handler
if not eh:
eh = logging.StreamHandler(stream=sys.stderr)
eh.setLevel(logging.ERROR)
eh.setFormatter(formatter)
# add the handlers to logger
logger.addHandler(ch)
logger.addHandler(fh)
logger.addHandler(eh)
logger.propagate = False
return logger
if __name__ == '__main__':
import logger
log = logger.getLogger()
log.debug('This is a debug message.')
log.info('This is a info message.')
log.error('This is a error message.')
|
gpl-2.0
| 3,733,849,311,889,806,300 | 23.756757 | 67 | 0.605981 | false | 3.665385 | false | false | false |
nextgis/nextgisweb
|
setup.py
|
1
|
4264
|
# -*- coding: utf-8 -*-
import sys
import io
import os
import os.path
from stat import S_IXUSR, S_IXGRP, S_IXOTH
from subprocess import check_output, CalledProcessError
from setuptools import setup, find_packages
from setuptools.command.develop import develop
from setuptools.command.install import install
with io.open('VERSION', 'r') as fd:
VERSION = fd.read().rstrip()
try:
gv = check_output(['gdal-config', '--version'], universal_newlines=True).strip()
except CalledProcessError:
gv = None
requires = [
# Do not use a specific version of system-like packages because their presence is expected
'pip',
'six',
# Other dependencies
'alembic==1.4.2',
'pyramid==1.10.1',
'SQLAlchemy==1.2.16',
'transaction==2.4.0',
'pyramid_tm==2.2.1',
'pyramid_debugtoolbar==4.5.1',
'pyramid_mako==1.0.2',
'zope.sqlalchemy==1.1',
'zope.interface<5',
'zope.event<5',
'bunch==1.0.1',
'flufl.enum==4.1.1',
'waitress==1.2.0',
'pygdal' + (('==%s.*' % gv) if gv else ''), # TODO: Add >=2.3.0
'psycopg2==2.8.5',
'geoalchemy2==0.5.0',
'shapely==1.7.1',
'affine==2.2.2',
'geojson==2.4.1',
'pillow==5.4.1',
'lxml==4.3.0',
'passlib==1.7.1',
'requests[security]==2.22.0',
'babel==2.6.0',
'sentry-sdk==0.14.3',
'python-magic==0.4.15',
'backports.tempfile==1.0',
'pyproj<3',
'elasticsearch>=7.0.0,<8.0.0',
'elasticsearch-dsl>=7.1.0,<8.0.0',
'unicodecsv==0.14.1',
'flatdict==4.0.1',
'psutil==5.7.3',
'zipstream-new==1.1.7',
'cachetools==3.1.1',
'networkx',
# TODO: Move to dev or test dependencies
'freezegun',
'pytest',
'pytest-watch',
'pytest-flake8',
'webtest',
'flake8',
'flake8-future-import',
'modernize',
]
if sys.version_info[0:2] < (3, 6):
requires.append('python2-secrets')
requires.append('OWSLib==0.17.1')
else:
requires.append('OWSLib==0.24.1')
extras_require = {
'dev': ['pdbpp', 'ipython']
}
entry_points = {
'paste.app_factory': [
'main = nextgisweb:main'
],
'babel.extractors': [
'hbs = nextgisweb.i18n.hbs:extract',
],
'pytest11': [
'nextgisweb = nextgisweb.pytest',
'nextgisweb.core = nextgisweb.core.test',
'nextgisweb.pyramid = nextgisweb.pyramid.test',
'nextgiswev.auth = nextgisweb.auth.test',
'nextgiswev.resource = nextgisweb.resource.test',
],
'nextgisweb.packages': ['nextgisweb = nextgisweb:pkginfo', ],
'nextgisweb.amd_packages': [
'nextgisweb = nextgisweb:amd_packages',
],
}
class DevelopCommand(develop):
def run(self):
develop.run(self)
# Builtin console_scripts entrypoint scripts are very slow because of
# checking package requirement. So we use generated wrapper scripts.
bin_dir, _ = os.path.split(sys.executable)
for name, module, func in (
('nextgisweb', 'nextgisweb.script', 'main'),
('nextgisweb-config', 'nextgisweb.script', 'config'),
('nextgisweb-i18n', 'nextgisweb.i18n.script', 'main'),
):
sf = os.path.join(bin_dir, name)
with open(sf, 'w') as fd:
fd.write("#!{}\n".format(sys.executable))
fd.write("from {} import {} as main\n".format(module, func))
fd.write("if __name__ == '__main__': main()\n")
st = os.stat(sf)
os.chmod(sf, st.st_mode | S_IXUSR | S_IXGRP | S_IXOTH)
class InstallCommand(install):
def run(self):
raise RuntimeError(
"Only development mode installation "
"(pip install -e ...) is supported!")
install.run(self)
setup(
name='nextgisweb',
version=VERSION,
description='nextgisweb',
author='NextGIS',
author_email='[email protected]',
url='http://nextgis.com/nextgis-web',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
python_requires=">=2.7.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4",
install_requires=requires,
extras_require=extras_require,
entry_points=entry_points,
cmdclass=dict(
develop=DevelopCommand,
install=InstallCommand,
)
)
|
gpl-3.0
| 6,692,886,225,111,922,000 | 25.65 | 94 | 0.582552 | false | 3.009174 | true | false | false |
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/eggs/mercurial-2.2.3-py2.7-linux-x86_64-ucs4.egg/mercurial/win32.py
|
1
|
15830
|
# win32.py - utility functions that use win32 API
#
# Copyright 2005-2009 Matt Mackall <[email protected]> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import ctypes, errno, os, struct, subprocess, random
_kernel32 = ctypes.windll.kernel32
_advapi32 = ctypes.windll.advapi32
_user32 = ctypes.windll.user32
_BOOL = ctypes.c_long
_WORD = ctypes.c_ushort
_DWORD = ctypes.c_ulong
_UINT = ctypes.c_uint
_LONG = ctypes.c_long
_LPCSTR = _LPSTR = ctypes.c_char_p
_HANDLE = ctypes.c_void_p
_HWND = _HANDLE
_INVALID_HANDLE_VALUE = _HANDLE(-1).value
# GetLastError
_ERROR_SUCCESS = 0
_ERROR_INVALID_PARAMETER = 87
_ERROR_INSUFFICIENT_BUFFER = 122
# WPARAM is defined as UINT_PTR (unsigned type)
# LPARAM is defined as LONG_PTR (signed type)
if ctypes.sizeof(ctypes.c_long) == ctypes.sizeof(ctypes.c_void_p):
_WPARAM = ctypes.c_ulong
_LPARAM = ctypes.c_long
elif ctypes.sizeof(ctypes.c_longlong) == ctypes.sizeof(ctypes.c_void_p):
_WPARAM = ctypes.c_ulonglong
_LPARAM = ctypes.c_longlong
class _FILETIME(ctypes.Structure):
_fields_ = [('dwLowDateTime', _DWORD),
('dwHighDateTime', _DWORD)]
class _BY_HANDLE_FILE_INFORMATION(ctypes.Structure):
_fields_ = [('dwFileAttributes', _DWORD),
('ftCreationTime', _FILETIME),
('ftLastAccessTime', _FILETIME),
('ftLastWriteTime', _FILETIME),
('dwVolumeSerialNumber', _DWORD),
('nFileSizeHigh', _DWORD),
('nFileSizeLow', _DWORD),
('nNumberOfLinks', _DWORD),
('nFileIndexHigh', _DWORD),
('nFileIndexLow', _DWORD)]
# CreateFile
_FILE_SHARE_READ = 0x00000001
_FILE_SHARE_WRITE = 0x00000002
_FILE_SHARE_DELETE = 0x00000004
_OPEN_EXISTING = 3
# SetFileAttributes
_FILE_ATTRIBUTE_NORMAL = 0x80
_FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x2000
# Process Security and Access Rights
_PROCESS_QUERY_INFORMATION = 0x0400
# GetExitCodeProcess
_STILL_ACTIVE = 259
# registry
_HKEY_CURRENT_USER = 0x80000001L
_HKEY_LOCAL_MACHINE = 0x80000002L
_KEY_READ = 0x20019
_REG_SZ = 1
_REG_DWORD = 4
class _STARTUPINFO(ctypes.Structure):
_fields_ = [('cb', _DWORD),
('lpReserved', _LPSTR),
('lpDesktop', _LPSTR),
('lpTitle', _LPSTR),
('dwX', _DWORD),
('dwY', _DWORD),
('dwXSize', _DWORD),
('dwYSize', _DWORD),
('dwXCountChars', _DWORD),
('dwYCountChars', _DWORD),
('dwFillAttribute', _DWORD),
('dwFlags', _DWORD),
('wShowWindow', _WORD),
('cbReserved2', _WORD),
('lpReserved2', ctypes.c_char_p),
('hStdInput', _HANDLE),
('hStdOutput', _HANDLE),
('hStdError', _HANDLE)]
class _PROCESS_INFORMATION(ctypes.Structure):
_fields_ = [('hProcess', _HANDLE),
('hThread', _HANDLE),
('dwProcessId', _DWORD),
('dwThreadId', _DWORD)]
_DETACHED_PROCESS = 0x00000008
_STARTF_USESHOWWINDOW = 0x00000001
_SW_HIDE = 0
class _COORD(ctypes.Structure):
_fields_ = [('X', ctypes.c_short),
('Y', ctypes.c_short)]
class _SMALL_RECT(ctypes.Structure):
_fields_ = [('Left', ctypes.c_short),
('Top', ctypes.c_short),
('Right', ctypes.c_short),
('Bottom', ctypes.c_short)]
class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
_fields_ = [('dwSize', _COORD),
('dwCursorPosition', _COORD),
('wAttributes', _WORD),
('srWindow', _SMALL_RECT),
('dwMaximumWindowSize', _COORD)]
_STD_ERROR_HANDLE = _DWORD(-12).value
# types of parameters of C functions used (required by pypy)
_kernel32.CreateFileA.argtypes = [_LPCSTR, _DWORD, _DWORD, ctypes.c_void_p,
_DWORD, _DWORD, _HANDLE]
_kernel32.CreateFileA.restype = _HANDLE
_kernel32.GetFileInformationByHandle.argtypes = [_HANDLE, ctypes.c_void_p]
_kernel32.GetFileInformationByHandle.restype = _BOOL
_kernel32.CloseHandle.argtypes = [_HANDLE]
_kernel32.CloseHandle.restype = _BOOL
try:
_kernel32.CreateHardLinkA.argtypes = [_LPCSTR, _LPCSTR, ctypes.c_void_p]
_kernel32.CreateHardLinkA.restype = _BOOL
except AttributeError:
pass
_kernel32.SetFileAttributesA.argtypes = [_LPCSTR, _DWORD]
_kernel32.SetFileAttributesA.restype = _BOOL
_kernel32.OpenProcess.argtypes = [_DWORD, _BOOL, _DWORD]
_kernel32.OpenProcess.restype = _HANDLE
_kernel32.GetExitCodeProcess.argtypes = [_HANDLE, ctypes.c_void_p]
_kernel32.GetExitCodeProcess.restype = _BOOL
_kernel32.GetLastError.argtypes = []
_kernel32.GetLastError.restype = _DWORD
_kernel32.GetModuleFileNameA.argtypes = [_HANDLE, ctypes.c_void_p, _DWORD]
_kernel32.GetModuleFileNameA.restype = _DWORD
_kernel32.CreateProcessA.argtypes = [_LPCSTR, _LPCSTR, ctypes.c_void_p,
ctypes.c_void_p, _BOOL, _DWORD, ctypes.c_void_p, _LPCSTR, ctypes.c_void_p,
ctypes.c_void_p]
_kernel32.CreateProcessA.restype = _BOOL
_kernel32.ExitProcess.argtypes = [_UINT]
_kernel32.ExitProcess.restype = None
_kernel32.GetCurrentProcessId.argtypes = []
_kernel32.GetCurrentProcessId.restype = _DWORD
_SIGNAL_HANDLER = ctypes.WINFUNCTYPE(_BOOL, _DWORD)
_kernel32.SetConsoleCtrlHandler.argtypes = [_SIGNAL_HANDLER, _BOOL]
_kernel32.SetConsoleCtrlHandler.restype = _BOOL
_kernel32.GetStdHandle.argtypes = [_DWORD]
_kernel32.GetStdHandle.restype = _HANDLE
_kernel32.GetConsoleScreenBufferInfo.argtypes = [_HANDLE, ctypes.c_void_p]
_kernel32.GetConsoleScreenBufferInfo.restype = _BOOL
_advapi32.RegOpenKeyExA.argtypes = [_HANDLE, _LPCSTR, _DWORD, _DWORD,
ctypes.c_void_p]
_advapi32.RegOpenKeyExA.restype = _LONG
_advapi32.RegQueryValueExA.argtypes = [_HANDLE, _LPCSTR, ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
_advapi32.RegQueryValueExA.restype = _LONG
_advapi32.RegCloseKey.argtypes = [_HANDLE]
_advapi32.RegCloseKey.restype = _LONG
_advapi32.GetUserNameA.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_advapi32.GetUserNameA.restype = _BOOL
_user32.GetWindowThreadProcessId.argtypes = [_HANDLE, ctypes.c_void_p]
_user32.GetWindowThreadProcessId.restype = _DWORD
_user32.ShowWindow.argtypes = [_HANDLE, ctypes.c_int]
_user32.ShowWindow.restype = _BOOL
_WNDENUMPROC = ctypes.WINFUNCTYPE(_BOOL, _HWND, _LPARAM)
_user32.EnumWindows.argtypes = [_WNDENUMPROC, _LPARAM]
_user32.EnumWindows.restype = _BOOL
def _raiseoserror(name):
err = ctypes.WinError()
raise OSError(err.errno, '%s: %s' % (name, err.strerror))
def _getfileinfo(name):
fh = _kernel32.CreateFileA(name, 0,
_FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE,
None, _OPEN_EXISTING, 0, None)
if fh == _INVALID_HANDLE_VALUE:
_raiseoserror(name)
try:
fi = _BY_HANDLE_FILE_INFORMATION()
if not _kernel32.GetFileInformationByHandle(fh, ctypes.byref(fi)):
_raiseoserror(name)
return fi
finally:
_kernel32.CloseHandle(fh)
def oslink(src, dst):
try:
if not _kernel32.CreateHardLinkA(dst, src, None):
_raiseoserror(src)
except AttributeError: # Wine doesn't support this function
_raiseoserror(src)
def nlinks(name):
'''return number of hardlinks for the given file'''
return _getfileinfo(name).nNumberOfLinks
def samefile(fpath1, fpath2):
'''Returns whether fpath1 and fpath2 refer to the same file. This is only
guaranteed to work for files, not directories.'''
res1 = _getfileinfo(fpath1)
res2 = _getfileinfo(fpath2)
return (res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber
and res1.nFileIndexHigh == res2.nFileIndexHigh
and res1.nFileIndexLow == res2.nFileIndexLow)
def samedevice(fpath1, fpath2):
'''Returns whether fpath1 and fpath2 are on the same device. This is only
guaranteed to work for files, not directories.'''
res1 = _getfileinfo(fpath1)
res2 = _getfileinfo(fpath2)
return res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber
def testpid(pid):
'''return True if pid is still running or unable to
determine, False otherwise'''
h = _kernel32.OpenProcess(_PROCESS_QUERY_INFORMATION, False, pid)
if h:
try:
status = _DWORD()
if _kernel32.GetExitCodeProcess(h, ctypes.byref(status)):
return status.value == _STILL_ACTIVE
finally:
_kernel32.CloseHandle(h)
return _kernel32.GetLastError() != _ERROR_INVALID_PARAMETER
def lookupreg(key, valname=None, scope=None):
''' Look up a key/value name in the Windows registry.
valname: value name. If unspecified, the default value for the key
is used.
scope: optionally specify scope for registry lookup, this can be
a sequence of scopes to look up in order. Default (CURRENT_USER,
LOCAL_MACHINE).
'''
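    # Hedged usage sketch (the key and value name below are illustrative only
    # and are not guaranteed to exist on a given machine):
    #   installdir = lookupreg(r'Software\Mercurial', 'InstallDir')
    #   if installdir is not None:
    #       ...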
byref = ctypes.byref
if scope is None:
scope = (_HKEY_CURRENT_USER, _HKEY_LOCAL_MACHINE)
elif not isinstance(scope, (list, tuple)):
scope = (scope,)
for s in scope:
kh = _HANDLE()
res = _advapi32.RegOpenKeyExA(s, key, 0, _KEY_READ, ctypes.byref(kh))
if res != _ERROR_SUCCESS:
continue
try:
size = _DWORD(600)
type = _DWORD()
buf = ctypes.create_string_buffer(size.value + 1)
res = _advapi32.RegQueryValueExA(kh.value, valname, None,
byref(type), buf, byref(size))
if res != _ERROR_SUCCESS:
continue
if type.value == _REG_SZ:
# string is in ANSI code page, aka local encoding
return buf.value
elif type.value == _REG_DWORD:
fmt = '<L'
s = ctypes.string_at(byref(buf), struct.calcsize(fmt))
return struct.unpack(fmt, s)[0]
finally:
_advapi32.RegCloseKey(kh.value)
def executablepath():
'''return full path of hg.exe'''
size = 600
buf = ctypes.create_string_buffer(size + 1)
len = _kernel32.GetModuleFileNameA(None, ctypes.byref(buf), size)
if len == 0:
raise ctypes.WinError()
elif len == size:
raise ctypes.WinError(_ERROR_INSUFFICIENT_BUFFER)
return buf.value
def getuser():
'''return name of current user'''
size = _DWORD(300)
buf = ctypes.create_string_buffer(size.value + 1)
if not _advapi32.GetUserNameA(ctypes.byref(buf), ctypes.byref(size)):
raise ctypes.WinError()
return buf.value
_signalhandler = []
def setsignalhandler():
'''Register a termination handler for console events including
    CTRL+C. Python signal handlers do not work well with socket
operations.
'''
def handler(event):
_kernel32.ExitProcess(1)
if _signalhandler:
return # already registered
h = _SIGNAL_HANDLER(handler)
_signalhandler.append(h) # needed to prevent garbage collection
if not _kernel32.SetConsoleCtrlHandler(h, True):
raise ctypes.WinError()
def hidewindow():
def callback(hwnd, pid):
wpid = _DWORD()
_user32.GetWindowThreadProcessId(hwnd, ctypes.byref(wpid))
if pid == wpid.value:
_user32.ShowWindow(hwnd, _SW_HIDE)
return False # stop enumerating windows
return True
pid = _kernel32.GetCurrentProcessId()
_user32.EnumWindows(_WNDENUMPROC(callback), pid)
def termwidth():
    # cmd.exe does not handle CR like a unix console; the CR is
    # counted in the line length. On 80-column consoles, if 80
    # characters are written, the following CR won't apply to the
    # current line but to the new one. Keep room for it.
width = 79
# Query stderr to avoid problems with redirections
screenbuf = _kernel32.GetStdHandle(
_STD_ERROR_HANDLE) # don't close the handle returned
if screenbuf is None or screenbuf == _INVALID_HANDLE_VALUE:
return width
csbi = _CONSOLE_SCREEN_BUFFER_INFO()
if not _kernel32.GetConsoleScreenBufferInfo(
screenbuf, ctypes.byref(csbi)):
return width
width = csbi.srWindow.Right - csbi.srWindow.Left
return width
def spawndetached(args):
# No standard library function really spawns a fully detached
# process under win32 because they allocate pipes or other objects
# to handle standard streams communications. Passing these objects
# to the child process requires handle inheritance to be enabled
# which makes really detached processes impossible.
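    # Hedged usage sketch (the argument list below is illustrative only):
    #   pid = spawndetached(['hg', 'serve', '-d'])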
si = _STARTUPINFO()
si.cb = ctypes.sizeof(_STARTUPINFO)
si.dwFlags = _STARTF_USESHOWWINDOW
si.wShowWindow = _SW_HIDE
pi = _PROCESS_INFORMATION()
env = ''
for k in os.environ:
env += "%s=%s\0" % (k, os.environ[k])
if not env:
env = '\0'
env += '\0'
args = subprocess.list2cmdline(args)
# Not running the command in shell mode makes python26 hang when
# writing to hgweb output socket.
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = comspec + " /c " + args
res = _kernel32.CreateProcessA(
None, args, None, None, False, _DETACHED_PROCESS,
env, os.getcwd(), ctypes.byref(si), ctypes.byref(pi))
if not res:
raise ctypes.WinError()
return pi.dwProcessId
def unlink(f):
'''try to implement POSIX' unlink semantics on Windows'''
# POSIX allows to unlink and rename open files. Windows has serious
# problems with doing that:
# - Calling os.unlink (or os.rename) on a file f fails if f or any
# hardlinked copy of f has been opened with Python's open(). There is no
# way such a file can be deleted or renamed on Windows (other than
# scheduling the delete or rename for the next reboot).
# - Calling os.unlink on a file that has been opened with Mercurial's
# posixfile (or comparable methods) will delay the actual deletion of
# the file for as long as the file is held open. The filename is blocked
# during that time and cannot be used for recreating a new file under
# that same name ("zombie file"). Directories containing such zombie files
# cannot be removed or moved.
# A file that has been opened with posixfile can be renamed, so we rename
# f to a random temporary name before calling os.unlink on it. This allows
# callers to recreate f immediately while having other readers do their
# implicit zombie filename blocking on a temporary name.
for tries in xrange(10):
temp = '%s-%08x' % (f, random.randint(0, 0xffffffff))
try:
os.rename(f, temp) # raises OSError EEXIST if temp exists
break
except OSError, e:
if e.errno != errno.EEXIST:
raise
else:
raise IOError, (errno.EEXIST, "No usable temporary filename found")
try:
os.unlink(temp)
except OSError:
        # The unlink might have failed because the READONLY attribute may have
# been set on the original file. Rename works fine with READONLY set,
# but not os.unlink. Reset all attributes and try again.
_kernel32.SetFileAttributesA(temp, _FILE_ATTRIBUTE_NORMAL)
try:
os.unlink(temp)
except OSError:
# The unlink might have failed due to some very rude AV-Scanners.
# Leaking a tempfile is the lesser evil than aborting here and
# leaving some potentially serious inconsistencies.
pass
def makedir(path, notindexed):
os.mkdir(path)
if notindexed:
_kernel32.SetFileAttributesA(path, _FILE_ATTRIBUTE_NOT_CONTENT_INDEXED)
|
gpl-3.0
| -3,631,045,314,598,587,400 | 34.099778 | 80 | 0.6482 | false | 3.546942 | false | false | false |
zstackio/zstack-woodpecker
|
zstackwoodpecker/zstackwoodpecker/operations/datamigrate_operations.py
|
1
|
3332
|
'''
All data migration operations for testing.
@author: Legion
'''
import apibinding.api_actions as api_actions
import zstackwoodpecker.test_util as test_util
import account_operations
import apibinding.inventory as inventory
def ps_migrage_vm(dst_ps_uuid, vm_uuid, session_uuid=None, withDataVolumes=False, withSnapshots=False):
action = api_actions.PrimaryStorageMigrateVmAction()
action.dstPrimaryStorageUuid = dst_ps_uuid
action.vmInstanceUuid = vm_uuid
action.timeout = 7200000
action.withDataVolumes = withDataVolumes
action.withSnapshots = withSnapshots
test_util.action_logger('Migrate [vm uuid: %s] to [Primary Storage: %s]' % (vm_uuid, dst_ps_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventory
def ps_migrage_volume(dst_ps_uuid, vol_uuid, volume_type=None, session_uuid=None):
action = api_actions.PrimaryStorageMigrateVolumeAction()
action.dstPrimaryStorageUuid = dst_ps_uuid
action.volumeUuid = vol_uuid
action.timeout = 7200000
test_util.action_logger('Migrate [%s Volume: %s] to [Primary Storage: %s]' % (volume_type, vol_uuid, dst_ps_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventory
def ps_migrage_root_volume(dst_ps_uuid, vol_uuid, session_uuid=None):
evt_inv = ps_migrage_volume(dst_ps_uuid=dst_ps_uuid, vol_uuid=vol_uuid, volume_type='Root', session_uuid=session_uuid)
return evt_inv
def ps_migrage_data_volume(dst_ps_uuid, vol_uuid, session_uuid=None):
evt_inv = ps_migrage_volume(dst_ps_uuid=dst_ps_uuid, vol_uuid=vol_uuid, volume_type='Data', session_uuid=session_uuid)
return evt_inv
def bs_migrage_image(dst_bs_uuid, src_bs_uuid, image_uuid, session_uuid=None):
action = api_actions.BackupStorageMigrateImageAction()
action.dstBackupStorageUuid = dst_bs_uuid
action.srcBackupStorageUuid = src_bs_uuid
action.imageUuid = image_uuid
test_util.action_logger('Migrate [Image: %s] from [Backup Storage: %s ]to [Backup Storage: %s]' % (image_uuid, src_bs_uuid, dst_bs_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventory
def get_ps_candidate_for_vol_migration(vol_uuid, session_uuid=None):
action = api_actions.GetPrimaryStorageCandidatesForVolumeMigrationAction()
action.volumeUuid = vol_uuid
test_util.action_logger('Get Primary Storage Candidates for Volume Migration')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventories
def get_bs_candidate_for_image_migration(src_bs_uuid, session_uuid=None):
action = api_actions.GetBackupStorageCandidatesForImageMigrationAction()
action.srcBackupStorageUuid = src_bs_uuid
test_util.action_logger('Get Backup Storage Candidates for Volume Migration')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventories
def get_ps_candidate_for_vm_migration(vm_uuid, session_uuid=None):
action = api_actions.GetPrimaryStorageCandidatesForVmMigrationAction()
action.vmInstanceUuid = vm_uuid
test_util.action_logger('Get Primary Storage Candidates for Vm Migration')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventories
|
apache-2.0
| 3,511,064,920,320,922,000 | 47.289855 | 141 | 0.752401 | false | 3.389624 | true | false | false |
jeKnowledge/horarios-inforestudante
|
DataParser.py
|
1
|
3411
|
import csv
import datetime
import re
from Structs import AulaDataRaw
from Structs import AulaDataSripped
def openFile(filePath):
return open(filePath)
def csvIntoRawArray(csvFile):
    # Array of arrays (lines) with data
filereader = csv.reader(csvFile)
# We will be returning an array of AulaDataRaw
# each corresponding to a line
aulaDataRawArray = []
for row in filereader:
# Skip labels row
try:
int(row[0])
except ValueError:
continue
aulaDataRawArray.append(AulaDataRaw(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8]))
return aulaDataRawArray
def rawArrayToStrippedArray(rawArray):
result = []
for raw in rawArray:
# Skip label
if raw.aulaId == "FE_ID":
continue
result.append(rawToStripped(raw))
return result
# Converts from Structs.AulaDataRaw to Structs.AulaDataSripped
def rawToStripped(dataRaw):
# Semestre:
# 1, 2 ou Anual (0)
if dataRaw.semestre == "Anual":
semestre = 0
else:
semestre = int(dataRaw.semestre[0]) # 1o caractere (1/2)
aulaCodigo = int(dataRaw.aulaId)
turmaId = int(dataRaw.turmaId)
dia = dmytimeToDayOfWeek(dataRaw.dataInicio)
horaInicio = dmytimeToTime(dataRaw.dataInicio)
horaFim = dmytimeToTime(dataRaw.dataFim)
turma = dataRaw.turma
tipo = getClassType(dataRaw.turma)
aulaNome = dataRaw.aulaNome
return AulaDataSripped(aulaCodigo, semestre, turmaId, dia, horaInicio, horaFim, turma, tipo, aulaNome)
# "10-JAN-2015 20:30:30" -> 20.55 (date to decimal time)
def dmytimeToTime(timeString):
timeStr = re.search("\d\d:\d\d:\d\d", timeString).group(0)
return int(timeStr[:2]) + int(timeStr[3:5])/60 + int(timeStr[6:8]) / 3600
# Monday -> 0
# Sunday -> 6
def dmytimeToDayOfWeek(timeString):
day = int(re.search("\d\d(?=-\w\w\w-\d\d\d\d)", timeString).group(0))
monthStr = re.search("(?<=\d\d-)\w\w\w(?=-\d\d\d\d)", timeString).group(0)
month = monthStrToNumber(monthStr)
year = int(re.search("(?<=\d\d-\w\w\w-)\d\d\d\d", timeString).group(0))
return datetime.datetime(year, month, day).weekday()
# Converts JAN -> 1
# FEB -> 2 ...
def monthStrToNumber(monthString):
upperString = str(monthString).upper()
# Oh, no switch statements. Of course.
us = upperString
if us == "JAN":
return 1
if us == "FEV" or us == "FEB":
return 2
if us == "MAR":
return 3
if us == "ABR":
return 4
if us == "MAI":
return 5
if us == "JUN":
return 6
if us == "JUL":
return 7
if us == "AGO":
return 8
if us == "SET":
return 9
if us == "OUT":
return 10
if us == "NOV":
return 11
if us == "DEZ":
return 12
return -1
# Returns array of classes in strippedArray that match classIds in
# classIdArray.
# Caution: WILL return all classes (i.e., TP1, TP2, T1, T2, ...)
def getClasses(strippedArray, semester, classIdArray):
myClasses = []
for data in strippedArray:
if data.aulaId not in classIdArray or data.semestre != semester:
continue
myClasses.append(data)
return myClasses
# Returns class "type" from turma
# i.e., TP1 => TP
# O/S/T5 => O/S/T
def getClassType(turma):
return re.search(".+(?=\d)", turma).group(0)
|
mit
| -2,165,584,668,745,276,400 | 25.238462 | 116 | 0.613603 | false | 2.943054 | false | false | false |
ermo/privateer_wcu
|
modules/wrong_escort.py
|
1
|
2543
|
import escort_mission
import faction_ships
class wrong_escort (escort_mission.escort_mission):
def __init__ (self,factionname,missiondifficulty,distance_from_base,creds,numsysaway,jumps=(),var_to_set='',dynfg='',dyntype='',alternatesystems=(),alternatefactions=(),alternateflightgroups=(),alternatetypes=(),alternategreeting=(),alternatevariable='diverted'):
escort_mission.escort_mission.__init__(self,factionname,missiondifficulty,0,0,distance_from_base,creds,0,numsysaway,jumps,var_to_set,dynfg,dyntype)
self.alternatesystems=alternatesystems
self.alternatevariable=alternatevariable
self.alternateflightgroups=alternateflightgroups
self.alternatetypes=alternatetypes
self.alternategreeting=alternategreeting
self.alternatefactions=alternatefactions
import quest
import VS
self.cp = VS.getCurrentPlayer()
quest.removeQuest(self.cp,alternatevariable,-1)
def Execute(self):
escort_mission.escort_mission.Execute(self)
sys = self.escortee.getUnitSystemFile()
if sys in self.alternatesystems:
for i in range(len(self.alternatesystems)):
if sys==self.alternatesystems[i]:
import quest
quest.removeQuest(self.cp,self.alternatevariable,i)
quest.removeQuest(self.cp,self.var_to_set,-1)
import VS
import launch
L = launch.Launch()
L.fg="Escorts"
L.faction=self.alternatefactions[i]
L.dynfg=self.alternateflightgroups[i]
L.type=self.alternatetypes[i]
L.ai="default"
L.num=6
L.minradius=3000.0
L.maxradius=4000.0
try:
L.minradius*=faction_ships.launch_distance_factor
L.minradius*=faction_ships.launch_distance_factor
except:
pass
launched=L.launch(self.escortee)
self.escortee.setFgDirective('f')
self.escortee.setFlightgroupLeader(launched)
self.escortee.setFactionName(self.alternatefactions[i])
import universe
universe.greet(self.alternategreeting[i],launched,VS.getPlayerX(self.cp))
VS.terminateMission(1)
return
|
gpl-2.0
| 8,869,745,354,992,858,000 | 49.86 | 267 | 0.585529 | false | 4.266779 | false | false | false |
faneshion/MatchZoo
|
matchzoo/auto/tuner/callbacks/load_embedding_matrix.py
|
1
|
1541
|
from matchzoo.engine.base_model import BaseModel
from matchzoo.auto.tuner.callbacks.callback import Callback
class LoadEmbeddingMatrix(Callback):
"""
Load a pre-trained embedding after the model is built.
Used with tuner to load a pre-trained embedding matrix for each newly built
model instance.
:param embedding_matrix: Embedding matrix to load.
Example:
>>> import matchzoo as mz
>>> model = mz.models.ArcI()
>>> prpr = model.get_default_preprocessor()
>>> data = mz.datasets.toy.load_data()
>>> data = prpr.fit_transform(data, verbose=0)
>>> embed = mz.datasets.toy.load_embedding()
>>> term_index = prpr.context['vocab_unit'].state['term_index']
>>> matrix = embed.build_matrix(term_index)
>>> callback = mz.auto.tuner.callbacks.LoadEmbeddingMatrix(matrix)
>>> model.params.update(prpr.context)
>>> model.params['task'] = mz.tasks.Ranking()
>>> model.params['embedding_output_dim'] = embed.output_dim
>>> result = mz.auto.tune(
... params=model.params,
... train_data=data,
... test_data=data,
... num_runs=1,
... callbacks=[callback],
... verbose=0
... )
"""
def __init__(self, embedding_matrix):
"""Init."""
self._embedding_matrix = embedding_matrix
def on_build_end(self, tuner, model: BaseModel):
"""`on_build_end`."""
model.load_embedding_matrix(self._embedding_matrix)
|
apache-2.0
| -8,370,876,919,457,573,000 | 33.244444 | 79 | 0.595717 | false | 3.776961 | false | false | false |
credativUK/vdirsyncer
|
vdirsyncer/sync.py
|
1
|
11250
|
# -*- coding: utf-8 -*-
'''
The function in `vdirsyncer.sync` can be called on two instances of `Storage`
to synchronize them. Due to the abstract API storage classes are implementing,
the two given instances don't have to be of the same exact type. This allows us
not only to synchronize a local vdir with a CalDAV server, but also synchronize
two CalDAV servers or two local vdirs.
The algorithm is based on the blogpost "How OfflineIMAP works" by Edward Z.
Yang. http://blog.ezyang.com/2012/08/how-offlineimap-works/
'''
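# A minimal calling sketch (hedged: ``storage_a``, ``storage_b`` and ``status``
# are illustrative names for two concrete Storage instances and the status dict
# persisted from the previous run; they are not defined in this module):
#
#   status = {}  # empty on the very first synchronization
#   sync(storage_a, storage_b, status, conflict_resolution='a wins')
#   # ``status`` is updated in place; persist it for the next run.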
import itertools
from . import exceptions, log
from .utils import uniq
from .utils.compat import iteritems, text_type
sync_logger = log.get(__name__)
class SyncError(exceptions.Error):
'''Errors related to synchronization.'''
class SyncConflict(SyncError):
'''
Two items changed since the last sync, they now have different contents and
no conflict resolution method was given.
:param ident: The ident of the item.
:param href_a: The item's href on side A.
:param href_b: The item's href on side B.
'''
ident = None
href_a = None
href_b = None
class IdentConflict(SyncError):
'''
Multiple items on the same storage have the same UID.
:param storage: The affected storage.
:param hrefs: List of affected hrefs on `storage`.
'''
storage = None
_hrefs = None
@property
def hrefs(self):
return self._hrefs
@hrefs.setter
def hrefs(self, val):
val = set(val)
assert len(val) > 1
self._hrefs = val
class StorageEmpty(SyncError):
'''
One storage unexpectedly got completely empty between two synchronizations.
The first argument is the empty storage.
:param empty_storage: The empty
:py:class:`vdirsyncer.storage.base.Storage`.
'''
empty_storage = None
class BothReadOnly(SyncError):
'''
Both storages are marked as read-only. Synchronization is therefore not
possible.
'''
class StorageInfo(object):
'''A wrapper class that holds prefetched items, the status and other
things.'''
def __init__(self, storage, status):
'''
:param status: {ident: (href, etag)}
'''
self.storage = storage
self.status = status
self.idents = None
def prepare_idents(self, other_read_only):
href_to_status = dict((href, (ident, etag))
for ident, (href, etag)
in iteritems(self.status))
hrefs_to_download = []
self.idents = {}
for href, etag in self.storage.list():
if href in href_to_status:
ident, old_etag = href_to_status[href]
self.idents[ident] = {
'etag': etag,
'href': href,
'ident': ident
}
if etag != old_etag and not other_read_only:
hrefs_to_download.append(href)
else:
hrefs_to_download.append(href)
# Prefetch items
for href, item, etag in (self.storage.get_multi(hrefs_to_download) if
hrefs_to_download else ()):
props = self.idents.setdefault(item.ident, {})
props['item'] = item
props['ident'] = item.ident
if props.setdefault('href', href) != href:
raise IdentConflict(storage=self.storage,
hrefs=[props['href'], href])
if props.setdefault('etag', etag) != etag:
raise SyncError('Etag changed during sync.')
def sync(storage_a, storage_b, status, conflict_resolution=None,
force_delete=False):
'''Synchronizes two storages.
:param storage_a: The first storage
:type storage_a: :class:`vdirsyncer.storage.base.Storage`
:param storage_b: The second storage
:type storage_b: :class:`vdirsyncer.storage.base.Storage`
:param status: {ident: (href_a, etag_a, href_b, etag_b)}
metadata about the two storages for detection of changes. Will be
modified by the function and should be passed to it at the next sync.
If this is the first sync, an empty dictionary should be provided.
:param conflict_resolution: Either 'a wins' or 'b wins'. If none is
provided, the sync function will raise
:py:exc:`SyncConflict`.
:param force_delete: When one storage got completely emptied between two
syncs, :py:exc:`StorageEmpty` is raised for
safety. Setting this parameter to ``True`` disables this safety
measure.
'''
if storage_a.read_only and storage_b.read_only:
raise BothReadOnly()
a_info = StorageInfo(storage_a, dict(
(ident, (href_a, etag_a))
for ident, (href_a, etag_a, href_b, etag_b) in iteritems(status)
))
b_info = StorageInfo(storage_b, dict(
(ident, (href_b, etag_b))
for ident, (href_a, etag_a, href_b, etag_b) in iteritems(status)
))
a_info.prepare_idents(storage_b.read_only)
b_info.prepare_idents(storage_a.read_only)
if bool(a_info.idents) != bool(b_info.idents) \
and status and not force_delete:
raise StorageEmpty(
empty_storage=(storage_b if a_info.idents else storage_a))
actions = list(_get_actions(a_info, b_info))
with storage_a.at_once():
with storage_b.at_once():
for action in actions:
action(a_info, b_info, conflict_resolution)
status.clear()
for ident in uniq(itertools.chain(a_info.status, b_info.status)):
href_a, etag_a = a_info.status[ident]
href_b, etag_b = b_info.status[ident]
status[ident] = href_a, etag_a, href_b, etag_b
def _action_upload(ident, source, dest):
def inner(a, b, conflict_resolution):
sync_logger.info('Copying (uploading) item {0} to {1}'
.format(ident, dest.storage))
source_meta = source.idents[ident]
if dest.storage.read_only:
sync_logger.warning('{dest} is read-only. Skipping update...'
.format(dest=dest.storage))
dest_href = dest_etag = None
else:
item = source_meta['item']
dest_href, dest_etag = dest.storage.upload(item)
source.status[ident] = source_meta['href'], source_meta['etag']
dest.status[ident] = dest_href, dest_etag
return inner
def _action_update(ident, source, dest):
def inner(a, b, conflict_resolution):
sync_logger.info('Copying (updating) item {0} to {1}'
.format(ident, dest.storage))
source_meta = source.idents[ident]
if dest.storage.read_only:
sync_logger.info('{dest} is read-only. Skipping update...'
.format(dest=dest.storage))
dest_href = dest_etag = None
else:
dest_meta = dest.idents[ident]
dest_href = dest_meta['href']
dest_etag = dest.storage.update(dest_href, source_meta['item'],
dest_meta['etag'])
assert isinstance(dest_etag, (bytes, text_type))
source.status[ident] = source_meta['href'], source_meta['etag']
dest.status[ident] = dest_href, dest_etag
return inner
def _action_delete(ident, info):
storage = info.storage
idents = info.idents
def inner(a, b, conflict_resolution):
sync_logger.info('Deleting item {0} from {1}'.format(ident, storage))
if storage.read_only:
sync_logger.warning('{0} is read-only, skipping deletion...'
.format(storage))
else:
meta = idents[ident]
etag = meta['etag']
href = meta['href']
storage.delete(href, etag)
del a.status[ident]
del b.status[ident]
return inner
def _action_delete_status(ident):
def inner(a, b, conflict_resolution):
sync_logger.info('Deleting status info for nonexisting item {0}'
.format(ident))
del a.status[ident]
del b.status[ident]
return inner
def _action_conflict_resolve(ident):
def inner(a, b, conflict_resolution):
sync_logger.info('Doing conflict resolution for item {0}...'
.format(ident))
meta_a = a.idents[ident]
meta_b = b.idents[ident]
if meta_a['item'].raw == meta_b['item'].raw:
sync_logger.info('...same content on both sides.')
a.status[ident] = meta_a['href'], meta_a['etag']
b.status[ident] = meta_b['href'], meta_b['etag']
elif conflict_resolution is None:
raise SyncConflict(ident=ident, href_a=meta_a['href'],
href_b=meta_b['href'])
elif conflict_resolution == 'a wins':
sync_logger.info('...{0} wins.'.format(a.storage))
_action_update(ident, a, b)(a, b, conflict_resolution)
elif conflict_resolution == 'b wins':
sync_logger.info('...{0} wins.'.format(b.storage))
_action_update(ident, b, a)(a, b, conflict_resolution)
else:
raise ValueError('Invalid conflict resolution mode: {0}'
.format(conflict_resolution))
return inner
def _get_actions(a_info, b_info):
for ident in uniq(itertools.chain(a_info.idents, b_info.idents,
a_info.status)):
a = a_info.idents.get(ident, None)
b = b_info.idents.get(ident, None)
assert not a or a['etag'] is not None
assert not b or b['etag'] is not None
_, status_etag_a = a_info.status.get(ident, (None, None))
_, status_etag_b = b_info.status.get(ident, (None, None))
if a and b:
if a['etag'] != status_etag_a and b['etag'] != status_etag_b:
# item was modified on both sides
# OR: missing status
yield _action_conflict_resolve(ident)
elif a['etag'] != status_etag_a:
# item was only modified in a
yield _action_update(ident, a_info, b_info)
elif b['etag'] != status_etag_b:
# item was only modified in b
yield _action_update(ident, b_info, a_info)
elif a and not b:
if a['etag'] != status_etag_a:
# was deleted from b but modified on a
# OR: new item was created in a
yield _action_upload(ident, a_info, b_info)
else:
# was deleted from b and not modified on a
yield _action_delete(ident, a_info)
elif not a and b:
if b['etag'] != status_etag_b:
# was deleted from a but modified on b
# OR: new item was created in b
yield _action_upload(ident, b_info, a_info)
else:
# was deleted from a and not changed on b
yield _action_delete(ident, b_info)
elif not a and not b:
# was deleted from a and b, clean up status
yield _action_delete_status(ident)
|
mit
| -7,033,694,881,106,067,000 | 33.829721 | 79 | 0.573778 | false | 3.867308 | false | false | false |
cgqyh/pyalgotrade-mod
|
pyalgotrade/optimizer/server.py
|
1
|
8087
|
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <[email protected]>
"""
import SimpleXMLRPCServer
import threading
import time
import pickle
import pyalgotrade.logger
class AutoStopThread(threading.Thread):
def __init__(self, server):
threading.Thread.__init__(self)
self.__server = server
def run(self):
while self.__server.jobsPending():
time.sleep(1)
self.__server.stop()
class Results(object):
"""The results of the strategy executions."""
def __init__(self, parameters, result):
self.__parameters = parameters
self.__result = result
def getParameters(self):
"""Returns a sequence of parameter values."""
return self.__parameters
def getResult(self):
"""Returns the result for a given set of parameters."""
return self.__result
class Job(object):
def __init__(self, strategyParameters):
self.__strategyParameters = strategyParameters
self.__bestResult = None
self.__bestParameters = None
self.__id = id(self)
def getId(self):
return self.__id
def getNextParameters(self):
ret = None
if len(self.__strategyParameters):
ret = self.__strategyParameters.pop()
return ret
def getBestParameters(self):
return self.__bestParameters
def getBestResult(self):
return self.__bestResult
def getBestWorkerName(self):
return self.__bestWorkerName
def setBestResult(self, result, parameters, workerName):
self.__bestResult = result
self.__bestParameters = parameters
self.__bestWorkerName = workerName
# Restrict to a particular path.
class RequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
rpc_paths = ('/PyAlgoTradeRPC',)
class Server(SimpleXMLRPCServer.SimpleXMLRPCServer):
defaultBatchSize = 200
def __init__(self, address, port, autoStop=True):
SimpleXMLRPCServer.SimpleXMLRPCServer.__init__(self, (address, port), requestHandler=RequestHandler, logRequests=False, allow_none=True)
self.__instrumentsAndBars = None # Pickle'd instruments and bars for faster retrieval.
self.__barsFreq = None
self.__activeJobs = {}
self.__activeJobsLock = threading.Lock()
self.__parametersLock = threading.Lock()
self.__bestJob = None
self.__parametersIterator = None
self.__logger = pyalgotrade.logger.getLogger("server")
if autoStop:
self.__autoStopThread = AutoStopThread(self)
else:
self.__autoStopThread = None
self.register_introspection_functions()
self.register_function(self.getInstrumentsAndBars, 'getInstrumentsAndBars')
self.register_function(self.getBarsFrequency, 'getBarsFrequency')
self.register_function(self.getNextJob, 'getNextJob')
self.register_function(self.pushJobResults, 'pushJobResults')
self.__forcedStop = False
def __getNextParams(self):
ret = []
# Get the next set of parameters.
with self.__parametersLock:
if self.__parametersIterator is not None:
try:
for i in xrange(Server.defaultBatchSize):
ret.append(self.__parametersIterator.next())
except StopIteration:
self.__parametersIterator = None
return ret
def getLogger(self):
return self.__logger
def getInstrumentsAndBars(self):
return self.__instrumentsAndBars
def getBarsFrequency(self):
return str(self.__barsFreq)
def getBestJob(self):
return self.__bestJob
def getNextJob(self):
ret = None
params = []
# Get the next set of parameters.
params = self.__getNextParams()
# Map the active job
if len(params):
ret = Job(params)
with self.__activeJobsLock:
self.__activeJobs[ret.getId()] = ret
return pickle.dumps(ret)
def jobsPending(self):
if self.__forcedStop:
return False
with self.__parametersLock:
jobsPending = self.__parametersIterator is not None
with self.__activeJobsLock:
activeJobs = len(self.__activeJobs) > 0
return jobsPending or activeJobs
def pushJobResults(self, jobId, result, parameters, workerName):
jobId = pickle.loads(jobId)
result = pickle.loads(result)
parameters = pickle.loads(parameters)
workerName = pickle.loads(workerName)
job = None
# Get the active job and remove the mapping.
with self.__activeJobsLock:
try:
job = self.__activeJobs[jobId]
del self.__activeJobs[jobId]
except KeyError:
# The job's results were already submitted.
return
# Save the job with the best result
if self.__bestJob is None or result > self.__bestJob.getBestResult():
job.setBestResult(result, parameters, workerName)
self.__bestJob = job
self.getLogger().info("Partial result %s with parameters: %s from %s" % (result, parameters, workerName))
def stop(self):
self.shutdown()
def serve(self, barFeed, strategyParameters):
ret = None
try:
# Initialize instruments, bars and parameters.
self.getLogger().info("Loading bars")
loadedBars = []
for dateTime, bars in barFeed:
loadedBars.append(bars)
instruments = barFeed.getRegisteredInstruments()
self.__instrumentsAndBars = pickle.dumps((instruments, loadedBars))
self.__barsFreq = barFeed.getFrequency()
self.__parametersIterator = iter(strategyParameters)
if self.__autoStopThread:
self.__autoStopThread.start()
self.getLogger().info("Waiting for workers")
self.serve_forever()
if self.__autoStopThread:
self.__autoStopThread.join()
# Show the best result.
bestJob = self.getBestJob()
if bestJob:
self.getLogger().info("Best final result %s with parameters: %s from client %s" % (bestJob.getBestResult(), bestJob.getBestParameters(), bestJob.getBestWorkerName()))
ret = Results(bestJob.getBestParameters(), bestJob.getBestResult())
else:
self.getLogger().error("No jobs processed")
finally:
self.__forcedStop = True
return ret
def serve(barFeed, strategyParameters, address, port):
"""Executes a server that will provide bars and strategy parameters for workers to use.
:param barFeed: The bar feed that each worker will use to backtest the strategy.
:type barFeed: :class:`pyalgotrade.barfeed.BarFeed`.
:param strategyParameters: The set of parameters to use for backtesting. An iterable object where **each element is a tuple that holds parameter values**.
:param address: The address to listen for incoming worker connections.
:type address: string.
:param port: The port to listen for incoming worker connections.
:type port: int.
:rtype: A :class:`Results` instance with the best results found.
"""
s = Server(address, port)
return s.serve(barFeed, strategyParameters)
|
apache-2.0
| -4,952,820,957,967,452,000 | 32.556017 | 182 | 0.634599 | false | 4.404684 | false | false | false |
rdmorganiser/rdmo
|
rdmo/questions/admin.py
|
1
|
3411
|
from django import forms
from django.contrib import admin
from django.db import models
from rdmo.core.utils import get_language_fields
from .models import Catalog, Question, QuestionSet, Section
from .validators import (CatalogLockedValidator, CatalogUniqueURIValidator,
QuestionLockedValidator, QuestionSetLockedValidator,
QuestionSetUniqueURIValidator,
QuestionUniqueURIValidator, SectionLockedValidator,
SectionUniqueURIValidator)
class CatalogAdminForm(forms.ModelForm):
key = forms.SlugField(required=True)
class Meta:
model = Catalog
fields = '__all__'
def clean(self):
CatalogUniqueURIValidator(self.instance)(self.cleaned_data)
CatalogLockedValidator(self.instance)(self.cleaned_data)
class SectionAdminForm(forms.ModelForm):
key = forms.SlugField(required=True)
class Meta:
model = Section
fields = '__all__'
def clean(self):
SectionUniqueURIValidator(self.instance)(self.cleaned_data)
SectionLockedValidator(self.instance)(self.cleaned_data)
class QuestionSetAdminForm(forms.ModelForm):
key = forms.SlugField(required=True)
class Meta:
model = QuestionSet
fields = '__all__'
def clean(self):
QuestionSetUniqueURIValidator(self.instance)(self.cleaned_data)
QuestionSetLockedValidator(self.instance)(self.cleaned_data)
class QuestionAdminForm(forms.ModelForm):
key = forms.SlugField(required=True)
class Meta:
model = Question
fields = '__all__'
def clean(self):
QuestionUniqueURIValidator(self.instance)(self.cleaned_data)
QuestionLockedValidator(self.instance)(self.cleaned_data)
class CatalogAdmin(admin.ModelAdmin):
form = CatalogAdminForm
search_fields = ['uri'] + get_language_fields('title')
list_display = ('uri', 'title', 'projects_count', 'available')
readonly_fields = ('uri', )
list_filter = ('available', )
def get_queryset(self, request):
return super().get_queryset(request) \
.annotate(projects_count=models.Count('projects'))
def projects_count(self, obj):
return obj.projects_count
class SectionAdmin(admin.ModelAdmin):
form = SectionAdminForm
search_fields = ['uri'] + get_language_fields('title')
list_display = ('uri', 'title')
readonly_fields = ('uri', 'path')
list_filter = ('catalog', )
class QuestionSetAdmin(admin.ModelAdmin):
form = QuestionSetAdminForm
search_fields = ['uri'] + get_language_fields('title') + get_language_fields('help')
list_display = ('uri', 'attribute', 'is_collection')
readonly_fields = ('uri', 'path')
list_filter = ('section__catalog', 'section', 'is_collection')
class QuestionItemAdmin(admin.ModelAdmin):
form = QuestionAdminForm
search_fields = ['uri'] + get_language_fields('help') + get_language_fields('text')
list_display = ('uri', 'attribute', 'text', 'is_collection')
readonly_fields = ('uri', 'path')
list_filter = ('questionset__section__catalog', 'questionset__section', 'is_collection', 'widget_type', 'value_type')
admin.site.register(Catalog, CatalogAdmin)
admin.site.register(Section, SectionAdmin)
admin.site.register(QuestionSet, QuestionSetAdmin)
admin.site.register(Question, QuestionItemAdmin)
|
apache-2.0
| -897,750,510,695,204,200 | 30.293578 | 121 | 0.6781 | false | 4.12954 | false | false | false |