repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
jieyu/maple | script/maple/benchmark/aget_bug2_training.py | 1 | 1607 | """Copyright 2011 The University of Michigan
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors - Jie Yu ([email protected])
"""
import os
import subprocess
from maple.core import config
from maple.core import logging
from maple.core import testing
_sio = [None, os.devnull, 'stderr']
class Test(testing.ServerTest):
def __init__(self, input_idx):
testing.ServerTest.__init__(self, input_idx)
self.add_input(([self.bin(), '-n1', 'http://apache.cyberuse.com/httpd/httpd-2.2.21.tar.gz', '-l', 'aget.file'], _sio))
def setup(self):
if os.path.exists('aget.file'):
os.remove('aget.file')
def tear_down(self):
if os.path.exists('aget.file'):
os.remove('aget.file')
def start(self):
args, sio = self.input()
cmd = []
if self.prefix != None:
cmd.extend(self.prefix)
cmd.extend(args)
self.proc = subprocess.Popen(cmd)
def stop(self):
self.proc.wait()
def bin(self):
return config.benchmark_home('aget_bug2') + '/aget'
def get_test(input_idx='default'):
return Test(input_idx)
| apache-2.0 | -3,940,439,966,726,479,400 | 31.14 | 126 | 0.667082 | false |
al8/raspberrypi-photosensor | photosensor.py | 1 | 2036 | #!/usr/bin/env python
from __future__ import print_function
# Example for RC timing reading for Raspberry Pi
# Must be used with GPIO 0.3.1a or later - earlier versions
# are not fast enough!
import argparse
import time
import sys
import atexit
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
g_RCpin = None
def RCtime(RCpin, sleep, maxvalue):
global g_RCpin
g_RCpin = RCpin
reading = 0
GPIO.setup(RCpin, GPIO.OUT)
GPIO.output(RCpin, GPIO.LOW)
time.sleep(0.1)
GPIO.setup(RCpin, GPIO.IN)
# This takes about 1 millisecond per loop cycle
while (GPIO.input(RCpin) == GPIO.LOW):
reading += 1
time.sleep(sleep)
if reading >= maxvalue: break
return reading
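# Illustrative call (values mirror the argparse defaults further down):
#   RCtime(18, 0.04, 50)
# drives GPIO 18 low for 0.1 s to discharge the capacitor, then counts polling cycles
# (each followed by a 0.04 s pause) until the pin reads HIGH, capping the count at 50.
# Slower charging in the dark means larger readings, i.e. larger numbers are darker.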
@atexit.register
def setread():
if g_RCpin is None:
return
GPIO.setup(g_RCpin, GPIO.IN)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='photosensor, resistor/capacitor timer method. larger numbers are darker, default values tuned for 3uF capacitor.')
parser.add_argument("--pin", type=int, default=18, help="gpio pin used")
parser.add_argument("--div", type=int, default=1, help="divide final value by this")
parser.add_argument("--sleep", type=float, default=0.04, help="sleep between counter in counting")
parser.add_argument("--maxvalue", type=int, default=50, help="max 'darkness' to be detected")
parser.add_argument("--outfile", "-o", type=str, default="/tmp/photosensor.value")
parser.add_argument("--debug", action="store_true")
options = parser.parse_args()
if options.debug:
print("using pin %d" % options.pin)
while True:
reading = RCtime(options.pin, options.sleep, options.maxvalue) / options.div # Read RC timing on the configured pin
if options.debug:
print("%s: " % time.asctime(), file=sys.stderr, end='')
print(reading)
with open(options.outfile, "wb") as f:
f.write("%d" % reading)
time.sleep(0.5)
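# Example invocation (illustrative; the flags map onto the argparse options above):
#   python photosensor.py --pin 18 --sleep 0.04 --maxvalue 50 --debug
# Each reading is printed and also written to /tmp/photosensor.value (the default
# --outfile) so other processes can poll the latest light level.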
| mit | -7,217,418,351,592,861,000 | 31.83871 | 164 | 0.641454 | false |
oseledets/pybtex | pybtex/style/labels/alpha.py | 1 | 6453 | # -*- coding: utf-8 -*-
# Copyright (c) 2010, 2011, 2012 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
if sys.version_info < (2, 7):
from counter import Counter
else:
from collections import Counter
import re
import string
import unicodedata
from pybtex.style.labels import BaseLabelStyle
_nonalnum_pattern = re.compile('[^A-Za-z0-9]+', re.UNICODE)
def _strip_accents(s):
return u''.join(
(c for c in unicodedata.normalize('NFD', s)
if not unicodedata.combining(c)))
def _strip_nonalnum(parts):
"""Strip all non-alphanumerical characters from a list of strings.
>>> print _strip_nonalnum([u"ÅA. B. Testing 12+}[.@~_", u" 3%"])
AABTesting123
"""
s = u''.join(parts)
return _nonalnum_pattern.sub(u'', _strip_accents(s))
class LabelStyle(BaseLabelStyle):
def format_labels(self, sorted_entries):
labels = [self.format_label(entry) for entry in sorted_entries]
count = Counter(labels)
counted = Counter()
for label in labels:
if count[label] == 1:
yield label
else:
yield label + chr(ord('a') + counted[label])
counted.update([label])
# note: this currently closely follows the alpha.bst code
# we should eventually refactor it
def format_label(self, entry):
# see alpha.bst calc.label
if entry.type == "book" or entry.type == "inbook":
label = self.author_editor_key_label(entry)
elif entry.type == "proceedings":
label = self.editor_key_organization_label(entry)
elif entry.type == "manual":
label = self.author_key_organization_label(entry)
else:
label = self.author_key_label(entry)
if "year" in entry.fields:
return label + entry.fields["year"][-2:]
else:
return label
# bst additionally sets sort.label
def author_key_label(self, entry):
# see alpha.bst author.key.label
if not "author" in entry.persons:
if not "key" in entry.fields:
return entry.key[:3] # entry.key is bst cite$
else:
# for entry.key, bst actually uses text.prefix$
return entry.fields["key"][:3]
else:
return self.format_lab_names(entry.persons["author"])
def author_editor_key_label(self, entry):
# see alpha.bst author.editor.key.label
if not "author" in entry.persons:
if not "editor" in entry.persons:
if not "key" in entry.fields:
return entry.key[:3] # entry.key is bst cite$
else:
# for entry.key, bst actually uses text.prefix$
return entry.fields["key"][:3]
else:
return self.format_lab_names(entry.persons["editor"])
else:
return self.format_lab_names(entry.persons["author"])
def author_key_organization_label(self, entry):
if not "author" in entry.persons:
if not "key" in entry.fields:
if not "organization" in entry.fields:
return entry.key[:3] # entry.key is bst cite$
else:
result = entry.fields["organization"]
if result.startswith("The "):
result = result[4:]
return result
else:
return entry.fields["key"][:3]
else:
return self.format_lab_names(entry.persons["author"])
def editor_key_organization_label(self, entry):
if not "editor" in entry.persons:
if not "key" in entry.fields:
if not "organization" in entry.fields:
return entry.key[:3] # entry.key is bst cite$
else:
result = entry.fields["organization"]
if result.startswith("The "):
result = result[4:]
return result
else:
return entry.fields["key"][:3]
else:
return self.format_lab_names(entry.persons["editor"])
def format_lab_names(self, persons):
# see alpha.bst format.lab.names
# s = persons
numnames = len(persons)
if numnames > 1:
if numnames > 4:
namesleft = 3
else:
namesleft = numnames
result = ""
nameptr = 1
while namesleft:
person = persons[nameptr - 1]
if nameptr == numnames:
if unicode(person) == "others":
result += "+"
else:
result += _strip_nonalnum(
person.prelast(abbr=True) + person.last(abbr=True))
else:
result += _strip_nonalnum(
person.prelast(abbr=True) + person.last(abbr=True))
nameptr += 1
namesleft -= 1
if numnames > 4:
result += "+"
else:
person = persons[0]
result = _strip_nonalnum(
person.prelast(abbr=True) + person.last(abbr=True))
if len(result) < 2:
result = _strip_nonalnum(person.last(abbr=False))[:3]
return result
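# Illustrative behaviour (hypothetical entries, mirroring the alpha.bst conventions this
# module follows): a 1986 entry by Aho, Sethi and Ullman gets the label "ASU86" (initials
# of up to three surnames plus the last two digits of the year, with "+" appended when
# there are more than four authors), a single-author 1984 entry by Knuth becomes "Knu84",
# and duplicate labels are disambiguated with "a", "b", ... suffixes in format_labels().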
| mit | 3,171,189,926,441,258,500 | 36.511628 | 79 | 0.566491 | false |
alisheykhi/SocialPDA | Degree_Anonimity.py | 1 | 2374 | from graph_util import ReadGraph
from micro_pda import MicroPDA
from particle_pda1 import ParticlePDA
from swarm_pda import SwarmPDA
from Measurments import measurmnets
import time
import datetime
import pymysql
import json
# avg, std, best over 30 runs;
# file name = datasetName_Beta_k_Delta_l
# levels = low, medium, high, and critical
#graph_caida = ReadGraph("caida.txt",6)
# graph_caida = ReadGraph("polblogs.gml",1)
# #graph_caida = ReadGraph("polbooks.gml",level=3)
# optimal_omega_cluster = MicroPDA(graph_caida.sorted_degree_sequence)
# particle_pda = ParticlePDA(omega_clusters=optimal_omega_cluster.omega_clusters,beta= 0.01,
# removed_omega_clusters=optimal_omega_cluster.removed_omega_clusters)
# particle_pda.plotResults()
# anonymizedcluster = particle_pda.clusterWithAvg()
# swarm_pda = SwarmPDA(omega_clusters=particle_pda.clusters_avg_embedded,graph_G= graph_caida.G)
# sol = swarm_pda.run_swarm()
# measurment = measurmnets(graph_caida.G, sol['original'], sol['modified'])
# measure = measurment.calculate_measures()
# for key,value in measure.iteritems():
# print key , '----->' , value
graph_name = 'caida.txt'
level = 6
beta = 0.5
l = 30
run = 30
graph = ReadGraph(graph_name,level)
db = pymysql.connect(host="localhost",
user="root",
passwd="",
db="SocialPda")
connection = db.cursor()
optimal_omega_cluster = MicroPDA(graph.sorted_degree_sequence)
cluster = json.dumps(optimal_omega_cluster.omega_clusters)
print cluster
# for i in range(1,run+1):
# optimal_omega_cluster = MicroPDA(graph.sorted_degree_sequence)
# cluster = json.dumps(optimal_omega_cluster.omega_clusters)
# #insert into micropda
# ts = time.time()
# timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
#
# sql = "INSERT INTO `SocialPDA`.`micropda` (`dataset`, `Beta`, `l`, `date`, `k`, `delta`, `omega_cluster`, `run`) VALUES (%s,%s,%s,%s,%s,%s,%r,%s)"
# connection.execute(sql, (graph_name, beta, l, timestamp, '1', '1',cluster , i))
# connection.commit()
# connection.close()
# particle_pda = ParticlePDA(omega_clusters=optimal_omega_cluster.omega_clusters,beta= beta,
# removed_omega_clusters=optimal_omega_cluster.removed_omega_clusters)
#insert into particlepda | apache-2.0 | -1,964,974,860,067,055,400 | 33.926471 | 152 | 0.680286 | false |
willprice/arduino-sphere-project | scripts/example_direction_finder/temboo/Library/Fitbit/Activities/GetActivityWeeklyGoals.py | 1 | 4593 | # -*- coding: utf-8 -*-
###############################################################################
#
# GetActivityWeeklyGoals
# Get a user's current weekly activity goals.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetActivityWeeklyGoals(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetActivityWeeklyGoals Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetActivityWeeklyGoals, self).__init__(temboo_session, '/Library/Fitbit/Activities/GetActivityWeeklyGoals')
def new_input_set(self):
return GetActivityWeeklyGoalsInputSet()
def _make_result_set(self, result, path):
return GetActivityWeeklyGoalsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetActivityWeeklyGoalsChoreographyExecution(session, exec_id, path)
class GetActivityWeeklyGoalsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetActivityWeeklyGoals
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessTokenSecret(self, value):
"""
Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.)
"""
super(GetActivityWeeklyGoalsInputSet, self)._set_input('AccessTokenSecret', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
"""
super(GetActivityWeeklyGoalsInputSet, self)._set_input('AccessToken', value)
def set_ConsumerKey(self, value):
"""
Set the value of the ConsumerKey input for this Choreo. ((required, string) The Consumer Key provided by Fitbit.)
"""
super(GetActivityWeeklyGoalsInputSet, self)._set_input('ConsumerKey', value)
def set_ConsumerSecret(self, value):
"""
Set the value of the ConsumerSecret input for this Choreo. ((required, string) The Consumer Secret provided by Fitbit.)
"""
super(GetActivityWeeklyGoalsInputSet, self)._set_input('ConsumerSecret', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that you want the response to be in: xml or json. Defaults to json.)
"""
super(GetActivityWeeklyGoalsInputSet, self)._set_input('ResponseFormat', value)
def set_UserID(self, value):
"""
Set the value of the UserID input for this Choreo. ((optional, string) The user's encoded id. Defaults to "-" (dash) which will return data for the user associated with the token credentials provided.)
"""
super(GetActivityWeeklyGoalsInputSet, self)._set_input('UserID', value)
class GetActivityWeeklyGoalsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetActivityWeeklyGoals Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Fitbit.)
"""
return self._output.get('Response', None)
class GetActivityWeeklyGoalsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetActivityWeeklyGoalsResultSet(response, path)
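# Minimal usage sketch (illustrative only; the session import path and all credential
# values below are placeholders based on the common Temboo SDK layout, not something
# defined in this module):
#
# from temboo.core.session import TembooSession
#
# session = TembooSession("ACCOUNT_NAME", "APP_KEY_NAME", "APP_KEY_VALUE")
# choreo = GetActivityWeeklyGoals(session)
# inputs = choreo.new_input_set()
# inputs.set_ConsumerKey("FITBIT_CONSUMER_KEY")
# inputs.set_ConsumerSecret("FITBIT_CONSUMER_SECRET")
# inputs.set_AccessToken("OAUTH_ACCESS_TOKEN")
# inputs.set_AccessTokenSecret("OAUTH_ACCESS_TOKEN_SECRET")
# results = choreo.execute_with_results(inputs)
# print(results.get_Response())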
| gpl-2.0 | -8,695,528,798,487,133,000 | 41.925234 | 209 | 0.689092 | false |
DoubleGremlin181/Scripting | Wikipedia.py | 1 | 1131 | #A script to check if any two wikipedia pages have any common wiki links
#TODO: Check on multiple levels, i.e. check if any of the linked wikis on the pages have a common wiki link
import requests
import wikipedia
from bs4 import BeautifulSoup
def get_list(page):
links_page=["/wiki/"+page.split("/")[4]]
data = requests.get(page).content
soup = BeautifulSoup(data, "lxml")
for temp in soup.find_all("a"):
if temp.parent.name =="p":
links_page.append(temp["href"])
return links_page
def compare(list1,list2):
common=[]
for i in list1:
if i in list2:
common.append("https://en.wikipedia.org"+i)
return common
def main():
page1 = raw_input("Enter the url of the first page\n>")
page2 = raw_input("Enter the url of the second page\n>")
links_page1=get_list(page1)
links_page2=get_list(page2)
common = compare(links_page1,links_page2)
print "\nThe pages are directly linked through the following wikis:\n"
print '\n'.join(['%i: %s' % (n+1, common[n]) for n in xrange(len(common))])
if __name__ == '__main__':
main()
| gpl-3.0 | -1,919,865,110,978,478,600 | 25.302326 | 107 | 0.641026 | false |
f-prettyland/angr | angr/exploration_techniques/tracer.py | 1 | 16414 | import os
import logging
import claripy
from . import ExplorationTechnique, Cacher
from .. import BP_BEFORE
from ..calling_conventions import SYSCALL_CC
from ..errors import AngrTracerError, SimMemoryError, SimEngineError
from ..storage.file import SimFile
l = logging.getLogger("angr.exploration_techniques.tracer")
class Tracer(ExplorationTechnique):
"""
An exploration technique that follows an angr path with a concrete input.
The tracing result is the state after executing the last basic block of the
given trace and can be found in 'traced' stash.
If the given concrete input makes the program crash, the last correct
states that you might want are kept in the 'predecessors' list. The crashed
state can be found with CrashMonitor exploration technique.
"""
def __init__(self, trace=None, resiliency=True, use_cache=True, dump_syscall=False, keep_predecessors=1):
"""
:param trace : The basic block trace.
:param resiliency : Should we continue to step forward even if qemu and angr disagree?
:param use_cache : True if we want to use caching system.
:param dump_syscall : True if we want to dump the syscall information.
:param keep_predecessors: Number of states before the final state we should preserve.
Default 1, must be greater than 0.
"""
super(Tracer, self).__init__()
self._trace = trace
self._resiliency = resiliency
self._dump_syscall = dump_syscall
# keep track of the last basic block we hit
if keep_predecessors < 1:
raise ValueError("Must have keep_predecessors >= 1")
self.predecessors = [None] * keep_predecessors
# whether we should follow the trace
self._no_follow = self._trace is None
# initialize the syscall statistics if the flag is on
if self._dump_syscall:
self._syscalls = []
self._use_cache = use_cache
def setup(self, simgr):
self.project = simgr._project
s = simgr.active[0]
# initialize the basic block counter to 0
s.globals['bb_cnt'] = 0
if self._dump_syscall:
s.inspect.b('syscall', when=BP_BEFORE, action=self._syscall)
elif self.project.loader.main_object.os.startswith('UNIX'):
# Step forward until we catch up with QEMU
if self._trace and s.addr != self._trace[0]:
simgr = simgr.explore(find=self.project.entry)
simgr = simgr.drop(stash="unsat")
simgr = simgr.unstash(from_stash="found",to_stash="active")
if self.project.loader.main_object.os == 'cgc':
if self._use_cache:
cache_file = os.path.join("/tmp", "%(name)s-%(binhash)s.tcache")
cacher = Cacher(when=self._tracer_cache_cond,
container=cache_file,
dump_func=self._tracer_dump,
load_func=self._tracer_load)
simgr.use_technique(cacher)
# If we're restoring from a cache, we preconstrain. If we're not restoring from a cache,
# the cacher will preconstrain.
# If we're restoring from a cache, we can safely remove the cacher
# right after.
if os.path.exists(cacher.container):
simgr.one_active.preconstrainer.preconstrain_state()
simgr.remove_tech(cacher)
else:
simgr.one_active.preconstrainer.preconstrain_state()
def complete(self, simgr):
all_paths = simgr.active + simgr.deadended
if not len(simgr.active) or all_paths[0].globals['bb_cnt'] >= len(self._trace):
# this is a concrete trace, there should only be ONE path
if len(all_paths) != 1:
raise AngrTracerError("Program did not behave correctly, expected only one path.")
# the caller is responsible for removing preconstraints
simgr.stash(from_stash='active', to_stash='traced')
simgr.stash(from_stash='deadended', to_stash='traced')
return True
return False
def step(self, simgr, stash, **kwargs):
if stash != 'active':
raise Exception("TODO: tracer doesn't work with stashes other than active")
if len(simgr.active) == 1:
current = simgr.active[0]
if current.history.recent_block_count > 1:
# executed unicorn fix bb_cnt
current.globals['bb_cnt'] += current.history.recent_block_count - 1 - current.history.recent_syscall_count
if not self._no_follow:
# termination condition: we exhausted the dynamic trace log
if current.globals['bb_cnt'] >= len(self._trace):
return simgr
# now, we switch through several ways that the dynamic and symbolic traces can interact
# basic, convenient case: the two traces match
if current.addr == self._trace[current.globals['bb_cnt']]:
current.globals['bb_cnt'] += 1
# angr will count a syscall as a step, qemu will not. they will sync next step.
elif current.history.jumpkind.startswith("Ijk_Sys"):
pass
# handle library calls and simprocedures
elif self.project.is_hooked(current.addr) \
or self.project.simos.is_syscall_addr(current.addr) \
or not self._address_in_binary(current.addr):
# If dynamic trace is in the PLT stub, update bb_cnt until it's out
while current.globals['bb_cnt'] < len(self._trace) and self._addr_in_plt(self._trace[current.globals['bb_cnt']]):
current.globals['bb_cnt'] += 1
# handle hooked functions
# TODO: this branch is totally missed by the test cases
elif self.project.is_hooked(current.history.addr) \
and current.history.addr in self.project._sim_procedures:
l.debug("ending hook for %s", self.project.hooked_by(current.history.addr))
l.debug("previous addr %#x", current.history.addr)
l.debug("bb_cnt %d", current.globals['bb_cnt'])
# we need step to the return
current_addr = current.addr
while current.globals['bb_cnt'] < len(self._trace) and current_addr != self._trace[current.globals['bb_cnt']]:
current.globals['bb_cnt'] += 1
# step 1 more for the normal step that would happen
current.globals['bb_cnt'] += 1
l.debug("bb_cnt after the correction %d", current.globals['bb_cnt'])
if current.globals['bb_cnt'] >= len(self._trace):
return simgr
else:
l.error( "the dynamic trace and the symbolic trace disagreed")
l.error("[%s] dynamic [0x%x], symbolic [0x%x]",
self.project.filename,
self._trace[current.globals['bb_cnt']],
current.addr)
l.error("inputs was %r", current.preconstrainer.input_content)
if self._resiliency:
l.error("TracerMisfollowError encountered")
l.warning("entering no follow mode")
self._no_follow = True
else:
raise AngrTracerError
# maintain the predecessors list
self.predecessors.append(current)
self.predecessors.pop(0)
# Basic block's max size in angr is greater than the one in Qemu
# We follow the one in Qemu
if current.globals['bb_cnt'] >= len(self._trace):
bbl_max_bytes = 800
else:
y2 = self._trace[current.globals['bb_cnt']]
y1 = self._trace[current.globals['bb_cnt'] - 1]
bbl_max_bytes = y2 - y1
if bbl_max_bytes <= 0:
bbl_max_bytes = 800
# detect back loops (a block jumps back to the middle of itself) that have to be differentiated from the
# case where max block sizes don't match.
# this might still break for huge basic blocks with back loops, but it seems unlikely.
try:
bl = self.project.factory.block(self._trace[current.globals['bb_cnt']-1],
backup_state=current)
back_targets = set(bl.vex.constant_jump_targets) & set(bl.instruction_addrs)
if current.globals['bb_cnt'] < len(self._trace) and self._trace[current.globals['bb_cnt']] in back_targets:
target_to_jumpkind = bl.vex.constant_jump_targets_and_jumpkinds
if target_to_jumpkind[self._trace[current.globals['bb_cnt']]] == "Ijk_Boring":
bbl_max_bytes = 800
except (SimMemoryError, SimEngineError):
bbl_max_bytes = 800
# drop the missed stash before stepping, since driller needs missed paths later.
simgr.drop(stash='missed')
simgr._one_step(stash, size=bbl_max_bytes)
# if our input was preconstrained we have to keep on the lookout for unsat paths.
if current.preconstrainer._preconstrain_input:
simgr.stash(from_stash='unsat', to_stash='active')
simgr.drop(stash='unsat')
# if we stepped to a point where there are no active paths, return the simgr.
if len(simgr.active) == 0:
# possibly we want to have different behaviour if we're in crash mode.
return simgr
if len(simgr.active) > 1:
# if we get to this point there's more than one active path
# if we have to ditch the trace we use satisfiability
# or if a split occurs in a library routine
a_paths = simgr.active
if self._no_follow or all(map( lambda p: not self._address_in_binary(p.addr), a_paths)):
simgr.prune(to_stash='missed')
else:
l.debug("bb %d / %d", current.globals['bb_cnt'], len(self._trace))
if current.globals['bb_cnt'] < len(self._trace):
simgr.stash_not_addr(self._trace[current.globals['bb_cnt']], to_stash='missed')
if len(simgr.active) > 1: # rarely we get two active paths
simgr.prune(to_stash='missed')
if len(simgr.active) > 1: # might still be two active
simgr.stash(to_stash='missed', filter_func=lambda x: x.jumpkind == "Ijk_EmWarn")
# make sure we only have one or zero active paths at this point
assert len(simgr.active) < 2
# something weird... maybe we hit a rep instruction?
# qemu and vex have slightly different behaviors...
if not simgr.active[0].se.satisfiable():
l.info("detected small discrepancy between qemu and angr, "
"attempting to fix known cases")
# Have we corrected it?
corrected = False
# did our missed branch try to go back to a rep?
target = simgr.missed[0].addr
if self.project.arch.name == 'X86' or self.project.arch.name == 'AMD64':
# does it look like a rep? rep ret doesn't count!
if self.project.factory.block(target).bytes.startswith("\xf3") and \
not self.project.factory.block(target).bytes.startswith("\xf3\xc3"):
l.info("rep discrepency detected, repairing...")
# swap the stashes
simgr.move('missed', 'chosen')
simgr.move('active', 'missed')
simgr.move('chosen', 'active')
corrected = True
if not corrected:
l.warning("Unable to correct discrepancy between qemu and angr.")
return simgr
def _syscall(self, state):
syscall_addr = state.se.eval(state.ip)
args = None
# 0xa000008 is terminate, which we exclude from syscall statistics.
if self.project.loader.main_object.os == 'cgc' and syscall_addr != 0xa000008:
args = SYSCALL_CC['X86']['CGC'](self.project.arch).get_args(state, 4)
else:
args = SYSCALL_CC[self.project.arch.name]['Linux'](self.project.arch).get_args(state, 4)
if args is not None:
d = {'addr': syscall_addr}
for i in xrange(4):
d['arg_%d' % i] = args[i]
d['arg_%d_symbolic' % i] = args[i].symbolic
self._syscalls.append(d)
def _address_in_binary(self, addr):
"""
Determine if address @addr is in the binary being traced.
:param addr: the address to test
:return: True if the address is in between the binary's min and max addresses.
"""
mb = self.project.loader.main_object
return mb.min_addr <= addr and addr < mb.max_addr
def _addr_in_plt(self, addr):
"""
Check if an address is inside the plt section
"""
plt = self.project.loader.main_object.sections_map.get('.plt', None)
return False if plt is None else addr >= plt.min_addr and addr <= plt.max_addr
@staticmethod
def _tracer_cache_cond(state):
if state.history.jumpkind.startswith('Ijk_Sys'):
sys_procedure = state.project.simos.syscall(state)
if sys_procedure.display_name == 'receive' and state.se.eval(state.posix.files[0].pos) == 0:
return True
return False
@staticmethod
def _tracer_load(container, simgr):
preconstrainer = simgr.one_active.preconstrainer
if type(preconstrainer.input_content) == str:
fs = {'/dev/stdin': SimFile("/dev/stdin", "r", size=len(preconstrainer.input_content))}
else:
fs = preconstrainer.input_content.stdin
project = simgr._project
cached_project = project.load_function(container)
if cached_project is not None:
cached_project.analyses = project.analyses
cached_project.surveyors = project.surveyors
cached_project.store_function = project.store_function
cached_project.load_function = project.load_function
state = cached_project.storage['cached_states'][0]
state.globals['bb_cnt'] = cached_project.storage['bb_cnt']
claripy.ast.base.var_counter = cached_project.storage['var_cnt']
cached_project.storage = None
# Setting up the cached state
state.project = cached_project
simgr._project = cached_project
# Hookup the new files
for name in fs:
fs[name].set_state(state)
for fd in state.posix.files:
if state.posix.files[fd].name == name:
state.posix.files[fd] = fs[name]
break
state.register_plugin('preconstrainer', preconstrainer)
state.history.recent_block_count = 0
# Setting the cached state to the simgr
simgr.stashes['active'] = [state]
else:
l.error("Something went wrong during Project unpickling for Tracer...")
@staticmethod
def _tracer_dump(container, simgr, stash):
if stash != 'active':
raise Exception("TODO: tracer doesn't work with stashes other than active")
s = simgr.stashes[stash][0]
project = s.project
s.project = None
s.history.trim()
project.storage['cached_states'] = [s]
project.storage['bb_cnt'] = s.globals['bb_cnt']
project.storage['var_cnt'] = claripy.ast.base.var_counter
project.store_function(container)
s.project = project
# Add preconstraints to state
s.preconstrainer.preconstrain_state()
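# Minimal usage sketch (illustrative; "my_binary" and "qemu_trace" are placeholders, and
# qemu_trace is assumed to be the list of basic-block addresses recorded by a concrete
# run of the same input):
#
# import angr
#
# p = angr.Project("my_binary")
# simgr = p.factory.simulation_manager(p.factory.entry_state())
# simgr.use_technique(Tracer(trace=qemu_trace))
# simgr.run()
# traced_state = simgr.traced[0]  # state after the last basic block of the trace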
| bsd-2-clause | 2,718,564,388,932,743,000 | 42.42328 | 133 | 0.569148 | false |
antoine-de/navitia | source/jormungandr/jormungandr/interfaces/v1/Coord.py | 1 | 3363 | # Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from flask.ext.restful import abort, marshal
from jormungandr import i_manager
from jormungandr.interfaces.v1.ResourceUri import ResourceUri
from jormungandr.interfaces.v1.fields import address
from navitiacommon.type_pb2 import _NAVITIATYPE
from collections import OrderedDict
import datetime
class Coord(ResourceUri):
def get(self, region=None, lon=None, lat=None, id=None, *args, **kwargs):
if id is not None:
splitted = id.split(";")
if len(splitted) != 2:
abort(404, message='invalid coords [{}], should be <lon:lon>;<lat:lat>'.format(id))
lon, lat = splitted
try:
lon = float(lon)
lat = float(lat)
except ValueError:
abort(404, message='invalid coords [{}], should be <lon:lon>;<lat:lat>'.format(id))
if region is None:
regions = i_manager.get_regions("", lon, lat)
else:
regions = [region]
args = {
"uri": "coord:{}:{}".format(lon, lat),
"count": 1,
"distance": 200,
"type[]": ["address"],
"depth": 1,
"start_page": 0,
"filter": "",
"_current_datetime": datetime.datetime.utcnow()
}
self._register_interpreted_parameters(args)
result = OrderedDict()
for r in regions:
self.region = r
result.update(regions=[r])
pb_result = i_manager.dispatch(args, "places_nearby", instance_name=r)
if len(pb_result.places_nearby) > 0:
e_type = pb_result.places_nearby[0].embedded_type
if _NAVITIATYPE.values_by_name["ADDRESS"].number == e_type:
new_address = marshal(pb_result.places_nearby[0].address,
address)
result.update(address=new_address)
return result, 200
result.update(regions=regions)
result.update(message="No address for these coords")
return result, 404
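# Illustrative request (the exact URL routing is registered elsewhere in the v1 API
# setup): GET .../coords/2.37705;48.84675 parses the id as "<lon>;<lat>", looks for
# addresses within 200 m via places_nearby, and returns the closest one, or a 404 with
# "No address for these coords" when nothing is found.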
| agpl-3.0 | 307,073,991,383,605,900 | 37.655172 | 99 | 0.627119 | false |
stdlib-js/stdlib | lib/node_modules/@stdlib/math/base/special/riemann-zeta/benchmark/python/scipy/benchmark.py | 1 | 2194 | #!/usr/bin/env python
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark scipy.special.zeta."""
from __future__ import print_function
import timeit
NAME = "zeta"
REPEATS = 3
ITERATIONS = 100000
def print_version():
"""Print the TAP version."""
print("TAP version 13")
def print_summary(total, passing):
"""Print the benchmark summary.
# Arguments
* `total`: total number of tests
* `passing`: number of passing tests
"""
print("#")
print("1.." + str(total)) # TAP plan
print("# total " + str(total))
print("# pass " + str(passing))
print("#")
print("# ok")
def print_results(elapsed):
"""Print benchmark results.
# Arguments
* `elapsed`: elapsed time (in seconds)
# Examples
``` python
python> print_results(0.131009101868)
```
"""
rate = ITERATIONS / elapsed
print(" ---")
print(" iterations: " + str(ITERATIONS))
print(" elapsed: " + str(elapsed))
print(" rate: " + str(rate))
print(" ...")
def benchmark():
"""Run the benchmark and print benchmark results."""
setup = "from scipy.special import zeta; from random import random;"
stmt = "y = zeta(random()*56.0 + 1.1)"
t = timeit.Timer(stmt, setup=setup)
print_version()
for i in range(REPEATS):
print("# python::scipy::" + NAME)
elapsed = t.timeit(number=ITERATIONS)
print_results(elapsed)
print("ok " + str(i+1) + " benchmark finished")
print_summary(REPEATS, REPEATS)
def main():
"""Run the benchmark."""
benchmark()
if __name__ == "__main__":
main()
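# Illustrative TAP output for one run (elapsed and rate figures are placeholders):
#
# TAP version 13
# # python::scipy::zeta
#   ---
#   iterations: 100000
#   elapsed: <seconds>
#   rate: <iterations per second>
#   ...
# ok 1 benchmark finished
#
# followed by two more runs and the closing plan/summary lines ("1..3", "# total 3", ...).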
| apache-2.0 | 1,624,637,104,917,445,400 | 21.618557 | 74 | 0.628532 | false |
dblia/nosql-ganeti | lib/objects.py | 1 | 62582 | #
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Transportable objects for Ganeti.
This module provides small, mostly data-only objects which are safe to
pass to and from external parties.
"""
# pylint: disable=E0203,W0201,R0902
# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members
# W0201: Attribute '%s' defined outside __init__
# R0902: Allow instances of these objects to have more than 20 attributes
import ConfigParser
import re
import copy
import logging
import time
from cStringIO import StringIO
from ganeti import errors
from ganeti import constants
from ganeti import netutils
from ganeti import outils
from ganeti import utils
from socket import AF_INET
__all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
"OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]
def FillDict(defaults_dict, custom_dict, skip_keys=None):
"""Basic function to apply settings on top a default dict.
@type defaults_dict: dict
@param defaults_dict: dictionary holding the default values
@type custom_dict: dict
@param custom_dict: dictionary holding customized value
@type skip_keys: list
@param skip_keys: which keys not to fill
@rtype: dict
@return: dict with the 'full' values
"""
ret_dict = copy.deepcopy(defaults_dict)
ret_dict.update(custom_dict)
if skip_keys:
for k in skip_keys:
try:
del ret_dict[k]
except KeyError:
pass
return ret_dict
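# Illustrative example: FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"]) returns
# {"b": 3}; values from custom_dict override the defaults and skipped keys are removed
# from the merged result.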
def FillIPolicy(default_ipolicy, custom_ipolicy, skip_keys=None):
"""Fills an instance policy with defaults.
"""
assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
ret_dict = {}
for key in constants.IPOLICY_ISPECS:
ret_dict[key] = FillDict(default_ipolicy[key],
custom_ipolicy.get(key, {}),
skip_keys=skip_keys)
# list items
for key in [constants.IPOLICY_DTS]:
ret_dict[key] = list(custom_ipolicy.get(key, default_ipolicy[key]))
# other items which we know we can directly copy (immutables)
for key in constants.IPOLICY_PARAMETERS:
ret_dict[key] = custom_ipolicy.get(key, default_ipolicy[key])
return ret_dict
def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
"""Fills the disk parameter defaults.
@see: L{FillDict} for parameters and return value
"""
assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
skip_keys=skip_keys))
for dt in constants.DISK_TEMPLATES)
def UpgradeGroupedParams(target, defaults):
"""Update all groups for the target parameter.
@type target: dict of dicts
@param target: {group: {parameter: value}}
@type defaults: dict
@param defaults: default parameter values
"""
if target is None:
target = {constants.PP_DEFAULT: defaults}
else:
for group in target:
target[group] = FillDict(defaults, target[group])
return target
def UpgradeBeParams(target):
"""Update the be parameters dict to the new format.
@type target: dict
@param target: "be" parameters dict
"""
if constants.BE_MEMORY in target:
memory = target[constants.BE_MEMORY]
target[constants.BE_MAXMEM] = memory
target[constants.BE_MINMEM] = memory
del target[constants.BE_MEMORY]
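# Illustrative example (spelling out the literal key names the BE_* constants are
# assumed to stand for): a legacy dict {"memory": 128} is rewritten in place to
# {"maxmem": 128, "minmem": 128}.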
def UpgradeDiskParams(diskparams):
"""Upgrade the disk parameters.
@type diskparams: dict
@param diskparams: disk parameters to upgrade
@rtype: dict
@return: the upgraded disk parameters dict
"""
if not diskparams:
result = {}
else:
result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
return result
def UpgradeNDParams(ndparams):
"""Upgrade ndparams structure.
@type ndparams: dict
@param ndparams: disk parameters to upgrade
@rtype: dict
@return: the upgraded node parameters dict
"""
if ndparams is None:
ndparams = {}
if (constants.ND_OOB_PROGRAM in ndparams and
ndparams[constants.ND_OOB_PROGRAM] is None):
# will be reset by the line below
del ndparams[constants.ND_OOB_PROGRAM]
return FillDict(constants.NDC_DEFAULTS, ndparams)
def MakeEmptyIPolicy():
"""Create empty IPolicy dictionary.
"""
return dict([
(constants.ISPECS_MIN, {}),
(constants.ISPECS_MAX, {}),
(constants.ISPECS_STD, {}),
])
class ConfigObject(outils.ValidatedSlots):
"""A generic config object.
It has the following properties:
- provides somewhat safe recursive unpickling and pickling for its classes
- unset attributes which are defined in slots are always returned
as None instead of raising an error
Classes derived from this must always declare __slots__ (we use many
config objects and the memory reduction is useful)
"""
__slots__ = []
def __getattr__(self, name):
if name not in self.GetAllSlots():
raise AttributeError("Invalid object attribute %s.%s" %
(type(self).__name__, name))
return None
def __setstate__(self, state):
slots = self.GetAllSlots()
for name in state:
if name in slots:
setattr(self, name, state[name])
def Validate(self):
"""Validates the slots.
"""
def ToDict(self):
"""Convert to a dict holding only standard python types.
The generic routine just dumps all of this object's attributes in
a dict. It does not work if the class has children who are
ConfigObjects themselves (e.g. the nics list in an Instance), in
which case the object should subclass the function in order to
make sure all objects returned are only standard python types.
"""
result = {}
for name in self.GetAllSlots():
value = getattr(self, name, None)
if value is not None:
result[name] = value
return result
__getstate__ = ToDict
@classmethod
def FromDict(cls, val):
"""Create an object from a dictionary.
This generic routine takes a dict, instantiates a new instance of
the given class, and sets attributes based on the dict content.
As for `ToDict`, this does not work if the class has children
who are ConfigObjects themselves (e.g. the nics list in an
Instance), in which case the object should subclass the function
and alter the objects.
"""
if not isinstance(val, dict):
raise errors.ConfigurationError("Invalid object passed to FromDict:"
" expected dict, got %s" % type(val))
val_str = dict([(str(k), v) for k, v in val.iteritems()])
obj = cls(**val_str) # pylint: disable=W0142
return obj
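# Illustrative round trip: NIC(mac="aa:00:00:35:12:08").ToDict() yields
# {"mac": "aa:00:00:35:12:08"} (unset slots are simply omitted), and
# NIC.FromDict({"mac": "aa:00:00:35:12:08"}) rebuilds an equivalent object; the MAC
# value here is only an example.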
@staticmethod
def _ContainerToDicts(container):
"""Convert the elements of a container to standard python types.
This method converts a container with elements derived from
ConfigData to standard python types. If the container is a dict,
we don't touch the keys, only the values.
"""
if isinstance(container, dict):
ret = dict([(k, v.ToDict()) for k, v in container.iteritems()])
elif isinstance(container, (list, tuple, set, frozenset)):
ret = [elem.ToDict() for elem in container]
else:
raise TypeError("Invalid type %s passed to _ContainerToDicts" %
type(container))
return ret
@staticmethod
def _ContainerFromDicts(source, c_type, e_type):
"""Convert a container from standard python types.
This method converts a container with standard python types to
ConfigData objects. If the container is a dict, we don't touch the
keys, only the values.
"""
if not isinstance(c_type, type):
raise TypeError("Container type %s passed to _ContainerFromDicts is"
" not a type" % type(c_type))
if source is None:
source = c_type()
if c_type is dict:
ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()])
elif c_type in (list, tuple, set, frozenset):
ret = c_type([e_type.FromDict(elem) for elem in source])
else:
raise TypeError("Invalid container type %s passed to"
" _ContainerFromDicts" % c_type)
return ret
def Copy(self):
"""Makes a deep copy of the current object and its children.
"""
dict_form = self.ToDict()
clone_obj = self.__class__.FromDict(dict_form)
return clone_obj
def __repr__(self):
"""Implement __repr__ for ConfigObjects."""
return repr(self.ToDict())
def UpgradeConfig(self):
"""Fill defaults for missing configuration values.
This method will be called at configuration load time, and its
implementation will be object dependent.
"""
pass
class TaggableObject(ConfigObject):
"""An generic class supporting tags.
"""
__slots__ = ["tags"]
VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$")
@classmethod
def ValidateTag(cls, tag):
"""Check if a tag is valid.
If the tag is invalid, an errors.TagError will be raised. The
function has no return value.
"""
if not isinstance(tag, basestring):
raise errors.TagError("Invalid tag type (not a string)")
if len(tag) > constants.MAX_TAG_LEN:
raise errors.TagError("Tag too long (>%d characters)" %
constants.MAX_TAG_LEN)
if not tag:
raise errors.TagError("Tags cannot be empty")
if not cls.VALID_TAG_RE.match(tag):
raise errors.TagError("Tag contains invalid characters")
def GetTags(self):
"""Return the tags list.
"""
tags = getattr(self, "tags", None)
if tags is None:
tags = self.tags = set()
return tags
def AddTag(self, tag):
"""Add a new tag.
"""
self.ValidateTag(tag)
tags = self.GetTags()
if len(tags) >= constants.MAX_TAGS_PER_OBJ:
raise errors.TagError("Too many tags")
self.GetTags().add(tag)
def RemoveTag(self, tag):
"""Remove a tag.
"""
self.ValidateTag(tag)
tags = self.GetTags()
try:
tags.remove(tag)
except KeyError:
raise errors.TagError("Tag not found")
def ToDict(self):
"""Taggable-object-specific conversion to standard python types.
This replaces the tags set with a list.
"""
bo = super(TaggableObject, self).ToDict()
tags = bo.get("tags", None)
if isinstance(tags, set):
bo["tags"] = list(tags)
return bo
@classmethod
def FromDict(cls, val):
"""Custom function for instances.
"""
obj = super(TaggableObject, cls).FromDict(val)
if hasattr(obj, "tags") and isinstance(obj.tags, list):
obj.tags = set(obj.tags)
return obj
class MasterNetworkParameters(ConfigObject):
"""Network configuration parameters for the master
@ivar name: master name
@ivar ip: master IP
@ivar netmask: master netmask
@ivar netdev: master network device
@ivar ip_family: master IP family
"""
__slots__ = [
"name",
"ip",
"netmask",
"netdev",
"ip_family",
]
class ConfigData(ConfigObject):
"""Top-level config object."""
__slots__ = [
"_id",
"_rev",
"version",
"cluster",
"nodes",
"nodegroups",
"instances",
"networks",
"serial_no",
] + _TIMESTAMPS
def ToDict(self):
"""Custom function for top-level config data.
This just replaces the list of instances, nodes and the cluster
with standard python types.
"""
mydict = super(ConfigData, self).ToDict()
mydict["cluster"] = mydict["cluster"].ToDict()
for key in "nodes", "instances", "nodegroups", "networks":
mydict[key] = self._ContainerToDicts(mydict[key])
return mydict
@classmethod
def FromDict(cls, val):
"""Custom function for top-level config data
"""
obj = super(ConfigData, cls).FromDict(val)
obj.cluster = Cluster.FromDict(obj.cluster)
obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node)
obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance)
obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
obj.networks = cls._ContainerFromDicts(obj.networks, dict, Network)
return obj
def HasAnyDiskOfType(self, dev_type):
"""Check if in there is at disk of the given type in the configuration.
@type dev_type: L{constants.LDS_BLOCK}
@param dev_type: the type to look for
@rtype: boolean
@return: boolean indicating if a disk of the given type was found or not
"""
for instance in self.instances.values():
for disk in instance.disks:
if disk.IsBasedOnDiskType(dev_type):
return True
return False
def UpgradeConfig(self):
"""Fill defaults for missing configuration values.
"""
self.cluster.UpgradeConfig()
for node in self.nodes.values():
node.UpgradeConfig()
for instance in self.instances.values():
instance.UpgradeConfig()
if self.nodegroups is None:
self.nodegroups = {}
for nodegroup in self.nodegroups.values():
nodegroup.UpgradeConfig()
if self.cluster.drbd_usermode_helper is None:
# To decide if we set an helper let's check if at least one instance has
# a DRBD disk. This does not cover all the possible scenarios but it
# gives a good approximation.
if self.HasAnyDiskOfType(constants.LD_DRBD8):
self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
if self.networks is None:
self.networks = {}
for network in self.networks.values():
network.UpgradeConfig()
class NIC(ConfigObject):
"""Config object representing a network card."""
__slots__ = ["mac", "ip", "network", "nicparams", "netinfo"]
@classmethod
def CheckParameterSyntax(cls, nicparams):
"""Check the given parameters for validity.
@type nicparams: dict
@param nicparams: dictionary with parameter names/value
@raise errors.ConfigurationError: when a parameter is not valid
"""
mode = nicparams[constants.NIC_MODE]
if (mode not in constants.NIC_VALID_MODES and
mode != constants.VALUE_AUTO):
raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
if (mode == constants.NIC_MODE_BRIDGED and
not nicparams[constants.NIC_LINK]):
raise errors.ConfigurationError("Missing bridged NIC link")
class Disk(ConfigObject):
"""Config object representing a block device."""
__slots__ = ["dev_type", "logical_id", "physical_id",
"children", "iv_name", "size", "mode", "params"]
def CreateOnSecondary(self):
"""Test if this device needs to be created on a secondary node."""
return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
def AssembleOnSecondary(self):
"""Test if this device needs to be assembled on a secondary node."""
return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
def OpenOnSecondary(self):
"""Test if this device needs to be opened on a secondary node."""
return self.dev_type in (constants.LD_LV,)
def StaticDevPath(self):
"""Return the device path if this device type has a static one.
Some devices (LVM for example) live always at the same /dev/ path,
irrespective of their status. For such devices, we return this
path, for others we return None.
@warning: The path returned is not a normalized pathname; callers
should check that it is a valid path.
"""
if self.dev_type == constants.LD_LV:
return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
elif self.dev_type == constants.LD_BLOCKDEV:
return self.logical_id[1]
elif self.dev_type == constants.LD_RBD:
return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
return None
def ChildrenNeeded(self):
"""Compute the needed number of children for activation.
This method will return either -1 (all children) or a positive
number denoting the minimum number of children needed for
activation (only mirrored devices will usually return >=0).
Currently, only DRBD8 supports diskless activation (therefore we
return 0); for all others we keep the previous semantics and return
-1.
"""
if self.dev_type == constants.LD_DRBD8:
return 0
return -1
def IsBasedOnDiskType(self, dev_type):
"""Check if the disk or its children are based on the given type.
@type dev_type: L{constants.LDS_BLOCK}
@param dev_type: the type to look for
@rtype: boolean
@return: boolean indicating if a device of the given type was found or not
"""
if self.children:
for child in self.children:
if child.IsBasedOnDiskType(dev_type):
return True
return self.dev_type == dev_type
def GetNodes(self, node):
"""This function returns the nodes this device lives on.
Given the node on which the parent of the device lives on (or, in
case of a top-level device, the primary node of the devices'
instance), this function will return a list of nodes on which this
devices needs to (or can) be assembled.
"""
if self.dev_type in [constants.LD_LV, constants.LD_FILE,
constants.LD_BLOCKDEV, constants.LD_RBD,
constants.LD_EXT]:
result = [node]
elif self.dev_type in constants.LDS_DRBD:
result = [self.logical_id[0], self.logical_id[1]]
if node not in result:
raise errors.ConfigurationError("DRBD device passed unknown node")
else:
raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
return result
def ComputeNodeTree(self, parent_node):
"""Compute the node/disk tree for this disk and its children.
This method, given the node on which the parent disk lives, will
return the list of all (node, disk) pairs which describe the disk
tree in the most compact way. For example, a drbd/lvm stack
will be returned as (primary_node, drbd) and (secondary_node, drbd)
which represents all the top-level devices on the nodes.
"""
my_nodes = self.GetNodes(parent_node)
result = [(node, self) for node in my_nodes]
if not self.children:
# leaf device
return result
for node in my_nodes:
for child in self.children:
child_result = child.ComputeNodeTree(node)
if len(child_result) == 1:
# child (and all its descendants) is simple, doesn't split
# over multiple hosts, so we don't need to describe it, our
# own entry for this node describes it completely
continue
else:
# check if child nodes differ from my nodes; note that
# subdisk can differ from the child itself, and be instead
# one of its descendants
for subnode, subdisk in child_result:
if subnode not in my_nodes:
result.append((subnode, subdisk))
# otherwise child is under our own node, so we ignore this
# entry (but probably the other results in the list will
# be different)
return result
def ComputeGrowth(self, amount):
"""Compute the per-VG growth requirements.
This only works for VG-based disks.
@type amount: integer
@param amount: the desired increase in (user-visible) disk space
@rtype: dict
@return: a dictionary of volume-groups and the required size
"""
if self.dev_type == constants.LD_LV:
return {self.logical_id[0]: amount}
elif self.dev_type == constants.LD_DRBD8:
if self.children:
return self.children[0].ComputeGrowth(amount)
else:
return {}
else:
# Other disk types do not require VG space
return {}
def RecordGrow(self, amount):
"""Update the size of this disk after growth.
This method recurses over the disk's children and updates their
size correspondingly. The method needs to be kept in sync with the
actual algorithms from bdev.
"""
if self.dev_type in (constants.LD_LV, constants.LD_FILE,
constants.LD_RBD, constants.LD_EXT):
self.size += amount
elif self.dev_type == constants.LD_DRBD8:
if self.children:
self.children[0].RecordGrow(amount)
self.size += amount
else:
raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
" disk type %s" % self.dev_type)
def Update(self, size=None, mode=None):
"""Apply changes to size and mode.
"""
if self.dev_type == constants.LD_DRBD8:
if self.children:
self.children[0].Update(size=size, mode=mode)
else:
assert not self.children
if size is not None:
self.size = size
if mode is not None:
self.mode = mode
def UnsetSize(self):
"""Sets recursively the size to zero for the disk and its children.
"""
if self.children:
for child in self.children:
child.UnsetSize()
self.size = 0
def SetPhysicalID(self, target_node, nodes_ip):
"""Convert the logical ID to the physical ID.
This is used only for drbd, which needs ip/port configuration.
The routine descends down and updates its children also, because
this helps when only the top device is passed to the remote
node.
Arguments:
- target_node: the node we wish to configure for
- nodes_ip: a mapping of node name to ip
The target_node must exist in nodes_ip, and must be one of the
nodes in the logical ID for each of the DRBD devices encountered
in the disk tree.
"""
if self.children:
for child in self.children:
child.SetPhysicalID(target_node, nodes_ip)
if self.logical_id is None and self.physical_id is not None:
return
if self.dev_type in constants.LDS_DRBD:
pnode, snode, port, pminor, sminor, secret = self.logical_id
if target_node not in (pnode, snode):
raise errors.ConfigurationError("DRBD device not knowing node %s" %
target_node)
pnode_ip = nodes_ip.get(pnode, None)
snode_ip = nodes_ip.get(snode, None)
if pnode_ip is None or snode_ip is None:
raise errors.ConfigurationError("Can't find primary or secondary node"
" for %s" % str(self))
p_data = (pnode_ip, port)
s_data = (snode_ip, port)
if pnode == target_node:
self.physical_id = p_data + s_data + (pminor, secret)
else: # it must be secondary, we tested above
self.physical_id = s_data + p_data + (sminor, secret)
else:
self.physical_id = self.logical_id
return
def ToDict(self):
"""Disk-specific conversion to standard python types.
This replaces the children lists of objects with lists of
standard python types.
"""
bo = super(Disk, self).ToDict()
for attr in ("children",):
alist = bo.get(attr, None)
if alist:
bo[attr] = self._ContainerToDicts(alist)
return bo
@classmethod
def FromDict(cls, val):
"""Custom function for Disks
"""
obj = super(Disk, cls).FromDict(val)
if obj.children:
obj.children = cls._ContainerFromDicts(obj.children, list, Disk)
if obj.logical_id and isinstance(obj.logical_id, list):
obj.logical_id = tuple(obj.logical_id)
if obj.physical_id and isinstance(obj.physical_id, list):
obj.physical_id = tuple(obj.physical_id)
if obj.dev_type in constants.LDS_DRBD:
# we need a tuple of length six here
if len(obj.logical_id) < 6:
obj.logical_id += (None,) * (6 - len(obj.logical_id))
return obj
def __str__(self):
"""Custom str() formatter for disks.
"""
if self.dev_type == constants.LD_LV:
val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
elif self.dev_type in constants.LDS_DRBD:
node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
val = "<DRBD8("
if self.physical_id is None:
phy = "unconfigured"
else:
phy = ("configured as %s:%s %s:%s" %
(self.physical_id[0], self.physical_id[1],
self.physical_id[2], self.physical_id[3]))
val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
(node_a, minor_a, node_b, minor_b, port, phy))
if self.children and self.children.count(None) == 0:
val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
else:
val += "no local storage"
else:
val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
(self.dev_type, self.logical_id, self.physical_id, self.children))
if self.iv_name is None:
val += ", not visible"
else:
val += ", visible as /dev/%s" % self.iv_name
if isinstance(self.size, int):
val += ", size=%dm)>" % self.size
else:
val += ", size='%s')>" % (self.size,)
return val
def Verify(self):
"""Checks that this disk is correctly configured.
"""
all_errors = []
if self.mode not in constants.DISK_ACCESS_SET:
all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
return all_errors
def UpgradeConfig(self):
"""Fill defaults for missing configuration values.
"""
if self.children:
for child in self.children:
child.UpgradeConfig()
# FIXME: Make this configurable in Ganeti 2.7
self.params = {}
# add here config upgrade for this disk
@staticmethod
def ComputeLDParams(disk_template, disk_params):
"""Computes Logical Disk parameters from Disk Template parameters.
@type disk_template: string
@param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
@type disk_params: dict
@param disk_params: disk template parameters;
      dict(template_name -> parameters)
@rtype: list(dict)
@return: a list of dicts, one for each node of the disk hierarchy. Each dict
contains the LD parameters of the node. The tree is flattened in-order.
"""
if disk_template not in constants.DISK_TEMPLATES:
raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
assert disk_template in disk_params
result = list()
dt_params = disk_params[disk_template]
if disk_template == constants.DT_DRBD8:
result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], {
constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
}))
# data LV
result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
}))
# metadata LV
result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
}))
elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])
elif disk_template == constants.DT_PLAIN:
result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
}))
elif disk_template == constants.DT_BLOCK:
result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])
elif disk_template == constants.DT_RBD:
result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], {
constants.LDP_POOL: dt_params[constants.RBD_POOL],
}))
elif disk_template == constants.DT_EXT:
result.append(constants.DISK_LD_DEFAULTS[constants.LD_EXT])
return result
class InstancePolicy(ConfigObject):
"""Config object representing instance policy limits dictionary.
Note that this object is not actually used in the config, it's just
used as a placeholder for a few functions.
"""
@classmethod
def CheckParameterSyntax(cls, ipolicy, check_std):
""" Check the instance policy for validity.
"""
for param in constants.ISPECS_PARAMETERS:
InstancePolicy.CheckISpecSyntax(ipolicy, param, check_std)
if constants.IPOLICY_DTS in ipolicy:
InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
for key in constants.IPOLICY_PARAMETERS:
if key in ipolicy:
InstancePolicy.CheckParameter(key, ipolicy[key])
wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
if wrong_keys:
raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
utils.CommaJoin(wrong_keys))
@classmethod
def CheckISpecSyntax(cls, ipolicy, name, check_std):
"""Check the instance policy for validity on a given key.
We check if the instance policy makes sense for a given key, that is
if ipolicy[min][name] <= ipolicy[std][name] <= ipolicy[max][name].
@type ipolicy: dict
@param ipolicy: dictionary with min, max, std specs
@type name: string
@param name: what are the limits for
@type check_std: bool
@param check_std: Whether to check std value or just assume compliance
    @raise errors.ConfigurationError: when the specs for the given name are not valid
"""
min_v = ipolicy[constants.ISPECS_MIN].get(name, 0)
if check_std:
std_v = ipolicy[constants.ISPECS_STD].get(name, min_v)
std_msg = std_v
else:
std_v = min_v
std_msg = "-"
max_v = ipolicy[constants.ISPECS_MAX].get(name, std_v)
err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" %
(name,
ipolicy[constants.ISPECS_MIN].get(name, "-"),
ipolicy[constants.ISPECS_MAX].get(name, "-"),
std_msg))
if min_v > std_v or std_v > max_v:
raise errors.ConfigurationError(err)
@classmethod
def CheckDiskTemplates(cls, disk_templates):
"""Checks the disk templates for validity.
"""
if not disk_templates:
raise errors.ConfigurationError("Instance policy must contain" +
" at least one disk template")
wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
if wrong:
raise errors.ConfigurationError("Invalid disk template(s) %s" %
utils.CommaJoin(wrong))
@classmethod
def CheckParameter(cls, key, value):
"""Checks a parameter.
Currently we expect all parameters to be float values.
"""
try:
float(value)
except (TypeError, ValueError), err:
raise errors.ConfigurationError("Invalid value for key" " '%s':"
" '%s', error: %s" % (key, value, err))
class Instance(TaggableObject):
"""Config object representing an instance."""
__slots__ = [
"_id",
"_rev",
"name",
"primary_node",
"os",
"hypervisor",
"hvparams",
"beparams",
"osparams",
"admin_state",
"nics",
"disks",
"disk_template",
"network_port",
"serial_no",
] + _TIMESTAMPS + _UUID
def _ComputeSecondaryNodes(self):
"""Compute the list of secondary nodes.
This is a simple wrapper over _ComputeAllNodes.
"""
all_nodes = set(self._ComputeAllNodes())
all_nodes.discard(self.primary_node)
return tuple(all_nodes)
secondary_nodes = property(_ComputeSecondaryNodes, None, None,
"List of names of secondary nodes")
def _ComputeAllNodes(self):
"""Compute the list of all nodes.
Since the data is already there (in the drbd disks), keeping it as
a separate normal attribute is redundant and if not properly
synchronised can cause problems. Thus it's better to compute it
dynamically.
"""
def _Helper(nodes, device):
"""Recursively computes nodes given a top device."""
if device.dev_type in constants.LDS_DRBD:
nodea, nodeb = device.logical_id[:2]
nodes.add(nodea)
nodes.add(nodeb)
if device.children:
for child in device.children:
_Helper(nodes, child)
all_nodes = set()
all_nodes.add(self.primary_node)
for device in self.disks:
_Helper(all_nodes, device)
return tuple(all_nodes)
all_nodes = property(_ComputeAllNodes, None, None,
"List of names of all the nodes of the instance")
def MapLVsByNode(self, lvmap=None, devs=None, node=None):
"""Provide a mapping of nodes to LVs this instance owns.
This function figures out what logical volumes should belong on
which nodes, recursing through a device tree.
@param lvmap: optional dictionary to receive the
'node' : ['lv', ...] data.
@return: None if lvmap arg is given, otherwise, a dictionary of
the form { 'nodename' : ['volume1', 'volume2', ...], ... };
volumeN is of the form "vg_name/lv_name", compatible with
GetVolumeList()
"""
if node is None:
node = self.primary_node
if lvmap is None:
lvmap = {
node: [],
}
ret = lvmap
else:
if not node in lvmap:
lvmap[node] = []
ret = None
if not devs:
devs = self.disks
for dev in devs:
if dev.dev_type == constants.LD_LV:
lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
elif dev.dev_type in constants.LDS_DRBD:
if dev.children:
self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
elif dev.children:
self.MapLVsByNode(lvmap, dev.children, node)
return ret
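  # Illustrative (hypothetical) MapLVsByNode() result for a single-disk DRBD
  # instance with primary "node1", secondary "node2" and data/metadata LVs
  # "xenvg/disk0_data" and "xenvg/disk0_meta" as the DRBD children; both nodes
  # receive both LVs, as the DRBD branch above maps the children twice:
  #   {"node1": ["xenvg/disk0_data", "xenvg/disk0_meta"],
  #    "node2": ["xenvg/disk0_data", "xenvg/disk0_meta"]}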
def FindDisk(self, idx):
"""Find a disk given having a specified index.
This is just a wrapper that does validation of the index.
@type idx: int
@param idx: the disk index
@rtype: L{Disk}
@return: the corresponding disk
@raise errors.OpPrereqError: when the given index is not valid
"""
try:
idx = int(idx)
return self.disks[idx]
except (TypeError, ValueError), err:
raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
errors.ECODE_INVAL)
except IndexError:
raise errors.OpPrereqError("Invalid disk index: %d (instace has disks"
" 0 to %d" % (idx, len(self.disks) - 1),
errors.ECODE_INVAL)
def ToDict(self):
"""Instance-specific conversion to standard python types.
This replaces the children lists of objects with lists of standard
python types.
"""
bo = super(Instance, self).ToDict()
for attr in "nics", "disks":
alist = bo.get(attr, None)
if alist:
nlist = self._ContainerToDicts(alist)
else:
nlist = []
bo[attr] = nlist
return bo
@classmethod
def FromDict(cls, val):
"""Custom function for instances.
"""
if "admin_state" not in val:
if val.get("admin_up", False):
val["admin_state"] = constants.ADMINST_UP
else:
val["admin_state"] = constants.ADMINST_DOWN
if "admin_up" in val:
del val["admin_up"]
obj = super(Instance, cls).FromDict(val)
obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
return obj
def UpgradeConfig(self):
"""Fill defaults for missing configuration values.
"""
for nic in self.nics:
nic.UpgradeConfig()
for disk in self.disks:
disk.UpgradeConfig()
if self.hvparams:
for key in constants.HVC_GLOBALS:
try:
del self.hvparams[key]
except KeyError:
pass
if self.osparams is None:
self.osparams = {}
UpgradeBeParams(self.beparams)
class OS(ConfigObject):
"""Config object representing an operating system.
@type supported_parameters: list
@ivar supported_parameters: a list of tuples, name and description,
containing the supported parameters by this OS
@type VARIANT_DELIM: string
@cvar VARIANT_DELIM: the variant delimiter
"""
__slots__ = [
"name",
"path",
"api_versions",
"create_script",
"export_script",
"import_script",
"rename_script",
"verify_script",
"supported_variants",
"supported_parameters",
]
VARIANT_DELIM = "+"
@classmethod
def SplitNameVariant(cls, name):
"""Splits the name into the proper name and variant.
@param name: the OS (unprocessed) name
@rtype: list
@return: a list of two elements; if the original name didn't
contain a variant, it's returned as an empty string
"""
nv = name.split(cls.VARIANT_DELIM, 1)
if len(nv) == 1:
nv.append("")
return nv
@classmethod
def GetName(cls, name):
"""Returns the proper name of the os (without the variant).
@param name: the OS (unprocessed) name
"""
return cls.SplitNameVariant(name)[0]
@classmethod
def GetVariant(cls, name):
"""Returns the variant the os (without the base name).
@param name: the OS (unprocessed) name
"""
return cls.SplitNameVariant(name)[1]
class ExtStorage(ConfigObject):
"""Config object representing an External Storage Provider.
"""
__slots__ = [
"name",
"path",
"create_script",
"remove_script",
"grow_script",
"attach_script",
"detach_script",
"setinfo_script",
"verify_script",
"supported_parameters",
]
class NodeHvState(ConfigObject):
"""Hypvervisor state on a node.
@ivar mem_total: Total amount of memory
@ivar mem_node: Memory used by, or reserved for, the node itself (not always
available)
@ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
rounding
@ivar mem_inst: Memory used by instances living on node
@ivar cpu_total: Total node CPU core count
@ivar cpu_node: Number of CPU cores reserved for the node itself
"""
__slots__ = [
"mem_total",
"mem_node",
"mem_hv",
"mem_inst",
"cpu_total",
"cpu_node",
] + _TIMESTAMPS
class NodeDiskState(ConfigObject):
"""Disk state on a node.
"""
__slots__ = [
"total",
"reserved",
"overhead",
] + _TIMESTAMPS
class Node(TaggableObject):
"""Config object representing a node.
@ivar hv_state: Hypervisor state (e.g. number of CPUs)
  @ivar hv_state_static: Hypervisor state overridden by user
@ivar disk_state: Disk state (e.g. free space)
  @ivar disk_state_static: Disk state overridden by user
"""
__slots__ = [
"_id",
"_rev",
"name",
"primary_ip",
"secondary_ip",
"serial_no",
"master_candidate",
"offline",
"drained",
"group",
"master_capable",
"vm_capable",
"ndparams",
"powered",
"hv_state",
"hv_state_static",
"disk_state",
"disk_state_static",
] + _TIMESTAMPS + _UUID
def UpgradeConfig(self):
"""Fill defaults for missing configuration values.
"""
# pylint: disable=E0203
# because these are "defined" via slots, not manually
if self.master_capable is None:
self.master_capable = True
if self.vm_capable is None:
self.vm_capable = True
if self.ndparams is None:
self.ndparams = {}
# And remove any global parameter
for key in constants.NDC_GLOBALS:
if key in self.ndparams:
logging.warning("Ignoring %s node parameter for node %s",
key, self.name)
del self.ndparams[key]
if self.powered is None:
self.powered = True
def ToDict(self):
"""Custom function for serializing.
"""
data = super(Node, self).ToDict()
hv_state = data.get("hv_state", None)
if hv_state is not None:
data["hv_state"] = self._ContainerToDicts(hv_state)
disk_state = data.get("disk_state", None)
if disk_state is not None:
data["disk_state"] = \
dict((key, self._ContainerToDicts(value))
for (key, value) in disk_state.items())
return data
@classmethod
def FromDict(cls, val):
"""Custom function for deserializing.
"""
obj = super(Node, cls).FromDict(val)
if obj.hv_state is not None:
obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState)
if obj.disk_state is not None:
obj.disk_state = \
dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState))
for (key, value) in obj.disk_state.items())
return obj
class NodeGroup(TaggableObject):
"""Config object representing a node group."""
__slots__ = [
"_id",
"_rev",
"name",
"members",
"ndparams",
"diskparams",
"ipolicy",
"serial_no",
"hv_state_static",
"disk_state_static",
"alloc_policy",
"networks",
] + _TIMESTAMPS + _UUID
def ToDict(self):
"""Custom function for nodegroup.
This discards the members object, which gets recalculated and is only kept
in memory.
"""
mydict = super(NodeGroup, self).ToDict()
del mydict["members"]
return mydict
@classmethod
def FromDict(cls, val):
"""Custom function for nodegroup.
The members slot is initialized to an empty list, upon deserialization.
"""
obj = super(NodeGroup, cls).FromDict(val)
obj.members = []
return obj
def UpgradeConfig(self):
"""Fill defaults for missing configuration values.
"""
if self.ndparams is None:
self.ndparams = {}
if self.serial_no is None:
self.serial_no = 1
if self.alloc_policy is None:
self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
# We only update mtime, and not ctime, since we would not be able
# to provide a correct value for creation time.
if self.mtime is None:
self.mtime = time.time()
if self.diskparams is None:
self.diskparams = {}
if self.ipolicy is None:
self.ipolicy = MakeEmptyIPolicy()
if self.networks is None:
self.networks = {}
def FillND(self, node):
"""Return filled out ndparams for L{objects.Node}
@type node: L{objects.Node}
@param node: A Node object to fill
    @return: a copy of the node's ndparams with defaults filled
"""
return self.SimpleFillND(node.ndparams)
def SimpleFillND(self, ndparams):
"""Fill a given ndparams dict with defaults.
@type ndparams: dict
@param ndparams: the dict to fill
@rtype: dict
@return: a copy of the passed in ndparams with missing keys filled
from the node group defaults
"""
return FillDict(self.ndparams, ndparams)
class Cluster(TaggableObject):
"""Config object representing the cluster."""
__slots__ = [
"backend_storage",
"serial_no",
"rsahostkeypub",
"highest_used_port",
"tcpudp_port_pool",
"mac_prefix",
"volume_group_name",
"reserved_lvs",
"drbd_usermode_helper",
"default_bridge",
"default_hypervisor",
"master_node",
"master_ip",
"master_netdev",
"master_netmask",
"use_external_mip_script",
"cluster_name",
"file_storage_dir",
"shared_file_storage_dir",
"enabled_hypervisors",
"hvparams",
"ipolicy",
"os_hvp",
"beparams",
"osparams",
"nicparams",
"ndparams",
"diskparams",
"candidate_pool_size",
"modify_etc_hosts",
"modify_ssh_setup",
"maintain_node_health",
"uid_pool",
"default_iallocator",
"hidden_os",
"blacklisted_os",
"primary_ip_family",
"prealloc_wipe_disks",
"hv_state_static",
"disk_state_static",
] + _TIMESTAMPS + _UUID
def UpgradeConfig(self):
"""Fill defaults for missing configuration values.
"""
# pylint: disable=E0203
# because these are "defined" via slots, not manually
if self.hvparams is None:
self.hvparams = constants.HVC_DEFAULTS
else:
for hypervisor in self.hvparams:
self.hvparams[hypervisor] = FillDict(
constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
if self.os_hvp is None:
self.os_hvp = {}
# osparams added before 2.2
if self.osparams is None:
self.osparams = {}
self.ndparams = UpgradeNDParams(self.ndparams)
self.beparams = UpgradeGroupedParams(self.beparams,
constants.BEC_DEFAULTS)
for beparams_group in self.beparams:
UpgradeBeParams(self.beparams[beparams_group])
migrate_default_bridge = not self.nicparams
self.nicparams = UpgradeGroupedParams(self.nicparams,
constants.NICC_DEFAULTS)
if migrate_default_bridge:
self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
self.default_bridge
if self.modify_etc_hosts is None:
self.modify_etc_hosts = True
if self.modify_ssh_setup is None:
self.modify_ssh_setup = True
# default_bridge is no longer used in 2.1. The slot is left there to
# support auto-upgrading. It can be removed once we decide to deprecate
# upgrading straight from 2.0.
if self.default_bridge is not None:
self.default_bridge = None
# default_hypervisor is just the first enabled one in 2.1. This slot and
# code can be removed once upgrading straight from 2.0 is deprecated.
if self.default_hypervisor is not None:
self.enabled_hypervisors = ([self.default_hypervisor] +
[hvname for hvname in self.enabled_hypervisors
if hvname != self.default_hypervisor])
self.default_hypervisor = None
# maintain_node_health added after 2.1.1
if self.maintain_node_health is None:
self.maintain_node_health = False
if self.uid_pool is None:
self.uid_pool = []
if self.default_iallocator is None:
self.default_iallocator = ""
# reserved_lvs added before 2.2
if self.reserved_lvs is None:
self.reserved_lvs = []
# hidden and blacklisted operating systems added before 2.2.1
if self.hidden_os is None:
self.hidden_os = []
if self.blacklisted_os is None:
self.blacklisted_os = []
# primary_ip_family added before 2.3
if self.primary_ip_family is None:
self.primary_ip_family = AF_INET
if self.master_netmask is None:
ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
self.master_netmask = ipcls.iplen
if self.prealloc_wipe_disks is None:
self.prealloc_wipe_disks = False
# shared_file_storage_dir added before 2.5
if self.shared_file_storage_dir is None:
self.shared_file_storage_dir = ""
if self.use_external_mip_script is None:
self.use_external_mip_script = False
if self.diskparams:
self.diskparams = UpgradeDiskParams(self.diskparams)
else:
self.diskparams = constants.DISK_DT_DEFAULTS.copy()
# instance policy added before 2.6
if self.ipolicy is None:
self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
else:
# we can either make sure to upgrade the ipolicy always, or only
# do it in some corner cases (e.g. missing keys); note that this
# will break any removal of keys from the ipolicy dict
wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
if wrongkeys:
# These keys would be silently removed by FillIPolicy()
msg = ("Cluster instance policy contains spurious keys: %s" %
utils.CommaJoin(wrongkeys))
raise errors.ConfigurationError(msg)
self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
@property
def primary_hypervisor(self):
"""The first hypervisor is the primary.
Useful, for example, for L{Node}'s hv/disk state.
"""
return self.enabled_hypervisors[0]
def ToDict(self):
"""Custom function for cluster.
"""
mydict = super(Cluster, self).ToDict()
mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool)
return mydict
@classmethod
def FromDict(cls, val):
"""Custom function for cluster.
"""
obj = super(Cluster, cls).FromDict(val)
if not isinstance(obj.tcpudp_port_pool, set):
obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
return obj
def SimpleFillDP(self, diskparams):
"""Fill a given diskparams dict with cluster defaults.
@param diskparams: The diskparams
@return: The defaults dict
"""
return FillDiskParams(self.diskparams, diskparams)
def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
"""Get the default hypervisor parameters for the cluster.
@param hypervisor: the hypervisor name
@param os_name: if specified, we'll also update the defaults for this OS
@param skip_keys: if passed, list of keys not to use
@return: the defaults dict
"""
if skip_keys is None:
skip_keys = []
fill_stack = [self.hvparams.get(hypervisor, {})]
if os_name is not None:
os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
fill_stack.append(os_hvp)
ret_dict = {}
for o_dict in fill_stack:
ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
return ret_dict
def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
"""Fill a given hvparams dict with cluster defaults.
@type hv_name: string
@param hv_name: the hypervisor to use
@type os_name: string
@param os_name: the OS to use for overriding the hypervisor defaults
@type skip_globals: boolean
@param skip_globals: if True, the global hypervisor parameters will
not be filled
@rtype: dict
@return: a copy of the given hvparams with missing keys filled from
the cluster defaults
"""
if skip_globals:
skip_keys = constants.HVC_GLOBALS
else:
skip_keys = []
def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
return FillDict(def_dict, hvparams, skip_keys=skip_keys)
def FillHV(self, instance, skip_globals=False):
"""Fill an instance's hvparams dict with cluster defaults.
@type instance: L{objects.Instance}
@param instance: the instance parameter to fill
@type skip_globals: boolean
@param skip_globals: if True, the global hypervisor parameters will
not be filled
@rtype: dict
@return: a copy of the instance's hvparams with missing keys filled from
the cluster defaults
"""
return self.SimpleFillHV(instance.hypervisor, instance.os,
instance.hvparams, skip_globals)
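  # Resolution order sketch (derived from the methods above): cluster-wide
  # hvparams for the hypervisor, then per-OS overrides from os_hvp, then the
  # instance's own hvparams, with global keys optionally skipped.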
def SimpleFillBE(self, beparams):
"""Fill a given beparams dict with cluster defaults.
@type beparams: dict
@param beparams: the dict to fill
@rtype: dict
@return: a copy of the passed in beparams with missing keys filled
from the cluster defaults
"""
return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
def FillBE(self, instance):
"""Fill an instance's beparams dict with cluster defaults.
@type instance: L{objects.Instance}
@param instance: the instance parameter to fill
@rtype: dict
@return: a copy of the instance's beparams with missing keys filled from
the cluster defaults
"""
return self.SimpleFillBE(instance.beparams)
def SimpleFillNIC(self, nicparams):
"""Fill a given nicparams dict with cluster defaults.
@type nicparams: dict
@param nicparams: the dict to fill
@rtype: dict
@return: a copy of the passed in nicparams with missing keys filled
from the cluster defaults
"""
return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
def SimpleFillOS(self, os_name, os_params):
"""Fill an instance's osparams dict with cluster defaults.
@type os_name: string
@param os_name: the OS name to use
@type os_params: dict
@param os_params: the dict to fill with default values
@rtype: dict
@return: a copy of the instance's osparams with missing keys filled from
the cluster defaults
"""
name_only = os_name.split("+", 1)[0]
# base OS
result = self.osparams.get(name_only, {})
# OS with variant
result = FillDict(result, self.osparams.get(os_name, {}))
# specified params
return FillDict(result, os_params)
@staticmethod
def SimpleFillHvState(hv_state):
"""Fill an hv_state sub dict with cluster defaults.
"""
return FillDict(constants.HVST_DEFAULTS, hv_state)
@staticmethod
def SimpleFillDiskState(disk_state):
"""Fill an disk_state sub dict with cluster defaults.
"""
return FillDict(constants.DS_DEFAULTS, disk_state)
def FillND(self, node, nodegroup):
"""Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}
@type node: L{objects.Node}
@param node: A Node object to fill
@type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled
"""
return self.SimpleFillND(nodegroup.FillND(node))
def SimpleFillND(self, ndparams):
"""Fill a given ndparams dict with defaults.
@type ndparams: dict
@param ndparams: the dict to fill
@rtype: dict
@return: a copy of the passed in ndparams with missing keys filled
from the cluster defaults
"""
return FillDict(self.ndparams, ndparams)
def SimpleFillIPolicy(self, ipolicy):
""" Fill instance policy dict with defaults.
@type ipolicy: dict
@param ipolicy: the dict to fill
@rtype: dict
@return: a copy of passed ipolicy with missing keys filled from
the cluster defaults
"""
return FillIPolicy(self.ipolicy, ipolicy)
class BlockDevStatus(ConfigObject):
"""Config object representing the status of a block device."""
__slots__ = [
"dev_path",
"major",
"minor",
"sync_percent",
"estimated_time",
"is_degraded",
"ldisk_status",
]
class ImportExportStatus(ConfigObject):
"""Config object representing the status of an import or export."""
__slots__ = [
"recent_output",
"listen_port",
"connected",
"progress_mbytes",
"progress_throughput",
"progress_eta",
"progress_percent",
"exit_status",
"error_message",
] + _TIMESTAMPS
class ImportExportOptions(ConfigObject):
"""Options for import/export daemon
@ivar key_name: X509 key name (None for cluster certificate)
@ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
@ivar compress: Compression method (one of L{constants.IEC_ALL})
@ivar magic: Used to ensure the connection goes to the right disk
@ivar ipv6: Whether to use IPv6
@ivar connect_timeout: Number of seconds for establishing connection
"""
__slots__ = [
"key_name",
"ca_pem",
"compress",
"magic",
"ipv6",
"connect_timeout",
]
class ConfdRequest(ConfigObject):
"""Object holding a confd request.
@ivar protocol: confd protocol version
@ivar type: confd query type
@ivar query: query request
@ivar rsalt: requested reply salt
"""
__slots__ = [
"protocol",
"type",
"query",
"rsalt",
]
class ConfdReply(ConfigObject):
"""Object holding a confd reply.
@ivar protocol: confd protocol version
@ivar status: reply status code (ok, error)
@ivar answer: confd query reply
@ivar serial: configuration serial number
"""
__slots__ = [
"protocol",
"status",
"answer",
"serial",
]
class QueryFieldDefinition(ConfigObject):
"""Object holding a query field definition.
@ivar name: Field name
@ivar title: Human-readable title
@ivar kind: Field type
@ivar doc: Human-readable description
"""
__slots__ = [
"name",
"title",
"kind",
"doc",
]
class _QueryResponseBase(ConfigObject):
__slots__ = [
"fields",
]
def ToDict(self):
"""Custom function for serializing.
"""
mydict = super(_QueryResponseBase, self).ToDict()
mydict["fields"] = self._ContainerToDicts(mydict["fields"])
return mydict
@classmethod
def FromDict(cls, val):
"""Custom function for de-serializing.
"""
obj = super(_QueryResponseBase, cls).FromDict(val)
obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition)
return obj
class QueryResponse(_QueryResponseBase):
"""Object holding the response to a query.
@ivar fields: List of L{QueryFieldDefinition} objects
@ivar data: Requested data
"""
__slots__ = [
"data",
]
class QueryFieldsRequest(ConfigObject):
"""Object holding a request for querying available fields.
"""
__slots__ = [
"what",
"fields",
]
class QueryFieldsResponse(_QueryResponseBase):
"""Object holding the response to a query for fields.
@ivar fields: List of L{QueryFieldDefinition} objects
"""
__slots__ = []
class MigrationStatus(ConfigObject):
"""Object holding the status of a migration.
"""
__slots__ = [
"status",
"transferred_ram",
"total_ram",
]
class InstanceConsole(ConfigObject):
"""Object describing how to access the console of an instance.
"""
__slots__ = [
"instance",
"kind",
"message",
"host",
"port",
"user",
"command",
"display",
]
def Validate(self):
"""Validates contents of this object.
"""
assert self.kind in constants.CONS_ALL, "Unknown console type"
assert self.instance, "Missing instance name"
assert self.message or self.kind in [constants.CONS_SSH,
constants.CONS_SPICE,
constants.CONS_VNC]
assert self.host or self.kind == constants.CONS_MESSAGE
assert self.port or self.kind in [constants.CONS_MESSAGE,
constants.CONS_SSH]
assert self.user or self.kind in [constants.CONS_MESSAGE,
constants.CONS_SPICE,
constants.CONS_VNC]
assert self.command or self.kind in [constants.CONS_MESSAGE,
constants.CONS_SPICE,
constants.CONS_VNC]
assert self.display or self.kind in [constants.CONS_MESSAGE,
constants.CONS_SPICE,
constants.CONS_SSH]
return True
class Network(TaggableObject):
"""Object representing a network definition for ganeti.
"""
__slots__ = [
"_id",
"_rev",
"name",
"serial_no",
"mac_prefix",
"network",
"network6",
"gateway",
"gateway6",
"reservations",
"ext_reservations",
] + _TIMESTAMPS + _UUID
def HooksDict(self, prefix=""):
"""Export a dictionary used by hooks with a network's information.
@type prefix: String
@param prefix: Prefix to prepend to the dict entries
"""
result = {
"%sNETWORK_NAME" % prefix: self.name,
"%sNETWORK_UUID" % prefix: self.uuid,
"%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
}
if self.network:
result["%sNETWORK_SUBNET" % prefix] = self.network
if self.gateway:
result["%sNETWORK_GATEWAY" % prefix] = self.gateway
if self.network6:
result["%sNETWORK_SUBNET6" % prefix] = self.network6
if self.gateway6:
result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
if self.mac_prefix:
result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix
return result
@classmethod
def FromDict(cls, val):
"""Custom function for networks.
Remove deprecated network_type and family.
"""
if "network_type" in val:
del val["network_type"]
if "family" in val:
del val["family"]
obj = super(Network, cls).FromDict(val)
return obj
class SerializableConfigParser(ConfigParser.SafeConfigParser):
"""Simple wrapper over ConfigParse that allows serialization.
This class is basically ConfigParser.SafeConfigParser with two
additional methods that allow it to serialize/unserialize to/from a
buffer.
"""
def Dumps(self):
"""Dump this instance and return the string representation."""
buf = StringIO()
self.write(buf)
return buf.getvalue()
@classmethod
def Loads(cls, data):
"""Load data from a string."""
buf = StringIO(data)
cfp = cls()
cfp.readfp(buf)
return cfp
class LvmPvInfo(ConfigObject):
"""Information about an LVM physical volume (PV).
@type name: string
@ivar name: name of the PV
@type vg_name: string
@ivar vg_name: name of the volume group containing the PV
@type size: float
@ivar size: size of the PV in MiB
@type free: float
@ivar free: free space in the PV, in MiB
@type attributes: string
@ivar attributes: PV attributes
@type lv_list: list of strings
@ivar lv_list: names of the LVs hosted on the PV
"""
__slots__ = [
"name",
"vg_name",
"size",
"free",
"attributes",
"lv_list"
]
def IsEmpty(self):
"""Is this PV empty?
"""
return self.size <= (self.free + 1)
def IsAllocatable(self):
"""Is this PV allocatable?
"""
return ("a" in self.attributes)
| gpl-2.0 | -6,233,949,203,984,161,000 | 27.919593 | 80 | 0.644546 | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/lib-tk/tkColorChooser.py | 1 | 1064 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: tkColorChooser.py
from tkCommonDialog import Dialog
class Chooser(Dialog):
"""Ask for a color"""
command = 'tk_chooseColor'
def _fixoptions(self):
try:
color = self.options['initialcolor']
if isinstance(color, tuple):
self.options['initialcolor'] = '#%02x%02x%02x' % color
except KeyError:
pass
def _fixresult(self, widget, result):
if not result or not str(result):
return (None, None)
else:
r, g, b = widget.winfo_rgb(result)
return (
(
r / 256, g / 256, b / 256), str(result))
def askcolor(color=None, **options):
"""Ask for a color"""
if color:
options = options.copy()
options['initialcolor'] = color
return Chooser(**options).show()
if __name__ == '__main__':
print 'color', askcolor() | unlicense | 8,246,888,322,872,728,000 | 26.307692 | 70 | 0.56485 | false |
frederica07/Dragon_Programming_Process | PyOpenGL-3.0.2/OpenGL/GL/ARB/multi_draw_indirect.py | 1 | 1260 | '''OpenGL extension ARB.multi_draw_indirect
This module customises the behaviour of the
OpenGL.raw.GL.ARB.multi_draw_indirect to provide a more
Python-friendly API
Overview (from the spec)
The ARB_draw_indirect extension (included in OpenGL 4.0) introduced
mechanisms whereby the parameters for a draw function may be provided in
a structure contained in a buffer object rather than as parameters to the
drawing procedure. This is known as an indirect draw and is exposed as two
new functions, glDrawArraysIndirect and glDrawElementsIndirect. Each of
these functions generates a single batch of primitives.
This extension builds on this functionality by providing procedures to
invoke multiple draws from a single procedure call. This allows large
batches of drawing commands to be assembled in server memory (via a buffer
object) which may then be dispatched through a single function call.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/multi_draw_indirect.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.ARB.multi_draw_indirect import *
### END AUTOGENERATED SECTION | bsd-2-clause | -8,670,190,848,074,992,000 | 42.482759 | 75 | 0.807937 | false |
fungusakafungus/cloudformation-jsonschema | tests/test_examples.py | 1 | 1130 | import json
import pytest
import glob
import val
schema = json.load(open('schema.json'))
@pytest.mark.parametrize("template", glob.glob('tests/examples-*/*.template'))
def test_template(template):
if 'troposphere/EMR_Cluster' in template:
pytest.skip('troposphere/EMR_Cluster uses undocumented AWS::EMR::Cluster.EbsConfiguration')
if 'OpenStack' in template:
pytest.skip('OpenStack is not supported')
instance = json.load(open(template))
val.val(instance, schema)
import py.io
import py._io.saferepr
def saferepr(obj, maxsize=2400):
""" return a size-limited safe repr-string for the given object.
Failing __repr__ functions of user instances will be represented
with a short exception info and 'saferepr' generally takes
care to never raise exceptions itself. This function is a wrapper
around the Repr/reprlib functionality of the standard 2.6 lib.
"""
# review exception handling
srepr = py._io.saferepr.SafeRepr()
srepr.maxstring = maxsize
srepr.maxsize = maxsize
srepr.maxother = 1600
return srepr.repr(obj)
py.io.saferepr = saferepr
| mit | -5,412,805,403,236,675,000 | 27.974359 | 99 | 0.717699 | false |
hilljb/pyclojure | pyclojure/clojure.py | 1 | 2708 | #!/usr/bin/python2
""" A Python module wrapping nrepl to interact with a Leiningen Clojure REPL.
Some notes:
- All return values are unicode objects inside a dictionary.
- You need to explicitly kill the repl before exiting.
Example:
>>> import clojure
>>> repl = clojure.NREPL()
nREPL server started on port 57041 on host 127.0.0.1 - nrepl://127.0.0.1:57041
>>> repl.eval('(defn f [a b] (+ a b))')
{u'session': u'658b32e6-ee3f-4a44-aa24-06ce375e4fb4', u'ns': u'user', u'value': u"#'user/f"}
>>> repl.eval('(f 1 2)')
{u'session': u'32ca0012-0fc1-4170-977c-6d480f678766', u'ns': u'user', u'value': u'3'}
"""
__author__ = "Jason B. Hill"
__email__ = "[email protected]"
import os
import subprocess
import nrepl
class NREPL(object):
""" Create a Leiningen NREPL and interact with it.
"""
def __init__(self, port=None):
""" Initiate a Leiningen NREPL.
INPUT
-----
port : int : optional
The port to use for the NREPL server.
"""
# Make sure the port is a positive integer
if port:
if not isinstance(port, (int, long)):
raise TypeError("NREPL port must be an integer: %s given" % port)
if port < 1:
raise ValueError("NREPL port must be greater than zero: %s given" % port)
self.port = port
self.host = 'localhost'
# Form the command to execute
cmd = "lein repl :headless"
if self.port:
cmd += " :port %s" % self.port
# Execute the command
proc = subprocess.Popen(
cmd.split(),
stdout=subprocess.PIPE,
stderr=open(os.devnull,'w'),
stdin=open(os.devnull,'w')
)
# Get the return string and parse the port
retport = None
while not retport:
retline = proc.stdout.readline()
if 'server started' in retline:
print retline
retport = retline.split('port')[1].split()[0]
if retport:
self.port = retport
def eval(self, cmd):
""" Evaluate a command using the attached NREPL.
INPUT
-----
cmd : str
The command to execute.
OUTPUT
------
A dictionary with u'session', u'ns', and u'value'.
"""
host_string = 'nrepl://' + str(self.host) + ':' + str(self.port)
c = nrepl.connect(host_string)
c.write({"op": "eval", "code": cmd})
print "%s" % c.read()
def exit(self):
""" Shut down the NREPL server.
This method should be called so the NREPL server is not zombied.
"""
self.eval('(System/exit 0)')
| epl-1.0 | -8,702,635,536,927,137,000 | 26.08 | 92 | 0.553176 | false |
aewallin/opencamlib | examples/python/waterline/waterline_2_tux_adapt.py | 1 | 3113 | import ocl
import camvtk
import time
import vtk
import datetime
import math
def drawLoops(myscreen, loops, loopcolor):
nloop = 0
for lop in loops:
n = 0
N = len(lop)
first_point=ocl.Point(-1,-1,5)
previous=ocl.Point(-1,-1,5)
for p in lop:
if n==0: # don't draw anything on the first iteration
previous=p
first_point = p
elif n== (N-1): # the last point
myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=loopcolor) ) # the normal line
# and a line from p to the first point
myscreen.addActor( camvtk.Line(p1=(p.x,p.y,p.z),p2=(first_point.x,first_point.y,first_point.z),color=loopcolor) )
else:
myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=loopcolor) )
previous=p
n=n+1
print("rendered loop ",nloop, " with ", len(lop), " points")
nloop = nloop+1
if __name__ == "__main__":
print(ocl.version())
myscreen = camvtk.VTKScreen()
#stl = camvtk.STLSurf("../../stl/demo.stl")
stl = camvtk.STLSurf("../../stl/gnu_tux_mod.stl")
#stl = camvtk.STLSurf("../../stl/waterline1.stl")
myscreen.addActor(stl)
stl.SetWireframe() # render tux as wireframe
#stl.SetSurface() # render tux as surface
stl.SetColor(camvtk.cyan)
polydata = stl.src.GetOutput()
s = ocl.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print("STL surface read,", s.size(), "triangles")
zh=1.75145
diam = 1.4
length = 500
loops = []
#cutter = ocl.CylCutter( diam , length )
cutter = ocl.BallCutter( diam , length )
#cutter = ocl.BullCutter( diam , diam/5, length )
wl = ocl.Waterline()
wl.setSTL(s)
wl.setCutter(cutter)
wl.setZ(zh)
wl.setSampling(0.5)
#wl.setThreads(5)
t_before = time.time()
wl.run()
t_after = time.time()
calctime = t_after-t_before
print(" Waterline done in ", calctime," s")
cutter_loops = wl.getLoops()
for l in cutter_loops:
loops.append(l)
aloops = []
awl = ocl.AdaptiveWaterline()
awl.setSTL(s)
awl.setCutter(cutter)
awl.setZ(zh)
awl.setSampling(0.1)
awl.setMinSampling(0.01)
#wl.setThreads(5)
t_before = time.time()
awl.run()
t_after = time.time()
calctime = t_after-t_before
print(" AdaptiveWaterline done in ", calctime," s")
acutter_loops = awl.getLoops()
for l in acutter_loops:
aloops.append(l)
print("All waterlines done. Got", len(loops)," loops in total.")
# draw the loops
drawLoops(myscreen, loops, camvtk.yellow)
drawLoops(myscreen, aloops, camvtk.red)
print("done.")
myscreen.camera.SetPosition(15, 13, 7)
myscreen.camera.SetFocalPoint(5, 5, 0)
camvtk.drawArrows(myscreen,center=(-0.5,-0.5,-0.5))
camvtk.drawOCLtext(myscreen)
myscreen.render()
myscreen.iren.Start()
#raw_input("Press Enter to terminate")
| lgpl-2.1 | -864,676,445,629,931,900 | 30.444444 | 138 | 0.592033 | false |
hackfestca/cnb | cnb/modAvailable/CNBMMChuckKukDo.py | 1 | 1398 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
CNB Matrix Module - chuckkukdo
'''
from cnb.cnbMatrixModule import CNBMatrixModule
class CNBMMChuckKukDo(CNBMatrixModule):
"""
"""
name = 'chuckkukdo'
usage = 'chuckkukdo'
desc = 'Print the Chuck Kuk Do (http://en.wikipedia.org/wiki/Chun_Kuk_Do)'
aliases = []
def __init__(self,log):
CNBMatrixModule.__init__(self,log)
def __del__(self):
pass
def processCmd(self, oMsg):
result = " \n \
1- I will develop myself to the maximum of my potential in all ways.\n \
2- I will forget the mistakes of the past and press on to greater achievements.\n \
3- I will continually work at developing love, happiness and loyalty in my family.\n \
4- I will look for the good in all people and make them feel worthwhile.\n \
5- If I have nothing good to say about a person, I will say nothing.\n \
6- I will always be as enthusiastic about the success of others as I am about my own.\n \
7- I will maintain an attitude of open-mindedness.\n \
8- I will maintain respect for those in authority and demonstrate this respect at all times.\n \
9- I will always remain loyal to my God, my country, family and my friends.\n \
10- I will remain highly goal-oriented throughout my life because that positive attitude helps my family, my country and myself.\n \
"
return result
| gpl-3.0 | 5,698,115,309,628,994,000 | 33.097561 | 133 | 0.679542 | false |
krishnab-datakind/mining-data-acquisition | data_gather/PackageConstants.py | 1 | 4082 |
#!/usr/bin/python
"""
Package constants
"""
## MIT License
##
## Copyright (c) 2017, krishna bhogaonker
## Permission is hereby granted, free of charge, to any person obtaining a ## copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
## The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = 'krishna bhogaonker'
__copyright__ = 'copyright 2017'
__credits__ = ['krishna bhogaonker']
__license__ = "MIT"
__version__ = '0.1.0'
__maintainer__ = 'krishna bhogaonker'
__email__ = '[email protected]'
__status__ = 'pre-alpha'
from aenum import Enum
class RequestTypes(Enum):
SIMPLEPOINTIMAGERY = 1
DIVAGIS = 2
COMPOSITEDPOINTIMAGERY = 3
class RequestStatusCodes(Enum):
CLOSED = 0
CREATED = 1
QUEUED = 2
PROCESSING = 3
COMPLETED = 4
REJECTED = 5
ERROR = 6
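# NOTE: ImageCollection is neither defined nor imported in this file; it is
# assumed to be provided elsewhere in the package as a simple container that
# holds the collection id, band list, start/end dates and resolution used below.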
imgCollections = {'Landsat8' : ImageCollection('LANDSAT/LC08/C01/T1',
['B1','B2','B3','B4','B5','B6','B7','B8','B9','B10','B11','BQA'],
'04/13/2011',
'10/07/2017',
30),
'Landsat7' : ImageCollection('LANDSAT/LE07/C01/T1',
['B1','B2','B3','B4','B5','B6','B7'],
'01/01/1999',
'09/17/2017',
30),
'Landsat5' : ImageCollection('LANDSAT/LT05/C01/T1',
['B1','B2','B3','B4','B5','B6','B7'],
'01/01/1984',
'05/05/2012',
30),
'Sentinel2msi' : ImageCollection('COPERNICUS/S2',
['B1','B2','B3','B4','B5','B6','B7','B8','B8A','B9','B10','B11','QA10','QA20','QA60'],
'01/23/2015',
'10/20/2017',
30),
'Sentinel2sar' : ImageCollection('COPERNICUS/S1_GRD',
['VV', 'HH',['VV', 'VH'], ['HH','HV']],
'10/03/2014',
'10/20/2017',
30),
'ModisThermalAnomalies' : ImageCollection('MODIS/006/MOD14A1',
['FireMask', 'MaxFRP','sample', 'QA'],
'02/18/2000',
'10/23/2017',
30)
}
if __name__ == "__main__":
print('set of package constants.')
| mit | -3,745,854,114,414,277,600 | 48.780488 | 463 | 0.430671 | false |
quentinhardy/odat | HttpUriType.py | 1 | 3512 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from Http import Http
import logging
from Utils import ErrorSQLRequest, checkOptionsGivenByTheUser
from Constants import *
class HttpUriType (Http):
'''
	Allow the user to send HTTP requests
'''
def __init__(self,args):
'''
Constructor
'''
logging.debug("HttpUriType object created")
Http.__init__(self,args)
def sendGetRequest(self,url,printResponse=True):
'''
Send a HTTP get request to url
		Return an exception if the current user is not allowed to use the httpuritype lib or if the request fails, otherwise return the response data (an empty string if there is none)
'''
logging.info('Send a HTTP GET request to {0}'.format(url))
query = "select httpuritype('{0}').getclob() from dual".format(url)
response = self.__execQuery__(query=query,ld=['data'])
if isinstance(response,Exception) :
logging.info('Error with the SQL request {0}: {1}'.format(query,str(response)))
return response
elif isinstance(response,list) and isinstance(response[0],dict):
return response[0]['data']
logging.info('Enough privileges')
return ''
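	# For example (hypothetical target), a call such as
	# self.sendGetRequest('http://10.0.0.1/') makes the database execute:
	#   select httpuritype('http://10.0.0.1/').getclob() from dual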
def testAll (self):
'''
Test all functions
'''
self.args['print'].subtitle("HTTPURITYPE library ?")
logging.info('Try to make the server send a HTTP request to 0.0.0.0 with the HTTPURITYPE library')
response = self.sendGetRequest('http://0.0.0.0/',printResponse=False)
if isinstance(response,Exception) and self.ERROR_NO_PRIVILEGE in str(response) or self.ERROR_XML_DB_SECU_NOT_INST in str(response):
logging.info('Not enough privileges: {0}'.format(str(response)))
self.args['print'].badNews("KO")
return False
else:
self.args['print'].goodNews("OK")
return True
def runHttpUriTypeModule(args):
'''
Run the HTTPURITYPE module
'''
status = True
if checkOptionsGivenByTheUser(args,["test-module","httpUrl","scan-ports"]) == False : return EXIT_MISS_ARGUMENT
httpUriType = HttpUriType(args)
status = httpUriType.connection(stopIfError=True)
if args['test-module'] == True :
args['print'].title("Test if the HTTPURITYPE library can be used")
status = httpUriType.testAll()
#Option 1: httpUrl
if args['httpUrl'] != None:
args['print'].title("Send a GET request from {0} to {1}".format(args['connectionStr'],args['httpUrl']))
response = httpUriType.sendGetRequest(url=args['httpUrl'])
if isinstance(response,Exception):
args['print'].badNews("HTTP GET request failed")
else :
args['print'].goodNews("The server response is:\n {0}".format(response))
if args['outputFile'] != None : httpUriType.writeFile(args['outputFile'],str(response))
#Option 2: scan-ports
if args['scan-ports'] != None:
ports = []
if "," in args['scan-ports'][1]: ports=args['scan-ports'][1].split(',')
elif '-' in args['scan-ports'][1]:
startEnd = args['scan-ports'][1].split('-')
for aPort in range(int(startEnd[0]),int(startEnd[1])): ports.append(str(aPort))
if ports == []:
logging.critical("The second parameter ('{0}') is not a valid: cancelation...".format(args['scan-ports'][1]))
return -1
else :
if args['scan-ports'][1].isdigit() == True:
ports = [args['scan-ports'][1]]
else:
logging.critical("The second parameter ('{0}') is not a valid port: cancelation...".format(args['scan-ports'][1]))
return -1
args['print'].title("Scan ports ({0}) of {1} ".format(args['scan-ports'][1],args['scan-ports'][0]))
resultats = httpUriType.scanTcpPorts(httpObject=httpUriType,ip=args['scan-ports'][0],ports=ports)
httpUriType.printScanPortResults(resultats)
httpUriType.close()
| lgpl-3.0 | -7,563,020,273,250,675,000 | 36.763441 | 133 | 0.687927 | false |
popazerty/dvbapp-gui2 | skin.py | 1 | 33432 | from Tools.Profile import profile
profile("LOAD:ElementTree")
import xml.etree.cElementTree
import os
profile("LOAD:enigma_skin")
from enigma import eSize, ePoint, eRect, gFont, eWindow, eLabel, ePixmap, eWindowStyleManager, addFont, gRGB, eWindowStyleSkinned, getDesktop
from Components.config import ConfigSubsection, ConfigText, config
from Components.Sources.Source import ObsoleteSource
from Tools.Directories import resolveFilename, SCOPE_SKIN, SCOPE_SKIN_IMAGE, SCOPE_FONTS, SCOPE_ACTIVE_SKIN, SCOPE_ACTIVE_LCDSKIN, SCOPE_CURRENT_SKIN, SCOPE_CONFIG, fileExists
from Tools.Import import my_import
from Tools.LoadPixmap import LoadPixmap
from Components.RcModel import rc_model
colorNames = {}
# Predefined fonts, typically used in built-in screens and for components like
# the movie list and so.
fonts = {
"Body": ("Regular", 18, 22, 16),
"ChoiceList": ("Regular", 20, 24, 18),
}
def dump(x, i=0):
print " " * i + str(x)
try:
for n in x.childNodes:
dump(n, i + 1)
except:
None
class SkinError(Exception):
def __init__(self, message):
self.msg = message
def __str__(self):
return "{%s}: %s. Please contact the skin's author!" % (config.skin.primary_skin.value, self.msg)
class DisplaySkinError(Exception):
def __init__(self, message):
self.msg = message
def __str__(self):
return "{%s}: %s. Please contact the skin's author!" % (config.skin.display_skin.value, self.msg)
dom_skins = [ ]
def addSkin(name, scope = SCOPE_SKIN):
# read the skin
filename = resolveFilename(scope, name)
if fileExists(filename):
mpath = os.path.dirname(filename) + "/"
file = open(filename, 'r')
dom_skins.append((mpath, xml.etree.cElementTree.parse(file).getroot()))
file.close()
return True
return False
# get own skin_user_skinname.xml file, if exist
def skin_user_skinname():
name = "skin_user_" + config.skin.primary_skin.value[:config.skin.primary_skin.value.rfind('/')] + ".xml"
filename = resolveFilename(SCOPE_CONFIG, name)
if fileExists(filename):
return name
return None
# we do our best to always select the "right" value
# skins are loaded in order of priority: skin with
# highest priority is loaded last, usually the user-provided
# skin.
# currently, loadSingleSkinData (colors, bordersets etc.)
# are applied one-after-each, in order of ascending priority.
# the dom_skin will keep all screens in descending priority,
# so the first screen found will be used.
# example: loadSkin("nemesis_greenline/skin.xml")
config.skin = ConfigSubsection()
DEFAULT_SKIN = "a4you/skin.xml"
if not fileExists(resolveFilename(SCOPE_SKIN, DEFAULT_SKIN)):
# in that case, fallback to Magic (which is an SD skin)
DEFAULT_SKIN = "skin.xml"
config.skin.primary_skin = ConfigText(default=DEFAULT_SKIN)
DEFAULT_DISPLAY_SKIN = "skin_display.xml"
config.skin.display_skin = ConfigText(default=DEFAULT_DISPLAY_SKIN)
profile("LoadSkin")
try:
name = skin_user_skinname()
if name is not None:
addSkin(name, SCOPE_CONFIG)
else:
addSkin('skin_user.xml', SCOPE_CONFIG)
except (SkinError, IOError, AssertionError), err:
print "not loading user skin: ", err
# some boxes lie about their dimensions
addSkin('skin_box.xml')
# add optional discrete second infobar
addSkin('skin_second_infobar.xml')
display_skin_id = 1
try:
if not addSkin(os.path.join('display', config.skin.display_skin.value)):
raise DisplaySkinError, "display skin not found"
except Exception, err:
print "SKIN ERROR:", err
skin = DEFAULT_DISPLAY_SKIN
if config.skin.display_skin.value == skin:
skin = 'skin_display.xml'
print "defaulting to standard display skin...", skin
config.skin.display_skin.value = skin
skin = os.path.join('display', skin)
addSkin(skin)
del skin
addSkin('skin_subtitles.xml')
try:
if not addSkin(config.skin.primary_skin.value):
raise SkinError, "primary skin not found"
except Exception, err:
print "SKIN ERROR:", err
skin = DEFAULT_SKIN
if config.skin.primary_skin.value == skin:
skin = 'skin.xml'
print "defaulting to standard skin...", skin
config.skin.primary_skin.value = skin
addSkin(skin)
del skin
addSkin('skin_default.xml')
profile("LoadSkinDefaultDone")
def parseCoordinate(s, e, size=0, font=None):
s = s.strip()
if s == "center":
val = (e - size)/2
elif s == '*':
return None
else:
		if s[0] == 'e':
			val = e
			s = s[1:]
		elif s[0] == 'c':
			val = e/2
			s = s[1:]
		else:
			val = 0
		if s:
			if s[-1] == '%':
				val += e * int(s[:-1]) / 100
			elif s[-1] == 'w':
				val += fonts[font][3] * int(s[:-1])
			elif s[-1] == 'h':
				val += fonts[font][2] * int(s[:-1])
			else:
				val += int(s)
if val < 0:
val = 0
return val
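# Illustrative examples (not from the original source) of how parseCoordinate
# interprets the skin coordinate mini-language, assuming e=720 (parent extent),
# size=100 and a font whose width/height table entries are 10 and 20 pixels:
#   "center" -> (720 - 100) / 2 = 310
#   "e-50"   -> 720 - 50 = 670
#   "c+10%"  -> 360 + 72 = 432
#   "3w"     -> 3 * 10 = 30 (width of three characters of the given font)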
def getParentSize(object, desktop):
size = eSize()
if object:
parent = object.getParent()
# For some widgets (e.g. ScrollLabel) the skin attributes are applied to
# a child widget, instead of to the widget itself. In that case, the parent
# we have here is not the real parent, but it is the main widget.
# We have to go one level higher to get the actual parent.
# We can detect this because the 'parent' will not have a size yet
# (the main widget's size will be calculated internally, as soon as the child
# widget has parsed the skin attributes)
if parent and parent.size().isEmpty():
parent = parent.getParent()
if parent:
size = parent.size()
elif desktop:
#widget has no parent, use desktop size instead for relative coordinates
size = desktop.size()
return size
def parsePosition(s, scale, object = None, desktop = None, size = None):
x, y = s.split(',')
parentsize = eSize()
if object and (x[0] in ('c', 'e') or y[0] in ('c', 'e')):
parentsize = getParentSize(object, desktop)
xval = parseCoordinate(x, parentsize.width(), size and size.width())
yval = parseCoordinate(y, parentsize.height(), size and size.height())
return ePoint(xval * scale[0][0] / scale[0][1], yval * scale[1][0] / scale[1][1])
def parseSize(s, scale, object = None, desktop = None):
x, y = s.split(',')
parentsize = eSize()
if object and (x[0] in ('c', 'e') or y[0] in ('c', 'e')):
parentsize = getParentSize(object, desktop)
xval = parseCoordinate(x, parentsize.width())
yval = parseCoordinate(y, parentsize.height())
return eSize(xval * scale[0][0] / scale[0][1], yval * scale[1][0] / scale[1][1])
def parseFont(s, scale):
try:
f = fonts[s]
name = f[0]
size = f[1]
except:
name, size = s.split(';')
return gFont(name, int(size) * scale[0][0] / scale[0][1])
def parseColor(s):
if s[0] != '#':
try:
return colorNames[s]
except:
raise SkinError("color '%s' must be #aarrggbb or valid named color" % s)
return gRGB(int(s[1:], 0x10))
def collectAttributes(skinAttributes, node, context, skin_path_prefix=None, ignore=(), filenames=frozenset(("pixmap", "pointer", "seek_pointer", "backgroundPixmap", "selectionPixmap", "sliderPixmap", "scrollbarbackgroundPixmap"))):
# walk all attributes
size = None
pos = None
font = None
for attrib, value in node.items():
if attrib not in ignore:
if attrib in filenames:
pngfile = resolveFilename(SCOPE_ACTIVE_SKIN, value, path_prefix=skin_path_prefix)
if fileExists(resolveFilename(SCOPE_ACTIVE_LCDSKIN, value, path_prefix=skin_path_prefix)):
pngfile = resolveFilename(SCOPE_ACTIVE_LCDSKIN, value, path_prefix=skin_path_prefix)
value = pngfile
# Bit of a hack this, really. When a window has a flag (e.g. wfNoBorder)
# it needs to be set at least before the size is set, in order for the
# window dimensions to be calculated correctly in all situations.
# If wfNoBorder is applied after the size has been set, the window will fail to clear the title area.
# Similar situation for a scrollbar in a listbox; when the scrollbar setting is applied after
# the size, a scrollbar will not be shown until the selection moves for the first time
if attrib == 'size':
size = value.encode("utf-8")
elif attrib == 'position':
pos = value.encode("utf-8")
elif attrib == 'font':
font = value.encode("utf-8")
skinAttributes.append((attrib, font))
else:
skinAttributes.append((attrib, value.encode("utf-8")))
if pos is not None:
pos, size = context.parse(pos, size, font)
skinAttributes.append(('position', pos))
if size is not None:
skinAttributes.append(('size', size))
def morphRcImagePath(value):
if rc_model.rcIsDefault() is False:
if ('rc.png' or 'oldrc.png') in value:
value = rc_model.getRcLocation() + 'rc.png'
return value
def loadPixmap(path, desktop):
option = path.find("#")
if option != -1:
path = path[:option]
ptr = LoadPixmap(morphRcImagePath(path), desktop)
if ptr is None:
raise SkinError("pixmap file %s not found!" % path)
return ptr
class AttributeParser:
def __init__(self, guiObject, desktop, scale = ((1,1),(1,1))):
self.guiObject = guiObject
self.desktop = desktop
self.scale = scale
def applyOne(self, attrib, value):
try:
getattr(self, attrib)(value)
except AttributeError:
print "[Skin] Attribute not implemented:", attrib, "value:", value
except SkinError, ex:
print "[Skin] Error:", ex
def applyAll(self, attrs):
for attrib, value in attrs:
try:
getattr(self, attrib)(value)
except AttributeError:
print "[Skin] Attribute not implemented:", attrib, "value:", value
except SkinError, ex:
print "[Skin] Error:", ex
def conditional(self, value):
pass
def position(self, value):
if isinstance(value, tuple):
self.guiObject.move(ePoint(*value))
else:
self.guiObject.move(parsePosition(value, self.scale, self.guiObject, self.desktop, self.guiObject.csize()))
def size(self, value):
if isinstance(value, tuple):
self.guiObject.resize(eSize(*value))
else:
self.guiObject.resize(parseSize(value, self.scale, self.guiObject, self.desktop))
def title(self, value):
self.guiObject.setTitle(_(value))
def text(self, value):
self.guiObject.setText(_(value))
def font(self, value):
self.guiObject.setFont(parseFont(value, self.scale))
def zPosition(self, value):
self.guiObject.setZPosition(int(value))
def itemHeight(self, value):
self.guiObject.setItemHeight(int(value))
def pixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setPixmap(ptr)
def backgroundPixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setBackgroundPicture(ptr)
def selectionPixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setSelectionPicture(ptr)
def sliderPixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setSliderPicture(ptr)
def scrollbarbackgroundPixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setScrollbarBackgroundPicture(ptr)
def alphatest(self, value):
self.guiObject.setAlphatest(
{ "on": 1,
"off": 0,
"blend": 2,
}[value])
def scale(self, value):
self.guiObject.setScale(1)
def orientation(self, value): # used by eSlider
try:
self.guiObject.setOrientation(*
{ "orVertical": (self.guiObject.orVertical, False),
"orTopToBottom": (self.guiObject.orVertical, False),
"orBottomToTop": (self.guiObject.orVertical, True),
"orHorizontal": (self.guiObject.orHorizontal, False),
"orLeftToRight": (self.guiObject.orHorizontal, False),
"orRightToLeft": (self.guiObject.orHorizontal, True),
}[value])
except KeyError:
			print "orientation must be either orVertical or orHorizontal!, not %s. Please contact the skin's author!" % value
def valign(self, value):
try:
self.guiObject.setVAlign(
{ "top": self.guiObject.alignTop,
"center": self.guiObject.alignCenter,
"bottom": self.guiObject.alignBottom
}[value])
except KeyError:
print "valign must be either top, center or bottom!, not %s. Please contact the skin's author!" % value
def halign(self, value):
try:
self.guiObject.setHAlign(
{ "left": self.guiObject.alignLeft,
"center": self.guiObject.alignCenter,
"right": self.guiObject.alignRight,
"block": self.guiObject.alignBlock
}[value])
except KeyError:
print "halign must be either left, center, right or block!, not %s. Please contact the skin's author!" % value
def textOffset(self, value):
x, y = value.split(',')
self.guiObject.setTextOffset(ePoint(int(x) * self.scale[0][0] / self.scale[0][1], int(y) * self.scale[1][0] / self.scale[1][1]))
def flags(self, value):
flags = value.split(',')
for f in flags:
try:
fv = eWindow.__dict__[f]
self.guiObject.setFlag(fv)
except KeyError:
print "illegal flag %s!" % f
def backgroundColor(self, value):
self.guiObject.setBackgroundColor(parseColor(value))
def backgroundColorSelected(self, value):
self.guiObject.setBackgroundColorSelected(parseColor(value))
def foregroundColor(self, value):
self.guiObject.setForegroundColor(parseColor(value))
def foregroundColorSelected(self, value):
self.guiObject.setForegroundColorSelected(parseColor(value))
def foregroundNotCrypted(self, value):
self.guiObject.setForegroundColor(parseColor(value))
def backgroundNotCrypted(self, value):
self.guiObject.setBackgroundColor(parseColor(value))
def foregroundCrypted(self, value):
self.guiObject.setForegroundColor(parseColor(value))
def backgroundCrypted(self, value):
self.guiObject.setBackgroundColor(parseColor(value))
def foregroundEncrypted(self, value):
self.guiObject.setForegroundColor(parseColor(value))
def backgroundEncrypted(self, value):
self.guiObject.setBackgroundColor(parseColor(value))
def shadowColor(self, value):
self.guiObject.setShadowColor(parseColor(value))
def selectionDisabled(self, value):
self.guiObject.setSelectionEnable(0)
def transparent(self, value):
self.guiObject.setTransparent(int(value))
def borderColor(self, value):
self.guiObject.setBorderColor(parseColor(value))
def borderWidth(self, value):
self.guiObject.setBorderWidth(int(value))
def scrollbarMode(self, value):
self.guiObject.setScrollbarMode(getattr(self.guiObject, value))
# { "showOnDemand": self.guiObject.showOnDemand,
# "showAlways": self.guiObject.showAlways,
# "showNever": self.guiObject.showNever,
# "showLeft": self.guiObject.showLeft
# }[value])
def enableWrapAround(self, value):
self.guiObject.setWrapAround(True)
def itemHeight(self, value):
self.guiObject.setItemHeight(int(value))
def pointer(self, value):
(name, pos) = value.split(':')
pos = parsePosition(pos, self.scale)
ptr = loadPixmap(name, self.desktop)
self.guiObject.setPointer(0, ptr, pos)
def seek_pointer(self, value):
(name, pos) = value.split(':')
pos = parsePosition(pos, self.scale)
ptr = loadPixmap(name, self.desktop)
self.guiObject.setPointer(1, ptr, pos)
def shadowOffset(self, value):
self.guiObject.setShadowOffset(parsePosition(value, self.scale))
def noWrap(self, value):
self.guiObject.setNoWrap(1)
def applySingleAttribute(guiObject, desktop, attrib, value, scale = ((1,1),(1,1))):
# Someone still using applySingleAttribute?
AttributeParser(guiObject, desktop, scale).applyOne(attrib, value)
def applyAllAttributes(guiObject, desktop, attributes, scale):
AttributeParser(guiObject, desktop, scale).applyAll(attributes)
def loadSingleSkinData(desktop, skin, path_prefix):
"""loads skin data like colors, windowstyle etc."""
assert skin.tag == "skin", "root element in skin must be 'skin'!"
for c in skin.findall("output"):
id = c.attrib.get('id')
if id:
id = int(id)
else:
id = 0
if id == 0: # framebuffer
for res in c.findall("resolution"):
get_attr = res.attrib.get
xres = get_attr("xres")
if xres:
xres = int(xres)
else:
xres = 720
yres = get_attr("yres")
if yres:
yres = int(yres)
else:
yres = 576
bpp = get_attr("bpp")
if bpp:
bpp = int(bpp)
else:
bpp = 32
#print "Resolution:", xres,yres,bpp
from enigma import gMainDC
gMainDC.getInstance().setResolution(xres, yres)
desktop.resize(eSize(xres, yres))
if bpp != 32:
# load palette (not yet implemented)
pass
for skininclude in skin.findall("include"):
filename = skininclude.attrib.get("filename")
if filename:
skinfile = resolveFilename(SCOPE_ACTIVE_SKIN, filename, path_prefix=path_prefix)
if not fileExists(skinfile):
skinfile = resolveFilename(SCOPE_SKIN_IMAGE, filename, path_prefix=path_prefix)
if fileExists(skinfile):
print "[SKIN] loading include:", skinfile
loadSkin(skinfile)
for c in skin.findall("colors"):
for color in c.findall("color"):
get_attr = color.attrib.get
name = get_attr("name")
color = get_attr("value")
if name and color:
colorNames[name] = parseColor(color)
#print "Color:", name, color
else:
raise SkinError("need color and name, got %s %s" % (name, color))
for c in skin.findall("fonts"):
for font in c.findall("font"):
get_attr = font.attrib.get
filename = get_attr("filename", "<NONAME>")
name = get_attr("name", "Regular")
scale = get_attr("scale")
if scale:
scale = int(scale)
else:
scale = 100
is_replacement = get_attr("replacement") and True or False
render = get_attr("render")
if render:
render = int(render)
else:
render = 0
resolved_font = resolveFilename(SCOPE_FONTS, filename, path_prefix=path_prefix)
if not fileExists(resolved_font): #when font is not available look at current skin path
resolved_font = resolveFilename(SCOPE_ACTIVE_SKIN, filename)
if fileExists(resolveFilename(SCOPE_CURRENT_SKIN, filename)):
resolved_font = resolveFilename(SCOPE_CURRENT_SKIN, filename)
elif fileExists(resolveFilename(SCOPE_ACTIVE_LCDSKIN, filename)):
resolved_font = resolveFilename(SCOPE_ACTIVE_LCDSKIN, filename)
addFont(resolved_font, name, scale, is_replacement, render)
#print "Font: ", resolved_font, name, scale, is_replacement
for alias in c.findall("alias"):
get = alias.attrib.get
try:
name = get("name")
font = get("font")
size = int(get("size"))
height = int(get("height", size)) # to be calculated some day
width = int(get("width", size))
global fonts
fonts[name] = (font, size, height, width)
except Exception, ex:
print "[SKIN] bad font alias", ex
for c in skin.findall("subtitles"):
from enigma import eSubtitleWidget
scale = ((1,1),(1,1))
for substyle in c.findall("sub"):
get_attr = substyle.attrib.get
font = parseFont(get_attr("font"), scale)
col = get_attr("foregroundColor")
if col:
foregroundColor = parseColor(col)
haveColor = 1
else:
foregroundColor = gRGB(0xFFFFFF)
haveColor = 0
col = get_attr("borderColor")
if col:
borderColor = parseColor(col)
else:
borderColor = gRGB(0)
borderwidth = get_attr("borderWidth")
if borderwidth is None:
# default: use a subtitle border
borderWidth = 3
else:
borderWidth = int(borderwidth)
face = eSubtitleWidget.__dict__[get_attr("name")]
eSubtitleWidget.setFontStyle(face, font, haveColor, foregroundColor, borderColor, borderWidth)
for windowstyle in skin.findall("windowstyle"):
style = eWindowStyleSkinned()
style_id = windowstyle.attrib.get("id")
if style_id:
style_id = int(style_id)
else:
style_id = 0
# defaults
font = gFont("Regular", 20)
offset = eSize(20, 5)
for title in windowstyle.findall("title"):
get_attr = title.attrib.get
offset = parseSize(get_attr("offset"), ((1,1),(1,1)))
font = parseFont(get_attr("font"), ((1,1),(1,1)))
style.setTitleFont(font)
style.setTitleOffset(offset)
#print " ", font, offset
for borderset in windowstyle.findall("borderset"):
bsName = str(borderset.attrib.get("name"))
for pixmap in borderset.findall("pixmap"):
get_attr = pixmap.attrib.get
bpName = get_attr("pos")
filename = get_attr("filename")
if filename and bpName:
pngfile = resolveFilename(SCOPE_ACTIVE_SKIN, filename, path_prefix=path_prefix)
if fileExists(resolveFilename(SCOPE_SKIN_IMAGE, filename, path_prefix=path_prefix)):
pngfile = resolveFilename(SCOPE_SKIN_IMAGE, filename, path_prefix=path_prefix)
png = loadPixmap(pngfile, desktop)
style.setPixmap(eWindowStyleSkinned.__dict__[bsName], eWindowStyleSkinned.__dict__[bpName], png)
#print " borderset:", bpName, filename
for color in windowstyle.findall("color"):
get_attr = color.attrib.get
colorType = get_attr("name")
color = parseColor(get_attr("color"))
try:
style.setColor(eWindowStyleSkinned.__dict__["col" + colorType], color)
except:
raise SkinError("Unknown color %s" % colorType)
#pass
#print " color:", type, color
x = eWindowStyleManager.getInstance()
x.setStyle(style_id, style)
for margin in skin.findall("margin"):
style_id = margin.attrib.get("id")
if style_id:
style_id = int(style_id)
else:
style_id = 0
r = eRect(0,0,0,0)
v = margin.attrib.get("left")
if v:
r.setLeft(int(v))
v = margin.attrib.get("top")
if v:
r.setTop(int(v))
v = margin.attrib.get("right")
if v:
r.setRight(int(v))
v = margin.attrib.get("bottom")
if v:
r.setBottom(int(v))
# the "desktop" parameter is hardcoded to the UI screen, so we must ask
# for the one that this actually applies to.
getDesktop(style_id).setMargins(r)
dom_screens = {}
def loadSkin(name, scope = SCOPE_SKIN):
# Now a utility for plugins to add skin data to the screens
global dom_screens, display_skin_id
filename = resolveFilename(scope, name)
if fileExists(filename):
path = os.path.dirname(filename) + "/"
file = open(filename, 'r')
for elem in xml.etree.cElementTree.parse(file).getroot():
if elem.tag == 'screen':
name = elem.attrib.get('name', None)
if name:
sid = elem.attrib.get('id', None)
if sid and (sid != display_skin_id):
# not for this display
elem.clear()
continue
if name in dom_screens:
# Clear old versions, save memory
dom_screens[name][0].clear()
dom_screens[name] = (elem, path)
else:
elem.clear()
else:
elem.clear()
file.close()
def loadSkinData(desktop):
# Kinda hackish, but this is called once by mytest.py
global dom_skins
skins = dom_skins[:]
skins.reverse()
for (path, dom_skin) in skins:
loadSingleSkinData(desktop, dom_skin, path)
for elem in dom_skin:
if elem.tag == 'screen':
name = elem.attrib.get('name', None)
if name:
sid = elem.attrib.get('id', None)
if sid and (sid != display_skin_id):
# not for this display
elem.clear()
continue
if name in dom_screens:
# Kill old versions, save memory
dom_screens[name][0].clear()
dom_screens[name] = (elem, path)
else:
# without name, it's useless!
elem.clear()
else:
# non-screen element, no need for it any longer
elem.clear()
# no longer needed, we know where the screens are now.
#del dom_skins
#Developer Mode
dom_skins = []
class additionalWidget:
def __init__(self):
pass
# Class that makes a tuple look like something else. Some plugins just assume
# that size is a string and try to parse it. This class makes that work.
class SizeTuple(tuple):
def split(self, *args):
return str(self[0]), str(self[1])
def strip(self, *args):
return '%s,%s' % self
def __str__(self):
return '%s,%s' % self
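# Illustration (hypothetical values, not part of the original file): a size
# stored as SizeTuple((200, 100)) still unpacks like a normal (width, height)
# tuple, while .split() returns ('200', '100') and str() yields '200,100', so
# plugins that treat the size as a "w,h" string keep working.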
class SkinContext:
def __init__(self, parent=None, pos=None, size=None, font=None):
if parent is not None:
if pos is not None:
pos, size = parent.parse(pos, size, font)
self.x, self.y = pos
self.w, self.h = size
else:
self.x = None
self.y = None
self.w = None
self.h = None
def __str__(self):
return "Context (%s,%s)+(%s,%s) " % (self.x, self.y, self.w, self.h)
def parse(self, pos, size, font):
if pos == "fill":
pos = (self.x, self.y)
size = (self.w, self.h)
self.w = 0
self.h = 0
else:
w,h = size.split(',')
w = parseCoordinate(w, self.w, 0, font)
h = parseCoordinate(h, self.h, 0, font)
if pos == "bottom":
pos = (self.x, self.y + self.h - h)
size = (self.w, h)
self.h -= h
elif pos == "top":
pos = (self.x, self.y)
size = (self.w, h)
self.h -= h
self.y += h
elif pos == "left":
pos = (self.x, self.y)
size = (w, self.h)
self.x += w
self.w -= w
elif pos == "right":
pos = (self.x + self.w - w, self.y)
size = (w, self.h)
self.w -= w
else:
size = (w, h)
pos = pos.split(',')
pos = (self.x + parseCoordinate(pos[0], self.w, size[0], font), self.y + parseCoordinate(pos[1], self.h, size[1], font))
return SizeTuple(pos), SizeTuple(size)
class SkinContextStack(SkinContext):
# A context that stacks things instead of aligning them
def parse(self, pos, size, font):
if pos == "fill":
pos = (self.x, self.y)
size = (self.w, self.h)
else:
w,h = size.split(',')
w = parseCoordinate(w, self.w, 0, font)
h = parseCoordinate(h, self.h, 0, font)
if pos == "bottom":
pos = (self.x, self.y + self.h - h)
size = (self.w, h)
elif pos == "top":
pos = (self.x, self.y)
size = (self.w, h)
elif pos == "left":
pos = (self.x, self.y)
size = (w, self.h)
elif pos == "right":
pos = (self.x + self.w - w, self.y)
size = (w, self.h)
else:
size = (w, h)
pos = pos.split(',')
pos = (self.x + parseCoordinate(pos[0], self.w, size[0], font), self.y + parseCoordinate(pos[1], self.h, size[1], font))
return SizeTuple(pos), SizeTuple(size)
def readSkin(screen, skin, names, desktop):
if config.atemio.skindevelopermode.value:
		# it's not elegant and has low performance... but it's great for skin development!
addSkin(config.skin.primary_skin.value)
loadSkinData(desktop)
if not isinstance(names, list):
names = [names]
	# try all skins, the first existing one has priority
global dom_screens
for n in names:
myscreen, path = dom_screens.get(n, (None,None))
if myscreen is not None:
# use this name for debug output
name = n
break
else:
name = "<embedded-in-'%s'>" % screen.__class__.__name__
# otherwise try embedded skin
if myscreen is None:
myscreen = getattr(screen, "parsedSkin", None)
# try uncompiled embedded skin
if myscreen is None and getattr(screen, "skin", None):
skin = screen.skin
print "[SKIN] Parsing embedded skin", name
if isinstance(skin, tuple):
for s in skin:
candidate = xml.etree.cElementTree.fromstring(s)
if candidate.tag == 'screen':
sid = candidate.attrib.get('id', None)
if (not sid) or (int(sid) == display_skin_id):
myscreen = candidate
break
else:
print "[SKIN] Hey, no suitable screen!"
else:
myscreen = xml.etree.cElementTree.fromstring(skin)
if myscreen:
screen.parsedSkin = myscreen
if myscreen is None:
print "[SKIN] No skin to read..."
myscreen = screen.parsedSkin = xml.etree.cElementTree.fromstring("<screen></screen>")
screen.skinAttributes = [ ]
skin_path_prefix = getattr(screen, "skin_path", path)
context = SkinContextStack()
s = desktop.bounds()
context.x = s.left()
context.y = s.top()
context.w = s.width()
context.h = s.height()
del s
collectAttributes(screen.skinAttributes, myscreen, context, skin_path_prefix, ignore=("name",))
context = SkinContext(context, myscreen.attrib.get('position'), myscreen.attrib.get('size'))
screen.additionalWidgets = [ ]
screen.renderer = [ ]
visited_components = set()
# now walk all widgets and stuff
def process_none(widget, context):
pass
def process_widget(widget, context):
get_attr = widget.attrib.get
# ok, we either have 1:1-mapped widgets ('old style'), or 1:n-mapped
# widgets (source->renderer).
wname = get_attr('name')
wsource = get_attr('source')
if wname is None and wsource is None:
print "widget has no name and no source!"
return
if wname:
#print "Widget name=", wname
visited_components.add(wname)
# get corresponding 'gui' object
try:
attributes = screen[wname].skinAttributes = [ ]
except:
raise SkinError("component with name '" + wname + "' was not found in skin of screen '" + name + "'!")
# assert screen[wname] is not Source
collectAttributes(attributes, widget, context, skin_path_prefix, ignore=('name',))
elif wsource:
# get corresponding source
#print "Widget source=", wsource
while True: # until we found a non-obsolete source
				# parse our current "wsource", which might specify a "related screen" before the dot,
# for example to reference a parent, global or session-global screen.
scr = screen
# resolve all path components
path = wsource.split('.')
while len(path) > 1:
scr = screen.getRelatedScreen(path[0])
if scr is None:
#print wsource
#print name
raise SkinError("specified related screen '" + wsource + "' was not found in screen '" + name + "'!")
path = path[1:]
# resolve the source.
source = scr.get(path[0])
if isinstance(source, ObsoleteSource):
# however, if we found an "obsolete source", issue warning, and resolve the real source.
print "WARNING: SKIN '%s' USES OBSOLETE SOURCE '%s', USE '%s' INSTEAD!" % (name, wsource, source.new_source)
print "OBSOLETE SOURCE WILL BE REMOVED %s, PLEASE UPDATE!" % source.removal_date
if source.description:
print source.description
wsource = source.new_source
else:
# otherwise, use that source.
break
if source is None:
raise SkinError("source '" + wsource + "' was not found in screen '" + name + "'!")
wrender = get_attr('render')
if not wrender:
raise SkinError("you must define a renderer with render= for source '%s'" % wsource)
for converter in widget.findall("convert"):
ctype = converter.get('type')
assert ctype, "'convert'-tag needs a 'type'-attribute"
#print "Converter:", ctype
try:
parms = converter.text.strip()
except:
parms = ""
#print "Params:", parms
converter_class = my_import('.'.join(("Components", "Converter", ctype))).__dict__.get(ctype)
c = None
for i in source.downstream_elements:
if isinstance(i, converter_class) and i.converter_arguments == parms:
c = i
if c is None:
c = converter_class(parms)
c.connect(source)
source = c
renderer_class = my_import('.'.join(("Components", "Renderer", wrender))).__dict__.get(wrender)
renderer = renderer_class() # instantiate renderer
renderer.connect(source) # connect to source
attributes = renderer.skinAttributes = [ ]
collectAttributes(attributes, widget, context, skin_path_prefix, ignore=('render', 'source'))
screen.renderer.append(renderer)
def process_applet(widget, context):
try:
codeText = widget.text.strip()
widgetType = widget.attrib.get('type')
code = compile(codeText, "skin applet", "exec")
except Exception, ex:
raise SkinError("applet failed to compile: " + str(ex))
if widgetType == "onLayoutFinish":
screen.onLayoutFinish.append(code)
else:
raise SkinError("applet type '%s' unknown!" % widgetType)
def process_elabel(widget, context):
w = additionalWidget()
w.widget = eLabel
w.skinAttributes = [ ]
collectAttributes(w.skinAttributes, widget, context, skin_path_prefix, ignore=('name',))
screen.additionalWidgets.append(w)
def process_epixmap(widget, context):
w = additionalWidget()
w.widget = ePixmap
w.skinAttributes = [ ]
collectAttributes(w.skinAttributes, widget, context, skin_path_prefix, ignore=('name',))
screen.additionalWidgets.append(w)
def process_screen(widget, context):
for w in widget.getchildren():
conditional = w.attrib.get('conditional')
if conditional and not [i for i in conditional.split(",") if i in screen.keys()]:
continue
p = processors.get(w.tag, process_none)
try:
p(w, context)
except SkinError, e:
print "[Skin] SKIN ERROR in screen '%s' widget '%s':" % (name, w.tag), e
def process_panel(widget, context):
n = widget.attrib.get('name')
if n:
try:
s = dom_screens[n]
except KeyError:
print "[SKIN] Unable to find screen '%s' referred in screen '%s'" % (n, name)
else:
process_screen(s[0], context)
layout = widget.attrib.get('layout')
if layout == 'stack':
cc = SkinContextStack
else:
cc = SkinContext
try:
c = cc(context, widget.attrib.get('position'), widget.attrib.get('size'), widget.attrib.get('font'))
except Exception, ex:
raise SkinError("Failed to create skincontext (%s,%s,%s) in %s: %s" % (widget.attrib.get('position'), widget.attrib.get('size'), widget.attrib.get('font'), context, ex) )
process_screen(widget, c)
processors = {
None: process_none,
"widget": process_widget,
"applet": process_applet,
"eLabel": process_elabel,
"ePixmap": process_epixmap,
"panel": process_panel
}
try:
context.x = 0 # reset offsets, all components are relative to screen
context.y = 0 # coordinates.
process_screen(myscreen, context)
except Exception, e:
print "[Skin] SKIN ERROR in %s:" % name, e
from Components.GUIComponent import GUIComponent
nonvisited_components = [x for x in set(screen.keys()) - visited_components if isinstance(x, GUIComponent)]
assert not nonvisited_components, "the following components in %s don't have a skin entry: %s" % (name, ', '.join(nonvisited_components))
# This may look pointless, but it unbinds 'screen' from the nested scope. A better
# solution is to avoid the nested scope above and use the context object to pass
# things around.
screen = None
visited_components = None
| gpl-2.0 | 5,488,487,470,184,430,000 | 32.465465 | 231 | 0.679559 | false |
filannim/ManTIME | mantime/settings.py | 1 | 1226 | #!/usr/bin/env python
#
# Copyright 2014 Michele Filannino
#
# gnTEAM, School of Computer Science, University of Manchester.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU General Public License.
#
# author: Michele Filannino
# email: [email protected]
#
# For details, see www.cs.man.ac.uk/~filannim/
'''This file contains some absolute paths you need to customize according to
your installation.'''
import os
HOME = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
LANGUAGE = 'english'
PATH_CRF_PP_ENGINE_TRAIN = os.path.abspath(os.environ['MANTIME_CRF_TRAIN'])
PATH_CRF_PP_ENGINE_TEST = os.path.abspath(os.environ['MANTIME_CRF_TEST'])
PATH_CORENLP_FOLDER = os.path.abspath(os.environ['MANTIME_CORENLP_FOLDER'])
PATH_CRF_CONSISTENCY_MODULE = HOME + 'components/make_consistent.py'
PATH_CRF_ADJUSTMENT_MODULE = HOME + 'components/make_adjusted.py'
PATH_MODEL_FOLDER = './models'
EVENT_ATTRIBUTES = ('class', 'pos', 'tense', 'aspect', 'polarity', 'modality')
# EVENT_ATTRIBUTES = ('type', 'polarity', 'modality', 'sec_time_rel')
NO_ATTRIBUTE = 'n/a'
GAZETTEER_FOLDER = 'mantime/data/gazetteer/'
SENTENCE_WINDOW_RELATION = 1
| bsd-2-clause | 6,687,968,761,473,981,000 | 37.3125 | 78 | 0.730016 | false |
seomoz/roger-mesos-tools | bin/roger.py | 1 | 2779 | #!/usr/bin/python
from __future__ import print_function
import os
import sys
import subprocess
import re
import importlib
from cli.utils import Utils
def print_help_opt(opt, desc):
print(" {} {}".format(opt.ljust(13), desc))
def roger_help(root, commands):
print("usage: roger [-h] [-v] command [arg...]\n")
print("a command line interface to work with roger mesos.")
print("\npositional arguments:")
print_help_opt("command", "command to run.")
print_help_opt("arg", "arguments to pass to the command.")
print("\noptional arguments:")
print_help_opt("-h, --help", "show this help message and exit.")
print_help_opt("-v, --version", "show version information and exit.")
print("\ncommands:")
sys.path.append("{}/cli".format(root))
for command in commands:
description = ""
module_name = "roger_" + command
cmd_module = importlib.import_module(module_name)
try:
description = cmd_module.describe()
except Exception as e:
pass
print_help_opt(command, description)
    print("\nrun: 'roger <command> -h' for more information on a command.")
def getFiles(directory):
filenames = next(os.walk(directory))[2]
return filenames
def getCommands(files):
commands = set()
for filename in files:
if filename.startswith("roger_"):
commands.add(re.split("roger_|\.", filename)[1])
return sorted(commands)
def getScriptCall(root, command, command_args):
script_call = "roger_{}.py".format(command)
for command_arg in command_args:
script_call = script_call + " {}".format(command_arg)
return script_call
def main():
root = ''
utilsObj = Utils()
own_dir = os.path.dirname(os.path.realpath(__file__))
root = os.path.abspath(os.path.join(own_dir, os.pardir))
files = getFiles("{}/cli/".format(root))
commands = getCommands(files)
if len(sys.argv) > 1:
if sys.argv[1] == "-h" or sys.argv[1] == "--help":
roger_help(root, commands)
elif sys.argv[1] == "-v" or sys.argv[1] == "--version":
version = utilsObj.roger_version(root)
print(version)
else:
command = sys.argv[1]
command_args = sys.argv[2:]
if command in commands:
print("root: {} command: {} args: {}".format(
root, command, command_args
))
script_call = getScriptCall(root, command, command_args)
os.system(script_call)
else:
raise SystemExit("Command is not valid. Exiting.")
else:
raise SystemExit("No arguments found. Please refer to usage: roger -h")
if __name__ == "__main__":
main()
| apache-2.0 | -4,948,606,142,957,857,000 | 30.224719 | 79 | 0.592659 | false |
rkh/travispy | setup.py | 1 | 1655 | from setuptools import setup
from setuptools.command.test import test as TestCommand
import sys
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(['travispy'] + self.pytest_args)
sys.exit(errno)
setup(
name='TravisPy',
version='0.1.1',
packages=['travispy', 'travispy.entities'],
install_requires=[x.strip() for x in open('requirements.txt')],
# metadata for upload to PyPI
author='Fabio Menegazzo',
author_email='[email protected]',
description='Python API for Travis CI.',
long_description=open('README.rst').read(),
license='GPL',
keywords='travis ci continuous integration travisci',
url='http://menegazzo.github.io/travispy/',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: PyPy',
],
# tests
tests_require=['pytest'],
cmdclass = {'test': PyTest},
)
| gpl-3.0 | -2,048,252,582,661,187,300 | 29.090909 | 75 | 0.622961 | false |
jegger/kivy | kivy/tests/test_uix_textinput.py | 1 | 2204 | '''
uix.textinput tests
========================
'''
import unittest
from kivy.tests.common import GraphicUnitTest
from kivy.uix.textinput import TextInput
class TextInputTest(unittest.TestCase):
def test_focusable_when_disabled(self):
ti = TextInput()
ti.disabled = True
ti.focused = True
ti.bind(focus=self.on_focused)
def on_focused(self, instance, value):
self.assertTrue(instance.focused, value)
def test_wordbreak(self):
self.test_txt = "Firstlongline\n\nSecondveryverylongline"
ti = TextInput(width='30dp', size_hint_x=None)
ti.bind(text=self.on_text)
ti.text = self.test_txt
def on_text(self, instance, value):
# Check if text is modified while recreating from lines and lines_flags
self.assertEquals(instance.text, self.test_txt)
# Check if wordbreaking is correctly done
# If so Secondvery... should start from the 7th line
pos_S = self.test_txt.index('S')
self.assertEquals(instance.get_cursor_from_index(pos_S), (0, 6))
class TextInputGraphicTest(GraphicUnitTest):
def test_text_validate(self):
ti = TextInput(multiline=False)
ti.focus = True
self.render(ti)
self.assertFalse(ti.multiline)
self.assertTrue(ti.focus)
self.assertTrue(ti.text_validate_unfocus)
ti.validate_test = None
ti.bind(on_text_validate=lambda *_: setattr(
ti, 'validate_test', True
))
ti._key_down(
(
None, # displayed_str
None, # internal_str
'enter', # internal_action
1 # scale
),
repeat=False
)
self.assertTrue(ti.validate_test)
self.assertFalse(ti.focus)
ti.validate_test = None
ti.text_validate_unfocus = False
ti.focus = True
self.assertTrue(ti.focus)
ti._key_down(
(None, None, 'enter', 1),
repeat=False
)
self.assertTrue(ti.validate_test)
self.assertTrue(ti.focus)
if __name__ == '__main__':
import unittest
unittest.main()
| mit | -2,391,985,931,788,283,000 | 25.878049 | 79 | 0.582123 | false |
AllanYangZhou/oppia | core/domain/collection_jobs_one_off.py | 1 | 3306 | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""One-off jobs for collections."""
import logging
from core import jobs
from core.domain import collection_domain
from core.domain import collection_services
from core.platform import models
import feconf
(base_models, collection_models,) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.collection])
class CollectionMigrationJob(jobs.BaseMapReduceOneOffJobManager):
"""A reusable one-time job that may be used to migrate collection schema
versions. This job will load all existing collections from the data store
and immediately store them back into the data store. The loading process of
a collection in collection_services automatically performs schema updating.
This job persists that conversion work, keeping collections up-to-date and
improving the load time of new collections.
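
    Illustrative sketch (assumption, not part of the original file): a one-off
    job like this is normally started from an admin control along the lines of

        job_id = CollectionMigrationJob.create_new()
        CollectionMigrationJob.enqueue(job_id)

    where the exact job-manager method names above are assumptions.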
"""
_DELETED_KEY = 'collection_deleted'
_ERROR_KEY = 'validation_error'
_MIGRATED_KEY = 'collection_migrated'
@classmethod
def entity_classes_to_map_over(cls):
return [collection_models.CollectionModel]
@staticmethod
def map(item):
if item.deleted:
yield (
CollectionMigrationJob._DELETED_KEY,
'Encountered deleted collection.')
return
# Note: the read will bring the collection up to the newest version.
collection = collection_services.get_collection_by_id(item.id)
try:
collection.validate(strict=False)
except Exception as e:
logging.error(
'Collection %s failed validation: %s' % (item.id, e))
yield (
CollectionMigrationJob._ERROR_KEY,
'Collection %s failed validation: %s' % (item.id, e))
return
# Write the new collection into the datastore if it's different from
# the old version.
if item.schema_version <= feconf.CURRENT_COLLECTION_SCHEMA_VERSION:
commit_cmds = [{
'cmd': collection_domain.CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION,
'from_version': item.schema_version,
'to_version': str(
feconf.CURRENT_COLLECTION_SCHEMA_VERSION)
}]
collection_services.update_collection(
feconf.MIGRATION_BOT_USERNAME, item.id, commit_cmds,
'Update collection schema version to %d.' % (
feconf.CURRENT_COLLECTION_SCHEMA_VERSION))
yield (
CollectionMigrationJob._MIGRATED_KEY,
'Collection successfully migrated.')
@staticmethod
def reduce(key, values):
yield (key, values)
| apache-2.0 | 595,232,431,969,137,900 | 37 | 79 | 0.662432 | false |
UniPiTechnology/evok | tornadorpc_evok/base.py | 1 | 13270 | """
============================
Base RPC Handler for Tornado
============================
This is a basic server implementation, designed for use within the
Tornado framework. The classes in this library should not be used
directly, but rather through the XML or JSON RPC implementations.
You can use the utility functions like 'private' and 'start_server'.
"""
from tornado.web import RequestHandler
import tornado.web
import tornado.ioloop
import tornado.httpserver
from tornado.concurrent import Future, TracebackFuture
from tornado import gen
from tornado.stack_context import ExceptionStackContext, run_with_stack_context
import types
import traceback
from tornadorpc_evok.utils import getcallargs
# Configuration element
class Config(object):
verbose = True
short_errors = True
config = Config()
class BaseRPCParser(object):
"""
This class is responsible for managing the request, dispatch,
and response formatting of the system. It is tied into the
_RPC_ attribute of the BaseRPCHandler (or subclasses) and
populated as necessary throughout the request. Use the
.faults attribute to take advantage of the built-in error
codes.
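
    Usage sketch (assumption, not from the original source): a protocol
    implementation typically instantiates the parser with its RPC library,
    e.g. BaseRPCParser(xmlrpclib), which falls back to xmlrpclib.dumps /
    xmlrpclib.loads for encoding and decoding, and reports bad input from
    parse_request() via self.faults.parse_error().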
"""
content_type = 'text/plain'
def __init__(self, library, encode=None, decode=None):
# Attaches the RPC library and encode / decode functions.
self.library = library
if not encode:
encode = getattr(library, 'dumps')
if not decode:
decode = getattr(library, 'loads')
self.encode = encode
self.decode = decode
self.requests_in_progress = 0
self.responses = []
@property
def faults(self):
# Grabs the fault tree on request
return Faults(self)
def response(self, handler):
"""
This is the callback for a single finished dispatch.
Once all the dispatches have been run, it calls the
parser library to parse responses and then calls the
handler's async method.
"""
handler._requests -= 1
if handler._requests > 0:
return
# We are finished with requests, send response
if handler._RPC_finished:
# We've already sent the response
raise Exception("Error trying to send response twice.")
handler._RPC_finished = True
responses = tuple(handler._results)
response_text = self.parse_responses(responses)
if type(response_text) not in types.StringTypes:
# Likely a fault, or something messed up
response_text = self.encode(response_text)
# Calling the async callback
handler.on_result(response_text)
def traceback(self, method_name='REQUEST', params=[]):
err_lines = traceback.format_exc().splitlines()
err_title = "ERROR IN %s" % method_name
if len(params) > 0:
err_title = '%s - (PARAMS: %s)' % (err_title, repr(params))
err_sep = ('-'*len(err_title))[:79]
err_lines = [err_sep, err_title, err_sep]+err_lines
if config.verbose:
if len(err_lines) >= 7 and config.short_errors:
# Minimum number of lines to see what happened
# Plus title and separators
print '\n'.join(err_lines[0:4]+err_lines[-3:])
else:
print '\n'.join(err_lines)
# Log here
return
def parse_request(self, request_body):
"""
Extend this on the implementing protocol. If it
should error out, return the output of the
'self.faults.fault_name' response. Otherwise,
it MUST return a TUPLE of TUPLE. Each entry
tuple must have the following structure:
('method_name', params)
...where params is a list or dictionary of
arguments (positional or keyword, respectively.)
So, the result should look something like
the following:
( ('add', [5,4]), ('add', {'x':5, 'y':4}) )
"""
return ([], [])
def parse_responses(self, responses):
"""
Extend this on the implementing protocol. It must
return a response that can be returned as output to
the client.
"""
return self.encode(responses, methodresponse=True)
def check_method(self, attr_name, obj):
"""
        Checks that an attribute is not private (either via the @private
        decorator or a leading underscore) and returns the attribute itself;
        raises AssertionError otherwise.
"""
assert(not attr_name.startswith('_'))
attr = getattr(obj, attr_name)
assert( not getattr(attr, 'private', False))
return attr
class BaseRPCHandler(RequestHandler):
"""
This is the base handler to be subclassed by the actual
implementations and by the end user.
"""
_RPC_ = None
#_requests = 1
rpcrequests = None
_error = None
_RPC_finished = False
def prepare(self):
"""
Parse request_body, prepares self.rpcrequest
On error call finish or set self._error - to be serialized by export procedure
"""
try:
requests = self._RPC_.parse_request(self.request.body)
if not isinstance(requests, types.TupleType):
# SHOULD be the result of a fault call,
                # according to the parse_request spec below.
if isinstance(requests, basestring):
# Should be the response text of a fault
# This will break in Python 3.x
self.finish(requests)
elif hasattr(requests, 'response'):
# Fault types should have a 'response' method
self.finish(requests.response())
elif hasattr(requests, 'faultCode'):
# XML-RPC fault types need to be properly dispatched. This
# should only happen if there was an error parsing the
self._error = requests
else:
# No idea, hopefully the handler knows what it is doing.
self.finish(requests)
return
self.rpcrequests = requests
except (AttributeError,Exception):
self._RPC_.traceback()
self._error = self._RPC_.faults.parse_error()
@tornado.web.asynchronous
@gen.coroutine
def post(self):
# Dispatches request methods
# rpcrequests are prepared in self.prepare()
if self._error:
responses = (self._error,)
else:
futures = [self._dispatch(method, args) for method,args in self.rpcrequests ]
if len(futures) == 1:
response = yield futures[0]
responses = (response,)
else:
responses = yield futures
responses = tuple(responses)
response_text = self._RPC_.parse_responses(responses)
self.set_header('Content-Type', self._RPC_.content_type)
self.finish(response_text)
#self._RPC_.run(self, request_body)
@gen.coroutine
def _dispatch(self, method_name, params):
"""
This method walks the attribute tree in the method
and passes the parameters, either in positional or
keyword form, into the appropriate method on the
Handler class. Currently supports only positional
or keyword arguments, not mixed.
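
        Example of the walk (based on TestRPCHandler/TestMethodTree below):
        a call to "tree.power" with params [5, 3] resolves self.tree.power
        and invokes it as self.tree.power(5, 3).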
"""
try:
assert(not hasattr(RequestHandler, method_name))
print method_name
method = self
method_list = dir(method)
method_list.sort()
attr_tree = method_name.split('.')
for attr_name in attr_tree:
method = self._RPC_.check_method(attr_name, method)
assert(callable(method))
assert(not method_name.startswith('_'))
assert(not getattr(method, 'private', False))
except Exception,e :
raise gen.Return(self._RPC_.faults.method_not_found())
args = []
kwargs = {}
try:
if isinstance(params, dict):
# The parameters are keyword-based
kwargs = params
elif type(params) in (list, tuple):
# The parameters are positional
args = params
else:
# Bad argument formatting?
raise Exception()
# Validating call arguments
final_kwargs, extra_args = getcallargs(method, *args, **kwargs)
except Exception:
raise gen.Return(self._RPC_.faults.invalid_params())
try:
if getattr(method, 'coroutine', False):
method=tornado.gen.coroutine(method)
response = yield method(*extra_args, **final_kwargs)
else:
response = method(*extra_args, **final_kwargs)
except Exception:
self._RPC_.traceback(method_name, params)
raise gen.Return(self._RPC_.faults.internal_error())
raise gen.Return(response)
class FaultMethod(object):
"""
This is the 'dynamic' fault method so that the message can
be changed on request from the parser.faults call.
"""
def __init__(self, fault, code, message):
self.fault = fault
self.code = code
self.message = message
def __call__(self, message=None):
if message:
self.message = message
return self.fault(self.code, self.message)
class Faults(object):
"""
This holds the codes and messages for the RPC implementation.
It is attached (dynamically) to the Parser when called via the
parser.faults query, and returns a FaultMethod to be called so
that the message can be changed. If the 'dynamic' attribute is
not a key in the codes list, then it will error.
USAGE:
parser.fault.parse_error('Error parsing content.')
If no message is passed in, it will check the messages dictionary
for the same key as the codes dict. Otherwise, it just prettifies
the code 'key' from the codes dict.
"""
codes = {
'parse_error': -32700,
'method_not_found': -32601,
'invalid_request': -32600,
'invalid_params': -32602,
'internal_error': -32603
}
messages = {}
def __init__(self, parser, fault=None):
self.library = parser.library
self.fault = fault
if not self.fault:
self.fault = getattr(self.library, 'Fault')
def __getattr__(self, attr):
message = 'Error'
if attr in self.messages.keys():
message = self.messages[attr]
else:
message = ' '.join(map(str.capitalize, attr.split('_')))
fault = FaultMethod(self.fault, self.codes[attr], message)
return fault
"""
Utility Functions
"""
def private(func):
"""
Use this to make a method private.
It is intended to be used as a decorator.
If you wish to make a method tree private, just
create and set the 'private' variable to True
on the tree object itself.
"""
func.private = True
return func
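# Usage sketch (illustration only; MyHandler and internal_helper are
# hypothetical names):
#
#   class MyHandler(BaseRPCHandler):
#       @private
#       def internal_helper(self):
#           return "not exposed over RPC"
#
# TestMethodTree.private below shows the same pattern in this file.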
#def async(func):
# """
# Use this to make a method asynchronous
# It is intended to be used as a decorator.
# Make sure you call "self.result" on any
# async method. Also, trees do not currently
# support async methods.
# """
# func.async = True
# return func
def coroutine(func):
func.coroutine = True
return func
def start_server(handlers, route=r'/', port=8080):
"""
This is just a friendly wrapper around the default
Tornado instantiation calls. It simplifies the imports
and setup calls you'd make otherwise.
USAGE:
start_server(handler_class, route=r'/', port=8181)
"""
if type(handlers) not in (types.ListType, types.TupleType):
handler = handlers
handlers = [(route, handler)]
if route != '/RPC2':
# friendly addition for /RPC2 if it's the only one
handlers.append(('/RPC2', handler))
application = tornado.web.Application(handlers)
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(port)
loop_instance = tornado.ioloop.IOLoop.instance()
""" Setting the '_server' attribute if not set """
for (route, handler) in handlers:
try:
setattr(handler, '_server', loop_instance)
except AttributeError:
handler._server = loop_instance
loop_instance.start()
return loop_instance
"""
The following is a test implementation which should work
for both the XMLRPC and the JSONRPC clients.
"""
class TestMethodTree(object):
def power(self, x, y=2):
return pow(x, y)
@private
def private(self):
# Shouldn't be called
return False
class TestRPCHandler(BaseRPCHandler):
_RPC_ = None
def add(self, x, y):
return x+y
def ping(self, x):
return x
def noargs(self):
return 'Works!'
tree = TestMethodTree()
def _private(self):
# Shouldn't be called
return False
@private
def private(self):
# Also shouldn't be called
return False
| apache-2.0 | 1,557,004,587,630,101,800 | 31.208738 | 89 | 0.597589 | false |
0verchenko/Utils | googleapis_mock.py | 1 | 5420 | import random
import json
import datetime
import ssl
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
def change_host_file_configuration(for_disabling=False, path_to_hosts_file='C:\\Windows\\System32\\drivers\\etc\\hosts'):
def check_if_hosts_file_hacked(path='C:\\Windows\\System32\\drivers\\etc\\hosts'):
with open(path, 'r') as target_file:
target_file_content = target_file.read()
if 'android.googleapis.com' in target_file_content:
return True
else:
return False
try:
if for_disabling:
if not check_if_hosts_file_hacked(path=path_to_hosts_file):
print('The "android.googleapis.com" record not in hosts file.')
return True
else:
with open(path_to_hosts_file, 'r') as hosts_file:
hosts_file_content = hosts_file.readlines()
with open(path_to_hosts_file, 'w') as hosts_file:
for line in hosts_file_content:
if 'android.googleapis.com' not in line:
hosts_file.write(line)
if not check_if_hosts_file_hacked(path=path_to_hosts_file):
print('The "android.googleapis.com" record was removed from hosts file.')
return True
else:
if not check_if_hosts_file_hacked(path=path_to_hosts_file):
with open(path_to_hosts_file, 'a') as hosts_file:
################################################################################
################################################################################
# Server ip that will be GCM imitator type below:
################################################################################
################################################################################
hosts_file.write('127.0.0.1 android.googleapis.com\n')
if check_if_hosts_file_hacked(path=path_to_hosts_file):
print('The "android.googleapis.com" record was added to hosts file.')
return True
else:
print('The "android.googleapis.com" record is already in hosts file.')
return False
except IOError:
print('Unable to check/modify hosts file.')
return False
class Responce_Sender(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'application/json; charset=UTF-8')
self.send_header('Date', '%s' % datetime.datetime.now().strftime('%a, %d %b %Y %H:%M:%S GMT'))
self.send_header('Expires', '%s' % datetime.datetime.now().strftime('%a, %d %b %Y %H:%M:%S GMT'))
self.send_header('Cache-Control', 'private, max-age=0')
self.send_header('X-Content-Type-Options', 'nosniff')
self.send_header('X-Frame-Options', 'SAMEORIGIN')
self.send_header('Server', 'GSE')
self.send_header('Alt-Svc', 'quic=":443"; ma=2592000; v="39,38,37,35"')
self.send_header('Accept-Ranges', 'none')
self.send_header('Vary', 'Accept-Encoding')
self.end_headers()
def do_GET(self):
self._set_headers()
self.wfile.write("<html><body><h1>hi!</h1></body></html>")
def do_HEAD(self):
self._set_headers()
def do_POST(self):
print self.path
content_len = int(self.headers.getheader('content-length', 0))
post_body = self.rfile.read(content_len)
# print post_body
try:
json_request = eval(post_body)
number_of_tokens = len(json_request['registration_ids'])
print('number of tokens:%s' % number_of_tokens)
except:
print 'error happened'
with open('logfile.txt', 'a') as logfile:
logfile.write(post_body)
return
self._set_headers()
multicast_id = random.randint(1000000000000000000, 9999999999999999999)
message_id = int(str(multicast_id)[:16])
post_responce = {
"multicast_id": multicast_id,
"success": number_of_tokens,
"failure":0,
"canonical_ids":0,
"results": []}
for x in range(number_of_tokens):
post_responce["results"].append({"message_id": "0:{message_id}%8ad5829ef9fd7ecd".format(message_id=message_id + x)})
        print('Sending response for %s tokens' % number_of_tokens)
self.wfile.write(json.dumps(post_responce))
def run(server_class=HTTPServer, handler_class=Responce_Sender, port=2195):
server_address = ('', port)
httpd = server_class(server_address, handler_class)
httpd.socket = ssl.wrap_socket(httpd.socket, certfile='server.pem', server_side=True)
print '%s - starting httpd...' % datetime.datetime.now().strftime('%d %b %Y %H:%M:%S')
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print '\t%s - stopping httpd.' % datetime.datetime.now().strftime('%d %b %Y %H:%M:%S')
if __name__ == "__main__":
from sys import argv
change_host_file_configuration()
print('Starting http mock')
if len(argv) == 2:
run(port=int(argv[1]))
else:
run()
| apache-2.0 | -5,844,384,114,319,613,000 | 43.065041 | 128 | 0.537823 | false |
softelnet/sponge | sponge-kb/sponge-kb-designer/src/main/resources/sponge/designer/storage.py | 1 | 1331 | """
Sponge Knowledge Base
WARNING: THIS IS A WORK IN PROGRESS!
"""
from org.openksavi.sponge.core.action import BaseActionMeta, BaseActionBuilder
from java.util.concurrent.atomic import AtomicInteger
from java.util.concurrent import CopyOnWriteArrayList
import re
def onInit():
global STORAGE
STORAGE = createStorage()
def createStorage():
storage = Storage()
storage.addAction(BaseActionBuilder("Echo").withLabel("Echo").withArgs([StringType("text").withLabel("Text")]).getMeta())
return storage
class Storage:
def __init__(self):
self.actions = CopyOnWriteArrayList()
self.currentId = AtomicInteger(0)
def addAction(self, actionMeta):
if list(filter(lambda action: action.name == actionMeta.name, self.actions)):
raise Exception("The action {} has already been added".format(actionMeta.name))
self.actions.add(actionMeta)
def getAction(self, name):
return filter(lambda action: action.name == name, self.actions)[0]
def updateAction(self, name, actionMeta):
action = self.getAction(name)
action.name = actionMeta.name
action.label = actionMeta.label
action.description = actionMeta.description
action.callable = actionMeta.callable
action.activatable = actionMeta.activatable
| apache-2.0 | 5,470,633,246,581,718,000 | 29.953488 | 125 | 0.700225 | false |
spacy-io/spaCy | spacy/tests/test_cli.py | 1 | 16278 | import pytest
from click import NoSuchOption
from spacy.training import docs_to_json, offsets_to_biluo_tags
from spacy.training.converters import iob_to_docs, conll_ner_to_docs, conllu_to_docs
from spacy.schemas import ProjectConfigSchema, RecommendationSchema, validate
from spacy.lang.nl import Dutch
from spacy.util import ENV_VARS
from spacy.cli import info
from spacy.cli.init_config import init_config, RECOMMENDATIONS
from spacy.cli._util import validate_project_commands, parse_config_overrides
from spacy.cli._util import load_project_config, substitute_project_variables
from spacy.cli._util import string_to_list
from thinc.api import ConfigValidationError, Config
import srsly
import os
from .util import make_tempdir
def test_cli_info():
nlp = Dutch()
nlp.add_pipe("textcat")
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
raw_data = info(tmp_dir, exclude=[""])
assert raw_data["lang"] == "nl"
assert raw_data["components"] == ["textcat"]
def test_cli_converters_conllu_to_docs():
# from NorNE: https://github.com/ltgoslo/norne/blob/3d23274965f513f23aa48455b28b1878dad23c05/ud/nob/no_bokmaal-ud-dev.conllu
lines = [
"1\tDommer\tdommer\tNOUN\t_\tDefinite=Ind|Gender=Masc|Number=Sing\t2\tappos\t_\tO",
"2\tFinn\tFinn\tPROPN\t_\tGender=Masc\t4\tnsubj\t_\tB-PER",
"3\tEilertsen\tEilertsen\tPROPN\t_\t_\t2\tname\t_\tI-PER",
"4\tavstår\tavstå\tVERB\t_\tMood=Ind|Tense=Pres|VerbForm=Fin\t0\troot\t_\tO",
]
input_data = "\n".join(lines)
converted_docs = list(conllu_to_docs(input_data, n_sents=1))
assert len(converted_docs) == 1
converted = [docs_to_json(converted_docs)]
assert converted[0]["id"] == 0
assert len(converted[0]["paragraphs"]) == 1
assert len(converted[0]["paragraphs"][0]["sentences"]) == 1
sent = converted[0]["paragraphs"][0]["sentences"][0]
assert len(sent["tokens"]) == 4
tokens = sent["tokens"]
assert [t["orth"] for t in tokens] == ["Dommer", "Finn", "Eilertsen", "avstår"]
assert [t["tag"] for t in tokens] == ["NOUN", "PROPN", "PROPN", "VERB"]
assert [t["head"] for t in tokens] == [1, 2, -1, 0]
assert [t["dep"] for t in tokens] == ["appos", "nsubj", "name", "ROOT"]
ent_offsets = [
(e[0], e[1], e[2]) for e in converted[0]["paragraphs"][0]["entities"]
]
biluo_tags = offsets_to_biluo_tags(converted_docs[0], ent_offsets, missing="O")
assert biluo_tags == ["O", "B-PER", "L-PER", "O"]
@pytest.mark.parametrize(
"lines",
[
(
"1\tDommer\tdommer\tNOUN\t_\tDefinite=Ind|Gender=Masc|Number=Sing\t2\tappos\t_\tname=O",
"2\tFinn\tFinn\tPROPN\t_\tGender=Masc\t4\tnsubj\t_\tSpaceAfter=No|name=B-PER",
"3\tEilertsen\tEilertsen\tPROPN\t_\t_\t2\tname\t_\tname=I-PER",
"4\tavstår\tavstå\tVERB\t_\tMood=Ind|Tense=Pres|VerbForm=Fin\t0\troot\t_\tSpaceAfter=No|name=O",
"5\t.\t$.\tPUNCT\t_\t_\t4\tpunct\t_\tname=B-BAD",
),
(
"1\tDommer\tdommer\tNOUN\t_\tDefinite=Ind|Gender=Masc|Number=Sing\t2\tappos\t_\t_",
"2\tFinn\tFinn\tPROPN\t_\tGender=Masc\t4\tnsubj\t_\tSpaceAfter=No|NE=B-PER",
"3\tEilertsen\tEilertsen\tPROPN\t_\t_\t2\tname\t_\tNE=L-PER",
"4\tavstår\tavstå\tVERB\t_\tMood=Ind|Tense=Pres|VerbForm=Fin\t0\troot\t_\tSpaceAfter=No",
"5\t.\t$.\tPUNCT\t_\t_\t4\tpunct\t_\tNE=B-BAD",
),
],
)
def test_cli_converters_conllu_to_docs_name_ner_map(lines):
input_data = "\n".join(lines)
converted_docs = list(
conllu_to_docs(input_data, n_sents=1, ner_map={"PER": "PERSON", "BAD": ""})
)
assert len(converted_docs) == 1
converted = [docs_to_json(converted_docs)]
assert converted[0]["id"] == 0
assert len(converted[0]["paragraphs"]) == 1
assert converted[0]["paragraphs"][0]["raw"] == "Dommer FinnEilertsen avstår. "
assert len(converted[0]["paragraphs"][0]["sentences"]) == 1
sent = converted[0]["paragraphs"][0]["sentences"][0]
assert len(sent["tokens"]) == 5
tokens = sent["tokens"]
assert [t["orth"] for t in tokens] == ["Dommer", "Finn", "Eilertsen", "avstår", "."]
assert [t["tag"] for t in tokens] == ["NOUN", "PROPN", "PROPN", "VERB", "PUNCT"]
assert [t["head"] for t in tokens] == [1, 2, -1, 0, -1]
assert [t["dep"] for t in tokens] == ["appos", "nsubj", "name", "ROOT", "punct"]
ent_offsets = [
(e[0], e[1], e[2]) for e in converted[0]["paragraphs"][0]["entities"]
]
biluo_tags = offsets_to_biluo_tags(converted_docs[0], ent_offsets, missing="O")
assert biluo_tags == ["O", "B-PERSON", "L-PERSON", "O", "O"]
def test_cli_converters_conllu_to_docs_subtokens():
# https://raw.githubusercontent.com/ohenrik/nb_news_ud_sm/master/original_data/no-ud-dev-ner.conllu
lines = [
"1\tDommer\tdommer\tNOUN\t_\tDefinite=Ind|Gender=Masc|Number=Sing\t2\tappos\t_\tname=O",
"2-3\tFE\t_\t_\t_\t_\t_\t_\t_\t_",
"2\tFinn\tFinn\tPROPN\t_\tGender=Masc\t4\tnsubj\t_\tname=B-PER",
"3\tEilertsen\tEilertsen\tX\t_\tGender=Fem|Tense=past\t2\tname\t_\tname=I-PER",
"4\tavstår\tavstå\tVERB\t_\tMood=Ind|Tense=Pres|VerbForm=Fin\t0\troot\t_\tSpaceAfter=No|name=O",
"5\t.\t$.\tPUNCT\t_\t_\t4\tpunct\t_\tname=O",
]
input_data = "\n".join(lines)
converted_docs = list(
conllu_to_docs(
input_data, n_sents=1, merge_subtokens=True, append_morphology=True
)
)
assert len(converted_docs) == 1
converted = [docs_to_json(converted_docs)]
assert converted[0]["id"] == 0
assert len(converted[0]["paragraphs"]) == 1
assert converted[0]["paragraphs"][0]["raw"] == "Dommer FE avstår. "
assert len(converted[0]["paragraphs"][0]["sentences"]) == 1
sent = converted[0]["paragraphs"][0]["sentences"][0]
assert len(sent["tokens"]) == 4
tokens = sent["tokens"]
print(tokens)
assert [t["orth"] for t in tokens] == ["Dommer", "FE", "avstår", "."]
assert [t["tag"] for t in tokens] == [
"NOUN__Definite=Ind|Gender=Masc|Number=Sing",
"PROPN_X__Gender=Fem,Masc|Tense=past",
"VERB__Mood=Ind|Tense=Pres|VerbForm=Fin",
"PUNCT",
]
assert [t["pos"] for t in tokens] == ["NOUN", "PROPN", "VERB", "PUNCT"]
assert [t["morph"] for t in tokens] == [
"Definite=Ind|Gender=Masc|Number=Sing",
"Gender=Fem,Masc|Tense=past",
"Mood=Ind|Tense=Pres|VerbForm=Fin",
"",
]
assert [t["lemma"] for t in tokens] == ["dommer", "Finn Eilertsen", "avstå", "$."]
assert [t["head"] for t in tokens] == [1, 1, 0, -1]
assert [t["dep"] for t in tokens] == ["appos", "nsubj", "ROOT", "punct"]
ent_offsets = [
(e[0], e[1], e[2]) for e in converted[0]["paragraphs"][0]["entities"]
]
biluo_tags = offsets_to_biluo_tags(converted_docs[0], ent_offsets, missing="O")
assert biluo_tags == ["O", "U-PER", "O", "O"]
def test_cli_converters_iob_to_docs():
lines = [
"I|O like|O London|I-GPE and|O New|B-GPE York|I-GPE City|I-GPE .|O",
"I|O like|O London|B-GPE and|O New|B-GPE York|I-GPE City|I-GPE .|O",
"I|PRP|O like|VBP|O London|NNP|I-GPE and|CC|O New|NNP|B-GPE York|NNP|I-GPE City|NNP|I-GPE .|.|O",
"I|PRP|O like|VBP|O London|NNP|B-GPE and|CC|O New|NNP|B-GPE York|NNP|I-GPE City|NNP|I-GPE .|.|O",
]
input_data = "\n".join(lines)
converted_docs = list(iob_to_docs(input_data, n_sents=10))
assert len(converted_docs) == 1
converted = docs_to_json(converted_docs)
assert converted["id"] == 0
assert len(converted["paragraphs"]) == 1
assert len(converted["paragraphs"][0]["sentences"]) == 4
for i in range(0, 4):
sent = converted["paragraphs"][0]["sentences"][i]
assert len(sent["tokens"]) == 8
tokens = sent["tokens"]
expected = ["I", "like", "London", "and", "New", "York", "City", "."]
assert [t["orth"] for t in tokens] == expected
assert len(converted_docs[0].ents) == 8
for ent in converted_docs[0].ents:
assert ent.text in ["New York City", "London"]
def test_cli_converters_conll_ner_to_docs():
lines = [
"-DOCSTART- -X- O O",
"",
"I\tO",
"like\tO",
"London\tB-GPE",
"and\tO",
"New\tB-GPE",
"York\tI-GPE",
"City\tI-GPE",
".\tO",
"",
"I O",
"like O",
"London B-GPE",
"and O",
"New B-GPE",
"York I-GPE",
"City I-GPE",
". O",
"",
"I PRP O",
"like VBP O",
"London NNP B-GPE",
"and CC O",
"New NNP B-GPE",
"York NNP I-GPE",
"City NNP I-GPE",
". . O",
"",
"I PRP _ O",
"like VBP _ O",
"London NNP _ B-GPE",
"and CC _ O",
"New NNP _ B-GPE",
"York NNP _ I-GPE",
"City NNP _ I-GPE",
". . _ O",
"",
"I\tPRP\t_\tO",
"like\tVBP\t_\tO",
"London\tNNP\t_\tB-GPE",
"and\tCC\t_\tO",
"New\tNNP\t_\tB-GPE",
"York\tNNP\t_\tI-GPE",
"City\tNNP\t_\tI-GPE",
".\t.\t_\tO",
]
input_data = "\n".join(lines)
converted_docs = list(conll_ner_to_docs(input_data, n_sents=10))
assert len(converted_docs) == 1
converted = docs_to_json(converted_docs)
assert converted["id"] == 0
assert len(converted["paragraphs"]) == 1
assert len(converted["paragraphs"][0]["sentences"]) == 5
for i in range(0, 5):
sent = converted["paragraphs"][0]["sentences"][i]
assert len(sent["tokens"]) == 8
tokens = sent["tokens"]
# fmt: off
assert [t["orth"] for t in tokens] == ["I", "like", "London", "and", "New", "York", "City", "."]
# fmt: on
assert len(converted_docs[0].ents) == 10
for ent in converted_docs[0].ents:
assert ent.text in ["New York City", "London"]
def test_project_config_validation_full():
config = {
"vars": {"some_var": 20},
"directories": ["assets", "configs", "corpus", "scripts", "training"],
"assets": [
{
"dest": "x",
"url": "https://example.com",
"checksum": "63373dd656daa1fd3043ce166a59474c",
},
{
"dest": "y",
"git": {
"repo": "https://github.com/example/repo",
"branch": "develop",
"path": "y",
},
},
],
"commands": [
{
"name": "train",
"help": "Train a model",
"script": ["python -m spacy train config.cfg -o training"],
"deps": ["config.cfg", "corpus/training.spcy"],
"outputs": ["training/model-best"],
},
{"name": "test", "script": ["pytest", "custom.py"], "no_skip": True},
],
"workflows": {"all": ["train", "test"], "train": ["train"]},
}
errors = validate(ProjectConfigSchema, config)
assert not errors
@pytest.mark.parametrize(
"config",
[
{"commands": [{"name": "a"}, {"name": "a"}]},
{"commands": [{"name": "a"}], "workflows": {"a": []}},
{"commands": [{"name": "a"}], "workflows": {"b": ["c"]}},
],
)
def test_project_config_validation1(config):
with pytest.raises(SystemExit):
validate_project_commands(config)
@pytest.mark.parametrize(
"config,n_errors",
[
({"commands": {"a": []}}, 1),
({"commands": [{"help": "..."}]}, 1),
({"commands": [{"name": "a", "extra": "b"}]}, 1),
({"commands": [{"extra": "b"}]}, 2),
({"commands": [{"name": "a", "deps": [123]}]}, 1),
],
)
def test_project_config_validation2(config, n_errors):
errors = validate(ProjectConfigSchema, config)
assert len(errors) == n_errors
def test_project_config_interpolation():
variables = {"a": 10, "b": {"c": "foo", "d": True}}
commands = [
{"name": "x", "script": ["hello ${vars.a} ${vars.b.c}"]},
{"name": "y", "script": ["${vars.b.c} ${vars.b.d}"]},
]
project = {"commands": commands, "vars": variables}
with make_tempdir() as d:
srsly.write_yaml(d / "project.yml", project)
cfg = load_project_config(d)
assert cfg["commands"][0]["script"][0] == "hello 10 foo"
assert cfg["commands"][1]["script"][0] == "foo true"
commands = [{"name": "x", "script": ["hello ${vars.a} ${vars.b.e}"]}]
project = {"commands": commands, "vars": variables}
with pytest.raises(ConfigValidationError):
substitute_project_variables(project)
@pytest.mark.parametrize(
"args,expected",
[
# fmt: off
(["--x.foo", "10"], {"x.foo": 10}),
(["--x.foo=10"], {"x.foo": 10}),
(["--x.foo", "bar"], {"x.foo": "bar"}),
(["--x.foo=bar"], {"x.foo": "bar"}),
(["--x.foo", "--x.bar", "baz"], {"x.foo": True, "x.bar": "baz"}),
(["--x.foo", "--x.bar=baz"], {"x.foo": True, "x.bar": "baz"}),
(["--x.foo", "10.1", "--x.bar", "--x.baz", "false"], {"x.foo": 10.1, "x.bar": True, "x.baz": False}),
(["--x.foo", "10.1", "--x.bar", "--x.baz=false"], {"x.foo": 10.1, "x.bar": True, "x.baz": False})
# fmt: on
],
)
def test_parse_config_overrides(args, expected):
assert parse_config_overrides(args) == expected
@pytest.mark.parametrize("args", [["--foo"], ["--x.foo", "bar", "--baz"]])
def test_parse_config_overrides_invalid(args):
with pytest.raises(NoSuchOption):
parse_config_overrides(args)
@pytest.mark.parametrize("args", [["--x.foo", "bar", "baz"], ["x.foo"]])
def test_parse_config_overrides_invalid_2(args):
with pytest.raises(SystemExit):
parse_config_overrides(args)
def test_parse_cli_overrides():
overrides = "--x.foo bar --x.bar=12 --x.baz false --y.foo=hello"
os.environ[ENV_VARS.CONFIG_OVERRIDES] = overrides
result = parse_config_overrides([])
assert len(result) == 4
assert result["x.foo"] == "bar"
assert result["x.bar"] == 12
assert result["x.baz"] is False
assert result["y.foo"] == "hello"
os.environ[ENV_VARS.CONFIG_OVERRIDES] = "--x"
assert parse_config_overrides([], env_var=None) == {}
with pytest.raises(SystemExit):
parse_config_overrides([])
os.environ[ENV_VARS.CONFIG_OVERRIDES] = "hello world"
with pytest.raises(SystemExit):
parse_config_overrides([])
del os.environ[ENV_VARS.CONFIG_OVERRIDES]
@pytest.mark.parametrize("lang", ["en", "nl"])
@pytest.mark.parametrize(
"pipeline", [["tagger", "parser", "ner"], [], ["ner", "textcat", "sentencizer"]]
)
@pytest.mark.parametrize("optimize", ["efficiency", "accuracy"])
def test_init_config(lang, pipeline, optimize):
# TODO: add more tests and also check for GPU with transformers
config = init_config(lang=lang, pipeline=pipeline, optimize=optimize, gpu=False)
assert isinstance(config, Config)
def test_model_recommendations():
for lang, data in RECOMMENDATIONS.items():
assert RecommendationSchema(**data)
@pytest.mark.parametrize(
"value",
[
# fmt: off
"parser,textcat,tagger",
" parser, textcat ,tagger ",
'parser,textcat,tagger',
' parser, textcat ,tagger ',
' "parser"," textcat " ,"tagger "',
" 'parser',' textcat ' ,'tagger '",
'[parser,textcat,tagger]',
'["parser","textcat","tagger"]',
'[" parser" ,"textcat ", " tagger " ]',
"[parser,textcat,tagger]",
"[ parser, textcat , tagger]",
"['parser','textcat','tagger']",
"[' parser' , 'textcat', ' tagger ' ]",
# fmt: on
],
)
def test_string_to_list(value):
assert string_to_list(value, intify=False) == ["parser", "textcat", "tagger"]
@pytest.mark.parametrize(
"value",
[
# fmt: off
"1,2,3",
'[1,2,3]',
'["1","2","3"]',
'[" 1" ,"2 ", " 3 " ]',
"[' 1' , '2', ' 3 ' ]",
# fmt: on
],
)
def test_string_to_list_intify(value):
assert string_to_list(value, intify=False) == ["1", "2", "3"]
assert string_to_list(value, intify=True) == [1, 2, 3]
| mit | -3,833,332,398,481,140,700 | 36.648148 | 128 | 0.555337 | false |
timkofu/jobhuntr | search/tests.py | 1 | 1257 | from webtest import TestApp
from jobhuntr.wsgi import application
from django.test import TestCase
from django.test import Client
from django.utils.six import StringIO
from django.urls import reverse
from django.core.management import call_command
from django.test.utils import setup_test_environment
#setup_test_environment()
class Blankettests(TestCase):
def test_main_page_loads(self):
self.assertEqual(Client().get('/').status_code, 200)
def test_countries_loads(self):
self.assertEqual(Client().get(reverse('countries')).status_code, 200)
def test_search_works(self):
self.assertEqual(Client().get('/?q=KE').status_code, 200)
def test_remove_expired_management_command(self):
out = StringIO()
call_command('remove_expired', stdout=out)
self.assertIn("", out.getvalue())
def test_spider_management_command(self):
out = StringIO()
call_command('spider', stdout=out)
self.assertIn("", out.getvalue())
def test_admin_loads(self):
self.assertEqual(Client().get('/admin/', follow=True).status_code, 200)
#def test_wsgi_loads(self):
# app = TestApp(application)
# self.assertEqual(app.get('/').status_int, 200)
# ASGI now
| mit | 1,716,138,064,455,852,300 | 29.658537 | 79 | 0.682578 | false |
AerisCloud/AerisCloud | aeriscloud/s3.py | 1 | 2331 | import math
import requests
import time
import xml.etree.ElementTree as ET
# from .config import config
class S3:
def __init__(self, bucket, endpoint_url=None):
"""
The S3 class provides methods to manage files from a bucket.
:param bucket: The bucket to use
:type bucket: String
"""
self._bucket = bucket
self._endpoint_url = endpoint_url
if not self._endpoint_url:
self._endpoint_url = 'http://%s.s3.amazonaws.com' % bucket
def put(self, data, key):
"""
Upload a file to the S3 bucket
:param data: The content of the file to upload
:type data: any
:param key: The name of the file to post
:type key: String
:return: The url of the uploaded file
:rtype: String
"""
url = self._endpoint_url + '/' + key
r = requests.put(url, data=data)
r.raise_for_status()
return url
def generate_key(self, seed, ext):
"""
        Generate a key that should be unique within the bucket
        :param seed: The seed to use to generate the name
        :type seed: String
:param ext: The file extension
:type ext: String
:return: A key to upload a new file
:rtype: String
"""
return "{0}-{1}.{2}".format(
seed,
int(math.floor(time.time())),
ext
)
def list_bucket(self):
"""
Return the list of the files in the bucket
:return: List of files
:rtype: List
"""
url = self._endpoint_url + '/'
r = requests.get(url)
r.raise_for_status()
xml = ET.fromstring(r.text)
files = []
for child in xml:
if child.tag.endswith('Contents'):
file = {}
# Convert the XML data to python object
for file_data in child:
if file_data.tag.endswith('Key'):
file['Key'] = file_data.text
if file_data.tag.endswith('LastModified'):
file['LastModified'] = file_data.text
if file_data.tag.endswith('Size'):
file['Size'] = file_data.text
files.append(file)
return files
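# Hedged usage sketch (added for illustration, not part of the original
# module): the bucket name, seed and payload are placeholders, assuming an
# endpoint that accepts unauthenticated PUTs.
if __name__ == '__main__':
    s3 = S3('example-bucket')
    key = s3.generate_key('report', 'txt')
    print(s3.put('hello world', key))
    print(s3.list_bucket())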
| mit | 3,940,088,200,830,511,600 | 27.084337 | 70 | 0.517375 | false |
GNOME/gedit-latex | latex/latex/completion.py | 1 | 12450 | # -*- coding: utf-8 -*-
# This file is part of the Gedit LaTeX Plugin
#
# Copyright (C) 2010 Michael Zeising
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public Licence as published by the Free Software
# Foundation; either version 2 of the Licence, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public Licence for more
# details.
#
# You should have received a copy of the GNU General Public Licence along with
# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
latex.completion
LaTeX-specific completion classes
"""
from logging import getLogger
from gi.repository import GdkPixbuf
from ..resources import Resources
from ..completion import ICompletionHandler, Proposal
class LaTeXCommandProposal(Proposal):
"""
A proposal inserting a Template when activated
"""
def __init__(self, overlap, snippet, label):
self._snippet = snippet
self._label = label
self._overlap = overlap
self._icon = GdkPixbuf.Pixbuf.new_from_file(Resources().get_icon("i_command.png"))
@property
def source(self):
return self._snippet
@property
def label(self):
return self._label
@property
def details(self):
return None
@property
def icon(self):
return self._icon
@property
def overlap(self):
return self._overlap
class LaTeXChoiceProposal(Proposal):
"""
A proposal inserting a simple string when activated
"""
def __init__(self, overlap, source, label, details):
self._source = source
self._details = details
self._overlap = overlap
self._label = label
self._icon = GdkPixbuf.Pixbuf.new_from_file(Resources().get_icon("i_choice.png"))
@property
def source(self):
return self._source
@property
def label(self):
return self._label
@property
def details(self):
return self._details
@property
def icon(self):
return self._icon
@property
def overlap(self):
return self._overlap
from .model import LanguageModelFactory, Choice, MandatoryArgument, OptionalArgument
from .parser import PrefixParser, Node
from ..bibtex.cache import BibTeXDocumentCache
class LaTeXCompletionHandler(ICompletionHandler):
"""
This implements the LaTeX-specific code completion
"""
_log = getLogger("LaTeXCompletionHandler")
trigger_keys = ["backslash", "braceleft"]
prefix_delimiters = ["\\"]
def __init__(self):
self._log.debug("init")
#get the language_model singleton
self._language_model = LanguageModelFactory().get_language_model()
self._bibtex_document_cache = BibTeXDocumentCache()
def set_outline(self, outline):
"""
Process a LaTeX outline model
@param outline: a latex.outline.Outline instance
"""
# labels
label_choices = [Choice(None, label.value) for label in outline.labels]
self._language_model.fill_placeholder("Labels", label_choices)
# colors
color_choices = [Choice(None, color) for color in outline.colors]
self._language_model.fill_placeholder("Colors", color_choices)
# newcommands
self._language_model.set_newcommands(outline.newcommands)
# newenvironments
newenvironments = []
for n in outline.newenvironments:
choice = Choice(None, n.value)
newenvironments.append(choice)
self._language_model.fill_placeholder("Newenvironments", newenvironments)
#
# bibtex entries
#
try:
entry_choices = []
for bib_file in outline.bibliographies:
try:
bibtex_document = self._bibtex_document_cache.get_document(bib_file)
# generate choices from entries
for entry in bibtex_document.entries:
# build table data for DetailsPopup
rows = []
for field in entry.fields:
rows.append([field.name, field.valueMarkup])
entry_choices.append(Choice(None, entry.key, rows))
except OSError:
# BibTeX file not found
self._log.error("Not found: %s" % bib_file)
# attach to placeholders in CommandStore
self._language_model.fill_placeholder("Bibitems", entry_choices)
except IOError:
self._log.debug("Failed to provide BibTeX completion due to IOError")
def set_neighbors(self, tex_files, bib_files, graphic_files):
"""
Populate the lists of neighbor files
@param tex_files: list of neighbor TeX files
@param bib_files: list of neighbor BibTeX files
@param graphic_files: list of neighbor graphics
"""
tex_choices = [Choice(None, file.shortbasename) for file in tex_files]
self._language_model.fill_placeholder("TexFiles", tex_choices)
bib_choices = [Choice(None, file.shortbasename) for file in bib_files]
self._language_model.fill_placeholder("BibFiles", bib_choices)
graphic_choices = [Choice(None, file.basename) for file in graphic_files]
self._language_model.fill_placeholder("ImageFiles", graphic_choices)
def complete(self, prefix):
"""
Try to complete a given prefix
"""
self._log.debug("complete: '%s'" % prefix)
#proposals = [LaTeXTemplateProposal(Template("Hello[${One}][${Two}][${Three}]"), "Hello[Some]"), LaTeXProposal("\\world")]
fragment = Node(Node.DOCUMENT)
parser = PrefixParser()
try:
parser.parse(prefix, fragment)
modelParser = PrefixModelParser(self._language_model)
proposals = modelParser.parse(fragment)
self._log.debug("Generated %s proposals" % len(proposals))
return proposals
except Exception as e:
self._log.debug(e)
return []
from ..preferences import Preferences
from . import LaTeXSource
class PrefixModelParser(object):
"""
This parses the document model of a prefix and generates proposals accordingly
This is used by the LaTeXCompletionHandler class
"""
_log = getLogger("PrefixModelParser")
def __init__(self, language_model):
self.__language_model = language_model
self.__light_foreground = Preferences().get("light-foreground-color")
def __create_proposals_from_commands(self, commands, overlap):
"""
Generate proposals for commands
"""
proposals = []
for command in commands:
label = command.name
snippet = "\\" + command.name
for idx, argument in enumerate(command.children):
if type(argument) is MandatoryArgument:
label += "{<span color='%s'>%s</span>}" % (self.__light_foreground, argument.label)
snippet += "{${%s:%s}}" % (idx+1, argument.label)
elif type(argument) is OptionalArgument:
label += "[<span color='%s'>%s</span>]" % (self.__light_foreground, argument.label)
snippet += "[${%s:%s}]" % (idx+1, argument.label)
if command.package:
label += " <small><b>%s</b></small>" % command.package
            # workaround: latex.model.Element.package may be None
# TODO: latex.model.Element.package should be a list of packages
if command.package is None:
packages = []
else:
packages = [command.package]
proposal = LaTeXCommandProposal(overlap, LaTeXSource(snippet, packages), label)
proposals.append(proposal)
return proposals
def __create_proposals_from_choices(self, choices, overlap):
"""
Generate proposals for argument choices
"""
proposals = []
for choice in choices:
label = choice.value
if choice.package:
label += " <small><b>%s</b></small>" % choice.package
# see above
if choice.package is None:
packages = []
else:
packages = [choice.package]
proposal = LaTeXChoiceProposal(overlap, LaTeXSource(choice.value, packages), label, choice.details)
proposals.append(proposal)
return proposals
def parse(self, prefixFragment):
"""
Returns choices
"""
# root node of the prefix model must be COMMAND
commandNode = prefixFragment[-1]
if commandNode.type != Node.COMMAND:
return []
commandName = commandNode.value
if len(commandNode) == 0:
# command has no arguments...
if len(commandName) == 0:
# no name, so propose all commands
commands = list(self.__language_model.commands.values())
overlap = 1 # only "\"
else:
commands = self.__language_model.find_command(commandName)
if len(commands) == 1 and commands[0].name == commandName:
# don't propose when only one command is found and that one
# matches the typed one
return []
overlap = len(commandName) + 1 # "\begi"
return self.__create_proposals_from_commands(commands, overlap)
# ...command has arguments
try:
self._log.debug(commandNode.xml)
# find the language model of the command
storedCommand = self.__language_model.commands[commandName]
try:
argumentNode, storedArgument = self.__match_argument(commandNode, storedCommand)
except Exception as e:
self._log.error(e)
return []
choices = storedArgument.children
# filter argument matching the already typed argument text
argumentValue = argumentNode.innerText
if len(argumentValue):
choices = [choice for choice in choices if choice.value.startswith(argumentValue)]
overlap = len(argumentValue)
else:
overlap = 0
return self.__create_proposals_from_choices(choices, overlap)
except KeyError:
self._log.debug("Command not found: %s" % commandName)
return []
def __match_argument(self, command, model_command):
"""
@param command: the parsed command Node
@param model_command: the according model command
@return: (matched argument, model argument)
"""
# push the arguments of the model command on a stack
model_argument_stack = []
model_argument_stack.extend(model_command.children)
model_argument_stack.reverse()
for argument in command:
if argument.type == Node.MANDATORY_ARGUMENT:
# optional arguments in the model may be skipped
while True:
try:
model_argument = model_argument_stack.pop()
if model_argument.type != Node.OPTIONAL_ARGUMENT:
break
except IndexError:
# no more optional arguments to skip - signatures can't match
raise Exception("Signatures don't match")
if not argument.closed:
return (argument, model_argument)
elif argument.type == Node.OPTIONAL_ARGUMENT:
model_argument = model_argument_stack.pop()
if model_argument.type != Node.OPTIONAL_ARGUMENT:
raise Exception("Signatures don't match")
if not argument.closed:
return (argument, model_argument)
raise Exception("No matching model argument found")
# ex:ts=4:et:
| gpl-3.0 | 2,227,404,283,291,466,000 | 30.518987 | 130 | 0.592048 | false |
uranusjr/pystandardpaths | standardpaths/base.py | 1 | 7132 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import enum
import importlib
import platform
class Location(enum.Enum):
"""Describe the different locations that can be queried using functions
such as :func:`.get_writable_path` and :func:`.get_standard_paths`.
Some of the values in this enum represent a user configuration. Such enum
values will return the same paths in different applications, so they could
be used to share data with other applications. Other values are specific to
this application. Each enum value in the table below describes whether it's
application-specific or generic.
Application-specific directories should be assumed to be unreachable by
other applications. Therefore, files placed there might not be readable by
other applications, even if run by the same user. On the other hand,
generic directories should be assumed to be accessible by all applications
run by this user, but should still be assumed to be unreachable by
applications by other users.
Data interchange with other users is out of the scope of PyStandardPaths.
"""
desktop = 0
"""The user's desktop directory. This is a generic value.
"""
documents = 1
"""The directory containing user document files. This is a generic value.
The returned path is never empty.
"""
fonts = 2
"""The directory containing user's fonts. This is a generic value. Note
that installing fonts may require additional, platform-specific operations.
"""
applications = 3
"""The directory containing the user applications (either executables,
application bundles, or shortcuts to them). This is a generic value. Note
that installing applications may require additional, platform-specific
operations. Files, folders or shortcuts in this directory are
platform-specific.
"""
music = 4
"""The directory containing the user's music or other audio files. This is
a generic value. If no directory specific for music files exists, a
sensible fallback for storing user documents is returned.
"""
movies = 5
"""The directory containing the user's movies and videos. This is a generic
value. If no directory specific for movie files exists, a sensible fallback
for storing user documents is returned.
"""
pictures = 6
"""The directory containing the user's pictures or photos. This is a
generic value. If no directory specific for picture files exists, a
sensible fallback for storing user documents is returned.
"""
temp = 7
"""A directory where temporary files can be stored (the same as
:func:`tempfile.gettempdir`). The returned value might be
application-specific, shared among other applications for this user, or
even system-wide. The returned path is never empty.
"""
home = 8
"""The user's home directory (the same as `os.path.expanduser('~')`). On
Unix systems, this is equal to the `HOME` environment variable. This value
might be generic or application-specific, but the returned path is never
empty.
"""
data = 9
"""The same value as :attr:`.app_local_data`. This enumeration value is
deprecated. Using :attr:`.app_data` is preferable since on Windows, the
roaming path is recommended.
"""
cache = 10
"""A directory location where user-specific non-essential (cached) data
should be written. This is an application-specific directory. The returned
path is never empty.
"""
generic_data = 11
"""A directory location where persistent data shared across applications
can be stored. This is a generic value. The returned path is never empty.
"""
runtime = 12
"""A directory location where runtime communication files should be
written, like Unix local sockets. This is a generic value. The returned
path may be empty on some systems.
"""
config = 13
"""A directory location where user-specific configuration files should be
written. This may be either a generic value or application-specific, and
the returned path is never empty.
"""
download = 14
"""A directory for user's downloaded files. This is a generic value. If no
directory specific for downloads exists, a sensible fallback for storing
user documents is returned.
"""
generic_cache = 15
"""A directory location where user-specific non-essential (cached) data,
shared across applications, should be written. This is a generic value.
Note that the returned path may be empty if the system has no concept of
shared cache.
"""
generic_config = 16
"""A directory location where user-specific configuration files shared
between multiple applications should be written. This is a generic value
and the returned path is never empty.
"""
app_data = 17
"""A directory location where persistent application data can be stored.
This is an application-specific directory. To obtain a path to store data
to be shared with other applications, use :attr:`.generic_data`. The
returned path is never empty. On the Windows operating system, this returns
the roaming path.
"""
app_local_data = data
"""The local settings path on the Windows operating system. On all other
platforms, it returns the same value as :attr:`.app_data`.
"""
# Our own enum values. We use string values so the values will never
# collide with Qt's.
log = 'log'
"""A directory location where user-specific log files should be written.
This is an application-specific value. The returned path is never empty.
"""
class LocationError(OSError):
"""Exception class raised to indicate an error during path resolution.
"""
class Config(object):
"""Configuration class that holds application information.
.. seealso::
:func:`.configure` and :func:`.get_config`.
"""
def __init__(self, application_name='', organization_name=''):
self.application_name = application_name
self.organization_name = organization_name
_config = Config('', '')
def configure(application_name='', organization_name=''):
"""Configure default application information used by PyStandardPaths.
.. seealso::
:func:`.get_config` and :class:`.Config`.
"""
global _config
_config = Config(application_name, organization_name)
def get_config():
"""Get the current configuration of application information.
:rtype: :class:`.Config`
"""
return _config
def _append_org_and_app(path, config):
if config is None:
config = get_config()
if config.organization_name:
path = path / config.organization_name
if config.application_name:
path = path / config.application_name
return path
def _get_implementation():
"""Get implementation module based on current OS.
"""
module_name = {
'Darwin': '..osx',
'Windows': '..windows',
}.get(platform.system(), '..unix')
return importlib.import_module(module_name, package=__name__)
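# Hedged usage sketch (added for illustration, not part of the original
# module): only the configuration round-trip is shown here, since the actual
# path queries live in the platform modules loaded by _get_implementation().
if __name__ == '__main__':
    configure(application_name='DemoApp', organization_name='DemoOrg')
    cfg = get_config()
    print('%s / %s' % (cfg.organization_name, cfg.application_name))
    print('%s, %s' % (Location.app_data, Location.cache))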
| bsd-3-clause | 6,039,331,955,097,506,000 | 33.454106 | 79 | 0.695177 | false |
Psycojoker/dierentheater | scraper/utils.py | 1 | 4715 | # -*- coding:Utf-8 -*-
# Dieren Theater - lachambre.be to json sausage machine
# Copyright (C) 2012 Laurent Peuch <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import logging
logger = logging.getLogger('')
from urllib import quote
from history.utils import irc
from .tasks import app
LACHAMBRE_PREFIX = "http://www.lachambre.be/kvvcr/"
DOSSIER_ID_REGEX = "dossierID=([0-9A-Za-z-]+).xml"
def get_or_create(klass, _id=None, **kwargs):
if _id is None:
object = klass.objects.filter(**kwargs)
else:
object = klass.objects.filter(**{_id: kwargs[_id]})
if object:
return object[0]
else:
logger.debug("\033[0;36madd new %s %s\033[0m" % (klass.__name__, kwargs))
result = klass(**kwargs)
result.save()
return result
def update_or_create(klass, _id=None, **kwargs):
if _id is None:
object = klass.objects.filter(**kwargs)
else:
object = klass.objects.filter(**{_id: kwargs[_id]})
if object:
result = object[0]
for key, value in kwargs.items():
setattr(result, key, value)
logger.debug("\033[0;36mupdate %s %s\033[0m" % (klass.__name__, kwargs))
else:
logger.debug("\033[0;32add new %s %s\033[0m" % (klass.__name__, kwargs))
result = klass(**kwargs)
result.save()
return result
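# Hedged usage note (added for illustration): "Deputy" stands in for any
# Django-style model exposing ``objects`` and ``save()``; it is not defined in
# this module.
#   deputy = get_or_create(Deputy, _id="lachambre_id", lachambre_id="123")
#   deputy = update_or_create(Deputy, _id="lachambre_id",
#                             lachambre_id="123", full_name="Jane Doe")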
def get_text_else_blank(dico, key):
return dico[key].text if dico.get(key) and dico[key].a else ""
def get_href_else_blank(dico, key):
return dico[key].a["href"] if dico.get(key) and dico[key].a else ""
def get_items_list_else_empty_list(dico, key):
return dico[key].text.split(" | ") if dico.get(key) else []
def dico_get_text(dico, key):
if dico.get(key):
return dico[key].text
return ""
class AccessControlDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.accessed = set()
def __getitem__(self, key):
self.accessed.add(key)
return dict.__getitem__(self, key)
def get_not_accessed_keys(self):
a = []
for i in self.keys():
if i not in self.accessed:
a.append(i)
elif isinstance(self[i], AccessControlDict) and self[i].get_not_accessed_keys():
a.append(i)
a.append(self[i].get_not_accessed_keys())
return a
def die_if_got_not_accessed_keys(self):
if self.get_not_accessed_keys():
logger.error("\nError: untreated sections:")
irc("\nError: untreated sections:")
for i in self.get_not_accessed_keys():
if isinstance(i, (str, unicode)):
logger.error("* %s" % i)
irc("* %s" % i.encode("Utf-8"))
else:
for j in i:
logger.error(" * %s" % j)
irc(" * %s" % j.encode("Utf-8"))
logger.error("------------ stop ------------")
irc("Bram: Error: dico got un-accessed keys, die")
import sys
sys.exit(1)
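# Hedged usage sketch (added for illustration, not part of the original
# module): shows how the accessed-key tracking above flags unconsumed keys.
def _access_control_dict_demo():
    d = AccessControlDict({"title": "x", "date": "y"})
    d["title"]  # mark "title" as accessed
    return d.get_not_accessed_keys()  # -> ["date"]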
def clean_text(text):
def rep(result):
string = result.group() # "&#xxx;"
n = int(string[2:-1])
uchar = unichr(n) # matching unicode char
return uchar
return re.sub("(\r|\t|\n| )+", " ", re.sub("&#\d+;", rep, text)).strip()
def lame_url(url):
    # convert super lame urls of lachambre.be into something usable
return quote(url.encode("iso-8859-1"), safe="%/:=&?~#+!$,;'@()*[]")
def table2dic(table):
dico = {}
for x, y in zip(table[::2], table[1::2]):
dico[x.text] = y.text if y.a is None else y.a
return dico
class Parsable(object):
@classmethod
def scrape(klass, cache=False, sync=False):
if sync:
klass.fetch_list(cache=cache, sync=sync)
else:
return klass.fetch_list.delay(cache=cache, sync=sync)
@classmethod
def fetch_list(klass, cache=False, sync=False):
raise NotImplementedError()
| agpl-3.0 | 1,099,140,688,297,136,500 | 29.419355 | 92 | 0.577943 | false |
agx/git-buildpackage | tests/testutils/debiangittestrepo.py | 1 | 1333 | # vim: set fileencoding=utf-8 :
from .. import context
import os
import unittest
import gbp.deb.git
class DebianGitTestRepo(unittest.TestCase):
"""Scratch repo for a single unit test"""
def setUp(self, repo_cls=None):
name = 'test_repo'
self.tmpdir = context.new_tmpdir(__name__)
if repo_cls is None:
repo_cls = gbp.deb.git.DebianGitRepository
repodir = self.tmpdir.join(name)
self.repodir = os.path.join(str(self.tmpdir), name)
self.repo = repo_cls.create(repodir)
def tearDown(self):
context.teardown()
def add_file(self, name, content=None, msg=None, mode=None):
"""
Add a single file with name I{name} and content I{content}. If
I{content} is C{none} the content of the file is undefined.
        @param name: the file's path relative to the git repo
@type name: C{str}
@param content: the file's content
@type content: C{str}
"""
path = os.path.join(self.repo.path, name)
d = os.path.dirname(path)
if not os.path.exists(d):
os.makedirs(d)
with open(path, mode or 'w+') as f:
content is None or f.write(content)
self.repo.add_files(name, force=True)
self.repo.commit_files(path, msg or "added %s" % name)
| gpl-2.0 | 3,140,142,410,044,956,000 | 27.361702 | 70 | 0.602401 | false |
pablocscode/TFG-CAEBAT | representacion2.0.py | 1 | 3399 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 10 10:14:31 2017
@author: Pablo
Objectives:
    -Dynamically plot the results of the profiles.out file for any time step
Guide:
    -This script must be located in the same folder as the profiles.out and halfcells.out files
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
#Read all the lines of the file
archivo = open('profiles.out','r')
lineas = archivo.readlines()
archivo.close()
#Compute the number of rows in the file for each time step
i = 4 #Data starts at line 4
num_distancias = 0
#The counter is increased for every non-blank line of the first time
#matrix
while lineas[i] != ' \n':
num_distancias += 1
i += 1
#Compute the number of time steps in the file
datos_halfcells = open('halfcells.out','r')
lineas_halfcells = datos_halfcells.readlines()
datos_halfcells.close()
num_tiempos = len(lineas_halfcells)-1 #the first line contains no data
#Declare the arrays that will hold the column values
distancia = np.zeros((num_tiempos,num_distancias))
C_Elec = np.zeros((num_tiempos,num_distancias))
C_Sol_Surf = np.zeros((num_tiempos,num_distancias))
Liq_Pot = np.zeros((num_tiempos,num_distancias))
Solid_Pot = np.zeros((num_tiempos,num_distancias))
J_main = np.zeros((num_tiempos,num_distancias))
tiempo = np.zeros(num_tiempos)
#Initialize before starting the for loop
fila =0
columna = 0
#Each line (row) holds the data for one specific time step
for j in range(4,(num_distancias+6)*num_tiempos,num_distancias+6):
    for i in range(j,j+num_distancias): #Data starts at line 4
        #Each element of "lineas" is a whole line that we split into a vector
        linea = lineas[i].split(',')
        #Assign to each variable its value from the line we read
distancia[fila,columna] = float(linea[0])
C_Elec[fila,columna] = float(linea[1])
C_Sol_Surf[fila,columna] = float(linea[2])
Liq_Pot[fila,columna] = float(linea[3])
Solid_Pot[fila,columna] = float(linea[4])
J_main[fila,columna] = float(linea[5])
columna = columna +1
    #Assign the time of each plot
linea = lineas[j-1].split()
tiempo[fila] = float(linea[2])
    #At the end of the for loop move on to the next row and reset the column index
fila = fila+1
columna = 0
#Plot the results
#Figure 1
fig1 = plt.figure(1)
ax1 = fig1.add_subplot(1,1,1)
#plt.axis([0, 1, -10, 10])
plt.subplots_adjust(left=0.25, bottom=0.25)
axi1 = plt.axes([0.2, 0.15, 0.65, 0.03])
si1 = Slider(axi1, 'Tiempo', 0, num_tiempos-1, valinit=0)
#Figure 2
fig2 = plt.figure(2)
ax2 = fig2.add_subplot(1,1,1)
#plt.axis([0, 1, -10, 10])
plt.subplots_adjust(left=0.25, bottom=0.25)
ax2.set_ylim([0, 0.9])
ax2.set_xlim([0, 100])
axi2 = plt.axes([0.2, 0.15, 0.65, 0.03])
si2 = Slider(axi2, 'Tiempo',0,num_tiempos-1,valinit = 0)
def plot1(val):
i = int(si1.val)
ax1.clear()
ax1.plot(C_Elec[i])
def plot2(val):
i = int(si2.val)
ax2.clear()
ax2.set_ylim([0, 0.9])
ax2.set_xlim([0, num_distancias])
ax2.plot(C_Sol_Surf[i])
si1.on_changed(plot1)
si2.on_changed(plot2)
| gpl-3.0 | -8,595,424,597,173,040 | 28.781818 | 95 | 0.661843 | false |
EmreAtes/spack | lib/spack/spack/build_systems/autotools.py | 1 | 19376 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import inspect
import os
import os.path
import shutil
import stat
from subprocess import PIPE
from subprocess import check_call
import llnl.util.tty as tty
from llnl.util.filesystem import working_dir, join_path, force_remove
from spack.package import PackageBase, run_after, run_before
from spack.util.executable import Executable
class AutotoolsPackage(PackageBase):
"""Specialized class for packages built using GNU Autotools.
This class provides four phases that can be overridden:
1. :py:meth:`~.AutotoolsPackage.autoreconf`
2. :py:meth:`~.AutotoolsPackage.configure`
3. :py:meth:`~.AutotoolsPackage.build`
4. :py:meth:`~.AutotoolsPackage.install`
They all have sensible defaults and for many packages the only thing
necessary will be to override the helper method
:py:meth:`~.AutotoolsPackage.configure_args`.
For a finer tuning you may also override:
+-----------------------------------------------+--------------------+
| **Method** | **Purpose** |
+===============================================+====================+
| :py:attr:`~.AutotoolsPackage.build_targets` | Specify ``make`` |
| | targets for the |
| | build phase |
+-----------------------------------------------+--------------------+
| :py:attr:`~.AutotoolsPackage.install_targets` | Specify ``make`` |
| | targets for the |
| | install phase |
+-----------------------------------------------+--------------------+
| :py:meth:`~.AutotoolsPackage.check` | Run build time |
| | tests if required |
+-----------------------------------------------+--------------------+
"""
#: Phases of a GNU Autotools package
phases = ['autoreconf', 'configure', 'build', 'install']
#: This attribute is used in UI queries that need to know the build
#: system base class
build_system_class = 'AutotoolsPackage'
#: Whether or not to update ``config.guess`` on old architectures
patch_config_guess = True
#: Targets for ``make`` during the :py:meth:`~.AutotoolsPackage.build`
#: phase
build_targets = []
#: Targets for ``make`` during the :py:meth:`~.AutotoolsPackage.install`
#: phase
install_targets = ['install']
#: Callback names for build-time test
build_time_test_callbacks = ['check']
#: Callback names for install-time test
install_time_test_callbacks = ['installcheck']
#: Set to true to force the autoreconf step even if configure is present
force_autoreconf = False
#: Options to be passed to autoreconf when using the default implementation
autoreconf_extra_args = []
@run_after('autoreconf')
def _do_patch_config_guess(self):
"""Some packages ship with an older config.guess and need to have
this updated when installed on a newer architecture. In particular,
config.guess fails for PPC64LE for version prior to a 2013-06-10
build date (automake 1.13.4) and for ARM (aarch64)."""
if not self.patch_config_guess or (not self.spec.satisfies(
'target=ppc64le') and not self.spec.satisfies('target=aarch64')
):
return
my_config_guess = None
config_guess = None
if os.path.exists('config.guess'):
# First search the top-level source directory
my_config_guess = 'config.guess'
else:
# Then search in all sub directories.
# We would like to use AC_CONFIG_AUX_DIR, but not all packages
# ship with their configure.in or configure.ac.
d = '.'
dirs = [os.path.join(d, o) for o in os.listdir(d)
if os.path.isdir(os.path.join(d, o))]
for dirname in dirs:
path = os.path.join(dirname, 'config.guess')
if os.path.exists(path):
my_config_guess = path
if my_config_guess is not None:
try:
check_call([my_config_guess], stdout=PIPE, stderr=PIPE)
# The package's config.guess already runs OK, so just use it
return
except Exception:
pass
else:
return
# Look for a spack-installed automake package
if 'automake' in self.spec:
automake_path = os.path.join(self.spec['automake'].prefix, 'share',
'automake-' +
str(self.spec['automake'].version))
path = os.path.join(automake_path, 'config.guess')
if os.path.exists(path):
config_guess = path
# Look for the system's config.guess
if config_guess is None and os.path.exists('/usr/share'):
automake_dir = [s for s in os.listdir('/usr/share') if
"automake" in s]
if automake_dir:
automake_path = os.path.join('/usr/share', automake_dir[0])
path = os.path.join(automake_path, 'config.guess')
if os.path.exists(path):
config_guess = path
if config_guess is not None:
try:
check_call([config_guess], stdout=PIPE, stderr=PIPE)
mod = os.stat(my_config_guess).st_mode & 0o777 | stat.S_IWUSR
os.chmod(my_config_guess, mod)
shutil.copyfile(config_guess, my_config_guess)
return
except Exception:
pass
raise RuntimeError('Failed to find suitable config.guess')
@property
def configure_directory(self):
"""Returns the directory where 'configure' resides.
:return: directory where to find configure
"""
return self.stage.source_path
@property
def configure_abs_path(self):
# Absolute path to configure
configure_abs_path = join_path(
os.path.abspath(self.configure_directory), 'configure'
)
return configure_abs_path
@property
def build_directory(self):
"""Override to provide another place to build the package"""
return self.configure_directory
@run_before('autoreconf')
def delete_configure_to_force_update(self):
if self.force_autoreconf:
force_remove(self.configure_abs_path)
def autoreconf(self, spec, prefix):
"""Not needed usually, configure should be already there"""
# If configure exists nothing needs to be done
if os.path.exists(self.configure_abs_path):
return
# Else try to regenerate it
autotools = ['m4', 'autoconf', 'automake', 'libtool']
missing = [x for x in autotools if x not in spec]
if missing:
msg = 'Cannot generate configure: missing dependencies {0}'
raise RuntimeError(msg.format(missing))
tty.msg('Configure script not found: trying to generate it')
tty.warn('*********************************************************')
tty.warn('* If the default procedure fails, consider implementing *')
tty.warn('* a custom AUTORECONF phase in the package *')
tty.warn('*********************************************************')
with working_dir(self.configure_directory):
m = inspect.getmodule(self)
# This part should be redundant in principle, but
# won't hurt
m.libtoolize()
m.aclocal()
# This line is what is needed most of the time
# --install, --verbose, --force
autoreconf_args = ['-ivf']
if 'pkgconfig' in spec:
autoreconf_args += [
'-I',
join_path(spec['pkgconfig'].prefix, 'share', 'aclocal'),
]
autoreconf_args += self.autoreconf_extra_args
m.autoreconf(*autoreconf_args)
@run_after('autoreconf')
def set_configure_or_die(self):
"""Checks the presence of a ``configure`` file after the
autoreconf phase. If it is found sets a module attribute
appropriately, otherwise raises an error.
:raises RuntimeError: if a configure script is not found in
:py:meth:`~AutotoolsPackage.configure_directory`
"""
# Check if a configure script is there. If not raise a RuntimeError.
if not os.path.exists(self.configure_abs_path):
msg = 'configure script not found in {0}'
raise RuntimeError(msg.format(self.configure_directory))
# Monkey-patch the configure script in the corresponding module
inspect.getmodule(self).configure = Executable(
self.configure_abs_path
)
def configure_args(self):
"""Produces a list containing all the arguments that must be passed to
configure, except ``--prefix`` which will be pre-pended to the list.
:return: list of arguments for configure
"""
return []
def flags_to_build_system_args(self, flags):
"""Produces a list of all command line arguments to pass specified
compiler flags to configure."""
# Has to be dynamic attribute due to caching.
setattr(self, 'configure_flag_args', [])
for flag, values in flags.items():
if values:
values_str = '{0}={1}'.format(flag.upper(), ' '.join(values))
self.configure_flag_args.append(values_str)
def configure(self, spec, prefix):
"""Runs configure with the arguments specified in
:py:meth:`~.AutotoolsPackage.configure_args`
and an appropriately set prefix.
"""
options = getattr(self, 'configure_flag_args', [])
options += ['--prefix={0}'.format(prefix)]
options += self.configure_args()
with working_dir(self.build_directory, create=True):
inspect.getmodule(self).configure(*options)
def build(self, spec, prefix):
"""Makes the build targets specified by
:py:attr:``~.AutotoolsPackage.build_targets``
"""
with working_dir(self.build_directory):
inspect.getmodule(self).make(*self.build_targets)
def install(self, spec, prefix):
"""Makes the install targets specified by
:py:attr:``~.AutotoolsPackage.install_targets``
"""
with working_dir(self.build_directory):
inspect.getmodule(self).make(*self.install_targets)
run_after('build')(PackageBase._run_default_build_time_test_callbacks)
def check(self):
"""Searches the Makefile for targets ``test`` and ``check``
and runs them if found.
"""
with working_dir(self.build_directory):
self._if_make_target_execute('test')
self._if_make_target_execute('check')
def _activate_or_not(
self,
name,
activation_word,
deactivation_word,
activation_value=None
):
"""This function contains the current implementation details of
:py:meth:`~.AutotoolsPackage.with_or_without` and
:py:meth:`~.AutotoolsPackage.enable_or_disable`.
Args:
name (str): name of the variant that is being processed
activation_word (str): the default activation word ('with' in the
case of ``with_or_without``)
deactivation_word (str): the default deactivation word ('without'
in the case of ``with_or_without``)
activation_value (callable): callable that accepts a single
value. This value is either one of the allowed values for a
multi-valued variant or the name of a bool-valued variant.
Returns the parameter to be used when the value is activated.
The special value 'prefix' can also be assigned and will return
``spec[name].prefix`` as activation parameter.
Examples:
Given a package with:
.. code-block:: python
variant('foo', values=('x', 'y'), description='')
variant('bar', default=True, description='')
calling this function like:
.. code-block:: python
_activate_or_not(
'foo', 'with', 'without', activation_value='prefix'
)
_activate_or_not('bar', 'with', 'without')
will generate the following configuration options:
.. code-block:: console
--with-x=<prefix-to-x> --without-y --with-bar
for ``<spec-name> foo=x +bar``
Returns:
list of strings that corresponds to the activation/deactivation
of the variant that has been processed
Raises:
KeyError: if name is not among known variants
"""
spec = self.spec
args = []
if activation_value == 'prefix':
activation_value = lambda x: spec[x].prefix
# Defensively look that the name passed as argument is among
# variants
if name not in self.variants:
msg = '"{0}" is not a variant of "{1}"'
raise KeyError(msg.format(name, self.name))
# Create a list of pairs. Each pair includes a configuration
# option and whether or not that option is activated
if set(self.variants[name].values) == set((True, False)):
# BoolValuedVariant carry information about a single option.
# Nonetheless, for uniformity of treatment we'll package them
# in an iterable of one element.
condition = '+{name}'.format(name=name)
options = [(name, condition in spec)]
else:
condition = '{name}={value}'
options = [
(value, condition.format(name=name, value=value) in spec)
for value in self.variants[name].values
]
# For each allowed value in the list of values
for option_value, activated in options:
# Search for an override in the package for this value
override_name = '{0}_or_{1}_{2}'.format(
activation_word, deactivation_word, option_value
)
line_generator = getattr(self, override_name, None)
# If not available use a sensible default
if line_generator is None:
def _default_generator(is_activated):
if is_activated:
line = '--{0}-{1}'.format(
activation_word, option_value
)
if activation_value is not None and activation_value(option_value): # NOQA=ignore=E501
line += '={0}'.format(
activation_value(option_value)
)
return line
return '--{0}-{1}'.format(deactivation_word, option_value)
line_generator = _default_generator
args.append(line_generator(activated))
return args
def with_or_without(self, name, activation_value=None):
"""Inspects a variant and returns the arguments that activate
or deactivate the selected feature(s) for the configure options.
This function works on all type of variants. For bool-valued variants
it will return by default ``--with-{name}`` or ``--without-{name}``.
For other kinds of variants it will cycle over the allowed values and
return either ``--with-{value}`` or ``--without-{value}``.
If activation_value is given, then for each possible value of the
variant, the option ``--with-{value}=activation_value(value)`` or
``--without-{value}`` will be added depending on whether or not
``variant=value`` is in the spec.
Args:
name (str): name of a valid multi-valued variant
activation_value (callable): callable that accepts a single
value and returns the parameter to be used leading to an entry
of the type ``--with-{name}={parameter}``.
The special value 'prefix' can also be assigned and will return
``spec[name].prefix`` as activation parameter.
Returns:
list of arguments to configure
"""
return self._activate_or_not(name, 'with', 'without', activation_value)
def enable_or_disable(self, name, activation_value=None):
"""Same as :py:meth:`~.AutotoolsPackage.with_or_without` but substitute
``with`` with ``enable`` and ``without`` with ``disable``.
Args:
name (str): name of a valid multi-valued variant
activation_value (callable): if present accepts a single value
and returns the parameter to be used leading to an entry of the
type ``--enable-{name}={parameter}``
The special value 'prefix' can also be assigned and will return
``spec[name].prefix`` as activation parameter.
Returns:
list of arguments to configure
"""
return self._activate_or_not(
name, 'enable', 'disable', activation_value
)
run_after('install')(PackageBase._run_default_install_time_test_callbacks)
def installcheck(self):
"""Searches the Makefile for an ``installcheck`` target
and runs it if found.
"""
with working_dir(self.build_directory):
self._if_make_target_execute('installcheck')
# Check that self.prefix is there after installation
run_after('install')(PackageBase.sanity_check_prefix)
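# Hedged usage sketch (added for illustration, not part of Spack itself): a
# minimal package class built on AutotoolsPackage; the name, homepage, url and
# configure flags are placeholders.
class ExampleAutotoolsPackage(AutotoolsPackage):
    """Hypothetical Autotools-based package illustrating the API above."""
    homepage = "https://example.org/libdemo"
    url = "https://example.org/libdemo-1.0.tar.gz"
    def configure_args(self):
        # Real packages would typically consult self.spec and helpers such
        # as with_or_without()/enable_or_disable() here.
        return ['--disable-static', '--enable-silent-rules']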
| lgpl-2.1 | -7,559,344,240,667,162,000 | 40.848812 | 111 | 0.566887 | false |
imageio/imageio | setup.py | 1 | 6721 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-2020, imageio contributors
#
# imageio is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
# styletest: skip
"""
Release:
* Write release notes
* Increase __version__
* git tag the release (and push the tag to Github)
* Upload to Pypi: python setup.py sdist bdist_wheel upload
* Update conda recipe on conda-forge feedstock
"""
import os
import os.path as op
import sys
import shutil
from distutils.core import Command
from distutils.command.sdist import sdist
from distutils.command.build_py import build_py
from itertools import chain
try:
from setuptools import setup # Supports wheels
except ImportError:
from distutils.core import setup # Supports anything else
try:
from wheel.bdist_wheel import bdist_wheel
except ImportError:
bdist_wheel = object
name = "imageio"
description = "Library for reading and writing a wide range of image, video, scientific, and volumetric data formats."
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
# Get version and docstring
__version__ = None
__doc__ = ""
docStatus = 0 # Not started, in progress, done
initFile = os.path.join(THIS_DIR, "imageio", "__init__.py")
for line in open(initFile).readlines():
if line.startswith("__version__"):
exec(line.strip())
elif line.startswith('"""'):
if docStatus == 0:
docStatus = 1
line = line.lstrip('"')
elif docStatus == 1:
docStatus = 2
if docStatus == 1:
__doc__ += line.rstrip() + "\n"
# Template for long description. __doc__ gets inserted here
long_description = """
.. image:: https://github.com/imageio/imageio/workflows/CI/badge.svg
:target: https://github.com/imageio/imageio/actions
__doc__
Release notes: https://github.com/imageio/imageio/blob/master/CHANGELOG.md
Example:
.. code-block:: python
>>> import imageio
>>> im = imageio.imread('imageio:astronaut.png')
>>> im.shape # im is a numpy array
(512, 512, 3)
>>> imageio.imwrite('astronaut-gray.jpg', im[:, :, 0])
See the `user API <https://imageio.readthedocs.io/en/stable/userapi.html>`_
or `examples <https://imageio.readthedocs.io/en/stable/examples.html>`_
for more information.
"""
# Prepare resources dir
package_data = [
"resources/shipped_resources_go_here",
"resources/*.*",
"resources/images/*.*",
"resources/freeimage/*.*",
]
def _set_crossplatform_resources(resource_dir):
import imageio
# Clear now
if op.isdir(resource_dir):
shutil.rmtree(resource_dir)
os.mkdir(resource_dir)
open(op.join(resource_dir, "shipped_resources_go_here"), "wb")
# Load images
for fname in [
"images/chelsea.png",
"images/chelsea.zip",
"images/astronaut.png",
"images/newtonscradle.gif",
"images/cockatoo.mp4",
"images/realshort.mp4",
"images/stent.npz",
]:
imageio.core.get_remote_file(fname, resource_dir, force_download=True)
def _set_platform_resources(resource_dir, platform):
import imageio
# Create file to show platform
assert platform
open(op.join(resource_dir, "platform_%s" % platform), "wb")
# Load freeimage
fname = imageio.plugins.freeimage.FNAME_PER_PLATFORM[platform]
imageio.core.get_remote_file(
"freeimage/" + fname, resource_dir, force_download=True
)
class test_command(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
from imageio import testing
os.environ["IMAGEIO_NO_INTERNET"] = "1" # run tests without inet
sys.exit(testing.test_unit())
class build_with_fi(build_py):
def run(self):
# Download images and libs
import imageio
resource_dir = imageio.core.resource_dirs()[0]
_set_crossplatform_resources(resource_dir)
_set_platform_resources(resource_dir, imageio.core.get_platform())
# Build as normal
build_py.run(self)
class build_with_images(sdist):
def run(self):
# Download images
import imageio
resource_dir = imageio.core.resource_dirs()[0]
_set_crossplatform_resources(resource_dir)
# Build as normal
sdist.run(self)
# avoid pillow 8.3 because it doesn't respect numpy API
install_requires = ["numpy", "pillow != 8.3.0"]
extras_require = {
"linting": ["black", "flake8"],
"test": ["invoke", "pytest", "pytest-cov"],
"docs": ["sphinx", "numpydoc"],
"ffmpeg": ["imageio-ffmpeg", "psutil"],
"fits": ["astropy"],
"gdal": ["gdal"],
"itk": ["itk"],
}
extras_require["full"] = sorted(set(chain.from_iterable(extras_require.values())))
extras_require["dev"] = extras_require["test"] + extras_require["linting"]
setup(
cmdclass={ # 'bdist_wheel_all': bdist_wheel_all,
# 'sdist_all': sdist_all,
"build_with_images": build_with_images,
"build_with_fi": build_with_fi,
"sdist": build_with_images,
"test": test_command,
},
name=name,
version=__version__,
author="imageio contributors",
author_email="[email protected]",
license="BSD-2-Clause",
url="https://github.com/imageio/imageio",
download_url="http://pypi.python.org/pypi/imageio",
keywords="image video volume imread imwrite io animation ffmpeg",
description=description,
long_description=long_description.replace("__doc__", __doc__),
platforms="any",
provides=["imageio"],
python_requires=">=3.5",
install_requires=install_requires,
extras_require=extras_require,
packages=["imageio", "imageio.core", "imageio.plugins"],
package_dir={"imageio": "imageio"},
# Data in the package
package_data={"imageio": package_data},
entry_points={
"console_scripts": [
"imageio_download_bin=imageio.__main__:download_bin_main",
"imageio_remove_bin=imageio.__main__:remove_bin_main",
]
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Intended Audience :: Education",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
)
| bsd-2-clause | 234,741,061,991,115,100 | 27.478814 | 118 | 0.635768 | false |
TYPO3-Documentation/sphinxcontrib.t3tablerows | setup.py | 1 | 2480 | from __future__ import absolute_import
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='t3tablerows',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/development.html#single-sourcing-the-version
version='0.3.1',
description='Implement Sphinx transformation of "table-row" containers',
long_description=long_description,
# The project's main homepage.
url='https://github.com/TYPO3-Documentation/sphinxcontrib.t3tablerows',
# Author details
author='Martin Bless',
author_email='[email protected]',
# Choose your license
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: End Users/Desktop',
'Framework :: Sphinx :: Extension',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
'Topic :: Documentation',
],
# What does your project relate to?
keywords='sphinx extension transformation',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
# packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
packages=['sphinxcontrib'],
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=['sphinx'],
)
| bsd-2-clause | 3,198,431,805,503,910,000 | 33.929577 | 98 | 0.674597 | false |
ohrstrom/obp-media-preflight-api | app/preflight/models.py | 1 | 3533 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
import logging
import os
import uuid
import hashlib
from datetime import timedelta
from django.db import models
from django.db.models.signals import post_save, pre_save, post_delete
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from celery.task.control import revoke
from base.storage import OverwriteFileSystemStorage
from .tasks import preflight_check_task
RUN_ASYNC = getattr(settings, 'PREFLIGHT_RUN_ASYNC', False)
log = logging.getLogger(__name__)
def get_media_upload_path(instance, filename):
path = [instance._meta.app_label.lower()]
#path += str(instance.uuid).split('-')
path += [str(instance.uuid)]
path += [filename]
return os.path.join(*path)
class Check(models.Model):
STATUS_INIT = 0
STATUS_PENDING = 1
STATUS_PROCESSING = 2
STATUS_DONE = 3
STATUS_ERROR = 99
STATUS_CHOICES = (
(STATUS_INIT, 'Initialized'),
(STATUS_PENDING, 'Pending'),
(STATUS_PROCESSING, 'Processing'),
(STATUS_DONE, 'Done'),
(STATUS_ERROR, 'Error'),
)
status = models.PositiveSmallIntegerField(
_('Status'),
choices=STATUS_CHOICES,
default=STATUS_PENDING,
blank=False, null=False,
db_index=True,
)
# holds celery queue task id
task_id = models.CharField(
max_length=64, null=True, blank=True, editable=True,
)
uuid = models.UUIDField(
default=uuid.uuid4, editable=False, db_index=True
)
created = models.DateTimeField(
auto_now_add=True, editable=False, db_index=True
)
updated = models.DateTimeField(
auto_now=True, editable=False, db_index=True
)
remote_uri = models.URLField(
null=True, blank=False, unique=True, db_index=True
)
media_file = models.FileField(
null=True, blank=True,
storage=OverwriteFileSystemStorage(), upload_to=get_media_upload_path
)
def __str__(self):
return '{}'.format(self.uuid)
@receiver(pre_save, sender=Check)
def check_pre_save(sender, instance, **kwargs):
if not instance.pk:
instance.status = Check.STATUS_PENDING
else:
pass
@receiver(post_save, sender=Check)
def check_post_save(sender, instance, **kwargs):
if instance.status < Check.STATUS_PROCESSING:
log.debug('Check {} needs processing'.format(instance.pk))
# check for running task - terminate if found
if instance.task_id:
log.info('task {} running - need to terminate.'.format(instance.task_id))
revoke(instance.task_id, terminate=True, signal='SIGKILL')
if RUN_ASYNC:
celery_task = preflight_check_task.apply_async((instance,))
Check.objects.filter(pk=instance.pk).update(task_id=celery_task.id)
else:
# just for debuging - the non-async version
preflight_check_task(instance)
@receiver(post_delete, sender=Check)
def check_post_delete(sender, instance, **kwargs):
# check for running task - terminate if found
if instance.task_id:
log.info('task {} running - need to terminate.'.format(instance.task_id))
revoke(instance.task_id, terminate=True, signal='SIGKILL')
if instance.media_file:
if os.path.isfile(instance.media_file.path):
os.remove(instance.media_file.path)
| gpl-3.0 | 2,993,727,627,394,881,000 | 26.387597 | 85 | 0.658081 | false |
googleapis/googleapis-gen | google/cloud/videointelligence/v1/videointelligence-v1-py/google/cloud/videointelligence_v1/__init__.py | 1 | 4435 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.video_intelligence_service import VideoIntelligenceServiceClient
from .services.video_intelligence_service import VideoIntelligenceServiceAsyncClient
from .types.video_intelligence import AnnotateVideoProgress
from .types.video_intelligence import AnnotateVideoRequest
from .types.video_intelligence import AnnotateVideoResponse
from .types.video_intelligence import DetectedAttribute
from .types.video_intelligence import DetectedLandmark
from .types.video_intelligence import Entity
from .types.video_intelligence import ExplicitContentAnnotation
from .types.video_intelligence import ExplicitContentDetectionConfig
from .types.video_intelligence import ExplicitContentFrame
from .types.video_intelligence import FaceAnnotation
from .types.video_intelligence import FaceDetectionAnnotation
from .types.video_intelligence import FaceDetectionConfig
from .types.video_intelligence import FaceFrame
from .types.video_intelligence import FaceSegment
from .types.video_intelligence import LabelAnnotation
from .types.video_intelligence import LabelDetectionConfig
from .types.video_intelligence import LabelFrame
from .types.video_intelligence import LabelSegment
from .types.video_intelligence import LogoRecognitionAnnotation
from .types.video_intelligence import NormalizedBoundingBox
from .types.video_intelligence import NormalizedBoundingPoly
from .types.video_intelligence import NormalizedVertex
from .types.video_intelligence import ObjectTrackingAnnotation
from .types.video_intelligence import ObjectTrackingConfig
from .types.video_intelligence import ObjectTrackingFrame
from .types.video_intelligence import PersonDetectionAnnotation
from .types.video_intelligence import PersonDetectionConfig
from .types.video_intelligence import ShotChangeDetectionConfig
from .types.video_intelligence import SpeechContext
from .types.video_intelligence import SpeechRecognitionAlternative
from .types.video_intelligence import SpeechTranscription
from .types.video_intelligence import SpeechTranscriptionConfig
from .types.video_intelligence import TextAnnotation
from .types.video_intelligence import TextDetectionConfig
from .types.video_intelligence import TextFrame
from .types.video_intelligence import TextSegment
from .types.video_intelligence import TimestampedObject
from .types.video_intelligence import Track
from .types.video_intelligence import VideoAnnotationProgress
from .types.video_intelligence import VideoAnnotationResults
from .types.video_intelligence import VideoContext
from .types.video_intelligence import VideoSegment
from .types.video_intelligence import WordInfo
from .types.video_intelligence import Feature
from .types.video_intelligence import LabelDetectionMode
from .types.video_intelligence import Likelihood
__all__ = (
'VideoIntelligenceServiceAsyncClient',
'AnnotateVideoProgress',
'AnnotateVideoRequest',
'AnnotateVideoResponse',
'DetectedAttribute',
'DetectedLandmark',
'Entity',
'ExplicitContentAnnotation',
'ExplicitContentDetectionConfig',
'ExplicitContentFrame',
'FaceAnnotation',
'FaceDetectionAnnotation',
'FaceDetectionConfig',
'FaceFrame',
'FaceSegment',
'Feature',
'LabelAnnotation',
'LabelDetectionConfig',
'LabelDetectionMode',
'LabelFrame',
'LabelSegment',
'Likelihood',
'LogoRecognitionAnnotation',
'NormalizedBoundingBox',
'NormalizedBoundingPoly',
'NormalizedVertex',
'ObjectTrackingAnnotation',
'ObjectTrackingConfig',
'ObjectTrackingFrame',
'PersonDetectionAnnotation',
'PersonDetectionConfig',
'ShotChangeDetectionConfig',
'SpeechContext',
'SpeechRecognitionAlternative',
'SpeechTranscription',
'SpeechTranscriptionConfig',
'TextAnnotation',
'TextDetectionConfig',
'TextFrame',
'TextSegment',
'TimestampedObject',
'Track',
'VideoAnnotationProgress',
'VideoAnnotationResults',
'VideoContext',
'VideoIntelligenceServiceClient',
'VideoSegment',
'WordInfo',
)
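# Illustrative usage sketch (an assumption, not part of this generated module;
# it presumes application default credentials and a readable GCS URI):
#
#   from google.cloud import videointelligence_v1
#   client = videointelligence_v1.VideoIntelligenceServiceClient()
#   operation = client.annotate_video(request={
#       "input_uri": "gs://bucket/video.mp4",
#       "features": [videointelligence_v1.Feature.LABEL_DETECTION],
#   })
#   print(operation.result(timeout=300).annotation_results)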
| apache-2.0 | 3,950,516,235,427,323,400 | 37.232759 | 84 | 0.842165 | false |
chris-ch/myledger-online-bookkeeping | _old/server/src/oas/models.py | 1 | 16991 | """
Django model for OAS.
CREATE DATABASE IF NOT EXISTS `accounting`
CHARACTER SET utf8 COLLATE utf8_general_ci;
USE `accounting`;
-- at this point run manage.py syncdb
DROP TABLE IF EXISTS `oas_template_journal_entry`;
DROP TABLE IF EXISTS `oas_template_journal_entry_group`;
DROP TABLE IF EXISTS `oas_template_name`;
DROP TABLE IF EXISTS `oas_initial_amount`;
DROP TABLE IF EXISTS `oas_internal_investment`;
DROP TABLE IF EXISTS `oas_journal_entry`;
DROP TABLE IF EXISTS `oas_journal_entry_group`;
DROP TABLE IF EXISTS `oas_account`;
DROP TABLE IF EXISTS `oas_account_type`;
DROP TABLE IF EXISTS `oas_accounting_period`;
DROP TABLE IF EXISTS `oas_legal_entity`;
DROP TABLE IF EXISTS `oas_currency`;
CREATE TABLE `oas_account_type` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`code` varchar(1) NOT NULL,
`name` varchar(64) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `code` (`code`),
UNIQUE KEY `name` (`name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_currency` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`code` varchar(3) NOT NULL,
`name` varchar(64) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `code` (`code`),
UNIQUE KEY `name` (`name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_legal_entity` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`user_id` int(11) NOT NULL,
`currency_id` int(11) NOT NULL,
`code` varchar(32) NOT NULL,
`name` varchar(64) NOT NULL,
`description` longtext,
`is_individual` tinyint(1) NOT NULL DEFAULT '0',
PRIMARY KEY (`id`),
UNIQUE KEY `code` (`code`),
UNIQUE KEY `name` (`name`),
KEY `user_id` (`user_id`),
KEY `currency_id` (`currency_id`),
CONSTRAINT `legal_entity_ibfk_1` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`),
CONSTRAINT `legal_entity_ibfk_2` FOREIGN KEY (`currency_id`) REFERENCES `oas_currency` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_account` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`code` varchar(32) NOT NULL,
`name` varchar(192) NOT NULL,
`description` longtext,
`account_type_id` int(11) NOT NULL,
`legal_entity_id` int(11) NOT NULL,
`user_id` int(11) NOT NULL,
`parent_id` int(11) DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY (`code`, `legal_entity_id`),
UNIQUE KEY (`name`, `legal_entity_id`),
KEY `account_type_id` (`account_type_id`),
KEY `legal_entity_id` (`legal_entity_id`),
KEY `user_id` (`user_id`),
KEY `parent_id` (`parent_id`),
CONSTRAINT `account_ibfk_1` FOREIGN KEY (`account_type_id`) REFERENCES `oas_account_type` (`id`),
CONSTRAINT `account_ibfk_2` FOREIGN KEY (`legal_entity_id`) REFERENCES `oas_legal_entity` (`id`),
CONSTRAINT `account_ibfk_3` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`),
CONSTRAINT `account_ibfk_4` FOREIGN KEY (`parent_id`) REFERENCES `oas_account` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_accounting_period` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(128) NOT NULL,
`legal_entity_id` int(11) NOT NULL,
`till_date` datetime NULL,
PRIMARY KEY (`id`),
UNIQUE KEY (`name`,`legal_entity_id`),
UNIQUE KEY (`till_date`,`legal_entity_id`),
KEY (`legal_entity_id`),
CONSTRAINT FOREIGN KEY (`legal_entity_id`) REFERENCES `oas_legal_entity` (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_journal_entry_group` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`date` datetime NOT NULL,
`description` longtext NULL,
`currency_id` int(11) NOT NULL,
`accounting_period_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
KEY `currency_id` (`currency_id`),
KEY `accounting_period_id` (`accounting_period_id`),
CONSTRAINT FOREIGN KEY (`currency_id`) REFERENCES `oas_currency` (`id`),
CONSTRAINT FOREIGN KEY (`accounting_period_id`) REFERENCES `oas_accounting_period` (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_journal_entry` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`description` longtext NULL,
`ref_num` int(11) NULL,
`account_id` int(11) NOT NULL,
is_debit tinyint(1) NOT NULL,
`quantity` decimal(20,6) NOT NULL DEFAULT '1.000000',
`unit_cost` decimal(20,6) NOT NULL DEFAULT '1.000000',
`group_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
KEY `account_id` (`account_id`),
CONSTRAINT FOREIGN KEY (`account_id`) REFERENCES `oas_account` (`id`),
CONSTRAINT FOREIGN KEY (`group_id`) REFERENCES `oas_journal_entry_group` (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_internal_investment` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`account_asset_id` int(11) NOT NULL,
`account_liability_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY (`account_asset_id`,`account_liability_id`),
UNIQUE KEY (`account_asset_id`),
KEY (`account_asset_id`),
KEY (`account_liability_id`),
CONSTRAINT FOREIGN KEY (`account_asset_id`) REFERENCES `oas_account` (`id`),
CONSTRAINT FOREIGN KEY (`account_liability_id`) REFERENCES `oas_account` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_initial_amount` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`account_id` int(11) NOT NULL,
`accounting_period_id` int(11) NOT NULL,
`quantity` decimal(20,6) NOT NULL DEFAULT '1.000000',
`unit_cost` decimal(20,6) NOT NULL DEFAULT '1.000000',
PRIMARY KEY (`id`),
UNIQUE KEY (`account_id`,`accounting_period_id`),
KEY (`account_id`),
KEY (`accounting_period_id`),
CONSTRAINT FOREIGN KEY (`account_id`) REFERENCES `oas_account` (`id`),
CONSTRAINT FOREIGN KEY (`accounting_period_id`) REFERENCES `oas_accounting_period` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_template_name` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(96) NOT NULL,
`description` longtext,
`template_currency_id` int(11) NOT NULL,
`legal_entity_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
CONSTRAINT FOREIGN KEY (`template_currency_id`) REFERENCES `oas_currency` (`id`),
CONSTRAINT FOREIGN KEY (`legal_entity_id`) REFERENCES `oas_legal_entity` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_template_journal_entry_group` (
`id` int(11) NOT NULL AUTO_INCREMENT,
PRIMARY KEY (`id`),
`template_name_id` int(11) NOT NULL,
CONSTRAINT FOREIGN KEY (`template_name_id`) REFERENCES `oas_template_name` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
CREATE TABLE `oas_template_journal_entry` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`description` longtext,
`account_id` int(11) NOT NULL,
`is_debit` tinyint(1) NOT NULL,
`quantity` decimal(20,6) NOT NULL DEFAULT '1.000000',
`unit_cost` decimal(20,6) NOT NULL DEFAULT '1.000000',
`template_group_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
CONSTRAINT FOREIGN KEY (`account_id`) REFERENCES `oas_account` (`id`),
CONSTRAINT FOREIGN KEY (`template_group_id`) REFERENCES `oas_template_journal_entry_group` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
INSERT INTO oas_account_type (id,code,name) VALUES (1,'A','Asset');
INSERT INTO oas_account_type (id,code,name) VALUES (2,'L','Liability & Equity');
INSERT INTO oas_account_type (id,code,name) VALUES (3,'I','Income');
INSERT INTO oas_account_type (id,code,name) VALUES (4,'E','Expense');
INSERT INTO oas_currency (code, name) VALUES ('USD', 'US Dollar');
INSERT INTO oas_currency (code, name) VALUES ('GBP', 'Sterling');
INSERT INTO oas_currency (code, name) VALUES ('CHF', 'Swiss Franc');
INSERT INTO oas_currency (code, name) VALUES ('EUR', 'Euro');
"""
import logging
_LOG = logging.getLogger('oas.model')
from django.db import models
from django.contrib.auth.models import User
import old.server.oas.tools
CODE_ASSETS = 'A'
CODE_LIABILITIES_EQUITY = 'L'
CODE_INCOME = 'I'
CODE_EXPENSE = 'E'
#
# Custom User Model
#
#
# App model starts here
#
class AccountType(models.Model):
id = models.AutoField(primary_key=True)
code = models.CharField(unique=True, max_length=3)
name = models.CharField(unique=True, max_length=192)
class Meta:
db_table = u'oas_account_type'
def __unicode__(self):
return '%s - %s' % (self.code, self.name)
class Currency(models.Model):
id = models.AutoField(primary_key=True)
code = models.CharField(unique=True, max_length=9)
name = models.CharField(unique=True, max_length=192)
class Meta:
db_table = u'oas_currency'
def __unicode__(self):
return self.code
def build_tree(accounts):
tree = old.server.oas.tools.SimpleTreeSet()
for account in accounts:
if account.parent is None:
if not tree.has_node(account):
tree.add_root(account)
else:
tree.create_parent_child(account.parent, account)
return tree.group()
class LegalEntity(models.Model):
id = models.AutoField(primary_key=True)
code = models.CharField(unique=True, max_length=96)
name = models.CharField(unique=True, max_length=192)
description = models.TextField(blank=True)
is_individual = models.IntegerField(null=False, default=False, blank=True)
user = models.ForeignKey(User, related_name='legal_entities', on_delete=models.PROTECT)
currency = models.ForeignKey(Currency, related_name='+', null=False, blank=False, on_delete=models.PROTECT)
class Meta:
db_table = u'oas_legal_entity'
def get_asset_accounts(self):
accounts = self.accounts.filter(account_type__code=CODE_ASSETS)
as_tree = build_tree(accounts)
return as_tree
def get_liability_accounts(self):
accounts = self.accounts.filter(account_type__code=CODE_LIABILITIES_EQUITY)
as_tree = build_tree(accounts)
return as_tree
def get_income_accounts(self):
accounts = self.accounts.filter(account_type__code=CODE_INCOME)
as_tree = build_tree(accounts)
return as_tree
def get_expense_accounts(self):
accounts = self.accounts.filter(account_type__code=CODE_EXPENSE)
as_tree = build_tree(accounts)
return as_tree
def clean_journal_entries(self, account_code=None):
accounts = Account.objects.filter(legal_entity=self)
if account_code is not None:
accounts = accounts.filter(code=account_code)
for account in accounts:
JournalEntry.objects.filter(account=account).delete()
def __unicode__(self):
return self.code
class Account(models.Model):
id = models.AutoField(primary_key=True)
code = models.CharField(unique=True, max_length=32)
name = models.CharField(unique=True, max_length=192)
description = models.TextField(blank=True)
account_type = models.ForeignKey(AccountType, related_name='+', on_delete=models.PROTECT)
legal_entity = models.ForeignKey(LegalEntity, related_name='accounts', on_delete=models.PROTECT)
user = models.ForeignKey(User, related_name='accounts', on_delete=models.PROTECT)
parent = models.ForeignKey('self', null=True, blank=True, related_name='children', on_delete=models.PROTECT)
class Meta:
db_table = u'oas_account'
unique_together = (('legal_entity', 'code'), ('legal_entity', 'name'))
def update_account_type(self, account_type, visited=None):
"""
Because of redundancy in db model,
children account types need to be updated
"""
if visited is None:
visited = set()
_LOG.debug('visited: %s', visited)
_LOG.debug('node: %s', self)
assert self not in visited, 'tree not consistent: loop detected on %s' % (self)
visited.add(self)
self.account_type = account_type
# recursive call updating children account types
for child in self.children.all():
child.update_account_type(account_type, visited)
def __unicode__(self):
return '%s - %s' % (self.code, self.name)
class InternalInvestment(models.Model):
id = models.AutoField(primary_key=True)
account_asset = models.ForeignKey(Account, related_name='owner_account', unique=True, on_delete=models.PROTECT)
account_liability = models.ForeignKey(Account, related_name='investment_account', on_delete=models.PROTECT)
class Meta:
db_table = u'oas_internal_investment'
unique_together = (('account_asset', 'account_liability'),)
class AccountingPeriod(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(unique=True, max_length=128, null=False)
till_date = models.DateTimeField(null=True)
legal_entity = models.ForeignKey(LegalEntity, null=False, related_name='periods', on_delete=models.PROTECT)
class Meta:
db_table = u'oas_accounting_period'
        unique_together = (('legal_entity', 'name'), ('legal_entity', 'till_date'))
class JournalEntryGroup(models.Model):
id = models.AutoField(primary_key=True)
date = models.DateTimeField(null=False)
description = models.TextField(null=True)
currency = models.ForeignKey(Currency, related_name='+', null=False, on_delete=models.PROTECT)
accounting_period = models.ForeignKey(AccountingPeriod, related_name='+', null=False, on_delete=models.PROTECT)
class Meta:
db_table = u'oas_journal_entry_group'
def __unicode__(self):
return '<group: %s, %s>' % (self.date, self.description)
class JournalEntry(models.Model):
id = models.AutoField(primary_key=True)
description = models.TextField(null=True)
ref_num = models.IntegerField(null=True, default=False, blank=True)
quantity = models.DecimalField(null=False, default=1.0, max_digits=22, decimal_places=6, blank=True)
unit_cost = models.DecimalField(null=False, default=1.0, max_digits=22, decimal_places=6, blank=True)
is_debit = models.IntegerField(null=False, default=False, blank=True)
account = models.ForeignKey(Account, related_name='entries', null=False, on_delete=models.PROTECT)
group = models.ForeignKey(JournalEntryGroup, related_name='entries', null=False, on_delete=models.PROTECT)
class Meta:
db_table = u'oas_journal_entry'
def __unicode__(self):
account_type = ('credit', 'debit')[self.is_debit]
return '%s' % str([account_type, self.description, self.quantity * self.unit_cost, self.group])
class InitialAmount(models.Model):
id = models.AutoField(primary_key=True)
quantity = models.DecimalField(null=False, default=1.0, max_digits=22, decimal_places=6, blank=True)
unit_cost = models.DecimalField(null=False, default=1.0, max_digits=22, decimal_places=6, blank=True)
account = models.ForeignKey(Account, related_name='+', null=False, on_delete=models.PROTECT)
accounting_period = models.ForeignKey(AccountingPeriod, related_name='initial_amounts', null=False,
on_delete=models.PROTECT)
class Meta:
db_table = u'oas_initial_amount'
unique_together = (('account', 'accounting_period'),)
def __unicode__(self):
return '%s' % str([self.accounting_period, self.account, self.quantity * self.unit_cost])
class TemplateSet(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(unique=True, max_length=192)
description = models.TextField(null=True)
template_currency = models.ForeignKey(Currency, related_name='+', null=False, on_delete=models.PROTECT)
legal_entity = models.ForeignKey(LegalEntity, null=False, related_name='templates', on_delete=models.PROTECT)
class Meta:
db_table = u'oas_template_name'
unique_together = (('legal_entity', 'name'),)
def __unicode__(self):
return '<template set: %s>' % (self.name)
class TemplateJournalEntryGroup(models.Model):
id = models.AutoField(primary_key=True)
template_set = models.ForeignKey(TemplateSet, db_column='template_name_id', related_name='templates', null=False,
on_delete=models.PROTECT)
class Meta:
db_table = u'oas_template_journal_entry_group'
def __unicode__(self):
return '<group template: %s>' % (self.template_set)
class TemplateJournalEntry(models.Model):
id = models.AutoField(primary_key=True)
description = models.TextField(null=True)
quantity = models.DecimalField(null=False, default=1.0, max_digits=22, decimal_places=6, blank=True)
unit_cost = models.DecimalField(null=False, default=1.0, max_digits=22, decimal_places=6, blank=True)
is_debit = models.IntegerField(null=False, default=False, blank=True)
account = models.ForeignKey(Account, related_name='template_entries', null=False, on_delete=models.PROTECT)
template_group = models.ForeignKey(TemplateJournalEntryGroup, related_name='entries', null=False,
on_delete=models.PROTECT)
class Meta:
db_table = u'oas_template_journal_entry'
def __unicode__(self):
account_type = ('credit', 'debit')[self.is_debit]
return '%s' % str([account_type, self.description, self.quantity * self.unit_cost, self.template_group])
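# Illustrative ORM usage sketch (an assumption; it presumes a Django `user` object
# and the fixture rows from the SQL above, and the codes/names are hypothetical):
#
#   usd = Currency.objects.get(code='USD')
#   entity = LegalEntity.objects.create(code='ACME', name='Acme Ltd', user=user, currency=usd)
#   assets = AccountType.objects.get(code=CODE_ASSETS)
#   cash = Account.objects.create(code='1000', name='Cash', account_type=assets,
#                                 legal_entity=entity, user=user)
#   period = AccountingPeriod.objects.create(name='FY2013', legal_entity=entity)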
| mit | 1,577,952,250,280,129,500 | 38.331019 | 117 | 0.682244 | false |
sadikovi/queue | test/__init__.py | 1 | 2186 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2016 sadikovi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import sys
import unittest
# Select what tests to run
RUN_TESTS = {
"test.test_const": True,
"test.test_queue": True,
"test.test_util": True,
"test.test_context": True,
"test.test_scheduler": True,
"test.test_spark": True,
"test.test_simple": True,
"test.test_submission": True
}
suites = unittest.TestSuite()
# Add individual test module
def addTests(module_name):
if module_name in RUN_TESTS and RUN_TESTS[module_name]:
module = importlib.import_module(module_name)
batch = loadSuites(module)
suites.addTest(batch)
else:
print "@skip: '%s' tests" % module_name
# Load test suites for module
def loadSuites(module):
gsuite = unittest.TestSuite()
for suite in module.suites():
print "Adding %s" % suite
gsuite.addTest(unittest.TestLoader().loadTestsFromTestCase(suite))
return gsuite
def collectSystemTests():
for test_name in RUN_TESTS.keys():
addTests(test_name)
def main():
print ""
print "== Gathering tests info =="
print "-" * 70
collectSystemTests()
print ""
print "== Running tests =="
print "-" * 70
results = unittest.TextTestRunner(verbosity=2).run(suites)
num = len([x for x in RUN_TESTS.values() if not x])
print "%s Number of test modules skipped: %d" %("OK" if num == 0 else "WARN", num)
print ""
# Fail if there is at least 1 error or failure
if results and len(results.failures) == 0 and len(results.errors) == 0:
return 0
else:
return 1
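# Entry point added for illustration (an assumption; upstream may invoke main()
# from an external runner instead of executing this package directly):
if __name__ == "__main__":
    sys.exit(main())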
| apache-2.0 | 6,212,667,995,313,246,000 | 28.146667 | 86 | 0.666057 | false |
devilry/devilry-django | devilry/devilry_group/tests/test_feedbackfeed_builders/test_feedbackfeed_sidebarbuilder.py | 1 | 2101 | # Django imports
from django.conf import settings
from django.test import TestCase
from django.utils import timezone
from model_bakery import baker
# Devilry imports
from devilry.devilry_group import devilry_group_baker_factories
from devilry.devilry_group.feedbackfeed_builder import builder_base
from devilry.devilry_group.feedbackfeed_builder.feedbackfeed_sidebarbuilder import FeedbackFeedSidebarBuilder
from devilry.devilry_group import models as group_models
from devilry.devilry_dbcache.customsql import AssignmentGroupDbCacheCustomSql
class TestFeedbackfeedSidebarBuilder(TestCase):
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_num_queries(self):
# Must be refactored
# Test that the number of queries performed is manageable
testuser = baker.make(settings.AUTH_USER_MODEL)
testgroup = baker.make('core.AssignmentGroup')
testassignment = testgroup.assignment
testfeedbackset = devilry_group_baker_factories.feedbackset_first_attempt_published(group=testgroup)
candidate = baker.make('core.Candidate', assignment_group=testgroup)
baker.make('core.Candidate', assignment_group=testgroup, _quantity=100)
baker.make('core.Examiner', assignmentgroup=testgroup, _quantity=100)
testcomment = baker.make('devilry_group.GroupComment',
feedback_set=testfeedbackset,
user=candidate.relatedstudent.user)
baker.make('devilry_comment.CommentFile',
comment=testcomment,
_quantity=100)
with self.assertNumQueries(5):
feedbackset_queryset = builder_base.get_feedbackfeed_builder_queryset(testgroup, testuser, 'unused')
sidebarbuilder = FeedbackFeedSidebarBuilder(
assignment=testassignment,
group=testgroup,
feedbacksets=feedbackset_queryset)
sidebarbuilder.build()
sidebarbuilder.get_as_list()
self.assertEqual(1, group_models.FeedbackSet.objects.count()) | bsd-3-clause | 577,152,114,106,916,900 | 45.711111 | 112 | 0.707758 | false |
mignev/startappsync | gitconfig/core.py | 1 | 3702 | import os
import sys
from re import search
from gitconfig.config import (
ConfigDict,
ConfigFile,
)
if sys.version_info < (2, 7):
from ordereddict import OrderedDict
from collections import MutableMapping
else:
from collections import (
OrderedDict,
MutableMapping,
)
class GitRepoNotFoundError(Exception): pass
class GitConfig():
def __init__(self,**kwargs):
self.path = kwargs.get('path', None)
self.file = kwargs.get('file', None)
if self.path:
if os.path.exists(self.path):
config_path = self.detect_git_config(self.path)
if os.path.exists(config_path):
self.config_path = config_path
self.config = ConfigFile.from_path(config_path)
else:
raise GitRepoNotFoundError(self.path)
else:
raise IOError(self.path)
else:
self.config = ConfigFile.from_file(self.file)
def detect_git_config(self, path):
config_path = ""
if search(r'\.git/config', path):
config_path = path
elif search(r'\.git', path):
config_path = "{0}/config".format(path)
else:
config_path = "{0}/.git/config".format(path)
return config_path
def has_remotes(self):
return self.has_section('remote')
def has_remote(self, remote_name):
return self.has_section('remote', remote_name)
def has_section(self, section_type, section_name = ''):
config_sections = self.config.itersections()
"""
These variables are used in return statements only
They are used to experiment with readability
"""
yes_there_is_section_with_this_name = yes_this_section_exists = True
sorry_search_section_doest_not_exist = False
for section in config_sections:
this_section_type = section[0]
search_for_section_with_spcific_name = (section_name != '')
if not search_for_section_with_spcific_name:
if this_section_type == section_type:
return yes_this_section_exists # True
else:
try:
this_section_name = section[1]
if this_section_name == section_name:
return yes_there_is_section_with_this_name # True
except IndexError:
""" These type of sections are like [core], [alias], [user]"""
continue
return sorry_search_section_doest_not_exist # False
@property
def remotes(self):
config_sections = self.config.items()
remotes = OrderedDict()
for section in config_sections:
section_type = section[0][0]
if section_type == 'remote':
remote_name = section[0][1]
remote_properties = section[1]
remotes[remote_name] = remote_properties
return remotes
@property
def sections(self):
config_sections = self.config.items()
return [section[0][0] for section in config_sections]
def set(self, section, key, value):
return self.config.set((section,), key, value)
def get(self, section, key):
section_details = section.split('.')
if len(section_details) == 2:
section_type, section_name = section_details
else:
section_type, section_name = (section, '')
return self.config.get((section_type, section_name), key)
def save(self):
return self.config.write_to_path(self.config_path)
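# Example usage sketch (illustrative only; it assumes a repository at the given
# path that defines an "origin" remote):
#
#   config = GitConfig(path='/path/to/repo')
#   if config.has_remote('origin'):
#       print(config.get('remote.origin', 'url'))
#   config.set('user', 'name', 'Jane Doe')
#   config.save()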
| mit | -6,701,524,496,558,599,000 | 28.616 | 82 | 0.567261 | false |
tuxxi/OpenBurn | openburn/application/propellant_db.py | 1 | 2142 | from typing import Dict
import jsonpickle
from qtpy.QtCore import QObject, Signal
from openburn.core.propellant import OpenBurnPropellant
class PropellantDatabase(QObject):
database_ready = Signal()
propellant_added = Signal(str)
propellant_edited = Signal(str)
propellant_removed = Signal(str)
def __init__(self, filename: str = None):
super(PropellantDatabase, self).__init__()
# Dict ordered by propellant name : propellant
        self.propellants: Dict[str, OpenBurnPropellant] = {}
if filename is not None:
self.load_database(filename)
def propellant_names(self):
        return [prop.name for prop in self.propellants.values()]
def load_database(self, filename: str):
self.clear_database()
with open(filename, 'r') as f:
data = f.read()
if len(data) > 0:
self.propellants = jsonpickle.decode(data)
self.database_filename: str = filename
self.database_ready.emit()
def save_database(self):
with open(self.database_filename, 'w+') as f:
if len(self.propellants) > 0:
jsonpickle.set_encoder_options('json', sort_keys=True, indent=4)
f.write(jsonpickle.encode(self.propellants))
def clear_database(self) -> None:
self.propellants.clear()
def add_propellant(self, propellant: OpenBurnPropellant) -> None:
self.propellants[propellant.name] = propellant
self.propellant_added.emit(propellant.name) # emit signal
def remove_propellant(self, key: str) -> None:
"""
Removes a propellant from the database
:param key: the propellant name to be removed
"""
self.propellants.pop(key)
self.propellant_removed.emit(key) # emit signal
def update_propellant(self, key: str, new_prop: OpenBurnPropellant) -> None:
"""Updates the propellant database
:param key: the old propellant's name
:param new_prop: the new propellant, to replace old_prop
"""
self.propellants[key] = new_prop
self.propellant_edited.emit(key)
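# Example usage sketch (illustrative only; it assumes an OpenBurnPropellant
# instance `prop` and a writable JSON database file):
#
#   db = PropellantDatabase('propellants.json')
#   db.add_propellant(prop)               # emits propellant_added with the name
#   db.update_propellant(prop.name, prop) # emits propellant_edited
#   db.save_database()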
| gpl-3.0 | 4,604,087,554,832,698,400 | 32.46875 | 80 | 0.637722 | false |
agancsos/python | LinkedInSort_in_Python.py | 1 | 2716 | #!/bin/python
class linkedinSort:
    project_name = ""
    project_month = ""
    project_year = 0
    def month_to_int(self):
        # Map this project's month name to its number (1-12); 0 means unknown.
        month = self.project_month
        if month == "January":
            return 1
        if month == "February":
            return 2
        if month == "March":
            return 3
        if month == "April":
            return 4
        if month == "May":
            return 5
        if month == "June":
            return 6
        if month == "July":
            return 7
        if month == "August":
            return 8
        if month == "September":
            return 9
        if month == "October":
            return 10
        if month == "November":
            return 11
        if month == "December":
            return 12
        return 0
    def set_from_other(self, other):
        self.project_name = other.project_name
        self.project_month = other.project_month
        self.project_year = other.project_year
    def print_dates(self):
        # Print the numeric month of this project.
        print self.month_to_int()
    def set_dates(self, name, month, year):
        self.project_name = name
        self.project_month = month
        self.project_year = year
    def linkedSort(self):
        return self
    def sortDates(self, dates_array):
        # Exchange sort: order projects by year, then by month within the same year.
        for sorting in dates_array:
            for sorting2 in dates_array:
                if sorting.project_year < sorting2.project_year:
                    temp_linkedin = linkedinSort()
                    temp_linkedin.set_from_other(sorting)
                    sorting.set_from_other(sorting2)
                    sorting2.set_from_other(temp_linkedin)
                if sorting.project_year == sorting2.project_year:
                    if sorting.month_to_int() < sorting2.month_to_int():
                        temp_linkedin = linkedinSort()
                        temp_linkedin.set_from_other(sorting)
                        sorting.set_from_other(sorting2)
                        sorting2.set_from_other(temp_linkedin)
##int main
to_be_sorted = []
project_dates = ["Sample1 November 2010", "PaperWorks October 2012", "ResumeBuuilder October 2013", "Resume_Sampler September 2013",
    "iNCREPYT_Alpha August 2013", "LangSim November 2013", "iNCREPTY_LT_Alpha August 2013", "DOY April 2013",
    "JokeBook January 2013", "HIRED January 2014", "JokeBook2 January 2014", "Pic2Text January 2014", "BlackBook January 2014",
    "LangSim_LT February 2014", "MovieBook February 2014", "Geode October 2012", "Star_wars_Roll-Ups", "Students.py October 2013"]
i = 0
for dates in project_dates:
    test_linkedin = linkedinSort()
    temp_comp = dates.split(" ")
    if len(temp_comp) < 3:
        # Skip entries that are not in "<name> <month> <year>" form.
        continue
    temp_name = temp_comp[0]
    temp_month = temp_comp[1]
    temp_year = int(temp_comp[2])
    test_linkedin.set_dates(temp_name.replace("_", " "), temp_month, temp_year)
    to_be_sorted.insert(i, test_linkedin)
    i += 1
linkedinSort().sortDates(to_be_sorted)
for project in to_be_sorted:
    print project.project_name, project.project_month, project.project_year
| mit | 8,169,540,432,007,339,000 | 28.188889 | 129 | 0.662003 | false |
jocelynj/weboob | weboob/applications/qhavesex/qhavesex.py | 1 | 1273 | # -*- coding: utf-8 -*-
# Copyright(C) 2010 Romain Bignon
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from weboob.capabilities.dating import ICapDating
from weboob.tools.application.qt import QtApplication
from .main_window import MainWindow
class QHaveSex(QtApplication):
APPNAME = 'qhavesex'
VERSION = '0.4'
COPYRIGHT = 'Copyright(C) 2010 Romain Bignon'
STORAGE_FILENAME = 'dating.storage'
def main(self, argv):
self.create_storage(self.STORAGE_FILENAME)
self.load_backends(ICapDating)
self.main_window = MainWindow(self.config, self.weboob)
self.main_window.show()
return self.weboob.loop()
| gpl-3.0 | 7,059,353,888,160,273,000 | 34.361111 | 76 | 0.732129 | false |
mwrlabs/veripy | contrib/rfc3633/rr/basic_message_exchange.py | 1 | 3313 | from contrib.rfc3315.constants import *
from contrib.rfc3633.dhcpv6_pd import DHCPv6PDHelper
from scapy.all import *
from veripy.assertions import *
from veripy.models import IPAddress
class BasicMessageExchangeTestCase(DHCPv6PDHelper):
"""
DHCPv6-PD Basic Message Exchange
Verify that a device can properly interoperate while using DHCPv6-PD
@private
Source: IPv6 Ready DHCPv6 Interoperability Test Suite (Section 4.1)
"""
restart_uut = True
def run(self):
self.logger.info("Waiting for a DHCPv6 Solicit message, with a IA for Prefix Delegation...")
r1 = self.node(1).received(src=self.target(1).link_local_ip(), dst=AllDHCPv6RelayAgentsAndServers, type=DHCP6_Solicit)
assertGreaterThanOrEqualTo(1, len(r1), "expected to receive one-or-more DHCPv6 Solicit messages")
assertHasLayer(DHCP6OptIA_PD, r1[0], "expected the DHCPv6 Solicit message to contain an IA for Prefix Delegation")
self.logger.info("Sending a DHCPv6 Advertise message, offering a prefix...")
self.node(1).send(
IPv6(src=str(self.node(1).link_local_ip()), dst=str(self.target(1).link_local_ip()))/
UDP(sport=DHCPv6DestPort, dport=DHCPv6SourcePort)/
self.build_dhcpv6_pd_advertise(r1[0], self.node(1), self.target(1), T1=50, T2=80))
self.logger.info("Checking for a DHCPv6 Request message...")
r2 = self.node(1).received(src=self.target(1).link_local_ip(), dst=AllDHCPv6RelayAgentsAndServers, type=DHCP6_Request)
assertGreaterThanOrEqualTo(1, len(r2), "expected to receive a DHCPv6 Request message")
assertHasLayer(DHCP6OptIA_PD, r2[0], "expected the DHCPv6 Request to contain an IA for Prefix Delegation")
assertHasLayer(DHCP6OptIAPrefix, r2[0], "expected the DHCPv6 Request to contain an IA Prefix")
assertEqual(IPAddress.identify(self.node(1).iface(0).link.v6_prefix), r2[0][DHCP6OptIAPrefix].prefix, "expected the requested Prefix to match the advertised one")
assertEqual(self.node(1).iface(0).link.v6_prefix_size, r2[0][DHCP6OptIAPrefix].plen, "expected the requested Prefix Length to match the advertised one")
self.logger.info("Sending a DHCPv6 Reply message, with the offered IA for Prefix Delegation...")
self.node(1).send(
IPv6(src=str(self.node(1).link_local_ip()), dst=str(self.target(1).link_local_ip()))/
UDP(sport=DHCPv6DestPort, dport=DHCPv6SourcePort)/
self.build_dhcpv6_pd_reply(r2[0], self.node(1), self.target(1)))
self.ui.wait(50)
self.node(1).clear_received()
self.logger.info("Waiting for a DHCPv6 Renew message, with a IA for Prefix Delegation...")
r3 = self.node(1).received(src=self.target(1).link_local_ip(), dst=AllDHCPv6RelayAgentsAndServers, type=DHCP6_Renew)
assertGreaterThanOrEqualTo(1, len(r3), "expected to receive one-or-more DHCPv6 Renew messages")
assertHasLayer(DHCP6OptIA_PD, r3[0], "expected the DHCPv6 Renew to contain an IA for Prefix Delegation")
assertHasLayer(DHCP6OptIAPrefix, r3[0], "expected the DHCPv6 Renew to contain an IA Prefix")
assertEqual(r2[0][DHCP6OptIAPrefix].prefix, r3[0][DHCP6OptIAPrefix].prefix, "expected the original prefix to be renewed")
| gpl-3.0 | -1,700,819,788,144,245,000 | 58.178571 | 170 | 0.699064 | false |
sidorov-si/ngser | filter_gbk_by_cds.py | 1 | 3272 | #!/usr/bin/env python
"""
Filter GBK file by CDS: retain only those records which have correct CDS.
Correct CDS must:
1) contain a 'product' field;
2) have length that is a multiple of 3;
3) have start and stop codons.
Usage:
filter_gbk_by_cds.py -i <input_GBK_file> -o <output_GBK_file>
Options:
-h --help Show this screen.
--version Show version.
-i <input_GBK_file> Input GBK file.
-o <output_GBK_file> Output GBK file.
"""
import sys
print
modules = ["docopt", "os", "Bio"]
exit_flag = False
for module in modules:
try:
__import__(module)
except ImportError:
exit_flag = True
sys.stderr.write("Error: Python module " + module + " is not installed.\n")
if exit_flag:
sys.stderr.write("You can install these modules with a command: pip install <module>\n")
sys.stderr.write("(Administrator privileges may be required.)\n")
sys.exit(1)
from docopt import docopt
from os.path import exists
from os.path import isfile
from Bio import SeqIO
def filter_gbk(input_gbk_filename, output_gbk_filename):
print 'Input GBK file: ', input_gbk_filename
print 'Output GBK file: ', output_gbk_filename
with open(output_gbk_filename, 'w') as outfile:
print 'Start filtering.'
total_count = 0
filtered_count = 0
for index, record in enumerate(SeqIO.parse(open(input_gbk_filename), "genbank")):
cds_exists = False
for number, feature in enumerate(record.features):
if feature.type == 'CDS' and 'product' in feature.qualifiers:
cds_exists = True
try:
_ = feature.extract(record.seq).translate(cds = True)
except Exception, e:
# something is wrong with this CDS (start and/or stop codons are missing,
                        # or the length of CDS is not a multiple of 3)
print 'Record', record.id, ':', str(e), '=> Filtered out.'
filtered_count += 1
continue # just take the next locus
SeqIO.write(record, outfile, "genbank")
if not cds_exists:
print 'Record', record.id, ':', 'No CDS => Filtered out.'
filtered_count += 1
if index % 100 == 0 and index != 0:
print index, 'records are processed.'
total_count = index + 1
print 'Finished filtering.'
print total_count, 'records were processed.'
print filtered_count, 'of them were filtered out.'
print str(total_count - filtered_count), 'records remain.'
if __name__ == '__main__':
arguments = docopt(__doc__, version='filter_gbk_by_cds 0.2')
input_gbk_filename = arguments["-i"]
if not exists(input_gbk_filename):
print "Error: Can't find an input GBK file: no such file '" + \
input_gbk_filename + "'. Exit.\n"
sys.exit(1)
if not isfile(input_gbk_filename):
print "Error: Input GBK file must be a regular file. " + \
"Something else given. Exit.\n"
sys.exit(1)
output_gbk_filename = arguments["-o"].rstrip('/')
filter_gbk(input_gbk_filename, output_gbk_filename)
| gpl-2.0 | -5,694,534,526,533,918,000 | 34.565217 | 97 | 0.587408 | false |
rolisz/walter_experiments | motion/control.py | 1 | 1293 | from ssc32 import *
from time import sleep
from smooth import getPositions
# Run with sudo
ssc = SSC32('/dev/ttyUSB0', 115200)
ssc[0].degrees = 20
ssc[0].max = 2500
ssc[0].min = 500
ssc[0].deg_max = +90.0
ssc[0].deg_min = -90.0
#TODO: fix library so it doesn't take 100ms for the first instruction
# And which overrides the current command even if it has the same targetDegs
def moveTo(motor, mi, time, targetDegs, dryRun=True):
currDegs = motor[mi].degrees
motor[mi].degrees = targetDegs
if dryRun:
print time, motor[mi].degrees
else:
motor.commit(time*1000)
sleep(time)
def smoothMoveTo(motor, mi, time, targetDegs, dryRun=True):
freq = 100.0
timePerStep = time/freq
currDegs = motor[mi].degrees
distToGo = targetDegs - currDegs
for pos in getPositions(currDegs, targetDegs, freq):
moveTo(motor, mi, timePerStep, pos, dryRun)
#elbow: 35 -> -100
#ssc[0].degrees = 0
#ssc.commit(4000)
ssc[0].degrees = -30
ssc.commit(1000)
x=-30
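# Toggle the servo between +30 and -30 degrees each time Enter is pressed,
# alternating a direct move with a smoothed move (dryRun=True only prints angles).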
while True:
k = raw_input()
if x < 30:
x = 30
moveTo(ssc, 0, 1, x, True)
else:
x = -30
smoothMoveTo(ssc, 0, 1, x, True)
ssc.close()
#
#minus = -1
#while True:
# ssc[0].degrees = minus * 20
# ssc.commit()
# minus *= -1
# sleep(4)
| mit | 2,671,244,723,943,717,400 | 22.089286 | 76 | 0.63109 | false |
pghant/big-theta | scraper/EquationScraper/pipelines.py | 1 | 3154 | # -*- coding: utf-8 -*-
from py2neo import authenticate, Graph, Node, Relationship
from scrapy import signals, exceptions
from scrapy.exporters import JsonLinesItemExporter, PprintItemExporter
from .latex import utils as latexutils
class EquationscraperPipeline(object):
def __init__(self):
self.jsl_exporter = None
self.pprnt_exporter = None
self.files = {}
authenticate('localhost:7474', 'neo4j', 'big-theta-team')
self.graph = Graph('localhost:7474/db/data')
@classmethod
def from_crawler(cls, crawler):
pipeline = cls()
crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
return pipeline
def spider_opened(self, spider):
file_pprnt = open('%s_pprint-items0' % spider.name, 'w+b', )
file_jsl = open('%s_json-items0' % spider.name, 'w+b', )
self.jsl_exporter = JsonLinesItemExporter(file_jsl)
self.pprnt_exporter = PprintItemExporter(file_pprnt)
self.files[spider] = [file_pprnt, file_jsl]
self.pprnt_exporter.indent = 2
self.pprnt_exporter.start_exporting()
self.jsl_exporter.start_exporting()
def spider_closed(self, spider):
self.pprnt_exporter.finish_exporting()
self.jsl_exporter.finish_exporting()
for f in self.files[spider]:
f.close()
def process_item(self, item, spider):
if spider.settings.getbool("EXPORT_JSON"):
self.pprnt_exporter.export_item(item)
self.jsl_exporter.export_item(item)
node_equation_label = 'EQUATION'
node_subject_label = 'SUBJECT'
link_relation = 'LINKS_TO'
page_relation = 'SAME_PAGE_AS'
item_array = [item['last_item'].copy(), item.copy()]
subject_nodes_array = []
for idx, elem in enumerate(item_array):
subject_nodes_array.append(Node(node_subject_label,
title=item_array[idx]['title'],
url=item_array[idx]['url'],
categories=item_array[idx]['categories']))
for expression in elem['maths']:
expression = latexutils.strip_styles(expression)
if latexutils.contains_equality_command(expression):
latex_equation_node = Node(node_equation_label,
name='Equation<' + item_array[idx]['title'] + '>',
equation=expression)
self.graph.merge(Relationship(subject_nodes_array[idx],
page_relation,
latex_equation_node,
distance=0))
self.graph.merge(Relationship(subject_nodes_array[0], link_relation, subject_nodes_array[1],
distance=item_array[1]['link_dist']))
del item
raise exceptions.DropItem
| mit | -8,319,851,079,591,015,000 | 35.674419 | 100 | 0.55929 | false |
Teagan42/home-assistant | homeassistant/components/homekit_controller/config_flow.py | 1 | 14739 | """Config flow to configure homekit_controller."""
import json
import logging
import os
import re
import homekit
from homekit.controller.ip_implementation import IpPairing
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.core import callback
from .connection import get_accessory_name, get_bridge_information
from .const import DOMAIN, KNOWN_DEVICES
HOMEKIT_IGNORE = ["Home Assistant Bridge"]
HOMEKIT_DIR = ".homekit"
PAIRING_FILE = "pairing.json"
PIN_FORMAT = re.compile(r"^(\d{3})-{0,1}(\d{2})-{0,1}(\d{3})$")
_LOGGER = logging.getLogger(__name__)
def load_old_pairings(hass):
"""Load any old pairings from on-disk json fragments."""
old_pairings = {}
data_dir = os.path.join(hass.config.path(), HOMEKIT_DIR)
pairing_file = os.path.join(data_dir, PAIRING_FILE)
    # Find any pairings created with HA 0.85 / 0.86
if os.path.exists(pairing_file):
with open(pairing_file) as pairing_file:
old_pairings.update(json.load(pairing_file))
# Find any pairings created in HA <= 0.84
if os.path.exists(data_dir):
for device in os.listdir(data_dir):
if not device.startswith("hk-"):
continue
alias = device[3:]
if alias in old_pairings:
continue
with open(os.path.join(data_dir, device)) as pairing_data_fp:
old_pairings[alias] = json.load(pairing_data_fp)
return old_pairings
def normalize_hkid(hkid):
"""Normalize a hkid so that it is safe to compare with other normalized hkids."""
return hkid.lower()
@callback
def find_existing_host(hass, serial):
"""Return a set of the configured hosts."""
for entry in hass.config_entries.async_entries(DOMAIN):
if entry.data["AccessoryPairingID"] == serial:
return entry
def ensure_pin_format(pin):
"""
Ensure a pin code is correctly formatted.
Ensures a pin code is in the format 111-11-111. Handles codes with and without dashes.
If incorrect code is entered, an exception is raised.
"""
match = PIN_FORMAT.search(pin)
if not match:
        raise homekit.exceptions.MalformedPinError(f"Invalid PIN code {pin}")
return "{}-{}-{}".format(*match.groups())
@config_entries.HANDLERS.register(DOMAIN)
class HomekitControllerFlowHandler(config_entries.ConfigFlow):
"""Handle a HomeKit config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize the homekit_controller flow."""
self.model = None
self.hkid = None
self.devices = {}
self.controller = homekit.Controller()
self.finish_pairing = None
async def async_step_user(self, user_input=None):
"""Handle a flow start."""
errors = {}
if user_input is not None:
key = user_input["device"]
self.hkid = self.devices[key]["id"]
self.model = self.devices[key]["md"]
await self.async_set_unique_id(
normalize_hkid(self.hkid), raise_on_progress=False
)
return await self.async_step_pair()
all_hosts = await self.hass.async_add_executor_job(self.controller.discover, 5)
self.devices = {}
for host in all_hosts:
status_flags = int(host["sf"])
paired = not status_flags & 0x01
if paired:
continue
self.devices[host["name"]] = host
if not self.devices:
return self.async_abort(reason="no_devices")
return self.async_show_form(
step_id="user",
errors=errors,
data_schema=vol.Schema(
{vol.Required("device"): vol.In(self.devices.keys())}
),
)
async def async_step_unignore(self, user_input):
"""Rediscover a previously ignored discover."""
unique_id = user_input["unique_id"]
await self.async_set_unique_id(unique_id)
records = await self.hass.async_add_executor_job(self.controller.discover, 5)
for record in records:
if normalize_hkid(record["id"]) != unique_id:
continue
return await self.async_step_zeroconf(
{
"host": record["address"],
"port": record["port"],
"hostname": record["name"],
"type": "_hap._tcp.local.",
"name": record["name"],
"properties": {
"md": record["md"],
"pv": record["pv"],
"id": unique_id,
"c#": record["c#"],
"s#": record["s#"],
"ff": record["ff"],
"ci": record["ci"],
"sf": record["sf"],
"sh": "",
},
}
)
return self.async_abort(reason="no_devices")
async def async_step_zeroconf(self, discovery_info):
"""Handle a discovered HomeKit accessory.
This flow is triggered by the discovery component.
"""
# Normalize properties from discovery
# homekit_python has code to do this, but not in a form we can
# easily use, so do the bare minimum ourselves here instead.
properties = {
key.lower(): value for (key, value) in discovery_info["properties"].items()
}
# The hkid is a unique random number that looks like a pairing code.
# It changes if a device is factory reset.
hkid = properties["id"]
model = properties["md"]
name = discovery_info["name"].replace("._hap._tcp.local.", "")
status_flags = int(properties["sf"])
paired = not status_flags & 0x01
# The configuration number increases every time the characteristic map
# needs updating. Some devices use a slightly off-spec name so handle
# both cases.
try:
config_num = int(properties["c#"])
except KeyError:
_LOGGER.warning(
"HomeKit device %s: c# not exposed, in violation of spec", hkid
)
config_num = None
# If the device is already paired and known to us we should monitor c#
# (config_num) for changes. If it changes, we check for new entities
if paired and hkid in self.hass.data.get(KNOWN_DEVICES, {}):
conn = self.hass.data[KNOWN_DEVICES][hkid]
if conn.config_num != config_num:
_LOGGER.debug(
"HomeKit info %s: c# incremented, refreshing entities", hkid
)
self.hass.async_create_task(conn.async_refresh_entity_map(config_num))
return self.async_abort(reason="already_configured")
_LOGGER.debug("Discovered device %s (%s - %s)", name, model, hkid)
await self.async_set_unique_id(normalize_hkid(hkid))
self._abort_if_unique_id_configured()
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
self.context["hkid"] = hkid
self.context["title_placeholders"] = {"name": name}
if paired:
old_pairings = await self.hass.async_add_executor_job(
load_old_pairings, self.hass
)
if hkid in old_pairings:
return await self.async_import_legacy_pairing(
properties, old_pairings[hkid]
)
# Device is paired but not to us - ignore it
_LOGGER.debug("HomeKit device %s ignored as already paired", hkid)
return self.async_abort(reason="already_paired")
# Devices in HOMEKIT_IGNORE have native local integrations - users
# should be encouraged to use native integration and not confused
# by alternative HK API.
if model in HOMEKIT_IGNORE:
return self.async_abort(reason="ignored_model")
# Device isn't paired with us or anyone else.
# But we have a 'complete' config entry for it - that is probably
# invalid. Remove it automatically.
existing = find_existing_host(self.hass, hkid)
if existing:
await self.hass.config_entries.async_remove(existing.entry_id)
self.model = model
self.hkid = hkid
# We want to show the pairing form - but don't call async_step_pair
# directly as it has side effects (will ask the device to show a
# pairing code)
return self._async_step_pair_show_form()
async def async_import_legacy_pairing(self, discovery_props, pairing_data):
"""Migrate a legacy pairing to config entries."""
hkid = discovery_props["id"]
existing = find_existing_host(self.hass, hkid)
if existing:
_LOGGER.info(
(
"Legacy configuration for homekit accessory %s"
"not loaded as already migrated"
),
hkid,
)
return self.async_abort(reason="already_configured")
_LOGGER.info(
(
"Legacy configuration %s for homekit"
"accessory migrated to config entries"
),
hkid,
)
pairing = IpPairing(pairing_data)
return await self._entry_from_accessory(pairing)
async def async_step_pair(self, pair_info=None):
"""Pair with a new HomeKit accessory."""
# If async_step_pair is called with no pairing code then we do the M1
# phase of pairing. If this is successful the device enters pairing
# mode.
# If it doesn't have a screen then the pin is static.
# If it has a display it will display a pin on that display. In
# this case the code is random. So we have to call the start_pairing
# API before the user can enter a pin. But equally we don't want to
# call start_pairing when the device is discovered, only when they
# click on 'Configure' in the UI.
# start_pairing will make the device show its pin and return a
# callable. We call the callable with the pin that the user has typed
# in.
errors = {}
if pair_info:
code = pair_info["pairing_code"]
try:
code = ensure_pin_format(code)
await self.hass.async_add_executor_job(self.finish_pairing, code)
pairing = self.controller.pairings.get(self.hkid)
if pairing:
return await self._entry_from_accessory(pairing)
errors["pairing_code"] = "unable_to_pair"
except homekit.exceptions.MalformedPinError:
# Library claimed pin was invalid before even making an API call
errors["pairing_code"] = "authentication_error"
except homekit.AuthenticationError:
# PairSetup M4 - SRP proof failed
# PairSetup M6 - Ed25519 signature verification failed
# PairVerify M4 - Decryption failed
# PairVerify M4 - Device not recognised
# PairVerify M4 - Ed25519 signature verification failed
errors["pairing_code"] = "authentication_error"
except homekit.UnknownError:
# An error occurred on the device whilst performing this
# operation.
errors["pairing_code"] = "unknown_error"
except homekit.MaxPeersError:
# The device can't pair with any more accessories.
errors["pairing_code"] = "max_peers_error"
except homekit.AccessoryNotFoundError:
# Can no longer find the device on the network
return self.async_abort(reason="accessory_not_found_error")
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Pairing attempt failed with an unhandled exception")
errors["pairing_code"] = "pairing_failed"
start_pairing = self.controller.start_pairing
try:
self.finish_pairing = await self.hass.async_add_executor_job(
start_pairing, self.hkid, self.hkid
)
except homekit.BusyError:
# Already performing a pair setup operation with a different
# controller
errors["pairing_code"] = "busy_error"
except homekit.MaxTriesError:
# The accessory has received more than 100 unsuccessful auth
# attempts.
errors["pairing_code"] = "max_tries_error"
except homekit.UnavailableError:
# The accessory is already paired - cannot try to pair again.
return self.async_abort(reason="already_paired")
except homekit.AccessoryNotFoundError:
# Can no longer find the device on the network
return self.async_abort(reason="accessory_not_found_error")
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Pairing attempt failed with an unhandled exception")
errors["pairing_code"] = "pairing_failed"
return self._async_step_pair_show_form(errors)
def _async_step_pair_show_form(self, errors=None):
return self.async_show_form(
step_id="pair",
errors=errors or {},
data_schema=vol.Schema(
{vol.Required("pairing_code"): vol.All(str, vol.Strip)}
),
)
async def _entry_from_accessory(self, pairing):
"""Return a config entry from an initialized bridge."""
# The bulk of the pairing record is stored on the config entry.
# A specific exception is the 'accessories' key. This is more
# volatile. We do cache it, but not against the config entry.
# So copy the pairing data and mutate the copy.
pairing_data = pairing.pairing_data.copy()
# Use the accessories data from the pairing operation if it is
# available. Otherwise request a fresh copy from the API.
# This removes the 'accessories' key from pairing_data at
# the same time.
accessories = pairing_data.pop("accessories", None)
if not accessories:
accessories = await self.hass.async_add_executor_job(
pairing.list_accessories_and_characteristics
)
bridge_info = get_bridge_information(accessories)
name = get_accessory_name(bridge_info)
return self.async_create_entry(title=name, data=pairing_data)
| apache-2.0 | 554,822,641,521,625,800 | 37.283117 | 90 | 0.587285 | false |
mmilutinovic1313/zipline-with-algorithms | tests/test_sources.py | 1 | 7302 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytz
from itertools import cycle
import numpy as np
from six import integer_types
from unittest import TestCase
import zipline.utils.factory as factory
from zipline.sources import (DataFrameSource,
DataPanelSource,
RandomWalkSource)
from zipline.utils import tradingcalendar as calendar_nyse
from zipline.finance.trading import with_environment
class TestDataFrameSource(TestCase):
def test_df_source(self):
source, df = factory.create_test_df_source()
assert isinstance(source.start, pd.lib.Timestamp)
assert isinstance(source.end, pd.lib.Timestamp)
for expected_dt, expected_price in df.iterrows():
sid0 = next(source)
assert expected_dt == sid0.dt
assert expected_price[0] == sid0.price
def test_df_sid_filtering(self):
_, df = factory.create_test_df_source()
source = DataFrameSource(df, sids=[0])
assert 1 not in [event.sid for event in source], \
"DataFrameSource should only stream selected sid 0, not sid 1."
def test_panel_source(self):
source, panel = factory.create_test_panel_source(source_type=5)
assert isinstance(source.start, pd.lib.Timestamp)
assert isinstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertTrue('sid' in event)
self.assertTrue('arbitrary' in event)
self.assertTrue('type' in event)
self.assertTrue(hasattr(event, 'volume'))
self.assertTrue(hasattr(event, 'price'))
self.assertEquals(event['type'], 5)
self.assertEquals(event['arbitrary'], 1.)
self.assertEquals(event['sid'], 0)
self.assertTrue(isinstance(event['volume'], int))
self.assertTrue(isinstance(event['arbitrary'], float))
@with_environment()
def test_yahoo_bars_to_panel_source(self, env=None):
stocks = ['AAPL', 'GE']
start = pd.datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
data = factory.load_bars_from_yahoo(stocks=stocks,
indexes={},
start=start,
end=end)
check_fields = ['sid', 'open', 'high', 'low', 'close',
'volume', 'price']
source = DataPanelSource(data)
sids = [
asset.sid for asset in
[env.asset_finder.lookup_symbol(symbol, as_of_date=end)
for symbol in stocks]
]
stocks_iter = cycle(sids)
for event in source:
for check_field in check_fields:
self.assertIn(check_field, event)
self.assertTrue(isinstance(event['volume'], (integer_types)))
self.assertEqual(next(stocks_iter), event['sid'])
@with_environment()
def test_nan_filter_dataframe(self, env=None):
env.update_asset_finder(identifiers=[4, 5])
dates = pd.date_range('1/1/2000', periods=2, freq='B', tz='UTC')
df = pd.DataFrame(np.random.randn(2, 2),
index=dates,
columns=[4, 5])
# should be filtered
df.loc[dates[0], 4] = np.nan
# should not be filtered, should have been ffilled
df.loc[dates[1], 5] = np.nan
source = DataFrameSource(df)
event = next(source)
self.assertEqual(5, event.sid)
event = next(source)
self.assertEqual(4, event.sid)
event = next(source)
self.assertEqual(5, event.sid)
self.assertFalse(np.isnan(event.price))
@with_environment()
def test_nan_filter_panel(self, env=None):
env.update_asset_finder(identifiers=[4, 5])
dates = pd.date_range('1/1/2000', periods=2, freq='B', tz='UTC')
df = pd.Panel(np.random.randn(2, 2, 2),
major_axis=dates,
items=[4, 5],
minor_axis=['price', 'volume'])
# should be filtered
df.loc[4, dates[0], 'price'] = np.nan
# should not be filtered, should have been ffilled
df.loc[5, dates[1], 'price'] = np.nan
source = DataPanelSource(df)
event = next(source)
self.assertEqual(5, event.sid)
event = next(source)
self.assertEqual(4, event.sid)
event = next(source)
self.assertEqual(5, event.sid)
self.assertFalse(np.isnan(event.price))
class TestRandomWalkSource(TestCase):
def test_minute(self):
np.random.seed(123)
start_prices = {0: 100,
1: 500}
start = pd.Timestamp('1990-01-01', tz='UTC')
end = pd.Timestamp('1991-01-01', tz='UTC')
source = RandomWalkSource(start_prices=start_prices,
calendar=calendar_nyse, start=start,
end=end)
self.assertIsInstance(source.start, pd.lib.Timestamp)
self.assertIsInstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertIn(event.sid, start_prices.keys())
self.assertIn(event.dt.replace(minute=0, hour=0),
calendar_nyse.trading_days)
self.assertGreater(event.dt, start)
self.assertLess(event.dt, end)
self.assertGreater(event.price, 0,
"price should never go negative.")
self.assertTrue(13 <= event.dt.hour <= 21,
"event.dt.hour == %i, not during market \
hours." % event.dt.hour)
def test_day(self):
np.random.seed(123)
start_prices = {0: 100,
1: 500}
start = pd.Timestamp('1990-01-01', tz='UTC')
end = pd.Timestamp('1992-01-01', tz='UTC')
source = RandomWalkSource(start_prices=start_prices,
calendar=calendar_nyse, start=start,
end=end, freq='daily')
self.assertIsInstance(source.start, pd.lib.Timestamp)
self.assertIsInstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertIn(event.sid, start_prices.keys())
self.assertIn(event.dt.replace(minute=0, hour=0),
calendar_nyse.trading_days)
self.assertGreater(event.dt, start)
self.assertLess(event.dt, end)
self.assertGreater(event.price, 0,
"price should never go negative.")
self.assertEqual(event.dt.hour, 0)
| apache-2.0 | 1,614,067,241,080,615,000 | 40.022472 | 75 | 0.574363 | false |
renzon/gaecookie | setup.py | 1 | 4928 | import codecs
import os
import sys
from distutils.util import convert_path
from fnmatch import fnmatchcase
from setuptools import setup, find_packages
def read(fname):
return codecs.open(os.path.join(os.path.dirname(__file__), fname)).read()
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ["*.py", "*.pyc", "*$py.class", "*~", ".*", "*.bak"]
standard_exclude_directories = [
".*", "CVS", "_darcs", "./build", "./dist", "EGG-INFO", "*.egg-info"
]
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Note: you may want to copy this into your setup.py file verbatim, as
# you can't import this from another package, when you don't know if
# that package is installed yet.
def find_package_data(
where=".",
package="",
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{"package": [files]}
Where ``files`` is a list of all the files in that package that
don"t match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won"t be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren"t
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
"""
out = {}
stack = [(convert_path(where), "", package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"Directory %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
if (os.path.isfile(os.path.join(fn, "__init__.py"))
and not prefix):
if not package:
new_package = name
else:
new_package = package + "." + name
stack.append((fn, "", new_package, False))
else:
stack.append((fn, prefix + name + "/", package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"File %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix + name)
return out
PACKAGE = "gaecookie"
DESCRIPTION = "A library for signing string and validating cookies. cd .." \
""
NAME = "gaecookie"
AUTHOR = "Renzo Nuccitelli"
AUTHOR_EMAIL = "[email protected]"
URL = "https://github.com/renzon/gaecookie"
VERSION = __import__(PACKAGE).__version__
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=URL,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="BSD",
url=URL,
packages=find_packages(exclude=["tests.*", "tests"]),
package_data=find_package_data(PACKAGE, only_in_packages=False),
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Framework :: Paste",
],
zip_safe=False,
install_requires=[
'gaebusiness>=4.0',
'gaegraph>=2.3',
'tekton>=3.1.1'
]
)
| mit | 6,134,216,573,355,813,000 | 33.950355 | 86 | 0.551136 | false |
peret/visualize-bovw | tests/test_caltech_manager.py | 1 | 8382 | import unittest
from datamanagers.CaltechManager import CaltechManager
from datamanagers import InvalidDatasetException, NoSuchCategoryException
import os
import numpy as np
from sklearn.decomposition import PCA
from test_datamanager import BASE_PATH
class TestCaltechManager(unittest.TestCase):
def setUp(self):
self.datamanager = CaltechManager()
self.datamanager.change_base_path(os.path.join(BASE_PATH, "testdata"))
def test_invalid_dataset_caltech(self):
self.assertRaises(InvalidDatasetException, self.datamanager.build_sample_matrix, "rubbish", "test")
def test_invalid_dataset_caltech2(self):
self.assertRaises(InvalidDatasetException, self.datamanager.build_class_vector, "rubbish", "test")
def test_invalid_category_caltech(self):
self.assertRaises(NoSuchCategoryException, self.datamanager.get_positive_samples, "test", "rubbish")
def test_invalid_category_caltech2(self):
self.assertRaises(NoSuchCategoryException, self.datamanager.build_sample_matrix, "test", "rubbish")
def test_training_sample_matrix(self):
samples = self.datamanager.build_sample_matrix("train", "TestFake")
should_be = np.array([
[ 0.44842428, 0.50402522, 0.45302102, 0.54796243, 0.82176286, 0.11623112],
[ 0.31588301, 0.05166245, 0.16203263, 0.02196996, 0.96935761, 0.9854272 ],
[ 0.12442154, 0.57743013, 0.9548108 , 0.22592719, 0.10155164, 0.60750473],
[ 0.53320956, 0.18181397, 0.60112703, 0.09004746, 0.31448245, 0.85619318],
[ 0.18139255, 0.83218205, 0.87969971, 0.81630158, 0.57571691, 0.08127511]
], dtype=np.float32)
difference_matrix = np.abs(samples - should_be)
self.assertTrue((difference_matrix < 0.00000001).all(), "Should be:\n%s\nbut is:\n%s" % (should_be, samples))
def test_test_sample_matrix(self):
samples = self.datamanager.build_sample_matrix("test", "TestFake")
should_be = np.array([
[ 0.64663881, 0.55629711, 0.11966438, 0.04559849, 0.69156636, 0.4500224 ],
[ 0.38948518, 0.33885501, 0.567841 , 0.36167425, 0.18220702, 0.57701336],
[ 0.08660618, 0.83642531, 0.9239062 , 0.53778457, 0.56708116, 0.13766008],
[ 0.31313366, 0.88874122, 0.20000355, 0.56186443, 0.15771926, 0.81349361]
], dtype=np.float32)
difference_matrix = np.abs(samples - should_be)
self.assertTrue((difference_matrix < 0.00000001).all(), "Should be:\n%s\nbut is:\n%s" % (should_be, samples))
def test_all_sample_matrix(self):
samples = self.datamanager.build_sample_matrix("all", "TestFake")
should_be = np.array([
[ 0.44842428, 0.50402522, 0.45302102, 0.54796243, 0.82176286, 0.11623112],
[ 0.31588301, 0.05166245, 0.16203263, 0.02196996, 0.96935761, 0.9854272 ],
[ 0.64663881, 0.55629711, 0.11966438, 0.04559849, 0.69156636, 0.4500224 ],
[ 0.38948518, 0.33885501, 0.567841 , 0.36167425, 0.18220702, 0.57701336],
[ 0.12442154, 0.57743013, 0.9548108 , 0.22592719, 0.10155164, 0.60750473],
[ 0.53320956, 0.18181397, 0.60112703, 0.09004746, 0.31448245, 0.85619318],
[ 0.18139255, 0.83218205, 0.87969971, 0.81630158, 0.57571691, 0.08127511],
[ 0.08660618, 0.83642531, 0.9239062 , 0.53778457, 0.56708116, 0.13766008],
[ 0.31313366, 0.88874122, 0.20000355, 0.56186443, 0.15771926, 0.81349361]
], dtype=np.float32)
difference_matrix = np.abs(samples - should_be)
self.assertTrue((difference_matrix < 0.00000001).all(), "Should be:\n%s\nbut is:\n%s" % (should_be, samples))
def test_all_sample_matrix_exclude_feature(self):
self.datamanager.exclude_feature = 4
samples = self.datamanager.build_sample_matrix("all", "TestFake")
should_be = np.array([
[ 0.44842428, 0.50402522, 0.45302102, 0.54796243, 0.11623112],
[ 0.31588301, 0.05166245, 0.16203263, 0.02196996, 0.9854272 ],
[ 0.64663881, 0.55629711, 0.11966438, 0.04559849, 0.4500224 ],
[ 0.38948518, 0.33885501, 0.567841 , 0.36167425, 0.57701336],
[ 0.12442154, 0.57743013, 0.9548108 , 0.22592719, 0.60750473],
[ 0.53320956, 0.18181397, 0.60112703, 0.09004746, 0.85619318],
[ 0.18139255, 0.83218205, 0.87969971, 0.81630158, 0.08127511],
[ 0.08660618, 0.83642531, 0.9239062 , 0.53778457, 0.13766008],
[ 0.31313366, 0.88874122, 0.20000355, 0.56186443, 0.81349361]
], dtype=np.float32)
difference_matrix = np.abs(samples - should_be)
self.assertTrue((difference_matrix < 0.00000001).all(), "Should be:\n%s\nbut is:\n%s" % (should_be, samples))
@unittest.expectedFailure # TODO: dependent on file order
def test_complete_sample_matrix(self):
samples = self.datamanager.build_complete_sample_matrix("train")
should_be = np.array([
[ 0.31313366, 0.88874122, 0.20000355, 0.56186443, 0.15771926, 0.81349361],
[ 0.12442154, 0.57743013, 0.9548108 , 0.22592719, 0.10155164, 0.60750473],
[ 0.53320956, 0.18181397, 0.60112703, 0.09004746, 0.31448245, 0.85619318],
[ 0.18139255, 0.83218205, 0.87969971, 0.81630158, 0.57571691, 0.08127511],
[ 0.44842428, 0.50402522, 0.45302102, 0.54796243, 0.82176286, 0.11623112],
[ 0.31588301, 0.05166245, 0.16203263, 0.02196996, 0.96935761, 0.9854272 ],
], dtype=np.float32)
difference_matrix = np.abs(samples - should_be)
self.assertTrue((difference_matrix < 0.00000001).all(), "Should be:\n%s\nbut is:\n%s" % (should_be, samples))
@unittest.expectedFailure # TODO: dependent on file order
def test_complete_sample_matrix_exclude_feature(self):
self.datamanager.exclude_feature = 1
samples = self.datamanager.build_complete_sample_matrix("train")
should_be = np.array([
[ 0.31313366, 0.20000355, 0.56186443, 0.15771926, 0.81349361],
[ 0.12442154, 0.9548108 , 0.22592719, 0.10155164, 0.60750473],
[ 0.53320956, 0.60112703, 0.09004746, 0.31448245, 0.85619318],
[ 0.18139255, 0.87969971, 0.81630158, 0.57571691, 0.08127511],
[ 0.44842428, 0.45302102, 0.54796243, 0.82176286, 0.11623112],
[ 0.31588301, 0.16203263, 0.02196996, 0.96935761, 0.9854272 ],
], dtype=np.float32)
difference_matrix = np.abs(samples - should_be)
self.assertTrue((difference_matrix < 0.00000001).all(), "Should be:\n%s\nbut is:\n%s" % (should_be, samples))
def test_complete_sample_matrix_fail(self):
self.assertRaises(NotImplementedError, self.datamanager.build_complete_sample_matrix, "all")
def test_training_class_vector(self):
classes = self.datamanager.build_class_vector("train", "TestFake")
should_be = np.array([1, 1, 0, 0, 0])
self.assertTrue((classes==should_be).all(), "Should be:\n%s\nbut is:\n%s" % (should_be, classes))
def test_test_class_vector(self):
classes = self.datamanager.build_class_vector("test", "TestFake")
should_be = np.array([1, 1, 0, 0])
self.assertTrue((classes==should_be).all(), "Should be:\n%s\nbut is:\n%s" % (should_be, classes))
def test_complete_class_vector(self):
classes = self.datamanager.build_class_vector("all", "TestFake")
should_be = np.array([1, 1, 1, 1, 0, 0, 0, 0, 0])
self.assertTrue((classes==should_be).all(), "Should be:\n%s\nbut is:\n%s" % (should_be, classes))
def test_sample_matrix_pca(self):
self.datamanager.use_pca(n_components = 1)
samples = self.datamanager.build_sample_matrix("all", "TestFake")
should_be = np.array([
[-0.24263228],
[0.85717554],
[0.29054203],
[0.03857126],
[-0.18379566],
[0.44021899],
[-0.78841356],
[-0.65111911],
[-0.08255303]
], dtype=np.float32)
difference_matrix = np.abs(samples - should_be)
self.assertTrue((difference_matrix < 0.000001).all(), "Should be:\n%s\nbut is:\n%s" % (should_be, samples)) | gpl-2.0 | 71,504,931,090,516,424 | 56.417808 | 117 | 0.620496 | false |
Mendelone/forex_trading | Algorithm.Python/CustomDataBitcoinAlgorithm.py | 1 | 5264 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Data import SubscriptionDataSource
from QuantConnect.Python import PythonData
from datetime import date, timedelta, datetime
import decimal
import numpy as np
import json
class CustomDataBitcoinAlgorithm(QCAlgorithm):
'''3.0 CUSTOM DATA SOURCE: USE YOUR OWN MARKET DATA (OPTIONS, FOREX, FUTURES, DERIVATIVES etc).
The new QuantConnect Lean Backtesting Engine is incredibly flexible and allows you to define your own data source.
This includes any data source which has a TIME and VALUE. These are the *only* requirements.
To demonstrate this we're loading in "Bitcoin" data.'''
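    # A minimal (hypothetical) custom type only needs GetSource and Reader;
    # the Bitcoin class further down is the full version of this pattern:
    #
    #   class MyData(PythonData):
    #       def GetSource(self, config, date, isLiveMode):
    #           return SubscriptionDataSource("http://example.com/my.csv",
    #                                         SubscriptionTransportMedium.RemoteFile)
    #       def Reader(self, config, line, date, isLiveMode):
    #           point = MyData()
    #           point.Symbol = config.Symbol
    #           date_str, value = line.split(',')[:2]
    #           point.Time = datetime.strptime(date_str, "%Y-%m-%d")
    #           point.Value = decimal.Decimal(value)
    #           return point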
def Initialize(self):
self.SetStartDate(2011, 9, 13)
self.SetEndDate(datetime.now().date() - timedelta(1))
self.SetCash(100000)
# Define the symbol and "type" of our generic data:
self.AddData(Bitcoin, "BTC")
def OnData(self, data):
if "BTC" not in data: return
close = data["BTC"].Close
        # If we don't have any BTC "shares" -- invest.
if not self.Portfolio.Invested:
            # Bitcoin used as a tradable asset, like stocks, futures etc.
self.SetHoldings("BTC", 1)
self.Debug("Buying BTC 'Shares': BTC: {0}".format(close))
self.Debug("Time: {0} {1}".format(datetime.now(), close))
class Bitcoin(PythonData):
'''Custom Data Type: Bitcoin data from Quandl - http://www.quandl.com/help/api-for-bitcoin-data'''
def GetSource(self, config, date, isLiveMode):
if isLiveMode:
return SubscriptionDataSource("https://www.bitstamp.net/api/ticker/", SubscriptionTransportMedium.Rest);
#return "http://my-ftp-server.com/futures-data-" + date.ToString("Ymd") + ".zip";
# OR simply return a fixed small data file. Large files will slow down your backtest
return SubscriptionDataSource("http://www.quandl.com/api/v1/datasets/BCHARTS/BITSTAMPUSD.csv?sort_order=asc", SubscriptionTransportMedium.RemoteFile);
def Reader(self, config, line, date, isLiveMode):
coin = Bitcoin()
coin.Symbol = config.Symbol
if isLiveMode:
# Example Line Format:
# {"high": "441.00", "last": "421.86", "timestamp": "1411606877", "bid": "421.96", "vwap": "428.58", "volume": "14120.40683975", "low": "418.83", "ask": "421.99"}
try:
liveBTC = json.loads(line)
# If value is zero, return None
value = decimal.Decimal(liveBTC["last"])
if value == 0: return None
coin.Time = datetime.now()
coin.Value = value
coin["Open"] = float(liveBTC["open"])
coin["High"] = float(liveBTC["high"])
coin["Low"] = float(liveBTC["low"])
coin["Close"] = float(liveBTC["last"])
coin["Ask"] = float(liveBTC["ask"])
coin["Bid"] = float(liveBTC["bid"])
coin["VolumeBTC"] = float(liveBTC["volume"])
coin["WeightedPrice"] = float(liveBTC["vwap"])
return coin
except ValueError:
# Do nothing, possible error in json decoding
return None
# Example Line Format:
# Date Open High Low Close Volume (BTC) Volume (Currency) Weighted Price
# 2011-09-13 5.8 6.0 5.65 5.97 58.37138238, 346.0973893944 5.929230648356
if not (line.strip() and line[0].isdigit()): return None
try:
data = line.split(',')
# If value is zero, return None
value = decimal.Decimal(data[4])
if value == 0: return None
coin.Time = datetime.strptime(data[0], "%Y-%m-%d")
coin.Value = value
coin["Open"] = float(data[1])
coin["High"] = float(data[2])
coin["Low"] = float(data[3])
coin["Close"] = float(data[4])
coin["VolumeBTC"] = float(data[5])
coin["VolumeUSD"] = float(data[6])
coin["WeightedPrice"] = float(data[7])
return coin;
except ValueError:
# Do nothing, possible error in json decoding
return None | apache-2.0 | -5,720,431,468,602,672,000 | 40.769841 | 174 | 0.605663 | false |
jiejieling/RdsMonitor | src/api/controller/BaseController.py | 1 | 3536 | from dataprovider.dataprovider import RedisLiveDataProvider
import tornado.ioloop
import tornado.web
import dateutil.parser
class BaseController(tornado.web.RequestHandler):
def initialize(self):
self.stats_provider = RedisLiveDataProvider().get_provider()
def datetime_to_list(self, datetime):
"""Converts a datetime to a list.
Args:
datetime (datetime): The datetime to convert.
"""
parsed_date = dateutil.parser.parse(datetime)
# don't return the last two fields, we don't want them.
return tuple(parsed_date.timetuple())[:-2]
# todo : fix this
def average_data(self, data):
"""Averages data.
TODO: More docstring here, once functionality is understood.
"""
average = []
deviation=1024*1024
start = dateutil.parser.parse(data[0][0])
end = dateutil.parser.parse(data[-1][0])
difference = end - start
weeks, days = divmod(difference.days, 7)
minutes, seconds = divmod(difference.seconds, 60)
hours, minutes = divmod(minutes, 60)
# TODO: These if/elif/else branches chould probably be broken out into
# individual functions to make it easier to follow what's going on.
if difference.days > 0:
current_max = 0
current_current = 0
current_d = 0
for dt, max_memory, current_memory in data:
d = dateutil.parser.parse(dt)
if d.day != current_d:
current_d = d.day
average.append([dt, max_memory, current_memory])
current_max = max_memory
current_current = current_memory
else:
if max_memory > current_max or \
current_memory > current_current:
average.pop()
average.append([dt, max_memory, current_memory])
current_max=max_memory
current_current=current_memory
elif hours > 0:
current_max = 0
current_current = 0
current = -1
keep_flag = False
for dt, max_memory, current_memory in data:
d = dateutil.parser.parse(dt)
if d.hour != current:
current = d.hour
average.append([dt, max_memory, current_memory])
current_max=max_memory
current_current=current_memory
keep_flag=False
elif abs(max_memory - current_max) > deviation or \
abs(current_memory - current_current) > deviation:
#average.pop()
average.append([dt, max_memory, current_memory])
current_max = max_memory
current_current = current_memory
keep_flag = True
elif max_memory > current_max or \
current_memory > current_current:
if keep_flag != True:
average.pop()
average.append([dt, max_memory, current_memory])
current_max = max_memory
current_current = current_memory
keep_flag = False
else:
current_max = 0
current_current = 0
current_m = -1
keep_flag = False
for dt, max_memory, current_memory in data:
d = dateutil.parser.parse(dt)
if d.minute != current_m:
current_m = d.minute
average.append([dt, max_memory, current_memory])
current_max = max_memory
current_current = current_memory
keep_flag = False
elif abs(max_memory - current_max) > deviation or \
abs(current_memory - current_current) > deviation:
#average.pop()
average.append([dt, max_memory, current_memory])
current_max = max_memory
current_current = current_memory
keep_flag = True
elif max_memory > current_max or \
current_memory > current_current:
if keep_flag!=True:
average.pop()
average.append([dt,max_memory,current_memory])
current_max=max_memory
current_current=current_memory
keep_flag=False
return average
| mit | 2,281,642,187,381,527,300 | 29.222222 | 72 | 0.670532 | false |
Aloomaio/googleads-python-lib | examples/ad_manager/v201808/inventory_service/update_ad_units.py | 1 | 2752 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates ad unit sizes by adding a banner ad size.
To determine which ad units exist, run get_all_ad_units.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
# Set the ID of the ad unit to get.
AD_UNIT_ID = 'INSERT_AD_UNIT_ID_HERE'
def main(client, ad_unit_id):
# Initialize appropriate service.
inventory_service = client.GetService('InventoryService', version='v201808')
# Create a statement to select a single ad unit by ID.
statement = (ad_manager.StatementBuilder(version='v201808')
.Where('id = :id')
.WithBindVariable('id', ad_unit_id))
# Get ad units by statement.
response = inventory_service.getAdUnitsByStatement(
statement.ToStatement())
# Add the size 468x60 to the ad unit.
ad_unit_size = {
'size': {
'width': '468',
'height': '60'
},
'environmentType': 'BROWSER'
}
if 'results' in response and len(response['results']):
updated_ad_units = []
for ad_unit in response['results']:
if 'adUnitSizes' not in ad_unit:
ad_unit['adUnitSizes'] = []
ad_unit['adUnitSizes'].append(ad_unit_size)
updated_ad_units.append(ad_unit)
# Update ad unit on the server.
ad_units = inventory_service.updateAdUnits(updated_ad_units)
# Display results.
for ad_unit in ad_units:
ad_unit_sizes = ['{%s x %s}' % (size['size']['width'],
size['size']['height'])
for size in ad_unit['adUnitSizes']]
print ('Ad unit with ID "%s", name "%s", and sizes [%s] was updated'
% (ad_unit['id'], ad_unit['name'], ','.join(ad_unit_sizes)))
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, AD_UNIT_ID)
| apache-2.0 | 7,114,115,951,235,613,000 | 32.560976 | 78 | 0.665334 | false |
iagcl/data_pipeline | tests/applier/data_greenplum_cdc_applier.py | 1 | 3253 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import data_pipeline.constants.const as const
import data_postgres_cdc_applier
from .data_common import TestCase, UPDATE_SSP_SQL
tests = [
TestCase(
description="Apply update statement containing a single primary key SET with end of batch",
input_table_name="CONNCT_CDC_PK5_COLS10",
input_commit_statements=[
'',
'update "SYS"."CONNCT_CDC_PK5_COLS10" set "COMPNDPK_1" = \'0\' where "COMPNDPK_1" = \'26\'',
''],
input_record_types=[const.START_OF_BATCH, const.DATA, const.END_OF_BATCH],
input_operation_codes=['', const.UPDATE, ''],
input_primary_key_fields="COMPNDPK_1",
input_record_counts=[0, 0, 1],
input_commit_lsns=[0, 0, 0],
expect_sql_execute_called=[
None,
None,
None],
expect_execute_called_times=[0, 0, 0],
expect_audit_db_execute_sql_called=[None, None, (UPDATE_SSP_SQL, ('CDCApply', 0, 'myprofile', 1, 'ctl', 'connct_cdc_pk5_cols10'))],
expect_commit_called_times=[0, 0, 1],
expect_insert_row_count=[0, 0, 0],
expect_update_row_count=[0, 1, 1],
expect_delete_row_count=[0, 0, 0],
expect_source_row_count=[0, 1, 1],
expect_batch_committed=[const.UNCOMMITTED, const.UNCOMMITTED, const.COMMITTED,]
)
, TestCase(
description="Apply update statement containing a primary key and non-primary key in SET with end of batch",
input_table_name="CONNCT_CDC_PK5_COLS10",
input_commit_statements=[
'',
'update "SYS"."CONNCT_CDC_PK5_COLS10" set "COMPNDPK_1" = \'0\', "COL_V_2" = \'26.9\' where "COMPNDPK_1" = \'26\'',
''],
input_record_types=[const.START_OF_BATCH, const.DATA, const.END_OF_BATCH],
input_operation_codes=['', const.UPDATE, ''],
input_primary_key_fields="COMPNDPK_1",
input_record_counts=[0, 0, 1],
input_commit_lsns=[0, 0, 0],
expect_sql_execute_called=[
None,
"UPDATE ctl.CONNCT_CDC_PK5_COLS10 SET COL_V_2 = '26.9' WHERE COMPNDPK_1 = '26'; -- lsn: 0, offset: 1",
None],
expect_execute_called_times=[0, 1, 1],
expect_audit_db_execute_sql_called=[None, None, (UPDATE_SSP_SQL, ('CDCApply', 0, 'myprofile', 1, 'ctl', 'connct_cdc_pk5_cols10'))],
expect_commit_called_times=[0, 0, 1],
expect_insert_row_count=[0, 0, 0],
expect_update_row_count=[0, 1, 1],
expect_delete_row_count=[0, 0, 0],
expect_source_row_count=[0, 1, 1],
expect_batch_committed=[const.UNCOMMITTED, const.UNCOMMITTED, const.COMMITTED,]
)
]
| apache-2.0 | 3,493,367,925,225,126,000 | 40.177215 | 135 | 0.664617 | false |
glts/django-progress | migrations/0002_auto__add_field_challenge_done.py | 1 | 4079 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Challenge.done'
db.add_column('progress_challenge', 'done',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Challenge.done'
db.delete_column('progress_challenge', 'done')
models = {
'progress.challenge': {
'Meta': {'_ormbases': ['progress.Task'], 'object_name': 'Challenge'},
'done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'task_ptr': ('django.db.models.fields.related.OneToOneField', [], {'primary_key': 'True', 'to': "orm['progress.Task']", 'unique': 'True'})
},
'progress.effort': {
'Meta': {'ordering': "('-date',)", 'object_name': 'Effort'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 10, 30, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '200'}),
'routine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'efforts'", 'to': "orm['progress.Routine']"})
},
'progress.portion': {
'Meta': {'ordering': "('_order',)", 'object_name': 'Portion'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'challenge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'portions'", 'to': "orm['progress.Challenge']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'done_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'progress.routine': {
'Meta': {'_ormbases': ['progress.Task'], 'object_name': 'Routine'},
'task_ptr': ('django.db.models.fields.related.OneToOneField', [], {'primary_key': 'True', 'to': "orm['progress.Task']", 'unique': 'True'})
},
'progress.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'progress.task': {
'Meta': {'object_name': 'Task'},
'created_date': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'tasks'", 'to': "orm['progress.Tag']", 'symmetrical': 'False'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tasks'", 'to': "orm['progress.Topic']"}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now': 'True'})
},
'progress.topic': {
'Meta': {'object_name': 'Topic'},
'created_date': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['progress'] | mit | -60,077,277,886,440,990 | 55.666667 | 158 | 0.541554 | false |
google-research/google-research | bigg/bigg/torch_ops/tensor_ops.py | 1 | 3956 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: skip-file
import torch
from torch.nn import Module
from torch.nn.parameter import Parameter
from torch.autograd import Function
import numpy as np
from bigg.common.consts import t_float
class MultiIndexSelectFunc(Function):
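    """Differentiable multi-source row gather: rows ``mats[i][idx_froms[i]]``
    are copied into rows ``idx_tos[i]`` of a single output matrix, and the
    backward pass scatters gradients back to the matching source rows."""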
@staticmethod
def forward(ctx, idx_froms, idx_tos, *mats):
assert len(idx_tos) == len(idx_froms) == len(mats)
cols = mats[0].shape[1]
assert all([len(x.shape) == 2 for x in mats])
assert all([x.shape[1] == cols for x in mats])
num_rows = sum([len(x) for x in idx_tos])
out = mats[0].new(num_rows, cols)
for i, mat in enumerate(mats):
x_from = idx_froms[i]
x_to = idx_tos[i]
if x_from is None:
out[x_to] = mat.detach()
else:
assert len(x_from) == len(x_to)
out[x_to] = mat[x_from].detach()
ctx.idx_froms = idx_froms
ctx.idx_tos = idx_tos
ctx.shapes = [x.shape for x in mats]
return out
@staticmethod
def backward(ctx, grad_output):
idx_froms, idx_tos = ctx.idx_froms, ctx.idx_tos
list_grad_mats = [None, None]
for i in range(len(idx_froms)):
x_from = idx_froms[i]
x_to = idx_tos[i]
if x_from is None:
grad_mat = grad_output[x_to].detach()
else:
grad_mat = grad_output.new(ctx.shapes[i]).zero_()
grad_mat[x_from] = grad_output[x_to].detach()
list_grad_mats.append(grad_mat)
return tuple(list_grad_mats)
class MultiIndexSelect(Module):
def forward(self, idx_froms, idx_tos, *mats):
return MultiIndexSelectFunc.apply(idx_froms, idx_tos, *mats)
multi_index_select = MultiIndexSelect()
def test_multi_select():
a = Parameter(torch.randn(4, 2))
b = Parameter(torch.randn(3, 2))
d = Parameter(torch.randn(5, 2))
idx_froms = [[0, 1], [1, 2], [3, 4]]
idx_tos = [[4, 5], [0, 1], [2, 3]]
c = multi_index_select(idx_froms, idx_tos, a, b, d)
print('===a===')
print(a)
print('===b===')
print(b)
print('===d===')
print(d)
print('===c===')
print(c)
t = torch.sum(c)
t.backward()
print(a.grad)
print(b.grad)
print(d.grad)
class PosEncoding(Module):
def __init__(self, dim, device, base=10000, bias=0):
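        """Precompute per-dimension divisors (``base ** -(2*floor(i/2)/dim)``,
        kept in ``self.base``) and phase shifts (``pi/2`` on odd dimensions,
        turning sin into cos), which ``forward`` combines as
        ``sin(pos / base + shift)``."""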
super(PosEncoding, self).__init__()
p = []
sft = []
for i in range(dim):
b = (i - i % 2) / dim
p.append(base ** -b)
if i % 2:
sft.append(np.pi / 2.0 + bias)
else:
sft.append(bias)
self.device = device
self.sft = torch.tensor(sft, dtype=t_float).view(1, -1).to(device)
self.base = torch.tensor(p, dtype=t_float).view(1, -1).to(device)
def forward(self, pos):
with torch.no_grad():
if isinstance(pos, list):
pos = torch.tensor(pos, dtype=t_float).to(self.device)
pos = pos.view(-1, 1)
x = pos / self.base + self.sft
return torch.sin(x)
if __name__ == '__main__':
# test_multi_select()
pos_enc = PosEncoding(128, 'cpu')
print(pos_enc([1, 2, 3]))
| apache-2.0 | 5,617,877,974,705,925,000 | 28.969697 | 74 | 0.571284 | false |
PurpleMyst/porcupine | porcupine/plugins/statusbar.py | 1 | 1725 | from tkinter import ttk
from porcupine import get_tab_manager, utils
# i have experimented with a logging handler that displays logging
# messages in the label, but it's not as good idea as it sounds like,
# not all INFO messages are something that users should see all the time
# this widget is kind of weird
class LabelWithEmptySpaceAtLeft(ttk.Label):
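    # Packs an expanding spacer frame before the label itself, so the extra
    # status parts are spread out with empty space before each of them.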
def __init__(self, master):
self._spacer = ttk.Frame(master)
self._spacer.pack(side='left', expand=True)
super().__init__(master)
self.pack(side='left')
def destroy(self):
self._spacer.destroy()
super().destroy()
class StatusBar(ttk.Frame):
def __init__(self, master, tab):
super().__init__(master)
self.tab = tab
# one label for each tab-separated thing
self.labels = [ttk.Label(self)]
self.labels[0].pack(side='left')
tab.bind('<<StatusChanged>>', self.do_update, add=True)
self.do_update()
# this is do_update() because tkinter has a method called update()
def do_update(self, junk=None):
parts = self.tab.status.split('\t')
# there's always at least one part, the label added in
# __init__ is not destroyed here
while len(self.labels) > len(parts):
self.labels.pop().destroy()
while len(self.labels) < len(parts):
self.labels.append(LabelWithEmptySpaceAtLeft(self))
for label, text in zip(self.labels, parts):
label['text'] = text
def on_new_tab(event):
tab = event.data_widget
StatusBar(tab.bottom_frame, tab).pack(side='bottom', fill='x')
def setup():
utils.bind_with_data(get_tab_manager(), '<<NewTab>>', on_new_tab, add=True)
| mit | -7,980,209,914,607,490,000 | 29.263158 | 79 | 0.628406 | false |
rgeorgi/intent | intent/utils/dicts.py | 1 | 20697 | """
Created on Aug 26, 2013
@author: rgeorgi
"""
import sys, re, unittest
from collections import defaultdict, Callable, OrderedDict
class CountDict(object):
def __init__(self):
self._dict = defaultdict(int)
def add(self, key, value=1):
self[key] += value
def __str__(self):
return self._dict.__str__()
def __repr__(self):
return self._dict.__repr__()
def distribution(self, use_keys = list, add_n = 0):
        return {k: (self[k] + add_n) / (self.total() + add_n * len(self.keys())) for k in self.keys()}
def total(self):
values = self._dict.values()
total = 0
for v in values:
total += v
return total
#===========================================================================
# Stuff that should be inheritable
#===========================================================================
def __getitem__(self, k):
return self._dict.__getitem__(k)
def __setitem__(self, k, v):
self._dict.__setitem__(k, v)
def __contains__(self, k):
return self._dict.__contains__(k)
def __len__(self):
return self._dict.__len__()
def __delitem__(self, k):
self._dict.__delitem__(k)
def keys(self):
return self._dict.keys()
def items(self):
return self._dict.items()
# -----------------------------------------------------------------------------
def largest(self):
return sorted(self.items(), reverse=True, key=lambda k: k[1])[0]
def most_frequent(self, minimum = 0, num = 1):
"""
Return the @num entries with the highest counts that
        also have more than @minimum occurrences.
@param minimum: int
@param num: int
"""
items = list(self.items())
items.sort(key = lambda item: item[1], reverse=True)
ret_items = []
for item in items:
if item[1] > minimum:
ret_items.append(item[0])
if num and len(ret_items) == num:
break
return ret_items
def most_frequent_counts(self, minimum = 0, num = 1):
most_frequent_keys = self.most_frequent(minimum, num)
return [(key, self[key]) for key in most_frequent_keys]
def __add__(self, other):
d = self.__class__()
for key in self.keys():
d.add(key, self[key])
for key in other.keys():
d.add(key, other[key])
return d
class DefaultOrderedDict(OrderedDict):
# Source: http://stackoverflow.com/a/6190500/562769
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not isinstance(default_factory, Callable)):
raise TypeError('first argument must be callable')
OrderedDict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return OrderedDict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
return 'OrderedDefaultDict(%s, %s)' % (self.default_factory,
OrderedDict.__repr__(self))
class TwoLevelCountDict(object):
def __init__(self):
self._dict = defaultdict(CountDict)
def __add__(self, other):
new = self.__class__()
for key_a in other.keys():
for key_b in other[key_a].keys():
new.add(key_a, key_b, other[key_a][key_b])
for key_a in self.keys():
for key_b in self[key_a].keys():
new.add(key_a, key_b, self[key_a][key_b])
return new
def combine(self, other):
for key_a in other.keys():
for key_b in other[key_a].keys():
self.add(key_a, key_b, other[key_a][key_b])
def add(self, key_a, key_b, value=1):
self[key_a][key_b] += value
def top_n(self, key, n=1, min_num = 1, key2_re = None):
s = sorted(self[key].items(), reverse=True, key=lambda x: x[1])
if key2_re:
s = [i for i in s if re.search(key2_re, i[0])]
return s[0:n]
def most_frequent(self, key, num = 1, key2_re = ''):
most_frequent = None
biggest_count = 0
for key2 in self[key].keys():
# The key2_re is used to ignore certain keys
if key2_re and re.search(key2_re, key2):
continue
else:
count = self[key][key2]
if count > biggest_count and count >= num:
most_frequent = key2
biggest_count = count
return most_frequent
def fulltotal(self):
total = 0
for key in self.keys():
total += self.total(key)
return total
def total(self, key):
"""
:param key:
:return: Number of tokens that have the "REAL" tag ``key``
"""
count = 0
for key2 in self[key].keys():
count += self[key][key2]
return count
def distribution(self, as_string = False, as_csv = False):
d = {k:self.total(k)/self.fulltotal() for k in self.keys()}
if not (as_string or as_csv):
return d
else:
rs = ''
for key, value in d.items():
if as_csv:
key += ','
rs += '{:<8s}{:>8.2f}\n'.format(key, value)
return rs
def sub_distribution(self, key, use_keys=list, add_n = 0):
d = self[key]
return d.distribution(use_keys=use_keys, add_n=add_n)
#===========================================================================
# Stuff that should've been inherited
#===========================================================================
def __str__(self):
return self._dict.__str__()
def __getitem__(self, k):
"""
:rtype : CountDict
"""
return self._dict.__getitem__(k)
def __setitem__(self, k, v):
self._dict.__setitem__(k, v)
def __contains__(self, k):
return self._dict.__contains__(k)
def keys(self):
return self._dict.keys()
def __len__(self):
return self._dict.__len__()
#===============================================================================
#
#===============================================================================
class POSEvalDict(TwoLevelCountDict):
"""
This dictionary is used for evaluation. Items are stored in the dictionary as:
{real_label:{assigned_label:count}}
This also supports greedy mapping techniques for evaluation.
"""
def __init__(self):
TwoLevelCountDict.__init__(self)
self.mapping = {}
def keys(self):
return [str(k) for k in TwoLevelCountDict.keys(self)]
def gold_tags(self):
return list(self.keys())
def assigned_tags(self):
t = {}
for tag_a in self.keys():
for tag_b in self[tag_a].keys():
t[tag_b] = True
return list(t.keys())
def _resetmapping(self):
self.mapping = {t:t for t in self.keys()}
def _mapping(self):
if not self.mapping:
self._resetmapping()
def map(self, k):
return self.mapping[k]
def unmap(self, k):
keys = [o for o, n in self.mapping.items() if n == k]
assert len(keys) == 1
return keys[0]
def overall_breakdown(self, title=None):
ret_s = ''
if title:
ret_s = title+','
ret_s += 'accuracy, matches, total\n'
ret_s += '%.2f,%s,%s\n' % (self.accuracy(), self.all_matches(), self.fulltotal())
return ret_s
def unaligned(self, unaligned_tag = 'UNK'):
if self.fulltotal() == 0:
return 0
else:
return float(self.col_total(unaligned_tag)) / self.fulltotal() * 100
def breakdown_csv(self):
ret_s = 'TAG,PRECISION,RECALL,F_1,IN_GOLD,IN_EVAL,MATCHES\n'
for label in self.keys():
ret_s += '%s,%.2f,%.2f,%.2f,%d,%d,%d\n' % (label,
self.tag_precision(label),
self.tag_recall(label),
self.tag_fmeasure(label),
self.total(label),
self.col_total(label),
self.matches(label))
return ret_s
def matches(self, t):
self._mapping()
if t in self.mapping:
mapped = self.mapping[t]
else:
mapped = t
if mapped in self and mapped in self[mapped]:
return self[mapped][mapped]
else:
return 0
def all_matches(self):
self._mapping()
matches = 0
for t in self.keys():
matches += self.matches(t)
return matches
def accuracy(self):
totals = self.fulltotal()
matches = self.all_matches()
#print('%d/%d' % (matches, totals))
return float(matches / totals) * 100 if totals != 0 else 0
def col_total(self, assigned_tag):
"""
:param assigned_tag: The assigned tag to count
:return: The number of tokens that have been assigned the tag ``assigned_tag``, including false positives.
"""
self._mapping()
totals = 0
for tag_b in self.keys():
totals += self[tag_b][assigned_tag]
return totals
# =============================================================================
# Overall Precision / Recall / FMeasure
# =============================================================================
def precision(self):
totals = 0
matches = 0
for assigned_tag in self.assigned_tags():
totals += self.col_total(assigned_tag)
matches += self.matches(assigned_tag)
return (float(matches) / totals * 100) if totals != 0 else 0
def recall(self):
totals = 0
matches = 0
for tag in self.keys():
totals += self.total(tag)
matches += self.matches(tag)
return float(matches) / totals * 100 if totals != 0 else 0
def fmeasure(self):
p = self.precision()
r = self.recall()
        return 2 * (p*r)/(p+r) if (p+r) != 0 else 0
# =============================================================================
# Tag-Level Precision / Recall / FMeasure
# =============================================================================
def tag_precision(self, tag):
"""
Calculate the precision for a given tag
:type tag: str
:rtype: float
"""
self._mapping()
tag_total = self.col_total(tag)
return (float(self.matches(tag)) / tag_total * 100) if tag_total != 0 else 0
def tag_recall(self, tag):
"""
Calculate recall for a given tag
:param tag: Input tag
:rtype: float
"""
total = self.total(tag)
return float(self.matches(tag)) / total * 100 if total != 0 else 0
def tag_fmeasure(self, tag):
"""
Calculate f-measure for a given tag
:param tag:
:rtype: float
"""
p = self.tag_precision(tag)
r = self.tag_recall(tag)
return 2 * (p*r)/(p+r) if (p+r) != 0 else 0
# =============================================================================
def greedy_n_to_1(self):
"""
Remap the tags in such a way to maximize matches. In this mapping,
multiple output tags can map to the same gold tag.
"""
self._mapping()
for orig_tag in self.keys():
most_matches = 0
best_alt = orig_tag
# Iterate through every alternate
# and see if remapping fares better.
for alt_tag in self.keys():
if self[alt_tag][orig_tag] > most_matches:
most_matches = self[alt_tag][orig_tag]
best_alt = alt_tag
self.mapping[orig_tag] = best_alt
return self.mapping
def greedy_1_to_1(self, debug=False):
"""
Remap the tags one-to-one in such a way as to maximize matches.
This will be similar to bubble sort. Start off with 1:1. Then, go
through each pair of tags and try swapping the two. If we get a net
gain of matches, then keep the swap, otherwise don't. Repeat until we
get a full run of no swaps.
"""
self._mapping()
mapping = self.mapping
while True:
# 2) Now, for each tag, consider swapping it with another tag, and see if
# we improve.
improved = False
for orig_tag, cur_tag in sorted(mapping.items()):
cur_matches = self[orig_tag][cur_tag]
best_alt = cur_tag
swapped = False
best_delta = 0
for alt_tag in sorted(self.keys()):
# alt_tag -- is the tag we are considering swapping
# the mapping for orig_tag to.
# cur_tag -- is the tag that orig_tag is currently
# mapped to.
# alt_parent_tag -- the tag that previously was
# assigned to alt_tag
alt_parent_tag = self.unmap(alt_tag)
# When looking up the possible matches, remember
# that the first bracket will be the original tag
# and the second tag will be what it is mapped to.
# B MATCHES ------------------------------------------------
matches_b_old = self[alt_tag][alt_parent_tag]
# And the matches that we will see if swapped...
matches_b_new = self[cur_tag][alt_parent_tag]
# A MATCHES ------------------------------------------------
# Now, the matches that we will gain by the swap....
matches_a_new = self[alt_tag][orig_tag]
# And where we last were with relationship to the mapping...
matches_a_old = self[cur_tag][orig_tag]
matches_delta = (matches_b_new - matches_b_old) + (matches_a_new - matches_a_old)
if matches_delta > 0:
best_delta = matches_delta
best_alt = alt_tag
swapped = True
# If we have found a better swap...
if swapped:
new_alt = mapping[orig_tag]
mapping[self.unmap(best_alt)] = new_alt
mapping[orig_tag] = best_alt
improved = True
self.mapping = mapping
break
# Break out of the while loop
# if we have not made a swap.
if not improved:
break
self.mapping = mapping
#===========================================================================
def error_matrix(self, csv=False, ansi=False):
"""
Print an error matrix with the columns being the tags assigned by the
system and the rows being the gold standard answers.
"""
self._mapping()
cellwidth = 12
if not csv:
cell = '%%-%ds' % cellwidth
else:
cell='%s,'
keys = sorted(self.keys())
# Print header
header_start = int((len(keys)*cellwidth)/2)-8
if not csv:
ret_s = ' '*header_start + '[PREDICTED ALONG TOP]' + '\n'
else:
ret_s = ''
ret_s += cell % ''
# Print the column labels
for key in keys:
if self.mapping[key] != key:
ret_s += cell % ('%s(%s)' % (key, self.mapping[key]))
else:
ret_s += cell % key
# Add a total and a recall column.
ret_s += '| ' if not csv else ''
ret_s += (cell % 'TOT') + (cell % 'REC')
# Next Line
ret_s += '\n'
#=======================================================================
# Now, print all the middle of the cells
#=======================================================================
for key_b in keys:
ret_s += cell % key_b
rowtotal = 0
for key_a in keys:
# Make it bold
if self.mapping[key_a] == key_b:
if ansi:
ret_s += '\033[94m'
count = self[key_b][key_a]
rowtotal += count
ret_s += cell % count
# Unbold it...
if self.mapping[key_a] == key_b:
if ansi:
ret_s += '\033[0m'
# Add the total for this row...
ret_s += '| ' if not csv else ''
ret_s += cell % rowtotal
# And calc the recall
if rowtotal == 0:
ret_s += cell % ('%.2f' % 0)
else:
ret_s += cell % ('%.2f' % (float(self[key_b][self.mapping[key_b]]) / rowtotal*100))
ret_s += '\n'
#===================================================================
# Finally, print all the stuff at the bottom
#===================================================================
# 1) Print a separator line at the bottom.
#ret_s += cell % '' # ( Skip a cell )
if not csv:
for i in range(len(keys)+1):
ret_s += cell % ('-'*cellwidth)
ret_s += '\n'
# 2) Print the totals for each column
ret_s += cell % 'TOT'
for key_a in keys:
ret_s += cell % (self.col_total(key_a))
ret_s += '\n'
# 3) Print the precision for each column.
ret_s += cell % 'PREC'
for key_a in keys:
ret_s += cell % ('%.2f' % self.tag_precision(key_a))
return ret_s+'\n'
class MatrixTest(unittest.TestCase):
def runTest(self):
ped = POSEvalDict()
ped.add('A','A',1)
ped.add('A','B',2)
ped.add('A','C',4)
ped.add('B','A',3)
ped.add('B','B',1)
ped.add('C','A',1)
# A B C | TOT REC
# A 1 2 4 | 7 14.29
# B 3 1 0 | 4 25.00
# C 1 0 0 | 1 0.00
# --------------------------------
# TOT 5 3 4
# PREC 20.00 33.33 0.00 '''
self.assertEqual(ped.tag_precision('A'), float(1)/5*100)
self.assertEqual(ped.tag_recall('A'), float(1)/7*100)
self.assertEqual(ped.tag_recall('C'), 0)
self.assertEqual(ped['A']['C'], 4)
class GreedyTest(unittest.TestCase):
def runTest(self):
ped = POSEvalDict()
ped.add('A','B',5)
ped.add('A','C',2)
ped.add('B','A',10)
ped.add('C','C',10)
# A B C | TOT REC
# A 0 5 0 | 5 0.00
# B 10 0 0 | 10 0.00
# C 0 0 10 | 10 100.00
# --------------------------------
# TOT 10 5 10
# PREC 0.00 0.00 100.00
ped.greedy_1_to_1()
print(ped.error_matrix(True))
class StatDict(defaultdict):
"""
"""
def __init__(self, type=int):
"""
Constructor
"""
defaultdict.__init__(self, type)
@property
def total(self):
return sum(self.values())
@property
def distribution(self):
        return {k: float(v) / self.total for k, v in self.items()}
@property
def counts(self):
return set(self.items()) | mit | 124,940,156,392,500,220 | 28.152113 | 114 | 0.454172 | false |
shoyer/numpy | numpy/core/function_base.py | 2 | 16480 | from __future__ import division, absolute_import, print_function
import functools
import warnings
import operator
from . import numeric as _nx
from .numeric import (result_type, NaN, shares_memory, MAY_SHARE_BOUNDS,
TooHardError, asanyarray, ndim)
from numpy.core.multiarray import add_docstring
from numpy.core import overrides
__all__ = ['logspace', 'linspace', 'geomspace']
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
def _index_deprecate(i, stacklevel=2):
try:
i = operator.index(i)
except TypeError:
msg = ("object of type {} cannot be safely interpreted as "
"an integer.".format(type(i)))
i = int(i)
stacklevel += 1
warnings.warn(msg, DeprecationWarning, stacklevel=stacklevel)
return i
def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None,
dtype=None, axis=None):
return (start, stop)
@array_function_dispatch(_linspace_dispatcher)
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
axis=0):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop`].
The endpoint of the interval can optionally be excluded.
.. versionchanged:: 1.16.0
Non-scalar `start` and `stop` are now supported.
Parameters
----------
start : array_like
The starting value of the sequence.
stop : array_like
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
dtype : dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
.. versionadded:: 1.9.0
axis : int, optional
The axis in the result to store the samples. Relevant only if start
or stop are array-like. By default (0), the samples will be along a
new axis inserted at the beginning. Use -1 to get an axis at the end.
.. versionadded:: 1.16.0
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float, optional
Only returned if `retstep` is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
geomspace : Similar to `linspace`, but with numbers spaced evenly on a log
scale (a geometric progression).
logspace : Similar to `geomspace`, but with the end points specified as
logarithms.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
# 2016-02-25, 1.12
num = _index_deprecate(num)
if num < 0:
raise ValueError("Number of samples, %s, must be non-negative." % num)
div = (num - 1) if endpoint else num
# Convert float/complex array scalars to float, gh-3504
# and make sure one can use variables that have an __array_interface__, gh-6634
start = asanyarray(start) * 1.0
stop = asanyarray(stop) * 1.0
dt = result_type(start, stop, float(num))
if dtype is None:
dtype = dt
delta = stop - start
y = _nx.arange(0, num, dtype=dt).reshape((-1,) + (1,) * ndim(delta))
# In-place multiplication y *= delta/div is faster, but prevents the multiplicant
# from overriding what class is produced, and thus prevents, e.g. use of Quantities,
# see gh-7142. Hence, we multiply in place only for standard scalar types.
_mult_inplace = _nx.isscalar(delta)
if num > 1:
step = delta / div
if _nx.any(step == 0):
# Special handling for denormal numbers, gh-5437
y /= div
if _mult_inplace:
y *= delta
else:
y = y * delta
else:
if _mult_inplace:
y *= step
else:
y = y * step
else:
# 0 and 1 item long sequences have an undefined step
step = NaN
# Multiply with delta to allow possible override of output class.
y = y * delta
y += start
if endpoint and num > 1:
y[-1] = stop
if axis != 0:
y = _nx.moveaxis(y, 0, axis)
if retstep:
return y.astype(dtype, copy=False), step
else:
return y.astype(dtype, copy=False)
def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None,
dtype=None, axis=None):
return (start, stop)
@array_function_dispatch(_logspace_dispatcher)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,
axis=0):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
.. versionchanged:: 1.16.0
Non-scalar `start` and `stop` are now supported.
Parameters
----------
start : array_like
``base ** start`` is the starting value of the sequence.
stop : array_like
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length `num`) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
axis : int, optional
The axis in the result to store the samples. Relevant only if start
or stop are array-like. By default (0), the samples will be along a
new axis inserted at the beginning. Use -1 to get an axis at the end.
.. versionadded:: 1.16.0
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
geomspace : Similar to logspace, but with endpoints specified directly.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y).astype(dtype)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis)
if dtype is None:
return _nx.power(base, y)
return _nx.power(base, y).astype(dtype, copy=False)
def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None,
axis=None):
return (start, stop)
@array_function_dispatch(_geomspace_dispatcher)
def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
"""
Return numbers spaced evenly on a log scale (a geometric progression).
This is similar to `logspace`, but with endpoints specified directly.
Each output sample is a constant multiple of the previous.
.. versionchanged:: 1.16.0
Non-scalar `start` and `stop` are now supported.
Parameters
----------
start : array_like
The starting value of the sequence.
stop : array_like
The final value of the sequence, unless `endpoint` is False.
In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length `num`) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
axis : int, optional
The axis in the result to store the samples. Relevant only if start
or stop are array-like. By default (0), the samples will be along a
new axis inserted at the beginning. Use -1 to get an axis at the end.
.. versionadded:: 1.16.0
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
logspace : Similar to geomspace, but with endpoints specified using log
and base.
linspace : Similar to geomspace, but with arithmetic instead of geometric
progression.
arange : Similar to linspace, with the step size specified instead of the
number of samples.
Notes
-----
If the inputs or dtype are complex, the output will follow a logarithmic
spiral in the complex plane. (There are an infinite number of spirals
passing through two points; the output will follow the shortest such path.)
Examples
--------
>>> np.geomspace(1, 1000, num=4)
array([ 1., 10., 100., 1000.])
>>> np.geomspace(1, 1000, num=3, endpoint=False)
array([ 1., 10., 100.])
>>> np.geomspace(1, 1000, num=4, endpoint=False)
array([ 1. , 5.62341325, 31.6227766 , 177.827941 ])
>>> np.geomspace(1, 256, num=9)
array([ 1., 2., 4., 8., 16., 32., 64., 128., 256.])
Note that the above may not produce exact integers:
>>> np.geomspace(1, 256, num=9, dtype=int)
array([ 1, 2, 4, 7, 16, 32, 63, 127, 256])
>>> np.around(np.geomspace(1, 256, num=9)).astype(int)
array([ 1, 2, 4, 8, 16, 32, 64, 128, 256])
Negative, decreasing, and complex inputs are allowed:
>>> np.geomspace(1000, 1, num=4)
array([1000., 100., 10., 1.])
>>> np.geomspace(-1000, -1, num=4)
array([-1000., -100., -10., -1.])
>>> np.geomspace(1j, 1000j, num=4) # Straight line
array([0. +1.j, 0. +10.j, 0. +100.j, 0.+1000.j])
>>> np.geomspace(-1+0j, 1+0j, num=5) # Circle
array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j,
6.12323400e-17+1.00000000e+00j, 7.07106781e-01+7.07106781e-01j,
1.00000000e+00+0.00000000e+00j])
Graphical illustration of ``endpoint`` parameter:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> y = np.zeros(N)
>>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.axis([0.5, 2000, 0, 3])
[0.5, 2000, 0, 3]
>>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both')
>>> plt.show()
"""
start = asanyarray(start)
stop = asanyarray(stop)
if _nx.any(start == 0) or _nx.any(stop == 0):
raise ValueError('Geometric sequence cannot include zero')
dt = result_type(start, stop, float(num), _nx.zeros((), dtype))
if dtype is None:
dtype = dt
else:
# complex to dtype('complex128'), for instance
dtype = _nx.dtype(dtype)
# Promote both arguments to the same dtype in case, for instance, one is
# complex and another is negative and log would produce NaN otherwise.
# Copy since we may change things in-place further down.
start = start.astype(dt, copy=True)
stop = stop.astype(dt, copy=True)
out_sign = _nx.ones(_nx.broadcast(start, stop).shape, dt)
# Avoid negligible real or imaginary parts in output by rotating to
# positive real, calculating, then undoing rotation
if _nx.issubdtype(dt, _nx.complexfloating):
all_imag = (start.real == 0.) & (stop.real == 0.)
if _nx.any(all_imag):
start[all_imag] = start[all_imag].imag
stop[all_imag] = stop[all_imag].imag
out_sign[all_imag] = 1j
both_negative = (_nx.sign(start) == -1) & (_nx.sign(stop) == -1)
if _nx.any(both_negative):
_nx.negative(start, out=start, where=both_negative)
_nx.negative(stop, out=stop, where=both_negative)
_nx.negative(out_sign, out=out_sign, where=both_negative)
log_start = _nx.log10(start)
log_stop = _nx.log10(stop)
result = out_sign * logspace(log_start, log_stop, num=num,
endpoint=endpoint, base=10.0, dtype=dtype)
if axis != 0:
result = _nx.moveaxis(result, 0, axis)
return result.astype(dtype, copy=False)
#always succeed
def add_newdoc(place, obj, doc):
"""
Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
"""
try:
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
add_docstring(new, doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new, doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
except Exception:
pass
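# Illustrative usage sketch (the module and attribute names below are
# hypothetical):
#
#     add_newdoc('mymodule', 'MyClass',
#                ('my_method', "One-line summary of my_method."))
#
# attaches the docstring to ``MyClass.my_method``. Because this helper never
# raises, callers should check afterwards that the docstring was really set.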
| bsd-3-clause | 124,672,043,304,796,380 | 34.44086 | 88 | 0.600303 | false |
harikishen/addons-server | src/olympia/api/fields.py | 1 | 10385 | from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.utils.encoding import smart_text
from django.utils.translation import get_language, ugettext_lazy as _
from rest_framework import fields, serializers
from olympia.amo.utils import to_language
from olympia.translations.models import Translation
class ReverseChoiceField(fields.ChoiceField):
"""
A ChoiceField that exposes the "human-readable" values of its choices,
while storing the "actual" corresponding value as normal.
This is useful when you want to expose string constants to clients while
storing integers in the database.
Note that the values in the `choices_dict` must be unique, since they are
used for both serialization and de-serialization.
"""
def __init__(self, *args, **kwargs):
self.reversed_choices = {v: k for k, v in kwargs['choices']}
super(ReverseChoiceField, self).__init__(*args, **kwargs)
def to_representation(self, value):
"""
Convert to representation by getting the "human-readable" value from
the "actual" one.
"""
value = self.choices.get(value, None)
return super(ReverseChoiceField, self).to_representation(value)
def to_internal_value(self, value):
"""
Convert to internal value by getting the "actual" value from the
"human-readable" one that is passed.
"""
try:
value = self.reversed_choices[value]
except KeyError:
self.fail('invalid_choice', input=value)
return super(ReverseChoiceField, self).to_internal_value(value)
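# Illustrative sketch (hypothetical choices): exposing string constants to API
# clients while persisting integers.
#
#     class AddonStatusSerializer(serializers.Serializer):
#         status = ReverseChoiceField(choices=[(0, 'incomplete'), (4, 'public')])
#
# to_representation(4) yields 'public'; to_internal_value('public') yields 4.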
class TranslationSerializerField(fields.Field):
"""
Django-rest-framework custom serializer field for our TranslatedFields.
- When deserializing, in `to_internal_value`, it accepts both a string
or a dictionary. If a string is given, it'll be considered to be in the
default language.
- When serializing, its behavior depends on the parent's serializer
context:
If a request was included, and its method is 'GET', and a 'lang'
parameter was passed, then only returns one translation (letting the
TranslatedField figure out automatically which language to use).
Else, just returns a dict with all translations for the given
`field_name` on `obj`, with languages as the keys.
"""
default_error_messages = {
'min_length': _(u'The field must have a length of at least {num} '
u'characters.'),
'unknown_locale': _(u'The language code {lang_code} is invalid.')
}
def __init__(self, *args, **kwargs):
self.min_length = kwargs.pop('min_length', None)
super(TranslationSerializerField, self).__init__(*args, **kwargs)
def fetch_all_translations(self, obj, source, field):
translations = field.__class__.objects.filter(
id=field.id, localized_string__isnull=False)
return {to_language(trans.locale): unicode(trans)
for trans in translations} if translations else None
def fetch_single_translation(self, obj, source, field, requested_language):
return unicode(field) if field else None
def get_attribute(self, obj):
source = self.source or self.field_name
field = fields.get_attribute(obj, source.split('.'))
if not field:
return None
requested_language = None
request = self.context.get('request', None)
if request and request.method == 'GET' and 'lang' in request.GET:
requested_language = request.GET['lang']
if requested_language:
return self.fetch_single_translation(obj, source, field,
requested_language)
else:
return self.fetch_all_translations(obj, source, field)
def to_representation(self, val):
return val
def to_internal_value(self, data):
if isinstance(data, basestring):
self.validate(data)
return data.strip()
elif isinstance(data, dict):
self.validate(data)
for key, value in data.items():
data[key] = value and value.strip()
return data
return unicode(data)
def validate(self, value):
value_too_short = True
if isinstance(value, basestring):
if len(value.strip()) >= self.min_length:
value_too_short = False
else:
for locale, string in value.items():
if locale.lower() not in settings.LANGUAGES:
raise ValidationError(
self.error_messages['unknown_locale'].format(
lang_code=repr(locale)))
if string and (len(string.strip()) >= self.min_length):
value_too_short = False
break
if self.min_length and value_too_short:
raise ValidationError(
self.error_messages['min_length'].format(num=self.min_length))
class ESTranslationSerializerField(TranslationSerializerField):
"""
Like TranslationSerializerField, but fetching the data from a dictionary
built from ES data that we previously attached on the object.
"""
suffix = '_translations'
_source = None
def get_source(self):
if self._source is None:
return None
return self._source + self.suffix
def set_source(self, val):
self._source = val
source = property(get_source, set_source)
def attach_translations(self, obj, data, source_name, target_name=None):
"""
Look for the translation of `source_name` in `data` and create a dict
with all translations for this field (which will look like
{'en-US': 'mytranslation'}) and attach it to a property on `obj`.
The property name is built with `target_name` and `cls.suffix`. If
`target_name` is None, `source_name` is used instead.
The suffix is necessary for two reasons:
1) The translations app won't let us set the dict on the real field
without making db queries
2) This also exactly matches how we store translations in ES, so we can
directly fetch the translations in the data passed to this method.
"""
if target_name is None:
target_name = source_name
target_key = '%s%s' % (target_name, self.suffix)
source_key = '%s%s' % (source_name, self.suffix)
target_translations = {v.get('lang', ''): v.get('string', '')
for v in data.get(source_key, {}) or {}}
setattr(obj, target_key, target_translations)
# Serializer might need the single translation in the current language,
# so fetch it and attach it directly under `target_name`. We need a
# fake Translation() instance to prevent SQL queries from being
# automatically made by the translations app.
translation = self.fetch_single_translation(
obj, target_name, target_translations, get_language())
setattr(obj, target_name, Translation(localized_string=translation))
def fetch_all_translations(self, obj, source, field):
return field or None
def fetch_single_translation(self, obj, source, field, requested_language):
translations = self.fetch_all_translations(obj, source, field) or {}
return (translations.get(requested_language) or
translations.get(getattr(obj, 'default_locale', None)) or
translations.get(getattr(obj, 'default_language', None)) or
translations.get(settings.LANGUAGE_CODE) or None)
class SplitField(fields.Field):
"""
A field composed of two separate fields: one used for input, and another
used for output. Most commonly used to accept a primary key for input and
use a full serializer for output.
Example usage:
addon = SplitField(serializers.PrimaryKeyRelatedField(), AddonSerializer())
"""
label = None
def __init__(self, _input, output, **kwargs):
self.input = _input
self.output = output
kwargs['required'] = _input.required
fields.Field.__init__(self, source=_input.source, **kwargs)
def bind(self, field_name, parent):
fields.Field.bind(self, field_name, parent)
self.input.bind(field_name, parent)
self.output.bind(field_name, parent)
def get_read_only(self):
return self._read_only
def set_read_only(self, val):
self._read_only = val
self.input.read_only = val
self.output.read_only = val
read_only = property(get_read_only, set_read_only)
def get_value(self, data):
return self.input.get_value(data)
def to_internal_value(self, value):
return self.input.to_internal_value(value)
def get_attribute(self, obj):
return self.output.get_attribute(obj)
def to_representation(self, value):
return self.output.to_representation(value)
class SlugOrPrimaryKeyRelatedField(serializers.RelatedField):
"""
Combines SlugRelatedField and PrimaryKeyRelatedField. Takes a
`render_as` argument (either "pk" or "slug") to indicate how to
serialize.
"""
read_only = False
def __init__(self, *args, **kwargs):
self.render_as = kwargs.pop('render_as', 'pk')
if self.render_as not in ['pk', 'slug']:
raise ValueError("'render_as' must be one of 'pk' or 'slug', "
"not %r" % (self.render_as,))
self.slug_field = kwargs.pop('slug_field', 'slug')
super(SlugOrPrimaryKeyRelatedField, self).__init__(
*args, **kwargs)
def to_representation(self, obj):
if self.render_as == 'slug':
return getattr(obj, self.slug_field)
else:
return obj.pk
def to_internal_value(self, data):
try:
return self.queryset.get(pk=data)
except:
try:
return self.queryset.get(**{self.slug_field: data})
except ObjectDoesNotExist:
msg = (_('Invalid pk or slug "%s" - object does not exist.') %
smart_text(data))
raise ValidationError(msg)
| bsd-3-clause | 9,180,062,847,117,697,000 | 37.040293 | 79 | 0.62494 | false |
mppmu/secdec | examples/box1L/integrate_box1L.py | 1 | 1796 | from __future__ import print_function
from pySecDec.integral_interface import IntegralLibrary
import sympy as sp
# load c++ library
box1L = IntegralLibrary('box1L/box1L_pylink.so')
# choose integrator
box1L.use_Vegas(flags=2) # ``flags=2``: verbose --> see Cuba manual
# integrate
str_integral_without_prefactor, str_prefactor, str_integral_with_prefactor = box1L(real_parameters=[4.0, -0.75, 1.25, 1.0])
# convert complex numbers from c++ to sympy notation
str_integral_with_prefactor = str_integral_with_prefactor.replace(',','+I*')
str_prefactor = str_prefactor.replace(',','+I*')
str_integral_without_prefactor = str_integral_without_prefactor.replace(',','+I*')
# convert result to sympy expressions
integral_with_prefactor = sp.sympify(str_integral_with_prefactor.replace('+/-','*value+error*'))
integral_with_prefactor_err = sp.sympify(str_integral_with_prefactor.replace('+/-','*value+error*'))
prefactor = sp.sympify(str_prefactor)
integral_without_prefactor = sp.sympify(str_integral_without_prefactor.replace('+/-','*value+error*'))
integral_without_prefactor_err = sp.sympify(str_integral_without_prefactor.replace('+/-','*value+error*'))
# examples how to access individual orders
print('Numerical Result')
print('eps^-2:', integral_with_prefactor.coeff('eps',-2).coeff('value'), '+/- (', integral_with_prefactor_err.coeff('eps',-2).coeff('error'), ')')
print('eps^-1:', integral_with_prefactor.coeff('eps',-1).coeff('value'), '+/- (', integral_with_prefactor_err.coeff('eps',-1).coeff('error'), ')')
print('eps^0 :', integral_with_prefactor.coeff('eps',0).coeff('value'), '+/- (', integral_with_prefactor_err.coeff('eps',0).coeff('error'), ')')
print('Analytic Result')
print('eps^-2: -0.1428571429')
print('eps^-1: 0.6384337090')
print('eps^0 : -0.426354612+I*1.866502363')
| gpl-3.0 | -5,924,396,011,331,012,000 | 50.314286 | 146 | 0.712695 | false |
devicehive/devicehive-python | tests/test_api_network.py | 1 | 17181 | # Copyright (C) 2018 DataArt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from devicehive import NetworkError, ApiResponseError, SubscriptionError
def test_save(test):
test.only_admin_implementation()
device_hive_api = test.device_hive_api()
name = test.generate_id('n-s', test.NETWORK_ENTITY)
description = '%s-description' % name
network = device_hive_api.create_network(name, description)
name = test.generate_id('n-s', test.NETWORK_ENTITY)
description = '%s-description' % name
network.name = name
network.description = description
network.save()
network_1 = device_hive_api.get_network(network.id)
network.remove()
try:
network.save()
assert False
except NetworkError:
pass
try:
network_1.save()
assert False
except ApiResponseError as api_response_error:
assert api_response_error.code == 404
def test_remove(test):
test.only_admin_implementation()
device_hive_api = test.device_hive_api()
name = test.generate_id('n-r', test.NETWORK_ENTITY)
description = '%s-description' % name
network = device_hive_api.create_network(name, description)
network_1 = device_hive_api.get_network(network.id)
network.remove()
assert not network.id
assert not network.name
assert not network.description
try:
network.remove()
assert False
except NetworkError:
pass
try:
network_1.remove()
assert False
except ApiResponseError as api_response_error:
assert api_response_error.code == 404
# ==========================================================================
name = test.generate_id('n-r', test.NETWORK_ENTITY)
description = '%s-description' % name
network = device_hive_api.create_network(name, description)
device_id = test.generate_id('n-r', test.DEVICE_ENTITY)
device_hive_api.put_device(device_id, network_id=network.id)
try:
network.remove()
assert False
except ApiResponseError as api_response_error:
assert api_response_error.code == 400
device = device_hive_api.get_device(device_id)
assert device.id == device_id
network.remove(force=True)
try:
device_hive_api.get_device(device_id)
assert False
except ApiResponseError as api_response_error:
assert api_response_error.code == 404
def test_subscribe_insert_commands(test):
test.only_admin_implementation()
def init_data(handler):
device_id = test.generate_id('n-s-i-c', test.DEVICE_ENTITY)
network_name = test.generate_id('n-s-i-c', test.NETWORK_ENTITY)
description = '%s-description' % network_name
network = handler.api.create_network(network_name, description)
command_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
device = handler.api.put_device(device_id, network_id=network.id)
return device, network, command_names, []
def send_data(handler, device, command_names):
for command_name in command_names:
command = device.send_command(command_name)
handler.data['command_ids'].append(command.id)
def set_handler_data(handler, device, network, command_names, command_ids):
handler.data['device'] = device
handler.data['network'] = network
handler.data['command_names'] = command_names
handler.data['command_ids'] = command_ids
def handle_connect(handler):
device, network, command_names, command_ids = init_data(handler)
set_handler_data(handler, device, network, command_names, command_ids)
send_data(handler, device, command_names)
handler.data['subscription'] = network.subscribe_insert_commands()
def handle_command_insert(handler, command):
assert command.id in handler.data['command_ids']
handler.data['command_ids'].remove(command.id)
if handler.data['command_ids']:
return
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.data['network'].remove()
handler.disconnect()
test.run(handle_connect, handle_command_insert)
def handle_connect(handler):
device, network, command_names, command_ids = init_data(handler)
command_name = command_names[:1]
set_handler_data(handler, device, network, command_names, command_ids)
send_data(handler, device, command_name)
handler.data['subscription'] = network.subscribe_insert_commands(
names=command_name)
def handle_command_insert(handler, command):
assert command.id == handler.data['command_ids'][0]
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.data['network'].remove()
handler.disconnect()
test.run(handle_connect, handle_command_insert)
def handle_connect(handler):
network_name = test.generate_id('n-s-i-c', test.NETWORK_ENTITY)
description = '%s-description' % network_name
network = handler.api.create_network(network_name, description)
device_id = test.generate_id('n-s-i-c', test.DEVICE_ENTITY)
device = handler.api.put_device(device_id, network_id=network.id)
command_name = '%s-name-1' % device_id
command = device.send_command(command_name)
set_handler_data(handler, device, network, [command_name], [command.id])
handler.data['subscription'] = network.subscribe_insert_commands()
def handle_command_insert(handler, command):
assert command.id == handler.data['command_ids'][0]
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.data['network'].remove()
handler.disconnect()
test.run(handle_connect, handle_command_insert)
def handle_connect(handler):
network_name = test.generate_id('n-s-i-c', test.NETWORK_ENTITY)
description = '%s-description' % network_name
network = handler.api.create_network(network_name, description)
network_1 = handler.api.get_network(network.id)
network.remove()
try:
network_1.subscribe_insert_commands()
assert False
except ApiResponseError as api_response_error:
assert api_response_error.code == 404
test.run(handle_connect)
def test_unsubscribe_insert_commands(test):
test.only_admin_implementation()
def handle_connect(handler):
name = test.generate_id('n-u-i-c', test.NETWORK_ENTITY)
description = '%s-description' % name
network = handler.api.create_network(name, description)
subscription = network.subscribe_insert_commands()
subscription.remove()
try:
subscription.remove()
assert False
except SubscriptionError:
pass
network.remove()
test.run(handle_connect)
def test_subscribe_update_commands(test):
test.only_admin_implementation()
def init_data(handler):
device_id = test.generate_id('n-s-u-c', test.DEVICE_ENTITY)
network_name = test.generate_id('n-s-u-c', test.NETWORK_ENTITY)
description = '%s-description' % network_name
network = handler.api.create_network(network_name, description)
command_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
device = handler.api.put_device(device_id, network_id=network.id)
return device, network, command_names, []
def send_data(handler, device, command_names):
for command_name in command_names:
command = device.send_command(command_name)
handler.data['command_ids'].append(command.id)
command.status = 'status'
command.save()
def set_handler_data(handler, device, network, command_names, command_ids):
handler.data['device'] = device
handler.data['network'] = network
handler.data['command_names'] = command_names
handler.data['command_ids'] = command_ids
def handle_connect(handler):
device, network, command_names, command_ids = init_data(handler)
set_handler_data(handler, device, network, command_names, command_ids)
send_data(handler, device, command_names)
handler.data['subscription'] = network.subscribe_update_commands()
def handle_command_update(handler, command):
assert command.id in handler.data['command_ids']
handler.data['command_ids'].remove(command.id)
if handler.data['command_ids']:
return
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.data['network'].remove()
handler.disconnect()
test.run(handle_connect, handle_command_update=handle_command_update)
def handle_connect(handler):
device, network, command_names, command_ids = init_data(handler)
command_name = command_names[:1]
set_handler_data(handler, device, network, command_names, command_ids)
send_data(handler, device, command_name)
handler.data['subscription'] = network.subscribe_update_commands(
names=command_name)
def handle_command_update(handler, command):
assert command.id == handler.data['command_ids'][0]
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.data['network'].remove()
handler.disconnect()
test.run(handle_connect, handle_command_update=handle_command_update)
def handle_connect(handler):
network_name = test.generate_id('n-s-u-c', test.NETWORK_ENTITY)
description = '%s-description' % network_name
network = handler.api.create_network(network_name, description)
device_id = test.generate_id('n-s-u-c', test.DEVICE_ENTITY)
device = handler.api.put_device(device_id, network_id=network.id)
command_name = '%s-name-1' % device_id
command = device.send_command(command_name)
command.status = 'status'
command.save()
set_handler_data(handler, device, network, [command_name], [command.id])
handler.data['subscription'] = network.subscribe_update_commands()
def handle_command_update(handler, command):
assert command.id == handler.data['command_ids'][0]
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.data['network'].remove()
handler.disconnect()
test.run(handle_connect, handle_command_update=handle_command_update)
def handle_connect(handler):
network_name = test.generate_id('n-s-u-c', test.NETWORK_ENTITY)
description = '%s-description' % network_name
network = handler.api.create_network(network_name, description)
network_1 = handler.api.get_network(network.id)
network.remove()
try:
network_1.subscribe_update_commands()
assert False
except ApiResponseError as api_response_error:
assert api_response_error.code == 404
test.run(handle_connect)
def test_unsubscribe_update_commands(test):
test.only_admin_implementation()
def handle_connect(handler):
name = test.generate_id('n-u-u-c', test.NETWORK_ENTITY)
description = '%s-description' % name
network = handler.api.create_network(name, description)
subscription = network.subscribe_update_commands()
subscription.remove()
try:
subscription.remove()
assert False
except SubscriptionError:
pass
network.remove()
test.run(handle_connect)
def test_subscribe_notifications(test):
test.only_admin_implementation()
def init_data(handler):
device_id = test.generate_id('n-s-n', test.DEVICE_ENTITY)
network_name = test.generate_id('n-s-n', test.NETWORK_ENTITY)
description = '%s-description' % network_name
network = handler.api.create_network(network_name, description)
notification_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
device = handler.api.put_device(device_id, network_id=network.id)
return device, network, notification_names, []
def send_data(handler, device, notification_names):
for notification_name in notification_names:
notification = device.send_notification(notification_name)
handler.data['notification_ids'].append(notification.id)
def set_handler_data(handler, device, network, notification_names,
notification_ids):
handler.data['device'] = device
handler.data['network'] = network
handler.data['notification_names'] = notification_names
handler.data['notification_ids'] = notification_ids
def handle_connect(handler):
device, network, notification_names, notification_ids = init_data(
handler)
set_handler_data(handler, device, network, notification_names,
notification_ids)
send_data(handler, device, notification_names)
handler.data['subscription'] = network.subscribe_notifications()
def handle_notification(handler, notification):
assert notification.id in handler.data['notification_ids']
handler.data['notification_ids'].remove(notification.id)
if handler.data['notification_ids']:
return
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.data['network'].remove()
handler.disconnect()
test.run(handle_connect, handle_notification=handle_notification)
def handle_connect(handler):
device, network, notification_names, notification_ids = init_data(
handler)
notification_name = notification_names[:1]
set_handler_data(handler, device, network, notification_names,
notification_ids)
send_data(handler, device, notification_name)
handler.data['subscription'] = network.subscribe_notifications(
names=notification_name)
def handle_notification(handler, notification):
assert notification.id == handler.data['notification_ids'][0]
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.data['network'].remove()
handler.disconnect()
test.run(handle_connect, handle_notification=handle_notification)
def handle_connect(handler):
network_name = test.generate_id('n-s-n', test.NETWORK_ENTITY)
description = '%s-description' % network_name
network = handler.api.create_network(network_name, description)
device_id = test.generate_id('n-s-n', test.DEVICE_ENTITY)
device = handler.api.put_device(device_id, network_id=network.id)
notification_name = '%s-name-1' % device_id
notification = device.send_notification(notification_name)
set_handler_data(handler, device, network, [notification_name],
[notification.id])
handler.data['subscription'] = network.subscribe_notifications()
def handle_notification(handler, notification):
assert notification.id == handler.data['notification_ids'][0]
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.data['network'].remove()
handler.disconnect()
test.run(handle_connect, handle_notification=handle_notification)
def handle_connect(handler):
network_name = test.generate_id('n-s-n', test.NETWORK_ENTITY)
description = '%s-description' % network_name
network = handler.api.create_network(network_name, description)
network_1 = handler.api.get_network(network.id)
network.remove()
try:
network_1.subscribe_notifications()
assert False
except ApiResponseError as api_response_error:
assert api_response_error.code == 404
test.run(handle_connect)
def test_unsubscribe_notifications(test):
test.only_admin_implementation()
def handle_connect(handler):
name = test.generate_id('n-u-n', test.NETWORK_ENTITY)
description = '%s-description' % name
network = handler.api.create_network(name, description)
subscription = network.subscribe_notifications()
subscription.remove()
try:
subscription.remove()
assert False
except SubscriptionError:
pass
network.remove()
test.run(handle_connect)
| apache-2.0 | -3,097,000,737,378,666,000 | 38.047727 | 80 | 0.647867 | false |
pombredanne/unuk | src/unuk/contrib/eventweb/resources.py | 1 | 1601 | import logging
from unuk.core import exceptions
logger = logging.getLogger('eventweb')
class WSGIResource(object):
def __init__(self, server):
self.server = server
self.handler = server.handler
def __call__(self, environ, start_response):
handler = self.handler.get_handler(environ['PATH_INFO'])
if not handler:
start_response('404 Not Found', [])
return []
response = getattr(self,'{0}_response'.format(handler.serve_as))
return response(handler, environ, start_response)
def jsonrpc_response(self, rpc, environ, start_response):
if environ['REQUEST_METHOD'] != 'POST':
start_response('403 Forbidden', [])
return []
content = environ['wsgi.input'].read()
method, args, kwargs, id, version = rpc.get_method_and_args(content)
        if not method:
            result = exceptions.InvalidRequest('Method not available')
        else:
            try:
                function = rpc._getFunction(method)
            except exceptions.Fault, f:
                result = f
            else:
                rpc.logger.debug('invoking function %s' % method)
                result = function(rpc, environ, *args, **kwargs)
data = rpc.dumps(result, id = id, version = version)
start_response('200 OK', [('content-type', 'text/html'),
('content-length', str(len(data)))])
return [data]
def wsgi_response(self, handler, environ, start_response):
return handler(environ, start_response)
| bsd-3-clause | -2,716,399,108,737,009,700 | 32.375 | 76 | 0.571518 | false |
singingwolfboy/flask-dance-google | tests/conftest.py | 1 | 1092 | import os
import sys
from pathlib import Path
import pytest
from betamax import Betamax
from flask_dance.consumer.storage import MemoryStorage
from flask_dance.contrib.google import google
toplevel = Path(__file__).parent.parent
sys.path.insert(0, str(toplevel))
from google import app as flask_app, google_bp
GOOGLE_ACCESS_TOKEN = os.environ.get("GOOGLE_OAUTH_ACCESS_TOKEN", "fake-token")
with Betamax.configure() as config:
config.cassette_library_dir = toplevel / "tests" / "cassettes"
config.define_cassette_placeholder("<AUTH_TOKEN>", GOOGLE_ACCESS_TOKEN)
@pytest.fixture
def google_authorized(monkeypatch):
"""
    Monkeypatch the Google Flask-Dance blueprint so that the
OAuth token is always set.
"""
storage = MemoryStorage({"access_token": GOOGLE_ACCESS_TOKEN})
monkeypatch.setattr(google_bp, "storage", storage)
return storage
@pytest.fixture
def app():
return flask_app
@pytest.fixture
def flask_dance_sessions():
"""
Necessary to use the ``betamax_record_flask_dance`` fixture
from Flask-Dance
"""
return google
| mit | 2,641,468,074,866,768,400 | 23.818182 | 79 | 0.728022 | false |
hyperopt/hyperopt-nnet | hpnnet/nips2011_dbn.py | 1 | 5468 | """
Deep Belief Network (DBN) search spaces used in [1] and [2].
The functions in this file return pyll graphs that can be used as the `space`
argument to e.g. `hyperopt.fmin`. The pyll graphs include hyperparameter
constructs (e.g. `hyperopt.hp.uniform`) so `hyperopt.fmin` can perform
hyperparameter optimization.
See ./skdata_learning_algo.py for example usage of these functions.
[1] Bergstra, J., Bardenet, R., Bengio, Y., Kegl, B. (2011). Algorithms
for Hyper-parameter optimization, NIPS 2011.
[2] Bergstra, J., Bengio, Y. (2012). Random Search for Hyper-Parameter
Optimization, JMLR 13:281--305.
"""
__author__ = "James Bergstra"
__license__ = "BSD-3"
import numpy as np
from hyperopt.pyll import scope
from hyperopt import hp
import pyll_stubs
import nnet # -- load scope with nnet symbols
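# Rough usage sketch (an assumption, not shown in this module; see
# skdata_learning_algo.py for the real wiring): the pyll graph returned by
# preproc_space() is handed to hyperopt after the pyll_stubs literals have been
# swapped for real skdata Task objects, roughly as
#
#     from hyperopt import fmin, tpe
#     best = fmin(objective_fn, space=preproc_space(), algo=tpe.suggest,
#                 max_evals=100)
#
# where `objective_fn` is a hypothetical evaluation function.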
def preproc_space(
sup_min_epochs=300,
sup_max_epochs=2000,
max_seconds=60 * 60,
):
"""
Return a hyperopt-compatible pyll expression for a trained neural network.
The trained neural network will have 0, 1, 2, or 3 hidden layers, and may
have an affine first layer that does column normalization or PCA
pre-processing.
Each layer of the network will be pre-trained by some amount of
contrastive divergence before being fine-tuning by SGD.
The training program is built using stub literals `pyll_stubs.train_task`
and `pyll_stubs.valid_task`. When evaluating the pyll program, these
literals must be replaced with skdata Task objects with
`vector_classification` semantics. See `skdata_learning_algo.py` for how
to use the `use_obj_for_literal_in_memo` function to swap live Task
objects in for these stubs.
The search space described by this function corresponds to the DBN model
used in [1] and [2].
"""
train_task_x = scope.getattr(pyll_stubs.train_task, 'x')
nnet0 = scope.NNet([], n_out=scope.getattr(train_task_x, 'shape')[1])
nnet1 = hp.choice('preproc',
[
nnet0, # -- raw data
scope.nnet_add_layers( # -- ZCA of data
nnet0,
scope.zca_layer(
train_task_x,
energy=hp.uniform('pca_energy', .5, 1),
eps=1e-14,
)),
])
param_seed = hp.choice('iseed', [5, 6, 7, 8])
time_limit = scope.time() + max_seconds
nnets = [nnet1]
nnet_i_pt = nnet1
for ii, cd_epochs_max in enumerate([3000, 2000, 1500]):
layer = scope.random_sigmoid_layer(
# -- hack to get different seeds for dif't layers
seed=param_seed + cd_epochs_max,
n_in=scope.getattr(nnet_i_pt, 'n_out'),
n_out=hp.qloguniform('n_hid_%i' % ii,
np.log(2**7),
np.log(2**12),
q=16),
dist=hp.choice('W_idist_%i' % ii, ['uniform', 'normal']),
scale_heuristic=hp.choice(
'W_ialgo_%i' % ii, [
('old', hp.lognormal('W_imult_%i' % ii, 0, 1)),
('Glorot',)]),
squash='logistic',
)
nnet_i_raw = scope.nnet_add_layer(nnet_i_pt, layer)
# -- repeatedly calculating lower-layers wastes some CPU, but keeps
# memory usage much more stable across jobs (good for cluster)
# and the wasted CPU is not so much overall.
nnet_i_pt = scope.nnet_pretrain_top_layer_cd(
nnet_i_raw,
train_task_x,
lr=hp.lognormal('cd_lr_%i' % ii, np.log(.01), 2),
seed=1 + hp.randint('cd_seed_%i' % ii, 10),
n_epochs=hp.qloguniform('cd_epochs_%i' % ii,
np.log(1),
np.log(cd_epochs_max),
q=1),
# -- for whatever reason (?), this was fixed at 100
batchsize=100,
sample_v0s=hp.choice('sample_v0s_%i' % ii, [False, True]),
lr_anneal_start=hp.qloguniform('lr_anneal_%i' % ii,
np.log(10),
np.log(10000),
q=1),
time_limit=time_limit,
)
nnets.append(nnet_i_pt)
# this prior is not what I would do now, but it is what I did then...
nnet_features = hp.pchoice(
'depth',
[(.5, nnets[0]),
(.25, nnets[1]),
(.125, nnets[2]),
(.125, nnets[3])])
sup_nnet = scope.nnet_add_layer(
nnet_features,
scope.zero_softmax_layer(
n_in=scope.getattr(nnet_features, 'n_out'),
n_out=scope.getattr(pyll_stubs.train_task, 'n_classes')))
nnet4, report = scope.nnet_sgd_finetune_classifier(
sup_nnet,
pyll_stubs.train_task,
pyll_stubs.valid_task,
fixed_nnet=nnet1,
max_epochs=sup_max_epochs,
min_epochs=sup_min_epochs,
batch_size=hp.choice('batch_size', [20, 100]),
lr=hp.lognormal('lr', np.log(.01), 3.),
lr_anneal_start=hp.qloguniform(
'lr_anneal_start',
np.log(100),
np.log(10000),
q=1),
l2_penalty=hp.choice('l2_penalty', [
0,
hp.lognormal('l2_penalty_nz', np.log(1.0e-6), 2.)]),
time_limit=time_limit,
)
return nnet4, report
| bsd-3-clause | -6,206,191,359,409,669,000 | 34.051282 | 78 | 0.544989 | false |
petertseng/x-common | bin/check-immutability.py | 1 | 1580 | #!/usr/bin/env python
import json
import subprocess
import sys
oldf = sys.argv[1]
newf = sys.argv[2]
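# Usage (file names are illustrative): compare two revisions of an exercise's
# canonical data, exiting non-zero when immutable fields changed or tests were
# deleted:
#
#     ./check-immutability.py old/canonical-data.json new/canonical-data.json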
immutable_keys = ('property', 'input', 'expected')
# Use jq to flatten the test data, and parse it
old = json.loads(subprocess.run([f"jq -r '[.. | objects | select(.uuid != null)]' {oldf}"], stdout=subprocess.PIPE, shell=True).stdout.decode('utf-8'))
new = json.loads(subprocess.run([f"jq -r '[.. | objects | select(.uuid != null)]' {newf}"], stdout=subprocess.PIPE, shell=True).stdout.decode('utf-8'))
# Convert new to dict uuid => case
new = {case['uuid']: case for case in new}
fails = set()
deleted = set()
# Iterate through old cases as only those could potentially be mutated
for case in old:
uuid = case['uuid']
# Check if the case has been deleted
if uuid not in new:
deleted.add(uuid)
continue
# Check that scenarios are only updated additively
if 'scenarios' in case and not set(case['scenarios']).issubset(set(new[uuid]['scenarios'])):
fails.add(uuid)
continue
# Check for changes to immutable keys
for key in immutable_keys:
if case[key] != new[uuid][key]:
fails.add(uuid)
break
if len(fails) == 0 and len(deleted) == 0:
sys.exit(0)
if len(fails) > 0:
print('The following tests contain illegal mutations:')
for failure in fails:
print(f" - {failure} ({new[failure]['description']})")
if len(deleted) > 0:
print('The following tests have been deleted illegally:')
for deletion in deleted:
print(f" - {deletion}")
sys.exit(1)
| mit | -3,237,632,367,559,430,000 | 28.259259 | 151 | 0.640506 | false |
ssfrr/advenshare | mouserver/mouserver/server.py | 1 | 6082 | import websocket
import json
import logging
import coloredlogs
import sys
import ssl
from getopt import gnu_getopt, GetoptError
from mouserver_ext import grab_window, Window
import random
import string
import time
class Mouserver:
def __init__(self, ws_url, session, window):
self.ws_url = ws_url
self.session = session
self.window = window
self.log = logging.getLogger('mouserver')
self.ws_log = logging.getLogger('websocket')
self.uid = ''.join(random.choice(string.letters) for i in xrange(20))
self.name = 'MouServer'
self.log.info("Websocket URL: %s", self.ws_url)
self.log.info("Session ID: %s", self.session)
window_name = self.window.get_name()
w, h = self.window.get_size()
self.log.info("Window: %s (%dx%d)", window_name, w, h)
self.method_table = {}
self.register('mouseMove', self.mouse_move)
self.register('mouseDown', self.mouse_down)
self.register('mouseUp', self.mouse_up)
self.wsapp = websocket.WebSocketApp(
ws_url,
on_message=self.on_message,
on_error=self.on_error,
on_close=self.on_close,
on_open=self.on_open)
def run_forever(self):
self.wsapp.run_forever(sslopt={'cert_reqs': ssl.CERT_NONE})
def on_message(self, ws, message):
try:
msg = json.loads(message)
except ValueError:
self.log.warning("Received non-JSON data")
return
if 'type' not in msg:
self.log.warning("Received data with no command")
return
msg_type = msg['type']
method = self.method_table.get(msg_type, None)
if method is not None:
method(msg)
else:
self.log.warning("Received unknown msg type: %s", msg_type)
def on_error(self, ws, error):
self.ws_log.error(error)
def on_close(self, ws):
self.ws_log.error("Connection closed")
raise MouserverConnectionClosedError("Connection closed")
def on_open(self, ws):
self.ws_log.info("Connection established")
self.ws_log.info("Joining session: %s", self.session)
ws.send(json.dumps({
'type': 'announce',
'srcID': self.uid,
'userName': self.name,
'activeMouseOnly': True
}))
ws.send(json.dumps({
'type': 'joinSession',
'srcID': self.uid,
'sessionID': self.session
}))
def register(self, msg_type, method):
self.method_table[msg_type] = method
def mouse_move(self, msg):
x = float(msg['x'])
y = float(msg['y'])
self.log.debug("mouse_move (%f, %f)", x, y)
self.window.mouse_move_ratio(x, y)
def mouse_down(self, msg):
x = float(msg['x'])
y = float(msg['y'])
# javascript (and the websockets) use 0, 1, 2 for the mouse buttons,
# but libxdo uses 1, 2, 3
button = int(msg['button']) + 1
self.log.debug("mouse_down (%f, %f, %d)", (x, y, button))
self.window.mouse_move_ratio(x, y)
self.window.mouse_down(button)
def mouse_up(self, msg):
x = float(msg['x'])
y = float(msg['y'])
# javascript (and the websockets) use 0, 1, 2 for the mouse buttons,
# but libxdo uses 1, 2, 3
button = int(msg['button']) + 1
self.log.debug("mouse_up (%f, %f, %d)", (x, y, button))
self.window.mouse_move_ratio(x, y)
self.window.mouse_up(button)
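# Message shapes handled above (inferred from the handlers, with illustrative
# values):
#
#     {"type": "mouseMove", "x": 0.42, "y": 0.17}
#     {"type": "mouseDown", "x": 0.42, "y": 0.17, "button": 0}
#     {"type": "mouseUp",   "x": 0.42, "y": 0.17, "button": 0}
#
# x and y are ratios of the window size; button uses the JavaScript 0/1/2
# numbering, converted to libxdo's 1/2/3 before being replayed.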
class MouserverConnectionClosedError(Exception):
pass
def print_usage():
print "usage: %s -u <websocket_url> -s <session_id> [-w <window_id>]" % sys.argv[0]
print ""
print " --url, -u <websocket_url>"
print " specifies the websocket URL to which the program should"
print " connect to receive user interaction events (required)"
print " --session, -s <session_id>"
print " specifies the string that uniquely identifies this session"
print " (required)"
print " --window, -w <window_id>"
print " specifies the X11 window ID of the window with which to interact."
print " If this is not specified, you will be prompted to select a window"
print " by clicking on it at startup."
print ""
print " --verbose, -v"
print " outputs lots of protocol information"
print " --help, -h"
print " displays this usage information."
def main():
loglevel = logging.INFO
url = None
session = None
window = None
short_opts = "hvu:s:w:"
long_opts = [
'help',
'verbose',
'url=',
'session=',
'window=',
]
try:
opts, args = gnu_getopt(sys.argv[1:], short_opts, long_opts)
except GetoptError as err:
print str(err)
print_usage()
sys.exit(2)
for o, a in opts:
if o in ('-h', '--help'):
print_usage()
sys.exit(0)
elif o in ('-v', '--verbose'):
loglevel = logging.DEBUG
elif o in ('-u', '--url'):
url = a
elif o in ('-s', '--session'):
session = a
elif o in ('-w', '--window'):
window = long(a)
else:
print "Unknown option: %s" % o
print_usage()
sys.exit(2)
if url is None:
print "Must specify server URL (-u)"
sys.exit(1)
if session is None:
print "Must specify session ID (-s)"
sys.exit(1)
if window is None:
print "Please select a window by clicking on it."
window = grab_window()
else:
window = Window(window)
log = logging.getLogger("main")
coloredlogs.install(level=loglevel)
while True:
server = Mouserver(url, session, window)
server.run_forever()
time.sleep(5.0)
log.warning("Restarting after 5 seconds due to dropped connection")
if __name__ == '__main__':
main()
| mit | 6,848,767,843,313,734,000 | 30.189744 | 87 | 0.554916 | false |
dmlc/tvm | python/tvm/topi/vision/rcnn/roi_align.py | 1 | 3987 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Roi align operator"""
import tvm
from tvm import te
from ...utils import get_const_tuple
from ...cpp.utils import bilinear_sample_nchw
def roi_align_nchw(data, rois, pooled_size, spatial_scale, sample_ratio=-1):
"""ROI align operator in NCHW layout.
Parameters
----------
data : tvm.te.Tensor
4-D with shape [batch, channel, height, width]
rois : tvm.te.Tensor
2-D with shape [num_roi, 5]. The last dimension should be in format of
[batch_index, w_start, h_start, w_end, h_end]
pooled_size : int or list/tuple of two ints
output size, or [out_height, out_width]
spatial_scale : float
Ratio of input feature map height (or w) to raw image height (or w). Equals the reciprocal
of total stride in convolutional layers, which should be in range (0.0, 1.0]
sample_ratio : int
Optional sampling ratio of ROI align, using adaptive size by default.
Returns
-------
output : tvm.te.Tensor
4-D with shape [num_roi, channel, pooled_size, pooled_size]
"""
dtype = rois.dtype
_, channel, height, width = get_const_tuple(data.shape)
num_roi, _ = get_const_tuple(rois.shape)
if isinstance(pooled_size, int):
pooled_size_h = pooled_size_w = pooled_size
else:
pooled_size_h, pooled_size_w = pooled_size
def _bilinear(i, c, y, x):
outside = tvm.tir.any(y < -1.0, x < -1.0, y > height, x > width)
y = tvm.te.max(y, 0.0)
x = tvm.te.max(x, 0.0)
val = bilinear_sample_nchw(data, (i, c, y, x), height - 1, width - 1)
return tvm.tir.if_then_else(outside, 0.0, val)
def _sample(i, c, ph, pw):
roi = rois[i]
batch_index = roi[0].astype("int32")
roi_start_w, roi_start_h, roi_end_w, roi_end_h = roi[1], roi[2], roi[3], roi[4]
roi_start_h *= spatial_scale
roi_end_h *= spatial_scale
roi_start_w *= spatial_scale
roi_end_w *= spatial_scale
# force malformed ROIs to be 1x1
roi_h = tvm.te.max(roi_end_h - roi_start_h, tvm.tir.const(1.0, dtype))
roi_w = tvm.te.max(roi_end_w - roi_start_w, tvm.tir.const(1.0, dtype))
bin_h = roi_h / pooled_size_h
bin_w = roi_w / pooled_size_w
if sample_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = tvm.tir.const(sample_ratio, "int32")
else:
roi_bin_grid_h = te.ceil(roi_h / pooled_size_h).astype("int32")
roi_bin_grid_w = te.ceil(roi_w / pooled_size_w).astype("int32")
count = roi_bin_grid_h * roi_bin_grid_w
rh = te.reduce_axis((0, roi_bin_grid_h))
rw = te.reduce_axis((0, roi_bin_grid_w))
roi_start_h += ph * bin_h
roi_start_w += pw * bin_w
return te.sum(
_bilinear(
batch_index,
c,
roi_start_h + (rh + 0.5) * bin_h / roi_bin_grid_h,
roi_start_w + (rw + 0.5) * bin_w / roi_bin_grid_w,
)
/ count,
axis=[rh, rw],
)
return te.compute(
(num_roi, channel, pooled_size_h, pooled_size_w), _sample, tag="pool,roi_align_nchw"
)
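# Illustrative sketch (shapes are hypothetical): building the compute for a
# 1x256x14x14 feature map and 8 ROIs.
#
#     data = te.placeholder((1, 256, 14, 14), name="data")
#     rois = te.placeholder((8, 5), name="rois")
#     out = roi_align_nchw(data, rois, pooled_size=7,
#                          spatial_scale=1.0 / 16, sample_ratio=2)
#     # out has shape (8, 256, 7, 7)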
| apache-2.0 | 9,211,143,365,963,043,000 | 35.916667 | 98 | 0.605217 | false |
tasleson/lvm-dubstep | lvmdbus/cmdhandler.py | 1 | 14945 | # Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen, PIPE
import time
import threading
from itertools import chain
try:
from . import cfg
from .utils import pv_dest_ranges, log_debug, log_error
from .lvm_shell_proxy import LVMShellProxy
except SystemError:
import cfg
from utils import pv_dest_ranges, log_debug, log_error
from lvm_shell_proxy import LVMShellProxy
SEP = '{|}'
total_time = 0.0
total_count = 0
# We need to prevent different threads from using the same lvm shell
# at the same time.
cmd_lock = threading.Lock()
# The actual method which gets called to invoke the lvm command, can vary
# from forking a new process to using lvm shell
_t_call = None
def _debug_c(cmd, exit_code, out):
log_error('CMD= %s' % ' '.join(cmd))
log_error(("EC= %d" % exit_code))
log_error(("STDOUT=\n %s\n" % out[0]))
log_error(("STDERR=\n %s\n" % out[1]))
def call_lvm(command, debug=False):
"""
Call an executable and return a tuple of exitcode, stdout, stderr
:param command: Command to execute
:param debug: Dump debug to stdout
"""
# print 'STACK:'
# for line in traceback.format_stack():
# print line.strip()
# Prepend the full lvm executable so that we can run different versions
# in different locations on the same box
command.insert(0, cfg.LVM_CMD)
process = Popen(command, stdout=PIPE, stderr=PIPE, close_fds=True)
out = process.communicate()
stdout_text = bytes(out[0]).decode("utf-8")
stderr_text = bytes(out[1]).decode("utf-8")
if debug or process.returncode != 0:
_debug_c(command, process.returncode, (stdout_text, stderr_text))
if process.returncode == 0:
if cfg.DEBUG and out[1] and len(out[1]):
			log_error('WARNING: lvm is outputting text to STDERR on success!')
_debug_c(command, process.returncode, (stdout_text, stderr_text))
return process.returncode, stdout_text, stderr_text
def _shell_cfg():
global _t_call
log_debug('Using lvm shell!')
lvm_shell = LVMShellProxy()
_t_call = lvm_shell.call_lvm
if cfg.USE_SHELL:
_shell_cfg()
else:
_t_call = call_lvm
def set_execution(shell):
global _t_call
with cmd_lock:
_t_call = None
if shell:
log_debug('Using lvm shell!')
lvm_shell = LVMShellProxy()
_t_call = lvm_shell.call_lvm
else:
_t_call = call_lvm
def time_wrapper(command, debug=False):
global total_time
global total_count
with cmd_lock:
start = time.time()
results = _t_call(command, debug)
total_time += (time.time() - start)
total_count += 1
return results
call = time_wrapper
# Default cmd
# Place default arguments for every command here.
def _dc(cmd, args):
c = [cmd, '--noheading', '--separator', '%s' % SEP, '--nosuffix',
'--unbuffered', '--units', 'b']
c.extend(args)
return c
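# Split lvm report output into rows: lines containing SEP become lists of
# stripped column values, while any other non-empty line is kept as a plain string.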
def parse(out):
rc = []
for line in out.split('\n'):
# This line includes separators, so process them
if SEP in line:
elem = line.split(SEP)
cleaned_elem = []
for e in elem:
e = e.strip()
cleaned_elem.append(e)
if len(cleaned_elem) > 1:
rc.append(cleaned_elem)
else:
t = line.strip()
if len(t) > 0:
rc.append(t)
return rc
def parse_column_names(out, column_names):
lines = parse(out)
rc = []
for i in range(0, len(lines)):
d = dict(list(zip(column_names, lines[i])))
rc.append(d)
return rc
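# Flatten an options mapping into CLI arguments. Hypothetical example:
# {'-f': '', 'size': 10} -> ['-f', '--size', '10'] (order follows dict iteration).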
def options_to_cli_args(options):
rc = []
for k, v in list(dict(options).items()):
if k.startswith("-"):
rc.append(k)
else:
rc.append("--%s" % k)
if v != "":
rc.append(str(v))
return rc
def pv_remove(device, remove_options):
cmd = ['pvremove']
cmd.extend(options_to_cli_args(remove_options))
cmd.append(device)
return call(cmd)
def _tag(operation, what, add, rm, tag_options):
cmd = [operation]
cmd.extend(options_to_cli_args(tag_options))
if isinstance(what, list):
cmd.extend(what)
else:
cmd.append(what)
if add:
cmd.extend(list(chain.from_iterable(('--addtag', x) for x in add)))
if rm:
cmd.extend(list(chain.from_iterable(('--deltag', x) for x in rm)))
return call(cmd, False)
def pv_tag(pv_devices, add, rm, tag_options):
return _tag('pvchange', pv_devices, add, rm, tag_options)
def vg_tag(vg_name, add, rm, tag_options):
return _tag('vgchange', vg_name, add, rm, tag_options)
def lv_tag(lv_name, add, rm, tag_options):
return _tag('lvchange', lv_name, add, rm, tag_options)
def vg_rename(vg, new_name, rename_options):
cmd = ['vgrename']
cmd.extend(options_to_cli_args(rename_options))
cmd.extend([vg, new_name])
return call(cmd)
def vg_remove(vg_name, remove_options):
cmd = ['vgremove']
cmd.extend(options_to_cli_args(remove_options))
cmd.extend(['-f', vg_name])
return call(cmd)
def vg_lv_create(vg_name, create_options, name, size_bytes, pv_dests):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--size', str(size_bytes) + 'B'])
cmd.extend(['--name', name, vg_name])
pv_dest_ranges(cmd, pv_dests)
return call(cmd)
def vg_lv_snapshot(vg_name, snapshot_options, name, size_bytes):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(snapshot_options))
cmd.extend(["-s"])
if size_bytes != 0:
cmd.extend(['--size', str(size_bytes) + 'B'])
cmd.extend(['--name', name, vg_name])
return call(cmd)
def vg_lv_create_linear(vg_name, create_options, name, size_bytes, thin_pool):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
if not thin_pool:
cmd.extend(['--size', str(size_bytes) + 'B'])
else:
cmd.extend(['--thin', '--size', str(size_bytes) + 'B'])
cmd.extend(['--name', name, vg_name])
return call(cmd)
def vg_lv_create_striped(vg_name, create_options, name, size_bytes,
num_stripes, stripe_size_kb, thin_pool):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
if not thin_pool:
cmd.extend(['--size', str(size_bytes) + 'B'])
else:
cmd.extend(['--thin', '--size', str(size_bytes) + 'B'])
cmd.extend(['--stripes', str(num_stripes)])
if stripe_size_kb != 0:
cmd.extend(['--stripesize', str(stripe_size_kb)])
cmd.extend(['--name', name, vg_name])
return call(cmd)
def _vg_lv_create_raid(vg_name, create_options, name, raid_type, size_bytes,
num_stripes, stripe_size_kb):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--type', raid_type])
cmd.extend(['--size', str(size_bytes) + 'B'])
if num_stripes != 0:
cmd.extend(['--stripes', str(num_stripes)])
if stripe_size_kb != 0:
cmd.extend(['--stripesize', str(stripe_size_kb)])
cmd.extend(['--name', name, vg_name])
return call(cmd)
def vg_lv_create_raid(vg_name, create_options, name, raid_type, size_bytes,
num_stripes, stripe_size_kb):
return _vg_lv_create_raid(vg_name, create_options, name, raid_type,
size_bytes, num_stripes, stripe_size_kb)
def vg_lv_create_mirror(vg_name, create_options, name, size_bytes, num_copies):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--type', 'mirror'])
cmd.extend(['--mirrors', str(num_copies)])
cmd.extend(['--size', str(size_bytes) + 'B'])
cmd.extend(['--name', name, vg_name])
return call(cmd)
def vg_create_cache_pool(md_full_name, data_full_name, create_options):
cmd = ['lvconvert']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--type', 'cache-pool', '--force', '-y',
'--poolmetadata', md_full_name, data_full_name])
return call(cmd)
def vg_create_thin_pool(md_full_name, data_full_name, create_options):
cmd = ['lvconvert']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--type', 'thin-pool', '--force', '-y',
'--poolmetadata', md_full_name, data_full_name])
return call(cmd)
def lv_remove(lv_path, remove_options):
cmd = ['lvremove']
cmd.extend(options_to_cli_args(remove_options))
cmd.extend(['-f', lv_path])
return call(cmd)
def lv_rename(lv_path, new_name, rename_options):
cmd = ['lvrename']
cmd.extend(options_to_cli_args(rename_options))
cmd.extend([lv_path, new_name])
return call(cmd)
def lv_resize(lv_full_name, size_change, pv_dests,
resize_options):
cmd = ['lvresize', '--force']
cmd.extend(options_to_cli_args(resize_options))
if size_change < 0:
cmd.append("-L-%dB" % (-size_change))
else:
cmd.append("-L+%dB" % (size_change))
cmd.append(lv_full_name)
pv_dest_ranges(cmd, pv_dests)
return call(cmd)
def lv_lv_create(lv_full_name, create_options, name, size_bytes):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--virtualsize', str(size_bytes) + 'B', '-T'])
cmd.extend(['--name', name, lv_full_name])
return call(cmd)
def lv_cache_lv(cache_pool_full_name, lv_full_name, cache_options):
# lvconvert --type cache --cachepool VG/CachePoolLV VG/OriginLV
cmd = ['lvconvert']
cmd.extend(options_to_cli_args(cache_options))
cmd.extend(['--type', 'cache', '--cachepool',
cache_pool_full_name, lv_full_name])
return call(cmd)
def lv_detach_cache(lv_full_name, detach_options, destroy_cache):
cmd = ['lvconvert']
if destroy_cache:
option = '--uncache'
else:
# Currently fairly dangerous
# see: https://bugzilla.redhat.com/show_bug.cgi?id=1248972
option = '--splitcache'
cmd.extend(options_to_cli_args(detach_options))
# needed to prevent interactive questions
cmd.extend(["--yes", "--force"])
cmd.extend([option, lv_full_name])
return call(cmd)
def pv_retrieve_with_segs(device=None):
d = []
columns = ['pv_name', 'pv_uuid', 'pv_fmt', 'pv_size', 'pv_free',
'pv_used', 'dev_size', 'pv_mda_size', 'pv_mda_free',
'pv_ba_start', 'pv_ba_size', 'pe_start', 'pv_pe_count',
'pv_pe_alloc_count', 'pv_attr', 'pv_tags', 'vg_name',
'vg_uuid', 'pv_seg_start', 'pvseg_size', 'segtype']
# Lvm has some issues where it returns failure when querying pvs when other
# operations are in process, see:
# https://bugzilla.redhat.com/show_bug.cgi?id=1274085
while True:
cmd = _dc('pvs', ['-o', ','.join(columns)])
if device:
cmd.extend(device)
rc, out, err = call(cmd)
if rc == 0:
d = parse_column_names(out, columns)
break
else:
time.sleep(0.2)
log_debug("LVM Bug workaround, retrying pvs command...")
return d
def pv_resize(device, size_bytes, create_options):
cmd = ['pvresize']
cmd.extend(options_to_cli_args(create_options))
if size_bytes != 0:
cmd.extend(['--setphysicalvolumesize', str(size_bytes) + 'B'])
cmd.extend([device])
return call(cmd)
def pv_create(create_options, devices):
cmd = ['pvcreate', '-ff']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(devices)
return call(cmd)
def pv_allocatable(device, yes, allocation_options):
yn = 'n'
if yes:
yn = 'y'
cmd = ['pvchange']
cmd.extend(options_to_cli_args(allocation_options))
cmd.extend(['-x', yn, device])
return call(cmd)
def pv_scan(activate, cache, device_paths, major_minors, scan_options):
cmd = ['pvscan']
cmd.extend(options_to_cli_args(scan_options))
if activate:
cmd.extend(['--activate', "ay"])
if cache:
cmd.append('--cache')
if len(device_paths) > 0:
for d in device_paths:
cmd.append(d)
if len(major_minors) > 0:
for mm in major_minors:
cmd.append("%s:%s" % (mm))
return call(cmd)
def vg_create(create_options, pv_devices, name):
cmd = ['vgcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.append(name)
cmd.extend(pv_devices)
return call(cmd)
def vg_change(change_options, name):
cmd = ['vgchange']
cmd.extend(options_to_cli_args(change_options))
cmd.append(name)
return call(cmd)
def vg_reduce(vg_name, missing, pv_devices, reduce_options):
cmd = ['vgreduce']
cmd.extend(options_to_cli_args(reduce_options))
if len(pv_devices) == 0:
cmd.append('--all')
if missing:
cmd.append('--removemissing')
cmd.append(vg_name)
cmd.extend(pv_devices)
return call(cmd)
def vg_extend(vg_name, extend_devices, extend_options):
cmd = ['vgextend']
cmd.extend(options_to_cli_args(extend_options))
cmd.append(vg_name)
cmd.extend(extend_devices)
return call(cmd)
def _vg_value_set(name, arguments, options):
cmd = ['vgchange']
cmd.extend(options_to_cli_args(options))
cmd.append(name)
cmd.extend(arguments)
return call(cmd)
def vg_allocation_policy(vg_name, policy, policy_options):
return _vg_value_set(vg_name, ['--alloc', policy], policy_options)
def vg_max_pv(vg_name, number, max_options):
return _vg_value_set(vg_name, ['--maxphysicalvolumes', str(number)],
max_options)
def vg_max_lv(vg_name, number, max_options):
return _vg_value_set(vg_name, ['-l', str(number)], max_options)
def vg_uuid_gen(vg_name, ignore, options):
assert ignore is None
return _vg_value_set(vg_name, ['--uuid'], options)
def activate_deactivate(op, name, activate, control_flags, options):
cmd = [op]
cmd.extend(options_to_cli_args(options))
op = '-a'
if control_flags:
# Autoactivation
if (1 << 0) & control_flags:
op += 'a'
# Exclusive locking (Cluster)
if (1 << 1) & control_flags:
op += 'e'
# Local node activation
if (1 << 2) & control_flags:
op += 'l'
# Activation modes
if (1 << 3) & control_flags:
cmd.extend(['--activationmode', 'complete'])
elif (1 << 4) & control_flags:
cmd.extend(['--activationmode', 'partial'])
# Ignore activation skip
if (1 << 5) & control_flags:
cmd.append('--ignoreactivationskip')
if activate:
op += 'y'
else:
op += 'n'
cmd.append(op)
cmd.append(name)
return call(cmd)
def vg_retrieve(vg_specific):
if vg_specific:
assert isinstance(vg_specific, list)
columns = ['vg_name', 'vg_uuid', 'vg_fmt', 'vg_size', 'vg_free',
'vg_sysid', 'vg_extent_size', 'vg_extent_count',
'vg_free_count', 'vg_profile', 'max_lv', 'max_pv',
'pv_count', 'lv_count', 'snap_count', 'vg_seqno',
'vg_mda_count', 'vg_mda_free', 'vg_mda_size',
'vg_mda_used_count', 'vg_attr', 'vg_tags']
cmd = _dc('vgs', ['-o', ','.join(columns)])
if vg_specific:
cmd.extend(vg_specific)
d = []
rc, out, err = call(cmd)
if rc == 0:
d = parse_column_names(out, columns)
return d
def lv_retrieve_with_segments():
columns = ['lv_uuid', 'lv_name', 'lv_path', 'lv_size',
'vg_name', 'pool_lv_uuid', 'pool_lv', 'origin_uuid',
'origin', 'data_percent',
'lv_attr', 'lv_tags', 'vg_uuid', 'lv_active', 'data_lv',
'metadata_lv', 'seg_pe_ranges', 'segtype', 'lv_parent',
'lv_role', 'lv_layout']
cmd = _dc('lvs', ['-a', '-o', ','.join(columns)])
rc, out, err = call(cmd)
d = []
if rc == 0:
d = parse_column_names(out, columns)
return d
if __name__ == '__main__':
pv_data = pv_retrieve_with_segs()
for p in pv_data:
log_debug(str(p))
| gpl-2.0 | -7,792,544,381,953,663,000 | 23.14378 | 79 | 0.658147 | false |
gmimano/commcaretest | corehq/apps/reports/filters/fixtures.py | 1 | 5198 | import json
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_noop
from corehq.apps.fixtures.models import FixtureDataType, FixtureDataItem
from corehq.apps.locations.util import load_locs_json, location_hierarchy_config
from corehq.apps.reports.filters.base import BaseReportFilter
class AsyncDrillableFilter(BaseReportFilter):
# todo: add documentation
# todo: cleanup template
"""
example_hierarchy = [{"type": "state", "display": "name"},
{"type": "district", "parent_ref": "state_id", "references": "id", "display": "name"},
{"type": "block", "parent_ref": "district_id", "references": "id", "display": "name"},
{"type": "village", "parent_ref": "block_id", "references": "id", "display": "name"}]
"""
template = "reports/filters/drillable_async.html"
    hierarchy = [] # a list of fixture data type names representing the different levels of the hierarchy, starting with the root
def fdi_to_json(self, fdi):
return {
'fixture_type': fdi.data_type_id,
'fields': fdi.fields,
'id': fdi.get_id,
'children': getattr(fdi, '_children', None),
}
fdts = {}
def data_types(self, index=None):
if not self.fdts:
self.fdts = [FixtureDataType.by_domain_tag(self.domain, h["type"]).one() for h in self.hierarchy]
return self.fdts if index is None else self.fdts[index]
@property
def api_root(self):
return reverse('api_dispatch_list', kwargs={'domain': self.domain,
'resource_name': 'fixture',
'api_name': 'v0.1'})
@property
def full_hierarchy(self):
ret = []
for i, h in enumerate(self.hierarchy):
new_h = dict(h)
new_h['id'] = self.data_types(i).get_id
ret.append(new_h)
return ret
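    # Walks the fixture hierarchy to build the chain of data items from the root level down to the selected leaf item.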
def generate_lineage(self, leaf_type, leaf_item_id):
leaf_fdi = FixtureDataItem.get(leaf_item_id)
index = None
for i, h in enumerate(self.hierarchy[::-1]):
if h["type"] == leaf_type:
index = i
if index is None:
raise Exception(
"Could not generate lineage for AsyncDrillableField due to a nonexistent leaf_type (%s)" % leaf_type)
lineage = [leaf_fdi]
for i, h in enumerate(self.full_hierarchy[::-1]):
if i < index or i >= len(self.hierarchy)-1:
continue
real_index = len(self.hierarchy) - (i+1)
lineage.insert(0, FixtureDataItem.by_field_value(self.domain, self.data_types(real_index - 1),
h["references"], lineage[0].fields[h["parent_ref"]]).one())
return lineage
@property
def filter_context(self):
root_fdis = [self.fdi_to_json(f) for f in FixtureDataItem.by_data_type(self.domain, self.data_types(0).get_id)]
f_id = self.request.GET.get('fixture_id', None)
selected_fdi_type = f_id.split(':')[0] if f_id else None
selected_fdi_id = f_id.split(':')[1] if f_id else None
if selected_fdi_id:
index = 0
lineage = self.generate_lineage(selected_fdi_type, selected_fdi_id)
parent = {'children': root_fdis}
for i, fdi in enumerate(lineage[:-1]):
this_fdi = [f for f in parent['children'] if f['id'] == fdi.get_id][0]
next_h = self.hierarchy[i+1]
this_fdi['children'] = [self.fdi_to_json(f) for f in FixtureDataItem.by_field_value(self.domain,
self.data_types(i+1), next_h["parent_ref"], fdi.fields[next_h["references"]])]
parent = this_fdi
return {
'api_root': self.api_root,
'control_name': self.label,
'control_slug': self.slug,
'selected_fdi_id': selected_fdi_id,
'fdis': json.dumps(root_fdis),
'hierarchy': self.full_hierarchy
}
class AsyncLocationFilter(BaseReportFilter):
# todo: cleanup template
label = ugettext_noop("Location")
slug = "location_async"
template = "reports/filters/location_async.html"
@property
def filter_context(self):
api_root = reverse('api_dispatch_list', kwargs={'domain': self.domain,
'resource_name': 'location',
'api_name': 'v0.3'})
selected_loc_id = self.request.GET.get('location_id')
return {
'api_root': api_root,
'control_name': self.label, # todo: cleanup, don't follow this structure
'control_slug': self.slug, # todo: cleanup, don't follow this structure
'loc_id': selected_loc_id,
'locations': json.dumps(load_locs_json(self.domain, selected_loc_id)),
'hierarchy': location_hierarchy_config(self.domain),
}
class MultiLocationFilter(AsyncDrillableFilter):
template = "reports/filters/multi_location.html"
| bsd-3-clause | -391,699,841,776,118,460 | 40.919355 | 130 | 0.563294 | false |
mathieudesro/pathos | pathos/pp_map.py | 1 | 7526 | #!/usr/bin/env python
# Based on code by Kirk Strauser <[email protected]>
# Rev: 1139; Date: 2008-04-16
# (also see code in pathos.pp)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# * Neither the name of Kirk Strauser nor the names of other
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Forked by: Mike McKerns (April 2008)
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2008-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/pathos/browser/pathos/LICENSE
"""
Very basic parallel processing support
Implements a work-alike of the builtin map() function that distributes
work across many processes. As it uses Parallel Python to do the
actual multi-processing, code using this must conform to the usual PP
restrictions (arguments must be serializable, etc.)
"""
from pathos.pp import __STATE, stats, __print_stats as print_stats
#from pathos.pp import ParallelPythonPool as Pool
from pathos.helpers.pp_helper import Server as ppServer
def ppmap(processes, function, sequence, *sequences):
"""Split the work of 'function' across the given number of
processes. Set 'processes' to None to let Parallel Python
autodetect the number of children to use.
Although the calling semantics should be identical to
__builtin__.map (even using __builtin__.map to process
arguments), it differs in that it returns a generator instead of a
list. This enables lazy evaluation of the results so that other
work can be done while the subprocesses are still running.
>>> def rangetotal(n): return n, sum(range(n))
>>> list(map(rangetotal, range(1, 6)))
[(1, 0), (2, 1), (3, 3), (4, 6), (5, 10)]
>>> list(ppmap(1, rangetotal, range(1, 6)))
[(1, 0), (2, 1), (3, 3), (4, 6), (5, 10)]
"""
ppservers = ("*",) # autodetect
#from _ppserver_config import ppservers # read from a config file
# Create a new server if one isn't already initialized
if not __STATE['server']:
__STATE['server'] = ppServer(ppservers=ppservers)
#class dill_wrapper(object):
# """handle non-picklable functions by wrapping with dill"""
# def __init__(self, function):
# from dill import dumps
# self.pickled_function = dumps(function)
# def __call__(self, *args):
# from dill import loads #XXX: server now requires dill
# f = loads(self.pickled_function)
# return f(*args)
# def dill_wrapper(function):
# """handle non-picklable functions by wrapping with dill"""
# from dill import dumps
# pickled_function = dumps(function)
# def unwrap(*args):
# from dill import loads #XXX: server now requires dill
# f = loads(pickled_function)
# return f(*args)
# return unwrap
def submit(*args): #XXX: needs **kwds to allow "depfuncs, modules, ...?
"""Send a job to the server"""
#print globals()['ncalls'] #FIXME: ncalls not in globals()
#XXX: options for submit...
#XXX: func -- function to be executed
#XXX: depfuncs -- functions called from 'func'
#XXX: modules -- modules to import
#XXX: callback -- callback function to be called after 'func' completes
#XXX: callbackargs -- additional args for callback(result, *args)
#XXX: group -- allows naming of 'job group' to use in wait(group)
#XXX: globals -- dictionary from which everything imports
# from mystic.tools import wrap_function, wrap_bounds
# return __STATE['server'].submit(function, args, \
# depfuncs=(wrap_function,wrap_bounds), \
## modules=("mystic","numpy"), \
# globals=globals())
# p_function = dill_wrapper(function)
# return __STATE['server'].submit(p_function, args, globals=globals())
#print __STATE['server'].get_ncpus(), "local workers" #XXX: debug
return __STATE['server'].submit(function, args, globals=globals())
# Merge all the passed-in argument lists together. This is done
# that way because as with the map() function, at least one list
# is required but the rest are optional.
a = [sequence]
a.extend(sequences)
# Set the requested level of multi-processing
#__STATE['server'].set_ncpus(processes or 'autodetect') # never processes=0
if processes == None:
__STATE['server'].set_ncpus('autodetect')
else:
__STATE['server'].set_ncpus(processes) # allow processes=0
#print "running with", __STATE['server'].get_ncpus(), "local workers" #XXX: debug
# First, submit all the jobs. Then harvest the results as they
# come available.
return (subproc() for subproc in map(submit, *a))
def pp_map(function, sequence, *args, **kwds):
'''extend python's parallel map function to parallel python
Inputs:
function -- target function
sequence -- sequence to process in parallel
Additional Inputs:
    ncpus -- number of 'local' processors to use [default = 'autodetect']
servers -- available distributed parallel python servers [default = ()]
'''
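    # Hypothetical usage: pp_map(pow, range(5), [2]*5, ncpus=2) behaves like map(pow, ...) but evaluates the calls in parallel.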
procs = None
servers = ()
if kwds.has_key('ncpus'):
procs = kwds['ncpus']
kwds.pop('ncpus')
if kwds.has_key('servers'):
servers = kwds['servers']
kwds.pop('servers')
# remove all the junk kwds that are added due to poor design!
if kwds.has_key('nnodes'): kwds.pop('nnodes')
if kwds.has_key('nodes'): kwds.pop('nodes')
if kwds.has_key('launcher'): kwds.pop('launcher')
if kwds.has_key('mapper'): kwds.pop('mapper')
if kwds.has_key('queue'): kwds.pop('queue')
if kwds.has_key('timelimit'): kwds.pop('timelimit')
if kwds.has_key('scheduler'): kwds.pop('scheduler')
# return Pool(procs, servers=servers).map(function, sequence, *args, **kwds)
if not __STATE['server']:
__STATE['server'] = job_server = ppServer(ppservers=servers)
return list(ppmap(procs,function,sequence,*args))
if __name__ == '__main__':
# code moved to "pathos/examples/pp_map.py
pass
# EOF
| bsd-3-clause | 8,449,697,180,205,107,000 | 40.811111 | 84 | 0.671273 | false |
HSASec/ProFuzz | Connector_tcp.py | 1 | 1675 | import socket
import sys
import getopt
import DataGenerator
import time
import thread
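# Simple TCP fuzzer: connects to <host>:<port> and repeatedly sends random payloads from DataGenerator every 5 seconds.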
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
usage = sys.argv[0] +' -h <host> -p <port> [-l <length>] [-c <command>]'
def main(argv):
try:
		opts, args = getopt.getopt(sys.argv[1:],"h:p:l:c:")
except getopt.GetoptError, err:
print usage
sys.exit(1)
#check and set arguments
for opt, arg in opts:
if opt == '-h':
host = arg
elif opt == "-p":
port = arg
		elif opt == "-l":
length = arg
elif opt == "-c":
command = arg
#check if values exist
try:
host
except NameError:
print 'a host is necessary!'
print usage
sys.exit(0)
try:
port
except NameError:
print 'a port is necessary'
print usage
sys.exit(0)
#if there are no length given use random (length=0)
try:
length
except NameError:
length = 0
print 'using random length'
try:
tcp.connect((host, int(port)))
print "Connected"
except socket.error:
print "Couldn't connect to Server:" + host + ":" + port
sys.exit(2)
while(True):
try:
random = DataGenerator.randString(int(length))
dataSent = tcp.send(random)
print "sent"
time.sleep(5)
except socket.error:
print "Connection lost..."
break
if __name__ == "__main__":
main(sys.argv[1:]) | gpl-3.0 | -1,375,576,282,421,108,200 | 22.661765 | 72 | 0.492537 | false |
red-hat-storage/errata-tool | errata_tool/tests/conftest.py | 1 | 6415 | import json
import os
from errata_tool import ErrataConnector, Erratum
from errata_tool.build import Build
from errata_tool.products import ProductList
from errata_tool.product import Product
from errata_tool.product_version import ProductVersion
from errata_tool.release import Release
from errata_tool.variant import Variant
import requests
import pytest
from six.moves.urllib.parse import urlencode
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
FIXTURES_DIR = os.path.join(TESTS_DIR, 'fixtures')
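# MockResponse serves canned responses read from fixture files whose on-disk paths mirror the requested errata.devel.redhat.com URL (including any query string).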
class MockResponse(object):
status_code = 200
encoding = 'utf-8'
headers = {'content-type': 'application/json; charset=utf-8'}
params = None
def raise_for_status(self):
pass
@property
def _fixture(self):
"""Return path to our static fixture file. """
fdir = os.path.join(FIXTURES_DIR, 'errata.devel.redhat.com/')
filename = self._url_with_params.replace(
'https://errata.devel.redhat.com/', fdir)
# If we need to represent this API endpoint as both a directory and a
# file, check for a ".body" file.
if os.path.isdir(filename):
return filename + '.body'
return filename
@property
def _url_with_params(self):
url = self.url
if self.params is not None:
url += '?' + urlencode(self.params)
return url
def json(self):
try:
with open(self._fixture) as fp:
return json.load(fp)
except IOError:
print('Try ./new-fixture.sh %s' % self._url_with_params)
raise
@property
def text(self):
"""Return contents of our static fixture file. """
try:
with open(self._fixture) as fp:
return fp.read()
except IOError:
print('Try ./new-fixture.sh %s' % self._url_with_params)
raise
class RequestRecorder(object):
"""Record args to requests.get() or requests.post() """
def __call__(self, url, **kwargs):
"""mocking requests.get() or requests.post() """
self.response = MockResponse()
self.response.url = url
self.response.params = kwargs.get('params')
self.kwargs = kwargs
return self.response
@pytest.fixture
def mock_get():
return RequestRecorder()
@pytest.fixture
def mock_post():
return RequestRecorder()
@pytest.fixture
def mock_put():
return RequestRecorder()
@pytest.fixture
def advisory(monkeypatch, mock_get):
monkeypatch.delattr('requests.sessions.Session.request')
monkeypatch.setattr(ErrataConnector, '_auth', None)
monkeypatch.setattr(ErrataConnector, '_username', 'test')
monkeypatch.setattr(requests, 'get', mock_get)
return Erratum(errata_id=33840)
@pytest.fixture
def advisory_none_ship(monkeypatch, mock_get):
monkeypatch.delattr('requests.sessions.Session.request')
monkeypatch.setattr(ErrataConnector, '_auth', None)
monkeypatch.setattr(ErrataConnector, '_username', 'test')
monkeypatch.setattr(requests, 'get', mock_get)
return Erratum(errata_id=43686)
@pytest.fixture
def advisory_with_batch(monkeypatch, mock_get):
monkeypatch.delattr('requests.sessions.Session.request')
monkeypatch.setattr(ErrataConnector, '_auth', None)
monkeypatch.setattr(ErrataConnector, '_username', 'test')
monkeypatch.setattr(requests, 'get', mock_get)
return Erratum(errata_id=46563)
@pytest.fixture
def rhsa(monkeypatch, mock_get):
"""Like the advisory() fixture above, but an RHSA. """
monkeypatch.delattr('requests.sessions.Session.request')
monkeypatch.setattr(ErrataConnector, '_auth', None)
monkeypatch.setattr(ErrataConnector, '_username', 'test')
monkeypatch.setattr(requests, 'get', mock_get)
return Erratum(errata_id=36762)
@pytest.fixture
def productlist(monkeypatch, mock_get):
monkeypatch.delattr('requests.sessions.Session.request')
monkeypatch.setattr(ErrataConnector, '_auth', None)
monkeypatch.setattr(ErrataConnector, '_username', 'test')
monkeypatch.setattr(requests, 'get', mock_get)
return ProductList()
@pytest.fixture
def product(monkeypatch, mock_get):
monkeypatch.delattr('requests.sessions.Session.request')
monkeypatch.setattr(ErrataConnector, '_auth', None)
monkeypatch.setattr(ErrataConnector, '_username', 'test')
monkeypatch.setattr(requests, 'get', mock_get)
return Product('RHCEPH')
@pytest.fixture
def rhacm_product(monkeypatch, mock_get):
monkeypatch.delattr('requests.sessions.Session.request')
monkeypatch.setattr(ErrataConnector, '_auth', None)
monkeypatch.setattr(ErrataConnector, '_username', 'test')
monkeypatch.setattr(requests, 'get', mock_get)
return Product('RHACM')
@pytest.fixture
def product_version(monkeypatch, mock_get):
monkeypatch.delattr('requests.sessions.Session.request')
monkeypatch.setattr(ErrataConnector, '_auth', None)
monkeypatch.setattr(ErrataConnector, '_username', 'test')
monkeypatch.setattr(requests, 'get', mock_get)
return ProductVersion('RHEL-7-RHCEPH-3.1')
@pytest.fixture
def release(monkeypatch, mock_get):
monkeypatch.delattr('requests.sessions.Session.request')
monkeypatch.setattr(ErrataConnector, '_auth', None)
monkeypatch.setattr(ErrataConnector, '_username', 'test')
monkeypatch.setattr(requests, 'get', mock_get)
return Release(name='rhceph-3.1')
@pytest.fixture
def build(monkeypatch, mock_get):
monkeypatch.delattr('requests.sessions.Session.request')
monkeypatch.setattr(ErrataConnector, '_auth', None)
monkeypatch.setattr(ErrataConnector, '_username', 'test')
monkeypatch.setattr(requests, 'get', mock_get)
return Build('ceph-12.2.5-42.el7cp')
@pytest.fixture
def rhceph_variant(monkeypatch, mock_get):
monkeypatch.delattr('requests.sessions.Session.request')
monkeypatch.setattr(ErrataConnector, '_auth', None)
monkeypatch.setattr(ErrataConnector, '_username', 'test')
monkeypatch.setattr(requests, 'get', mock_get)
return Variant(name='8Base-RHCEPH-5.0-MON')
@pytest.fixture
def rhacm_variant(monkeypatch, mock_get):
monkeypatch.delattr('requests.sessions.Session.request')
monkeypatch.setattr(ErrataConnector, '_auth', None)
monkeypatch.setattr(ErrataConnector, '_username', 'test')
monkeypatch.setattr(requests, 'get', mock_get)
return Variant(name='7Server-RHACM-2.0')
| mit | -4,197,443,629,954,272,000 | 31.729592 | 77 | 0.69431 | false |
ergoregion/Rota-Program | Rota_System/Saving/Excell/Population.py | 1 | 5654 | __author__ = 'Neil Butcher'
from Error import ExcellImportExportError
import xlwt, xlrd
from Rota_System.Roles import GlobalRoleList, role
from Rota_System.Worker import Worker
from Rota_System.StandardTimes import date_string, get_date
def name(an_object):
return an_object.name
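# Round-trips a population of Workers to and from an Excel workbook with three sheets (Population, Qualifications, Unavailable Dates), validating roles against GlobalRoleList on load.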
class PopulationSavingObject(object):
def __init__(self, population, filename):
if filename:
self._filename = filename
else:
raise ExcellImportExportError('No filename set')
self._population = population
self._book = None
def create(self):
self._book = xlwt.Workbook(encoding="utf-8")
self._population_sheet = self._book.add_sheet("Population")
self._population_sheet.write(0, 0, 'Name')
self._population_sheet.write(0, 1, 'phone')
self._population_sheet.write(0, 2, 'email')
self._population_sheet.write(0, 3, 'address')
self._qualifications_sheet = self._book.add_sheet("Qualifications")
self._qualifications_sheet.write(0, 0, 'Name')
j = 1
for r in GlobalRoleList.roles:
self._qualifications_sheet.write(0, j, r.description)
j += 1
self._dates_sheet = self._book.add_sheet("Unavailable Dates")
self._dates_sheet.write(0, 0, 'Name')
self._save()
def _add_individual(self, person, row):
self._population_sheet.write(row, 0, person.name)
self._population_sheet.write(row, 1, person.phone_number)
self._population_sheet.write(row, 2, person.email)
self._population_sheet.write(row, 3, person.address)
self._qualifications_sheet.write(row, 0, person.name)
self._dates_sheet.write(row, 0, person.name)
j = 1
for r in GlobalRoleList.roles:
if person.suitable_for_role(r):
self._qualifications_sheet.write(row, j, "Y")
j += 1
j = 1
for d in person.blacklisted_dates():
self._dates_sheet.write(row, j, date_string(d))
j += 1
def populate(self):
for j, person in enumerate(self._population):
self._add_individual(person, j + 1)
self._save()
def _save(self):
self._book.save(self._filename)
def load(self):
self._book = xlrd.open_workbook(self._filename)
self._get_sheets()
self._get_roles()
self._get_people()
return self._population
def _get_sheets(self):
names = self._book.sheet_names()
if "Population" not in names:
raise ExcellImportExportError('There is no population sheet in the file')
else:
self._population_sheet = self._book.sheet_by_name("Population")
if "Qualifications" not in names:
raise ExcellImportExportError('There is no qualification sheet in the file')
else:
self._qualifications_sheet = self._book.sheet_by_name("Qualifications")
if "Unavailable Dates" in names:
self._dates_sheet = self._book.sheet_by_name("Unavailable Dates")
else:
self._dates_sheet = None
def _get_roles(self):
self._sheet_role_list = []
for i, cell in enumerate(self._qualifications_sheet.row(0)):
            if cell.ctype == 0:
break
try:
r = role(cell.value)
except:
raise ExcellImportExportError('There was an unidentified role: ' + cell.value)
if r is None:
if i > 0:
raise ExcellImportExportError('There was an unidentified role: ' + cell.value)
else:
self._sheet_role_list.append(r)
for r in GlobalRoleList.roles:
if r not in self._sheet_role_list:
raise ExcellImportExportError('There was an role unlisted on the sheet: ' + r.description)
def _get_people(self):
self._population = []
for i in range(1, self._population_sheet.nrows):
            if self._population_sheet.cell_type(i, 0) == 0:
break
else:
p = Worker()
p.name = self._population_sheet.cell_value(i, 0)
p.phone_number = self._population_sheet.cell_value(i, 1)
p.email = self._population_sheet.cell_value(i, 2)
p.address = self._population_sheet.cell_value(i, 3)
self._get_qualifications(i, p)
self._get_dates(i, p)
if p.name in map(name, self._population):
raise ExcellImportExportError('There were people with the same name : ' + p.name)
else:
self._population.append(p)
def _get_qualifications(self, row, person):
cells = self._qualifications_sheet.row(row)
if cells[0].value != person.name:
            raise ExcellImportExportError('There was a mismatch between people and qualifications on row: ' + str(row))
person.does_nothing()
for i, r in enumerate(self._sheet_role_list):
            if cells[i + 1].ctype != 0:
person.add_role(r.code)
def _get_dates(self, row, person):
if self._dates_sheet is None:
return
cells = self._dates_sheet.row(row)
if cells[0].value != person.name:
            raise ExcellImportExportError('There was a mismatch between people and unavailable dates on row: ' + str(row))
person.clear_blacklist()
for i in range(1, len(cells)):
            if cells[i].ctype != 0:
person.blacklist_date(get_date(cells[i].value))
| mit | -4,878,196,566,270,454,000 | 33.901235 | 119 | 0.581889 | false |
sarielsaz/sarielsaz | test/functional/mempool_packages.py | 1 | 10532 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Sarielsaz Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test descendant package tracking code."""
from test_framework.test_framework import SarielsazTestFramework
from test_framework.util import *
from test_framework.mininode import COIN
MAX_ANCESTORS = 25
MAX_DESCENDANTS = 25
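# Package limits exercised by this test; assumed to match the daemon's default -limitancestorcount / -limitdescendantcount settings.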
class MempoolPackagesTest(SarielsazTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-maxorphantx=1000"], ["-maxorphantx=1000", "-limitancestorcount=5"]]
# Build a transaction that spends parent_txid:vout
# Return amount sent
def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs):
send_value = satoshi_round((value - fee)/num_outputs)
inputs = [ {'txid' : parent_txid, 'vout' : vout} ]
outputs = {}
for i in range(num_outputs):
outputs[node.getnewaddress()] = send_value
rawtx = node.createrawtransaction(inputs, outputs)
signedtx = node.signrawtransaction(rawtx)
txid = node.sendrawtransaction(signedtx['hex'])
fulltx = node.getrawtransaction(txid, 1)
assert(len(fulltx['vout']) == num_outputs) # make sure we didn't generate a change output
return (txid, send_value)
def run_test(self):
''' Mine some blocks and have them mature. '''
self.nodes[0].generate(101)
utxo = self.nodes[0].listunspent(10)
txid = utxo[0]['txid']
vout = utxo[0]['vout']
value = utxo[0]['amount']
fee = Decimal("0.0001")
# MAX_ANCESTORS transactions off a confirmed tx should be fine
chain = []
for i in range(MAX_ANCESTORS):
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, 0, value, fee, 1)
value = sent_value
chain.append(txid)
# Check mempool has MAX_ANCESTORS transactions in it, and descendant
# count and fees should look correct
mempool = self.nodes[0].getrawmempool(True)
assert_equal(len(mempool), MAX_ANCESTORS)
descendant_count = 1
descendant_fees = 0
descendant_size = 0
descendants = []
ancestors = list(chain)
for x in reversed(chain):
# Check that getmempoolentry is consistent with getrawmempool
entry = self.nodes[0].getmempoolentry(x)
assert_equal(entry, mempool[x])
# Check that the descendant calculations are correct
assert_equal(mempool[x]['descendantcount'], descendant_count)
descendant_fees += mempool[x]['fee']
assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee'])
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN)
descendant_size += mempool[x]['size']
assert_equal(mempool[x]['descendantsize'], descendant_size)
descendant_count += 1
# Check that getmempooldescendants is correct
assert_equal(sorted(descendants), sorted(self.nodes[0].getmempooldescendants(x)))
descendants.append(x)
# Check that getmempoolancestors is correct
ancestors.remove(x)
assert_equal(sorted(ancestors), sorted(self.nodes[0].getmempoolancestors(x)))
# Check that getmempoolancestors/getmempooldescendants correctly handle verbose=true
v_ancestors = self.nodes[0].getmempoolancestors(chain[-1], True)
assert_equal(len(v_ancestors), len(chain)-1)
for x in v_ancestors.keys():
assert_equal(mempool[x], v_ancestors[x])
assert(chain[-1] not in v_ancestors.keys())
v_descendants = self.nodes[0].getmempooldescendants(chain[0], True)
assert_equal(len(v_descendants), len(chain)-1)
for x in v_descendants.keys():
assert_equal(mempool[x], v_descendants[x])
assert(chain[0] not in v_descendants.keys())
# Check that ancestor modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=1000)
mempool = self.nodes[0].getrawmempool(True)
ancestor_fees = 0
for x in chain:
ancestor_fees += mempool[x]['fee']
assert_equal(mempool[x]['ancestorfees'], ancestor_fees * COIN + 1000)
# Undo the prioritisetransaction for later tests
self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=-1000)
# Check that descendant modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=1000)
mempool = self.nodes[0].getrawmempool(True)
descendant_fees = 0
for x in reversed(chain):
descendant_fees += mempool[x]['fee']
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 1000)
# Adding one more transaction on to the chain should fail.
assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], txid, vout, value, fee, 1)
# Check that prioritising a tx before it's added to the mempool works
# First clear the mempool by mining a block.
self.nodes[0].generate(1)
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
# Prioritise a transaction that has been mined, then add it back to the
# mempool by using invalidateblock.
self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=2000)
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Keep node1's tip synced with node0
self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
# Now check that the transaction is in the mempool, with the right modified fee
mempool = self.nodes[0].getrawmempool(True)
descendant_fees = 0
for x in reversed(chain):
descendant_fees += mempool[x]['fee']
if (x == chain[-1]):
assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee']+satoshi_round(0.00002))
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 2000)
# TODO: check that node1's mempool is as expected
# TODO: test ancestor size limits
# Now test descendant chain limits
txid = utxo[1]['txid']
value = utxo[1]['amount']
vout = utxo[1]['vout']
transaction_package = []
# First create one parent tx with 10 children
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 10)
parent_transaction = txid
for i in range(10):
transaction_package.append({'txid': txid, 'vout': i, 'amount': sent_value})
# Sign and send up to MAX_DESCENDANT transactions chained off the parent tx
for i in range(MAX_DESCENDANTS - 1):
utxo = transaction_package.pop(0)
(txid, sent_value) = self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
for j in range(10):
transaction_package.append({'txid': txid, 'vout': j, 'amount': sent_value})
mempool = self.nodes[0].getrawmempool(True)
assert_equal(mempool[parent_transaction]['descendantcount'], MAX_DESCENDANTS)
# Sending one more chained transaction will fail
utxo = transaction_package.pop(0)
assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
# TODO: check that node1's mempool is as expected
# TODO: test descendant size limits
# Test reorg handling
# First, the basics:
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.nodes[1].invalidateblock(self.nodes[0].getbestblockhash())
self.nodes[1].reconsiderblock(self.nodes[0].getbestblockhash())
# Now test the case where node1 has a transaction T in its mempool that
# depends on transactions A and B which are in a mined block, and the
# block containing A and B is disconnected, AND B is not accepted back
# into node1's mempool because its ancestor count is too high.
# Create 8 transactions, like so:
# Tx0 -> Tx1 (vout0)
# \--> Tx2 (vout1) -> Tx3 -> Tx4 -> Tx5 -> Tx6 -> Tx7
#
# Mine them in the next block, then generate a new tx8 that spends
# Tx1 and Tx7, and add to node1's mempool, then disconnect the
# last block.
# Create tx0 with 2 outputs
utxo = self.nodes[0].listunspent()
txid = utxo[0]['txid']
value = utxo[0]['amount']
vout = utxo[0]['vout']
send_value = satoshi_round((value - fee)/2)
inputs = [ {'txid' : txid, 'vout' : vout} ]
outputs = {}
for i in range(2):
outputs[self.nodes[0].getnewaddress()] = send_value
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signedtx = self.nodes[0].signrawtransaction(rawtx)
txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
tx0_id = txid
value = send_value
# Create tx1
tx1_id, _ = self.chain_transaction(self.nodes[0], tx0_id, 0, value, fee, 1)
# Create tx2-7
vout = 1
txid = tx0_id
for i in range(6):
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
vout = 0
value = sent_value
# Mine these in a block
self.nodes[0].generate(1)
self.sync_all()
# Now generate tx8, with a big fee
inputs = [ {'txid' : tx1_id, 'vout': 0}, {'txid' : txid, 'vout': 0} ]
outputs = { self.nodes[0].getnewaddress() : send_value + value - 4*fee }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signedtx = self.nodes[0].signrawtransaction(rawtx)
txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
sync_mempools(self.nodes)
# Now try to disconnect the tip on each node...
self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
sync_blocks(self.nodes)
if __name__ == '__main__':
MempoolPackagesTest().main()
| mit | 2,006,913,425,574,037,200 | 42.520661 | 154 | 0.621534 | false |
kirchenreich/osm-api-cache | osmcache/models.py | 1 | 2003 | from sqlalchemy import (
BigInteger,
Binary,
Column,
DateTime,
Integer,
Numeric,
String,
)
from sqlalchemy.dialects.postgresql import JSON, TIMESTAMP
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql import func
Base = declarative_base()
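# Abstract base shared by all OSM tables below; adds created_on/updated_on timestamps.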
class OsmBase(Base):
__abstract__ = True
created_on = Column(TIMESTAMP, default=func.now())
updated_on = Column(TIMESTAMP, default=func.now(), onupdate=func.now())
class Node(OsmBase):
__tablename__ = 'node'
id = Column(BigInteger, primary_key=True, autoincrement=False)
tags = Column(JSON, nullable=True)
meta = Column(JSON, nullable=True)
changeset = Column(BigInteger, nullable=True)
lat = Column(Numeric(11, 8), nullable=True)
lon = Column(Numeric(11, 8), nullable=True)
class Way(OsmBase):
__tablename__ = 'way'
id = Column(BigInteger, primary_key=True, autoincrement=False)
tags = Column(JSON, nullable=True)
meta = Column(JSON, nullable=True)
changeset = Column(BigInteger, nullable=True)
class WayMember(OsmBase):
__tablename__ = 'waymember'
id = Column(BigInteger, primary_key=True, autoincrement=False)
way_id = Column(BigInteger, index=True)
node_id = Column(BigInteger, index=True)
order = Column(Integer, nullable=True, index=True)
class Relation(OsmBase):
__tablename__ = 'relation'
id = Column(BigInteger, primary_key=True, autoincrement=False)
tags = Column(JSON, nullable=True)
meta = Column(JSON, nullable=True)
changeset = Column(BigInteger, nullable=True)
class RelationMember(OsmBase):
__tablename__ = 'relationmember'
id = Column(BigInteger, primary_key=True, autoincrement=False)
relation_id = Column(BigInteger, index=True)
# allowed values: n, w, r
element_type = Column(String(1), index=True)
element_id = Column(BigInteger, index=True)
order = Column(Integer, nullable=True, index=True)
role = Column(String, nullable=True)
| mit | -8,110,983,343,575,887,000 | 24.0375 | 75 | 0.688967 | false |
coassets/initial-d | sample_project/urls.py | 1 | 1537 | from django.conf.urls.defaults import *
from django.conf import settings
from crm.xmlrpc import rpc_handler
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# crm and contactinfo URLs (required)
(r'^crm/', include('crm.urls')),
(r'^contactinfo/', include('contactinfo.urls')),
(r'^ajax/', include('ajax_select.urls')),
url(r'^xml-rpc/', rpc_handler, name='xml_rpc'),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
#url(r'^admin/(.*)', admin.site.root),
url(r'^admin/', include(admin.site.urls)),
# use the contrib.auth login/logout views for authentication (optional)
url(
r'^accounts/login/$', 'django.contrib.auth.views.login',
name='auth_login',
),
url(
r'^accounts/logout/$', 'django.contrib.auth.views.logout',
name='auth_logout',
),
# redirect '/' to the CRM dashboard (optional)
url(
'^$',
'django.views.generic.simple.redirect_to',
{'url': '/crm/dashboard/'},
),
)
if settings.DEBUG:
urlpatterns += patterns('',
( r'^%s(?P<path>.*)' % settings.MEDIA_URL.lstrip('/'),
'django.views.static.serve',
{ 'document_root' : settings.MEDIA_ROOT, 'show_indexes': True }
),
)
| gpl-2.0 | -1,501,874,727,447,595,000 | 30.367347 | 76 | 0.615485 | false |
ajrbyers/mondroid | src/monitor/management/commands/install_droids.py | 1 | 1680 | from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.conf import settings
from monitor import models
from crontab import CronTab
import os
import sys
try:
action = sys.argv[1:][1]
except:
action = ''
def find_job(tab, comment):
for job in tab:
if job.comment == comment:
return job
return None
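# Management command: ensures one fetcher_droid cron entry per monitor plus a single parser_droid entry, each scheduled every 5 minutes.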
class Command(BaseCommand):
help = 'Installs cron tasks for the monitor.'
def handle(self, *args, **options):
monitor_list = models.Monitor.objects.all()
virtualenv = os.environ.get('VIRTUAL_ENV', None)
tab = CronTab()
for monitor in monitor_list:
current_job = find_job(tab, "fetcher_droid_%s" % monitor.slug)
if current_job == None:
				django_command = "python %s/manage.py fetcher_droid %s >> /var/log/mondroid/%s.fetcher.log" % (settings.BASE_DIR, monitor.slug, monitor.slug)
if virtualenv:
					command = 'export PATH=%s/bin:/usr/local/bin:/usr/bin:/bin && %s' % (virtualenv, django_command)
else:
command = '%s' % (django_command)
cron_job = tab.new(command, comment="fetcher_droid_%s" % monitor.slug)
cron_job.minute.every(5)
# Install the parser droid command if it doesn't exist already
current_job = find_job(tab, "parser_droid")
if current_job == None:
			if virtualenv:
				command = 'export PATH=%s/bin:/usr/local/bin:/usr/bin:/bin && python %s/manage.py parser_droid' % (virtualenv, settings.BASE_DIR)
			else:
				command = 'python %s/manage.py parser_droid' % (settings.BASE_DIR,)
cron_job = tab.new(command, comment="parser_droid")
cron_job.minute.every(5)
if action == 'test':
print tab.render()
elif action == 'quiet':
pass
else:
tab.write() | gpl-2.0 | 6,182,176,925,109,408,000 | 26.557377 | 148 | 0.685119 | false |
probablytom/msci-model | resp_base/ResponsibleWorkflows.py | 1 | 2256 | from theatre_ag.theatre_ag.workflow import treat_as_workflow
from .Constraints import Deadline
from random import random, choice
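# Workflows for responsible agents: each task succeeds with probability given by the workflow's competence for that skill, and a failure is recorded against one randomly chosen non-Deadline constraint.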
class CourseworkWorkflow:
def __init__(self):
self.agent = None
self.competence = {'essays_written': 0.95,
'working_programs': 0.95}
def assign_agent(self, agent):
self.agent = agent
# Lecturers can write essays
# (We're calling papers essays, so as to simplify the model's ontology.)
# RETURNS: tuple (a,b):
# a: success bool
# b: set of constraints with pass/failure
def write_essay(self, agent):
written_successfully = (random() < self.competence['essays_written'])
if 'essays_written' not in agent.socio_states.keys():
agent.socio_states['essays_written'] = 0
for i in range(len(agent.current_responsibility.constraints)):
agent.current_responsibility.constraints[i].record_outcome(True)
if written_successfully:
agent.socio_states['essays_written'] += 1
else:
choice([c for c in agent.current_responsibility.constraints if type(c) is not Deadline]).record_outcome(False)
return (written_successfully, agent.current_responsibility.constraints)
def write_program(self, agent):
written_successfully = (random() < self.competence['working_programs'])
if 'working_programs' not in agent.socio_states.keys():
agent.socio_states['working_programs'] = 0
for i in range(len(agent.current_responsibility.constraints)):
agent.current_responsibility.constraints[i].record_outcome(True)
if written_successfully:
agent.socio_states['working_programs'] += 1
else:
choice([c for c in agent.current_responsibility.constraints if type(c) is not Deadline]).record_outcome(False)
return (written_successfully, agent.current_responsibility.constraints)
class IncompetentCourseworkWorkflow(CourseworkWorkflow):
def __init__(self):
super().__init__()
self.competence = {'essays_written': 0.2,
'working_programs': 0.2}
class DummyWorkflow:
is_workflow = True
treat_as_workflow(CourseworkWorkflow)
| mit | 5,694,300,392,099,812,000 | 39.285714 | 122 | 0.655142 | false |
miyosuda/intro-to-dl-android | python-scripts/example1.py | 1 | 4036 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Import the helper used to load the MNIST data set
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import numpy as np
# Download the MNIST data set
mnist = input_data.read_data_sets("./tmp/data/", one_hot=True)
# Create a Session
sess = tf.InteractiveSession()
# [Graph definition]
# Create a placeholder that will hold the input values; the image data,
# converted into a tensor, is fed in here.
# None means "size not decided yet": it is 100 during training and 10000 when checking,
# so the tensor is (100x784) during training and (10000x784) when checking.
x = tf.placeholder(tf.float32, [None, 784])
# 784x10 weights; they change as training proceeds.
W = tf.Variable(tf.zeros([784, 10]))
# 10 bias values; they change as training proceeds.
b = tf.Variable(tf.zeros([10]))
# Feed the result of (x * W + b) into the softmax function and call the result y.
# y is a (100x10) tensor during training and a (10000x10) tensor when checking.
y = tf.nn.softmax(tf.matmul(x, W) + b)
# Define the loss function (a measure of how far we are from the correct answers).
# y_ is the placeholder that receives the correct labels.
# It is declared as None, but becomes 100 during training,
# so y_ is a (100, 10) tensor.
y_ = tf.placeholder(tf.float32, [None, 10])
# Using the 10 values the network produced and the 10 correct values
# (an array where only the correct entry is 1), compute how far off we were.
# The smaller this value, the closer we were to the correct answer
# (gathered into a single scalar); take the sum over the 100 examples.
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
# Prepare an optimizer that trains the model so as to reduce this error.
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
# [Initialization]
# Initialize the variables W and b. Running the initializer executes tf.zeros(),
# so every value inside W and b is initialized to 0.0.
tf.initialize_all_variables().run()
# [Run training]
# Start training: repeat the error-reducing update 1000 times.
for i in range(1000):
  # Randomly draw 100 images and 100 correct labels at a time from the
  # 55000-example training data set.
  # batch_xs is a (100x784) tensor, batch_ys is a (100x10) tensor.
batch_xs, batch_ys = mnist.train.next_batch(100)
  # Feed the values into the placeholders and run one training step.
  # Executing this changes the values of W and b.
train_step.run({x: batch_xs, y_: batch_ys})
# [Checking the training result]
# Training is finished, so check the result.
# Compare the index (0-9) of the largest of the 10 values output by the network
# with the index (0-9) of the largest value in the correct data;
# return 1 (correct) when they match and 0 (incorrect) when they differ.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
# Compute these 1/0 values for all 10000 test examples and take the mean.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Run with the 10000 test examples as input.
# The resulting value is the accuracy: around 0.9, i.e. roughly 90% correct.
print(accuracy.eval({x: mnist.test.images, y_: mnist.test.labels}))
# [Exporting the trained parameters]
# For W and b, pull the values out of the TensorFlow tensor objects,
# converting them to numpy ndarrays so they can be saved.
W_val = W.eval(sess)
b_val = b.eval(sess)
# Write W and b out to separate CSV files
np.savetxt('./w.csv', W_val, delimiter=',')
np.savetxt('./b.csv', b_val, delimiter=',')
print('exported: w.csv, b.csv')
| apache-2.0 | -2,668,570,235,435,249,000 | 22.471698 | 76 | 0.735531 | false |
frisk028/flask-app-umn-courses | courses/public/forms.py | 1 | 5377 | # -*- coding: utf-8 -*-
from flask_wtf import Form
from wtforms import TextField, SelectField, SelectMultipleField, widgets
from wtforms.validators import DataRequired
import json
import os
class MultiCheckboxField(SelectMultipleField):
widget = widgets.ListWidget(prefix_label=False)
option_widget = widgets.CheckboxInput()
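# SearchForm drives the course search: campus, term and level are required, while the per-campus
# liberal-education checkbox groups and the subject / course-number comparison fields are optional filters.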
class SearchForm(Form):
CAMPUS_CHOICES = [('umntc', 'Twin Cities'), ('umndl', 'Duluth'),
('umnro', 'Rochester'), ('umncr', 'Crookston'),
('umnmo', 'Morris')]
TERM_CHOICES = [('1165', 'Summer 2016'), ('1169', 'Fall 2016'), ('1173', 'Spring 2017')]
COMPARE_CHOICES = [('','--choose comparison--'), ('<', 'less than'), ('<=', 'less than or equal to'),
('=','equal to'), ('>=', 'greater than or equal to'),
('>', 'greater than')]
LEVEL_CHOICES = [('catalog_number<5000', 'Undergraduate Courses'),
('catalog_number>4999', 'Graduate and Professional Courses')]
CLE_CHOICES =[('AH', 'Arts and Humanities'), ('BIOL', 'Biological Sciences'),
('CIV', 'Civic Life and Ethics'), ('DSJ', 'Diversity and Social Justice'),
('ENV', 'Environment'), ('GP', 'Global Perspectives'), ('HIS','Historical Perspectives'),
('LITR', 'Literature'), ('MATH', 'Mathmatical Thinking'), ('PHYS', 'Physical Sciences'),
('SOCS', 'Social Sciences'), ('TS', 'Technology and Society'), ('WI', 'Writing Intensive')]
GE_CHOICES = [('BIOL SCI', 'Biological Sciences'), ('COMMUNICAT', 'Written/Oral Communications'),
('ETH/CIV RE', 'Ethic/Civil Responsibility'), ('GLOB PERSP', 'Global Perspective'),
('HI/BEH/SCC', 'History & Behavioral/Social Sciences'), ('HUMAN DIV', 'Human Diversity'),
('HUMANITIES', 'Humanities'), ('LIB ED ELC', 'Liberal Education Elective'),
('PEOPLE/ENV', 'People/Environment'), ('PHYS SCI', 'Physical Sciences'),
('MATH THINK', 'Mathematical Thinking')]
GER_CHOICES = [('ARTP', 'Artistic Performance'), ('HUM', 'Communication, Language, Literature, and Philosophy'),
('ECR', 'Ethical & Civic Responsibility'), ('ENVT', 'People and Environment'),
('FA', 'Fine Arts'), ('FL', 'Foreign Language'), ('HIST', 'Historical Perspectives'),
('SS', 'Human Behavior, Social Processes, and Institutions'), ('HDIV', 'Human Diversity'),
('IC', 'Intellectual Community'), ('IP', 'International Perspective'),
('M/SR', 'Mathematical/Symbolic Reasoning'), ('SCI', 'Physical & Biological Sciences'),
('SCIL', 'Physical & Biological Sciences with Lab'), ('WLA', 'Writing for the Liberal Arts')]
DLE_CHOICES = [('CDIVERSITY', 'Cultural Diversity in the US'), ('FINE ARTS', 'Fine Arts'), ('GLOBAL PER', 'Global Perspectives'),
('HUMANITIES', 'Humanities'), ('LOGIC & QR', 'Logic & Quantitative Reasoning'), ('NAT SCI', 'Natural Sciences'),
('COMM & LAN', 'Oral Communication & Languages'), ('SOC SCI', 'Social Sciences'), ('SUSTAIN', 'Sustainability'),
('WRITING', 'Writing & Information Literacy')]
campus = SelectField(label='Campus', choices=CAMPUS_CHOICES, validators=[DataRequired()])
cle = MultiCheckboxField(label='Twin Cities/Rochester Liberal Education', choices=CLE_CHOICES)
dle = MultiCheckboxField(label='Duluth Liberal Education', choices=DLE_CHOICES)
ge = MultiCheckboxField(label='Crookston Liberal Education', choices=GE_CHOICES)
ger = MultiCheckboxField(label='Morris Liberal Education', choices=GER_CHOICES)
term = SelectField(label='Term', choices=TERM_CHOICES, validators=[DataRequired()], default='1159')
level = SelectField(label='Level', choices=LEVEL_CHOICES, validators=[DataRequired()])
subject = TextField(label='Subject')
course_number = TextField(label='Course Number')
compare = SelectField(label='Course Number', choices=COMPARE_CHOICES)
def __init__(self, *args, **kwargs):
super(SearchForm, self).__init__(*args, **kwargs)
self.user = None
def validate(self):
found = False
json_file = '/majors.json'
initial_validation = super(SearchForm, self).validate()
if self.course_number.data:
if self.compare.data == '':
self.compare.errors.append('Please enter a comparison')
return False
if str(self.campus.data) == 'umnmo':
json_file = '/morris.json'
elif str(self.campus.data) == 'umncr':
json_file = '/crookston.json'
elif str(self.campus.data) == 'umndl':
json_file = '/duluth.json'
json_url = os.path.realpath(os.path.dirname(__file__)) + json_file
f = open(json_url,'r')
json_data = json.loads(f.read())
subject = self.subject.data.upper()
if subject: # make sure to only validate subject if something was entered.
for key, value in json_data.iteritems():
if subject == key:
found = True
if not found:
self.subject.errors.append('Please enter a valid course subject')
return False
return True
| mit | -3,242,450,863,080,279,000 | 54.43299 | 134 | 0.590664 | false |
rjschwei/azure-sdk-for-python | unreleased/azure-mgmt-intune/setup.py | 1 | 2533 | #!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
from setuptools import find_packages, setup
from io import open
import re
import os.path
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-mgmt-intune"
PACKAGE_PPRINT_NAME = "Intune Management"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.rst', encoding='utf-8') as f:
readme = f.read()
with open('HISTORY.rst', encoding='utf-8') as f:
history = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + history,
license='MIT License',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(),
install_requires=[
'azure-mgmt-nspkg',
'azure-common[autorest]==1.1.4',
],
)
| mit | 7,684,601,118,646,094,000 | 31.896104 | 91 | 0.597315 | false |
ST-Data-Mining/crater | wei/cart_file.py | 1 | 2810 | from __future__ import division
import sys
from os import listdir
from table import *
from os.path import isfile, join
from settings import *
from cart_de import *
from sk import *
from time import strftime
from rf_cart_prediction import *
def myrdiv(d):
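  # Flatten {key: [values]} into rows of the form [key, value1, value2, ...]
  # so they can be passed to rdivDemo() below.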
  def pre(dist):
    # Note: list.extend() returns None, so the original `[k].extend(l)` always
    # evaluated to None; build the row with list concatenation instead.
    l = dist.items()[0][-1]
    k = dist.items()[0][0]
    return [k] + l
stat = []
for key,val in d.iteritems():
val.insert(0,key)
stat.append(val)
return stat
def createfile():
f = open('myresult','w').close()
def writefile(s):
f = open('myresult', 'a')
f.write(s+'\n')
f.close()
def cart_file(path="./data"):
def saveSat(score,dataname):
class1 = dataname +": N-Def"
class2 = dataname +": Y-Def"
name = [pd,pf,prec,g]
for i, s in enumerate(name):
s[class1]= s.get(class1,[])+[float(score[0][i]/100)]
s[class2]= s.get(class2,[])+[float(score[1][i]/100)]
def printresult(dataset):
print "\n" + "+" * 20 + "\n DataSet: "+dataset + "\n" + "+" * 20
for i, k in enumerate(["pd", "pf","prec","g"]):
# pdb.set_trace()
express = "\n"+"*"*10+k+"*"*10
print express
writefile(express)
rdivDemo(myrdiv(lst[i]))
writefile("End time :" +strftime("%Y-%m-%d %H:%M:%S"))
writefile("\n"*2)
print "\n"
def predicttest(predict, testname):
for i in xrange(10):
The.data.predict = predict
score = main()
saveSat(score, testname)
def cart_tunetest(predict):
The.classifier.tuned = True
cart_de()
The.option.tuning = False
predicttest(predict,"Tuned")
The.classifier.tuned = False
def cart_basetest(predict):
The.classifier.cart = True
The.cart.criterion = "entropy"
The.cart.max_features = None
The.cart.max_depth = None
The.cart.min_samples_split = 2
The.cart.min_samples_leaf = 1
The.cart.max_leaf_nodes = None
The.cart.random_state = 0
predicttest(predict, "Cart")
The.classifier.cart = False
def rndfsttest(predict):
The.classifier.rdfor = True
predicttest(predict, "Rdfor")
The.classifier.rdfor = False
random.seed(10)
createfile()
folders = [f for f in listdir(path) if not isfile(join(path, f))]
for one in folders[5:7]:
pd, pf, prec, g = {},{},{},{}
lst = [pd,pf,prec,g]
nextpath = join(path, one)
data = [join(nextpath, f)
for f in listdir(nextpath) if isfile(join(nextpath, f))]
predict = data.pop(-1)
tune_predict = data.pop(-1)
train = data
global The
The.data.predict = tune_predict
The.data.train = train
The.option.baseLine = False
writefile("Begin time :" +strftime("%Y-%m-%d %H:%M:%S"))
writefile("Dataset: "+one)
cart_tunetest(predict)
cart_basetest(predict)
rndfsttest(predict)
printresult(one)
if __name__ =="__main__":
eval(cmd()) | mit | -6,055,038,081,396,519,000 | 24.554545 | 68 | 0.609964 | false |
WilJoey/tn_ckan | ckan/controllers/group.py | 1 | 36100 | import re
import os
import logging
import genshi
import cgi
import datetime
from urllib import urlencode
from pylons.i18n import get_lang
import ckan.lib.base as base
import ckan.lib.helpers as h
import ckan.lib.maintain as maintain
import ckan.lib.navl.dictization_functions as dict_fns
import ckan.logic as logic
import ckan.lib.search as search
import ckan.model as model
import ckan.new_authz as new_authz
import ckan.lib.plugins
import ckan.plugins as plugins
from ckan.common import OrderedDict, c, g, request, _
log = logging.getLogger(__name__)
render = base.render
abort = base.abort
NotFound = logic.NotFound
NotAuthorized = logic.NotAuthorized
ValidationError = logic.ValidationError
check_access = logic.check_access
get_action = logic.get_action
tuplize_dict = logic.tuplize_dict
clean_dict = logic.clean_dict
parse_params = logic.parse_params
lookup_group_plugin = ckan.lib.plugins.lookup_group_plugin
class GroupController(base.BaseController):
group_type = 'group'
## hooks for subclasses
def _group_form(self, group_type=None):
return lookup_group_plugin(group_type).group_form()
def _form_to_db_schema(self, group_type=None):
return lookup_group_plugin(group_type).form_to_db_schema()
def _db_to_form_schema(self, group_type=None):
'''This is an interface to manipulate data from the database
into a format suitable for the form (optional)'''
return lookup_group_plugin(group_type).db_to_form_schema()
def _setup_template_variables(self, context, data_dict, group_type=None):
return lookup_group_plugin(group_type).\
setup_template_variables(context, data_dict)
def _new_template(self, group_type):
return lookup_group_plugin(group_type).new_template()
def _index_template(self, group_type):
return lookup_group_plugin(group_type).index_template()
def _about_template(self, group_type):
return lookup_group_plugin(group_type).about_template()
def _read_template(self, group_type):
return lookup_group_plugin(group_type).read_template()
def _history_template(self, group_type):
return lookup_group_plugin(group_type).history_template()
def _edit_template(self, group_type):
return lookup_group_plugin(group_type).edit_template()
def _activity_template(self, group_type):
return lookup_group_plugin(group_type).activity_template()
def _admins_template(self, group_type):
return lookup_group_plugin(group_type).admins_template()
def _bulk_process_template(self, group_type):
return lookup_group_plugin(group_type).bulk_process_template()
## end hooks
def _replace_group_org(self, string):
''' substitute organization for group if this is an org'''
if self.group_type == 'organization':
string = re.sub('^group', 'organization', string)
return string
def _action(self, action_name):
''' select the correct group/org action '''
return get_action(self._replace_group_org(action_name))
def _check_access(self, action_name, *args, **kw):
''' select the correct group/org check_access '''
return check_access(self._replace_group_org(action_name), *args, **kw)
def _render_template(self, template_name):
''' render the correct group/org template '''
return render(self._replace_group_org(template_name))
def _redirect_to(self, *args, **kw):
        ''' wrapper to ensure the correct controller is used '''
if self.group_type == 'organization' and 'controller' in kw:
kw['controller'] = 'organization'
return h.redirect_to(*args, **kw)
def _url_for(self, *args, **kw):
        ''' wrapper to ensure the correct controller is used '''
if self.group_type == 'organization' and 'controller' in kw:
kw['controller'] = 'organization'
return h.url_for(*args, **kw)
def _guess_group_type(self, expecting_name=False):
"""
Guess the type of group from the URL handling the case
where there is a prefix on the URL (such as /data/organization)
"""
parts = [x for x in request.path.split('/') if x]
idx = -1
if expecting_name:
idx = -2
gt = parts[idx]
if gt == 'group':
gt = None
return gt
def index(self):
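        # Lists groups (or organizations), filtered by the optional ?q= search
        # string and ordered by the optional ?sort= parameter, 21 per page.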
group_type = self._guess_group_type()
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'with_private': False}
q = c.q = request.params.get('q', '')
data_dict = {'all_fields': True, 'q': q}
sort_by = c.sort_by_selected = request.params.get('sort')
if sort_by:
data_dict['sort'] = sort_by
try:
self._check_access('site_read', context)
except NotAuthorized:
abort(401, _('Not authorized to see this page'))
# pass user info to context as needed to view private datasets of
# orgs correctly
if c.userobj:
context['user_id'] = c.userobj.id
context['user_is_admin'] = c.userobj.sysadmin
results = self._action('group_list')(context, data_dict)
c.page = h.Page(
collection=results,
page=request.params.get('page', 1),
url=h.pager_url,
items_per_page=21
)
return render(self._index_template(group_type))
def read(self, id, limit=20):
group_type = self._get_group_type(id.split('@')[0])
if group_type != self.group_type:
abort(404, _('Incorrect group type'))
context = {'model': model, 'session': model.Session,
'user': c.user or c.author,
'schema': self._db_to_form_schema(group_type=group_type),
'for_view': True}
data_dict = {'id': id}
# unicode format (decoded from utf8)
q = c.q = request.params.get('q', '')
try:
# Do not query for the group datasets when dictizing, as they will
# be ignored and get requested on the controller anyway
context['include_datasets'] = False
c.group_dict = self._action('group_show')(context, data_dict)
c.group = context['group']
except NotFound:
abort(404, _('Group not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read group %s') % id)
self._read(id, limit)
return render(self._read_template(c.group_dict['type']))
def _read(self, id, limit):
''' This is common code used by both read and bulk_process'''
group_type = self._get_group_type(id.split('@')[0])
context = {'model': model, 'session': model.Session,
'user': c.user or c.author,
'schema': self._db_to_form_schema(group_type=group_type),
'for_view': True, 'extras_as_string': True}
q = c.q = request.params.get('q', '')
# Search within group
if c.group_dict.get('is_organization'):
q += ' owner_org:"%s"' % c.group_dict.get('id')
else:
q += ' groups:"%s"' % c.group_dict.get('name')
c.description_formatted = h.render_markdown(c.group_dict.get('description'))
context['return_query'] = True
# c.group_admins is used by CKAN's legacy (Genshi) templates only,
# if we drop support for those then we can delete this line.
c.group_admins = new_authz.get_group_or_org_admin_ids(c.group.id)
try:
page = int(request.params.get('page', 1))
except ValueError, e:
abort(400, ('"page" parameter must be an integer'))
# most search operations should reset the page counter:
params_nopage = [(k, v) for k, v in request.params.items()
if k != 'page']
#sort_by = request.params.get('sort', 'name asc')
sort_by = request.params.get('sort', None)
def search_url(params):
if group_type == 'organization':
if c.action == 'bulk_process':
url = self._url_for(controller='organization',
action='bulk_process',
id=id)
else:
url = self._url_for(controller='organization',
action='read',
id=id)
else:
url = self._url_for(controller='group', action='read', id=id)
params = [(k, v.encode('utf-8') if isinstance(v, basestring)
else str(v)) for k, v in params]
return url + u'?' + urlencode(params)
def drill_down_url(**by):
return h.add_url_param(alternative_url=None,
controller='group', action='read',
extras=dict(id=c.group_dict.get('name')),
new_params=by)
c.drill_down_url = drill_down_url
def remove_field(key, value=None, replace=None):
return h.remove_url_param(key, value=value, replace=replace,
controller='group', action='read',
extras=dict(id=c.group_dict.get('name')))
c.remove_field = remove_field
def pager_url(q=None, page=None):
params = list(params_nopage)
params.append(('page', page))
return search_url(params)
try:
c.fields = []
search_extras = {}
for (param, value) in request.params.items():
if not param in ['q', 'page', 'sort'] \
and len(value) and not param.startswith('_'):
if not param.startswith('ext_'):
c.fields.append((param, value))
q += ' %s: "%s"' % (param, value)
else:
search_extras[param] = value
fq = 'capacity:"public"'
user_member_of_orgs = [org['id'] for org
in h.organizations_available('read')]
if (c.group and c.group.id in user_member_of_orgs):
fq = ''
context['ignore_capacity_check'] = True
facets = OrderedDict()
default_facet_titles = {'organization': _('Organizations'),
'groups': _('Groups'),
'tags': _('Tags'),
'res_format': _('Formats'),
'license_id': _('Licenses')}
for facet in g.facets:
if facet in default_facet_titles:
facets[facet] = default_facet_titles[facet]
else:
facets[facet] = facet
# Facet titles
for plugin in plugins.PluginImplementations(plugins.IFacets):
if self.group_type == 'organization':
facets = plugin.organization_facets(
facets, self.group_type, None)
else:
facets = plugin.group_facets(
facets, self.group_type, None)
if 'capacity' in facets and (self.group_type != 'organization' or
not user_member_of_orgs):
del facets['capacity']
c.facet_titles = facets
data_dict = {
'q': q,
'fq': fq,
'facet.field': facets.keys(),
'rows': limit,
'sort': sort_by,
'start': (page - 1) * limit,
'extras': search_extras
}
query = get_action('package_search')(context, data_dict)
c.page = h.Page(
collection=query['results'],
page=page,
url=pager_url,
item_count=query['count'],
items_per_page=limit
)
c.group_dict['package_count'] = query['count']
c.facets = query['facets']
maintain.deprecate_context_item('facets',
'Use `c.search_facets` instead.')
c.search_facets = query['search_facets']
c.search_facets_limits = {}
for facet in c.facets.keys():
limit = int(request.params.get('_%s_limit' % facet,
g.facets_default_number))
c.search_facets_limits[facet] = limit
c.page.items = query['results']
c.sort_by_selected = sort_by
except search.SearchError, se:
log.error('Group search error: %r', se.args)
c.query_error = True
c.facets = {}
c.page = h.Page(collection=[])
self._setup_template_variables(context, {'id':id},
group_type=group_type)
def bulk_process(self, id):
''' Allow bulk processing of datasets for an organization. Make
private/public or delete. For organization admins.'''
group_type = self._get_group_type(id.split('@')[0])
if group_type != 'organization':
# FIXME: better error
raise Exception('Must be an organization')
# check we are org admin
context = {'model': model, 'session': model.Session,
'user': c.user or c.author,
'schema': self._db_to_form_schema(group_type=group_type),
'for_view': True, 'extras_as_string': True}
data_dict = {'id': id}
try:
# Do not query for the group datasets when dictizing, as they will
# be ignored and get requested on the controller anyway
context['include_datasets'] = False
c.group_dict = self._action('group_show')(context, data_dict)
c.group = context['group']
except NotFound:
abort(404, _('Group not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read group %s') % id)
#use different form names so that ie7 can be detected
form_names = set(["bulk_action.public", "bulk_action.delete",
"bulk_action.private"])
actions_in_form = set(request.params.keys())
actions = form_names.intersection(actions_in_form)
# If no action then just show the datasets
if not actions:
# unicode format (decoded from utf8)
limit = 500
self._read(id, limit)
c.packages = c.page.items
return render(self._bulk_process_template(group_type))
#ie7 puts all buttons in form params but puts submitted one twice
for key, value in dict(request.params.dict_of_lists()).items():
if len(value) == 2:
action = key.split('.')[-1]
break
else:
#normal good browser form submission
action = actions.pop().split('.')[-1]
        # To process the action, first find the datasets to perform it on.
# they are prefixed by dataset_ in the form data
datasets = []
for param in request.params:
if param.startswith('dataset_'):
datasets.append(param[8:])
action_functions = {
'private': 'bulk_update_private',
'public': 'bulk_update_public',
'delete': 'bulk_update_delete',
}
data_dict = {'datasets': datasets, 'org_id': c.group_dict['id']}
try:
get_action(action_functions[action])(context, data_dict)
except NotAuthorized:
abort(401, _('Not authorized to perform bulk update'))
base.redirect(h.url_for(controller='organization',
action='bulk_process',
id=id))
def new(self, data=None, errors=None, error_summary=None):
group_type = self._guess_group_type(True)
if data:
data['type'] = group_type
context = {'model': model, 'session': model.Session,
'user': c.user or c.author,
'save': 'save' in request.params,
'parent': request.params.get('parent', None)}
try:
self._check_access('group_create', context)
except NotAuthorized:
abort(401, _('Unauthorized to create a group'))
if context['save'] and not data:
return self._save_new(context, group_type)
data = data or {}
if not data.get('image_url', '').startswith('http'):
data.pop('image_url', None)
errors = errors or {}
error_summary = error_summary or {}
vars = {'data': data, 'errors': errors,
'error_summary': error_summary, 'action': 'new'}
self._setup_template_variables(context, data, group_type=group_type)
c.form = render(self._group_form(group_type=group_type),
extra_vars=vars)
return render(self._new_template(group_type))
def edit(self, id, data=None, errors=None, error_summary=None):
group_type = self._get_group_type(id.split('@')[0])
context = {'model': model, 'session': model.Session,
'user': c.user or c.author,
'save': 'save' in request.params,
'for_edit': True,
'parent': request.params.get('parent', None)
}
data_dict = {'id': id}
if context['save'] and not data:
return self._save_edit(id, context)
try:
old_data = self._action('group_show')(context, data_dict)
c.grouptitle = old_data.get('title')
c.groupname = old_data.get('name')
data = data or old_data
except NotFound:
abort(404, _('Group not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read group %s') % '')
group = context.get("group")
c.group = group
c.group_dict = self._action('group_show')(context, data_dict)
try:
self._check_access('group_update', context)
except NotAuthorized, e:
abort(401, _('User %r not authorized to edit %s') % (c.user, id))
errors = errors or {}
vars = {'data': data, 'errors': errors,
'error_summary': error_summary, 'action': 'edit'}
self._setup_template_variables(context, data, group_type=group_type)
c.form = render(self._group_form(group_type), extra_vars=vars)
return render(self._edit_template(c.group.type))
def _get_group_type(self, id):
"""
Given the id of a group it determines the type of a group given
a valid id/name for the group.
"""
group = model.Group.get(id)
if not group:
return None
return group.type
def _save_new(self, context, group_type=None):
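        # Creates the group from the submitted form data (adding the current user
        # as its first admin) and redirects to the group's read page on success.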
try:
data_dict = clean_dict(dict_fns.unflatten(
tuplize_dict(parse_params(request.params))))
data_dict['type'] = group_type or 'group'
context['message'] = data_dict.get('log_message', '')
data_dict['users'] = [{'name': c.user, 'capacity': 'admin'}]
group = self._action('group_create')(context, data_dict)
# Redirect to the appropriate _read route for the type of group
h.redirect_to(group['type'] + '_read', id=group['name'])
except NotAuthorized:
abort(401, _('Unauthorized to read group %s') % '')
except NotFound, e:
abort(404, _('Group not found'))
except dict_fns.DataError:
abort(400, _(u'Integrity Error'))
except ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
return self.new(data_dict, errors, error_summary)
def _force_reindex(self, grp):
''' When the group name has changed, we need to force a reindex
of the datasets within the group, otherwise they will stop
appearing on the read page for the group (as they're connected via
the group name)'''
group = model.Group.get(grp['name'])
for dataset in group.packages():
search.rebuild(dataset.name)
def _save_edit(self, id, context):
try:
data_dict = clean_dict(dict_fns.unflatten(
tuplize_dict(parse_params(request.params))))
context['message'] = data_dict.get('log_message', '')
data_dict['id'] = id
context['allow_partial_update'] = True
group = self._action('group_update')(context, data_dict)
if id != group['name']:
self._force_reindex(group)
h.redirect_to('%s_read' % group['type'], id=group['name'])
except NotAuthorized:
abort(401, _('Unauthorized to read group %s') % id)
except NotFound, e:
abort(404, _('Group not found'))
except dict_fns.DataError:
abort(400, _(u'Integrity Error'))
except ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
return self.edit(id, data_dict, errors, error_summary)
def authz(self, id):
group = model.Group.get(id)
if group is None:
abort(404, _('Group not found'))
c.groupname = group.name
c.grouptitle = group.display_name
try:
context = \
{'model': model, 'user': c.user or c.author, 'group': group}
self._check_access('group_edit_permissions', context)
c.authz_editable = True
c.group = context['group']
except NotAuthorized:
c.authz_editable = False
if not c.authz_editable:
abort(401,
_('User %r not authorized to edit %s authorizations') %
(c.user, id))
roles = self._handle_update_of_authz(group)
self._prepare_authz_info_for_render(roles)
return render('group/authz.html')
def delete(self, id):
if 'cancel' in request.params:
self._redirect_to(controller='group', action='edit', id=id)
context = {'model': model, 'session': model.Session,
'user': c.user or c.author}
try:
self._check_access('group_delete', context, {'id': id})
except NotAuthorized:
abort(401, _('Unauthorized to delete group %s') % '')
try:
if request.method == 'POST':
self._action('group_delete')(context, {'id': id})
if self.group_type == 'organization':
h.flash_notice(_('Organization has been deleted.'))
else:
h.flash_notice(_('Group has been deleted.'))
self._redirect_to(controller='group', action='index')
c.group_dict = self._action('group_show')(context, {'id': id})
except NotAuthorized:
abort(401, _('Unauthorized to delete group %s') % '')
except NotFound:
abort(404, _('Group not found'))
return self._render_template('group/confirm_delete.html')
def members(self, id):
context = {'model': model, 'session': model.Session,
'user': c.user or c.author}
try:
c.members = self._action('member_list')(
context, {'id': id, 'object_type': 'user'}
)
c.group_dict = self._action('group_show')(context, {'id': id})
except NotAuthorized:
abort(401, _('Unauthorized to delete group %s') % '')
except NotFound:
abort(404, _('Group not found'))
return self._render_template('group/members.html')
def member_new(self, id):
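        # On POST, adds a member to the group: either an existing user or, when an
        # email address is supplied, a user invited via user_invite. On GET, renders
        # the membership form with the roles available for this group type.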
context = {'model': model, 'session': model.Session,
'user': c.user or c.author}
#self._check_access('group_delete', context, {'id': id})
try:
if request.method == 'POST':
data_dict = clean_dict(dict_fns.unflatten(
tuplize_dict(parse_params(request.params))))
data_dict['id'] = id
email = data_dict.get('email')
if email:
user_data_dict = {
'email': email,
'group_id': data_dict['id'],
'role': data_dict['role']
}
del data_dict['email']
user_dict = self._action('user_invite')(context,
user_data_dict)
data_dict['username'] = user_dict['name']
c.group_dict = self._action('group_member_create')(context, data_dict)
self._redirect_to(controller='group', action='members', id=id)
else:
user = request.params.get('user')
if user:
c.user_dict = get_action('user_show')(context, {'id': user})
c.user_role = new_authz.users_role_for_group_or_org(id, user) or 'member'
else:
c.user_role = 'member'
c.group_dict = self._action('group_show')(context, {'id': id})
group_type = 'organization' if c.group_dict['is_organization'] else 'group'
c.roles = self._action('member_roles_list')(
context, {'group_type': group_type}
)
except NotAuthorized:
abort(401, _('Unauthorized to add member to group %s') % '')
except NotFound:
abort(404, _('Group not found'))
except ValidationError, e:
h.flash_error(e.error_summary)
return self._render_template('group/member_new.html')
def member_delete(self, id):
if 'cancel' in request.params:
self._redirect_to(controller='group', action='members', id=id)
context = {'model': model, 'session': model.Session,
'user': c.user or c.author}
try:
self._check_access('group_member_delete', context, {'id': id})
except NotAuthorized:
abort(401, _('Unauthorized to delete group %s members') % '')
try:
user_id = request.params.get('user')
if request.method == 'POST':
self._action('group_member_delete')(context, {'id': id, 'user_id': user_id})
h.flash_notice(_('Group member has been deleted.'))
self._redirect_to(controller='group', action='members', id=id)
c.user_dict = self._action('user_show')(context, {'id': user_id})
c.user_id = user_id
c.group_id = id
except NotAuthorized:
abort(401, _('Unauthorized to delete group %s') % '')
except NotFound:
abort(404, _('Group not found'))
return self._render_template('group/confirm_delete_member.html')
def history(self, id):
if 'diff' in request.params or 'selected1' in request.params:
try:
params = {'id': request.params.getone('group_name'),
'diff': request.params.getone('selected1'),
'oldid': request.params.getone('selected2'),
}
except KeyError, e:
if 'group_name' in dict(request.params):
id = request.params.getone('group_name')
c.error = \
_('Select two revisions before doing the comparison.')
else:
params['diff_entity'] = 'group'
h.redirect_to(controller='revision', action='diff', **params)
context = {'model': model, 'session': model.Session,
'user': c.user or c.author,
'schema': self._db_to_form_schema()}
data_dict = {'id': id}
try:
c.group_dict = self._action('group_show')(context, data_dict)
c.group_revisions = self._action('group_revision_list')(context,
data_dict)
#TODO: remove
# Still necessary for the authz check in group/layout.html
c.group = context['group']
except NotFound:
abort(404, _('Group not found'))
except NotAuthorized:
abort(401, _('User %r not authorized to edit %r') % (c.user, id))
format = request.params.get('format', '')
if format == 'atom':
# Generate and return Atom 1.0 document.
from webhelpers.feedgenerator import Atom1Feed
feed = Atom1Feed(
title=_(u'CKAN Group Revision History'),
link=self._url_for(controller='group', action='read',
id=c.group_dict['name']),
description=_(u'Recent changes to CKAN Group: ') +
c.group_dict['display_name'],
language=unicode(get_lang()),
)
for revision_dict in c.group_revisions:
revision_date = h.date_str_to_datetime(
revision_dict['timestamp'])
try:
dayHorizon = int(request.params.get('days'))
except:
dayHorizon = 30
dayAge = (datetime.datetime.now() - revision_date).days
if dayAge >= dayHorizon:
break
if revision_dict['message']:
item_title = u'%s' % revision_dict['message'].\
split('\n')[0]
else:
item_title = u'%s' % revision_dict['id']
item_link = h.url_for(controller='revision', action='read',
id=revision_dict['id'])
item_description = _('Log message: ')
item_description += '%s' % (revision_dict['message'] or '')
item_author_name = revision_dict['author']
item_pubdate = revision_date
feed.add_item(
title=item_title,
link=item_link,
description=item_description,
author_name=item_author_name,
pubdate=item_pubdate,
)
feed.content_type = 'application/atom+xml'
return feed.writeString('utf-8')
return render(self._history_template(c.group_dict['type']))
def activity(self, id, offset=0):
'''Render this group's public activity stream page.'''
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True}
try:
c.group_dict = self._get_group_dict(id)
except NotFound:
abort(404, _('Group not found'))
except NotAuthorized:
abort(401,
_('Unauthorized to read group {group_id}').format(
group_id=id))
# Add the group's activity stream (already rendered to HTML) to the
# template context for the group/read.html template to retrieve later.
c.group_activity_stream = self._action('group_activity_list_html')(
context, {'id': c.group_dict['id'], 'offset': offset})
return render(self._activity_template(c.group_dict['type']))
def follow(self, id):
'''Start following this group.'''
context = {'model': model,
'session': model.Session,
'user': c.user or c.author}
data_dict = {'id': id}
try:
get_action('follow_group')(context, data_dict)
group_dict = get_action('group_show')(context, data_dict)
h.flash_success(_("You are now following {0}").format(
group_dict['title']))
except ValidationError as e:
error_message = (e.extra_msg or e.message or e.error_summary
or e.error_dict)
h.flash_error(error_message)
except NotAuthorized as e:
h.flash_error(e.extra_msg)
h.redirect_to(controller='group', action='read', id=id)
def unfollow(self, id):
'''Stop following this group.'''
context = {'model': model,
'session': model.Session,
'user': c.user or c.author}
data_dict = {'id': id}
try:
get_action('unfollow_group')(context, data_dict)
group_dict = get_action('group_show')(context, data_dict)
h.flash_success(_("You are no longer following {0}").format(
group_dict['title']))
except ValidationError as e:
error_message = (e.extra_msg or e.message or e.error_summary
or e.error_dict)
h.flash_error(error_message)
except (NotFound, NotAuthorized) as e:
error_message = e.extra_msg or e.message
h.flash_error(error_message)
h.redirect_to(controller='group', action='read', id=id)
def followers(self, id):
context = {'model': model, 'session': model.Session,
'user': c.user or c.author}
c.group_dict = self._get_group_dict(id)
try:
c.followers = get_action('group_follower_list')(context, {'id': id})
except NotAuthorized:
abort(401, _('Unauthorized to view followers %s') % '')
return render('group/followers.html')
def admins(self, id):
c.group_dict = self._get_group_dict(id)
c.admins = new_authz.get_group_or_org_admin_ids(id)
return render(self._admins_template(c.group_dict['type']))
def about(self, id):
context = {'model': model, 'session': model.Session,
'user': c.user or c.author}
c.group_dict = self._get_group_dict(id)
group_type = c.group_dict['type']
self._setup_template_variables(context, {'id': id},
group_type=group_type)
return render(self._about_template(group_type))
def _get_group_dict(self, id):
''' returns the result of group_show action or aborts if there is a
problem '''
context = {'model': model, 'session': model.Session,
'user': c.user or c.author,
'for_view': True}
try:
return self._action('group_show')(context, {'id': id})
except NotFound:
abort(404, _('Group not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read group %s') % id)
def _render_edit_form(self, fs):
# errors arrive in c.error and fs.errors
c.fieldset = fs
return render('group/edit_form.html')
def _update(self, fs, group_name, group_id):
'''
Writes the POST data (associated with a group edit) to the database
@input c.error
'''
validation = fs.validate()
if not validation:
c.form = self._render_edit_form(fs)
raise base.ValidationException(fs)
try:
fs.sync()
except Exception, inst:
model.Session.rollback()
raise
else:
model.Session.commit()
def _update_authz(self, fs):
validation = fs.validate()
if not validation:
c.form = self._render_edit_form(fs)
raise base.ValidationException(fs)
try:
fs.sync()
except Exception, inst:
model.Session.rollback()
raise
else:
model.Session.commit()
| mit | 8,639,835,599,501,780,000 | 38.496718 | 93 | 0.531163 | false |
lonnen/socorro | socorro/unittest/external/es/test_super_search_fields.py | 1 | 10900 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import copy
import datetime
import pytest
from socorro.lib import BadArgumentError
from socorro.external.es.super_search_fields import (
FIELDS,
is_doc_values_friendly,
add_doc_values,
SuperSearchFieldsModel,
)
from socorro.lib import datetimeutil
from socorro.unittest.external.es.base import ElasticsearchTestCase
# Uncomment these lines to decrease verbosity of the elasticsearch library
# while running unit tests.
# import logging
# logging.getLogger('elasticsearch').setLevel(logging.ERROR)
# logging.getLogger('requests').setLevel(logging.ERROR)
class TestIntegrationSuperSearchFields(ElasticsearchTestCase):
"""Test SuperSearchFields with an elasticsearch database containing fake data"""
def setup_method(self, method):
super().setup_method(method)
config = self.get_base_config(cls=SuperSearchFieldsModel)
self.api = SuperSearchFieldsModel(config=config)
self.api.get_fields = lambda: copy.deepcopy(FIELDS)
def test_get_fields(self):
results = self.api.get_fields()
assert results == FIELDS
def test_get_missing_fields(self):
config = self.get_base_config(
cls=SuperSearchFieldsModel, es_index='socorro_integration_test_%W'
)
api = SuperSearchFieldsModel(config=config)
fake_mappings = [
# First mapping
{
config.elasticsearch_doctype: {
'properties': {
# Add a bunch of unknown fields.
'field_z': {
'type': 'string'
},
'namespace1': {
'type': 'object',
'properties': {
'field_a': {
'type': 'string'
},
'field_b': {
'type': 'long'
}
}
},
'namespace2': {
'type': 'object',
'properties': {
'subspace1': {
'type': 'object',
'properties': {
'field_b': {
'type': 'long'
}
}
}
}
},
# Add a few known fields that should not appear.
'processed_crash': {
'type': 'object',
'properties': {
'signature': {
'type': 'string'
},
'product': {
'type': 'string'
},
}
}
}
}
},
# Second mapping to compare to the first
{
config.elasticsearch_doctype: {
'properties': {
'namespace1': {
'type': 'object',
'properties': {
'subspace1': {
'type': 'object',
'properties': {
'field_d': {
'type': 'long'
}
}
}
}
}
}
}
},
]
now = datetimeutil.utc_now()
indices = []
try:
# Using "2" here means that an index will be missing, hence testing
# that it swallows the subsequent error.
for i in range(2):
date = now - datetime.timedelta(weeks=i)
index = date.strftime(api.context.get_index_template())
mapping = fake_mappings[i % len(fake_mappings)]
api.context.create_index(index, mappings=mapping)
indices.append(index)
api = SuperSearchFieldsModel(config=config)
missing_fields = api.get_missing_fields()
expected = [
'field_z',
'namespace1.field_a',
'namespace1.field_b',
'namespace1.subspace1.field_d',
'namespace2.subspace1.field_b',
]
assert missing_fields['hits'] == expected
assert missing_fields['total'] == 5
finally:
for index in indices:
self.index_client.delete(index=index)
def test_get_mapping(self):
mapping = self.api.get_mapping()
doctype = self.es_context.get_doctype()
assert doctype in mapping
properties = mapping[doctype]['properties']
assert 'processed_crash' in properties
assert 'raw_crash' in properties
processed_crash = properties['processed_crash']['properties']
# Check in_database_name is used.
assert 'os_name' in processed_crash
assert 'platform' not in processed_crash
# Those fields have no `storage_mapping`.
assert 'fake_field' not in properties['raw_crash']['properties']
# Those fields have a `storage_mapping`.
assert processed_crash['release_channel'] == {
'analyzer': 'keyword',
'type': 'string'
}
# Test nested objects.
assert 'json_dump' in processed_crash
assert 'properties' in processed_crash['json_dump']
assert 'write_combine_size' in processed_crash['json_dump']['properties']
assert processed_crash['json_dump']['properties']['write_combine_size'] == {
'type': 'long',
'doc_values': True
}
# Test overwriting a field.
mapping = self.api.get_mapping(overwrite_mapping={
'name': 'fake_field',
'namespace': 'raw_crash',
'in_database_name': 'fake_field',
'storage_mapping': {
'type': 'long'
}
})
properties = mapping[doctype]['properties']
assert 'fake_field' in properties['raw_crash']['properties']
assert properties['raw_crash']['properties']['fake_field']['type'] == 'long'
def test_test_mapping(self):
"""Much test. So meta. Wow test_test_. """
# First test a valid mapping.
mapping = self.api.get_mapping()
assert self.api.test_mapping(mapping) is None
# Insert an invalid storage mapping.
mapping = self.api.get_mapping({
'name': 'fake_field',
'namespace': 'raw_crash',
'in_database_name': 'fake_field',
'storage_mapping': {
'type': 'unkwown'
}
})
with pytest.raises(BadArgumentError):
self.api.test_mapping(mapping)
# Test with a correct mapping but with data that cannot be indexed.
self.index_crash({
'date_processed': datetimeutil.utc_now(),
'product': 'WaterWolf',
})
self.es_context.refresh()
mapping = self.api.get_mapping({
'name': 'product',
'storage_mapping': {
'type': 'long'
}
})
with pytest.raises(BadArgumentError):
self.api.test_mapping(mapping)
def get_fields():
return FIELDS.items()
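# Each (name, properties) pair from FIELDS becomes an individual pytest case below.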
@pytest.mark.parametrize('name, properties', get_fields())
def test_validate_super_search_fields(name, properties):
"""Validates the contents of socorro.external.es.super_search_fields.FIELDS"""
# FIXME(willkg): When we start doing schema stuff in Python, we should
# switch this to a schema validation.
property_keys = [
'data_validation_type',
'description',
'form_field_choices',
'has_full_version',
'in_database_name',
'is_exposed',
'is_returned',
'name',
'namespace',
'permissions_needed',
'query_type',
'storage_mapping',
]
# Assert it has all the keys
assert sorted(properties.keys()) == sorted(property_keys)
# Assert boolean fields have boolean values
for key in ['has_full_version', 'is_exposed', 'is_returned']:
assert properties[key] in (True, False)
# Assert data_validation_type has a valid value
assert properties['data_validation_type'] in ('bool', 'datetime', 'enum', 'int', 'str')
# Assert query_type has a valid value
assert properties['query_type'] in ('bool', 'date', 'enum', 'flag', 'number', 'string')
# The name in the mapping should be the same as the name in properties
assert properties['name'] == name
@pytest.mark.parametrize('value, expected', [
# No type -> False
({}, False),
# object -> False
({'type': 'object'}, False),
# Analyzed string -> False
({'type': 'string'}, False),
({'type': 'string', 'analyzer': 'keyword'}, False),
# Unanalyzed string -> True
({'type': 'string', 'index': 'not_analyzed'}, True),
# Anything else -> True
({'type': 'long'}, True),
])
def test_is_doc_values_friendly(value, expected):
assert is_doc_values_friendly(value) == expected
def test_add_doc_values():
data = {'type': 'short'}
add_doc_values(data)
assert data == {
'type': 'short',
'doc_values': True
}
data = {
'fields': {
'AsyncShutdownTimeout': {
'analyzer': 'standard',
'index': 'analyzed',
'type': 'string',
},
'full': {
'index': 'not_analyzed',
'type': 'string',
}
},
'type': 'multi_field',
}
add_doc_values(data)
assert data == {
'fields': {
'AsyncShutdownTimeout': {
'analyzer': 'standard',
'index': 'analyzed',
'type': 'string',
},
'full': {
'index': 'not_analyzed',
'type': 'string',
'doc_values': True,
}
},
'type': 'multi_field',
}
| mpl-2.0 | -3,324,909,418,101,104,600 | 31.831325 | 91 | 0.475596 | false |
anthonyfok/frescobaldi | frescobaldi_app/snippet/insert.py | 1 | 9303 | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Insert snippets into a Document.
"""
import sys
from PyQt5.QtCore import QSettings
from PyQt5.QtGui import QTextCursor
from PyQt5.QtWidgets import QMessageBox
import cursortools
import tokeniter
import indent
from . import snippets
from . import expand
def insert(name, view):
"""Insert named snippet into the view."""
text, variables = snippets.get(name)
cursor = view.textCursor()
selection = variables.get('selection', '')
if 'yes' in selection and not cursor.hasSelection():
return
if 'strip' in selection:
cursortools.strip_selection(cursor)
pos = cursor.selectionStart()
with cursortools.compress_undo(cursor):
# insert the snippet, might return a new cursor
if 'python' in variables:
new = insert_python(text, cursor, name, view)
elif 'macro' in variables:
new = insert_macro(text, view)
else:
new = insert_snippet(text, cursor, variables)
        # QTextBlocks where the snippet starts and ends
block = cursor.document().findBlock(pos)
last = cursor.block()
# re-indent if not explicitly suppressed by a 'indent: no' variable
if last != block and 'no' not in variables.get('indent', ''):
c = QTextCursor(last)
c.setPosition(block.position(), QTextCursor.KeepAnchor)
with cursortools.compress_undo(c, True):
indent.re_indent(c, True)
if not new and 'keep' in selection:
end = cursor.position()
cursor.setPosition(pos)
cursor.setPosition(end, QTextCursor.KeepAnchor)
view.setTextCursor(new or cursor)
def insert_snippet(text, cursor, variables):
"""Inserts a normal text snippet.
After the insert, the cursor points to the end of the inserted snippet.
If this function returns a cursor it must be set as the cursor for the view
after the snippet has been inserted.
"""
exp_base = expand.Expander(cursor)
evs = [] # make a list of events, either text or a constant
for text, key in snippets.expand(text):
if text:
evs.append(text)
if key == '$':
evs.append('$')
elif key:
# basic variables
func = getattr(exp_base, key, None)
if func:
evs.append(func())
selectionUsed = expand.SELECTION in evs
# do the padding if 'selection: strip;' is used
if selectionUsed and 'strip' in variables.get('selection', ''):
space = '\n' if '\n' in cursor.selection().toPlainText() else ' '
# change whitespace in previous and next piece of text
i = evs.index(expand.SELECTION)
for j in range(i-1, -i, -1):
if evs[j] not in expand.constants:
evs[j] = evs[j].rstrip() + space
break
for j in range(i+1, len(evs)):
if evs[j] not in expand.constants:
evs[j] = space + evs[j].lstrip()
break
# now insert the text
ins = QTextCursor(cursor)
selectionUsed and ins.setPosition(cursor.selectionStart())
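    # (short-circuit idiom: the position is only moved when the selection was used)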
a, c = -1, -1
for e in evs:
if e == expand.ANCHOR:
a = ins.position()
elif e == expand.CURSOR:
c = ins.position()
elif e == expand.SELECTION:
ins.setPosition(cursor.selectionEnd())
else:
ins.insertText(e)
cursor.setPosition(ins.position())
# return a new cursor if requested
if (a, c) != (-1, -1):
new = QTextCursor(cursor)
if a != -1:
new.setPosition(a)
if c != -1:
new.setPosition(c, QTextCursor.KeepAnchor if a != -1 else QTextCursor.MoveAnchor)
return new
def insert_python(text, cursor, name, view):
"""Regards the text as Python code, and exec it.
name and view are given in case an exception occurs.
The following variables are available:
- text: contains selection or '', set it to insert new text
- state: contains simplestate for the cursor position
- cursor: the QTextCursor
After the insert, the cursor points to the end of the inserted snippet.
"""
namespace = {
'cursor': QTextCursor(cursor),
'state': state(cursor),
'text': cursor.selection().toPlainText(),
'view': view,
'ANCHOR': 1,
'CURSOR': 2,
}
try:
code = compile(text, "<snippet>", "exec")
if sys.version_info < (3, 0):
exec("exec code in namespace")
else:
exec(code, namespace)
if 'main' in namespace:
return namespace['main']()
except Exception:
handle_exception(name, view)
else:
text = namespace.get('text', '')
if isinstance(text, (tuple, list)):
ANCHOR = namespace.get('ANCHOR', 1)
CURSOR = namespace.get('CURSOR', 2)
a, c = -1, -1
for t in text:
if t == ANCHOR:
a = cursor.selectionStart()
elif t == CURSOR:
c = cursor.selectionStart()
else:
cursor.insertText(t)
if (a, c) != (-1, -1):
new = QTextCursor(cursor)
if a != -1:
new.setPosition(a)
if c != -1:
new.setPosition(c, QTextCursor.KeepAnchor if a != -1 else QTextCursor.MoveAnchor)
return new
else:
cursor.insertText(namespace['text'])
def insert_macro(text, view):
"""The macro snippet is a sequence of commands which are either
Frescobaldi actions or other snippets.
"""
import re
import actioncollectionmanager
from . import model
avail_snippets = {}
for n in model.model().names():
varname = snippets.get(n).variables.get('name')
if varname:
avail_snippets[varname] = n
avail_actions = {}
win = view.window()
for collection in actioncollectionmanager.manager(win).actionCollections():
for name, action in collection.actions().items():
avail_actions[name] = action
commands = [x.strip() for x in text.split('\n') if x]
for c in commands:
if c in avail_snippets:
insert(avail_snippets[c], view)
elif c in avail_actions:
avail_actions[c].trigger()
def state(cursor):
"""Returns the simplestate string for the position of the cursor."""
import simplestate
pos = cursor.selectionStart()
block = cursor.document().findBlock(pos)
tokens = tokeniter.tokens(block)
state = tokeniter.state(block)
column = pos - block.position()
for t in tokens:
if t.end > column:
break
state.follow(t)
return simplestate.state(state)
def handle_exception(name, view):
"""Called when a snippet raises a Python exception.
Shows the error message and offers the option to edit the offending snippet.
"""
import sys, traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = traceback.extract_tb(exc_traceback)
while tb and tb[0][0] != "<snippet>":
del tb[0]
msg = ''.join(traceback.format_list(tb) +
traceback.format_exception_only(exc_type, exc_value))
dlg = QMessageBox(QMessageBox.Critical, _("Snippet error"), msg,
QMessageBox.Ok | QMessageBox.Cancel)
dlg.button(QMessageBox.Ok).setText(_("Edit Snippet"))
dlg.setDefaultButton(QMessageBox.Cancel)
dlg.setEscapeButton(QMessageBox.Cancel)
if dlg.exec_() != QMessageBox.Ok:
return
# determine line number
if exc_type is SyntaxError:
lineno = exc_value.lineno
elif tb:
lineno = tb[0][1]
else:
lineno = None
import panelmanager
from . import edit
widget = panelmanager.manager(view.window()).snippettool.widget()
textedit = edit.Edit(widget, name).text
if lineno is not None:
# convert to line number in full snippet text
for block in cursortools.all_blocks(textedit.document()):
if block.text().startswith('-*- '):
lineno += 1
else:
break
block = textedit.document().findBlockByNumber(lineno-1)
if block.isValid():
textedit.setTextCursor(QTextCursor(block))
| gpl-2.0 | -6,613,117,712,532,106,000 | 31.872792 | 101 | 0.603031 | false |
flyte/pi-mqtt-gpio | tests/modules/test_raspberrypi.py | 1 | 10361 | import pytest
import time
from pi_mqtt_gpio.modules import raspberrypi, PinDirection, PinPullup, InterruptEdge
pytestmark = pytest.mark.hw_raspberrypi
"""
Attention:
The following tests will only work with hardware modifikations.
Definition of wired connected GPIOs, always connect xxx_OUTPUT
with xxx_INPUT. You may connect them with a 10kOhm resistor.
"""
# gpio pins for get and set
TEST_RASPBERRYPI_GPIO_SET_GET_OUTPUT = 26
TEST_RASPBERRYPI_GPIO_SET_GET_INPUT = 21
# gpio pins interrupt tests
TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_OUTPUT = 19
TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_INPUT = 20
TEST_RASPBERRYPI_GPIO_INTERRUPT_FALLING_OUTPUT = 13
TEST_RASPBERRYPI_GPIO_INTERRUPT_FALLING_INPUT = 16
TEST_RASPBERRYPI_GPIO_INTERRUPT_BOTH_OUTPUT = 6
TEST_RASPBERRYPI_GPIO_INTERRUPT_BOTH_INPUT = 12
# global gpio and interrupt callback function
gpio = None
interrupt_count = 0
test_handle = 0
# Fixture applied automatically around every test case (set up GPIO before, clean up after)
@pytest.fixture(autouse=True)
def fix_raspberrypi_setup_teardown():
# initialize gpios and counter for interrupt before each test case
global gpio
gpio = raspberrypi.GPIO("")
global interrupt_count
interrupt_count = 0
# run the test case here
yield
# clean up the gpios after each test
gpio.cleanup()
def gpio_testcallback(handle, pin, value):
"""
callback function for test interrupts, count the calls of this function
and store a given handle.
"""
global interrupt_count
interrupt_count = interrupt_count + 1
global test_handle
test_handle = handle
def test_raspberrypi_setup_output_pin():
# setup a output gpio
gpio.setup_pin(
TEST_RASPBERRYPI_GPIO_SET_GET_OUTPUT, PinDirection.OUTPUT, None, {"initial": None}
)
def test_raspberrypi_setup_input_pin():
# setup a input gpio
gpio.setup_pin(
TEST_RASPBERRYPI_GPIO_SET_GET_INPUT, PinDirection.INPUT, PinPullup.OFF, {}
)
def test_raspberrypi_set_pin():
# setup a output gpio and set value to high
gpio.setup_pin(
TEST_RASPBERRYPI_GPIO_SET_GET_OUTPUT, PinDirection.OUTPUT, None, {"initial": None}
)
gpio.set_pin(TEST_RASPBERRYPI_GPIO_SET_GET_OUTPUT, True)
def test_raspberrypi_get_pin():
# setup a output and input gpio and read the value from input
gpio.setup_pin(
TEST_RASPBERRYPI_GPIO_SET_GET_OUTPUT, PinDirection.OUTPUT, None, {"initial": None}
)
gpio.setup_pin(
TEST_RASPBERRYPI_GPIO_SET_GET_INPUT, PinDirection.INPUT, PinPullup.OFF, {}
)
# get the value of the connected input
value = gpio.get_pin(TEST_RASPBERRYPI_GPIO_SET_GET_INPUT)
assert value == True
def test_raspberrypi_low_high():
# setup a output gpio and set value to low and high
gpio.setup_pin(
TEST_RASPBERRYPI_GPIO_SET_GET_OUTPUT,
PinDirection.OUTPUT,
None,
{"initial": "high"},
)
gpio.setup_pin(
TEST_RASPBERRYPI_GPIO_SET_GET_INPUT, PinDirection.INPUT, PinPullup.OFF, {}
)
# set gpio TEST_RASPBERRYPI_GPIO_SET_GET_OUTPUT low
gpio.set_pin(TEST_RASPBERRYPI_GPIO_SET_GET_OUTPUT, False)
value = gpio.get_pin(TEST_RASPBERRYPI_GPIO_SET_GET_INPUT)
assert value == False
# set gpio TEST_RASPBERRYPI_GPIO_SET_GET_OUTPUT high
gpio.set_pin(TEST_RASPBERRYPI_GPIO_SET_GET_OUTPUT, True)
value = gpio.get_pin(TEST_RASPBERRYPI_GPIO_SET_GET_INPUT)
assert value == True
# set gpio TEST_RASPBERRYPI_GPIO_SET_GET_OUTPUT low again
gpio.set_pin(TEST_RASPBERRYPI_GPIO_SET_GET_OUTPUT, False)
value = gpio.get_pin(TEST_RASPBERRYPI_GPIO_SET_GET_INPUT)
assert value == False
def test_raspberrypi_interrupt_callback_handle():
# test for handle passing
gpio.setup_pin(
TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_OUTPUT,
PinDirection.OUTPUT,
None,
{"initial": "low"},
)
gpio.setup_pin(
TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_INPUT, PinDirection.INPUT, PinPullup.UP, {}
)
gpio.io.remove_event_detect(TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_INPUT)
gpio.setup_interrupt(
"myhandle",
TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_INPUT,
InterruptEdge.RISING,
gpio_testcallback,
300,
)
gpio.set_pin(TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_OUTPUT, True) # one rising edge
    time.sleep(0.1)  # wait for the callback to fire
assert interrupt_count == 1
assert test_handle == "myhandle"
def test_raspberrypi_interrupt_trigger_rising_single():
# setup outputs and inputs for interrupt tests for rising edges
gpio.setup_pin(
TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_OUTPUT,
PinDirection.OUTPUT,
None,
{"initial": "low"},
)
gpio.setup_pin(
TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_INPUT, PinDirection.INPUT, PinPullup.UP, {}
)
gpio.io.remove_event_detect(TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_INPUT)
gpio.setup_interrupt(
None,
TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_INPUT,
InterruptEdge.RISING,
gpio_testcallback,
300,
)
gpio.set_pin(TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_OUTPUT, True) # one rising edge
    time.sleep(0.1)  # wait for the callback to fire
assert interrupt_count == 1
def test_raspberrypi_interrupt_trigger_rising_multi():
# setup outputs and inputs for interrupt tests for multiple rising edges
gpio.setup_pin(
TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_OUTPUT,
PinDirection.OUTPUT,
None,
{"initial": "low"},
)
gpio.setup_pin(
TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_INPUT, PinDirection.INPUT, PinPullup.UP, {}
)
gpio.io.remove_event_detect(TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_INPUT)
gpio.setup_interrupt(
None,
TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_INPUT,
InterruptEdge.RISING,
gpio_testcallback,
300,
)
gpio.set_pin(TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_OUTPUT, True) # one rising edge
gpio.set_pin(TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_OUTPUT, False) # reset low
time.sleep(0.4) # wait bounce time
gpio.set_pin(TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_OUTPUT, True) # one rising edge
gpio.set_pin(TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_OUTPUT, False) # reset low
time.sleep(0.4) # wait bounce time
gpio.set_pin(TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_OUTPUT, True) # one rising edge
gpio.set_pin(TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_OUTPUT, False) # reset low
time.sleep(0.3) # wait bounce time
assert interrupt_count == 3
def test_raspberrypi_interrupt_trigger_rising_bouncetime():
# test for bouncetime, miss one interrupt durring bounce time
gpio.setup_pin(
TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_OUTPUT,
PinDirection.OUTPUT,
None,
{"initial": "low"},
)
gpio.setup_pin(
TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_INPUT, PinDirection.INPUT, PinPullup.UP, {}
)
gpio.io.remove_event_detect(TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_INPUT)
gpio.setup_interrupt(
None,
TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_INPUT,
InterruptEdge.RISING,
gpio_testcallback,
300,
)
# generate one interrupt
gpio.set_pin(TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_OUTPUT, True) # one rising edge
gpio.set_pin(TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_OUTPUT, False) # reset low
    time.sleep(0.1)  # wait for the callback to run
assert interrupt_count == 1
# miss one interrupt
time.sleep(0) # wait less time than bounce time
gpio.set_pin(TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_OUTPUT, True) # one rising edge
gpio.set_pin(TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_OUTPUT, False) # reset low
    time.sleep(0.1)  # wait for the callback to run
assert interrupt_count == 1 # interrupt should not be called
# take a new interrupt
time.sleep(0.3) # wait bounce time
gpio.set_pin(TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_OUTPUT, True) # one rising edge
gpio.set_pin(TEST_RASPBERRYPI_GPIO_INTERRUPT_RISING_OUTPUT, False) # reset low
    time.sleep(0.1)  # wait for the callback to run
assert interrupt_count == 2 # take this interrupt
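# Note (added for clarity): with the 300 ms bouncetime configured above, edges
# arriving less than 0.3 s after the previous accepted edge are ignored, which
# is why the second rising edge in this test is never counted.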
def test_raspberrypi_interrupt_trigger_falling_single():
# setup outputs and inputs for interrupt tests for falling edges
gpio.setup_pin(
TEST_RASPBERRYPI_GPIO_INTERRUPT_FALLING_OUTPUT,
PinDirection.OUTPUT,
None,
{"initial": "high"},
)
gpio.setup_pin(
TEST_RASPBERRYPI_GPIO_INTERRUPT_FALLING_INPUT,
PinDirection.INPUT,
PinPullup.DOWN,
{},
)
gpio.io.remove_event_detect(TEST_RASPBERRYPI_GPIO_INTERRUPT_FALLING_INPUT)
gpio.setup_interrupt(
None,
TEST_RASPBERRYPI_GPIO_INTERRUPT_FALLING_INPUT,
InterruptEdge.FALLING,
gpio_testcallback,
300,
)
gpio.set_pin(TEST_RASPBERRYPI_GPIO_INTERRUPT_FALLING_OUTPUT, True) # set output high
    time.sleep(0.1)  # wait for the callback to run
assert interrupt_count == 0 # should not happen on rising edge
gpio.set_pin(TEST_RASPBERRYPI_GPIO_INTERRUPT_FALLING_OUTPUT, False) # set output low
    time.sleep(0.1)  # wait for the callback to run
    assert interrupt_count == 1  # interrupt fires on the falling edge
def test_raspberrypi_interrupt_trigger_both_single():
# test for rising and falling edge configuration
gpio.setup_pin(
TEST_RASPBERRYPI_GPIO_INTERRUPT_BOTH_OUTPUT,
PinDirection.OUTPUT,
None,
{"initial": "low"},
)
gpio.setup_pin(
TEST_RASPBERRYPI_GPIO_INTERRUPT_BOTH_INPUT, PinDirection.INPUT, PinPullup.OFF, {}
)
gpio.io.remove_event_detect(TEST_RASPBERRYPI_GPIO_INTERRUPT_BOTH_INPUT)
gpio.setup_interrupt(
None,
TEST_RASPBERRYPI_GPIO_INTERRUPT_BOTH_INPUT,
InterruptEdge.BOTH,
gpio_testcallback,
)
gpio.set_pin(TEST_RASPBERRYPI_GPIO_INTERRUPT_BOTH_OUTPUT, True) # set output high
    time.sleep(0.2)  # wait for the callback to run
assert interrupt_count == 1 # interrupt on rising edge
gpio.set_pin(TEST_RASPBERRYPI_GPIO_INTERRUPT_BOTH_OUTPUT, False) # set output low
    time.sleep(0.2)  # wait for the callback to run
assert interrupt_count == 2 # interrupt on falling edge
| mit | -6,436,631,038,160,396,000 | 33.652174 | 90 | 0.690763 | false |
aiven/journalpump | journalpump/journalpump.py | 1 | 28684 | # Copyright 2015, Aiven, https://aiven.io/
#
# This file is under the Apache License, Version 2.0.
# See the file `LICENSE` for details.
from . import geohash, statsd
from .daemon import ServiceDaemon
from .senders import (
AWSCloudWatchSender, ElasticsearchSender, FileSender, GoogleCloudLoggingSender, KafkaSender, LogplexSender, RsyslogSender
)
from .senders.base import MAX_KAFKA_MESSAGE_SIZE, SenderInitializationError, Tagged
from .types import GeoIPProtocol
from .util import atomic_replace_file, default_json_serialization
from functools import reduce
from systemd.journal import Reader
from typing import Type, Union
import copy
import datetime
import json
import logging
import re
import select
import systemd.journal
import time
import uuid
GeoIPReader: Union[Type[GeoIPProtocol], None]
try:
from geoip2.database import Reader as GeoIPReader
except ImportError:
GeoIPReader = None
def _convert_uuid(s):
return str(uuid.UUID(s.decode()))
def convert_mon(s): # pylint: disable=unused-argument
return None
def convert_realtime(t):
return int(t) / 1000000.0 # Stock systemd transforms these into datetimes
converters = {
"CODE_LINE": int,
"COREDUMP_TIMESTAMP": convert_realtime,
"MESSAGE_ID": _convert_uuid,
"PRIORITY": int,
"_AUDIT_ID": int,
"_AUDIT_LOGINUID": int,
"_AUDIT_SESSION": int,
"_AUDIT_TYPE": int,
"_BOOT_ID": _convert_uuid,
"_GID": int,
"_MACHINE_ID": _convert_uuid,
"_PID": int,
"_SOURCE_MONOTONIC_TIMESTAMP": convert_mon,
"_SOURCE_REALTIME_TIMESTAMP": convert_realtime,
"__MONOTONIC_TIMESTAMP": convert_mon,
"__REALTIME_TIMESTAMP": convert_realtime,
}
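# Illustrative note (added commentary, not part of the original module): these
# converters turn the raw byte values read from the journal into Python types,
# e.g.
#   _convert_uuid(b"0123456789abcdef0123456789abcdef")
#       -> "01234567-89ab-cdef-0123-456789abcdef"
#   convert_realtime(b"1500000000000000") -> 1500000000.0   (seconds since epoch)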
class JournalObject:
def __init__(self, cursor=None, entry=None):
self.cursor = cursor
self.entry = entry or {}
class PumpReader(Reader):
def convert_entry(self, entry):
"""Faster journal lib _convert_entry replacement"""
output = {}
for key, value in entry.items():
convert = converters.get(key)
if convert is not None:
try:
value = convert(value)
except ValueError:
pass
if isinstance(value, bytes):
try:
value = bytes.decode(value)
except Exception: # pylint: disable=broad-except
pass
output[key] = value
return output
def get_next(self, skip=1):
# pylint: disable=no-member, protected-access
"""Private get_next implementation that doesn't store the cursor since we don't want it"""
if super()._next(skip):
entry = super()._get_all()
if entry:
entry["__REALTIME_TIMESTAMP"] = self._get_realtime()
return JournalObject(cursor=self._get_cursor(), entry=self.convert_entry(entry))
return None
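# Usage sketch (illustrative only, not part of the original code): a PumpReader
# behaves like systemd.journal.Reader but hands back JournalObject instances:
#
#     reader = PumpReader(path="/var/log/journal")
#     jobject = reader.get_next()
#     if jobject and jobject.entry:
#         print(jobject.cursor, jobject.entry.get("MESSAGE"))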
class JournalReader(Tagged):
# config name <--> class mapping
sender_classes = {
"elasticsearch": ElasticsearchSender,
"kafka": KafkaSender,
"logplex": LogplexSender,
"file": FileSender,
"rsyslog": RsyslogSender,
"aws_cloudwatch": AWSCloudWatchSender,
"google_cloud_logging": GoogleCloudLoggingSender,
}
def __init__(
self,
*,
name,
config,
field_filters,
geoip,
stats,
tags=None,
seek_to=None,
msg_buffer_max_length=50000,
searches=None,
initial_position=None
):
Tagged.__init__(self, tags, reader=name)
self.log = logging.getLogger("JournalReader:{}".format(name))
self.name = name
self.msg_buffer_max_length = msg_buffer_max_length
self.initial_position = initial_position
self.read_bytes = 0
self.read_lines = 0
self._last_sent_read_lines = 0
self._last_sent_read_bytes = 0
self._sent_bytes_diff = 0
self._sent_lines_diff = 0
self.last_stats_print_time = time.monotonic()
self.geoip = geoip
self.config = config
self.field_filters = field_filters
self.stats = stats
self.cursor = seek_to
self.registered_for_poll = False
self.journald_reader = None
self.last_journald_create_attempt = 0
self.running = True
self.senders = {}
self._initialized_senders = set()
self.last_stats_send_time = time.monotonic()
self.last_journal_msg_time = time.monotonic()
self.searches = list(self._build_searches(searches))
def create_journald_reader_if_missing(self):
if not self.journald_reader and time.monotonic() - self.last_journald_create_attempt > 2:
self.last_journald_create_attempt = time.monotonic()
self.journald_reader = self.get_reader(seek_to=self.cursor)
def update_poll_registration_status(self, poller):
self.create_journald_reader_if_missing()
if not self.journald_reader:
return
sender_over_limit = any(len(sender.msg_buffer) > self.msg_buffer_max_length for sender in self.senders.values())
if not self.registered_for_poll and not sender_over_limit:
self.log.info(
"Message buffer size under threshold for all senders, starting processing journal for %r", self.name
)
self.register_for_poll(poller)
elif self.registered_for_poll and sender_over_limit:
self.log.info(
"Message buffer size for at least one sender over threshold, stopping processing journal for %r", self.name
)
self.unregister_from_poll(poller)
def register_for_poll(self, poller):
if self.journald_reader:
poller.register(self.journald_reader, self.journald_reader.get_events())
self.registered_for_poll = True
self.log.info("Registered reader %r with fd %r", self.name, self.journald_reader.fileno())
def unregister_from_poll(self, poller):
if self.journald_reader:
poller.unregister(self.journald_reader)
self.registered_for_poll = False
self.log.info("Unregistered reader %r with fd %r", self.name, self.journald_reader.fileno())
def get_resume_cursor(self):
"""Find the sender cursor location where a new JournalReader instance should resume reading from"""
if not self.senders:
self.log.info("Reader has no senders, using reader's resume location")
return self.cursor
for sender_name, sender in self.senders.items():
state = sender.get_state()
cursor = state["sent"]["cursor"]
if cursor is None:
self.log.info("Sender %r needs a full catchup from beginning, resuming from journal start", sender_name)
return None
# TODO: pick oldest sent cursor
self.log.info("Resuming reader from sender's ('%s') position", sender_name)
return cursor
return None
def request_stop(self):
self.running = False
for sender in self.senders.values():
sender.request_stop()
def close(self):
if self.journald_reader:
self.journald_reader.close()
def initialize_senders(self):
configured_senders = self.config.get("senders", {})
for sender_name, sender_config in configured_senders.items():
if sender_name in self._initialized_senders:
continue
try:
sender_class = self.sender_classes[sender_config["output_type"]]
except KeyError as ex:
raise Exception("Unknown sender type {!r}".format(sender_config["output_type"])) from ex
field_filter = None
if sender_config.get("field_filter", None):
field_filter = self.field_filters[sender_config["field_filter"]]
extra_field_values = sender_config.get("extra_field_values", {})
if not isinstance(extra_field_values, dict):
self.log.warning("extra_field_values: %r not a dictionary object, ignoring", extra_field_values)
extra_field_values = {}
try:
sender = sender_class(
config=sender_config,
field_filter=field_filter,
extra_field_values=extra_field_values,
msg_buffer_max_length=self.msg_buffer_max_length,
name=sender_name,
reader=self,
stats=self.stats,
tags=self.make_tags(),
)
except SenderInitializationError:
# If sender init fails, log exception, don't start() the sender
# and don't add it to self.senders dict. A metric about senders that
# failed to start is sent at the end of this method
self.log.exception("Sender %r failed to initialize", sender_name)
else:
self._initialized_senders.add(sender_name)
sender.start()
self.senders[sender_name] = sender
failed_senders = len(configured_senders) - len(self._initialized_senders)
self.stats.gauge("sender.failed_to_start", value=failed_senders, tags=self.make_tags())
def get_state(self):
sender_state = {name: sender.get_state() for name, sender in self.senders.items()}
search_state = {search["name"]: search.get("hits", 0) for search in self.searches}
return {
"cursor": self.cursor,
"searches": search_state,
"senders": sender_state,
"total_lines": self.read_lines,
"total_bytes": self.read_bytes,
}
def inc_line_stats(self, *, journal_lines, journal_bytes):
self.read_bytes += journal_bytes
self.read_lines += journal_lines
now = time.monotonic()
if (now - self.last_stats_send_time) < 10.0:
# Do not send stats too often
return
tags = self.make_tags()
self.stats.gauge("journal.last_read_ago", value=now - self.last_journal_msg_time, tags=tags)
self.stats.gauge("journal.read_lines", value=self.read_lines, tags=tags)
self.stats.gauge("journal.read_bytes", value=self.read_bytes, tags=tags)
self.last_stats_send_time = now
self._sent_lines_diff += self.read_lines - self._last_sent_read_lines
self._sent_bytes_diff += self.read_bytes - self._last_sent_read_bytes
if now - self.last_stats_print_time > 120:
self.log.info("Processed %r journal lines (%r bytes)", self._sent_lines_diff, self._sent_bytes_diff)
self._sent_lines_diff = 0
self._sent_bytes_diff = 0
self.last_stats_print_time = now
self._last_sent_read_lines = self.read_lines
self._last_sent_read_bytes = self.read_bytes
for sender in self.senders.values():
sender.refresh_stats()
def read_next(self):
jobject = next(self.journald_reader)
if jobject.cursor:
self.cursor = jobject.cursor
self.last_journal_msg_time = time.monotonic()
return jobject
def get_reader(self, seek_to=None, reinit=False):
"""Return an initialized reader or None"""
if not reinit and self.journald_reader:
return self.journald_reader
if self.journald_reader:
# Close the existing reader
self.journald_reader.close() # pylint: disable=no-member
self.journald_reader = None
# convert named flags e.g. "SYSTEM" to integer values
journal_flags = self.config.get("journal_flags")
if isinstance(journal_flags, list):
            journal_flags = reduce(
                lambda a, b: a | b,
                [getattr(systemd.journal, flag.strip()) for flag in journal_flags],
            )
try:
self.journald_reader = PumpReader(
files=self.config.get("journal_files"),
flags=journal_flags,
path=self.config.get("journal_path"),
)
except FileNotFoundError as ex:
self.log.warning("journal for %r not available yet: %s: %s", self.name, ex.__class__.__name__, ex)
return None
if seek_to:
self.journald_reader.seek_cursor(seek_to) # pylint: disable=no-member
# Now the cursor points to the last read item, step over it so that we
# do not read the same item twice
self.journald_reader._next() # pylint: disable=protected-access
elif self.initial_position == "tail":
self.journald_reader.seek_tail()
self.journald_reader._next() # pylint: disable=protected-access
elif self.initial_position == "head":
self.journald_reader.seek_head()
elif isinstance(self.initial_position, int):
# Seconds from the current boot time
self.journald_reader.seek_monotonic(self.initial_position)
# Only do the initial seek once when the pump is started for the first time,
# the rest of the seeks will be to the last known cursor position
self.initial_position = None
for unit_to_match in self.config.get("units_to_match", []):
self.journald_reader.add_match(_SYSTEMD_UNIT=unit_to_match)
self.initialize_senders()
return self.journald_reader
def ip_to_geohash(self, tags, args):
"""ip_to_geohash(ip_tag_name,precision) -> Convert IP address to geohash"""
if len(args) > 1:
precision = int(args[1])
else:
precision = 8
ip = tags[args[0]]
res = self.geoip.city(ip)
if not res:
return ""
loc = res.location
return geohash.encode(loc.latitude, loc.longitude, precision) # pylint: disable=no-member
def _build_searches(self, searches):
"""
Pre-generate regex objects and tag value conversion methods for searches
"""
# Example:
# {"name": "service_stop", "tags": {"foo": "bar"}, "search": {"MESSAGE": "Stopped target (?P<target>.+)\\."}}
re_op = re.compile("(?P<func>[a-z_]+)\\((?P<args>[a-z0-9_,]+)\\)")
funcs = {
"ip_to_geohash": self.ip_to_geohash,
}
for search in searches:
search.setdefault("tags", {})
search.setdefault("fields", {})
output = copy.deepcopy(search)
for name, pattern in output["fields"].items():
output["fields"][name] = re.compile(pattern)
for tag, value in search["tags"].items():
if "(" in value or ")" in value:
# Tag uses a method conversion call, e.g. "ip_to_geohash(ip_address,5)"
match = re_op.search(value)
if not match:
                        raise Exception("Invalid tag function in tag value: {!r}".format(value))
func_name = match.groupdict()["func"]
try:
f = funcs[func_name] # pylint: disable=unused-variable
except KeyError as ex:
raise Exception("Unknown tag function {!r} in {!r}".format(func_name, value)) from ex
args = match.groupdict()["args"].split(",") # pylint: disable=unused-variable
def value_func(tags, f=f, args=args): # pylint: disable=undefined-variable
return f(tags, args)
output["tags"][tag] = value_func
yield output
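    # Example search configuration handled above (illustrative, the values are
    # hypothetical): a tag value such as "ip_to_geohash(ip_address,5)" is
    # compiled into a callable, so an entry like
    #     {"name": "ssh_login",
    #      "fields": {"MESSAGE": "Accepted .* from (?P<ip_address>\\S+)"},
    #      "tags": {"location": "ip_to_geohash(ip_address,5)"}}
    # produces a "location" tag computed from the captured ip_address group.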
def perform_searches(self, jobject):
entry = jobject.entry
results = {}
for search in self.searches:
all_match = True
tags = {}
for field, regex in search["fields"].items():
line = entry.get(field, "")
if not line:
all_match = False
break
if isinstance(line, bytes):
try:
line = line.decode("utf-8")
except UnicodeDecodeError:
# best-effort decode failed
all_match = False
break
match = regex.search(line)
if not match:
all_match = False
break
field_values = match.groupdict()
for tag, value in field_values.items():
tags[tag] = value
if not all_match:
continue
# add static tags + possible callables
for tag, value in search.get("tags", {}).items():
if callable(value):
tags[tag] = value(tags)
else:
tags[tag] = value
results[search["name"]] = tags
if self.stats:
self.stats.increase(search["name"], tags=self.make_tags(tags))
search["hits"] = search.get("hits", 0) + 1
return results
class FieldFilter:
def __init__(self, name, config):
self.name = name
self.whitelist = config.get("type", "whitelist") == "whitelist"
self.fields = [f.lstrip("_").lower() for f in config["fields"]]
def filter_fields(self, data):
return {name: val for name, val in data.items() if (name.lstrip("_").lower() in self.fields) is self.whitelist}
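# Illustrative example (added commentary, not part of the original module): a
# whitelist filter keeps only the listed fields, ignoring case and leading
# underscores, e.g.
#     FieldFilter("example", {"fields": ["message", "priority"]}).filter_fields(
#         {"MESSAGE": "hello", "PRIORITY": 6, "_PID": 42})
#     -> {"MESSAGE": "hello", "PRIORITY": 6}
# while a "blacklist" type filter would instead drop those fields and keep the rest.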
class JournalObjectHandler:
def __init__(self, jobject, reader, pump):
self.error_reported = False
self.jobject = jobject
self.json_objects = {}
self.log = logging.getLogger(self.__class__.__name__)
self.pump = pump
self.reader = reader
def process(self):
new_entry = {}
for key, value in self.jobject.entry.items():
if isinstance(value, bytes):
new_entry[key.lstrip("_")] = repr(value) # value may be bytes in any encoding
else:
new_entry[key.lstrip("_")] = value
if self.jobject.cursor is None:
self.log.debug("No more journal entries to read")
return False
if self.reader.searches:
if not self.reader.perform_searches(self.jobject):
return True
if not self.pump.check_match(new_entry):
return True
for sender in self.reader.senders.values():
json_entry = self._get_or_generate_json(sender.field_filter, sender.extra_field_values, new_entry)
sender.msg_buffer.add_item(item=json_entry, cursor=self.jobject.cursor)
if self.json_objects:
max_bytes = max(len(v) for v in self.json_objects.values())
self.reader.inc_line_stats(journal_bytes=max_bytes, journal_lines=1)
return True
def _get_or_generate_json(self, field_filter, extra_field_values, data):
ff_name = "" if field_filter is None else field_filter.name
if ff_name in self.json_objects:
return self.json_objects[ff_name]
# Always set a timestamp field that gets turned into an ISO timestamp based on REALTIME_TIMESTAMP if available
if "REALTIME_TIMESTAMP" in data:
timestamp = datetime.datetime.utcfromtimestamp(data["REALTIME_TIMESTAMP"])
else:
timestamp = datetime.datetime.utcnow()
data["timestamp"] = timestamp
if extra_field_values:
data.update(extra_field_values)
if field_filter:
data = field_filter.filter_fields(data)
json_entry = json.dumps(data, default=default_json_serialization).encode("utf8")
if len(json_entry) > MAX_KAFKA_MESSAGE_SIZE:
json_entry = self._truncate_long_message(json_entry)
self.json_objects[ff_name] = json_entry
return json_entry
def _truncate_long_message(self, json_entry):
error = "too large message {} bytes vs maximum {} bytes".format(len(json_entry), MAX_KAFKA_MESSAGE_SIZE)
if not self.error_reported:
self.pump.stats.increase(
"journal.read_error", tags=self.pump.make_tags({
"error": "too_long",
"reader": self.reader.name,
})
)
self.log.warning("%s: %s ...", error, json_entry[:1024])
self.error_reported = True
entry = {
"error": error,
"partial_data": json_entry[:1024],
}
return json.dumps(entry, default=default_json_serialization).encode("utf8")
class JournalPump(ServiceDaemon, Tagged):
def __init__(self, config_path):
Tagged.__init__(self)
self.stats = None
self.geoip = None
self.poller = select.poll()
self.readers_active_config = None
self.readers = {}
self.field_filters = {}
self.previous_state = None
self.last_state_save_time = time.monotonic()
ServiceDaemon.__init__(self, config_path=config_path, multi_threaded=True, log_level=logging.INFO)
self.start_time_str = datetime.datetime.utcnow().isoformat()
self.configure_field_filters()
self.configure_readers()
self.stale_readers = set()
def configure_field_filters(self):
filters = self.config.get("field_filters", {})
self.field_filters = {name: FieldFilter(name, config) for name, config in filters.items()}
def configure_readers(self):
new_config = self.config.get("readers", {})
if self.readers_active_config == new_config:
# No changes in readers, no reconfig required
return
# replace old readers with new ones
for reader in self.readers.values():
reader.request_stop()
reader.unregister_from_poll(self.poller)
self.stale_readers.add(reader)
self.readers = {}
state = self.load_state()
for reader_name, reader_config in new_config.items():
reader_state = state.get("readers", {}).get(reader_name, {})
resume_cursor = None
for sender_name, sender in reader_state.get("senders", {}).items():
sender_cursor = sender["sent"]["cursor"]
if sender_cursor is None:
self.log.info("Sender %r for reader %r needs full sync from beginning", sender_name, reader_name)
resume_cursor = None
break
# TODO: pick the OLDEST cursor
resume_cursor = sender_cursor
self.log.info("Reader %r resuming from cursor position: %r", reader_name, resume_cursor)
initial_position = reader_config.get("initial_position")
reader = JournalReader(
name=reader_name,
config=reader_config,
field_filters=self.field_filters,
geoip=self.geoip,
stats=self.stats,
msg_buffer_max_length=self.config.get("msg_buffer_max_length", 50000),
seek_to=resume_cursor,
initial_position=initial_position,
tags=self.make_tags(),
searches=reader_config.get("searches", {}),
)
self.readers[reader_name] = reader
self.readers_active_config = new_config
def handle_new_config(self):
"""Called by ServiceDaemon when config has changed"""
stats = self.config.get("statsd") or {}
self.stats = statsd.StatsClient(
host=stats.get("host"),
port=stats.get("port"),
tags=stats.get("tags"),
)
self.replace_tags(self.config.get("tags", {}))
geoip_db_path = self.config.get("geoip_database")
if geoip_db_path:
self.log.info("Loading GeoIP data from %r", geoip_db_path)
if GeoIPReader is None:
raise ValueError("geoip_database configured but geoip2 module not available")
self.geoip = GeoIPReader(geoip_db_path)
self.configure_field_filters()
self.configure_readers()
def sigterm(self, signum, frame):
try:
self.save_state()
except Exception: # pylint: disable=broad-except
self.log.exception("Saving state at shutdown failed")
for reader in self.readers.values():
reader.request_stop()
reader.unregister_from_poll(self.poller)
self.stale_readers.add(reader)
super().sigterm(signum, frame)
def load_state(self):
file_path = self.get_state_file_path()
if not file_path:
return {}
try:
with open(file_path, "r") as fp:
return json.load(fp)
except FileNotFoundError:
return {}
def check_match(self, entry):
if not self.config.get("match_key"):
return True
if entry.get(self.config["match_key"]) == self.config["match_value"]:
return True
return False
def read_single_message(self, reader):
try:
jobject = reader.read_next()
if jobject is None or jobject.entry is None:
return False
return JournalObjectHandler(jobject, reader, self).process()
except StopIteration:
self.log.debug("No more journal entries to read")
return False
except Exception as ex: # pylint: disable=broad-except
self.log.exception("Unexpected exception while handling entry for %s", reader.name)
self.stats.unexpected_exception(ex=ex, where="mainloop", tags=self.make_tags({"app": "journalpump"}))
time.sleep(0.5)
return False
def read_all_available_messages(self, reader, hits):
lines = 0
while self.read_single_message(reader):
lines += 1
for search in reader.searches:
hits[search["name"]] = search.get("hits", 0)
return lines
def get_state_file_path(self):
return self.config.get("json_state_file_path")
def save_state(self):
state_file_path = self.get_state_file_path()
if not state_file_path:
return
reader_state = {name: reader.get_state() for name, reader in self.readers.items()}
state_to_save = {
"readers": reader_state,
"start_time": self.start_time_str,
}
if state_to_save != self.previous_state:
with atomic_replace_file(state_file_path) as fp:
json.dump(state_to_save, fp, indent=4, sort_keys=True)
self.previous_state = state_to_save
self.log.debug("Wrote state file: %r", state_to_save)
def _close_stale_readers(self):
while self.stale_readers:
reader = self.stale_readers.pop()
reader.close()
def run(self):
last_stats_time = 0
while self.running:
self._close_stale_readers()
results = self.poller.poll(1000)
hits = {}
lines = 0
for fd, _event in results:
for reader in self.readers.values():
jdr = reader.journald_reader
if not jdr or fd != jdr.fileno():
continue
if jdr.process() == systemd.journal.APPEND:
lines += self.read_all_available_messages(reader, hits)
break
else:
self.log.error("Could not find reader with fd %r", fd)
for reader in self.readers.values():
reader.update_poll_registration_status(self.poller)
if hits and time.monotonic() - last_stats_time > 60.0:
self.log.info("search hits stats: %s", hits)
last_stats_time = time.monotonic()
now = time.monotonic()
if now - self.last_state_save_time > 10.0:
self.save_state()
self.last_state_save_time = now
if not lines:
for reader in self.readers.values():
# Refresh readers so they can send their buffered stats out
reader.inc_line_stats(journal_bytes=0, journal_lines=0)
self.log.debug("No new journal lines received")
self.ping_watchdog()
self._close_stale_readers()
if __name__ == "__main__":
JournalPump.run_exit()
| apache-2.0 | 7,405,992,903,869,244,000 | 36.643045 | 125 | 0.573386 | false |
hofschroeer/shinysdr | shinysdr/test/test_types.py | 1 | 4610 | # Copyright 2013, 2014, 2015 Kevin Reid <[email protected]>
#
# This file is part of ShinySDR.
#
# ShinySDR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ShinySDR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ShinySDR. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division
from twisted.trial import unittest
from shinysdr.types import Constant, Enum, Range
def _testType(self, type_obj, good, bad):
for case in good:
if isinstance(case, tuple):
input_value, output_value = case
else:
input_value = case
output_value = case
self.assertEqual(type_obj(input_value), output_value, msg='for input %r' % (input_value,))
for value in bad:
# pylint: disable=cell-var-from-loop
self.assertRaises(ValueError, lambda: type_obj(value))
class TestConstant(unittest.TestCase):
longMessage = True
def test_serial(self):
self.assertEqual({u'type': u'constant', u'value': 1}, Constant(1).type_to_json())
def test_run(self):
_testType(self,
Constant(1),
[1, 1.0, (None, 1), ('foo', 1)],
[])
class TestEnum(unittest.TestCase):
longMessage = True
def test_strict(self):
_testType(self,
Enum({u'a': u'a', u'b': u'b'}, strict=True),
[(u'a', u'a'), ('a', u'a')],
[u'c', 999])
def test_strict_by_default(self):
_testType(self,
Enum({u'a': u'a', u'b': u'b'}),
[(u'a', u'a'), ('a', u'a')],
[u'c', 999])
def test_lenient(self):
_testType(self,
Enum({u'a': u'a', u'b': u'b'}, strict=False),
[(u'a', u'a'), ('a', u'a'), u'c', (999, u'999')],
[])
class TestRange(unittest.TestCase):
longMessage = True
def test_discrete(self):
_testType(self,
Range([(1, 1), (2, 3), (5, 5)], strict=True, integer=False),
[(0, 1), 1, (1.49, 1), (1.50, 1), (1.51, 2), 2, 2.5, 3, (4, 3), (4.1, 5), 5, (6, 5)],
[])
def test_log_integer(self):
_testType(self,
Range([(1, 32)], strict=True, logarithmic=True, integer=True),
[(0, 1), 1, 2, 4, 32, (2.0, 2), (2.5, 2), (3.5, 4), (33, 32)],
[])
def test_shifted_float(self):
_testType(self,
Range([(3, 4)], strict=True, logarithmic=False, integer=False).shifted_by(-3),
[(-0.5, 0), 0, 0.25, 1, (1.5, 1)],
[])
def test_shifted_integer(self):
_testType(self,
Range([(3, 4)], strict=True, logarithmic=False, integer=True).shifted_by(-3),
[(-0.5, 0), 0, (0.25, 0), 1, (1.5, 1)],
[])
def test_repr(self):
self.assertEqual('Range([(1, 2), (3, 4)], strict=True, logarithmic=False, integer=False)',
repr(Range([(1, 2), (3, 4)])))
self.assertEqual('Range([(1, 2), (3, 4)], strict=False, logarithmic=False, integer=False)',
repr(Range([(1, 2), (3, 4)], strict=False)))
self.assertEqual('Range([(1, 2), (3, 4)], strict=True, logarithmic=True, integer=False)',
repr(Range([(1, 2), (3, 4)], logarithmic=True)))
self.assertEqual('Range([(1, 2), (3, 4)], strict=True, logarithmic=False, integer=True)',
repr(Range([(1, 2), (3, 4)], integer=True)))
def test_equal(self):
self.assertEqual(Range([(1, 2), (3, 4)]),
Range([(1, 2), (3, 4)]))
self.assertEqual(Range([(1, 2), (3, 4)], integer=True, logarithmic=True),
Range([(1, 2), (3, 4)], integer=True, logarithmic=True))
self.assertNotEqual(Range([(1, 2), (3, 4)]),
Range([(0, 2), (3, 4)]))
self.assertNotEqual(Range([(1, 2)]),
Range([(1, 2)], integer=True))
self.assertNotEqual(Range([(1, 2)]),
Range([(1, 2)], logarithmic=True))
self.assertNotEqual(Range([(1, 2)]),
Range([(1, 2)], strict=False))
| gpl-3.0 | -7,255,775,303,882,669,000 | 37.107438 | 99 | 0.52538 | false |
skylifewww/pangolinland | article/models.py | 1 | 11009 | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
# from iosDevCourse.users.models import User
from django.db import models
from embed_video.fields import EmbedVideoField
from django.core.urlresolvers import reverse
from mptt.models import MPTTModel, TreeForeignKey
from ckeditor.fields import RichTextField
from ckeditor_uploader.fields import RichTextUploadingField
import mptt
from mptt.fields import TreeForeignKey
import random
from django.conf import settings
from easy_thumbnails.fields import ThumbnailerImageField
# from content.models import Slide
def make_upload_path(instance, filename, prefix=False):
n1 = random.randint(0, 10000)
n2 = random.randint(0, 10000)
n3 = random.randint(0, 10000)
c = filename.split(".")
filename = str(n1) + "_" + str(n2) + "_" + str(n3) + "." + c[-1]
return u"%s/%s" % (settings.IMAGE_UPLOAD_DIR, filename)
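# Illustrative example (added commentary): make_upload_path(None, "photo.jpg")
# returns something like "<IMAGE_UPLOAD_DIR>/4821_907_3312.jpg" -- the original
# file name is replaced by three random numbers and only the extension is kept.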
# Create your models here.
class Category(MPTTModel):
    name = models.CharField(max_length=250, verbose_name="Category Name", blank=True, default="", unique=True)
parent = TreeForeignKey('self', related_name="children", blank=True, null=True, db_index=True, verbose_name="Parent class")
published = models.BooleanField(verbose_name="Published", blank=True, default="")
ordering = models.IntegerField(verbose_name="Ordering", default=0, blank=True, null=True)
def get_slides(self):
return Slide.objects.filter(category=self)
class Meta:
db_table = "category"
verbose_name = "Category"
verbose_name_plural = "Categories"
ordering = ('tree_id','level')
def __str__(self):
return self.name
class MPTTMeta:
# level_attr = 'mptt_level'
order_insertion_by = ['name']
mptt.register(Category, order_insertion_by=['name'])
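# Illustrative example (added commentary, hypothetical data): categories form an
# MPTT tree through the "parent" field, e.g.
#     root = Category.objects.create(name="Articles", published=True)
#     child = Category.objects.create(name="News", parent=root, published=True)
# Note that Category.get_slides() refers to a Slide model whose import is
# commented out above, so it only works once that import is restored.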
class Creator(MPTTModel):
slug = models.CharField(max_length=250, blank=True, verbose_name="Url")
name = models.CharField(max_length=200, verbose_name="Creator device", blank=True, default="", unique=True)
parent = TreeForeignKey('self', related_name="children", blank=True, null=True, db_index=True, verbose_name="Parent class")
class Meta:
db_table = "creators"
verbose_name = "Creator"
verbose_name_plural = "Creators"
ordering = ('tree_id', 'level')
def __str__(self):
return self.name
def pic_slug(self):
if self.slug:
return u'<img src="%s" width="70"/>' % self.slug
else:
return '(none)'
pic_slug.short_description = 'Logo Creator'
pic_slug.allow_tags = True
class MPTTMeta:
# level_attr = 'mptt_level'
order_insertion_by = ['name']
mptt.register(Creator, order_insertion_by=['name'])
class Tag(models.Model):
tag_name = models.CharField(max_length=50, verbose_name="Tag Name")
class Meta:
db_table = "tags"
        verbose_name = "tag"
        verbose_name_plural = "tags"
def __unicode__(self):
return self.tag_name
# class Works(models.Model):
# work_creator = models.CharField(max_length=50, verbose_name="creator", blank=True, null=True, default="")
# work_category = TreeForeignKey(Category, related_name="works", verbose_name="Category", default="", blank=True)
# # image = ThumbnailerImageField(upload_to=make_upload_path, blank=True, verbose_name="картинка")
# slug = models.CharField(max_length=250, blank=True, verbose_name="Url")
# short_text = RichTextUploadingField(blank=True, verbose_name="Short text")
# full_text = RichTextUploadingField(blank=True, verbose_name="Full text")
# work_title = models.CharField(max_length=50, verbose_name="Work Title")
# class Meta:
# db_table = "works"
# verbose_name = "works"
# verbose_name_plural = "works"
# def __unicode__(self):
# return self.work_title
# def pic(self):
# if self.image:
# return u'<img src="%s" width="70"/>' % self.image.url
# else:
# return '(none)'
# pic.short_description = u'Большая картинка'
# pic.allow_tags = True
# def pic_slug(self):
# if self.slug:
# return u'<img src="%s" width="70"/>' % self.slug
# else:
# return '(none)'
# pic_slug.short_description = 'work'
# pic_slug.allow_tags = True
class Article(models.Model):
article_title = models.CharField(max_length=250, verbose_name="Article Title")
article_date = models.DateTimeField(verbose_name="Release date")
article_tag = models.ManyToManyField(Tag, related_name="tags", related_query_name="tags", verbose_name="Tags")
# product_works = models.ManyToManyField(Works, related_name="works", related_query_name="works", verbose_name="Works", blank=True, default="")
article_category = TreeForeignKey(Category, related_name="articles", verbose_name="Categories", default="", blank=True)
article_creator = TreeForeignKey(Creator, related_name="creator", max_length=200, verbose_name="Creator", blank=True, default="")
article_video = EmbedVideoField(verbose_name='Video', blank=True, help_text='URL video', null=True)
video_published = models.BooleanField( blank=True, default="")
slug = models.CharField(max_length=250, blank=True, verbose_name="Url")
slogan = models.CharField(max_length=250, verbose_name="Article Slogan")
short_text = RichTextUploadingField(blank=True, verbose_name="Short text")
full_text = RichTextUploadingField(blank=True, verbose_name="Full text")
published = models.BooleanField(verbose_name="Published", blank=True)
published_main = models.BooleanField( blank=True, default="", verbose_name="Published on main page",)
ordering = models.IntegerField(verbose_name="Ordering", default=0, blank=True, null=True)
def __unicode__(self):
        return self.article_title
class Meta:
db_table = 'articles'
verbose_name = "Article"
verbose_name_plural = "Articles"
ordering = ['ordering']
# def pic(self):
# if self.image:
# return u'<img src="%s" width="70"/>' % self.image.url
# else:
# return '(none)'
# pic.short_description = u'Большая картинка'
# pic.allow_tags = True
def pic_slug(self):
if self.slug:
return u'<img src="%s" width="70"/>' % self.slug
else:
return '(none)'
pic_slug.short_description = 'Article image'
pic_slug.allow_tags = True
class MenuItemArticle(models.Model):
category = models.ForeignKey(Category, null=True, blank=True, verbose_name="Category")
name = models.CharField(max_length=200, verbose_name="Name")
slug = models.CharField(max_length=250, blank=True, verbose_name="Url")
published = models.BooleanField(verbose_name="Published")
ordering = models.IntegerField(verbose_name="Ordering", default=0, blank=True, null=True)
def __unicode__(self):
return self.name
class Meta:
db_table = 'menuItemsArticles'
verbose_name_plural = "Menu Items for Articles"
verbose_name = "Menu Item"
ordering = ['ordering']
class Support(models.Model):
title = models.CharField(max_length=250, verbose_name="Support Title")
# date = models.DateTimeField(verbose_name="Release date")
tag = models.ManyToManyField(Tag, related_name="support_tags", related_query_name="support_tags", verbose_name="Tags")
# product_works = models.ManyToManyField(Works, related_name="works", related_query_name="works", verbose_name="Works", blank=True, default="")
category = TreeForeignKey(Category, related_name="supports", verbose_name="Categories", default="", blank=True)
# product_creator = TreeForeignKey(Creator, related_name="creator", max_length=200, verbose_name="Creator", blank=True, default="")
video = EmbedVideoField(verbose_name='Video', blank=True, help_text='URL video', null=True)
video_published = models.BooleanField( blank=True, default="")
slug = models.CharField(max_length=250, blank=True, verbose_name="Url")
slogan = models.CharField(max_length=250, verbose_name="Support Slogan")
short_text = RichTextUploadingField(blank=True, verbose_name="Short text")
full_text = RichTextUploadingField(blank=True, verbose_name="Full text")
published = models.BooleanField(verbose_name="Published", blank=True)
# published_main = models.BooleanField( blank=True, default="", verbose_name="Published on main page",)
ordering = models.IntegerField(verbose_name="Ordering", default=0, blank=True, null=True)
def __unicode__(self):
return self.title
class Meta:
db_table = 'support'
verbose_name = "Support"
verbose_name_plural = "Supports"
ordering = ['ordering']
# def pic(self):
# if self.image:
# return u'<img src="%s" width="70"/>' % self.image.url
# else:
# return '(none)'
# pic.short_description = u'Большая картинка'
# pic.allow_tags = True
def pic_slug(self):
if self.slug:
return u'<img src="%s" width="70"/>' % self.slug
else:
return '(none)'
pic_slug.short_description = 'Support image'
pic_slug.allow_tags = True
# class Slide(models.Model):
# category = TreeForeignKey(Category, related_name="slides_article", verbose_name="Category", default="", blank=True, null=True)
# name = models.CharField(max_length=250, verbose_name="Name")
# product = models.ForeignKey(Product, null=True, blank=True, verbose_name="Product")
# # image = models.ImageField(upload_to=make_upload_path, blank=True, verbose_name="Изображение")
# slug = models.CharField(max_length=250, blank=True, verbose_name="Url pic")
# text1 = RichTextUploadingField(blank=True, verbose_name="Text1")
# text2 = RichTextUploadingField(blank=True, verbose_name="Text2")
# published = models.BooleanField(verbose_name="Published", blank=True)
# published_main = models.BooleanField(verbose_name="Published on main", default="", blank=True)
# ordering = models.IntegerField(verbose_name="Ordering", default=0, blank=True, null=True)
# def __unicode__(self):
# return self.name
# def pic(self):
# if self.image:
# return u'<img src="%s" width="70"/>' % self.image.url
# else:
# return '(none)'
# pic.short_description = u'Большая картинка'
# pic.allow_tags = True
# def pic_slug(self):
# if self.slug:
# return u'<img src="%s" width="70"/>' % self.slug
# else:
# return '(none)'
# pic_slug.short_description = 'Slide'
# pic_slug.allow_tags = True
# class Meta:
# verbose_name_plural = "Slides"
# verbose_name = "Slide"
| mit | 3,099,177,084,353,075,700 | 37.350877 | 147 | 0.647575 | false |
fkie-cad/FACT_core | src/plugins/analysis/file_system_metadata/test/test_file_system_metadata_routes.py | 1 | 6342 | # pylint: disable=invalid-name
from base64 import b64encode
from unittest import TestCase
from flask import Flask
from flask_restx import Api
from helperFunctions.database import ConnectTo
from test.common_helper import create_test_file_object, create_test_firmware, get_config_for_testing
from test.unit.web_interface.rest.conftest import decode_response
from ..code.file_system_metadata import AnalysisPlugin
from ..routes import routes
class DbInterfaceMock:
def __init__(self, config):
self.config = config
self.fw = create_test_firmware()
self.fw.processed_analysis[AnalysisPlugin.NAME] = {'files': {b64_encode('some_file'): {'test_result': 'test_value'}}}
self.fo = create_test_file_object()
self.fo.virtual_file_path['some_uid'] = ['some_uid|{}|/{}'.format(self.fw.uid, 'some_file')]
def get_object(self, uid):
if uid == self.fw.uid:
return self.fw
if uid == 'foo':
return self.fo
if uid == 'bar':
fo = create_test_file_object()
fo.virtual_file_path = {'some_uid': ['a|b|c']}
return fo
return None
def shutdown(self):
pass
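# Note (added for clarity): the virtual_file_path strings built in the mock above
# appear to encode a chain of ancestor uids followed by the file path, e.g.
# "some_uid|<firmware_uid>|/some_file"; get_results_from_parent_fos() relies on
# that layout to map files back to analysis results stored in the parent firmware.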
class TestFileSystemMetadataRoutesStatic(TestCase):
def setUp(self):
self.config = get_config_for_testing()
routes.FsMetadataDbInterface.__bases__ = (DbInterfaceMock,)
def test_get_results_from_parent_fos(self):
fw = create_test_firmware()
fo = create_test_file_object()
file_name = 'folder/file'
encoded_name = b64_encode(file_name)
fw.processed_analysis[AnalysisPlugin.NAME] = {'files': {encoded_name: {'result': 'value'}}}
fo.virtual_file_path['some_uid'] = ['some_uid|{}|/{}'.format(fw.uid, file_name)]
results = {}
routes.FsMetadataRoutesDbInterface.get_results_from_parent_fos(fw, fo, results)
assert results != {}, 'result should not be empty'
assert file_name in results, 'files missing from result'
assert 'parent_uid' in results[file_name], 'parent uid missing in result'
assert 'result' in results[file_name], 'analysis result is missing'
assert results[file_name]['result'] == 'value', 'wrong value of analysis result'
def test_get_results_from_parent_fos__multiple_vfps_in_one_fw(self):
fw = create_test_firmware()
fo = create_test_file_object()
file_names = ['file_a', 'file_b', 'file_c']
fw.processed_analysis[AnalysisPlugin.NAME] = {'files': {b64_encode(f): {'result': 'value'} for f in file_names}}
vfp = fo.virtual_file_path['some_uid'] = []
for f in file_names:
vfp.append('some_uid|{}|/{}'.format(fw.uid, f))
results = {}
routes.FsMetadataRoutesDbInterface.get_results_from_parent_fos(fw, fo, results)
assert results is not None
assert results != {}, 'result should not be empty'
assert len(results) == 3, 'wrong number of results'
assert all(f in results for f in file_names), 'files missing from result'
assert 'result' in results[file_names[0]], 'analysis result is missing'
assert results[file_names[0]]['result'] == 'value', 'wrong value of analysis result'
def test_get_analysis_results_for_included_uid(self):
with ConnectTo(routes.FsMetadataRoutesDbInterface, self.config) as db_interface:
result = db_interface.get_analysis_results_for_included_uid('foo')
assert result is not None
assert result != {}, 'result should not be empty'
assert len(result) == 1, 'wrong number of results'
assert 'some_file' in result, 'files missing from result'
def test_get_analysis_results_for_included_uid__uid_not_found(self):
with ConnectTo(routes.FsMetadataRoutesDbInterface, self.config) as db_interface:
result = db_interface.get_analysis_results_for_included_uid('not_found')
assert result is not None
assert result == {}, 'result should be empty'
def test_get_analysis_results_for_included_uid__parent_not_found(self):
with ConnectTo(routes.FsMetadataRoutesDbInterface, self.config) as db_interface:
result = db_interface.get_analysis_results_for_included_uid('bar')
assert result is not None
assert result == {}, 'result should be empty'
class TestFileSystemMetadataRoutes(TestCase):
def setUp(self):
routes.FrontEndDbInterface = DbInterfaceMock
app = Flask(__name__)
app.config.from_object(__name__)
app.config['TESTING'] = True
app.jinja_env.filters['replace_uid_with_hid'] = lambda x: x
app.jinja_env.filters['nice_unix_time'] = lambda x: x
config = get_config_for_testing()
self.plugin_routes = routes.PluginRoutes(app, config)
self.test_client = app.test_client()
def test_get_analysis_results_of_parent_fo(self):
rv = self.test_client.get('/plugins/file_system_metadata/ajax/{}'.format('foo'))
assert 'test_result' in rv.data.decode()
assert 'test_value' in rv.data.decode()
class TestFileSystemMetadataRoutesRest(TestCase):
def setUp(self):
routes.FrontEndDbInterface = DbInterfaceMock
app = Flask(__name__)
app.config.from_object(__name__)
app.config['TESTING'] = True
config = get_config_for_testing()
api = Api(app)
endpoint, methods = routes.FSMetadataRoutesRest.ENDPOINTS[0]
api.add_resource(
routes.FSMetadataRoutesRest,
endpoint,
methods=methods,
resource_class_kwargs={'config': config}
)
self.test_client = app.test_client()
def test_get_rest(self):
result = decode_response(self.test_client.get('/plugins/file_system_metadata/rest/{}'.format('foo')))
assert AnalysisPlugin.NAME in result
assert 'some_file' in result[AnalysisPlugin.NAME]
assert 'test_result' in result[AnalysisPlugin.NAME]['some_file']
def test_get_rest__no_result(self):
result = decode_response(self.test_client.get('/plugins/file_system_metadata/rest/{}'.format('not_found')))
assert AnalysisPlugin.NAME in result
assert result[AnalysisPlugin.NAME] == {}
def b64_encode(string):
return b64encode(string.encode()).decode()
| gpl-3.0 | 907,264,697,385,160,200 | 39.139241 | 125 | 0.645853 | false |
cuckoobox/cuckoo | cuckoo/data/web/local_settings.py | 1 | 1355 | # Copyright (C) 2013 Claudio Guarnieri.
# Copyright (C) 2014-2017 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import web.errors
# Maximum upload size (10GB, so there's basically no limit).
MAX_UPLOAD_SIZE = 10*1024*1024*1024
# Override default secret key stored in $CWD/web/.secret_key
# Make this unique, and don't share it with anybody.
# SECRET_KEY = "YOUR_RANDOM_KEY"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
ADMINS = (
# ("Your Name", "[email protected]"),
)
MANAGERS = ADMINS
# Allow verbose debug error message in case of application fault.
# It's strongly suggested to set it to False if you are serving the
# web application from a web server front-end (i.e. Apache).
DEBUG = False
DEBUG404 = False
# A list of strings representing the host/domain names that this Django site
# can serve.
# Values in this list can be fully qualified names (e.g. 'www.example.com').
# When DEBUG is True or when running tests, host validation is disabled; any
# host will be accepted. Thus it's usually only necessary to set it in production.
ALLOWED_HOSTS = ["*"]
handler404 = web.errors.handler404
handler500 = web.errors.handler500
| mit | -6,930,075,890,209,785,000 | 33.74359 | 82 | 0.743911 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.1/Lib/textwrap.py | 1 | 15708 | """Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <[email protected]>
__revision__ = "$Id: textwrap.py 67747 2008-12-13 23:20:54Z antoine.pitrou $"
import string, re
__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 1 .. 8 spaces, depending on its position in
its line. If false, each tab is treated as a single character.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
fix_sentence_endings (default: false)
Ensure that sentence-ending punctuation is always followed
by two spaces. Off by default because the algorithm is
(unavoidably) imperfect.
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
break_on_hyphens (default: true)
Allow breaking hyphenated words. If true, wrapping will occur
preferably on whitespaces and right after hyphens part of
compound words.
drop_whitespace (default: true)
Drop leading and trailing whitespace from lines.
"""
unicode_whitespace_trans = {}
uspace = ord(' ')
for x in _whitespace:
unicode_whitespace_trans[ord(x)] = uspace
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
# This less funky little regex just split on recognized spaces. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
wordsep_simple_re = re.compile(r'(\s+)')
# XXX this is not locale- or charset-aware -- string.lowercase
# is US-ASCII only (and therefore English-only)
sentence_end_re = re.compile(r'[a-z]' # lowercase letter
r'[\.\!\?]' # sentence-ending punct.
r'[\"\']?' # optional end-of-quote
r'\Z') # end of chunk
def __init__(self,
width=70,
initial_indent="",
subsequent_indent="",
expand_tabs=True,
replace_whitespace=True,
fix_sentence_endings=False,
break_long_words=True,
drop_whitespace=True,
break_on_hyphens=True):
self.width = width
self.initial_indent = initial_indent
self.subsequent_indent = subsequent_indent
self.expand_tabs = expand_tabs
self.replace_whitespace = replace_whitespace
self.fix_sentence_endings = fix_sentence_endings
self.break_long_words = break_long_words
self.drop_whitespace = drop_whitespace
self.break_on_hyphens = break_on_hyphens
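    # Illustrative example (added commentary, not in the original source): a
    # wrapper for bulleted output could be configured as
    #     TextWrapper(width=40, initial_indent="* ", subsequent_indent="  ")
    # so the first wrapped line starts with "* " and continuation lines are
    # indented by two spaces, both counting towards the 40-column width.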
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _munge_whitespace(self, text):
"""_munge_whitespace(text : string) -> string
Munge whitespace in text: expand tabs and convert all other
whitespace characters to spaces. Eg. " foo\tbar\n\nbaz"
becomes " foo bar baz".
"""
if self.expand_tabs:
text = text.expandtabs()
if self.replace_whitespace:
text = text.translate(self.unicode_whitespace_trans)
return text
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
if break_on_hyphens is True, or in:
'Look,', ' ', 'goof-ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', option!'
otherwise.
"""
if self.break_on_hyphens is True:
chunks = self.wordsep_re.split(text)
else:
chunks = self.wordsep_simple_re.split(text)
chunks = [c for c in chunks if c]
return chunks
def _fix_sentence_endings(self, chunks):
"""_fix_sentence_endings(chunks : [string])
Correct for sentence endings buried in 'chunks'. Eg. when the
original text contains "... foo.\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.
"""
i = 0
pat = self.sentence_end_re
while i < len(chunks)-1:
if chunks[i+1] == " " and pat.search(chunks[i]):
chunks[i+1] = " "
i += 2
else:
i += 1
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(reversed_chunks[-1][:space_left])
reversed_chunks[-1] = reversed_chunks[-1][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
"""
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
# Arrange in reverse order so items can be efficiently popped
        # from a stack of chunks.
chunks.reverse()
while chunks:
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
cur_len = 0
# Figure out which static string will prefix this line.
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
# Maximum width for this line.
width = self.width - len(indent)
# First chunk on line is whitespace -- drop it, unless this
# is the very beginning of the text (ie. no lines started yet).
if self.drop_whitespace and chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
l = len(chunks[-1])
# Can at least squeeze this chunk onto the current line.
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
# Nope, this line is full.
else:
break
# The current line is full, and the next chunk is too big to
# fit on *any* line (not just this one).
if chunks and len(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
# If the last chunk on this line is all whitespace, drop it.
if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
del cur_line[-1]
# Convert current line back to a string and store it in list
# of all lines (return value).
if cur_line:
lines.append(indent + ''.join(cur_line))
return lines
# -- Public interface ----------------------------------------------
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
text = self._munge_whitespace(text)
chunks = self._split(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks)
def fill(self, text):
"""fill(text : string) -> string
Reformat the single paragraph in 'text' to fit in lines of no
more than 'self.width' columns, and return a new string
containing the entire wrapped paragraph.
"""
return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
"""Wrap a single paragraph of text, returning a list of wrapped lines.
Reformat the single paragraph in 'text' so it fits in lines of no
more than 'width' columns, and return a list of wrapped lines. By
default, tabs in 'text' are expanded with string.expandtabs(), and
all other whitespace characters (including newline) are converted to
space. See TextWrapper class for available keyword args to customize
wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
def fill(text, width=70, **kwargs):
"""Fill a single paragraph of text, returning a new string.
Reformat the single paragraph in 'text' to fit in lines of no more
than 'width' columns, and return a new string containing the entire
wrapped paragraph. As with wrap(), tabs are expanded and other
whitespace characters converted to space. See TextWrapper class for
available keyword args to customize wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.fill(text)
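# Illustrative usage of the convenience functions above (added commentary):
#     wrap("The quick brown fox jumped over the lazy dog", width=20)
#         -> ['The quick brown fox', 'jumped over the lazy', 'dog']
# fill() with the same arguments returns those lines joined by newlines.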
# -- Loosely related functionality -------------------------------------
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
def dedent(text):
"""Remove any common leading whitespace from every line in `text`.
This can be used to make triple-quoted strings line up with the left
edge of the display, while still presenting them in the source code
in indented form.
Note that tabs and spaces are both treated as whitespace, but they
are not equal: the lines " hello" and "\thello" are
considered to have no common leading whitespace. (This behaviour is
new in Python 2.5; older versions of this module incorrectly
expanded tabs before searching for common leading whitespace.)
"""
# Look for the longest leading string of spaces and tabs common to
# all lines.
margin = None
text = _whitespace_only_re.sub('', text)
indents = _leading_whitespace_re.findall(text)
for indent in indents:
if margin is None:
margin = indent
# Current line more deeply indented than previous winner:
# no change (previous winner is still on top).
elif indent.startswith(margin):
pass
# Current line consistent with and no deeper than previous winner:
# it's the new winner.
elif margin.startswith(indent):
margin = indent
# Current line and previous winner have no common whitespace:
# there is no margin.
else:
margin = ""
break
# sanity check (testing/debugging only)
if 0 and margin:
for line in text.split("\n"):
assert not line or line.startswith(margin), \
"line = %r, margin = %r" % (line, margin)
if margin:
text = re.sub(r'(?m)^' + margin, '', text)
return text
if __name__ == "__main__":
#print dedent("\tfoo\n\tbar")
#print dedent(" \thello there\n \t how are you?")
print(dedent("Hello there.\n This is indented."))
| mit | -7,864,780,077,585,546,000 | 39.380463 | 80 | 0.592564 | false |
Karosuo/Linux_tools | xls_handlers/xls_sum_venv/lib/python3.6/site-packages/xlsxwriter/chart_column.py | 1 | 3545 | ###############################################################################
#
# ChartColumn - A class for writing the Excel XLSX Column charts.
#
# Copyright 2013-2019, John McNamara, [email protected]
#
from . import chart
class ChartColumn(chart.Chart):
"""
A class for writing the Excel XLSX Column charts.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self, options=None):
"""
Constructor.
"""
super(ChartColumn, self).__init__()
if options is None:
options = {}
self.subtype = options.get('subtype')
if not self.subtype:
self.subtype = 'clustered'
self.horiz_val_axis = 0
if self.subtype == 'percent_stacked':
self.y_axis['defaults']['num_format'] = '0%'
# Set the available data label positions for this chart type.
self.label_position_default = 'outside_end'
self.label_positions = {
'center': 'ctr',
'inside_base': 'inBase',
'inside_end': 'inEnd',
'outside_end': 'outEnd'}
self.set_y_axis({})
###########################################################################
#
# Private API.
#
###########################################################################
def _write_chart_type(self, args):
# Override the virtual superclass method with a chart specific method.
# Write the c:barChart element.
self._write_bar_chart(args)
def _write_bar_chart(self, args):
# Write the <c:barChart> element.
if args['primary_axes']:
series = self._get_primary_axes_series()
else:
series = self._get_secondary_axes_series()
if not len(series):
return
subtype = self.subtype
if subtype == 'percent_stacked':
subtype = 'percentStacked'
# Set a default overlap for stacked charts.
if 'stacked' in self.subtype:
if self.series_overlap_1 is None:
self.series_overlap_1 = 100
self._xml_start_tag('c:barChart')
# Write the c:barDir element.
self._write_bar_dir()
# Write the c:grouping element.
self._write_grouping(subtype)
# Write the c:ser elements.
for data in series:
self._write_ser(data)
# Write the c:gapWidth element.
if args['primary_axes']:
self._write_gap_width(self.series_gap_1)
else:
self._write_gap_width(self.series_gap_2)
# Write the c:overlap element.
if args['primary_axes']:
self._write_overlap(self.series_overlap_1)
else:
self._write_overlap(self.series_overlap_2)
# Write the c:axId elements
self._write_axis_ids(args)
self._xml_end_tag('c:barChart')
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_bar_dir(self):
# Write the <c:barDir> element.
val = 'col'
attributes = [('val', val)]
self._xml_empty_tag('c:barDir', attributes)
def _write_err_dir(self, val):
# Overridden from Chart class since it is not used in Column charts.
pass
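# Example usage (illustrative, not part of the original file): ChartColumn is
# normally created through the Workbook API rather than instantiated directly;
# the filename and cell range below are made up.
#
#   import xlsxwriter
#   workbook = xlsxwriter.Workbook('chart_column_demo.xlsx')
#   worksheet = workbook.add_worksheet()
#   worksheet.write_column('A1', [5, 3, 8, 2])
#   chart = workbook.add_chart({'type': 'column', 'subtype': 'stacked'})
#   chart.add_series({'values': '=Sheet1!$A$1:$A$4'})
#   worksheet.insert_chart('C1', chart)
#   workbook.close()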
| gpl-3.0 | -2,085,466,959,176,181,800 | 26.061069 | 79 | 0.464598 | false |
jf-parent/webbase | {{cookiecutter.project_name}}/jobs/send_email.py | 1 | 34162 | # -*- encoding:utf8 -*-
# patched for python 3.5
import os
import json
import memcache
import requests
import logging
import base64
from hashlib import md5
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s', level=logging.DEBUG)
def send_email(rest_api_id, rest_api_secret, email):
print("Sending email: {email}".format(email = email))
SPApiProxy = PySendPulse(
rest_api_id,
rest_api_secret
)
SPApiProxy.smtp_send_mail(email)
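# Example usage (illustrative): a minimal payload for send_email(). The keys
# checked by PySendPulse.smtp_send_mail below are 'html', 'text', 'subject',
# 'from' and 'to'; the exact shape of 'from'/'to' follows the SendPulse SMTP
# API, and the credentials and addresses here are placeholders.
#
#   email = {
#       'subject': 'Welcome',
#       'html': '<p>Hello from the jobs worker</p>',
#       'text': 'Hello from the jobs worker',
#       'from': {'name': 'Webbase', 'email': '[email protected]'},
#       'to': [{'name': 'New user', 'email': '[email protected]'}],
#   }
#   send_email('REST_API_ID', 'REST_API_SECRET', email)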
class PySendPulse:
""" SendPulse REST API python wrapper
"""
__api_url = "https://api.sendpulse.com"
__user_id = None
__secret = None
__token = None
__token_file_path = "/tmp/"
__token_hash_name = None
__storage_type = "FILE"
__refresh_token = 0
MEMCACHED_VALUE_TIMEOUT = 3600
ALLOWED_STORAGE_TYPES = ['FILE', 'MEMCACHED']
def __init__(self, user_id, secret, storage_type="FILE"):
""" SendPulse API constructor
@param user_id: string REST API ID from SendPulse settings
@param secret: string REST API Secret from SendPulse settings
@param storage_type: string FILE|MEMCACHED
@raise: Exception empty credentials or get token failed
"""
logging.info("Initialization SendPulse REST API Class")
if not user_id or not secret:
raise Exception("Empty ID or SECRET")
self.__user_id = user_id
self.__secret = secret
self.__storage_type = storage_type.upper()
m = md5()
m.update("{}::{}".format(user_id, secret).encode('utf'))
self.__token_hash_name = m.hexdigest()
if self.__storage_type not in self.ALLOWED_STORAGE_TYPES:
logging.warn("Wrong storage type '{}'. Allowed storage types are: {}".format(storage_type, self.ALLOWED_STORAGE_TYPES))
logging.warn("Try to use 'FILE' instead.")
self.__storage_type = 'FILE'
logging.debug("Try to get security token from '{}'".format(self.__storage_type, ))
if self.__storage_type == "MEMCACHED":
mc = memcache.Client(['127.0.0.1:11211'])
self.__token = mc.get(self.__token_hash_name)
else: # file
filepath = "{}{}".format(self.__token_file_path, self.__token_hash_name)
if os.path.isfile(filepath):
with open(filepath, 'rb') as f:
self.__token = f.readline()
else:
logging.error("Can't find file '{}' to read security token.".format(filepath))
logging.debug("Got: '{}'".format(self.__token, ))
if not self.__token and not self.__get_token():
raise Exception("Could not connect to API. Please, check your ID and SECRET")
def __get_token(self):
""" Get new token from API server and store it in storage
@return: boolean
"""
logging.debug("Try to get new token from server")
self.__refresh_token += 1
data = {
"grant_type": "client_credentials",
"client_id": self.__user_id,
"client_secret": self.__secret,
}
response = self.__send_request("oauth/access_token", "POST", data, False)
if response.status_code != 200:
return False
self.__refresh_token = 0
self.__token = response.json()['access_token']
logging.debug("Got: '{}'".format(self.__token, ))
if self.__storage_type == "MEMCACHED":
logging.debug("Try to set token '{}' into 'MEMCACHED'".format(self.__token, ))
mc = memcache.Client(['127.0.0.1:11211'])
mc.set(self.__token_hash_name, self.__token, self.MEMCACHED_VALUE_TIMEOUT)
else:
filepath = "{}{}".format(self.__token_file_path, self.__token_hash_name)
try:
with open(filepath, 'w') as f:
f.write(self.__token)
logging.debug("Set token '{}' into 'FILE' '{}'".format(self.__token, filepath))
except IOError:
logging.warn("Can't create 'FILE' to store security token. Please, check your settings.")
if self.__token:
return True
return False
def __send_request(self, path, method="GET", params=None, use_token=True):
""" Form and send request to API service
        @param path: string API path to call
        @param method: HTTP method GET|POST|PUT|DELETE
        @param params: dict of arguments to send to the server
        @param use_token: boolean whether to use the token or not
@return: HTTP requests library object http://www.python-requests.org/
"""
url = "{}/{}".format(self.__api_url, path)
        method = method.upper()
logging.debug("__send_request method: {} url: '{}' with parameters: {}".format(method, url, params))
if type(params) not in (dict, list):
params = {}
if use_token and self.__token:
headers = {'Authorization': 'Bearer {}'.format(self.__token)}
else:
headers = {}
if method == "POST":
response = requests.post(url, headers=headers, data=params)
elif method == "PUT":
response = requests.put(url, headers=headers, data=params)
elif method == "DELETE":
response = requests.delete(url, headers=headers, data=params)
else:
response = requests.get(url, headers=headers, params=params)
if response.status_code == 401 and self.__refresh_token == 0:
self.__get_token()
return self.__send_request(path, method, params)
elif response.status_code == 404:
logging.warn("404: Sorry, the page you are looking for could not be found.")
logging.debug("Raw_server_response: {}".format(response.text, ))
elif response.status_code == 500:
logging.critical("Whoops, looks like something went wrong on the server. Please contact with out support [email protected].")
else:
try:
logging.debug("Request response: {}".format(response.json(), ))
except:
logging.critical("Raw server response: {}".format(response.text, ))
return response.status_code
return response
def __handle_result(self, data):
""" Process request results
@param data:
@return: dictionary with response message and/or http code
"""
if 'status_code' not in data:
if data.status_code == 200:
logging.debug("Hanle result: {}".format(data.json(), ))
return data.json()
elif data.status_code == 404:
response = {
'is_error': True,
'http_code': data.status_code,
'message': "Sorry, the page you are looking for {} could not be found.".format(data.url, )
}
elif data.status_code == 500:
response = {
'is_error': True,
'http_code': data.status_code,
'message': "Whoops, looks like something went wrong on the server. Please contact with out support [email protected]."
}
else:
response = {
'is_error': True,
'http_code': data.status_code
}
response.update(data.json())
else:
response = {
'is_error': True,
'http_code': data
}
logging.debug("Hanle result: {}".format(response, ))
return {'data': response}
def __handle_error(self, custom_message=None):
""" Process request errors
@param custom_message:
@return: dictionary with response custom error message and/or error code
"""
message = {'is_error': True}
if custom_message is not None:
message['message'] = custom_message
logging.error("Hanle error: {}".format(message, ))
return message
# ------------------------------------------------------------------ #
# BALANCE #
# ------------------------------------------------------------------ #
def get_balance(self, currency=None):
""" Get balance
@param currency: USD, EUR, GBP, UAH, RUR, INR, JPY
@return: dictionary with response message
"""
logging.info("Function call: get_balance")
return self.__handle_result(self.__send_request('balance/{}'.format(currency.upper() if currency else ''), ))
# ------------------------------------------------------------------ #
# ADDRESSBOOKS #
# ------------------------------------------------------------------ #
def add_addressbook(self, addressbook_name):
""" Create addressbook
@param addressbook_name: string name for addressbook
@return: dictionary with response message
"""
logging.info("Function call: create_addressbook: '{}'".format(addressbook_name, ))
return self.__handle_error("Empty AddressBook name") if not addressbook_name else self.__handle_result(self.__send_request('addressbooks', 'POST', {'bookName': addressbook_name}))
def edit_addressbook(self, id, new_addressbook_name):
""" Edit addressbook name
@param id: unsigned int addressbook ID
@param new_addressbook_name: string new name for addressbook
@return: dictionary with response message
"""
logging.info("Function call: edit_addressbook: '{}' with new addressbook name '{}'".format(id, new_addressbook_name))
if not id or not new_addressbook_name:
return self.__handle_error("Empty new name or addressbook id")
return self.__handle_result(self.__send_request('addressbooks/{}'.format(id), 'PUT', {'name': new_addressbook_name}))
def delete_addressbook(self, id):
""" Remove addressbook
@param id: unsigned int addressbook ID
@return: dictionary with response message
"""
logging.info("Function call: remove_addressbook: '{}'".format(id, ))
return self.__handle_error("Empty addressbook id") if not id else self.__handle_result(self.__send_request('addressbooks/{}'.format(id), 'DELETE'))
def get_list_of_addressbooks(self, limit=0, offset=0):
""" Get list of addressbooks
@param limit: unsigned int max limit of records. The max value is 100
@param offset: unsigned int how many records pass before selection
@return: dictionary with response message
"""
logging.info("Function call: get_list_of_addressbooks")
return self.__handle_result(self.__send_request('addressbooks', 'GET', {'limit': limit or 0, 'offset': offset or 0}))
def get_addressbook_info(self, id):
""" Get information about addressbook
@param id: unsigned int addressbook ID
@return: dictionary with response message
"""
logging.info("Function call: get_addressbook_info: '{}'".format(id, ))
return self.__handle_error("Empty addressbook id") if not id else self.__handle_result(self.__send_request('addressbooks/{}'.format(id)))
# ------------------------------------------------------------------ #
# EMAIL ADDRESSES #
# ------------------------------------------------------------------ #
def get_emails_from_addressbook(self, id, limit=0, offset=0):
""" List email addresses from addressbook
@param id: unsigned int addressbook ID
@param limit: unsigned int max limit of records. The max value is 100
@param offset: unsigned int how many records pass before selection
@return: dictionary with response message
"""
logging.info("Function call: get_emails_from_addressbook: '{}'".format(id, ))
return self.__handle_error("Empty addressbook id") if not id else self.__handle_result(self.__send_request('addressbooks/{}/emails'.format(id), 'GET', {'limit': limit or 0, 'offset': offset or 0}))
def add_emails_to_addressbook(self, id, emails):
""" Add new emails to addressbook
@param id: unsigned int addressbook ID
@param emails: list of dictionaries [
{'email': '[email protected]', 'variables': {'varname_1': 'value_1', ..., 'varname_n': 'value_n' {{"}}"}},
{...},
{'email': '[email protected]'{{"}}"}}
]
@return: dictionary with response message
"""
logging.info("Function call: add_emails_to_addressbook into: {}".format(id, ))
if not id or not emails:
self.__handle_error("Empty addressbook id or emails")
try:
emails = json.dumps(emails)
except:
logging.debug("Emails: {}".format(emails))
return self.__handle_error("Emails list can't be converted by JSON library")
return self.__handle_result(self.__send_request('addressbooks/{}/emails'.format(id), 'POST', {'emails': emails}))
def delete_emails_from_addressbook(self, id, emails):
""" Delete email addresses from addressbook
@param id: unsigned int addressbook ID
@param emails: list of emails ['test_1@test_1.com', ..., 'test_n@test_n.com']
@return: dictionary with response message
"""
logging.info("Function call: delete_emails_from_addressbook from: {}".format(id, ))
if not id or not emails:
self.__handle_error("Empty addressbook id or emails")
try:
emails = json.dumps(emails)
except:
logging.debug("Emails: {}".format(emails))
return self.__handle_error("Emails list can't be converted by JSON library")
return self.__handle_result(self.__send_request('addressbooks/{}/emails'.format(id), 'DELETE', {'emails': emails}))
# ------------------------------------------------------------------ #
# EMAIL CAMPAIGNS #
# ------------------------------------------------------------------ #
def get_campaign_cost(self, id):
""" Get cost of campaign based on addressbook
@param id: unsigned int addressbook ID
@return: dictionary with response message
"""
logging.info("Function call: get_campaign_cost: '{}'".format(id, ))
return self.__handle_error("Empty addressbook id") if not id else self.__handle_result(self.__send_request('addressbooks/{}/cost'.format(id)))
def get_list_of_campaigns(self, limit=0, offset=0):
""" Get list of campaigns
@param limit: unsigned int max limit of records. The max value is 100
@param offset: unsigned int how many records pass before selection
@return: dictionary with response message
"""
logging.info("Function call: get_list_of_campaigns")
return self.__handle_result(self.__send_request('campaigns', 'GET', {'limit': limit or 0, 'offset': offset or 0}))
def get_campaign_info(self, id):
""" Get information about campaign
@param id: unsigned int campaign ID
@return: dictionary with response message
"""
logging.info("Function call: get_campaign_info from: {}".format(id, ))
return self.__handle_error("Empty campaign id") if not id else self.__handle_result(self.__send_request('campaigns/{}'.format(id, )))
def get_campaign_stat_by_countries(self, id):
""" Get information about campaign
@param id: unsigned int campaign ID
@return: dictionary with response message
"""
logging.info("Function call: get_campaign_stat_by_countries from: '{}'".format(id, ))
return self.__handle_error("Empty campaign id") if not id else self.__handle_result(self.__send_request('campaigns/{}/countries'.format(id, )))
def get_campaign_stat_by_referrals(self, id):
""" Get campaign statistic by referrals
@param id: unsigned int campaign ID
@return: dictionary with response message
"""
logging.info("Function call: get_campaign_stat_by_referrals from: '{}'".format(id, ))
return self.__handle_error("Empty campaign id") if not id else self.__handle_result(self.__send_request('campaigns/{}/referrals'.format(id, )))
def add_campaign(self, from_email, from_name, subject, body, addressbook_id, campaign_name='', attachments=None):
""" Create new campaign
@param from_email: string senders email
@param from_name: string senders name
@param subject: string campaign title
@param body: string campaign body
@param addressbook_id: unsigned int addressbook ID
@param campaign_name: string campaign name
@param attachments: dictionary with {filename_1: filebody_1, ..., filename_n: filebody_n}
@return: dictionary with response message
"""
if not attachments:
attachments = {}
logging.info("Function call: create_campaign")
if not from_name or not from_email:
            return self.__handle_error('Seems you did not pass all sender data: Email or Name')
elif not subject or not body:
            return self.__handle_error('Seems you did not pass all task data: Title or Body')
elif not addressbook_id:
            return self.__handle_error('Seems you did not pass an addressbook ID')
if not attachments:
attachments = {}
return self.__handle_result(self.__send_request('campaigns', 'POST', {
'sender_name': from_name,
'sender_email': from_email,
'subject': subject,
            'body': base64.b64encode(body.encode('utf')).decode('utf'),
'list_id': addressbook_id,
'name': campaign_name,
'attachments': json.dumps(attachments)
}))
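    # Example usage (illustrative): `proxy` is assumed to be an already
    # constructed PySendPulse instance; sender, subject, body and addressbook
    # id are placeholders.
    #
    #   proxy.add_campaign(from_email='[email protected]',
    #                      from_name='Newsletter', subject='March update',
    #                      body='<h1>News</h1>', addressbook_id=123,
    #                      campaign_name='march-digest')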
def cancel_campaign(self, id):
""" Cancel campaign
@param id: unsigned int campaign ID
@return: dictionary with response message
"""
logging.info("Function call: cancel_campaign : '{}'".format(id, ))
return self.__handle_error("Empty campaign id") if not id else self.__handle_result(self.__send_request('campaigns/{}'.format(id, ), 'DELETE'))
# ------------------------------------------------------------------ #
# EMAIL SENDERS #
# ------------------------------------------------------------------ #
def get_list_of_senders(self):
""" List of all senders
@return: dictionary with response message
"""
logging.info("Function call: get_senders")
return self.__handle_result(self.__send_request('senders'))
def add_sender(self, email, name):
""" Add sender
@param email: string sender from email
@param name: string senders from name
@return: dictionary with response message
"""
logging.info("Function call: add_sender: '{}' '{}'".format(email, name))
if not name or not email:
return self.__handle_error("Seems you passing not all data for sender: Email: '{}' or Name: '{}'".format(email, name))
return self.__handle_result(self.__send_request('senders', 'POST', {'email': email, 'name': name}))
def delete_sender(self, email):
""" Delete sender
@param email: string sender from email
@return: dictionary with response message
"""
logging.info("Function call: delete_sender: '{}'".format(email, ))
return self.__handle_error('Empty sender email') if not email else self.__handle_result(self.__send_request('senders', 'DELETE', {'email': email}))
def activate_sender(self, email, code):
""" Activate new sender
@param email: string sender from email
@param code: string activation code
@return: dictionary with response message
"""
logging.info("Function call: activate_sender '{}' with code '{}'".format(email, code))
if not email or not code:
return self.__handle_error("Empty email '{}' or activation code '{}'".format(email, code))
return self.__handle_result(self.__send_request('senders/{}/code'.format(email, ), 'POST', {'code': code}))
def send_sender_activation_email(self, email):
""" Request email with activation code
@param email: string sender from email
@return: dictionary with response message
"""
logging.info("Function call: send_sender_activation_email for '{}'".format(email, ))
return self.__handle_error('Empty sender email') if not email else self.__handle_result(self.__send_request('senders/{}/code'.format(email, )))
# ------------------------------------------------------------------ #
# EMAILS #
# ------------------------------------------------------------------ #
def get_email_info_from_one_addressbooks(self, id, email):
""" Get information about email address from one addressbook
@param id: unsigned int addressbook ID
@param email: string valid email address
@return: dictionary with response message
"""
logging.info("Function call: get_email_info_from_one_addressbooks from: '{}'".format(id, ))
if not id or not email:
self.__handle_error("Empty addressbook id or email")
return self.__handle_result(self.__send_request('addressbooks/{}/emails/{}'.format(id, email)))
def get_email_info_from_all_addressbooks(self, email):
""" Get global information about email
@param email: string email
@return: dictionary with response message
"""
logging.info("Function call: get_email_info_from_all_addressbooks for '{}'".format(email, ))
return self.__handle_error('Empty email') if not email else self.__handle_result(self.__send_request('emails/{}'.format(email, )))
def delete_email_from_all_addressooks(self, email):
""" Remove email from all addressbooks
@param email: string email
@return: dictionary with response message
"""
logging.info("Function call: delete_email_from_all_addressooks for '{}'".format(email, ))
return self.__handle_error('Empty email') if not email else self.__handle_result(self.__send_request('emails/{}'.format(email, ), 'DELETE'))
def get_email_statistic_by_campaigns(self, email):
""" Get email statistic by all campaigns
@param email: string email
@return: dictionary with response message
"""
logging.info("Function call: get_email_statistic_by_campaigns for '{}'".format(email, ))
return self.__handle_error('Empty email') if not email else self.__handle_result(self.__send_request('emails/{}/campaigns'.format(email, )))
def get_emails_in_blacklist(self, limit=0, offset=0):
""" Get all emails from blacklist
@param limit: unsigned int max limit of records. The max value is 100
@param offset: unsigned int how many records pass before selection
@return: dictionary with response message
"""
logging.info("Function call: get_emails_in_blacklist")
return self.__handle_result(self.__send_request('blacklist', 'GET', {'limit': limit or 0, 'offset': offset or 0}))
def add_email_to_blacklist(self, email, comment=''):
""" Add email to blacklist
@param email: string emails divided by commas 'email_1, ..., email_n'
@param comment: string describing why email added to blacklist
@return: dictionary with response message
"""
logging.info("Function call: add_email_to_blacklist for '{}'".format(email, ))
        return self.__handle_error('Empty email') if not email else self.__handle_result(self.__send_request('blacklist', 'POST', {'emails': base64.b64encode(email.encode('utf')).decode('utf'), 'comment': comment}))
def delete_email_from_blacklist(self, email):
""" Remove emails from blacklist
@param email: string email
@return: dictionary with response message
"""
logging.info("Function call: delete_email_from_blacklist for '{}'".format(email, ))
        return self.__handle_error('Empty email') if not email else self.__handle_result(self.__send_request('blacklist', 'DELETE', {'emails': base64.b64encode(email.encode('utf')).decode('utf')}))
# ------------------------------------------------------------------ #
# SMTP #
# ------------------------------------------------------------------ #
def smtp_get_list_of_emails(self, limit=0, offset=0, date_from=None, date_to=None, sender=None, recipient=None):
""" SMTP: get list of emails
@param limit: unsigned int max limit of records. The max value is 100
@param offset: unsigned int how many records pass before selection
@param date_from: string date for filter in 'YYYY-MM-DD'
@param date_to: string date for filter in 'YYYY-MM-DD'
@param sender: string from email
@param recipient: string for email
@return: dictionary with response message
"""
logging.info("Function call: smtp_get_list_of_emails")
return self.__handle_result(self.__send_request('smtp/emails', 'GET', {
'limit': limit,
'offset': offset,
'from': date_from,
'to': date_to,
'sender': sender,
'recipient': recipient
}))
def smtp_get_email_info_by_id(self, id):
""" Get information about email by ID
@param id: unsigned int email id
@return: dictionary with response message
"""
logging.info("Function call: smtp_get_email_info_by_id for '{}'".format(id, ))
return self.__handle_error('Empty email') if not id else self.__handle_result(self.__send_request('smtp/emails/{}'.format(id, )))
def smtp_add_emails_to_unsubscribe(self, emails):
""" SMTP: add emails to unsubscribe list
@param emails: list of dictionaries [{'email': 'test_1@test_1.com', 'comment': 'comment_1'}, ..., {'email': 'test_n@test_n.com', 'comment': 'comment_n'}]
@return: dictionary with response message
"""
logging.info("Function call: smtp_add_emails_to_unsubscribe")
return self.__handle_error('Empty email') if not emails else self.__handle_result(self.__send_request('smtp/unsubscribe', 'POST', {'emails': json.dumps(emails)}))
def smtp_delete_emails_from_unsubscribe(self, emails):
""" SMTP: remove emails from unsubscribe list
@param emails: list of dictionaries ['test_1@test_1.com', ..., 'test_n@test_n.com']
@return: dictionary with response message
"""
logging.info("Function call: smtp_delete_emails_from_unsubscribe")
return self.__handle_error('Empty email') if not emails else self.__handle_result(self.__send_request('smtp/unsubscribe', 'DELETE', {'emails': json.dumps(emails)}))
def smtp_get_list_of_ip(self):
""" SMTP: get list of IP
@return: dictionary with response message
"""
logging.info("Function call: smtp_get_list_of_ip")
return self.__handle_result(self.__send_request('smtp/ips'))
def smtp_get_list_of_allowed_domains(self):
""" SMTP: get list of allowed domains
@return: dictionary with response message
"""
logging.info("Function call: smtp_get_list_of_allowed_domains")
return self.__handle_result(self.__send_request('smtp/domains'))
def smtp_add_domain(self, email):
""" SMTP: add and verify new domain
@param email: string valid email address on the domain you want to verify. We will send an email message to the specified email address with a verification link.
@return: dictionary with response message
"""
logging.info("Function call: smtp_add_domain")
return self.__handle_error('Empty email') if not email else self.__handle_result(self.__send_request('smtp/domains', 'POST', {'email': email}))
def smtp_verify_domain(self, email):
""" SMTP: verify domain already added domain
@param email: string valid email address on the domain you want to verify. We will send an email message to the specified email address with a verification link.
@return: dictionary with response message
"""
logging.info("Function call: smtp_verify_domain")
return self.__handle_error('Empty email') if not email else self.__handle_result(self.__send_request('smtp/domains/{}'.format(email, )))
def smtp_send_mail(self, email):
""" SMTP: send email
@param email: string valid email address. We will send an email message to the specified email address with a verification link.
@return: dictionary with response message
"""
logging.info("Function call: smtp_send_mail")
if not email.get('html') or not email.get('text'):
return self.__handle_error('Seems we have empty body')
elif not email.get('subject'):
return self.__handle_error('Seems we have empty subject')
elif not email.get('from') or not email.get('to'):
return self.__handle_error("Seems we have empty some credentials 'from': '{}' or 'to': '{}' fields".format(email.get('from'), email.get('to')))
email['html'] = base64.b64encode(email.get('html').encode('utf')).decode('utf')
return self.__handle_result(self.__send_request('smtp/emails', 'POST', {'email': json.dumps(email)}))
# ------------------------------------------------------------------ #
# PUSH #
# ------------------------------------------------------------------ #
def push_get_tasks(self, limit=0, offset=0):
""" PUSH: get list of tasks
@param limit: unsigned int max limit of records. The max value is 100
@param offset: unsigned int how many records pass before selection
@return: dictionary with response message
"""
logging.info("Function call: push_get_tasks")
return self.__handle_result(self.__send_request('push/tasks', 'GET', {'limit': limit or 0, 'offset': offset or 0}))
def push_get_websites(self, limit=0, offset=0):
""" PUSH: get list of websites
@param limit: unsigned int max limit of records. The max value is 100
@param offset: unsigned int how many records pass before selection
@return: dictionary with response message
"""
logging.info("Function call: push_get_websites")
return self.__handle_result(self.__send_request('push/websites', 'GET', {'limit': limit or 0, 'offset': offset or 0}))
def push_count_websites(self):
""" PUSH: get amount of websites
@return: dictionary with response message
"""
logging.info("Function call: push_count_websites")
return self.__handle_result(self.__send_request('push/websites/total', 'GET', {}))
def push_get_variables(self, id):
""" PUSH: get list of all variables for website
@param id: unsigned int website id
@return: dictionary with response message
"""
logging.info("Function call: push_get_variables for {}".format(id))
return self.__handle_result(self.__send_request('push/websites/{}/variables'.format(id), 'GET', {}))
def push_get_subscriptions(self, id, limit=0, offset=0):
""" PUSH: get list of all subscriptions for website
@param limit: unsigned int max limit of records. The max value is 100
@param offset: unsigned int how many records pass before selection
@param id: unsigned int website id
@return: dictionary with response message
"""
logging.info("Function call: push_get_subscriptions for {}".format(id))
return self.__handle_result(self.__send_request('push/websites/{}/subscriptions'.format(id), 'GET', {'limit': limit or 0, 'offset': offset or 0}))
def push_count_subscriptions(self, id):
""" PUSH: get amount of subscriptions for website
@param id: unsigned int website id
@return: dictionary with response message
"""
logging.info("Function call: push_count_subscriptions for {}".format(id))
return self.__handle_result(self.__send_request('push/websites/{}/subscriptions/total'.format(id), 'GET', {}))
def push_set_subscription_state(self, subscription_id, state_value):
""" PUSH: get amount of subscriptions for website
@param subscription_id: unsigned int subscription id
@param state_value: unsigned int state value. Can be 0 or 1
@return: dictionary with response message
"""
logging.info("Function call: push_set_subscription_state for {} to state {}".format(subscription_id, state_value))
return self.__handle_result(self.__send_request('/push/subscriptions/state', 'POST', {'id': subscription_id, 'state': state_value}))
def push_create(self, title, website_id, body, ttl, additional_params={}):
""" PUSH: create new push
@param title: string push title
@param website_id: unsigned int website id
@param body: string push body
@param ttl: unsigned int ttl for push messages
@param additional_params: dictionary additional params for push task
@return: dictionary with response message
"""
data_to_send = {
'title': title,
'website_id': website_id,
'body': body,
'ttl': ttl
}
if additional_params:
data_to_send.update(additional_params)
logging.info("Function call: push_create")
return self.__handle_result(self.__send_request('/push/tasks', 'POST', data_to_send))
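# Example usage (illustrative): credentials are placeholders; storage_type may
# be "FILE" (the default) or "MEMCACHED", as listed in ALLOWED_STORAGE_TYPES.
#
#   proxy = PySendPulse('REST_API_ID', 'REST_API_SECRET', storage_type='MEMCACHED')
#   print(proxy.get_balance('USD'))
#   print(proxy.get_list_of_addressbooks(limit=10))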
| mit | 5,319,612,941,020,626,000 | 45.990371 | 205 | 0.584948 | false |