repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (string, 990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
erwilan/ansible | contrib/inventory/collins.py | 51 | 17969 | #!/usr/bin/env python
"""
Collins external inventory script
=================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
Collins is a hardware asset management system originally developed by
Tumblr for tracking new hardware as it built out its own datacenters. It
exposes a rich API for manipulating and querying one's hardware inventory,
which makes it an ideal 'single point of truth' for driving systems
automation like Ansible. Extensive documentation on Collins, including a quickstart,
API docs, and a full reference manual, can be found here:
http://tumblr.github.io/collins
This script adds support to Ansible for obtaining a dynamic inventory of
assets in your infrastructure, grouping them in Ansible by their useful attributes,
and binding all facts provided by Collins to each host so that they can be used to
drive automation. Some parts of this script were cribbed shamelessly from mdehaan's
Cobbler inventory script.
To use it, copy it to your repo and pass -i <collins script> to the ansible or
ansible-playbook command; if you'd like to use it by default, simply copy collins.ini
to /etc/ansible and this script to /etc/ansible/hosts.
Alongside the options set in collins.ini, there are several environment variables
that will be used instead of the configured values if they are set:
- COLLINS_USERNAME - specifies a username to use for Collins authentication
- COLLINS_PASSWORD - specifies a password to use for Collins authentication
- COLLINS_ASSET_TYPE - specifies a Collins asset type to use during querying;
this can be used to run Ansible automation against different asset classes than
server nodes, such as network switches and PDUs
- COLLINS_CONFIG - specifies an alternative location for collins.ini, defaults to
<location of collins.py>/collins.ini
If errors are encountered during operation, this script will return an exit code of
255; otherwise, it will return an exit code of 0.
Collins attributes are accessible as variables in Ansible via COLLINS['attribute_name'].
Tested against Ansible 1.8.2 and Collins 1.3.0.
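For example, an illustrative one-off run (the credentials and path shown here are
placeholders, not values required by this script):
  COLLINS_USERNAME=deploy COLLINS_PASSWORD=secret \
      ansible -i contrib/inventory/collins.py all -m ping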
"""
# (c) 2014, Steve Salevan <[email protected]>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import argparse
import ConfigParser
import logging
import os
import re
import sys
from time import time
import traceback
try:
import json
except ImportError:
import simplejson as json
from six import iteritems
from six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import open_url
class CollinsDefaults(object):
ASSETS_API_ENDPOINT = '%s/api/assets'
SPECIAL_ATTRIBUTES = set([
'CREATED',
'DELETED',
'UPDATED',
'STATE',
])
LOG_FORMAT = '%(asctime)-15s %(message)s'
class Error(Exception):
pass
class MaxRetriesError(Error):
pass
class CollinsInventory(object):
def __init__(self):
""" Constructs CollinsInventory object and reads all configuration. """
self.inventory = dict() # A list of groups and the hosts in that group
self.cache = dict() # Details about hosts in the inventory
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
logging.basicConfig(format=CollinsDefaults.LOG_FORMAT,
filename=self.log_location)
self.log = logging.getLogger('CollinsInventory')
def _asset_get_attribute(self, asset, attrib):
""" Returns a user-defined attribute from an asset if it exists; otherwise,
returns None. """
if 'ATTRIBS' in asset:
for attrib_block in asset['ATTRIBS'].keys():
if attrib in asset['ATTRIBS'][attrib_block]:
return asset['ATTRIBS'][attrib_block][attrib]
return None
def _asset_has_attribute(self, asset, attrib):
""" Returns whether a user-defined attribute is present on an asset. """
if 'ATTRIBS' in asset:
for attrib_block in asset['ATTRIBS'].keys():
if attrib in asset['ATTRIBS'][attrib_block]:
return True
return False
def run(self):
""" Main execution path """
# Updates cache if cache is not present or has expired.
successful = True
if self.args.refresh_cache:
successful = self.update_cache()
elif not self.is_cache_valid():
successful = self.update_cache()
else:
successful = self.load_inventory_from_cache()
successful &= self.load_cache_from_cache()
data_to_print = ""
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of instances for inventory
data_to_print = self.json_format_dict(self.inventory, self.args.pretty)
else: # default action with no options
data_to_print = self.json_format_dict(self.inventory, self.args.pretty)
print(data_to_print)
return successful
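# Illustrative shape of the JSON printed above (group names and hosts are
# hypothetical): each key is a group created by push() and each value is the
# list of asset identifiers placed in that group, e.g.
#   {"STATE-ALLOCATED": ["web01", "10.0.0.5"], "HARDWARE-PRODUCT-R720": ["10.0.0.5"]}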
def find_assets(self, attributes={}, operation='AND'):
""" Obtains Collins assets matching the provided attributes. """
# Formats asset search query to locate assets matching attributes, using
# the CQL search feature as described here:
# http://tumblr.github.io/collins/recipes.html
attributes_query = ['='.join(attr_pair) for attr_pair in iteritems(attributes)]
query_parameters = {
'details': ['True'],
'operation': [operation],
'query': attributes_query,
'remoteLookup': [str(self.query_remote_dcs)],
'size': [self.results_per_query],
'type': [self.collins_asset_type],
}
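# As an illustration (hypothetical host and attribute): calling find_assets with
# attributes={'HOSTNAME': 'web01'} produces the query pair 'HOSTNAME=web01', and
# the resulting request URL looks roughly like
#   http://collins.example.com/api/assets?details=True&operation=AND&query=HOSTNAME%3Dweb01&page=0&...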
assets = []
cur_page = 0
num_retries = 0
# Locates all assets matching the provided query, exhausting pagination.
while True:
if num_retries == self.collins_max_retries:
raise MaxRetriesError("Maximum of %s retries reached; giving up" % self.collins_max_retries)
query_parameters['page'] = cur_page
query_url = "%s?%s" % (
(CollinsDefaults.ASSETS_API_ENDPOINT % self.collins_host),
urlencode(query_parameters, doseq=True)
)
try:
response = open_url(query_url,
timeout=self.collins_timeout_secs,
url_username=self.collins_username,
url_password=self.collins_password,
force_basic_auth=True)
json_response = json.loads(response.read())
# Adds any assets found to the array of assets.
assets += json_response['data']['Data']
# If we've retrieved all of our assets, breaks out of the loop.
if len(json_response['data']['Data']) == 0:
break
cur_page += 1
num_retries = 0
except:
self.log.error("Error while communicating with Collins, retrying:\n%s" % traceback.format_exc())
num_retries += 1
return assets
def is_cache_valid(self):
""" Determines if the cache files have expired, or if it is still valid """
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_inventory):
return True
return False
def read_settings(self):
""" Reads the settings from the collins.ini file """
config_loc = os.getenv('COLLINS_CONFIG', os.path.dirname(os.path.realpath(__file__)) + '/collins.ini')
config = ConfigParser.SafeConfigParser()
config.read(config_loc)
self.collins_host = config.get('collins', 'host')
self.collins_username = os.getenv('COLLINS_USERNAME', config.get('collins', 'username'))
self.collins_password = os.getenv('COLLINS_PASSWORD', config.get('collins', 'password'))
self.collins_asset_type = os.getenv('COLLINS_ASSET_TYPE', config.get('collins', 'asset_type'))
self.collins_timeout_secs = config.getint('collins', 'timeout_secs')
self.collins_max_retries = config.getint('collins', 'max_retries')
self.results_per_query = config.getint('collins', 'results_per_query')
self.ip_address_index = config.getint('collins', 'ip_address_index')
self.query_remote_dcs = config.getboolean('collins', 'query_remote_dcs')
self.prefer_hostnames = config.getboolean('collins', 'prefer_hostnames')
cache_path = config.get('collins', 'cache_path')
self.cache_path_cache = cache_path + \
'/ansible-collins-%s.cache' % self.collins_asset_type
self.cache_path_inventory = cache_path + \
'/ansible-collins-%s.index' % self.collins_asset_type
self.cache_max_age = config.getint('collins', 'cache_max_age')
log_path = config.get('collins', 'log_path')
self.log_location = log_path + '/ansible-collins.log'
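# A minimal collins.ini sketch covering the settings read above (all values are
# illustrative placeholders):
#   [collins]
#   host = http://localhost:9000
#   username = blake
#   password = admin:first
#   asset_type = SERVER_NODE
#   timeout_secs = 120
#   max_retries = 5
#   results_per_query = 100
#   ip_address_index = 0
#   query_remote_dcs = False
#   prefer_hostnames = True
#   cache_path = /tmp
#   cache_max_age = 600
#   log_path = /tmp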
def parse_cli_args(self):
""" Command line argument processing """
parser = argparse.ArgumentParser(
description='Produces an Ansible Inventory file based on Collins')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to Collins '
'(default: False - use cache files)')
parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print all JSON output')
self.args = parser.parse_args()
def update_cache(self):
""" Make calls to Collins and saves the output in a cache """
self.cache = dict()
self.inventory = dict()
# Locates all server assets from Collins.
try:
server_assets = self.find_assets()
except:
self.log.error("Error while locating assets from Collins:\n%s" % traceback.format_exc())
return False
for asset in server_assets:
# Determines the index to retrieve the asset's IP address either by an
# attribute set on the Collins asset or the pre-configured value.
if self._asset_has_attribute(asset, 'ANSIBLE_IP_INDEX'):
ip_index = self._asset_get_attribute(asset, 'ANSIBLE_IP_INDEX')
try:
ip_index = int(ip_index)
except:
self.log.error(
"ANSIBLE_IP_INDEX attribute on asset %s not an integer: %s", asset,
ip_index)
else:
ip_index = self.ip_address_index
asset['COLLINS'] = {}
# Attempts to locate the asset's primary identifier (hostname or IP address),
# which will be used to index the asset throughout the Ansible inventory.
if self.prefer_hostnames and self._asset_has_attribute(asset, 'HOSTNAME'):
asset_identifier = self._asset_get_attribute(asset, 'HOSTNAME')
elif 'ADDRESSES' not in asset:
self.log.warning("No IP addresses found for asset '%s', skipping" % asset)
continue
elif len(asset['ADDRESSES']) < ip_index + 1:
self.log.warning(
"No IP address found at index %s for asset '%s', skipping",
ip_index, asset)
continue
else:
asset_identifier = asset['ADDRESSES'][ip_index]['ADDRESS']
# Adds an asset index to the Ansible inventory based upon unpacking
# the name of the asset's current STATE from its dictionary.
if 'STATE' in asset['ASSET'] and asset['ASSET']['STATE']:
state_inventory_key = self.to_safe(
'STATE-%s' % asset['ASSET']['STATE']['NAME'])
self.push(self.inventory, state_inventory_key, asset_identifier)
# Indexes asset by all user-defined Collins attributes.
if 'ATTRIBS' in asset:
for attrib_block in asset['ATTRIBS'].keys():
for attrib in asset['ATTRIBS'][attrib_block].keys():
asset['COLLINS'][attrib] = asset['ATTRIBS'][attrib_block][attrib]
attrib_key = self.to_safe('%s-%s' % (attrib, asset['ATTRIBS'][attrib_block][attrib]))
self.push(self.inventory, attrib_key, asset_identifier)
# Indexes asset by all built-in Collins attributes.
for attribute in asset['ASSET'].keys():
if attribute not in CollinsDefaults.SPECIAL_ATTRIBUTES:
attribute_val = asset['ASSET'][attribute]
if attribute_val is not None:
attrib_key = self.to_safe('%s-%s' % (attribute, attribute_val))
self.push(self.inventory, attrib_key, asset_identifier)
# Indexes asset by hardware product information.
if 'HARDWARE' in asset:
if 'PRODUCT' in asset['HARDWARE']['BASE']:
product = asset['HARDWARE']['BASE']['PRODUCT']
if product:
product_key = self.to_safe(
'HARDWARE-PRODUCT-%s' % asset['HARDWARE']['BASE']['PRODUCT'])
self.push(self.inventory, product_key, asset_identifier)
# Indexing now complete, adds the host details to the asset cache.
self.cache[asset_identifier] = asset
try:
self.write_to_cache(self.cache, self.cache_path_cache)
self.write_to_cache(self.inventory, self.cache_path_inventory)
except:
self.log.error("Error while writing to cache:\n%s", traceback.format_exc())
return False
return True
def push(self, dictionary, key, value):
""" Adds a value to a list at a dictionary key, creating the list if it doesn't
exist. """
if key not in dictionary:
dictionary[key] = []
dictionary[key].append(value)
def get_host_info(self):
""" Get variables about a specific host. """
if not self.cache or len(self.cache) == 0:
# Need to load index from cache
self.load_cache_from_cache()
if self.args.host not in self.cache:
# try updating the cache
self.update_cache()
if self.args.host not in self.cache:
# host might not exist anymore
return self.json_format_dict({}, self.args.pretty)
return self.json_format_dict(self.cache[self.args.host], self.args.pretty)
def load_inventory_from_cache(self):
""" Reads the index from the cache file sets self.index """
try:
cache = open(self.cache_path_inventory, 'r')
json_inventory = cache.read()
self.inventory = json.loads(json_inventory)
return True
except:
self.log.error("Error while loading inventory:\n%s",
traceback.format_exc())
self.inventory = {}
return False
def load_cache_from_cache(self):
""" Reads the cache from the cache file sets self.cache """
try:
cache = open(self.cache_path_cache, 'r')
json_cache = cache.read()
self.cache = json.loads(json_cache)
return True
except:
self.log.error("Error while loading host cache:\n%s",
traceback.format_exc())
self.cache = {}
return False
def write_to_cache(self, data, filename):
""" Writes data in JSON format to a specified file. """
json_data = self.json_format_dict(data, self.args.pretty)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def to_safe(self, word):
""" Converts 'bad' characters in a string to underscores so they
can be used as Ansible groups """
return re.sub("[^A-Za-z0-9\-]", "_", word)
def json_format_dict(self, data, pretty=False):
""" Converts a dict to a JSON object and dumps it as a formatted string """
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
if __name__ == '__main__':
inventory = CollinsInventory()
if inventory.run():
sys.exit(0)
else:
sys.exit(-1)
| gpl-3.0 | 7,733,425,111,266,136,000 | 39.746032 | 112 | 0.603985 | false |
Vutshi/qutip | examples/ex_floquet_markov_master_equation.py | 1 | 3601 | #
# Example: Find the floquet modes and quasi energies for a driven system and
# plot the floquet states/quasienergies for one period of the driving.
#
from qutip import *
from pylab import *
import time
def J_cb(omega):
""" Noise spectral density """
return omega
def hamiltonian_t(t, args):
""" evaluate the hamiltonian at time t. """
H0 = args[0]
H1 = args[1]
w = args[2]
return H0 + cos(w * t) * H1
def qubit_integrate(delta, eps0, A, omega, psi0, tlist):
# Hamiltonian
sx = sigmax()
sz = sigmaz()
sm = destroy(2)
H0 = - delta/2.0 * sx - eps0/2.0 * sz
H1 = A/2.0 * sz
#H_args = (H0, H1, omega)
H_args = {'w': omega}
H = [H0, [H1, 'sin(w * t)']]
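# H is written in QuTiP's list format for time-dependent Hamiltonians:
# [H0, [H1, 'sin(w * t)']] represents H(t) = H0 + sin(w*t) * H1, with the
# symbol 'w' taken from the H_args dictionary above.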
# find the propagator for one driving period
T = 2*pi / omega
f_modes_0,f_energies = floquet_modes(H, T, H_args)
c_op = sigmax()
kmax = 1
temp = 25e-3
w_th = temp * (1.38e-23 / 6.626e-34) * 2 * pi * 1e-9
Delta, X, Gamma, A = floquet_master_equation_rates(f_modes_0, f_energies, c_op, H, T, H_args, J_cb, w_th, kmax)
k_idx = 0
for k in range(-kmax,kmax+1, 1):
print "X[",k,"] =\n", X[:,:,k_idx]
k_idx += 1
k_idx = 0
for k in range(-kmax,kmax+1, 1):
print "Delta[",k,"] =\n", Delta[:,:,k_idx]
k_idx += 1
k_idx = 0
for k in range(-kmax,kmax+1, 1):
print "Gamma[",k,"] =\n", Gamma[:,:,k_idx]
k_idx += 1
print "A =\n", A
rho_ss = floquet_master_equation_steadystate(H0, A)
R = floquet_master_equation_tensor(A, f_energies)
print "Floquet-Markov master equation tensor"
print "R =\n", R
print "Floquet-Markov master equation steady state =\n", rho_ss
p_ex_0 = zeros(shape(tlist))
p_ex_1 = zeros(shape(tlist))
e_0 = zeros(shape(tlist))
e_1 = zeros(shape(tlist))
f_modes_table_t = floquet_modes_table(f_modes_0, f_energies, tlist, H, T, H_args)
for idx, t in enumerate(tlist):
f_modes_t = floquet_modes_t_lookup(f_modes_table_t, t, T)
p_ex_0[idx] = expect(sm.dag() * sm, f_modes_t[0])
p_ex_1[idx] = expect(sm.dag() * sm, f_modes_t[1])
#evals = hamiltonian_t(t, H_args).eigenenergies()
evals = qobj_list_evaluate(H, t, H_args).eigenenergies()
e_0[idx] = min(real(evals))
e_1[idx] = max(real(evals))
return p_ex_0, p_ex_1, e_0, e_1, f_energies
#
# set up the calculation: a strongly driven two-level system
# (repeated LZ transitions)
#
delta = 0.2 * 2 * pi # qubit sigma_x coefficient
eps0 = 1.0 * 2 * pi # qubit sigma_z coefficient
A = 2.5 * 2 * pi # sweep rate
psi0 = basis(2,0) # initial state
omega = 1.0 * 2 * pi # driving frequency
T = (2*pi)/omega # driving period
tlist = linspace(0.0, 1 * T, 101)
start_time = time.time()
p_ex_0, p_ex_1, e_0, e_1, f_e = qubit_integrate(delta, eps0, A, omega, psi0, tlist)
print 'dynamics: time elapsed = ' + str(time.time() - start_time)
#
# plot the results
#
figure(figsize=[8,10])
subplot(2,1,1)
plot(tlist, real(p_ex_0), 'b', tlist, real(p_ex_1), 'r')
xlabel('Time ($T$)')
ylabel('Excitation probabilities')
title('Floquet modes')
legend(("Floquet mode 1", "Floquet mode 2"))
subplot(2,1,2)
plot(tlist, real(e_0), 'c', tlist, real(e_1), 'm')
plot(tlist, ones(shape(tlist)) * f_e[0], 'b', tlist, ones(shape(tlist)) * f_e[1], 'r')
xlabel('Time ($T$)')
ylabel('Energy [GHz]')
title('Eigen- and quasi-energies')
legend(("Ground state", "Excited state", "Quasienergy 1", "Quasienergy 2"))
show()
| gpl-3.0 | -4,599,387,329,284,852,700 | 25.873134 | 115 | 0.568453 | false |
thebestpatrick/nda-extractor | bin2csv.py | 1 | 7172 | #!/bin/python3.4
import sys, getopt
import binascii
import time
import math
import datetime
import csv
def get_step_name(s):
if s == 1:
return "CC_Chg"
elif s == 2:
return "CC_Dchg"
# TODO: 3
elif s == 4:
return "Rest"
# TODO: 5, 6
elif s == 7:
return "CCCV_Chg"
# TODO: The rest
else:
return str(s)
# Return a dict containing the relevant data. all nice and pretty like.
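# Summary of the 59-byte record layout decoded by process_byte_stream() below
# (offsets are half-open byte slices; names follow the assignments in the code,
# and fields the code itself marks as uncertain remain uncertain):
#   0:4 record_id, 4:8 jumpto, 8:9 step_id, 9:10 step type code,
#   10:14 time_in_step (s), 14:18 voltage (/10000), 18:22 current (/10000),
#   22:30 'blank' (unknown, possibly temperature), 30:38 charge (/3600000 -> mAh),
#   38:46 energy (/3600000 -> mWh), 46:54 unix timestamp, 54:59 trailing value.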
def process_byte_stream(byte_stream):
curr_dict = {}
# Line ID
line_idb = int.from_bytes(byte_stream[0:4], byteorder='little')
curr_dict['record_id'] = line_idb
# End line ID
# Jumpto
col2 = int.from_bytes(byte_stream[4:8], byteorder='little')
curr_dict['jumpto'] = col2
# end jumpto
# Step ID
sid = int.from_bytes(byte_stream[8:9], byteorder='little')
# If step id is zero, there is funny behavior.
curr_dict['step_id'] = sid
# End Step ID
# Step name? Might be with step ID too. In any case, probably an
# identifier for charge, rest, discharge, etc.
# 4=REST. 1=CC_Chg. 7=CCCV_Chg. 2=CC_DChg.
sjob = int.from_bytes(byte_stream[9:10], byteorder='little')
sjob_name = get_step_name(sjob)
curr_dict['step_name'] = sjob_name
# End step name
# Time in step
tis = int.from_bytes(byte_stream[10:14], byteorder='little')
curr_dict['time_in_step'] = tis
#print(tic)
# end time in step
# Voltage
volts = int.from_bytes(byte_stream[14:18], byteorder='little')
if volts > 0x7FFFFFFF:
volts -= 0x100000000
curr_dict['voltage'] = volts/10000
# End voltage
# Current
current = int.from_bytes(byte_stream[18:22], byteorder='little')
if current > 0x7FFFFFFF:
current -= 0x100000000
curr_dict['current'] = current / 10000
# End Current
# blank? This section seems to be blank, but it might not be?
# By process of elimination, it might be temperature.
blank = int.from_bytes(byte_stream[22:30], byteorder='little')
curr_dict['blank'] = blank
# end blank?
# Charge and Energy
comp1 = int.from_bytes(byte_stream[30:38], byteorder='little')
if comp1 > 0x7FFFFFFF:
comp1 -= 0x100000000
comp2 = int.from_bytes(byte_stream[38:46], byteorder='little')
if comp2 > 0x7FFFFFFF:
comp2 -= 0x100000000
comp1 = comp1 / 3600000
comp2 = comp2 / 3600000
curr_dict['charge_mAh'] = comp1
curr_dict['energy_mWh'] = comp2
# End charge and energy
# Time and date
timestamp = int.from_bytes(byte_stream[46:54], byteorder='little')
newt = datetime.datetime.fromtimestamp(timestamp)
curr_dict['timestamp'] = newt.strftime('%Y-%m-%d %H:%M:%S')
# end time and date
# last 5? silly number. The last one might be an indicator, and the other
# 4 might be a number. Possibly a checksum
last = int.from_bytes(byte_stream[54:59], byteorder='little')
curr_dict['last'] = last
# end
#stuff = []
#for a in byte_stream:
# stuff.append(a)
#print(curr_dict)
# Raw binary available for bugfixing purposes only
raw_bin = str(binascii.hexlify(bytearray(byte_stream)))
curr_dict['RAW_BIN'] = raw_bin
#time.sleep(.1)
return curr_dict
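# Offsets read from the 2304-byte file header by process_header() below
# (the rest of the header is not interpreted here):
#   0:6 magic 'NEWARE', 6:14 start date (YYYY MM DD), 112:142 version string,
#   2091 machine, 2092 channel, 2137:2145 start time (HH MM SS with gaps),
#   2166:2176 name, 2181:2300 comments.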
def process_header(header_bytes):
magic_number = header_bytes[0:6].decode('utf-8')
if magic_number != 'NEWARE':
raise RuntimeError("Magic number wrong. Not valid .nda file")
# Possibly ASCI coding but whatever. This works.
year = header_bytes[6:10].decode('utf-8')
month = header_bytes[10:12].decode('utf-8')
day = header_bytes[12:14].decode('utf-8')
hour = header_bytes[2137:2139].decode('utf-8')
minute = header_bytes[2140:2142].decode('utf-8')
second = header_bytes[2143:2145].decode('utf-8')
version = header_bytes[112:142].decode('utf-8').strip()
name = header_bytes[2166:2176].decode('utf-8').strip('\00')
comments = header_bytes[2181:2300].decode('utf-8').strip('\00')
# Not sure if this is really channel stuff...
machine = int.from_bytes(header_bytes[2091:2092], byteorder='little')
channel = int.from_bytes(header_bytes[2092:2093], byteorder='little')
#ret = {}
ret = {
'year': year, 'month': month, 'day': day, 'hour': hour,
'minute': minute, 'second': second, 'version': version,
'comments': comments, 'machine': machine, 'channel': channel,
'name': name
}
# TODO: find mass or something
return ret
def process_subheader(subheader_bytes):
pass
def dict_to_csv_line(indict, lorder):
csv_line = []
for a in lorder:
if a == 'time_in_step':
seconds = indict.get(a)
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
csv_line.append("%d:%02d:%02d" % (h, m, s))
# FIXME: do a proper handling of these lines, I think they are special
# in some way, so will need special handling. until then, ignore them
elif a == "step_id" and indict.get(a) == 0:
return None
else:
csv_line.append(str(indict.get(a)))
return csv_line
def process_nda(inpath, outpath=':auto:', csv_line_order=['record_id', 'jumpto',
'step_id', 'step_name','time_in_step', 'voltage', 'current', 'blank',
'charge_mAh', 'energy_mWh', 'timestamp']):
header_size = 2304
byte_line = []
line_size = 59
line_number = 0
main_data = False
if outpath == ':auto:':
outpath = inpath + '.csv'
if outpath != ':mem:':
outfile = open(outpath, 'w')
else:
import io
outfile = io.StringIO()
csv_out = csv.writer(outfile, delimiter=',', quotechar="\"")
csv_out.writerow(csv_line_order)
header_data = {}
with open(inpath, "rb") as f:
header_bytes = f.read(header_size)
# TODO: header decoding, including finding a mass
header_data = process_header(header_bytes)
byte = f.read(1)
pos = 0
subheader = b''
while byte:
if not main_data:
local = int.from_bytes(byte, byteorder='little')
if local == 255:
main_data = True
# TODO: Secondary header decoding
#header_data['subheader'] = process_subheader(subheader)
continue
else:
subheader += byte
byte = f.read(1)
continue
line = f.read(line_size)
if line == b'':
break
dict_line = process_byte_stream(line)
csv_line = dict_to_csv_line(dict_line, csv_line_order)
#print(csv_line)
if csv_line:
csv_out.writerow(csv_line)
if outpath == ':mem:':
return outfile, header_data, csv_line
outfile.close()
return outpath, header_data, csv_line
#print(subheader)
if __name__ == "__main__":
print(process_nda(sys.argv[1], sys.argv[2]))
| bsd-2-clause | 704,558,328,830,409,200 | 28.514403 | 82 | 0.574875 | false |
radicalbit/ambari | ambari-agent/conf/windows/service_wrapper.py | 5 | 8057 | '''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import ConfigParser
import os
import optparse
import sys
import win32serviceutil
import win32api
import win32event
import win32service
from ambari_commons.ambari_service import AmbariService, ENV_PYTHON_PATH
from ambari_commons.exceptions import *
from ambari_commons.logging_utils import *
from ambari_commons.os_windows import WinServiceController
from ambari_commons.os_utils import find_in_path
from ambari_agent.AmbariConfig import AmbariConfig, updateConfigServerHostname
from ambari_agent.HeartbeatHandlers import HeartbeatStopHandlers
AMBARI_VERSION_VAR = "AMBARI_VERSION_VAR"
SETUP_ACTION = "setup"
START_ACTION = "start"
STOP_ACTION = "stop"
RESET_ACTION = "reset"
STATUS_ACTION = "status"
DEBUG_ACTION = "debug"
def parse_options():
# parse env cmd
with open(os.path.join(os.getcwd(), "ambari-env.cmd"), "r") as env_cmd:
content = env_cmd.readlines()
for line in content:
if line.startswith("set"):
name, value = line[4:].split("=")
os.environ[name] = value.rstrip()
# checking env variables, and fallback to working dir if no env var was founded
if not os.environ.has_key("AMBARI_AGENT_CONF_DIR"):
os.environ["AMBARI_AGENT_CONF_DIR"] = os.getcwd()
if not os.environ.has_key("AMBARI_AGENT_LOG_DIR"):
os.environ["AMBARI_AGENT_LOG_DIR"] = os.path.join("\\", "var", "log", "ambari-agent")
if not os.path.exists(os.environ["AMBARI_AGENT_LOG_DIR"]):
os.makedirs(os.environ["AMBARI_AGENT_LOG_DIR"])
if not os.environ.has_key("PYTHON_EXE"):
os.environ["PYTHON_EXE"] = find_in_path("python.exe")
class AmbariAgentService(AmbariService):
AmbariService._svc_name_ = "Ambari Agent"
AmbariService._svc_display_name_ = "Ambari Agent"
AmbariService._svc_description_ = "Ambari Agent"
heartbeat_stop_handler = None
# Adds the necessary script dir to the Python's modules path
def _adjustPythonPath(self, current_dir):
iPos = 0
python_path = os.path.join(current_dir, "sbin")
sys.path.insert(iPos, python_path)
# Add the alerts and apscheduler subdirs to the path, for the imports to work correctly without
# having to modify the files in these 2 subdirectories
agent_path = os.path.join(current_dir, "sbin", "ambari_agent")
iPos += 1
sys.path.insert(iPos, agent_path)
for subdir in os.listdir(agent_path):
full_subdir = os.path.join(agent_path, subdir)
iPos += 1
sys.path.insert(iPos, full_subdir)
def SvcDoRun(self):
parse_options()
self.redirect_output_streams()
# Soft dependency on the Windows Time service
ensure_time_service_is_started()
self.heartbeat_stop_handler = HeartbeatStopHandlers(AmbariAgentService._heventSvcStop)
self.ReportServiceStatus(win32service.SERVICE_RUNNING)
from ambari_agent import main
main.main(self.heartbeat_stop_handler)
def _InitOptionsParser(self):
return init_options_parser()
def redirect_output_streams(self):
self._RedirectOutputStreamsToFile(AmbariConfig.getOutFile())
pass
def ensure_time_service_is_started():
ret = WinServiceController.EnsureServiceIsStarted("W32Time")
if 0 != ret:
raise FatalException(-1, "Error starting Windows Time service: " + str(ret))
pass
def ctrlHandler(ctrlType):
AmbariAgentService.DefCtrlCHandler()
return True
#
# Configures the Ambari Agent settings and registers the Windows service.
#
def setup(options):
config = AmbariConfig()
# TODO AMBARI-18733, need to read home_dir to get correct config file.
configFile = config.getConfigFile()
updateConfigServerHostname(configFile, options.host_name)
AmbariAgentService.set_ctrl_c_handler(ctrlHandler)
AmbariAgentService.Install()
#
# Starts the Ambari Agent as a service.
# Start the Agent in normal mode, as a Windows service. If the Ambari Agent is
# not registered as a service, the function fails. By default, only one instance of the service can
# possibly run.
#
def svcstart(options):
(ret, msg) = AmbariAgentService.Start(15)
if 0 != ret:
options.exit_message = msg
pass
#
# Stops the Ambari Agent.
#
def svcstop(options):
(ret, msg) = AmbariAgentService.Stop()
if 0 != ret:
options.exit_message = msg
#
# The Ambari Agent status.
#
def svcstatus(options):
options.exit_message = None
statusStr = AmbariAgentService.QueryStatus()
print "Ambari Agent is " + statusStr
def svcdebug(options):
sys.frozen = 'windows_exe' # Fake py2exe so we can debug
AmbariAgentService.set_ctrl_c_handler(ctrlHandler)
win32serviceutil.HandleCommandLine(AmbariAgentService, options)
def init_options_parser():
parser = optparse.OptionParser(usage="usage: %prog action [options]", )
parser.add_option('-r', '--hostname', dest="host_name", default="localhost",
help="Use specified Ambari server host for registration.")
parser.add_option('-j', '--java-home', dest="java_home", default=None,
help="Use specified java_home. Must be valid on all hosts")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="Print verbose status messages")
parser.add_option("-s", "--silent",
action="store_true", dest="silent", default=False,
help="Silently accepts default prompt values")
parser.add_option('--jdbc-driver', default=None,
help="Specifies the path to the JDBC driver JAR file for the " \
"database type specified with the --jdbc-db option. Used only with --jdbc-db option.",
dest="jdbc_driver")
return parser
#
# Main.
#
def agent_main():
parser = init_options_parser()
(options, args) = parser.parse_args()
options.warnings = []
if len(args) == 0:
print parser.print_help()
parser.error("No action entered")
action = args[0]
possible_args_numbers = [1]
matches = 0
for args_number_required in possible_args_numbers:
matches += int(len(args) == args_number_required)
if matches == 0:
print parser.print_help()
possible_args = ' or '.join(str(x) for x in possible_args_numbers)
parser.error("Invalid number of arguments. Entered: " + str(len(args)) + ", required: " + possible_args)
options.exit_message = "Ambari Agent '%s' completed successfully." % action
try:
if action == SETUP_ACTION:
setup(options)
elif action == START_ACTION:
svcstart(options)
elif action == DEBUG_ACTION:
svcdebug(options)
elif action == STOP_ACTION:
svcstop(options)
elif action == STATUS_ACTION:
svcstatus(options)
else:
parser.error("Invalid action")
if options.warnings:
for warning in options.warnings:
print_warning_msg(warning)
pass
options.exit_message = "Ambari Agent '%s' completed with warnings." % action
pass
except FatalException as e:
if e.reason is not None:
print_error_msg("Exiting with exit code {0}. \nREASON: {1}".format(e.code, e.reason))
sys.exit(e.code)
except NonFatalException as e:
options.exit_message = "Ambari Agent '%s' completed with warnings." % action
if e.reason is not None:
print_warning_msg(e.reason)
if options.exit_message is not None:
print options.exit_message
if __name__ == '__main__':
try:
agent_main()
except (KeyboardInterrupt, EOFError):
print("\nAborting ... Keyboard Interrupt.")
sys.exit(1)
| apache-2.0 | -4,496,453,006,264,822,300 | 30.228682 | 108 | 0.711431 | false |
andrewcbennett/iris | lib/iris/tests/unit/fileformats/grib/save_rules/test__product_definition_template_8_and_11.py | 1 | 8687 | # (C) British Crown Copyright 2013 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for
:func:`iris.fileformats.grib._save_rules._product_definition_template_8_and_11`
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import gribapi
from iris.coords import CellMethod, DimCoord
from cf_units import Unit
from iris.tests import mock
import iris.tests.stock as stock
from iris.fileformats.grib._save_rules import \
_product_definition_template_8_and_11
class TestTypeOfStatisticalProcessing(tests.IrisTest):
def setUp(self):
self.cube = stock.lat_lon_cube()
# Rename cube to avoid warning about unknown discipline/parameter.
self.cube.rename('air_temperature')
coord = DimCoord(23, 'time', bounds=[0, 100],
units=Unit('days since epoch', calendar='standard'))
self.cube.add_aux_coord(coord)
@mock.patch.object(gribapi, 'grib_set')
def test_sum(self, mock_set):
cube = self.cube
cell_method = CellMethod(method='sum', coords=['time'])
cube.add_cell_method(cell_method)
_product_definition_template_8_and_11(cube, mock.sentinel.grib)
mock_set.assert_any_call(mock.sentinel.grib,
"typeOfStatisticalProcessing", 1)
@mock.patch.object(gribapi, 'grib_set')
def test_unrecognised(self, mock_set):
cube = self.cube
cell_method = CellMethod(method='95th percentile', coords=['time'])
cube.add_cell_method(cell_method)
_product_definition_template_8_and_11(cube, mock.sentinel.grib)
mock_set.assert_any_call(mock.sentinel.grib,
"typeOfStatisticalProcessing", 255)
@mock.patch.object(gribapi, 'grib_set')
def test_multiple_cell_method_coords(self, mock_set):
cube = self.cube
cell_method = CellMethod(method='sum',
coords=['time', 'forecast_period'])
cube.add_cell_method(cell_method)
with self.assertRaisesRegexp(ValueError,
'Cannot handle multiple coordinate name'):
_product_definition_template_8_and_11(cube, mock.sentinel.grib)
@mock.patch.object(gribapi, 'grib_set')
def test_cell_method_coord_name_fail(self, mock_set):
cube = self.cube
cell_method = CellMethod(method='mean', coords=['season'])
cube.add_cell_method(cell_method)
with self.assertRaisesRegexp(
ValueError, "Expected a cell method with a coordinate "
"name of 'time'"):
_product_definition_template_8_and_11(cube, mock.sentinel.grib)
class TestTimeCoordPrerequisites(tests.IrisTest):
def setUp(self):
self.cube = stock.lat_lon_cube()
# Rename cube to avoid warning about unknown discipline/parameter.
self.cube.rename('air_temperature')
@mock.patch.object(gribapi, 'grib_set')
def test_multiple_points(self, mock_set):
# Add time coord with multiple points.
coord = DimCoord([23, 24, 25], 'time',
bounds=[[22, 23], [23, 24], [24, 25]],
units=Unit('days since epoch', calendar='standard'))
self.cube.add_aux_coord(coord, 0)
with self.assertRaisesRegexp(
ValueError, 'Expected length one time coordinate'):
_product_definition_template_8_and_11(self.cube,
mock.sentinel.grib)
@mock.patch.object(gribapi, 'grib_set')
def test_no_bounds(self, mock_set):
# Add time coord with no bounds.
coord = DimCoord(23, 'time',
units=Unit('days since epoch', calendar='standard'))
self.cube.add_aux_coord(coord)
with self.assertRaisesRegexp(
ValueError, 'Expected time coordinate with two bounds, '
'got 0 bounds'):
_product_definition_template_8_and_11(self.cube,
mock.sentinel.grib)
@mock.patch.object(gribapi, 'grib_set')
def test_more_than_two_bounds(self, mock_set):
# Add time coord with more than two bounds.
coord = DimCoord(23, 'time', bounds=[21, 22, 23],
units=Unit('days since epoch', calendar='standard'))
self.cube.add_aux_coord(coord)
with self.assertRaisesRegexp(
ValueError, 'Expected time coordinate with two bounds, '
'got 3 bounds'):
_product_definition_template_8_and_11(self.cube,
mock.sentinel.grib)
class TestEndOfOverallTimeInterval(tests.IrisTest):
def setUp(self):
self.cube = stock.lat_lon_cube()
# Rename cube to avoid warning about unknown discipline/parameter.
self.cube.rename('air_temperature')
cell_method = CellMethod(method='sum', coords=['time'])
self.cube.add_cell_method(cell_method)
@mock.patch.object(gribapi, 'grib_set')
def test_default_calendar(self, mock_set):
cube = self.cube
# End bound is 1972-04-26 10:27:07.
coord = DimCoord(23.0, 'time', bounds=[0.452, 20314.452],
units=Unit('hours since epoch'))
cube.add_aux_coord(coord)
grib = mock.sentinel.grib
_product_definition_template_8_and_11(cube, grib)
mock_set.assert_any_call(
grib, "yearOfEndOfOverallTimeInterval", 1972)
mock_set.assert_any_call(
grib, "monthOfEndOfOverallTimeInterval", 4)
mock_set.assert_any_call(
grib, "dayOfEndOfOverallTimeInterval", 26)
mock_set.assert_any_call(
grib, "hourOfEndOfOverallTimeInterval", 10)
mock_set.assert_any_call(
grib, "minuteOfEndOfOverallTimeInterval", 27)
mock_set.assert_any_call(
grib, "secondOfEndOfOverallTimeInterval", 7)
@mock.patch.object(gribapi, 'grib_set')
def test_360_day_calendar(self, mock_set):
cube = self.cube
# End bound is 1972-05-07 10:27:07
coord = DimCoord(23.0, 'time', bounds=[0.452, 20314.452],
units=Unit('hours since epoch', calendar='360_day'))
cube.add_aux_coord(coord)
grib = mock.sentinel.grib
_product_definition_template_8_and_11(cube, grib)
mock_set.assert_any_call(
grib, "yearOfEndOfOverallTimeInterval", 1972)
mock_set.assert_any_call(
grib, "monthOfEndOfOverallTimeInterval", 5)
mock_set.assert_any_call(
grib, "dayOfEndOfOverallTimeInterval", 7)
mock_set.assert_any_call(
grib, "hourOfEndOfOverallTimeInterval", 10)
mock_set.assert_any_call(
grib, "minuteOfEndOfOverallTimeInterval", 27)
mock_set.assert_any_call(
grib, "secondOfEndOfOverallTimeInterval", 7)
class TestNumberOfTimeRange(tests.IrisTest):
@mock.patch.object(gribapi, 'grib_set')
def test_other_cell_methods(self, mock_set):
cube = stock.lat_lon_cube()
# Rename cube to avoid warning about unknown discipline/parameter.
cube.rename('air_temperature')
coord = DimCoord(23, 'time', bounds=[0, 24],
units=Unit('hours since epoch'))
cube.add_aux_coord(coord)
# Add one time cell method and another unrelated one.
cell_method = CellMethod(method='mean', coords=['elephants'])
cube.add_cell_method(cell_method)
cell_method = CellMethod(method='sum', coords=['time'])
cube.add_cell_method(cell_method)
_product_definition_template_8_and_11(cube, mock.sentinel.grib)
mock_set.assert_any_call(mock.sentinel.grib, 'numberOfTimeRange', 1)
if __name__ == "__main__":
tests.main()
| gpl-3.0 | 6,633,216,462,288,367,000 | 40.366667 | 79 | 0.624036 | false |
jscn/django | tests/i18n/test_extraction.py | 3 | 37132 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import io
import os
import re
import shutil
import time
import warnings
from unittest import SkipTest, skipUnless
from django.conf import settings
from django.core import management
from django.core.management import execute_from_command_line
from django.core.management.base import CommandError
from django.core.management.commands.makemessages import \
Command as MakeMessagesCommand
from django.core.management.utils import find_command
from django.test import SimpleTestCase, mock, override_settings
from django.test.testcases import SerializeMixin
from django.test.utils import captured_stderr, captured_stdout
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.six import StringIO
from django.utils.translation import TranslatorCommentWarning
LOCALE = 'de'
has_xgettext = find_command('xgettext')
this_directory = os.path.dirname(upath(__file__))
@skipUnless(has_xgettext, 'xgettext is mandatory for extraction tests')
class ExtractorTests(SerializeMixin, SimpleTestCase):
# makemessages scans the current working directory and writes in the
# locale subdirectory. There aren't any options to control this. As a
# consequence tests can't run in parallel. Since i18n tests run in less
# than 4 seconds, serializing them with SerializeMixin is acceptable.
lockfile = __file__
test_dir = os.path.abspath(os.path.join(this_directory, 'commands'))
PO_FILE = 'locale/%s/LC_MESSAGES/django.po' % LOCALE
def setUp(self):
self._cwd = os.getcwd()
def _rmrf(self, dname):
if os.path.commonprefix([self.test_dir, os.path.abspath(dname)]) != self.test_dir:
return
shutil.rmtree(dname)
def rmfile(self, filepath):
if os.path.exists(filepath):
os.remove(filepath)
def tearDown(self):
os.chdir(self.test_dir)
try:
self._rmrf('locale/%s' % LOCALE)
except OSError:
pass
os.chdir(self._cwd)
def _run_makemessages(self, **options):
os.chdir(self.test_dir)
out = StringIO()
management.call_command('makemessages', locale=[LOCALE], verbosity=2, stdout=out, **options)
output = out.getvalue()
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = fp.read()
return output, po_contents
def _assertPoKeyword(self, keyword, expected_value, haystack, use_quotes=True):
q = '"'
if use_quotes:
expected_value = '"%s"' % expected_value
q = "'"
needle = '%s %s' % (keyword, expected_value)
expected_value = re.escape(expected_value)
return self.assertTrue(re.search('^%s %s' % (keyword, expected_value), haystack, re.MULTILINE),
'Could not find %(q)s%(n)s%(q)s in generated PO file' % {'n': needle, 'q': q})
def assertMsgId(self, msgid, haystack, use_quotes=True):
return self._assertPoKeyword('msgid', msgid, haystack, use_quotes=use_quotes)
def assertMsgIdPlural(self, msgid, haystack, use_quotes=True):
return self._assertPoKeyword('msgid_plural', msgid, haystack, use_quotes=use_quotes)
def assertMsgStr(self, msgstr, haystack, use_quotes=True):
return self._assertPoKeyword('msgstr', msgstr, haystack, use_quotes=use_quotes)
def assertNotMsgId(self, msgid, s, use_quotes=True):
if use_quotes:
msgid = '"%s"' % msgid
msgid = re.escape(msgid)
return self.assertTrue(not re.search('^msgid %s' % msgid, s, re.MULTILINE))
def _assertPoLocComment(self, assert_presence, po_filename, line_number, *comment_parts):
with open(po_filename, 'r') as fp:
po_contents = force_text(fp.read())
if os.name == 'nt':
# #: .\path\to\file.html:123
cwd_prefix = '%s%s' % (os.curdir, os.sep)
else:
# #: path/to/file.html:123
cwd_prefix = ''
path = os.path.join(cwd_prefix, *comment_parts)
parts = [path]
if isinstance(line_number, six.string_types):
line_number = self._get_token_line_number(path, line_number)
if line_number is not None:
parts.append(':%d' % line_number)
needle = ''.join(parts)
pattern = re.compile(r'^\#\:.*' + re.escape(needle), re.MULTILINE)
if assert_presence:
return six.assertRegex(self, po_contents, pattern, '"%s" not found in final .po file.' % needle)
else:
if six.PY3:
return self.assertNotRegex(
po_contents, pattern, '"%s" shouldn\'t be in final .po file.' % needle
)
else:
return self.assertNotRegexpMatches(
po_contents, pattern, '"%s" shouldn\'t be in final .po file.' % needle
)
def _get_token_line_number(self, path, token):
with open(path) as f:
for line, content in enumerate(f, 1):
if token in force_text(content):
return line
self.fail("The token '%s' could not be found in %s, please check the test config" % (token, path))
def assertLocationCommentPresent(self, po_filename, line_number, *comment_parts):
"""
self.assertLocationCommentPresent('django.po', 42, 'dirA', 'dirB', 'foo.py')
verifies that the django.po file has a gettext-style location comment of the form
`#: dirA/dirB/foo.py:42`
(or `#: .\dirA\dirB\foo.py:42` on Windows)
None can be passed for the line_number argument to skip checking of
the :42 suffix part.
A string token can also be passed as line_number, in which case it
will be searched in the template, and its line number will be used.
A msgid is a suitable candidate.
"""
return self._assertPoLocComment(True, po_filename, line_number, *comment_parts)
def assertLocationCommentNotPresent(self, po_filename, line_number, *comment_parts):
"""Check the opposite of assertLocationComment()"""
return self._assertPoLocComment(False, po_filename, line_number, *comment_parts)
def assertRecentlyModified(self, path):
"""
Assert that file was recently modified (modification time was less than 10 seconds ago).
"""
delta = time.time() - os.stat(path).st_mtime
self.assertLess(delta, 10, "%s was recently modified" % path)
def assertNotRecentlyModified(self, path):
"""
Assert that file was not recently modified (modification time was more than 10 seconds ago).
"""
delta = time.time() - os.stat(path).st_mtime
self.assertGreater(delta, 10, "%s wasn't recently modified" % path)
class BasicExtractorTests(ExtractorTests):
def test_comments_extractor(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with io.open(self.PO_FILE, 'r', encoding='utf-8') as fp:
po_contents = fp.read()
self.assertNotIn('This comment should not be extracted', po_contents)
# Comments in templates
self.assertIn('#. Translators: This comment should be extracted', po_contents)
self.assertIn(
"#. Translators: Django comment block for translators\n#. "
"string's meaning unveiled",
po_contents
)
self.assertIn('#. Translators: One-line translator comment #1', po_contents)
self.assertIn('#. Translators: Two-line translator comment #1\n#. continued here.', po_contents)
self.assertIn('#. Translators: One-line translator comment #2', po_contents)
self.assertIn('#. Translators: Two-line translator comment #2\n#. continued here.', po_contents)
self.assertIn('#. Translators: One-line translator comment #3', po_contents)
self.assertIn('#. Translators: Two-line translator comment #3\n#. continued here.', po_contents)
self.assertIn('#. Translators: One-line translator comment #4', po_contents)
self.assertIn('#. Translators: Two-line translator comment #4\n#. continued here.', po_contents)
self.assertIn(
'#. Translators: One-line translator comment #5 -- with '
'non ASCII characters: áéíóúö',
po_contents
)
self.assertIn(
'#. Translators: Two-line translator comment #5 -- with '
'non ASCII characters: áéíóúö\n#. continued here.',
po_contents
)
def test_special_char_extracted(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId("Non-breaking space\xa0:", po_contents)
def test_blocktrans_trimmed(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# should not be trimmed
self.assertNotMsgId('Text with a few line breaks.', po_contents)
# should be trimmed
self.assertMsgId("Again some text with a few line breaks, this time should be trimmed.", po_contents)
# #21406 -- Should adjust for eaten line numbers
self.assertMsgId("Get my line number", po_contents)
self.assertLocationCommentPresent(self.PO_FILE, 'Get my line number', 'templates', 'test.html')
def test_force_en_us_locale(self):
"""Value of locale-munging option used by the command is the right one"""
self.assertTrue(MakeMessagesCommand.leave_locale_alone)
def test_extraction_error(self):
os.chdir(self.test_dir)
msg = (
'Translation blocks must not include other block tags: blocktrans '
'(file %s, line 3)' % os.path.join('templates', 'template_with_error.tpl')
)
with self.assertRaisesMessage(SyntaxError, msg):
management.call_command('makemessages', locale=[LOCALE], extensions=['tpl'], verbosity=0)
# Check that the temporary file was cleaned up
self.assertFalse(os.path.exists('./templates/template_with_error.tpl.py'))
def test_unicode_decode_error(self):
os.chdir(self.test_dir)
shutil.copyfile('./not_utf8.sample', './not_utf8.txt')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'not_utf8.txt'))
out = StringIO()
management.call_command('makemessages', locale=[LOCALE], stdout=out)
self.assertIn("UnicodeDecodeError: skipped file not_utf8.txt in .",
force_text(out.getvalue()))
def test_extraction_warning(self):
"""test xgettext warning about multiple bare interpolation placeholders"""
os.chdir(self.test_dir)
shutil.copyfile('./code.sample', './code_sample.py')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'code_sample.py'))
out = StringIO()
management.call_command('makemessages', locale=[LOCALE], stdout=out)
self.assertIn("code_sample.py:4", force_text(out.getvalue()))
def test_template_message_context_extractor(self):
"""
Ensure that message contexts are correctly extracted for the
{% trans %} and {% blocktrans %} template tags.
Refs #14806.
"""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# {% trans %}
self.assertIn('msgctxt "Special trans context #1"', po_contents)
self.assertMsgId("Translatable literal #7a", po_contents)
self.assertIn('msgctxt "Special trans context #2"', po_contents)
self.assertMsgId("Translatable literal #7b", po_contents)
self.assertIn('msgctxt "Special trans context #3"', po_contents)
self.assertMsgId("Translatable literal #7c", po_contents)
# {% trans %} with a filter
for minor_part in 'abcdefgh': # Iterate from #7.1a to #7.1h template markers
self.assertIn('msgctxt "context #7.1{}"'.format(minor_part), po_contents)
self.assertMsgId('Translatable literal #7.1{}'.format(minor_part), po_contents)
# {% blocktrans %}
self.assertIn('msgctxt "Special blocktrans context #1"', po_contents)
self.assertMsgId("Translatable literal #8a", po_contents)
self.assertIn('msgctxt "Special blocktrans context #2"', po_contents)
self.assertMsgId("Translatable literal #8b-singular", po_contents)
self.assertIn("Translatable literal #8b-plural", po_contents)
self.assertIn('msgctxt "Special blocktrans context #3"', po_contents)
self.assertMsgId("Translatable literal #8c-singular", po_contents)
self.assertIn("Translatable literal #8c-plural", po_contents)
self.assertIn('msgctxt "Special blocktrans context #4"', po_contents)
self.assertMsgId("Translatable literal #8d %(a)s", po_contents)
def test_context_in_single_quotes(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# {% trans %}
self.assertIn('msgctxt "Context wrapped in double quotes"', po_contents)
self.assertIn('msgctxt "Context wrapped in single quotes"', po_contents)
# {% blocktrans %}
self.assertIn('msgctxt "Special blocktrans context wrapped in double quotes"', po_contents)
self.assertIn('msgctxt "Special blocktrans context wrapped in single quotes"', po_contents)
def test_template_comments(self):
"""Template comment tags on the same line of other constructs (#19552)"""
os.chdir(self.test_dir)
# Test detection/end user reporting of old, incorrect templates
# translator comments syntax
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
management.call_command('makemessages', locale=[LOCALE], extensions=['thtml'], verbosity=0)
self.assertEqual(len(ws), 3)
for w in ws:
self.assertTrue(issubclass(w.category, TranslatorCommentWarning))
six.assertRegex(
self, str(ws[0].message),
r"The translator-targeted comment 'Translators: ignored i18n "
r"comment #1' \(file templates[/\\]comments.thtml, line 4\) "
r"was ignored, because it wasn't the last item on the line\."
)
six.assertRegex(
self, str(ws[1].message),
r"The translator-targeted comment 'Translators: ignored i18n "
r"comment #3' \(file templates[/\\]comments.thtml, line 6\) "
r"was ignored, because it wasn't the last item on the line\."
)
six.assertRegex(
self, str(ws[2].message),
r"The translator-targeted comment 'Translators: ignored i18n "
r"comment #4' \(file templates[/\\]comments.thtml, line 8\) "
"was ignored, because it wasn't the last item on the line\."
)
# Now test .po file contents
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('Translatable literal #9a', po_contents)
self.assertNotIn('ignored comment #1', po_contents)
self.assertNotIn('Translators: ignored i18n comment #1', po_contents)
self.assertMsgId("Translatable literal #9b", po_contents)
self.assertNotIn('ignored i18n comment #2', po_contents)
self.assertNotIn('ignored comment #2', po_contents)
self.assertMsgId('Translatable literal #9c', po_contents)
self.assertNotIn('ignored comment #3', po_contents)
self.assertNotIn('ignored i18n comment #3', po_contents)
self.assertMsgId('Translatable literal #9d', po_contents)
self.assertNotIn('ignored comment #4', po_contents)
self.assertMsgId('Translatable literal #9e', po_contents)
self.assertNotIn('ignored comment #5', po_contents)
self.assertNotIn('ignored i18n comment #4', po_contents)
self.assertMsgId('Translatable literal #9f', po_contents)
self.assertIn('#. Translators: valid i18n comment #5', po_contents)
self.assertMsgId('Translatable literal #9g', po_contents)
self.assertIn('#. Translators: valid i18n comment #6', po_contents)
self.assertMsgId('Translatable literal #9h', po_contents)
self.assertIn('#. Translators: valid i18n comment #7', po_contents)
self.assertMsgId('Translatable literal #9i', po_contents)
six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #8')
six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #9')
self.assertMsgId("Translatable literal #9j", po_contents)
def test_makemessages_find_files(self):
"""
Test that find_files only discovers files having the proper extensions.
"""
cmd = MakeMessagesCommand()
cmd.ignore_patterns = ['CVS', '.*', '*~', '*.pyc']
cmd.symlinks = False
cmd.domain = 'django'
cmd.extensions = ['html', 'txt', 'py']
cmd.verbosity = 0
cmd.locale_paths = []
cmd.default_locale_path = os.path.join(self.test_dir, 'locale')
found_files = cmd.find_files(self.test_dir)
found_exts = set([os.path.splitext(tfile.file)[1] for tfile in found_files])
self.assertEqual(found_exts.difference({'.py', '.html', '.txt'}), set())
cmd.extensions = ['js']
cmd.domain = 'djangojs'
found_files = cmd.find_files(self.test_dir)
found_exts = set([os.path.splitext(tfile.file)[1] for tfile in found_files])
self.assertEqual(found_exts.difference({'.js'}), set())
@mock.patch('django.core.management.commands.makemessages.popen_wrapper')
def test_makemessages_gettext_version(self, mocked_popen_wrapper):
# "Normal" output:
mocked_popen_wrapper.return_value = (
"xgettext (GNU gettext-tools) 0.18.1\n"
"Copyright (C) 1995-1998, 2000-2010 Free Software Foundation, Inc.\n"
"License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>\n"
"This is free software: you are free to change and redistribute it.\n"
"There is NO WARRANTY, to the extent permitted by law.\n"
"Written by Ulrich Drepper.\n", '', 0)
cmd = MakeMessagesCommand()
self.assertEqual(cmd.gettext_version, (0, 18, 1))
# Version number with only 2 parts (#23788)
mocked_popen_wrapper.return_value = (
"xgettext (GNU gettext-tools) 0.17\n", '', 0)
cmd = MakeMessagesCommand()
self.assertEqual(cmd.gettext_version, (0, 17))
# Bad version output
mocked_popen_wrapper.return_value = (
"any other return value\n", '', 0)
cmd = MakeMessagesCommand()
with six.assertRaisesRegex(self, CommandError, "Unable to get gettext version. Is it installed?"):
cmd.gettext_version
def test_po_file_encoding_when_updating(self):
"""Update of PO file doesn't corrupt it with non-UTF-8 encoding on Python3+Windows (#23271)"""
BR_PO_BASE = 'locale/pt_BR/LC_MESSAGES/django'
os.chdir(self.test_dir)
shutil.copyfile(BR_PO_BASE + '.pristine', BR_PO_BASE + '.po')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'locale', 'pt_BR', 'LC_MESSAGES', 'django.po'))
management.call_command('makemessages', locale=['pt_BR'], verbosity=0)
self.assertTrue(os.path.exists(BR_PO_BASE + '.po'))
with io.open(BR_PO_BASE + '.po', 'r', encoding='utf-8') as fp:
po_contents = force_text(fp.read())
self.assertMsgStr("Größe", po_contents)
class JavascriptExtractorTests(ExtractorTests):
PO_FILE = 'locale/%s/LC_MESSAGES/djangojs.po' % LOCALE
def test_javascript_literals(self):
os.chdir(self.test_dir)
_, po_contents = self._run_makemessages(domain='djangojs')
self.assertMsgId('This literal should be included.', po_contents)
self.assertMsgId('gettext_noop should, too.', po_contents)
self.assertMsgId('This one as well.', po_contents)
self.assertMsgId(r'He said, \"hello\".', po_contents)
self.assertMsgId("okkkk", po_contents)
self.assertMsgId("TEXT", po_contents)
self.assertMsgId("It's at http://example.com", po_contents)
self.assertMsgId("String", po_contents)
self.assertMsgId("/* but this one will be too */ 'cause there is no way of telling...", po_contents)
self.assertMsgId("foo", po_contents)
self.assertMsgId("bar", po_contents)
self.assertMsgId("baz", po_contents)
self.assertMsgId("quz", po_contents)
self.assertMsgId("foobar", po_contents)
@override_settings(
STATIC_ROOT=os.path.join(this_directory, 'commands', 'static/'),
MEDIA_ROOT=os.path.join(this_directory, 'commands', 'media_root/'))
def test_media_static_dirs_ignored(self):
"""
Regression test for #23583.
"""
_, po_contents = self._run_makemessages(domain='djangojs')
self.assertMsgId("Static content inside app should be included.", po_contents)
self.assertNotMsgId("Content from STATIC_ROOT should not be included", po_contents)
@override_settings(STATIC_ROOT=None, MEDIA_ROOT='')
def test_default_root_settings(self):
"""
Regression test for #23717.
"""
_, po_contents = self._run_makemessages(domain='djangojs')
self.assertMsgId("Static content inside app should be included.", po_contents)
class IgnoredExtractorTests(ExtractorTests):
def test_ignore_directory(self):
out, po_contents = self._run_makemessages(ignore_patterns=[
os.path.join('ignore_dir', '*'),
])
self.assertIn("ignoring directory ignore_dir", out)
self.assertMsgId('This literal should be included.', po_contents)
self.assertNotMsgId('This should be ignored.', po_contents)
def test_ignore_subdirectory(self):
out, po_contents = self._run_makemessages(ignore_patterns=[
'templates/*/ignore.html',
'templates/subdir/*',
])
self.assertIn("ignoring directory subdir", out)
self.assertNotMsgId('This subdir should be ignored too.', po_contents)
def test_ignore_file_patterns(self):
out, po_contents = self._run_makemessages(ignore_patterns=[
'xxx_*',
])
self.assertIn("ignoring file xxx_ignored.html", out)
self.assertNotMsgId('This should be ignored too.', po_contents)
@override_settings(
STATIC_ROOT=os.path.join(this_directory, 'commands', 'static/'),
MEDIA_ROOT=os.path.join(this_directory, 'commands', 'media_root/'))
def test_media_static_dirs_ignored(self):
out, _ = self._run_makemessages()
self.assertIn("ignoring directory static", out)
self.assertIn("ignoring directory media_root", out)
class SymlinkExtractorTests(ExtractorTests):
def setUp(self):
super(SymlinkExtractorTests, self).setUp()
self.symlinked_dir = os.path.join(self.test_dir, 'templates_symlinked')
def tearDown(self):
super(SymlinkExtractorTests, self).tearDown()
os.chdir(self.test_dir)
try:
os.remove(self.symlinked_dir)
except OSError:
pass
os.chdir(self._cwd)
def test_symlink(self):
# On Python < 3.2 os.symlink() exists only on Unix
if hasattr(os, 'symlink'):
if os.path.exists(self.symlinked_dir):
self.assertTrue(os.path.islink(self.symlinked_dir))
else:
                # On Python >= 3.2, os.symlink() always exists but can
                # fail at runtime when the user lacks the needed permissions on
                # Windows versions that support symbolic links (>= 6/Vista).
                # See Python issue 9333 (http://bugs.python.org/issue9333).
                # Skip the test in that case.
try:
os.symlink(os.path.join(self.test_dir, 'templates'), self.symlinked_dir)
except (OSError, NotImplementedError):
raise SkipTest("os.symlink() is available on this OS but can't be used by this user.")
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, symlinks=True)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('This literal should be included.', po_contents)
self.assertIn('templates_symlinked/test.html', po_contents)
class CopyPluralFormsExtractorTests(ExtractorTests):
PO_FILE_ES = 'locale/es/LC_MESSAGES/django.po'
def tearDown(self):
super(CopyPluralFormsExtractorTests, self).tearDown()
os.chdir(self.test_dir)
try:
self._rmrf('locale/es')
except OSError:
pass
os.chdir(self._cwd)
def test_copy_plural_forms(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertIn('Plural-Forms: nplurals=2; plural=(n != 1)', po_contents)
def test_override_plural_forms(self):
"""Ticket #20311."""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=['es'], extensions=['djtpl'], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE_ES))
with io.open(self.PO_FILE_ES, 'r', encoding='utf-8') as fp:
po_contents = fp.read()
found = re.findall(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', po_contents, re.MULTILINE | re.DOTALL)
self.assertEqual(1, len(found))
def test_trans_and_plural_blocktrans_collision(self):
"""
Ensures a correct workaround for the gettext bug when handling a literal
found inside a {% trans %} tag and also in another file inside a
{% blocktrans %} with a plural (#17375).
"""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], extensions=['html', 'djtpl'], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertNotIn("#-#-#-#-# django.pot (PACKAGE VERSION) #-#-#-#-#\\n", po_contents)
self.assertMsgId('First `trans`, then `blocktrans` with a plural', po_contents)
self.assertMsgIdPlural('Plural for a `trans` and `blocktrans` collision case', po_contents)
class NoWrapExtractorTests(ExtractorTests):
def test_no_wrap_enabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_wrap=True)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId(
'This literal should also be included wrapped or not wrapped '
'depending on the use of the --no-wrap option.',
po_contents
)
def test_no_wrap_disabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_wrap=False)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId(
'""\n"This literal should also be included wrapped or not '
'wrapped depending on the "\n"use of the --no-wrap option."',
po_contents,
use_quotes=False
)
class LocationCommentsTests(ExtractorTests):
def test_no_location_enabled(self):
"""Behavior is correct if --no-location switch is specified. See #16903."""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_location=True)
self.assertTrue(os.path.exists(self.PO_FILE))
self.assertLocationCommentNotPresent(self.PO_FILE, None, 'test.html')
def test_no_location_disabled(self):
"""Behavior is correct if --no-location switch isn't specified."""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_location=False)
self.assertTrue(os.path.exists(self.PO_FILE))
# #16903 -- Standard comment with source file relative path should be present
self.assertLocationCommentPresent(self.PO_FILE, 'Translatable literal #6b', 'templates', 'test.html')
def test_location_comments_for_templatized_files(self):
"""
Ensure no leaky paths in comments, e.g. #: path\to\file.html.py:123
Refs #21209/#26341.
"""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('#: templates/test.html.py', po_contents)
self.assertLocationCommentNotPresent(self.PO_FILE, None, '.html.py')
self.assertLocationCommentPresent(self.PO_FILE, 5, 'templates', 'test.html')
class KeepPotFileExtractorTests(ExtractorTests):
POT_FILE = 'locale/django.pot'
def tearDown(self):
super(KeepPotFileExtractorTests, self).tearDown()
os.chdir(self.test_dir)
try:
os.unlink(self.POT_FILE)
except OSError:
pass
os.chdir(self._cwd)
def test_keep_pot_disabled_by_default(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertFalse(os.path.exists(self.POT_FILE))
def test_keep_pot_explicitly_disabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0,
keep_pot=False)
self.assertFalse(os.path.exists(self.POT_FILE))
def test_keep_pot_enabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0,
keep_pot=True)
self.assertTrue(os.path.exists(self.POT_FILE))
class MultipleLocaleExtractionTests(ExtractorTests):
PO_FILE_PT = 'locale/pt/LC_MESSAGES/django.po'
PO_FILE_DE = 'locale/de/LC_MESSAGES/django.po'
LOCALES = ['pt', 'de', 'ch']
def tearDown(self):
super(MultipleLocaleExtractionTests, self).tearDown()
os.chdir(self.test_dir)
for locale in self.LOCALES:
try:
self._rmrf('locale/%s' % locale)
except OSError:
pass
os.chdir(self._cwd)
def test_multiple_locales(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=['pt', 'de'], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE_PT))
self.assertTrue(os.path.exists(self.PO_FILE_DE))
class ExcludedLocaleExtractionTests(ExtractorTests):
LOCALES = ['en', 'fr', 'it']
PO_FILE = 'locale/%s/LC_MESSAGES/django.po'
test_dir = os.path.abspath(os.path.join(this_directory, 'exclude'))
def _set_times_for_all_po_files(self):
"""
Set access and modification times to the Unix epoch time for all the .po files.
"""
for locale in self.LOCALES:
os.utime(self.PO_FILE % locale, (0, 0))
def setUp(self):
super(ExcludedLocaleExtractionTests, self).setUp()
os.chdir(self.test_dir) # ExtractorTests.tearDown() takes care of restoring.
shutil.copytree('canned_locale', 'locale')
self._set_times_for_all_po_files()
self.addCleanup(self._rmrf, os.path.join(self.test_dir, 'locale'))
def test_command_help(self):
with captured_stdout(), captured_stderr():
# `call_command` bypasses the parser; by calling
# `execute_from_command_line` with the help subcommand we
# ensure that there are no issues with the parser itself.
execute_from_command_line(['django-admin', 'help', 'makemessages'])
def test_one_locale_excluded(self):
management.call_command('makemessages', exclude=['it'], stdout=StringIO())
self.assertRecentlyModified(self.PO_FILE % 'en')
self.assertRecentlyModified(self.PO_FILE % 'fr')
self.assertNotRecentlyModified(self.PO_FILE % 'it')
def test_multiple_locales_excluded(self):
management.call_command('makemessages', exclude=['it', 'fr'], stdout=StringIO())
self.assertRecentlyModified(self.PO_FILE % 'en')
self.assertNotRecentlyModified(self.PO_FILE % 'fr')
self.assertNotRecentlyModified(self.PO_FILE % 'it')
def test_one_locale_excluded_with_locale(self):
management.call_command('makemessages', locale=['en', 'fr'], exclude=['fr'], stdout=StringIO())
self.assertRecentlyModified(self.PO_FILE % 'en')
self.assertNotRecentlyModified(self.PO_FILE % 'fr')
self.assertNotRecentlyModified(self.PO_FILE % 'it')
def test_multiple_locales_excluded_with_locale(self):
management.call_command('makemessages', locale=['en', 'fr', 'it'], exclude=['fr', 'it'],
stdout=StringIO())
self.assertRecentlyModified(self.PO_FILE % 'en')
self.assertNotRecentlyModified(self.PO_FILE % 'fr')
self.assertNotRecentlyModified(self.PO_FILE % 'it')
class CustomLayoutExtractionTests(ExtractorTests):
def setUp(self):
super(CustomLayoutExtractionTests, self).setUp()
self.test_dir = os.path.join(this_directory, 'project_dir')
def test_no_locale_raises(self):
os.chdir(self.test_dir)
msg = "Unable to find a locale path to store translations for file"
with self.assertRaisesMessage(management.CommandError, msg):
management.call_command('makemessages', locale=LOCALE, verbosity=0)
@override_settings(
LOCALE_PATHS=[os.path.join(this_directory, 'project_dir', 'project_locale')],
)
def test_project_locale_paths(self):
"""
Test that:
* translations for an app containing a locale folder are stored in that folder
* translations outside of that app are in LOCALE_PATHS[0]
"""
os.chdir(self.test_dir)
self.addCleanup(shutil.rmtree, os.path.join(settings.LOCALE_PATHS[0], LOCALE), True)
self.addCleanup(shutil.rmtree, os.path.join(self.test_dir, 'app_with_locale', 'locale', LOCALE), True)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
project_de_locale = os.path.join(
self.test_dir, 'project_locale', 'de', 'LC_MESSAGES', 'django.po')
app_de_locale = os.path.join(
self.test_dir, 'app_with_locale', 'locale', 'de', 'LC_MESSAGES', 'django.po')
self.assertTrue(os.path.exists(project_de_locale))
self.assertTrue(os.path.exists(app_de_locale))
with open(project_de_locale, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('This app has no locale directory', po_contents)
self.assertMsgId('This is a project-level string', po_contents)
with open(app_de_locale, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('This app has a locale directory', po_contents)
| bsd-3-clause | 563,069,743,160,034,900 | 44.655597 | 113 | 0.624333 | false |
Metaswitch/horizon | openstack_dashboard/dashboards/project/stacks/mappings.py | 7 | 14395 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from django.core.urlresolvers import reverse
from django.template.defaultfilters import register # noqa
from django.utils import html
from django.utils import safestring
import six
import six.moves.urllib.parse as urlparse
from openstack_dashboard.api import swift
LOG = logging.getLogger(__name__)
resource_urls = {
"AWS::AutoScaling::AutoScalingGroup": {
'link': 'horizon:project:stacks:detail'},
"AWS::CloudFormation::Stack": {
'link': 'horizon:project:stacks:detail'},
"AWS::EC2::Instance": {
'link': 'horizon:project:instances:detail'},
"AWS::EC2::InternetGateway": {
'link': 'horizon:project:networks:ports:detail'},
"AWS::EC2::NetworkInterface": {
'link': 'horizon:project:networks:ports:detail'},
"AWS::EC2::RouteTable": {
'link': 'horizon:project:routers:detail'},
"AWS::EC2::SecurityGroup": {
'link': 'horizon:project:access_and_security:index'},
"AWS::EC2::Subnet": {
'link': 'horizon:project:networks:subnets:detail'},
"AWS::EC2::Volume": {
'link': 'horizon:project:volumes:volumes:detail'},
"AWS::EC2::VPC": {
'link': 'horizon:project:networks:detail'},
"AWS::S3::Bucket": {
'link': 'horizon:project:containers:index'},
"OS::Cinder::Volume": {
'link': 'horizon:project:volumes:volumes:detail'},
"OS::Heat::AccessPolicy": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::AutoScalingGroup": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::CloudConfig": {
'link': 'horizon:project:stacks:detail'},
"OS::Neutron::Firewall": {
'link': 'horizon:project:firewalls:firewalldetails'},
"OS::Neutron::FirewallPolicy": {
'link': 'horizon:project:firewalls:policydetails'},
"OS::Neutron::FirewallRule": {
'link': 'horizon:project:firewalls:ruledetails'},
"OS::Heat::HARestarter": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::InstanceGroup": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::MultipartMime": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::ResourceGroup": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::SoftwareConfig": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::StructuredConfig": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::StructuredDeployment": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::Stack": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::WaitCondition": {
'link': 'horizon:project:stacks:detail'},
"OS::Heat::WaitConditionHandle": {
'link': 'horizon:project:stacks:detail'},
"OS::Neutron::HealthMonitor": {
'link': 'horizon:project:loadbalancers:monitordetails'},
"OS::Neutron::IKEPolicy": {
'link': 'horizon:project:vpn:ikepolicydetails'},
"OS::Neutron::IPsecPolicy": {
'link': 'horizon:project:vpn:ipsecpolicydetails'},
"OS::Neutron::IPsecSiteConnection": {
'link': 'horizon:project:vpn:ipsecsiteconnectiondetails'},
"OS::Neutron::Net": {
'link': 'horizon:project:networks:detail'},
"OS::Neutron::Pool": {
'link': 'horizon:project:loadbalancers:pooldetails'},
"OS::Neutron::PoolMember": {
'link': 'horizon:project:loadbalancers:memberdetails'},
"OS::Neutron::Port": {
'link': 'horizon:project:networks:ports:detail'},
"OS::Neutron::Router": {
'link': 'horizon:project:routers:detail'},
"OS::Neutron::Subnet": {
'link': 'horizon:project:networks:subnets:detail'},
"OS::Neutron::VPNService": {
'link': 'horizon:project:vpn:vpnservicedetails'},
"OS::Nova::KeyPair": {
'link': 'horizon:project:access_and_security:index'},
"OS::Nova::Server": {
'link': 'horizon:project:instances:detail'},
"OS::Swift::Container": {
'link': 'horizon:project:containers:index',
'format_pattern': '%s' + swift.FOLDER_DELIMITER},
}
def resource_to_url(resource):
if not resource or not resource.physical_resource_id:
return None
mapping = resource_urls.get(resource.resource_type, {})
try:
if 'link' not in mapping:
return None
format_pattern = mapping.get('format_pattern') or '%s'
rid = format_pattern % resource.physical_resource_id
url = reverse(mapping['link'], args=(rid,))
except Exception as e:
LOG.exception(e)
return None
return url
@register.filter
def stack_output(output):
if not output:
return u''
if isinstance(output, basestring):
parts = urlparse.urlsplit(output)
if parts.netloc and parts.scheme in ('http', 'https'):
url = html.escape(output)
safe_link = u'<a href="%s" target="_blank">%s</a>' % (url, url)
return safestring.mark_safe(safe_link)
if isinstance(output, dict) or isinstance(output, list):
output = json.dumps(output, indent=2)
return safestring.mark_safe(u'<pre>%s</pre>' % html.escape(output))
resource_images = {
'LB_FAILED': '/static/dashboard/img/lb-red.svg',
'LB_DELETE': '/static/dashboard/img/lb-red.svg',
'LB_IN_PROGRESS': '/static/dashboard/img/lb-gray.gif',
'LB_INIT': '/static/dashboard/img/lb-gray.svg',
'LB_COMPLETE': '/static/dashboard/img/lb-green.svg',
'DB_FAILED': '/static/dashboard/img/db-red.svg',
'DB_DELETE': '/static/dashboard/img/db-red.svg',
'DB_IN_PROGRESS': '/static/dashboard/img/db-gray.gif',
'DB_INIT': '/static/dashboard/img/db-gray.svg',
'DB_COMPLETE': '/static/dashboard/img/db-green.svg',
'STACK_FAILED': '/static/dashboard/img/stack-red.svg',
'STACK_DELETE': '/static/dashboard/img/stack-red.svg',
'STACK_IN_PROGRESS': '/static/dashboard/img/stack-gray.gif',
'STACK_INIT': '/static/dashboard/img/stack-gray.svg',
'STACK_COMPLETE': '/static/dashboard/img/stack-green.svg',
'SERVER_FAILED': '/static/dashboard/img/server-red.svg',
'SERVER_DELETE': '/static/dashboard/img/server-red.svg',
'SERVER_IN_PROGRESS': '/static/dashboard/img/server-gray.gif',
'SERVER_INIT': '/static/dashboard/img/server-gray.svg',
'SERVER_COMPLETE': '/static/dashboard/img/server-green.svg',
'ALARM_FAILED': '/static/dashboard/img/alarm-red.svg',
'ALARM_DELETE': '/static/dashboard/img/alarm-red.svg',
'ALARM_IN_PROGRESS': '/static/dashboard/img/alarm-gray.gif',
'ALARM_INIT': '/static/dashboard/img/alarm-gray.svg',
'ALARM_COMPLETE': '/static/dashboard/img/alarm-green.svg',
'VOLUME_FAILED': '/static/dashboard/img/volume-red.svg',
'VOLUME_DELETE': '/static/dashboard/img/volume-red.svg',
'VOLUME_IN_PROGRESS': '/static/dashboard/img/volume-gray.gif',
'VOLUME_INIT': '/static/dashboard/img/volume-gray.svg',
'VOLUME_COMPLETE': '/static/dashboard/img/volume-green.svg',
'IMAGE_FAILED': '/static/dashboard/img/image-red.svg',
'IMAGE_DELETE': '/static/dashboard/img/image-red.svg',
'IMAGE_IN_PROGRESS': '/static/dashboard/img/image-gray.gif',
'IMAGE_INIT': '/static/dashboard/img/image-gray.svg',
'IMAGE_COMPLETE': '/static/dashboard/img/image-green.svg',
'WAIT_FAILED': '/static/dashboard/img/wait-red.svg',
'WAIT_DELETE': '/static/dashboard/img/wait-red.svg',
'WAIT_IN_PROGRESS': '/static/dashboard/img/wait-gray.gif',
'WAIT_INIT': '/static/dashboard/img/wait-gray.svg',
'WAIT_COMPLETE': '/static/dashboard/img/wait-green.svg',
'FIREWALL_FAILED': '/static/dashboard/img/firewall-red.svg',
'FIREWALL_DELETE': '/static/dashboard/img/firewall-red.svg',
'FIREWALL_IN_PROGRESS': '/static/dashboard/img/firewall-gray.gif',
'FIREWALL_INIT': '/static/dashboard/img/firewall-gray.svg',
'FIREWALL_COMPLETE': '/static/dashboard/img/firewall-green.svg',
'FLOATINGIP_FAILED': '/static/dashboard/img/floatingip-red.svg',
'FLOATINGIP_DELETE': '/static/dashboard/img/floatingip-red.svg',
'FLOATINGIP_IN_PROGRESS': '/static/dashboard/img/floatingip-gray.gif',
'FLOATINGIP_INIT': '/static/dashboard/img/floatingip-gray.svg',
'FLOATINGIP_COMPLETE': '/static/dashboard/img/floatingip-green.svg',
'ROUTER_FAILED': '/static/dashboard/img/router-red.svg',
'ROUTER_DELETE': '/static/dashboard/img/router-red.svg',
'ROUTER_IN_PROGRESS': '/static/dashboard/img/router-gray.gif',
'ROUTER_INIT': '/static/dashboard/img/router-gray.svg',
'ROUTER_COMPLETE': '/static/dashboard/img/router-green.svg',
'POLICY_FAILED': '/static/dashboard/img/policy-red.svg',
'POLICY_DELETE': '/static/dashboard/img/policy-red.svg',
'POLICY_IN_PROGRESS': '/static/dashboard/img/policy-gray.gif',
'POLICY_INIT': '/static/dashboard/img/policy-gray.svg',
'POLICY_COMPLETE': '/static/dashboard/img/policy-green.svg',
'CONFIG_FAILED': '/static/dashboard/img/config-red.svg',
'CONFIG_DELETE': '/static/dashboard/img/config-red.svg',
'CONFIG_IN_PROGRESS': '/static/dashboard/img/config-gray.gif',
'CONFIG_INIT': '/static/dashboard/img/config-gray.svg',
'CONFIG_COMPLETE': '/static/dashboard/img/config-green.svg',
'NETWORK_FAILED': '/static/dashboard/img/network-red.svg',
'NETWORK_DELETE': '/static/dashboard/img/network-red.svg',
'NETWORK_IN_PROGRESS': '/static/dashboard/img/network-gray.gif',
'NETWORK_INIT': '/static/dashboard/img/network-gray.svg',
'NETWORK_COMPLETE': '/static/dashboard/img/network-green.svg',
'PORT_FAILED': '/static/dashboard/img/port-red.svg',
'PORT_DELETE': '/static/dashboard/img/port-red.svg',
'PORT_IN_PROGRESS': '/static/dashboard/img/port-gray.gif',
'PORT_INIT': '/static/dashboard/img/port-gray.svg',
'PORT_COMPLETE': '/static/dashboard/img/port-green.svg',
'SECURITYGROUP_FAILED': '/static/dashboard/img/securitygroup-red.svg',
'SECURITYGROUP_DELETE': '/static/dashboard/img/securitygroup-red.svg',
'SECURITYGROUP_IN_PROGRESS':
'/static/dashboard/img/securitygroup-gray.gif',
'SECURITYGROUP_INIT': '/static/dashboard/img/securitygroup-gray.svg',
'SECURITYGROUP_COMPLETE': '/static/dashboard/img/securitygroup-green.svg',
'VPN_FAILED': '/static/dashboard/img/vpn-red.svg',
'VPN_DELETE': '/static/dashboard/img/vpn-red.svg',
'VPN_IN_PROGRESS': '/static/dashboard/img/vpn-gray.gif',
'VPN_INIT': '/static/dashboard/img/vpn-gray.svg',
'VPN_COMPLETE': '/static/dashboard/img/vpn-green.svg',
'FLAVOR_FAILED': '/static/dashboard/img/flavor-red.svg',
'FLAVOR_DELETE': '/static/dashboard/img/flavor-red.svg',
'FLAVOR_IN_PROGRESS': '/static/dashboard/img/flavor-gray.gif',
'FLAVOR_INIT': '/static/dashboard/img/flavor-gray.svg',
'FLAVOR_COMPLETE': '/static/dashboard/img/flavor-green.svg',
'KEYPAIR_FAILED': '/static/dashboard/img/keypair-red.svg',
'KEYPAIR_DELETE': '/static/dashboard/img/keypair-red.svg',
'KEYPAIR_IN_PROGRESS': '/static/dashboard/img/keypair-gray.gif',
'KEYPAIR_INIT': '/static/dashboard/img/keypair-gray.svg',
'KEYPAIR_COMPLETE': '/static/dashboard/img/keypair-green.svg',
'UNKNOWN_FAILED': '/static/dashboard/img/unknown-red.svg',
'UNKNOWN_DELETE': '/static/dashboard/img/unknown-red.svg',
'UNKNOWN_IN_PROGRESS': '/static/dashboard/img/unknown-gray.gif',
'UNKNOWN_INIT': '/static/dashboard/img/unknown-gray.svg',
'UNKNOWN_COMPLETE': '/static/dashboard/img/unknown-green.svg',
}
resource_types = {
# LB
'LoadBalance': 'LB',
'HealthMonitor': 'LB',
'PoolMember': 'LB',
'Pool': 'LB',
# DB
'DBInstance': 'DB',
'Database': 'DB',
# SERVER
'Instance': 'SERVER',
'Server': 'SERVER',
# ALARM
'Alarm': 'ALARM',
'CombinationAlarm': 'ALARM',
'CWLiteAlarm': 'ALARM',
# VOLUME
'Volume': 'VOLUME',
'VolumeAttachment': 'VOLUME',
# STACK
'stack': 'STACK',
'AutoScalingGroup': 'STACK',
'InstanceGroup': 'STACK',
'ServerGroup': 'STACK',
'ResourceGroup': 'STACK',
# IMAGE
'Image': 'IMAGE',
# WAIT
'WaitCondition': 'WAIT',
'WaitConditionHandle': 'WAIT',
'UpdateWaitConditionHandle': 'WAIT',
# FIREWALL
'Firewall': 'FIREWALL',
'FirewallPolicy': 'FIREWALL',
'FirewallRule': 'FIREWALL',
# FLOATINGIP
'FloatingIP': 'FLOATINGIP',
'FloatingIPAssociation': 'FLOATINGIP',
# ROUTER
'Router': 'ROUTER',
'RouterGateway': 'ROUTER',
'RouterInterface': 'ROUTER',
# POLICY
'ScalingPolicy': 'POLICY',
# CONFIG
'CloudConfig': 'CONFIG',
'MultipartMime': 'CONFIG',
'SoftwareConfig': 'CONFIG',
'SoftwareDeployment': 'CONFIG',
'StructuredConfig': 'CONFIG',
'StructuredDeployment': 'CONFIG',
# NETWORK
'Net': 'NETWORK',
'Subnet': 'NETWORK',
'NetworkGateway': 'NETWORK',
'ProviderNet': 'NETWORK',
# PORT
'Port': 'PORT',
# SECURITYGROUP
'SecurityGroup': 'SECURITYGROUP',
# VPN
'VPNService': 'VPN',
# FLAVOR
'Flavor': 'FLAVOR',
# KEYPAIR
'KeyPair': 'KEYPAIR',
}
def get_resource_type(type):
for key, value in six.iteritems(resource_types):
if key in type:
return value
return 'UNKNOWN'
def get_resource_status(status):
if ('IN_PROGRESS' in status):
return 'IN_PROGRESS'
elif ('FAILED' in status):
return 'FAILED'
elif ('DELETE' in status):
return 'DELETE'
elif ('INIT' in status):
return 'INIT'
else:
return 'COMPLETE'
def get_resource_image(status, type):
"""Sets the image url and in_progress action sw based on status."""
resource_type = get_resource_type(type)
resource_status = get_resource_status(status)
resource_state = resource_type + "_" + resource_status
for key in resource_images:
if key == resource_state:
return resource_images.get(key)
| apache-2.0 | 5,091,092,675,694,785,000 | 40.011396 | 78 | 0.651615 | false |
Zord13appdesa/python-for-android | python3-alpha/python3-src/Lib/distutils/tests/test_bdist_dumb.py | 46 | 2555 | """Tests for distutils.command.bdist_dumb."""
import unittest
import sys
import os
from test.support import run_unittest
from distutils.core import Distribution
from distutils.command.bdist_dumb import bdist_dumb
from distutils.tests import support
SETUP_PY = """\
from distutils.core import setup
import foo
setup(name='foo', version='0.1', py_modules=['foo'],
url='xxx', author='xxx', author_email='xxx')
"""
try:
import zlib
ZLIB_SUPPORT = True
except ImportError:
ZLIB_SUPPORT = False
class BuildDumbTestCase(support.TempdirManager,
support.LoggingSilencer,
support.EnvironGuard,
unittest.TestCase):
def setUp(self):
super(BuildDumbTestCase, self).setUp()
self.old_location = os.getcwd()
self.old_sys_argv = sys.argv, sys.argv[:]
def tearDown(self):
os.chdir(self.old_location)
sys.argv = self.old_sys_argv[0]
sys.argv[:] = self.old_sys_argv[1]
super(BuildDumbTestCase, self).tearDown()
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
def test_simple_built(self):
# let's create a simple package
tmp_dir = self.mkdtemp()
pkg_dir = os.path.join(tmp_dir, 'foo')
os.mkdir(pkg_dir)
self.write_file((pkg_dir, 'setup.py'), SETUP_PY)
self.write_file((pkg_dir, 'foo.py'), '#')
self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py')
self.write_file((pkg_dir, 'README'), '')
dist = Distribution({'name': 'foo', 'version': '0.1',
'py_modules': ['foo'],
'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx'})
dist.script_name = 'setup.py'
os.chdir(pkg_dir)
sys.argv = ['setup.py']
cmd = bdist_dumb(dist)
        # so the output is the same no matter
        # what the platform is
cmd.format = 'zip'
cmd.ensure_finalized()
cmd.run()
# see what we have
dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
base = "%s.%s" % (dist.get_fullname(), cmd.plat_name)
if os.name == 'os2':
base = base.replace(':', '-')
wanted = ['%s.zip' % base]
self.assertEqual(dist_created, wanted)
# now let's check what we have in the zip file
# XXX to be done
def test_suite():
return unittest.makeSuite(BuildDumbTestCase)
if __name__ == '__main__':
run_unittest(test_suite())
| apache-2.0 | -3,467,793,810,045,314,000 | 27.707865 | 67 | 0.570254 | false |
skudriashev/incubator-airflow | airflow/operators/presto_to_mysql.py | 15 | 2879 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.hooks.presto_hook import PrestoHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class PrestoToMySqlTransfer(BaseOperator):
"""
Moves data from Presto to MySQL, note that for now the data is loaded
into memory before being pushed to MySQL, so this operator should
be used for smallish amount of data.
:param sql: SQL query to execute against the MySQL database
:type sql: str
:param mysql_table: target MySQL table, use dot notation to target a
specific database
:type mysql_table: str
:param mysql_conn_id: source mysql connection
:type mysql_conn_id: str
:param presto_conn_id: source presto connection
:type presto_conn_id: str
:param mysql_preoperator: sql statement to run against mysql prior to
        import, typically used to truncate or delete in place of the data
coming in, allowing the task to be idempotent (running the task
twice won't double load data)
:type mysql_preoperator: str
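    Example (a minimal usage sketch; the task id, SQL, table name and
    preoperator below are illustrative assumptions, not part of this operator):

        presto_to_mysql = PrestoToMySqlTransfer(
            task_id='presto_to_mysql_example',
            sql="SELECT name, count(*) AS cnt FROM events GROUP BY name",
            mysql_table='stats.event_counts',
            mysql_preoperator='TRUNCATE TABLE stats.event_counts',
            dag=dag)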
"""
template_fields = ('sql', 'mysql_table', 'mysql_preoperator')
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(
self,
sql,
mysql_table,
presto_conn_id='presto_default',
mysql_conn_id='mysql_default',
mysql_preoperator=None,
*args, **kwargs):
super(PrestoToMySqlTransfer, self).__init__(*args, **kwargs)
self.sql = sql
self.mysql_table = mysql_table
self.mysql_conn_id = mysql_conn_id
self.mysql_preoperator = mysql_preoperator
self.presto_conn_id = presto_conn_id
def execute(self, context):
presto = PrestoHook(presto_conn_id=self.presto_conn_id)
self.log.info("Extracting data from Presto: %s", self.sql)
results = presto.get_records(self.sql)
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
if self.mysql_preoperator:
self.log.info("Running MySQL preoperator")
self.log.info(self.mysql_preoperator)
mysql.run(self.mysql_preoperator)
self.log.info("Inserting rows into MySQL")
mysql.insert_rows(table=self.mysql_table, rows=results)
| apache-2.0 | 6,872,243,548,011,420,000 | 37.905405 | 74 | 0.678013 | false |
yinrongping/awesome-python-webapp | www/pymonitor.py | 1 | 2146 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Watches for source code changes and automatically restarts the application
# easy_install watchdog
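# Usage sketch (based on the __main__ block below): run `python pymonitor.py`
# from the project root; it watches .py files under the current directory and
# restarts wsgiapp.py whenever one of them changes.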
__author__ = 'Michael Liao'
import os
import sys
import time
import subprocess
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
def log(s):
print '[Monitor] %s' % s
class MyFileSystemEventHander(FileSystemEventHandler):
def __init__(self, fn):
super(MyFileSystemEventHander, self).__init__()
self.restart = fn
    # Listen for file system events
def on_any_event(self, event):
        # On any change event, restart the application if a .py file was modified
if event.src_path.endswith('.py'):
log('Python source file changed: %s' % event.src_path)
self.restart()
command = ['echo', 'ok']
process = None
# Kill the running process
def kill_process():
global process
if process:
log('Kill process [%s]...' % process.pid)
process.kill()
process.wait()
log('Process ended with code %s.' % process.returncode)
process = None
# Start the application
def start_process():
global process, command
log('Start process %s...' % ' '.join(command))
process = subprocess.Popen(
command, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr)
# Restart the application
def restart_process():
kill_process()
start_process()
# Start watching for file changes
def start_watch(path, callback):
observer = Observer()
observer.schedule(
MyFileSystemEventHander(restart_process), path, recursive=True)
observer.start()
log('Watching directory %s...' % path)
start_process()
try:
while True:
time.sleep(0.5)
except KeyboardInterrupt:
observer.stop()
observer.join()
if __name__ == '__main__':
# argv = sys.argv[1:]
argv = ['wsgiapp.py']
if not argv:
print('Usage: ./pymonitor your-script.py')
exit(0)
if argv[0] != 'python':
argv.insert(0, 'python')
command = argv
path = os.path.abspath('.')
start_watch(path, None)
| gpl-2.0 | 8,677,552,191,376,297,000 | 21.356322 | 71 | 0.586122 | false |
LEDS/X-data | Xdata/app_pessoa/migrations/0001_initial.py | 1 | 8591 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-04-02 21:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Cor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=50)),
('sigla', models.CharField(max_length=3)),
],
options={
'db_table': 'Cor',
},
),
migrations.CreateModel(
name='Endereco',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cep', models.CharField(blank=True, max_length=50, null=True)),
],
options={
'db_table': 'Endereco',
},
),
migrations.CreateModel(
name='Escola_Origem_Pessoa',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ano', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'Escola_Origem_Pessoa',
},
),
migrations.CreateModel(
name='Forma_Ingresso',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=400)),
('sigla', models.CharField(max_length=50)),
],
options={
'db_table': 'Forma_Ingresso',
},
),
migrations.CreateModel(
name='Forma_Ingresso_Pessoa',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ano', models.IntegerField(blank=True, null=True)),
('forma_ingresso', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app_pessoa.Forma_Ingresso')),
],
options={
'db_table': 'Forma_Ingresso_Pessoa',
},
),
migrations.CreateModel(
name='Pessoa',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ano_nascimento', models.IntegerField(blank=True, null=True)),
('ano_conclusao_ensino_medio', models.IntegerField(blank=True, null=True)),
('ano_conclusao_primeiro_grau', models.IntegerField(blank=True, null=True)),
('codigo_social', models.CharField(max_length=255)),
('pai_falecido', models.BooleanField(default=False)),
('mae_falecido', models.BooleanField(default=False)),
('necessidade_fisica', models.BooleanField(default=False)),
('necessidade_auditiva', models.BooleanField(default=False)),
('necessidade_mental', models.BooleanField(default=False)),
('necessidade_multipla', models.BooleanField(default=False)),
('superdotado', models.BooleanField(default=False)),
('ano_reservista', models.IntegerField(blank=True, null=True)),
('bolsa_familia', models.BooleanField(default=False)),
('cor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app_pessoa.Cor')),
],
options={
'db_table': 'Pessoa',
},
),
migrations.CreateModel(
name='Profissao',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=50)),
],
options={
'db_table': 'Profissao',
},
),
migrations.CreateModel(
name='Renda_Familiar',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('descricao', models.CharField(max_length=50)),
('codigo', models.IntegerField()),
],
options={
'db_table': 'Renda_Familiar',
},
),
migrations.CreateModel(
name='Renda_Familiar_Pessoa',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ano', models.IntegerField(blank=True, null=True)),
('pessoa', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app_pessoa.Pessoa')),
('renda_familiar', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app_pessoa.Renda_Familiar')),
],
options={
'db_table': 'Renda_Familiar_Pessoa',
},
),
migrations.CreateModel(
name='Reside',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=50)),
],
options={
'db_table': 'Reside',
},
),
migrations.CreateModel(
name='Responsavel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=50)),
],
options={
'db_table': 'Responsavel',
},
),
migrations.CreateModel(
name='Sexo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=50)),
('sigla', models.CharField(max_length=1)),
],
options={
'db_table': 'Sexo',
},
),
migrations.CreateModel(
name='Tipo_Escola_Origem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('descricao', models.CharField(max_length=50)),
('codigo', models.CharField(max_length=1)),
],
options={
'db_table': 'Tipo_Escola_Origem',
},
),
migrations.AddField(
model_name='pessoa',
name='profissao',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app_pessoa.Profissao'),
),
migrations.AddField(
model_name='pessoa',
name='reside',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app_pessoa.Reside'),
),
migrations.AddField(
model_name='pessoa',
name='sexo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app_pessoa.Sexo'),
),
migrations.AddField(
model_name='pessoa',
name='tipo_responsavel',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app_pessoa.Responsavel'),
),
migrations.AddField(
model_name='forma_ingresso_pessoa',
name='pessoa',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app_pessoa.Pessoa'),
),
migrations.AddField(
model_name='escola_origem_pessoa',
name='pessoa',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app_pessoa.Pessoa'),
),
migrations.AddField(
model_name='escola_origem_pessoa',
name='tipo_escola_origem',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app_pessoa.Tipo_Escola_Origem'),
),
migrations.AddField(
model_name='endereco',
name='pessoa',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app_pessoa.Pessoa'),
),
]
| mit | 7,880,632,183,948,459,000 | 40.302885 | 131 | 0.528344 | false |
hogarthj/ansible | lib/ansible/plugins/filter/ipaddr.py | 15 | 29588 | # (c) 2014, Maciej Delmanowski <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from functools import partial
import types
try:
import netaddr
except ImportError:
# in this case, we'll make the filters return error messages (see bottom)
netaddr = None
else:
class mac_linux(netaddr.mac_unix):
pass
mac_linux.word_fmt = '%.2x'
from ansible import errors
# ---- IP address and network query helpers ----
def _empty_ipaddr_query(v, vtype):
# We don't have any query to process, so just check what type the user
# expects, and return the IP address in a correct format
if v:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
def _first_last(v):
if v.size == 2:
first_usable = int(netaddr.IPAddress(v.first))
last_usable = int(netaddr.IPAddress(v.last))
return first_usable, last_usable
elif v.size > 1:
first_usable = int(netaddr.IPAddress(v.first + 1))
last_usable = int(netaddr.IPAddress(v.last - 1))
return first_usable, last_usable
def _6to4_query(v, vtype, value):
if v.version == 4:
if v.size == 1:
ipconv = str(v.ip)
elif v.size > 1:
if v.ip != v.network:
ipconv = str(v.ip)
else:
ipconv = False
if ipaddr(ipconv, 'public'):
numbers = list(map(int, ipconv.split('.')))
try:
return '2002:{:02x}{:02x}:{:02x}{:02x}::1/48'.format(*numbers)
except:
return False
elif v.version == 6:
if vtype == 'address':
if ipaddr(str(v), '2002::/16'):
return value
elif vtype == 'network':
if v.ip != v.network:
if ipaddr(str(v.ip), '2002::/16'):
return value
else:
return False
def _ip_query(v):
if v.size == 1:
return str(v.ip)
if v.size > 1:
# /31 networks in netaddr have no broadcast address
if v.ip != v.network or not v.broadcast:
return str(v.ip)
def _gateway_query(v):
if v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _address_prefix_query(v):
if v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _bool_ipaddr_query(v):
if v:
return True
def _broadcast_query(v):
if v.size > 2:
return str(v.broadcast)
def _cidr_query(v):
return str(v)
def _cidr_lookup_query(v, iplist, value):
try:
if v in iplist:
return value
except:
return False
def _first_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size == 2:
return str(netaddr.IPAddress(int(v.network)))
elif v.size > 1:
return str(netaddr.IPAddress(int(v.network) + 1))
def _host_query(v):
if v.size == 1:
return str(v)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _hostmask_query(v):
return str(v.hostmask)
def _int_query(v, vtype):
if vtype == 'address':
return int(v.ip)
elif vtype == 'network':
return str(int(v.ip)) + '/' + str(int(v.prefixlen))
def _ip_prefix_query(v):
if v.size == 2:
return str(v.ip) + '/' + str(v.prefixlen)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _ip_netmask_query(v):
if v.size == 2:
return str(v.ip) + ' ' + str(v.netmask)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + ' ' + str(v.netmask)
'''
def _ip_wildcard_query(v):
if v.size == 2:
return str(v.ip) + ' ' + str(v.hostmask)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + ' ' + str(v.hostmask)
'''
def _ipv4_query(v, value):
if v.version == 6:
try:
return str(v.ipv4())
except:
return False
else:
return value
def _ipv6_query(v, value):
if v.version == 4:
return str(v.ipv6())
else:
return value
def _last_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
return str(netaddr.IPAddress(last_usable))
def _link_local_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v.version == 4:
if ipaddr(str(v_ip), '169.254.0.0/24'):
return value
elif v.version == 6:
if ipaddr(str(v_ip), 'fe80::/10'):
return value
def _loopback_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v_ip.is_loopback():
return value
def _multicast_query(v, value):
if v.is_multicast():
return value
def _net_query(v):
if v.size > 1:
if v.ip == v.network:
return str(v.network) + '/' + str(v.prefixlen)
def _netmask_query(v):
return str(v.netmask)
def _network_query(v):
'''Return the network of a given IP or subnet'''
if v.size > 1:
return str(v.network)
def _network_id_query(v):
'''Return the network of a given IP or subnet'''
return str(v.network)
def _network_netmask_query(v):
return str(v.network) + ' ' + str(v.netmask)
def _network_wildcard_query(v):
return str(v.network) + ' ' + str(v.hostmask)
def _next_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
next_ip = int(netaddr.IPAddress(int(v.ip) + 1))
if next_ip >= first_usable and next_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) + 1))
def _prefix_query(v):
return int(v.prefixlen)
def _previous_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
previous_ip = int(netaddr.IPAddress(int(v.ip) - 1))
if previous_ip >= first_usable and previous_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) - 1))
def _private_query(v, value):
if v.is_private():
return value
def _public_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if (v_ip.is_unicast() and not v_ip.is_private() and
not v_ip.is_loopback() and not v_ip.is_netmask() and
not v_ip.is_hostmask()):
return value
def _range_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
first_usable = str(netaddr.IPAddress(first_usable))
last_usable = str(netaddr.IPAddress(last_usable))
return "{0}-{1}".format(first_usable, last_usable)
def _revdns_query(v):
v_ip = netaddr.IPAddress(str(v.ip))
return v_ip.reverse_dns
def _size_query(v):
return v.size
def _size_usable_query(v):
if v.size == 1:
return 0
elif v.size == 2:
return 2
return v.size - 2
def _subnet_query(v):
return str(v.cidr)
def _type_query(v):
if v.size == 1:
return 'address'
if v.size > 1:
if v.ip != v.network:
return 'address'
else:
return 'network'
def _unicast_query(v, value):
if v.is_unicast():
return value
def _version_query(v):
return v.version
def _wrap_query(v, vtype, value):
if v.version == 6:
if vtype == 'address':
return '[' + str(v.ip) + ']'
elif vtype == 'network':
return '[' + str(v.ip) + ']/' + str(v.prefixlen)
else:
return value
# ---- HWaddr query helpers ----
def _bare_query(v):
v.dialect = netaddr.mac_bare
return str(v)
def _bool_hwaddr_query(v):
if v:
return True
def _int_hwaddr_query(v):
return int(v)
def _cisco_query(v):
v.dialect = netaddr.mac_cisco
return str(v)
def _empty_hwaddr_query(v, value):
if v:
return value
def _linux_query(v):
v.dialect = mac_linux
return str(v)
def _postgresql_query(v):
v.dialect = netaddr.mac_pgsql
return str(v)
def _unix_query(v):
v.dialect = netaddr.mac_unix
return str(v)
def _win_query(v):
v.dialect = netaddr.mac_eui48
return str(v)
# ---- IP address and network filters ----
def ipaddr(value, query='', version=False, alias='ipaddr'):
''' Check if string is an IP address or network and filter it '''
query_func_extra_args = {
'': ('vtype',),
'6to4': ('vtype', 'value'),
'cidr_lookup': ('iplist', 'value'),
'first_usable': ('vtype',),
'int': ('vtype',),
'ipv4': ('value',),
'ipv6': ('value',),
'last_usable': ('vtype',),
'link-local': ('value',),
'loopback': ('value',),
'lo': ('value',),
'multicast': ('value',),
'next_usable': ('vtype',),
'previous_usable': ('vtype',),
'private': ('value',),
'public': ('value',),
'unicast': ('value',),
'range_usable': ('vtype',),
'wrap': ('vtype', 'value'),
}
query_func_map = {
'': _empty_ipaddr_query,
'6to4': _6to4_query,
'address': _ip_query,
'address/prefix': _address_prefix_query, # deprecate
'bool': _bool_ipaddr_query,
'broadcast': _broadcast_query,
'cidr': _cidr_query,
'cidr_lookup': _cidr_lookup_query,
'first_usable': _first_usable_query,
'gateway': _gateway_query, # deprecate
'gw': _gateway_query, # deprecate
'host': _host_query,
'host/prefix': _address_prefix_query, # deprecate
'hostmask': _hostmask_query,
'hostnet': _gateway_query, # deprecate
'int': _int_query,
'ip': _ip_query,
'ip/prefix': _ip_prefix_query,
'ip_netmask': _ip_netmask_query,
# 'ip_wildcard': _ip_wildcard_query, built then could not think of use case
'ipv4': _ipv4_query,
'ipv6': _ipv6_query,
'last_usable': _last_usable_query,
'link-local': _link_local_query,
'lo': _loopback_query,
'loopback': _loopback_query,
'multicast': _multicast_query,
'net': _net_query,
'next_usable': _next_usable_query,
'netmask': _netmask_query,
'network': _network_query,
'network_id': _network_id_query,
'network/prefix': _subnet_query,
'network_netmask': _network_netmask_query,
'network_wildcard': _network_wildcard_query,
'prefix': _prefix_query,
'previous_usable': _previous_usable_query,
'private': _private_query,
'public': _public_query,
'range_usable': _range_usable_query,
'revdns': _revdns_query,
'router': _gateway_query, # deprecate
'size': _size_query,
'size_usable': _size_usable_query,
'subnet': _subnet_query,
'type': _type_query,
'unicast': _unicast_query,
'v4': _ipv4_query,
'v6': _ipv6_query,
'version': _version_query,
'wildcard': _hostmask_query,
'wrap': _wrap_query,
}
vtype = None
if not value:
return False
elif value is True:
return False
# Check if value is a list and parse each element
elif isinstance(value, (list, tuple, types.GeneratorType)):
_ret = []
for element in value:
if ipaddr(element, str(query), version):
_ret.append(ipaddr(element, str(query), version))
if _ret:
return _ret
else:
return list()
# Check if value is a number and convert it to an IP address
elif str(value).isdigit():
# We don't know what IP version to assume, so let's check IPv4 first,
# then IPv6
try:
if ((not version) or (version and version == 4)):
v = netaddr.IPNetwork('0.0.0.0/0')
v.value = int(value)
v.prefixlen = 32
elif version and version == 6:
v = netaddr.IPNetwork('::/0')
v.value = int(value)
v.prefixlen = 128
# IPv4 didn't work the first time, so it definitely has to be IPv6
except:
try:
v = netaddr.IPNetwork('::/0')
v.value = int(value)
v.prefixlen = 128
# The value is too big for IPv6. Are you a nanobot?
except:
return False
# We got an IP address, let's mark it as such
value = str(v)
vtype = 'address'
# value has not been recognized, check if it's a valid IP string
else:
try:
v = netaddr.IPNetwork(value)
# value is a valid IP string, check if user specified
# CIDR prefix or just an IP address, this will indicate default
# output format
try:
address, prefix = value.split('/')
vtype = 'network'
except:
vtype = 'address'
# value hasn't been recognized, maybe it's a numerical CIDR?
except:
try:
address, prefix = value.split('/')
address.isdigit()
address = int(address)
prefix.isdigit()
prefix = int(prefix)
# It's not numerical CIDR, give up
except:
return False
# It is something, so let's try and build a CIDR from the parts
try:
v = netaddr.IPNetwork('0.0.0.0/0')
v.value = address
v.prefixlen = prefix
# It's not a valid IPv4 CIDR
except:
try:
v = netaddr.IPNetwork('::/0')
v.value = address
v.prefixlen = prefix
# It's not a valid IPv6 CIDR. Give up.
except:
return False
# We have a valid CIDR, so let's write it in correct format
value = str(v)
vtype = 'network'
# We have a query string but it's not in the known query types. Check if
# that string is a valid subnet, if so, we can check later if given IP
# address/network is inside that specific subnet
try:
# ?? 6to4 and link-local were True here before. Should they still?
if query and (query not in query_func_map or query == 'cidr_lookup') and ipaddr(query, 'network'):
iplist = netaddr.IPSet([netaddr.IPNetwork(query)])
query = 'cidr_lookup'
except:
pass
    # This code checks if value matches the IP version the user wants, i.e. if
# it's any version ("ipaddr()"), IPv4 ("ipv4()") or IPv6 ("ipv6()")
# If version does not match, return False
if version and v.version != version:
return False
extras = []
for arg in query_func_extra_args.get(query, tuple()):
extras.append(locals()[arg])
try:
return query_func_map[query](v, *extras)
except KeyError:
try:
float(query)
if v.size == 1:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
elif v.size > 1:
try:
return str(v[query]) + '/' + str(v.prefixlen)
except:
return False
else:
return value
except:
raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
return False
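# Example queries for the ipaddr() filter (a minimal sketch of expected
# results, assuming the netaddr library is installed):
#
# '192.168.0.1/24' | ipaddr -> '192.168.0.1/24' (valid input, returned as-is)
# '192.168.0.1/24' | ipaddr('address') -> '192.168.0.1'
# '192.168.0.1/24' | ipaddr('netmask') -> '255.255.255.0'
# '192.168.0.1/24' | ipaddr('broadcast') -> '192.168.0.255'
# 'not-an-address' | ipaddr -> False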
def ipwrap(value, query=''):
try:
if isinstance(value, (list, tuple, types.GeneratorType)):
_ret = []
for element in value:
if ipaddr(element, query, version=False, alias='ipwrap'):
_ret.append(ipaddr(element, 'wrap'))
else:
_ret.append(element)
return _ret
else:
_ret = ipaddr(value, query, version=False, alias='ipwrap')
if _ret:
return ipaddr(_ret, 'wrap')
else:
return value
except:
return value
def ipv4(value, query=''):
return ipaddr(value, query, version=4, alias='ipv4')
def ipv6(value, query=''):
return ipaddr(value, query, version=6, alias='ipv6')
# Split given subnet into smaller subnets or find out the biggest subnet of
# a given IP address with given CIDR prefix
# Usage:
#
# - address or address/prefix | ipsubnet
# returns CIDR subnet of a given input
#
# - address/prefix | ipsubnet(cidr)
# returns number of possible subnets for given CIDR prefix
#
# - address/prefix | ipsubnet(cidr, index)
# returns new subnet with given CIDR prefix
#
# - address | ipsubnet(cidr)
# returns biggest subnet with given CIDR prefix that address belongs to
#
# - address | ipsubnet(cidr, index)
# returns next indexed subnet which contains given address
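#
# Examples (a sketch of expected results, assuming the netaddr library is
# installed):
#
# - '192.168.0.0/24' | ipsubnet(26) -> '4' (number of /26 subnets)
# - '192.168.0.0/24' | ipsubnet(26, 1) -> '192.168.0.64/26' (second /26 subnet)
# - '192.168.144.5' | ipsubnet(20) -> '192.168.144.0/20' (enclosing /20 network)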
def ipsubnet(value, query='', index='x'):
''' Manipulate IPv4/IPv6 subnets '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return str(value)
elif str(query).isdigit():
vsize = ipaddr(v, 'size')
query = int(query)
try:
float(index)
index = int(index)
if vsize > 1:
try:
return str(list(value.subnet(query))[index])
except:
return False
elif vsize == 1:
try:
return str(value.supernet(query)[index])
except:
return False
except:
if vsize > 1:
try:
return str(len(list(value.subnet(query))))
except:
return False
elif vsize == 1:
try:
return str(value.supernet(query)[0])
except:
return False
return False
# Returns the nth host within a network described by value.
# Usage:
#
# - address or address/prefix | nthhost(nth)
# returns the nth host within the given network
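#
# Example (a sketch of the expected result, assuming netaddr is installed):
#
# - '192.168.0.0/24' | nthhost(5) -> 192.168.0.5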
def nthhost(value, query=''):
''' Get the nth host within a given network '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return False
try:
nth = int(query)
if value.size > nth:
return value[nth]
except ValueError:
return False
return False
# Returns the next nth usable ip within a network described by value.
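# Example (a sketch, assuming netaddr is installed):
# '192.168.122.1/24' | next_nth_usable(2) -> '192.168.122.3'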
def next_nth_usable(value, offset):
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
v = netaddr.IPNetwork(v)
except:
return False
if type(offset) != int:
raise errors.AnsibleFilterError('Must pass in an integer')
if v.size > 1:
first_usable, last_usable = _first_last(v)
nth_ip = int(netaddr.IPAddress(int(v.ip) + offset))
if nth_ip >= first_usable and nth_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) + offset))
# Returns the previous nth usable ip within a network described by value.
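# Example (a sketch, assuming netaddr is installed):
# '192.168.122.10/24' | previous_nth_usable(2) -> '192.168.122.8'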
def previous_nth_usable(value, offset):
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
v = netaddr.IPNetwork(v)
except:
return False
if type(offset) != int:
raise errors.AnsibleFilterError('Must pass in an integer')
if v.size > 1:
first_usable, last_usable = _first_last(v)
nth_ip = int(netaddr.IPAddress(int(v.ip) - offset))
if nth_ip >= first_usable and nth_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) - offset))
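# Illustrative result (a sketch, not from the original source; values are made-up examples):
#
#   previous_nth_usable('192.168.0.5/24', 2)   # -> '192.168.0.3'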
def _range_checker(ip_check, first, last):
'''
Tests whether an ip address is within the bounds of the first and last address.
:param ip_check: The ip to test if it is within first and last.
:param first: The first IP in the range to test against.
:param last: The last IP in the range to test against.
:return: bool
'''
if ip_check >= first and ip_check <= last:
return True
else:
return False
def _address_normalizer(value):
'''
Used to validate an address or network type and return it in a consistent format.
    It also leaves room for future input types that are not currently supported, such as an address range.
:param value: The string representation of an address or network.
:return: The address or network in the normalized form.
'''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address' or vtype == "network":
v = ipaddr(value, 'subnet')
except:
return False
return v
def network_in_usable(value, test):
'''
    Checks whether 'test' is a usable address or addresses in 'value'
    :param value: The string representation of an address or network to test against.
:param test: The string representation of an address or network to validate if it is within the range of 'value'.
:return: bool
'''
# normalize value and test variables into an ipaddr
v = _address_normalizer(value)
w = _address_normalizer(test)
    # get first and last addresses as integers to compare value and test; the 'or' fallback catches the /32 case
v_first = ipaddr(ipaddr(v, 'first_usable') or ipaddr(v, 'address'), 'int')
v_last = ipaddr(ipaddr(v, 'last_usable') or ipaddr(v, 'address'), 'int')
w_first = ipaddr(ipaddr(w, 'network') or ipaddr(w, 'address'), 'int')
w_last = ipaddr(ipaddr(w, 'broadcast') or ipaddr(w, 'address'), 'int')
if _range_checker(w_first, v_first, v_last) and _range_checker(w_last, v_first, v_last):
return True
else:
return False
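# Illustrative results (a sketch, not from the original source; networks are made-up examples):
#
#   network_in_usable('192.168.0.0/24', '192.168.0.1')     # -> True  (usable host address)
#   network_in_usable('192.168.0.0/24', '192.168.0.255')   # -> False (broadcast is not usable)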
def network_in_network(value, test):
'''
Checks whether the 'test' address or addresses are in 'value', including broadcast and network
    :param value: The network address or range to test against.
:param test: The address or network to validate if it is within the range of 'value'.
:return: bool
'''
# normalize value and test variables into an ipaddr
v = _address_normalizer(value)
w = _address_normalizer(test)
    # get first and last addresses as integers to compare value and test; the 'or' fallback catches the /32 case
v_first = ipaddr(ipaddr(v, 'network') or ipaddr(v, 'address'), 'int')
v_last = ipaddr(ipaddr(v, 'broadcast') or ipaddr(v, 'address'), 'int')
w_first = ipaddr(ipaddr(w, 'network') or ipaddr(w, 'address'), 'int')
w_last = ipaddr(ipaddr(w, 'broadcast') or ipaddr(w, 'address'), 'int')
if _range_checker(w_first, v_first, v_last) and _range_checker(w_last, v_first, v_last):
return True
else:
return False
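# Illustrative results (a sketch, not from the original source; networks are made-up examples):
#
#   network_in_network('192.168.0.0/16', '192.168.32.0/24')   # -> True
#   network_in_network('192.168.0.0/24', '192.168.0.255')     # -> True  (broadcast counts here)
#   network_in_network('192.168.0.0/24', '10.0.0.1')          # -> False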
def reduce_on_network(value, network):
'''
Reduces a list of addresses to only the addresses that match a given network.
    :param value: The list of addresses to filter on.
    :param network: The network to validate against.
:return: The reduced list of addresses.
'''
# normalize network variable into an ipaddr
n = _address_normalizer(network)
    # get first and last addresses of the network as integers; the 'or' fallback catches the /32 case
n_first = ipaddr(ipaddr(n, 'network') or ipaddr(n, 'address'), 'int')
n_last = ipaddr(ipaddr(n, 'broadcast') or ipaddr(n, 'address'), 'int')
# create an empty list to fill and return
r = []
for address in value:
# normalize address variables into an ipaddr
a = _address_normalizer(address)
        # get first and last addresses of this entry as integers; the 'or' fallback catches the /32 case
a_first = ipaddr(ipaddr(a, 'network') or ipaddr(a, 'address'), 'int')
a_last = ipaddr(ipaddr(a, 'broadcast') or ipaddr(a, 'address'), 'int')
if _range_checker(a_first, n_first, n_last) and _range_checker(a_last, n_first, n_last):
r.append(address)
return r
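# Illustrative result (a sketch, not from the original source; addresses are made-up examples):
#
#   reduce_on_network(['192.168.0.34', '10.3.0.3', '192.168.2.34'], '192.168.0.0/24')
#   # -> ['192.168.0.34']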
# Returns the SLAAC address within a network for a given HW/MAC address.
# Usage:
#
# - prefix | slaac(mac)
def slaac(value, query=''):
''' Get the SLAAC address within given network '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
if ipaddr(value, 'version') != 6:
return False
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return False
try:
mac = hwaddr(query, alias='slaac')
eui = netaddr.EUI(mac)
except:
return False
return eui.ipv6(value.network)
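# Illustrative result (a sketch, not from the original source; prefix and MAC are made-up examples):
#
#   slaac('fdcf:1894:23b5:d38c::/64', '52:54:00:3b:cc:1e')
#   # -> fdcf:1894:23b5:d38c:5054:ff:fe3b:cc1e (EUI-64 interface ID derived from the MAC)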
# ---- HWaddr / MAC address filters ----
def hwaddr(value, query='', alias='hwaddr'):
''' Check if string is a HW/MAC address and filter it '''
query_func_extra_args = {
'': ('value',),
}
query_func_map = {
'': _empty_hwaddr_query,
'bare': _bare_query,
'bool': _bool_hwaddr_query,
'int': _int_hwaddr_query,
'cisco': _cisco_query,
'eui48': _win_query,
'linux': _linux_query,
'pgsql': _postgresql_query,
'postgresql': _postgresql_query,
'psql': _postgresql_query,
'unix': _unix_query,
'win': _win_query,
}
try:
v = netaddr.EUI(value)
except:
if query and query != 'bool':
raise errors.AnsibleFilterError(alias + ': not a hardware address: %s' % value)
extras = []
for arg in query_func_extra_args.get(query, tuple()):
extras.append(locals()[arg])
try:
return query_func_map[query](v, *extras)
except KeyError:
raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
return False
def macaddr(value, query=''):
return hwaddr(value, query, alias='macaddr')
def _need_netaddr(f_name, *args, **kwargs):
raise errors.AnsibleFilterError('The %s filter requires python-netaddr be '
'installed on the ansible controller' % f_name)
def ip4_hex(arg, delimiter=''):
''' Convert an IPv4 address to Hexadecimal notation '''
numbers = list(map(int, arg.split('.')))
return '{0:02x}{sep}{1:02x}{sep}{2:02x}{sep}{3:02x}'.format(*numbers, sep=delimiter)
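# Illustrative results (a sketch, not from the original source; the address is a made-up example):
#
#   ip4_hex('192.168.0.1')        # -> 'c0a80001'
#   ip4_hex('192.168.0.1', ':')   # -> 'c0:a8:00:01'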
# ---- Ansible filters ----
class FilterModule(object):
''' IP address and network manipulation filters '''
filter_map = {
# IP addresses and networks
'ipaddr': ipaddr,
'ipwrap': ipwrap,
'ip4_hex': ip4_hex,
'ipv4': ipv4,
'ipv6': ipv6,
'ipsubnet': ipsubnet,
'next_nth_usable': next_nth_usable,
'network_in_network': network_in_network,
'network_in_usable': network_in_usable,
'reduce_on_network': reduce_on_network,
'nthhost': nthhost,
'previous_nth_usable': previous_nth_usable,
'slaac': slaac,
# MAC / HW addresses
'hwaddr': hwaddr,
'macaddr': macaddr
}
def filters(self):
if netaddr:
return self.filter_map
else:
# Need to install python-netaddr for these filters to work
return dict((f, partial(_need_netaddr, f)) for f in self.filter_map)
| gpl-3.0 | 4,384,582,542,637,498,400 | 27.098765 | 117 | 0.561782 | false |
jmighion/ansible | lib/ansible/modules/network/nxos/nxos_vpc.py | 11 | 12130 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vpc
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages global VPC configuration
description:
- Manages global VPC configuration
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- The feature vpc must be enabled before this module can be used
    - If not using management vrf, the vrf must be globally defined on the device
      before it can be used in the pkl config
    - Although source IP isn't required on the command line, it is
      required when using this module. The PKL VRF must also be configured
      prior to using this module.
- Both pkl_src and pkl_dest are needed when changing PKL VRF.
options:
domain:
description:
- VPC domain
required: true
role_priority:
description:
- Role priority for device. Remember lower is better.
required: false
default: null
system_priority:
description:
- System priority device. Remember they must match between peers.
required: false
default: null
pkl_src:
description:
- Source IP address used for peer keepalive link
required: false
default: null
pkl_dest:
description:
- Destination (remote) IP address used for peer keepalive link
required: false
default: null
pkl_vrf:
description:
- VRF used for peer keepalive link
required: false
default: management
peer_gw:
description:
- Enables/Disables peer gateway
required: true
choices: ['true','false']
auto_recovery:
description:
- Enables/Disables auto recovery
required: true
choices: ['true','false']
delay_restore:
description:
- manages delay restore command and config value in seconds
required: false
default: null
state:
description:
- Manages desired state of the resource
required: true
choices: ['present','absent']
'''
EXAMPLES = '''
- name: configure a simple asn
nxos_vpc:
domain: 100
role_priority: 1000
system_priority: 2000
pkl_dest: 192.168.100.4
pkl_src: 10.1.100.20
peer_gw: true
auto_recovery: true
- name: configure
nxos_vpc:
domain: 100
role_priority: 32667
system_priority: 2000
peer_gw: true
pkl_src: 10.1.100.2
pkl_dest: 192.168.100.4
auto_recovery: true
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["vpc domain 100",
"peer-keepalive destination 192.168.100.4 source 10.1.100.20 vrf management",
"auto-recovery", "peer-gateway"]
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
CONFIG_ARGS = {
'role_priority': 'role priority {role_priority}',
'system_priority': 'system-priority {system_priority}',
'delay_restore': 'delay restore {delay_restore}',
'peer_gw': '{peer_gw} peer-gateway',
'auto_recovery': '{auto_recovery} auto-recovery',
}
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_vrf_list(module):
try:
body = run_commands(module, ['show vrf all | json'])[0]
vrf_table = body['TABLE_vrf']['ROW_vrf']
except (KeyError, AttributeError):
return []
vrf_list = []
if vrf_table:
for each in vrf_table:
vrf_list.append(str(each['vrf_name'].lower()))
return vrf_list
def get_vpc(module):
body = run_commands(module, ['show vpc | json'])[0]
domain = str(body['vpc-domain-id'])
auto_recovery = 'enabled' in str(body['vpc-auto-recovery-status']).lower()
vpc = {}
if domain != 'not configured':
delay_restore = None
pkl_src = None
role_priority = '32667'
system_priority = None
pkl_dest = None
pkl_vrf = None
peer_gw = False
run = get_config(module, flags=['vpc'])
if run:
vpc_list = run.split('\n')
for each in vpc_list:
if 'delay restore' in each:
line = each.split()
if len(line) == 3:
delay_restore = line[-1]
if 'peer-keepalive destination' in each:
line = each.split()
pkl_dest = line[2]
for word in line:
if 'source' in word:
index = line.index(word)
pkl_src = line[index + 1]
if 'role priority' in each:
line = each.split()
role_priority = line[-1]
if 'system-priority' in each:
line = each.split()
system_priority = line[-1]
if 'peer-gateway' in each:
peer_gw = True
body = run_commands(module, ['show vpc peer-keepalive | json'])[0]
if body:
pkl_dest = body['vpc-keepalive-dest']
if 'N/A' in pkl_dest:
pkl_dest = None
elif len(pkl_dest) == 2:
pkl_dest = pkl_dest[0]
pkl_vrf = str(body['vpc-keepalive-vrf'])
vpc['domain'] = domain
vpc['auto_recovery'] = auto_recovery
vpc['delay_restore'] = delay_restore
vpc['pkl_src'] = pkl_src
vpc['role_priority'] = role_priority
vpc['system_priority'] = system_priority
vpc['pkl_dest'] = pkl_dest
vpc['pkl_vrf'] = pkl_vrf
vpc['peer_gw'] = peer_gw
return vpc
def get_commands_to_config_vpc(module, vpc, domain, existing):
vpc = dict(vpc)
domain_only = vpc.get('domain')
pkl_src = vpc.get('pkl_src')
pkl_dest = vpc.get('pkl_dest')
pkl_vrf = vpc.get('pkl_vrf') or existing.get('pkl_vrf')
vpc['pkl_vrf'] = pkl_vrf
commands = []
if pkl_src or pkl_dest:
if pkl_src is None:
vpc['pkl_src'] = existing.get('pkl_src')
elif pkl_dest is None:
vpc['pkl_dest'] = existing.get('pkl_dest')
pkl_command = 'peer-keepalive destination {pkl_dest}'.format(**vpc) \
+ ' source {pkl_src} vrf {pkl_vrf}'.format(**vpc)
commands.append(pkl_command)
elif pkl_vrf:
pkl_src = existing.get('pkl_src')
pkl_dest = existing.get('pkl_dest')
if pkl_src and pkl_dest:
pkl_command = ('peer-keepalive destination {0}'
' source {1} vrf {2}'.format(pkl_dest, pkl_src, pkl_vrf))
commands.append(pkl_command)
if vpc.get('auto_recovery') is False:
vpc['auto_recovery'] = 'no'
else:
vpc['auto_recovery'] = ''
if 'peer_gw' in vpc:
if vpc.get('peer_gw') is False:
vpc['peer_gw'] = 'no'
else:
vpc['peer_gw'] = ''
else:
if existing.get('peer_gw') is False:
vpc['peer_gw'] = 'no'
else:
vpc['peer_gw'] = ''
for param in vpc:
command = CONFIG_ARGS.get(param)
if command is not None:
command = command.format(**vpc).strip()
if 'peer-gateway' in command:
commands.append('terminal dont-ask')
commands.append(command)
if commands or domain_only:
commands.insert(0, 'vpc domain {0}'.format(domain))
return commands
def get_commands_to_remove_vpc_interface(portchannel, config_value):
commands = []
command = 'no vpc {0}'.format(config_value)
commands.append(command)
commands.insert(0, 'interface port-channel{0}'.format(portchannel))
return commands
def main():
argument_spec = dict(
domain=dict(required=True, type='str'),
role_priority=dict(required=False, type='str'),
system_priority=dict(required=False, type='str'),
pkl_src=dict(required=False),
pkl_dest=dict(required=False),
pkl_vrf=dict(required=False, default='management'),
peer_gw=dict(required=True, type='bool'),
auto_recovery=dict(required=True, type='bool'),
delay_restore=dict(required=False, type='str'),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
results = {'changed': False, 'warnings': warnings}
domain = module.params['domain']
role_priority = module.params['role_priority']
system_priority = module.params['system_priority']
pkl_src = module.params['pkl_src']
pkl_dest = module.params['pkl_dest']
pkl_vrf = module.params['pkl_vrf']
peer_gw = module.params['peer_gw']
auto_recovery = module.params['auto_recovery']
delay_restore = module.params['delay_restore']
state = module.params['state']
args = dict(domain=domain, role_priority=role_priority,
system_priority=system_priority, pkl_src=pkl_src,
pkl_dest=pkl_dest, pkl_vrf=pkl_vrf, peer_gw=peer_gw,
auto_recovery=auto_recovery,
delay_restore=delay_restore)
if not (pkl_src and pkl_dest and pkl_vrf):
# if only the source or dest is set, it'll fail and ask to set the
# other
if pkl_src or pkl_dest:
module.fail_json(msg='source AND dest IP for pkl are required at '
'this time (although source is technically not '
' required by the device.)')
args.pop('pkl_src')
args.pop('pkl_dest')
args.pop('pkl_vrf')
if pkl_vrf:
if pkl_vrf.lower() not in get_vrf_list(module):
module.fail_json(msg='The VRF you are trying to use for the peer '
'keepalive link is not on device yet. Add it'
' first, please.')
proposed = dict((k, v) for k, v in args.items() if v is not None)
existing = get_vpc(module)
commands = []
if state == 'present':
delta = set(proposed.items()).difference(existing.items())
if delta:
command = get_commands_to_config_vpc(module, delta, domain, existing)
commands.append(command)
elif state == 'absent':
if existing:
if domain != existing['domain']:
module.fail_json(msg="You are trying to remove a domain that "
"does not exist on the device")
else:
commands.append('terminal dont-ask')
commands.append('no vpc domain {0}'.format(domain))
cmds = flatten_list(commands)
results['commands'] = cmds
if cmds:
results['changed'] = True
if not module.check_mode:
load_config(module, cmds)
if 'configure' in cmds:
cmds.pop(0)
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 | 8,539,310,543,687,217,000 | 30.671018 | 89 | 0.587057 | false |
eptmp3/Sick-Beard | lib/subliminal/services/podnapisiweb.py | 22 | 8697 | # -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <[email protected]>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..exceptions import DownloadFailedError
from ..language import Language, language_set
from ..subtitles import ResultSubtitle
from ..utils import get_keywords
from ..videos import Episode, Movie
from bs4 import BeautifulSoup
import guessit
import logging
import re
from subliminal.subtitles import get_subtitle_path
from sickbeard import db
from sickbeard import logger as glog
logger = logging.getLogger("subliminal")
class PodnapisiWeb(ServiceBase):
server_url = 'http://simple.podnapisi.net'
site_url = 'http://www.podnapisi.net'
api_based = True
user_agent = 'Subliminal/0.6'
videos = [Episode, Movie]
require_video = False
languages = language_set(['Albanian', 'Arabic', 'Spanish (Argentina)', 'Belarusian', 'Bosnian', 'Portuguese (Brazil)', 'Bulgarian', 'Catalan',
'Chinese', 'Croatian', 'Czech', 'Danish', 'Dutch', 'English', 'Estonian', 'Persian',
'Finnish', 'French', 'German', 'gre', 'Kalaallisut', 'Hebrew', 'Hindi', 'Hungarian',
'Icelandic', 'Indonesian', 'Irish', 'Italian', 'Japanese', 'Kazakh', 'Korean', 'Latvian',
'Lithuanian', 'Macedonian', 'Malay', 'Norwegian', 'Polish', 'Portuguese', 'Romanian',
'Russian', 'Serbian', 'Sinhala', 'Slovak', 'Slovenian', 'Spanish', 'Swedish', 'Thai',
'Turkish', 'Ukrainian', 'Vietnamese'])
language_map = {Language('Albanian'): 29, Language('Arabic'): 12, Language('Spanish (Argentina)'): 14, Language('Belarusian'): 50,
Language('Bosnian'): 10, Language('Portuguese (Brazil)'): 48, Language('Bulgarian'): 33, Language('Catalan'): 53,
Language('Chinese'): 17, Language('Croatian'): 38, Language('Czech'): 7, Language('Danish'): 24,
Language('Dutch'): 23, Language('English'): 2, Language('Estonian'): 20, Language('Persian'): 52,
Language('Finnish'): 31, Language('French'): 8, Language('German'): 5, Language('gre'): 16,
Language('Kalaallisut'): 57, Language('Hebrew'): 22, Language('Hindi'): 42, Language('Hungarian'): 15,
Language('Icelandic'): 6, Language('Indonesian'): 54, Language('Irish'): 49, Language('Italian'): 9,
Language('Japanese'): 11, Language('Kazakh'): 58, Language('Korean'): 4, Language('Latvian'): 21,
Language('Lithuanian'): 19, Language('Macedonian'): 35, Language('Malay'): 55,
Language('Norwegian'): 3, Language('Polish'): 26, Language('Portuguese'): 32, Language('Romanian'): 13,
Language('Russian'): 27, Language('Serbian'): 36, Language('Sinhala'): 56, Language('Slovak'): 37,
Language('Slovenian'): 1, Language('Spanish'): 28, Language('Swedish'): 25, Language('Thai'): 44,
Language('Turkish'): 30, Language('Ukrainian'): 46, Language('Vietnamese'): 51,
29: Language('Albanian'), 12: Language('Arabic'), 14: Language('Spanish (Argentina)'), 50: Language('Belarusian'),
10: Language('Bosnian'), 48: Language('Portuguese (Brazil)'), 33: Language('Bulgarian'), 53: Language('Catalan'),
17: Language('Chinese'), 38: Language('Croatian'), 7: Language('Czech'), 24: Language('Danish'),
23: Language('Dutch'), 2: Language('English'), 20: Language('Estonian'), 52: Language('Persian'),
31: Language('Finnish'), 8: Language('French'), 5: Language('German'), 16: Language('gre'),
57: Language('Kalaallisut'), 22: Language('Hebrew'), 42: Language('Hindi'), 15: Language('Hungarian'),
6: Language('Icelandic'), 54: Language('Indonesian'), 49: Language('Irish'), 9: Language('Italian'),
11: Language('Japanese'), 58: Language('Kazakh'), 4: Language('Korean'), 21: Language('Latvian'),
19: Language('Lithuanian'), 35: Language('Macedonian'), 55: Language('Malay'), 40: Language('Chinese'),
3: Language('Norwegian'), 26: Language('Polish'), 32: Language('Portuguese'), 13: Language('Romanian'),
27: Language('Russian'), 36: Language('Serbian'), 47: Language('Serbian'), 56: Language('Sinhala'),
37: Language('Slovak'), 1: Language('Slovenian'), 28: Language('Spanish'), 25: Language('Swedish'),
44: Language('Thai'), 30: Language('Turkish'), 46: Language('Ukrainian'), Language('Vietnamese'): 51}
def list_checked(self, video, languages):
if isinstance(video, Movie):
return self.query(video.path or video.release, languages, video.title, year=video.year,
keywords=get_keywords(video.guess))
if isinstance(video, Episode):
return self.query(video.path or video.release, languages, video.series, season=video.season,
episode=video.episode, keywords=get_keywords(video.guess))
def query(self, filepath, languages, title, season=None, episode=None, year=None, keywords=None):
myDB = db.DBConnection()
myDBcache = db.DBConnection("cache.db")
sql_show_id = myDB.select("SELECT tvdb_id, show_name FROM tv_shows WHERE show_name LIKE ?", ['%'+title+'%'])
if sql_show_id[0][0]:
sql_scene = myDB.select("SELECT scene_season, scene_episode FROM tv_episodes WHERE showid = ? and season = ? and episode = ?", [sql_show_id[0][0],season,episode])
real_name=sql_show_id[0][1]
if sql_scene[0][0]:
season=sql_scene[0][0]
episode= sql_scene[0][1]
sql_custom_names = myDBcache.select("SELECT show_name FROM scene_exceptions WHERE tvdb_id = ? and show_name<> ? ORDER BY exception_id asc", [sql_show_id[0][0],real_name])
if sql_custom_names:
title=sql_custom_names[0][0]
glog.log(u'Searching Subtitles on Podnapisiweb with title : %s season : %s episode : %s' % (title,season,episode))
params = {'sXML': 1, 'sK': title, 'sJ': ','.join([str(self.get_code(l)) for l in languages])}
if season is not None:
params['sTS'] = season
if episode is not None:
params['sTE'] = episode
if year is not None:
params['sY'] = year
if keywords is not None:
params['sR'] = keywords
r = self.session.get(self.server_url + '/ppodnapisi/search', params=params)
if r.status_code != 200:
logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
return []
subtitles = []
soup = BeautifulSoup(r.content, self.required_features)
for sub in soup('subtitle'):
if 'n' in sub.flags:
logger.debug(u'Skipping hearing impaired')
continue
            # NOTE: 'l' is the loop variable leaked from the 'sJ' list comprehension
            # above (Python 2 scoping), i.e. one of the requested languages rather than
            # the language reported for this subtitle entry.
            language = l
confidence = float(sub.rating.text) / 5.0
sub_keywords = set()
for release in sub.release.text.split():
sub_keywords |= get_keywords(guessit.guess_file_info(release + '.srt', 'autodetect'))
sub_path = get_subtitle_path(filepath, language, self.config.multi)
subtitle = ResultSubtitle(sub_path, language, self.__class__.__name__.lower(),
sub.url.text, confidence=confidence, keywords=sub_keywords)
subtitles.append(subtitle)
return subtitles
def download(self, subtitle):
r = self.session.get(subtitle.link)
if r.status_code != 200:
raise DownloadFailedError()
soup = BeautifulSoup(r.content)
self.download_zip_file(self.server_url + soup.find('a', href=re.compile('download'))['href'], subtitle.path)
return subtitle
Service = PodnapisiWeb
| gpl-3.0 | 2,306,557,217,145,818,600 | 62.948529 | 182 | 0.602622 | false |
ahmadRagheb/goldenHR | erpnext/setup/utils.py | 3 | 4101 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt
from frappe.utils import get_datetime_str, nowdate
def get_root_of(doctype):
"""Get root element of a DocType with a tree structure"""
result = frappe.db.sql_list("""select name from `tab%s`
where lft=1 and rgt=(select max(rgt) from `tab%s` where docstatus < 2)""" %
(doctype, doctype))
return result[0] if result else None
def get_ancestors_of(doctype, name):
"""Get ancestor elements of a DocType with a tree structure"""
lft, rgt = frappe.db.get_value(doctype, name, ["lft", "rgt"])
result = frappe.db.sql_list("""select name from `tab%s`
where lft<%s and rgt>%s order by lft desc""" % (doctype, "%s", "%s"), (lft, rgt))
return result or []
def before_tests():
frappe.clear_cache()
# complete setup if missing
from frappe.desk.page.setup_wizard.setup_wizard import setup_complete
if not frappe.get_list("Company"):
setup_complete({
"currency" :"USD",
"full_name" :"Test User",
"company_name" :"Wind Power LLC",
"timezone" :"America/New_York",
"company_abbr" :"WP",
"industry" :"Manufacturing",
"country" :"United States",
"fy_start_date" :"2011-01-01",
"fy_end_date" :"2011-12-31",
"language" :"english",
"company_tagline" :"Testing",
"email" :"[email protected]",
"password" :"test",
"chart_of_accounts" : "Standard",
"domain" : "Manufacturing"
})
frappe.db.sql("delete from `tabLeave Allocation`")
frappe.db.sql("delete from `tabLeave Application`")
frappe.db.sql("delete from `tabSalary Slip`")
frappe.db.sql("delete from `tabItem Price`")
frappe.db.set_value("Stock Settings", None, "auto_insert_price_list_rate_if_missing", 0)
enable_all_roles_and_domains()
frappe.db.commit()
@frappe.whitelist()
def get_exchange_rate(from_currency, to_currency, transaction_date=None):
if not transaction_date:
transaction_date = nowdate()
if not (from_currency and to_currency):
        # manqala 19/09/2016: Should this be an empty return or should it throw an exception?
return
if from_currency == to_currency:
return 1
# cksgb 19/09/2016: get last entry in Currency Exchange with from_currency and to_currency.
entries = frappe.get_all("Currency Exchange", fields = ["exchange_rate"],
filters=[
["date", "<=", get_datetime_str(transaction_date)],
["from_currency", "=", from_currency],
["to_currency", "=", to_currency]
], order_by="date desc", limit=1)
if entries:
return flt(entries[0].exchange_rate)
try:
cache = frappe.cache()
key = "currency_exchange_rate:{0}:{1}".format(from_currency, to_currency)
value = cache.get(key)
if not value:
import requests
response = requests.get("http://api.fixer.io/latest", params={
"base": from_currency,
"symbols": to_currency
})
# expire in 6 hours
response.raise_for_status()
value = response.json()["rates"][to_currency]
cache.setex(key, value, 6 * 60 * 60)
return flt(value)
except:
frappe.msgprint(_("Unable to find exchange rate for {0} to {1} for key date {2}. Please create a Currency Exchange record manually").format(from_currency, to_currency, transaction_date))
return 0.0
def enable_all_roles_and_domains():
""" enable all roles and domain for testing """
roles = frappe.get_list("Role", filters={"disabled": 1})
for role in roles:
_role = frappe.get_doc("Role", role.get("name"))
_role.disabled = 0
_role.flags.ignore_mandatory = True
_role.flags.ignore_permissions = True
_role.save()
# add all roles to users
user = frappe.get_doc("User", "Administrator")
user.add_roles(*[role.get("name") for role in roles])
domains = frappe.get_list("Domain")
if not domains:
return
domain_settings = frappe.get_doc("Domain Settings", "Domain Settings")
domain_settings.set("active_domains", [])
for domain in domains:
row = domain_settings.append("active_domains", {})
row.domain=domain.get("name")
domain_settings.save()
| gpl-3.0 | 1,440,871,702,058,972,200 | 32.341463 | 188 | 0.682273 | false |
parthea/pydatalab | datalab/utils/_dataflow_job.py | 6 | 1506 | # Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Implements DataFlow Job functionality."""
from . import _job
class DataflowJob(_job.Job):
"""Represents a DataFlow Job.
"""
def __init__(self, runner_results):
"""Initializes an instance of a DataFlow Job.
Args:
runner_results: a DataflowPipelineResult returned from Pipeline.run().
"""
super(DataflowJob, self).__init__(runner_results._job.name)
self._runner_results = runner_results
def _refresh_state(self):
""" Refresh the job info. """
# DataFlow's DataflowPipelineResult does not refresh state, so we have to do it ourselves
# as a workaround.
self._runner_results._job = (
self._runner_results._runner.dataflow_client.get_job(self._runner_results.job_id()))
self._is_complete = self._runner_results.state in ['STOPPED', 'DONE', 'FAILED', 'CANCELLED']
    self._fatal_error = getattr(self._runner_results._runner, 'last_error_msg', None)
| apache-2.0 | 4,318,586,995,851,897,000 | 36.65 | 99 | 0.712483 | false |
aam-at/tensorflow | tensorflow/python/data/experimental/ops/grouping.py | 6 | 19761 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grouping dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.group_by_reducer")
def group_by_reducer(key_func, reducer):
"""A transformation that groups elements and performs a reduction.
  This transformation maps each element of a dataset to a key using `key_func` and
groups the elements by key. The `reducer` is used to process each group; its
`init_func` is used to initialize state for each group when it is created, the
`reduce_func` is used to update the state every time an element is mapped to
the matching group, and the `finalize_func` is used to map the final state to
an output value.
Args:
key_func: A function mapping a nested structure of tensors
(having shapes and types defined by `self.output_shapes` and
`self.output_types`) to a scalar `tf.int64` tensor.
reducer: An instance of `Reducer`, which captures the reduction logic using
the `init_func`, `reduce_func`, and `finalize_func` functions.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return _GroupByReducerDataset(dataset, key_func, reducer)
return _apply_fn
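# A minimal usage sketch (not part of the original module): sum elements per key.
# Assumes TF 2.x eager execution with `import tensorflow as tf` and `import numpy as np`.
#
#   ds = tf.data.Dataset.range(10)
#   reducer = tf.data.experimental.Reducer(
#       init_func=lambda _: np.int64(0),
#       reduce_func=lambda state, value: state + value,
#       finalize_func=lambda state: state)
#   ds = ds.apply(tf.data.experimental.group_by_reducer(
#       key_func=lambda x: x % 2, reducer=reducer))
#   # yields one element per key: 20 (sum of the evens) and 25 (sum of the odds)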
@tf_export("data.experimental.group_by_window")
def group_by_window(key_func,
reduce_func,
window_size=None,
window_size_func=None):
"""A transformation that groups windows of elements by key and reduces them.
This transformation maps each consecutive element in a dataset to a key
using `key_func` and groups the elements by key. It then applies
`reduce_func` to at most `window_size_func(key)` elements matching the same
key. All except the final window for each key will contain
`window_size_func(key)` elements; the final window may be smaller.
You may provide either a constant `window_size` or a window size determined by
the key through `window_size_func`.
Args:
key_func: A function mapping a nested structure of tensors
(having shapes and types defined by `self.output_shapes` and
`self.output_types`) to a scalar `tf.int64` tensor.
reduce_func: A function mapping a key and a dataset of up to `window_size`
consecutive elements matching that key to another dataset.
window_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements matching the same key to combine in a single
batch, which will be passed to `reduce_func`. Mutually exclusive with
`window_size_func`.
window_size_func: A function mapping a key to a `tf.int64` scalar
`tf.Tensor`, representing the number of consecutive elements matching
the same key to combine in a single batch, which will be passed to
`reduce_func`. Mutually exclusive with `window_size`.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: if neither or both of {`window_size`, `window_size_func`} are
passed.
"""
if (window_size is not None and window_size_func or
not (window_size is not None or window_size_func)):
raise ValueError("Must pass either window_size or window_size_func.")
if window_size is not None:
def constant_window_func(unused_key):
return ops.convert_to_tensor(window_size, dtype=dtypes.int64)
window_size_func = constant_window_func
assert window_size_func is not None
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return _GroupByWindowDataset(dataset, key_func, reduce_func,
window_size_func)
return _apply_fn
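# A minimal usage sketch (not part of the original module): window elements by parity.
# Assumes TF 2.x eager execution with `import tensorflow as tf`.
#
#   ds = tf.data.Dataset.range(12)
#   ds = ds.apply(tf.data.experimental.group_by_window(
#       key_func=lambda x: x % 2,
#       reduce_func=lambda key, window: window.batch(3),
#       window_size=3))
#   # yields batches such as [0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]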
@tf_export("data.experimental.bucket_by_sequence_length")
def bucket_by_sequence_length(element_length_func,
bucket_boundaries,
bucket_batch_sizes,
padded_shapes=None,
padding_values=None,
pad_to_bucket_boundary=False,
no_padding=False,
drop_remainder=False):
"""A transformation that buckets elements in a `Dataset` by length.
Elements of the `Dataset` are grouped together by length and then are padded
and batched.
This is useful for sequence tasks in which the elements have variable length.
Grouping together elements that have similar lengths reduces the total
fraction of padding in a batch which increases training step efficiency.
Args:
element_length_func: function from element in `Dataset` to `tf.int32`,
determines the length of the element, which will determine the bucket it
goes into.
bucket_boundaries: `list<int>`, upper length boundaries of the buckets.
bucket_batch_sizes: `list<int>`, batch size per bucket. Length should be
`len(bucket_boundaries) + 1`.
padded_shapes: Nested structure of `tf.TensorShape` to pass to
`tf.data.Dataset.padded_batch`. If not provided, will use
`dataset.output_shapes`, which will result in variable length dimensions
being padded out to the maximum length in each batch.
padding_values: Values to pad with, passed to
`tf.data.Dataset.padded_batch`. Defaults to padding with 0.
pad_to_bucket_boundary: bool, if `False`, will pad dimensions with unknown
size to maximum length in batch. If `True`, will pad dimensions with
unknown size to bucket boundary minus 1 (i.e., the maximum length in each
bucket), and caller must ensure that the source `Dataset` does not contain
any elements with length longer than `max(bucket_boundaries)`.
no_padding: `bool`, indicates whether to pad the batch features (features
need to be either of type `tf.sparse.SparseTensor` or of same shape).
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in the case it has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: if `len(bucket_batch_sizes) != len(bucket_boundaries) + 1`.
"""
with ops.name_scope("bucket_by_seq_length"):
if len(bucket_batch_sizes) != (len(bucket_boundaries) + 1):
raise ValueError(
"len(bucket_batch_sizes) must equal len(bucket_boundaries) + 1")
batch_sizes = constant_op.constant(bucket_batch_sizes, dtype=dtypes.int64)
def element_to_bucket_id(*args):
"""Return int64 id of the length bucket for this element."""
seq_length = element_length_func(*args)
boundaries = list(bucket_boundaries)
buckets_min = [np.iinfo(np.int32).min] + boundaries
buckets_max = boundaries + [np.iinfo(np.int32).max]
conditions_c = math_ops.logical_and(
math_ops.less_equal(buckets_min, seq_length),
math_ops.less(seq_length, buckets_max))
bucket_id = math_ops.reduce_min(array_ops.where(conditions_c))
return bucket_id
def window_size_fn(bucket_id):
# The window size is set to the batch size for this bucket
window_size = batch_sizes[bucket_id]
return window_size
def make_padded_shapes(shapes, none_filler=None):
padded = []
for shape in nest.flatten(shapes):
shape = tensor_shape.TensorShape(shape)
shape = [
none_filler if tensor_shape.dimension_value(d) is None else d
for d in shape
]
padded.append(shape)
return nest.pack_sequence_as(shapes, padded)
def batching_fn(bucket_id, grouped_dataset):
"""Batch elements in dataset."""
batch_size = window_size_fn(bucket_id)
if no_padding:
return grouped_dataset.batch(batch_size, drop_remainder=drop_remainder)
none_filler = None
if pad_to_bucket_boundary:
err_msg = ("When pad_to_bucket_boundary=True, elements must have "
"length < max(bucket_boundaries).")
check = check_ops.assert_less(
bucket_id,
constant_op.constant(len(bucket_batch_sizes) - 1,
dtype=dtypes.int64),
message=err_msg)
with ops.control_dependencies([check]):
boundaries = constant_op.constant(bucket_boundaries,
dtype=dtypes.int64)
bucket_boundary = boundaries[bucket_id]
none_filler = bucket_boundary - 1
input_shapes = dataset_ops.get_legacy_output_shapes(grouped_dataset)
shapes = make_padded_shapes(padded_shapes or input_shapes,
none_filler=none_filler)
return grouped_dataset.padded_batch(
batch_size, shapes, padding_values, drop_remainder=drop_remainder)
def _apply_fn(dataset):
return dataset.apply(
group_by_window(element_to_bucket_id, batching_fn,
window_size_func=window_size_fn))
return _apply_fn
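# A minimal usage sketch (not part of the original module): bucket variable-length rows.
# Assumes TF 2.x eager execution with `import tensorflow as tf`.
#
#   elements = [[0], [1, 2, 3, 4], [5, 6, 7], [8, 9]]
#   ds = tf.data.Dataset.from_generator(
#       lambda: iter(elements), output_types=tf.int64, output_shapes=[None])
#   ds = ds.apply(tf.data.experimental.bucket_by_sequence_length(
#       element_length_func=lambda elem: tf.shape(elem)[0],
#       bucket_boundaries=[2, 4],       # three buckets: len < 2, 2 <= len < 4, len >= 4
#       bucket_batch_sizes=[2, 2, 2]))
#   # rows that land in the same bucket are padded to a common length and batched together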
class _GroupByReducerDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that groups its input and performs a reduction."""
def __init__(self, input_dataset, key_func, reducer):
"""See `group_by_reducer()` for details."""
self._input_dataset = input_dataset
self._make_key_func(key_func, input_dataset)
self._make_init_func(reducer.init_func)
self._make_reduce_func(reducer.reduce_func, input_dataset)
self._make_finalize_func(reducer.finalize_func)
variant_tensor = ged_ops.experimental_group_by_reducer_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._key_func.function.captured_inputs,
self._init_func.function.captured_inputs,
self._reduce_func.function.captured_inputs,
self._finalize_func.function.captured_inputs,
key_func=self._key_func.function,
init_func=self._init_func.function,
reduce_func=self._reduce_func.function,
finalize_func=self._finalize_func.function,
**self._flat_structure)
super(_GroupByReducerDataset, self).__init__(input_dataset, variant_tensor)
def _make_key_func(self, key_func, input_dataset):
"""Make wrapping defun for key_func."""
self._key_func = dataset_ops.StructuredFunctionWrapper(
key_func, self._transformation_name(), dataset=input_dataset)
if not self._key_func.output_structure.is_compatible_with(
tensor_spec.TensorSpec([], dtypes.int64)):
raise ValueError(
"`key_func` must return a single tf.int64 tensor. "
"Got type=%s and shape=%s"
% (self._key_func.output_types, self._key_func.output_shapes))
def _make_init_func(self, init_func):
"""Make wrapping defun for init_func."""
self._init_func = dataset_ops.StructuredFunctionWrapper(
init_func,
self._transformation_name(),
input_structure=tensor_spec.TensorSpec([], dtypes.int64))
def _make_reduce_func(self, reduce_func, input_dataset):
"""Make wrapping defun for reduce_func."""
# Iteratively rerun the reduce function until reaching a fixed point on
# `self._state_structure`.
self._state_structure = self._init_func.output_structure
state_types = self._init_func.output_types
state_shapes = self._init_func.output_shapes
state_classes = self._init_func.output_classes
need_to_rerun = True
while need_to_rerun:
wrapped_func = dataset_ops.StructuredFunctionWrapper(
reduce_func,
self._transformation_name(),
input_structure=(self._state_structure, input_dataset.element_spec),
add_to_graph=False)
# Extract and validate class information from the returned values.
for new_state_class, state_class in zip(
nest.flatten(wrapped_func.output_classes),
nest.flatten(state_classes)):
if not issubclass(new_state_class, state_class):
raise TypeError(
"The element classes for the new state must match the initial "
"state. Expected %s; got %s." %
(self._state_classes, wrapped_func.output_classes))
# Extract and validate type information from the returned values.
for new_state_type, state_type in zip(
nest.flatten(wrapped_func.output_types), nest.flatten(state_types)):
if new_state_type != state_type:
raise TypeError(
"The element types for the new state must match the initial "
"state. Expected %s; got %s." %
(self._init_func.output_types, wrapped_func.output_types))
# Extract shape information from the returned values.
flat_state_shapes = nest.flatten(state_shapes)
flat_new_state_shapes = nest.flatten(wrapped_func.output_shapes)
weakened_state_shapes = [
original.most_specific_compatible_shape(new)
for original, new in zip(flat_state_shapes, flat_new_state_shapes)
]
need_to_rerun = False
for original_shape, weakened_shape in zip(flat_state_shapes,
weakened_state_shapes):
if original_shape.ndims is not None and (
weakened_shape.ndims is None or
original_shape.as_list() != weakened_shape.as_list()):
need_to_rerun = True
break
if need_to_rerun:
state_shapes = nest.pack_sequence_as(
self._init_func.output_shapes, weakened_state_shapes)
self._state_structure = structure.convert_legacy_structure(
state_types, state_shapes, state_classes)
self._reduce_func = wrapped_func
self._reduce_func.function.add_to_graph(ops.get_default_graph())
def _make_finalize_func(self, finalize_func):
"""Make wrapping defun for finalize_func."""
self._finalize_func = dataset_ops.StructuredFunctionWrapper(
finalize_func, self._transformation_name(),
input_structure=self._state_structure)
@property
def element_spec(self):
return self._finalize_func.output_structure
def _functions(self):
return [
self._key_func, self._init_func, self._reduce_func, self._finalize_func
]
def _transformation_name(self):
return "tf.data.experimental.group_by_reducer()"
class _GroupByWindowDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that groups its input and performs a windowed reduction."""
def __init__(self, input_dataset, key_func, reduce_func, window_size_func):
"""See `group_by_window()` for details."""
self._input_dataset = input_dataset
self._make_key_func(key_func, input_dataset)
self._make_reduce_func(reduce_func, input_dataset)
self._make_window_size_func(window_size_func)
variant_tensor = ged_ops.group_by_window_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._key_func.function.captured_inputs,
self._reduce_func.function.captured_inputs,
self._window_size_func.function.captured_inputs,
key_func=self._key_func.function,
reduce_func=self._reduce_func.function,
window_size_func=self._window_size_func.function,
**self._flat_structure)
super(_GroupByWindowDataset, self).__init__(input_dataset, variant_tensor)
def _make_window_size_func(self, window_size_func):
"""Make wrapping defun for window_size_func."""
def window_size_func_wrapper(key):
return ops.convert_to_tensor(window_size_func(key), dtype=dtypes.int64)
self._window_size_func = dataset_ops.StructuredFunctionWrapper(
window_size_func_wrapper,
self._transformation_name(),
input_structure=tensor_spec.TensorSpec([], dtypes.int64))
if not self._window_size_func.output_structure.is_compatible_with(
tensor_spec.TensorSpec([], dtypes.int64)):
raise ValueError(
"`window_size_func` must return a single tf.int64 scalar tensor.")
def _make_key_func(self, key_func, input_dataset):
"""Make wrapping defun for key_func."""
def key_func_wrapper(*args):
return ops.convert_to_tensor(key_func(*args), dtype=dtypes.int64)
self._key_func = dataset_ops.StructuredFunctionWrapper(
key_func_wrapper, self._transformation_name(), dataset=input_dataset)
if not self._key_func.output_structure.is_compatible_with(
tensor_spec.TensorSpec([], dtypes.int64)):
raise ValueError(
"`key_func` must return a single tf.int64 scalar tensor.")
def _make_reduce_func(self, reduce_func, input_dataset):
"""Make wrapping defun for reduce_func."""
nested_dataset = dataset_ops.DatasetSpec(
input_dataset.element_spec)
input_structure = (tensor_spec.TensorSpec([], dtypes.int64), nested_dataset)
self._reduce_func = dataset_ops.StructuredFunctionWrapper(
reduce_func, self._transformation_name(),
input_structure=input_structure)
if not isinstance(
self._reduce_func.output_structure, dataset_ops.DatasetSpec):
raise TypeError("`reduce_func` must return a `Dataset` object.")
# pylint: disable=protected-access
self._element_spec = (
self._reduce_func.output_structure._element_spec)
@property
def element_spec(self):
return self._element_spec
def _functions(self):
return [self._key_func, self._reduce_func, self._window_size_func]
def _transformation_name(self):
return "tf.data.experimental.group_by_window()"
@tf_export("data.experimental.Reducer")
class Reducer(object):
"""A reducer is used for reducing a set of elements.
A reducer is represented as a tuple of the three functions:
1) initialization function: key => initial state
2) reduce function: (old state, input) => new state
3) finalization function: state => result
"""
def __init__(self, init_func, reduce_func, finalize_func):
self._init_func = init_func
self._reduce_func = reduce_func
self._finalize_func = finalize_func
@property
def init_func(self):
return self._init_func
@property
def reduce_func(self):
return self._reduce_func
@property
def finalize_func(self):
return self._finalize_func
| apache-2.0 | -437,927,740,501,902,140 | 41.314775 | 80 | 0.675927 | false |
kuznetz/rabbitvcs | clients/gedit/rabbitvcs-plugin.py | 1 | 34031 | #
# This is a Gedit plugin to allow for RabbitVCS integration in the Gedit
# text editor.
#
# Copyright (C) 2008-2008 by Adam Plumb <[email protected]>
#
# RabbitVCS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# RabbitVCS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RabbitVCS; If not, see <http://www.gnu.org/licenses/>.
#
from gettext import gettext as _
import os
import gtk
import gedit
import rabbitvcs.util.helper
from rabbitvcs.vcs import create_vcs_instance
from rabbitvcs.util.contextmenu import GtkFilesContextMenuConditions, \
GtkFilesContextMenuCallbacks, MainContextMenu, MainContextMenuCallbacks, \
MenuBuilder, GtkContextMenuCaller
from rabbitvcs.util.contextmenuitems import *
# Menu item example, insert a new item in the Tools menu
ui_str = """<ui>
<menubar name="MenuBar">
<placeholder name="ExtraMenu_1">
<menu name="RabbitVCSMenu" action="RabbitVCSMenu">
<menu name="RabbitVCS::RabbitVCS_Svn" action="RabbitVCS::RabbitVCS_Svn">
<menuitem name="RabbitVCS::Update" action="RabbitVCS::Update" />
<menuitem name="RabbitVCS::Commit" action="RabbitVCS::Commit" />
<menuitem name="RabbitVCS::Checkout" action="RabbitVCS::Checkout" />
<menu name="RabbitVCS::Diff_Menu" action="RabbitVCS::Diff_Menu">
<menuitem name="RabbitVCS::Diff" action="RabbitVCS::Diff" />
<menuitem name="RabbitVCS::Diff_Previous_Revision" action="RabbitVCS::Diff_Previous_Revision" />
<menuitem name="RabbitVCS::Diff_Multiple" action="RabbitVCS::Diff_Multiple" />
<menuitem name="RabbitVCS::Compare_Tool" action="RabbitVCS::Compare_Tool" />
<menuitem name="RabbitVCS::Compare_Tool_Previous_Revision" action="RabbitVCS::Compare_Tool_Previous_Revision" />
<menuitem name="RabbitVCS::Compare_Tool_Multiple" action="RabbitVCS::Compare_Tool_Multiple" />
<menuitem name="RabbitVCS::Show_Changes" action="RabbitVCS::Show_Changes" />
</menu>
<menuitem name="RabbitVCS::Show_Log" action="RabbitVCS::Show_Log" />
<menuitem name="RabbitVCS::Repo_Browser" action="RabbitVCS::Repo_Browser" />
<menuitem name="RabbitVCS::Check_For_Modifications" action="RabbitVCS::Check_For_Modifications" />
<separator />
<menuitem name="RabbitVCS::Add" action="RabbitVCS::Add" />
<menu name="RabbitVCS::Add_To_Ignore_List" action="RabbitVCS::Add_To_Ignore_List">
<menuitem name="RabbitVCS::Ignore_By_Filename" action="RabbitVCS::Ignore_By_Filename" />
<menuitem name="RabbitVCS::Ignore_By_File_Extension" action="RabbitVCS::Ignore_By_File_Extension" />
</menu>
<separator />
<menuitem name="RabbitVCS::Update_To_Revision" action="RabbitVCS::Update_To_Revision" />
<menuitem name="RabbitVCS::Rename" action="RabbitVCS::Rename" />
<menuitem name="RabbitVCS::Delete" action="RabbitVCS::Delete" />
<menuitem name="RabbitVCS::Revert" action="RabbitVCS::Revert" />
<menuitem name="RabbitVCS::Edit_Conflicts" action="RabbitVCS::Edit_Conflicts" />
<menuitem name="RabbitVCS::Mark_Resolved" action="RabbitVCS::Mark_Resolved" />
<menuitem name="RabbitVCS::Relocate" action="RabbitVCS::Relocate" />
<menuitem name="RabbitVCS::Get_Lock" action="RabbitVCS::Get_Lock" />
<menuitem name="RabbitVCS::Unlock" action="RabbitVCS::Unlock" />
<menuitem name="RabbitVCS::Cleanup" action="RabbitVCS::Cleanup" />
<menuitem name="RabbitVCS::Annotate" action="RabbitVCS::Annotate" />
<separator />
<menuitem name="RabbitVCS::Export" action="RabbitVCS::Export" />
<menuitem name="RabbitVCS::Create_Repository" action="RabbitVCS::Create_Repository" />
<menuitem name="RabbitVCS::Import" action="RabbitVCS::Import" />
<separator />
<menuitem name="RabbitVCS::Branch_Tag" action="RabbitVCS::Branch_Tag" />
<menuitem name="RabbitVCS::Switch" action="RabbitVCS::Switch" />
<menuitem name="RabbitVCS::Merge" action="RabbitVCS::Merge" />
<separator />
<menuitem name="RabbitVCS::Apply_Patch" action="RabbitVCS::Apply_Patch" />
<menuitem name="RabbitVCS::Create_Patch" action="RabbitVCS::Create_Patch" />
<menuitem name="RabbitVCS::Properties" action="RabbitVCS::Properties" />
<separator />
</menu>
<menu name="RabbitVCS::RabbitVCS_Git" action="RabbitVCS::RabbitVCS_Git">
<menuitem name="RabbitVCS::Update" action="RabbitVCS::Update" />
<menuitem name="RabbitVCS::Commit" action="RabbitVCS::Commit" />
<menuitem name="RabbitVCS::Push" action="RabbitVCS::Push" />
<separator />
<menuitem name="RabbitVCS::Clone" action="RabbitVCS::Clone" />
<menuitem name="RabbitVCS::Initialize_Repository" action="RabbitVCS::Initialize_Repository" />
<separator />
<menu name="RabbitVCS::Diff_Menu" action="RabbitVCS::Diff_Menu">
<menuitem name="RabbitVCS::Diff" action="RabbitVCS::Diff" />
<menuitem name="RabbitVCS::Diff_Previous_Revision" action="RabbitVCS::Diff_Previous_Revision" />
<menuitem name="RabbitVCS::Diff_Multiple" action="RabbitVCS::Diff_Multiple" />
<menuitem name="RabbitVCS::Compare_Tool" action="RabbitVCS::Compare_Tool" />
<menuitem name="RabbitVCS::Compare_Tool_Previous_Revision" action="RabbitVCS::Compare_Tool_Previous_Revision" />
<menuitem name="RabbitVCS::Compare_Tool_Multiple" action="RabbitVCS::Compare_Tool_Multiple" />
<menuitem name="RabbitVCS::Show_Changes" action="RabbitVCS::Show_Changes" />
</menu>
<menuitem name="RabbitVCS::Show_Log" action="RabbitVCS::Show_Log" />
<separator />
<menuitem name="RabbitVCS::Stage" action="RabbitVCS::Stage" />
<menuitem name="RabbitVCS::Unstage" action="RabbitVCS::Unstage" />
<menu name="RabbitVCS::Add_To_Ignore_List" action="RabbitVCS::Add_To_Ignore_List">
<menuitem name="RabbitVCS::Ignore_By_Filename" action="RabbitVCS::Ignore_By_Filename" />
<menuitem name="RabbitVCS::Ignore_By_File_Extension" action="RabbitVCS::Ignore_By_File_Extension" />
</menu>
<separator />
<menuitem name="RabbitVCS::Rename" action="RabbitVCS::Rename" />
<menuitem name="RabbitVCS::Delete" action="RabbitVCS::Delete" />
<menuitem name="RabbitVCS::Revert" action="RabbitVCS::Revert" />
<menuitem name="RabbitVCS::Edit_Conflicts" action="RabbitVCS::Edit_Conflicts" />
<menuitem name="RabbitVCS::Clean" action="RabbitVCS::Clean" />
<menuitem name="RabbitVCS::Reset" action="RabbitVCS::Reset" />
<menuitem name="RabbitVCS::Checkout" action="RabbitVCS::Checkout" />
<separator />
<menuitem name="RabbitVCS::Branches" action="RabbitVCS::Branches" />
<menuitem name="RabbitVCS::Tags" action="RabbitVCS::Tags" />
<menuitem name="RabbitVCS::Remotes" action="RabbitVCS::Remotes" />
<separator />
<menuitem name="RabbitVCS::Export" action="RabbitVCS::Export" />
<menuitem name="RabbitVCS::Merge" action="RabbitVCS::Merge" />
<separator />
<menuitem name="RabbitVCS::Annotate" action="RabbitVCS::Annotate" />
<separator />
<menuitem name="RabbitVCS::Apply_Patch" action="RabbitVCS::Apply_Patch" />
<menuitem name="RabbitVCS::Create_Patch" action="RabbitVCS::Create_Patch" />
<separator />
</menu>
<menuitem name="RabbitVCS::Settings" action="RabbitVCS::Settings" />
<menuitem name="RabbitVCS::About" action="RabbitVCS::About" />
</menu>
</placeholder>
</menubar>
</ui>
"""
class RabbitVCSWindowHelper(GtkContextMenuCaller):
_menu_paths = [
# "/MenuBar/RabbitVCSMenu",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Commit",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Update",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Checkout",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Diff_Menu",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Diff_Menu/RabbitVCS::Diff",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Diff_Menu/RabbitVCS::Diff_Previous_Revision",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Diff_Menu/RabbitVCS::Diff_Multiple",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Diff_Menu/RabbitVCS::Compare_Tool",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Diff_Menu/RabbitVCS::Compare_Tool_Previous_Revision",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Diff_Menu/RabbitVCS::Compare_Tool_Multiple",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Diff_Menu/RabbitVCS::Show_Changes",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Show_Log",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Repo_Browser",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Check_For_Modifications",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Add",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Add_To_Ignore_List",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Add_To_Ignore_List/RabbitVCS::Ignore_By_Filename",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Add_To_Ignore_List/RabbitVCS::Ignore_By_File_Extension",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Update_To_Revision",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Rename",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Delete",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Revert",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Edit_Conflicts",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Mark_Resolved",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Get_Lock",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Unlock",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Cleanup",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Annotate",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Export",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Create_Repository",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Import",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Branch_Tag",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Switch",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Merge",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Apply_Patch",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Create_Patch",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Svn/RabbitVCS::Properties",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Update",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Commit",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Push",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Clone",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Initialize_Repository",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Diff_Menu",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Diff_Menu/RabbitVCS::Diff",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Diff_Menu/RabbitVCS::Diff_Previous_Revision",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Diff_Menu/RabbitVCS::Diff_Multiple",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Diff_Menu/RabbitVCS::Compare_Tool",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Diff_Menu/RabbitVCS::Compare_Tool_Previous_Revision",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Diff_Menu/RabbitVCS::Compare_Tool_Multiple",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Diff_Menu/RabbitVCS::Show_Changes",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Show_Log",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Stage",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Unstage",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Add_To_Ignore_List",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Add_To_Ignore_List/RabbitVCS::Ignore_By_Filename",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Add_To_Ignore_List/RabbitVCS::Ignore_By_File_Extension",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Rename",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Delete",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Revert",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Edit_Conflicts",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Clean",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Reset",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Checkout",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Branches",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Tags",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Remotes",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Export",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Merge",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Annotate",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Apply_Patch",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::RabbitVCS_Git/RabbitVCS::Create_Patch",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::Settings",
"/MenuBar/ExtraMenu_1/RabbitVCSMenu/RabbitVCS::About"
]
_default_base_dir = os.path.expanduser("~")
def __init__(self, plugin, window):
self._window = window
self._plugin = plugin
self.base_dir = self._default_base_dir
self._menubar_menu = None
self._menu_action = None
self.vcs_client = create_vcs_instance()
# Insert menu items
self._insert_menu()
def deactivate(self):
# Remove any installed menu items
self._remove_menu()
self._window = None
self.base_dir = None
self._plugin = None
self._menubar_menu = None
self._action_group = None
def _insert_menu(self):
# Get the GtkUIManager
manager = self._window.get_ui_manager()
self._menubar_menu = GeditMenu(self, self.vcs_client, self.base_dir, [self._get_document_path()])
self._menu_action = gtk.Action( name="RabbitVCSMenu", label="RabbitVCS", tooltip="Excellent Version Control for Linux", stock_id=None )
self._action_group = gtk.ActionGroup("RabbitVCSActions")
self._action_group = self._menubar_menu.get_action_group(self._action_group)
self._action_group.add_action( self._menu_action )
# Insert the action group
manager.insert_action_group(self._action_group, 0)
# Merge the UI
self._ui_id = manager.add_ui_from_string(ui_str)
def _remove_menu(self):
# Get the GtkUIManager
manager = self._window.get_ui_manager()
# Remove the ui
manager.remove_ui(self._ui_id)
# Remove the action group
manager.remove_action_group(self._action_group)
# Make sure the manager updates
manager.ensure_update()
def update_ui(self):
self.update_base_dir()
document = self._window.get_active_document()
        self._action_group.set_sensitive(document is not None)
        if document is not None:
manager = self._window.get_ui_manager()
manager.get_widget("/MenuBar/ExtraMenu_1/RabbitVCSMenu").set_sensitive(True)
self._menubar_menu.set_paths([self._get_document_path()])
self._determine_menu_sensitivity([self._get_document_path()])
def connect_view(self, view, id_name):
handler_id = view.connect("populate-popup", self.on_view_populate_popup)
view.set_data(id_name, [handler_id])
def disconnect_view(self, view, id_name):
view.disconnect(view.get_data(id_name)[0])
def on_view_populate_popup(self, view, menu):
separator = gtk.SeparatorMenuItem()
menu.append(separator)
separator.show()
context_menu = GeditMainContextMenu(self, self.vcs_client, self.base_dir, [self._get_document_path()]).get_menu()
for context_menu_item in context_menu:
menu.append(context_menu_item)
def _get_document_path(self):
document = self._window.get_active_document()
path = self.base_dir
if document:
tmp_path = document.get_uri_for_display()
if os.path.exists(tmp_path):
path = tmp_path
return path
def update_base_dir(self):
document = self._window.get_active_document()
if document:
path = document.get_uri_for_display()
if os.path.exists(path):
self.base_dir = os.path.dirname(path)
else:
self.base_dir = self._default_base_dir
self._menubar_menu.set_base_dir(self.base_dir)
def _determine_menu_sensitivity(self, paths):
self._menubar_menu.update_conditions(paths)
manager = self._window.get_ui_manager()
for menu_path in self._menu_paths:
widget = manager.get_widget(menu_path)
self._menubar_menu.update_action(widget.get_action())
# Menu activate handlers
def reload_settings(self, proc):
self.update_ui()
def on_context_menu_command_finished(self):
self.update_ui()
class RabbitVCSPlugin(gedit.Plugin):
def __init__(self):
gedit.Plugin.__init__(self)
self._instances = {}
self.id_name = "RabbitVCSContextMenuID"
def activate(self, window):
self._instances[window] = RabbitVCSWindowHelper(self, window)
handler_ids = []
for signal in ('tab-added', 'tab-removed'):
method = getattr(self, 'on_window_' + signal.replace('-', '_'))
handler_ids.append(window.connect(signal, method))
window.set_data(self.id_name, handler_ids)
if window in self._instances:
for view in window.get_views():
self._instances[window].connect_view(view, self.id_name)
def deactivate(self, window):
widgets = [window] + window.get_views()
for widget in widgets:
handler_ids = widget.get_data(self.id_name)
if handler_ids is not None:
for handler_id in handler_ids:
widget.disconnect(handler_id)
widget.set_data(self.id_name, None)
if window in self._instances:
self._instances[window].deactivate()
del self._instances[window]
def update_ui(self, window):
if window in self._instances:
self._instances[window].update_ui()
def on_window_tab_added(self, window, tab):
if window in self._instances:
self._instances[window].connect_view(tab.get_view(), self.id_name)
def on_window_tab_removed(self, window, tab):
if window in self._instances:
self._instances[window].disconnect_view(tab.get_view(), self.id_name)
class MenuIgnoreByFilename(MenuItem):
identifier = "RabbitVCS::Ignore_By_Filename"
label = _("Ignore by File Name")
tooltip = _("Ignore item by filename")
class MenuIgnoreByFileExtension(MenuItem):
identifier = "RabbitVCS::Ignore_By_File_Extension"
label = _("Ignore by File Extension")
tooltip = _("Ignore item by extension")
class GeditMenuBuilder(object):
"""
Generalised menu builder class. Subclasses must provide:
make_menu_item(self, item, id_magic) - create the menu item for whatever
toolkit (usually this should be just call a convenience method on the
MenuItem instance).
attach_submenu(self, menu_node, submenu_list) - given a list of whatever
make_menu_item(...) returns, create a submenu and attach it to the given
node.
top_level_menu(self, items) - in some circumstances we need to treat the top
level menu differently (eg. Nautilus, because Xenu said so). This processes
a list of menu items returned by make_menu_item(...) to create the overall
menu.
"""
def __init__(self, structure, conditions, callbacks, action_group):
"""
@param structure: Menu structure
@type structure: list
Note on "structure". The menu structure is defined in a list of tuples
of two elements each. The first element is a class - the MenuItem
subclass that defines the menu interface (see below).
The second element is either None (if there is no submenu) or a list of
tuples if there is a submenu. The submenus are generated recursively.
FYI, this is a list of tuples so that we retain the desired menu item
order (dicts do not retain order)
Example:
        [
            (MenuClassOne, [
                (MenuClassOneSubA, None),
                (MenuClassOneSubB, None)
            ]),
            (MenuClassTwo, None),
            (MenuClassThree, None)
        ]
"""
self.action_group = action_group
for item_class in structure:
item = item_class(conditions, callbacks)
default_name = MenuItem.make_default_name(item.identifier)
action = RabbitVCSAction(item.identifier, item.label, item.tooltip, item.icon)
if item.icon and hasattr(action, "set_icon_name"):
action.set_icon_name(item.icon)
if item.callback:
if item.callback_args:
action.connect("activate", item.callback, item.callback_args)
else:
action.connect("activate", item.callback)
action.set_property("visible", item.show())
action.set_data("item", item)
self.action_group.add_action(action)
def _get_function(self, object, name):
function = None
if hasattr(object, name):
attr = getattr(object, name)
if callable(attr):
function = attr
return function
class GeditMenu:
def __init__(self, caller, vcs_client, base_dir, paths):
"""
@param caller: The calling object
@type caller: RabbitVCS extension
@param vcs_client: The vcs client
@type vcs_client: rabbitvcs.vcs
        @param base_dir: The current working directory
@type base_dir: string
@param paths: The selected paths
@type paths: list
"""
self.caller = caller
self.paths = paths
self.base_dir = base_dir
self.vcs_client = vcs_client
self.conditions = GtkFilesContextMenuConditions(self.vcs_client, self.paths)
self.callbacks = GtkFilesContextMenuCallbacks(
self.caller,
self.base_dir,
self.vcs_client,
self.paths
)
self.structure = [
MenuRabbitVCSSvn,
MenuRabbitVCSGit,
MenuCheckout,
MenuUpdate,
MenuCommit,
MenuPush,
MenuInitializeRepository,
MenuClone,
MenuRabbitVCS,
MenuDiffMenu,
MenuDiff,
MenuDiffPrevRev,
MenuDiffMultiple,
MenuCompareTool,
MenuCompareToolPrevRev,
MenuCompareToolMultiple,
MenuShowChanges,
MenuShowLog,
MenuRepoBrowser,
MenuCheckForModifications,
MenuAdd,
MenuStage,
MenuUnstage,
MenuAddToIgnoreList,
MenuUpdateToRevision,
MenuRename,
MenuDelete,
MenuRevert,
MenuEditConflicts,
MenuMarkResolved,
MenuRelocate,
MenuGetLock,
MenuUnlock,
MenuClean,
MenuReset,
MenuCleanup,
MenuExport,
MenuCreateRepository,
MenuImport,
MenuBranches,
MenuTags,
MenuRemotes,
MenuBranchTag,
MenuSwitch,
MenuMerge,
MenuAnnotate,
MenuCreatePatch,
MenuApplyPatch,
MenuProperties,
MenuHelp,
MenuSettings,
MenuAbout,
MenuIgnoreByFilename,
MenuIgnoreByFileExtension
]
def set_paths(self, paths):
self.paths = paths
self.conditions.paths = paths
self.callbacks.paths = paths
def set_base_dir(self, base_dir):
self.base_dir = base_dir
self.callbacks.base_dir = base_dir
self.conditions.base_dir = base_dir
def get_action_group(self, action_group):
return GeditMenuBuilder(self.structure, self.conditions, self.callbacks, action_group).action_group
def update_conditions(self, paths):
self.conditions.generate_statuses(paths)
self.conditions.generate_path_dict(paths)
def update_action(self, action):
action.set_property("visible", action.get_data("item").show())
class GeditContextMenu(MenuBuilder):
"""
Provides a standard gtk context menu (ie. a list of
"gtk.MenuItem"s).
"""
signal = "activate"
def make_menu_item(self, item, id_magic):
return item.make_gtk_menu_item(id_magic)
def attach_submenu(self, menu_node, submenu_list):
submenu = gtk.Menu()
menu_node.set_submenu(submenu)
        for item in submenu_list:
            submenu.append(item)
def top_level_menu(self, items):
return items
class GeditMainContextMenu(MainContextMenu):
def __init__(self, caller, vcs_client, base_dir, paths=[],
conditions=None, callbacks=None):
"""
@param caller: The calling object
@type caller: RabbitVCS extension
@param vcs_client: The vcs client
@type vcs_client: rabbitvcs.vcs
        @param base_dir: The current working directory
@type base_dir: string
@param paths: The selected paths
@type paths: list
@param conditions: The conditions class that determines menu item visibility
@kind conditions: ContextMenuConditions
@param callbacks: The callbacks class that determines what actions are taken
@kind callbacks: ContextMenuCallbacks
"""
self.caller = caller
self.paths = paths
self.base_dir = base_dir
self.vcs_client = vcs_client
self.conditions = conditions
if self.conditions is None:
self.conditions = GtkFilesContextMenuConditions(self.vcs_client, paths)
self.callbacks = callbacks
if self.callbacks is None:
self.callbacks = MainContextMenuCallbacks(
self.caller,
self.base_dir,
self.vcs_client,
paths
)
ignore_items = get_ignore_list_items(paths)
# The first element of each tuple is a key that matches a
# ContextMenuItems item. The second element is either None when there
# is no submenu, or a recursive list of tuples for desired submenus.
self.structure = [
(MenuUpdate, None),
(MenuCommit, None),
(MenuPush, None),
(MenuRabbitVCSSvn, [
(MenuCheckout, None),
(MenuDiffMenu, [
(MenuDiff, None),
(MenuDiffPrevRev, None),
(MenuDiffMultiple, None),
(MenuCompareTool, None),
(MenuCompareToolPrevRev, None),
(MenuCompareToolMultiple, None),
(MenuShowChanges, None),
]),
(MenuShowLog, None),
(MenuRepoBrowser, None),
(MenuCheckForModifications, None),
(MenuSeparator, None),
(MenuAdd, None),
(MenuAddToIgnoreList, ignore_items),
(MenuSeparator, None),
(MenuUpdateToRevision, None),
(MenuRename, None),
(MenuDelete, None),
(MenuRevert, None),
(MenuEditConflicts, None),
(MenuMarkResolved, None),
(MenuRelocate, None),
(MenuGetLock, None),
(MenuUnlock, None),
(MenuCleanup, None),
(MenuSeparator, None),
(MenuExport, None),
(MenuCreateRepository, None),
(MenuImport, None),
(MenuSeparator, None),
(MenuBranchTag, None),
(MenuSwitch, None),
(MenuMerge, None),
(MenuSeparator, None),
(MenuAnnotate, None),
(MenuSeparator, None),
(MenuCreatePatch, None),
(MenuApplyPatch, None),
(MenuProperties, None),
(MenuSeparator, None),
(MenuSettings, None),
(MenuAbout, None)
]),
(MenuRabbitVCSGit, [
(MenuClone, None),
(MenuInitializeRepository, None),
(MenuSeparator, None),
(MenuDiffMenu, [
(MenuDiff, None),
(MenuDiffPrevRev, None),
(MenuDiffMultiple, None),
(MenuCompareTool, None),
(MenuCompareToolPrevRev, None),
(MenuCompareToolMultiple, None),
(MenuShowChanges, None),
]),
(MenuShowLog, None),
(MenuStage, None),
(MenuUnstage, None),
(MenuAddToIgnoreList, ignore_items),
(MenuSeparator, None),
(MenuRename, None),
(MenuDelete, None),
(MenuRevert, None),
(MenuEditConflicts, None),
(MenuClean, None),
(MenuReset, None),
(MenuCheckout, None),
(MenuSeparator, None),
(MenuBranches, None),
(MenuTags, None),
(MenuRemotes, None),
(MenuSeparator, None),
(MenuExport, None),
(MenuMerge, None),
(MenuSeparator, None),
(MenuAnnotate, None),
(MenuSeparator, None),
(MenuCreatePatch, None),
(MenuApplyPatch, None),
(MenuSeparator, None),
(MenuSettings, None),
(MenuAbout, None)
])
]
def get_menu(self):
return GeditContextMenu(self.structure, self.conditions, self.callbacks).menu
| gpl-2.0 | -7,631,977,068,958,468,000 | 44.617962 | 143 | 0.617114 | false |
prutseltje/ansible | lib/ansible/module_utils/network/iosxr/iosxr.py | 4 | 17339 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2015 Peter Sprygada, <[email protected]>
# Copyright (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
from difflib import Differ
from copy import deepcopy
from time import sleep
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.common.netconf import NetconfConnection
try:
from ncclient.xml_ import to_xml
HAS_NCCLIENT = True
except ImportError:
HAS_NCCLIENT = False
try:
from lxml import etree
HAS_XML = True
except ImportError:
HAS_XML = False
_EDIT_OPS = frozenset(['merge', 'create', 'replace', 'delete'])
BASE_1_0 = "{urn:ietf:params:xml:ns:netconf:base:1.0}"
NS_DICT = {
'BASE_NSMAP': {"xc": "urn:ietf:params:xml:ns:netconf:base:1.0"},
'BANNERS_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-infra-infra-cfg"},
'INTERFACES_NSMAP': {None: "http://openconfig.net/yang/interfaces"},
'INSTALL_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-installmgr-admin-oper"},
'HOST-NAMES_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-shellutil-cfg"},
'M:TYPE_NSMAP': {"idx": "urn:ietf:params:xml:ns:yang:iana-if-type"},
'ETHERNET_NSMAP': {None: "http://openconfig.net/yang/interfaces/ethernet"},
'CETHERNET_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-drivers-media-eth-cfg"},
'INTERFACE-CONFIGURATIONS_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-ifmgr-cfg"},
'INFRA-STATISTICS_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-infra-statsd-oper"},
'INTERFACE-PROPERTIES_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-ifmgr-oper"},
'IP-DOMAIN_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-ip-domain-cfg"},
'SYSLOG_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-infra-syslog-cfg"},
'AAA_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-aaa-lib-cfg"},
'AAA_LOCALD_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-aaa-locald-cfg"},
}
iosxr_provider_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'timeout': dict(type='int'),
'transport': dict(type='str', default='cli', choices=['cli', 'netconf']),
}
iosxr_argument_spec = {
'provider': dict(type='dict', options=iosxr_provider_spec)
}
command_spec = {
'command': dict(),
'prompt': dict(default=None),
'answer': dict(default=None)
}
iosxr_top_spec = {
'host': dict(removed_in_version=2.9),
'port': dict(removed_in_version=2.9, type='int'),
'username': dict(removed_in_version=2.9),
'password': dict(removed_in_version=2.9, no_log=True),
'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
'timeout': dict(removed_in_version=2.9, type='int'),
}
iosxr_argument_spec.update(iosxr_top_spec)
def get_provider_argspec():
return iosxr_provider_spec
def get_connection(module):
if hasattr(module, 'connection'):
return module.connection
capabilities = get_device_capabilities(module)
network_api = capabilities.get('network_api')
if network_api == 'cliconf':
module.connection = Connection(module._socket_path)
elif network_api == 'netconf':
module.connection = NetconfConnection(module._socket_path)
else:
module.fail_json(msg='Invalid connection type {!s}'.format(network_api))
return module.connection
def get_device_capabilities(module):
if hasattr(module, 'capabilities'):
return module.capabilities
capabilities = Connection(module._socket_path).get_capabilities()
module.capabilities = json.loads(capabilities)
return module.capabilities
def build_xml_subtree(container_ele, xmap, param=None, opcode=None):
sub_root = container_ele
meta_subtree = list()
for key, meta in xmap.items():
candidates = meta.get('xpath', "").split("/")
if container_ele.tag == candidates[-2]:
parent = container_ele
elif sub_root.tag == candidates[-2]:
parent = sub_root
else:
parent = sub_root.find(".//" + meta.get('xpath', "").split(sub_root.tag + '/', 1)[1].rsplit('/', 1)[0])
if ((opcode in ('delete', 'merge') and meta.get('operation', 'unknown') == 'edit') or
meta.get('operation', None) is None):
if meta.get('tag', False) is True:
if parent.tag == container_ele.tag:
if meta.get('ns', False) is True:
child = etree.Element(candidates[-1], nsmap=NS_DICT[key.upper() + "_NSMAP"])
else:
child = etree.Element(candidates[-1])
meta_subtree.append(child)
sub_root = child
else:
if meta.get('ns', False) is True:
child = etree.SubElement(parent, candidates[-1], nsmap=NS_DICT[key.upper() + "_NSMAP"])
else:
child = etree.SubElement(parent, candidates[-1])
if meta.get('attrib', None) is not None and opcode in ('delete', 'merge'):
child.set(BASE_1_0 + meta.get('attrib'), opcode)
continue
text = None
param_key = key.split(":")
if param_key[0] == 'a':
if param is not None and param.get(param_key[1], None) is not None:
text = param.get(param_key[1])
elif param_key[0] == 'm':
if meta.get('value', None) is not None:
text = meta.get('value')
if text:
if meta.get('ns', False) is True:
child = etree.SubElement(parent, candidates[-1], nsmap=NS_DICT[key.upper() + "_NSMAP"])
else:
child = etree.SubElement(parent, candidates[-1])
child.text = text
if meta.get('attrib', None) is not None and opcode in ('delete', 'merge'):
child.set(BASE_1_0 + meta.get('attrib'), opcode)
if len(meta_subtree) > 1:
for item in meta_subtree:
container_ele.append(item)
if sub_root == container_ele:
return None
else:
return sub_root
def build_xml(container, xmap=None, params=None, opcode=None):
'''
Builds netconf xml rpc document from meta-data
Args:
container: the YANG container within the namespace
xmap: meta-data map to build xml tree
params: Input params that feed xml tree values
opcode: operation to be performed (merge, delete etc.)
Example:
Module inputs:
banner_params = [{'banner':'motd', 'text':'Ansible banner example', 'state':'present'}]
Meta-data definition:
bannermap = collections.OrderedDict()
bannermap.update([
('banner', {'xpath' : 'banners/banner', 'tag' : True, 'attrib' : "operation"}),
('a:banner', {'xpath' : 'banner/banner-name'}),
('a:text', {'xpath' : 'banner/banner-text', 'operation' : 'edit'})
])
Fields:
key: exact match to the key in arg_spec for a parameter
(prefixes --> a: value fetched from arg_spec, m: value fetched from meta-data)
xpath: xpath of the element (based on YANG model)
tag: True if no text on the element
attrib: attribute to be embedded in the element (e.g. xc:operation="merge")
operation: if edit --> includes the element in edit_config() query else ignores for get() queries
value: if key is prefixed with "m:", value is required in meta-data
Output:
<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<banners xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-infra-infra-cfg">
<banner xc:operation="merge">
<banner-name>motd</banner-name>
<banner-text>Ansible banner example</banner-text>
</banner>
</banners>
</config>
:returns: xml rpc document as a string
'''
if opcode == 'filter':
root = etree.Element("filter", type="subtree")
elif opcode in ('delete', 'merge'):
root = etree.Element("config", nsmap=NS_DICT['BASE_NSMAP'])
container_ele = etree.SubElement(root, container, nsmap=NS_DICT[container.upper() + "_NSMAP"])
if xmap is not None:
if params is None:
build_xml_subtree(container_ele, xmap, opcode=opcode)
else:
subtree_list = list()
for param in to_list(params):
subtree_ele = build_xml_subtree(container_ele, xmap, param=param, opcode=opcode)
if subtree_ele is not None:
subtree_list.append(subtree_ele)
for item in subtree_list:
container_ele.append(item)
return etree.tostring(root)
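# Example call (a sketch based on the banner meta-data shown in the docstring above;
# 'bannermap' and 'banner_params' are assumed to be defined as in that example):
#   request = build_xml('banners', xmap=bannermap, params=banner_params, opcode='merge')
# The returned string can then be used as the config/filter payload for the netconf
# connection, e.g. by load_config() and get_config() further below.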
def etree_find(root, node):
try:
element = etree.fromstring(root).find('.//' + to_bytes(node, errors='surrogate_then_replace').strip())
except Exception:
element = etree.fromstring(etree.tostring(root)).find('.//' + to_bytes(node, errors='surrogate_then_replace').strip())
if element is not None:
return element
return None
def etree_findall(root, node):
try:
element = etree.fromstring(root).findall('.//' + to_bytes(node, errors='surrogate_then_replace').strip())
except Exception:
element = etree.fromstring(etree.tostring(root)).findall('.//' + to_bytes(node, errors='surrogate_then_replace').strip())
if element is not None:
return element
return None
def is_cliconf(module):
capabilities = get_device_capabilities(module)
network_api = capabilities.get('network_api')
if network_api not in ('cliconf', 'netconf'):
module.fail_json(msg=('unsupported network_api: {!s}'.format(network_api)))
return False
if network_api == 'cliconf':
return True
return False
def is_netconf(module):
capabilities = get_device_capabilities(module)
network_api = capabilities.get('network_api')
if network_api not in ('cliconf', 'netconf'):
module.fail_json(msg=('unsupported network_api: {!s}'.format(network_api)))
return False
if network_api == 'netconf':
if not HAS_NCCLIENT:
module.fail_json(msg=('ncclient is not installed'))
if not HAS_XML:
module.fail_json(msg=('lxml is not installed'))
return True
return False
def get_config_diff(module, running=None, candidate=None):
conn = get_connection(module)
if is_cliconf(module):
return conn.get('show commit changes diff')
elif is_netconf(module):
if running and candidate:
running_data = running.split("\n", 1)[1].rsplit("\n", 1)[0]
candidate_data = candidate.split("\n", 1)[1].rsplit("\n", 1)[0]
if running_data != candidate_data:
d = Differ()
diff = list(d.compare(running_data.splitlines(), candidate_data.splitlines()))
return '\n'.join(diff).strip()
return None
def discard_config(module):
conn = get_connection(module)
conn.discard_changes()
def commit_config(module, comment=None, confirmed=False, confirm_timeout=None, persist=False, check=False):
conn = get_connection(module)
reply = None
if check:
reply = conn.validate()
else:
if is_netconf(module):
reply = conn.commit(confirmed=confirmed, timeout=confirm_timeout, persist=persist)
elif is_cliconf(module):
reply = conn.commit(comment=comment)
return reply
def get_oper(module, filter=None):
conn = get_connection(module)
if filter is not None:
response = conn.get(filter)
else:
return None
return to_bytes(etree.tostring(response), errors='surrogate_then_replace').strip()
def get_config(module, config_filter=None, source='running'):
conn = get_connection(module)
# Note: Does not cache config in favour of latest config on every get operation.
out = conn.get_config(source=source, filter=config_filter)
if is_netconf(module):
out = to_xml(conn.get_config(source=source, filter=config_filter))
cfg = out.strip()
return cfg
def load_config(module, command_filter, commit=False, replace=False,
comment=None, admin=False, running=None, nc_get_filter=None):
conn = get_connection(module)
diff = None
if is_netconf(module):
# FIXME: check for platform behaviour and restore this
# conn.lock(target = 'candidate')
# conn.discard_changes()
try:
for filter in to_list(command_filter):
conn.edit_config(filter)
candidate = get_config(module, source='candidate', config_filter=nc_get_filter)
diff = get_config_diff(module, running, candidate)
if commit and diff:
commit_config(module)
else:
discard_config(module)
finally:
# conn.unlock(target = 'candidate')
pass
elif is_cliconf(module):
# to keep the pre-cliconf behaviour, make a copy, avoid adding commands to input list
cmd_filter = deepcopy(command_filter)
cmd_filter.insert(0, 'configure terminal')
if admin:
cmd_filter.insert(0, 'admin')
conn.edit_config(cmd_filter)
if module._diff:
diff = get_config_diff(module)
if replace:
cmd = list()
cmd.append({'command': 'commit replace',
'prompt': 'This commit will replace or remove the entire running configuration',
'answer': 'yes'})
cmd.append('end')
conn.edit_config(cmd)
elif commit:
commit_config(module, comment=comment)
conn.edit_config('end')
if admin:
conn.edit_config('exit')
else:
conn.discard_changes()
return diff
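# Typical invocation from a module (sketch; the CLI command is only an illustrative example):
#   diff = load_config(module, ['hostname iosxr01'], commit=True, comment='set hostname')
# With a netconf connection, command_filter would instead contain XML config strings,
# such as the output of build_xml() above.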
def run_command(module, commands):
conn = get_connection(module)
responses = list()
for cmd in to_list(commands):
try:
if isinstance(cmd, str):
cmd = json.loads(cmd)
command = cmd.get('command', None)
prompt = cmd.get('prompt', None)
answer = cmd.get('answer', None)
sendonly = cmd.get('sendonly', False)
newline = cmd.get('newline', True)
        except Exception:
command = cmd
prompt = None
answer = None
sendonly = False
newline = True
out = conn.get(command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline)
try:
responses.append(to_text(out, errors='surrogate_or_strict'))
except UnicodeError:
module.fail_json(msg=u'failed to decode output from {0}:{1}'.format(cmd, to_text(out)))
return responses
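# Example (sketch): each command may be a plain string or a dict with the keys defined
# in command_spec above, e.g.:
#   run_command(module, ['show version'])
#   run_command(module, [{'command': 'show running-config', 'prompt': None, 'answer': None}])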
def copy_file(module, src, dst, proto='scp'):
conn = get_connection(module)
conn.copy_file(source=src, destination=dst, proto=proto)
def get_file(module, src, dst, proto='scp'):
conn = get_connection(module)
conn.get_file(source=src, destination=dst, proto=proto)
| gpl-3.0 | 1,894,512,874,704,712,000 | 35.657505 | 129 | 0.615491 | false |
fermiPy/fermipy | fermipy/srcmap_utils.py | 1 | 13439 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import copy
import numpy as np
from scipy.ndimage import map_coordinates
from scipy.ndimage.interpolation import spline_filter
from scipy.ndimage.interpolation import shift
from astropy.io import fits
import fermipy.utils as utils
import fermipy.wcs_utils as wcs_utils
class MapInterpolator(object):
"""Object that can efficiently generate source maps by
interpolation of a map object."""
def __init__(self, data, pix_ref, shape_out, rebin):
self._data = data
self._data_spline = []
for i in range(data.shape[0]):
self._data_spline += [spline_filter(self._data[i], order=2)]
self._axes = []
for i in range(data.ndim):
self._axes += [np.arange(0, data.shape[i], dtype=float)]
#self._coords = np.meshgrid(*self._axes[1:], indexing='ij')
self._rebin = rebin
# Shape of global output array
self._shape_out = shape_out
self._shape = np.array(self.data.shape)
for i in range(1, self.data.ndim):
self._shape[i] = int(self._shape[i] / self.rebin)
self._shape = tuple(self._shape)
# Reference pixel coordinates
self._pix_ref = pix_ref
@property
def data(self):
return self._data
@property
def shape(self):
return self._shape
@property
def shape_out(self):
return self._shape_out
@property
def rebin(self):
return self._rebin
@property
def ndim(self):
return self._data.ndim
def get_offsets(self, pix):
"""Get offset of the first pixel in each dimension in the
global coordinate system.
Parameters
----------
pix : `~numpy.ndarray`
Pixel coordinates in global coordinate system.
"""
idx = []
for i in range(self.ndim):
if i == 0:
idx += [0]
else:
npix1 = int(self.shape[i])
pix0 = int(pix[i - 1]) - npix1 // 2
idx += [pix0]
return idx
def shift_to_coords(self, pix, fill_value=np.nan):
"""Create a new map that is shifted to the pixel coordinates
``pix``."""
pix_offset = self.get_offsets(pix)
dpix = np.zeros(len(self.shape) - 1)
for i in range(len(self.shape) - 1):
x = self.rebin * (pix[i] - pix_offset[i + 1]
) + (self.rebin - 1.0) / 2.
dpix[i] = x - self._pix_ref[i]
pos = [pix_offset[i] + self.shape[i] // 2
for i in range(self.data.ndim)]
s0, s1 = utils.overlap_slices(self.shape_out, self.shape, pos)
k = np.zeros(self.data.shape)
for i in range(k.shape[0]):
k[i] = shift(self._data_spline[i], dpix, cval=np.nan,
order=2, prefilter=False)
for i in range(1, len(self.shape)):
k = utils.sum_bins(k, i, self.rebin)
k0 = np.ones(self.shape_out) * fill_value
if k[s1].size == 0 or k0[s0].size == 0:
return k0
k0[s0] = k[s1]
return k0
class SourceMapCache(object):
"""Object generates source maps by interpolation of map
templates."""
def __init__(self, m0, m1):
self._m0 = m0
self._m1 = m1
def create_map(self, pix):
"""Create a new map with reference pixel coordinates shifted
to the pixel coordinates ``pix``.
Parameters
----------
pix : `~numpy.ndarray`
Reference pixel of new map.
Returns
-------
out_map : `~numpy.ndarray`
The shifted map.
"""
k0 = self._m0.shift_to_coords(pix)
k1 = self._m1.shift_to_coords(pix)
k0[np.isfinite(k1)] = k1[np.isfinite(k1)]
k0[~np.isfinite(k0)] = 0
return k0
@classmethod
def create(cls, psf, exp, spatial_model, spatial_width, shape_out, cdelt,
rebin=4):
npix = shape_out[1]
pad_pix = npix // 2
xpix = (npix + pad_pix - 1.0) / 2.
ypix = (npix + pad_pix - 1.0) / 2.
pix_ref = np.array([ypix, xpix])
k0 = make_srcmap(psf, exp, spatial_model, spatial_width,
npix=npix + pad_pix,
xpix=xpix, ypix=ypix,
cdelt=cdelt)
m0 = MapInterpolator(k0, pix_ref, shape_out, 1)
npix1 = max(10, int(0.5 / cdelt)) * rebin
xpix1 = (npix1 - 1.0) / 2.
ypix1 = (npix1 - 1.0) / 2.
pix_ref = np.array([ypix1, xpix1])
k1 = make_srcmap(psf, exp, spatial_model, spatial_width,
npix=npix1,
xpix=xpix1, ypix=ypix1,
cdelt=cdelt / rebin)
m1 = MapInterpolator(k1, pix_ref, shape_out, rebin)
return cls(m0, m1)
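# Usage sketch (illustrative; `psf` and `exp` are supplied by the caller, as for
# make_srcmap() below, and `n_ebins`, `ypix`, `xpix` are placeholders):
#   cache = SourceMapCache.create(psf, exp, 'RadialDisk', 0.5,
#                                 shape_out=(n_ebins, 200, 200), cdelt=0.05)
#   srcmap = cache.create_map(pix=np.array([ypix, xpix]))
# create_map() shifts the coarse and oversampled templates to the requested pixel
# position and combines them, avoiding a full kernel recomputation per source position.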
def make_srcmap_old(psf, spatial_model, sigma, npix=500, xpix=0.0, ypix=0.0,
cdelt=0.01, rebin=1, psf_scale_fn=None):
"""Compute the source map for a given spatial model.
Parameters
----------
psf : `~fermipy.irfs.PSFModel`
spatial_model : str
Spatial model.
sigma : float
Spatial size parameter for extended models.
xpix : float
Source position in pixel coordinates in X dimension.
ypix : float
Source position in pixel coordinates in Y dimension.
rebin : int
Factor by which the original map will be oversampled in the
spatial dimension when computing the model.
psf_scale_fn : callable
Function that evaluates the PSF scaling function.
Argument is energy in MeV.
"""
if rebin > 1:
npix = npix * rebin
xpix = xpix * rebin + (rebin - 1.0) / 2.
ypix = ypix * rebin + (rebin - 1.0) / 2.
cdelt = cdelt / rebin
if spatial_model == 'RadialGaussian':
k = utils.make_cgauss_kernel(psf, sigma, npix, cdelt,
xpix, ypix, psf_scale_fn)
elif spatial_model == 'RadialDisk':
k = utils.make_cdisk_kernel(psf, sigma, npix, cdelt,
xpix, ypix, psf_scale_fn)
elif spatial_model == 'PointSource':
k = utils.make_psf_kernel(psf, npix, cdelt,
xpix, ypix, psf_scale_fn)
else:
raise Exception('Unsupported spatial model: %s', spatial_model)
if rebin > 1:
k = utils.sum_bins(k, 1, rebin)
k = utils.sum_bins(k, 2, rebin)
k *= psf.exp[:, np.newaxis, np.newaxis] * np.radians(cdelt) ** 2
return k
def make_srcmap(psf, exp, spatial_model, sigma, npix=(500,500), xpix=0.0, ypix=0.0,
cdelt=0.01, psf_scale_fn=None, klims=None, sparse=False):
"""Compute the source map for a given spatial model.
Parameters
----------
psf : `~fermipy.irfs.PSFModel`
exp : `~numpy.ndarray`
Array of exposures.
spatial_model : str
Spatial model.
sigma : float
Spatial size parameter for extended models.
npix : tuple
        Number of map bins in the x and y directions.
xpix : float
Source position in pixel coordinates in X dimension.
ypix : float
Source position in pixel coordinates in Y dimension.
psf_scale_fn : callable
Function that evaluates the PSF scaling function.
Argument is energy in MeV.
klims : tuple
Indices of lower and upper range of energy.
sparse : bool
Skip pixels in which the source amplitude is small.
"""
if spatial_model == 'RadialGaussian':
k = utils.make_radial_kernel(psf, utils.convolve2d_gauss,
sigma / 1.5095921854516636, npix, cdelt,
xpix, ypix, psf_scale_fn, klims=klims,
sparse=sparse)
elif spatial_model == 'RadialDisk':
k = utils.make_radial_kernel(psf, utils.convolve2d_disk,
sigma / 0.8246211251235321, npix, cdelt,
xpix, ypix, psf_scale_fn, klims=klims,
sparse=sparse)
elif spatial_model == 'PointSource':
k = utils.make_radial_kernel(psf, None, None, npix, cdelt,
xpix, ypix, psf_scale_fn, klims=klims,
sparse=sparse)
else:
raise Exception('Unsupported spatial model: %s', spatial_model)
if klims is not None:
exp = exp[klims[0]:klims[1] + 1, ...]
k *= exp[:, np.newaxis, np.newaxis] * np.radians(cdelt) ** 2
return k
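# Illustrative call (a sketch, not from this module; `psf` and `exp` must be provided by
# the caller, e.g. a PSF model and an exposure array with one entry per energy bin):
#   k = make_srcmap(psf, exp, 'RadialGaussian', 0.4, npix=(50, 50),
#                   xpix=24.5, ypix=24.5, cdelt=0.1)
# The result has one image plane per energy bin; the kernel is scaled by the exposure
# and the pixel solid angle in the final multiplication above.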
def make_cgauss_mapcube(skydir, psf, sigma, outfile, npix=500, cdelt=0.01,
rebin=1):
energies = psf.energies
nebin = len(energies)
k = utils.make_cgauss_kernel(psf, sigma, npix * rebin, cdelt / rebin)
if rebin > 1:
k = utils.rebin_map(k, nebin, npix, rebin)
w = wcs_utils.create_wcs(skydir, cdelt=cdelt,
crpix=npix / 2. + 0.5, naxis=3)
w.wcs.crpix[2] = 1
w.wcs.crval[2] = 10 ** energies[0]
w.wcs.cdelt[2] = energies[1] - energies[0]
w.wcs.ctype[2] = 'Energy'
ecol = fits.Column(name='Energy', format='D', array=10 ** energies)
hdu_energies = fits.BinTableHDU.from_columns([ecol], name='ENERGIES')
hdu_image = fits.PrimaryHDU(np.zeros((nebin, npix, npix)),
header=w.to_header())
hdu_image.data[...] = k
hdu_image.header['CUNIT3'] = 'MeV'
with fits.HDUList([hdu_image, hdu_energies]) as hdulist:
hdulist.writeto(outfile, overwrite=True)
def make_psf_mapcube(skydir, psf, outfile, npix=500, cdelt=0.01, rebin=1):
energies = psf.energies
nebin = len(energies)
k = utils.make_psf_kernel(psf, npix * rebin, cdelt / rebin)
if rebin > 1:
k = utils.rebin_map(k, nebin, npix, rebin)
w = wcs_utils.create_wcs(skydir, cdelt=cdelt,
crpix=npix / 2. + 0.5, naxis=3)
w.wcs.crpix[2] = 1
w.wcs.crval[2] = 10 ** energies[0]
w.wcs.cdelt[2] = energies[1] - energies[0]
w.wcs.ctype[2] = 'Energy'
ecol = fits.Column(name='Energy', format='D', array=10 ** energies)
hdu_energies = fits.BinTableHDU.from_columns([ecol], name='ENERGIES')
hdu_image = fits.PrimaryHDU(np.zeros((nebin, npix, npix)),
header=w.to_header())
hdu_image.data[...] = k
hdu_image.header['CUNIT3'] = 'MeV'
with fits.HDUList([hdu_image, hdu_energies]) as hdulist:
hdulist.writeto(outfile, overwrite=True)
def make_gaussian_spatial_map(skydir, sigma, outfile, cdelt=None, npix=None):
if cdelt is None:
cdelt = sigma / 10.
if npix is None:
npix = int(np.ceil((6.0 * (sigma + cdelt)) / cdelt))
w = wcs_utils.create_wcs(skydir, cdelt=cdelt, crpix=npix / 2. + 0.5)
hdu_image = fits.PrimaryHDU(np.zeros((npix, npix)),
header=w.to_header())
hdu_image.data[:, :] = utils.make_gaussian_kernel(sigma, npix=npix,
cdelt=cdelt)
with fits.HDUList([hdu_image]) as hdulist:
hdulist.writeto(outfile, overwrite=True)
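# Example (sketch): write a Gaussian spatial template centred on `skydir`, the sky
# position object expected by wcs_utils.create_wcs():
#   make_gaussian_spatial_map(skydir, 0.3, 'gauss_template.fits')
# With the defaults, cdelt becomes sigma/10 and the map spans roughly +/- 3 sigma.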
def make_disk_spatial_map(skydir, radius, outfile, cdelt=None, npix=None):
if cdelt is None:
cdelt = radius / 10.
if npix is None:
npix = int(np.ceil((2.0 * (radius + cdelt)) / cdelt))
w = wcs_utils.create_wcs(skydir, cdelt=cdelt, crpix=npix / 2. + 0.5)
hdu_image = fits.PrimaryHDU(np.zeros((npix, npix)),
header=w.to_header())
hdu_image.data[:, :] = utils.make_disk_kernel(radius, npix=npix,
cdelt=cdelt)
with fits.HDUList([hdu_image]) as hdulist:
hdulist.writeto(outfile, overwrite=True)
def delete_source_map(srcmap_file, names, logger=None):
"""Delete a map from a binned analysis source map file if it exists.
Parameters
----------
srcmap_file : str
Path to the source map file.
names : list
List of HDU keys of source maps to be deleted.
"""
with fits.open(srcmap_file) as hdulist:
hdunames = [hdu.name.upper() for hdu in hdulist]
if not isinstance(names, list):
names = [names]
for name in names:
if not name.upper() in hdunames:
continue
del hdulist[name.upper()]
hdulist.writeto(srcmap_file, overwrite=True)
def update_source_maps(srcmap_file, srcmaps, logger=None):
with fits.open(srcmap_file) as hdulist:
hdunames = [hdu.name.upper() for hdu in hdulist]
for name, data in srcmaps.items():
if not name.upper() in hdunames:
for hdu in hdulist[1:]:
if hdu.header['XTENSION'] == 'IMAGE':
break
newhdu = fits.ImageHDU(data, hdu.header, name=name)
newhdu.header['EXTNAME'] = name
hdulist.append(newhdu)
if logger is not None:
logger.debug('Updating source map for %s' % name)
hdulist[name].data[...] = data
hdulist.writeto(srcmap_file, overwrite=True)
| bsd-3-clause | 4,039,672,651,915,733,000 | 30.180974 | 83 | 0.549967 | false |
pietroquaglio/elephant | elephant/spade_src/fast_fca.py | 2 | 58191 | """
This module provides an implementation of formal concept analysis (FCA) in
pure Python. It is used by the SPADE analysis. The code builds on C. Lindig's
Fast Concept Analysis work (1999,2002).
Original code available at:
Copyright (C) 2008-2012 by Dominik Endres ([email protected]).
Relicensed for Elephant by permission.
Usage example:
--------------
>>> relation=[]
>>> relation+=[('monkeyHand','neuron2')]
>>> relation+=[('monkeyFace','neuron1')]
>>> relation+=[('monkeyFace','neuron2')]
>>> relation+=[('humanFace','neuron1')]
>>> relation+=[('spider','neuron3')]
>>> concepts=formalConcepts(relation)
>>> concepts.computeLattice()
>>> print(concepts)
If you generate publications based on this code, please cite the following
paper:
Endres D., Adam R., Giese M.A. & Noppeney U.. (2012).
Understanding the Semantic Structure of Human fMRI Brain Recordings with Formal Concept Analysis.
Proceedings of the 10th International Conference on Formal Concept Analysis (ICFCA 2012) LNAI 7278, Springer,pp. 96-111.
"""
import bisect
import collections
import sys
import gc
import copy
import math
class formalConcept:
""" A formal concept is comprised of an extent and and intent.
Furthermore, intentIndexes is an ordered list of attribute indexes for lectic ordering.
Also contains sets of introduced attibutes and objects and lectically ordered lists of upper and lower neighbours."""
def __init__(self,extent=frozenset(),intent=frozenset(),intentIndexes=[]):
""" intent/extent are a frozensets because they need to be hashable."""
self.cnum=0
self.extent=extent
self.intent=intent
self.introducedAttributes=set()
self.introducedObjects=set()
self.intentIndexes=intentIndexes
self.upperNeighbours=[]
self.lowerNeighbours=[]
self.visited=False # for lattice traversal
# attributes that were introduced closest in upwards direction
# useful for naming a concept that introduces no attributes.
# recompute after pruning!
self.closestIntroducedAttributes=[]
# all attributes that are introduced in the downset of this concept. useful for building search list.
self.downsetAttributes=set()
def copy(self):
"""Copy construction."""
ccopy=formalConcept()
ccopy.cnum=self.cnum
ccopy.extent=self.extent.copy()
ccopy.intent=self.intent.copy()
ccopy.closestIntroducedAttributes=self.closestIntroducedAttributes.copy()
ccopy.downsetAttributes=self.downsetAttributes.copy()
ccopy.introducedAttributes=self.introducedAttributes.copy()
ccopy.introducedObjects=self.introducedObjects.copy()
ccopy.intentIndexes=self.intentIndexes[:]
ccopy.upperNeighbours=self.upperNeighbours[:]
ccopy.lowerNeighbours=self.lowerNeighbours[:]
ccopy.visited=self.visited
return ccopy
    def _lecticCmp(self, other):
        """Three-way comparison implementing the lectic order on intentIndexes.
        Returns 0 if both index lists are equal, -1 if self precedes other, +1 otherwise."""
        if self.intentIndexes==other.intentIndexes:
            return 0
        i1=0
        i2len=len(other.intentIndexes)
        for a1 in self.intentIndexes:
            if i1>=i2len:
                # other is a proper prefix of self => self comes later
                return 1
            a2=other.intentIndexes[i1]
            if a1>a2:
                return -1
            elif a1<a2:
                return 1
            i1+=1
        # self is a proper prefix of other => self comes first
        return -1
    def __eq__(self, other):
        """lectic order on intentIndexes."""
        return self.intentIndexes==other.intentIndexes
    def __ne__(self, other):
        """lectic order on intentIndexes."""
        return self.intentIndexes!=other.intentIndexes
    def __lt__(self, other):
        """lectic order on intentIndexes."""
        return self._lecticCmp(other)<0
    def __le__(self, other):
        """lectic order on intentIndexes."""
        return self._lecticCmp(other)<=0
    def __gt__(self, other):
        """lectic order on intentIndexes."""
        return self._lecticCmp(other)>0
    def __ge__(self, other):
        """lectic order on intentIndexes."""
        return self._lecticCmp(other)>=0
def __repr__(self):
""" print the concept."""
strrep="concept no:"+str(self.cnum)+"\n"
strrep+="extent:"+repr(self.extent)+"\n"
strrep+="intent:"+repr(self.intent)+"\n"
strrep+="introduced objects:"+repr(self.introducedObjects)+"\n"
strrep+="introduced attributes:"+repr(self.introducedAttributes)+"\n"
if hasattr(self,"stability"): strrep+="stability: {0:1.4f}".format(self.stability)+"\n"
strrep+="upper neighbours: "
for un in self.upperNeighbours:
strrep+=str(un.cnum)+", "
strrep+="\n"
strrep+="lower neighbours: "
for ln in self.lowerNeighbours:
strrep+=str(ln.cnum)+", "
strrep+="\n"
return strrep
def __hash__(self):
"""A concept is fully identified by its intent, hence the intent hash can serve as concept hash."""
return self.intent.__hash__()
class formalContext:
""" The formal context.
Builds dictionaries object=>attributes and vice versa for faster closure computation.
    Sets of objects and attributes are kept in lists rather than sets for lectic ordering of concepts.
"""
def __init__(self,relation,objects=None,attributes=None):
""" 'relation' has to be an iterable container of tuples. If objects or attributes are not supplied, determine from relation"""
# map from object=> set of attributes of this object
self.objectsToAttributes=dict()
# map from attributes => set of objects of this attribute
self.attributesToObjects=dict()
# objects and attributes are kept in lists rather than sets for lectic ordering of concepts.
self.objects=[]
self.attributes=[]
if objects is not None:
self.objects=list(objects)
for obj in objects: self.objectsToAttributes[obj]=set()
if attributes is not None:
self.attributes=list(attributes)
for att in attributes: self.attributesToObjects[att]=set()
for obj,att in relation:
if obj not in self.objects:
self.objects+=[obj]
if att not in self.attributes:
self.attributes+=[att]
if obj not in self.objectsToAttributes:
self.objectsToAttributes[obj]=set([att])
else:
self.objectsToAttributes[obj].add(att)
if att not in self.attributesToObjects:
self.attributesToObjects[att]=set([obj])
else:
self.attributesToObjects[att].add(obj)
self.attributes.sort()
self.attributes.reverse()
def objectsPrime(self,objectSet):
"""return a frozenset of all attributes which are shared by members of objectSet."""
if len(objectSet)==0:
return frozenset(self.attributes)
oiter=iter(objectSet)
opr=self.objectsToAttributes[next(oiter)].copy()
for obj in oiter:
opr.intersection_update(self.objectsToAttributes[obj])
return frozenset(opr)
def attributesPrime(self,attributeSet):
"""return a set of all objects which have all attributes in attribute set."""
if len(attributeSet)==0:
return frozenset(self.objects)
aiter=iter(attributeSet)
apr=self.attributesToObjects[next(aiter)].copy()
for att in aiter:
apr.intersection_update(self.attributesToObjects[att])
return frozenset(apr)
def updateIntent(self,intent,object):
"""return intersection of intent and all attributes of object."""
return frozenset(intent.intersection(self.objectsToAttributes[object]))
def updateExtent(self,extent,attribute):
"""return intersection of intent and all attributes of object."""
return frozenset(extent.intersection(self.attributesToObjects[attribute]))
def indexList(self,attributeSet):
"""return ordered list of attribute indexes. For lectic ordering of concepts."""
ilist=[]
for att in attributeSet:
ilist+=[self.attributes.index(att)]
ilist.sort()
return ilist
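# Illustrative sketch (not part of the original module): for a small relation the
# derivation operators behave as follows.
#   ctx = formalContext([('monkeyFace', 'neuron1'), ('monkeyFace', 'neuron2'),
#                        ('humanFace', 'neuron1')])
#   ctx.objectsPrime({'monkeyFace', 'humanFace'})  -> frozenset({'neuron1'})
#   ctx.attributesPrime({'neuron1'})               -> frozenset({'monkeyFace', 'humanFace'})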
class formalConcepts:
""" Computes set of concepts from a binary relation by an algorithm similar to C. Lindig's Fast Concept Analysis (2002).
"""
def __init__(self,relation,objects=None,attributes=None):
""" 'relation' has to be an iterable container of tuples. If objects or attributes are not supplied, determine from relation."""
self.context=formalContext(relation,objects,attributes)
self.concepts=[] # a lectically ordered list of concepts"
self.intentToConceptDict=dict()
self.extentToConceptDict=dict()
def computeUpperNeighbours(self,concept):
""" This version of upperNeighbours runs fast enough in Python to be useful.
Based on a theorem from C. Lindig's (1999) PhD thesis.
Returns list of upper neighbours of concept."""
# The set of all objects g which are not in concept's extent G and might therefore be used to create upper neighbours via ((G u g)'',(G u g)')
upperNeighbourGeneratingObjects=set(self.context.objects).difference(concept.extent)
# dictionary of intent => set of generating objects
upperNeighbourCandidates=dict()
for g in upperNeighbourGeneratingObjects:
# an intent of a concept >= concept. Computed by intersecting i(g) with concept.intent,
# where i(g) is the set of all attributes of g.
intent=self.context.updateIntent(concept.intent,g)
# self.intentToConceptDict is a dictionary of all concepts computed so far.
if intent in self.intentToConceptDict:
curConcept=self.intentToConceptDict[intent]
extent=curConcept.extent
else:
# Store every concept in self.conceptDict, because it will eventually be used
# and the closure is expensive to compute
extent=self.context.attributesPrime(intent)
curConcept=formalConcept(extent,intent,self.context.indexList(intent))
self.intentToConceptDict[intent]=curConcept
# remember which g generated what concept
if intent in upperNeighbourCandidates:
upperNeighbourCandidates[intent].add(g)
else:
upperNeighbourCandidates[intent]=set([g])
neighbours=[]
# find all upper neighbours by Lindig's theorem:
# a concept C=((G u g)'',(G u g)') is an upper neighbour of (G,I) if and only if
# (G u g)'' \ G = set of all g which generated C.
for intent,generatingObjects in upperNeighbourCandidates.items():
extraObjects=self.intentToConceptDict[intent].extent.difference(concept.extent)
if extraObjects==generatingObjects:
neighbours+=[self.intentToConceptDict[intent]]
return neighbours
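    # Worked sketch of the criterion above (illustrative, using the relation from the
    # module docstring): take the concept with extent G={'monkeyFace'} and intent
    # I={'neuron1','neuron2'}. Adding g='humanFace' yields the intent
    # I & i(g) = {'neuron1'} with closure extent {'monkeyFace','humanFace'}; the extra
    # objects {'humanFace'} equal the set of generating objects, so
    # ({'monkeyFace','humanFace'}, {'neuron1'}) is an upper neighbour.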
def computeLowerNeighbours(self,concept,minsize=0):
""" This dual version of upperNeighbours runs fast enough in Python to be useful.
Based on a theorem from C. Lindig's (1999) PhD thesis.
Returns list of upper neighbours of concept. Ignores lower neighbours with less than minextent objects in extent."""
# The set of all objects g which are not in concept's extent G and might therefore be used to create upper neighbours via ((G u g)'',(G u g)')
lowerNeighbourGeneratingAttributes=set(self.context.attributes).difference(concept.intent)
# dictionary of extent => set of generating attributes
lowerNeighbourCandidates=dict()
for i in lowerNeighbourGeneratingAttributes:
            # an extent of a concept <= concept. Computed by intersecting g(i) with concept.extent,
            # where g(i) is the set of all objects that have attribute i.
extent=self.context.updateExtent(concept.extent,i)
if len(extent)<minsize:
continue
# self.extentToConceptDict is a dictionary of all concepts computed so far.
if extent in self.extentToConceptDict:
curConcept=self.extentToConceptDict[extent]
intent=curConcept.intent
else:
# Store every concept in self.conceptDict, because it will eventually be used
# and the closure is expensive to compute
intent=self.context.objectsPrime(extent)
curConcept=formalConcept(extent,intent,self.context.indexList(intent))
self.extentToConceptDict[extent]=curConcept
# remember which g generated what concept
if extent in lowerNeighbourCandidates:
lowerNeighbourCandidates[extent].add(i)
else:
lowerNeighbourCandidates[extent]=set([i])
neighbours=[]
# find all lower neighbours by dual of Lindig's theorem:
# a concept C=((I u i)',(I u i)'') is a lower neighbour of (G,I) if and only if
# (I u i)'' \ I = set of all i which generated C.
for extent,generatingAttributes in lowerNeighbourCandidates.items():
extraAttributes=self.extentToConceptDict[extent].intent.difference(concept.intent)
if extraAttributes==generatingAttributes:
neighbours+=[self.extentToConceptDict[extent]]
return neighbours
def numberConceptsAndComputeIntroduced(self):
""" Numbers concepts and computes introduced objects and attributes"""
numCon=len(self.concepts)
curConNum=0
for curConcept in self.concepts:
curConcept.cnum=curConNum
curConcept.upperNeighbours.sort()
curConcept.lowerNeighbours.sort()
curConcept.introducedObjects=set(curConcept.extent)
for ln in curConcept.lowerNeighbours:
curConcept.introducedObjects.difference_update(ln.extent)
curConcept.introducedAttributes=set(curConcept.intent)
for un in curConcept.upperNeighbours:
curConcept.introducedAttributes.difference_update(un.intent)
curConNum+=1
def computeLattice(self):
""" Computes concepts and lattice.
self.concepts contains lectically ordered list of concepts after completion."""
intent=self.context.objectsPrime(set())
extent=self.context.attributesPrime(intent)
curConcept=formalConcept(extent,intent,self.context.indexList(intent))
self.concepts=[curConcept]
self.intentToConceptDict[curConcept.intent]=curConcept
curConceptIndex=0
numComputedConcepts=0
while True:
upperNeighbours=self.computeUpperNeighbours(curConcept)
for upperNeighbour in upperNeighbours:
upperNeighbourIndex=bisect.bisect(self.concepts,upperNeighbour)
if upperNeighbourIndex==0 or self.concepts[upperNeighbourIndex-1]!=upperNeighbour:
self.concepts.insert(upperNeighbourIndex,upperNeighbour)
curConceptIndex+=1
curConcept.upperNeighbours+=[upperNeighbour]
upperNeighbour.lowerNeighbours+=[curConcept]
curConceptIndex-=1
if curConceptIndex<0:
break
curConcept=self.concepts[curConceptIndex]
numComputedConcepts+=1
if numComputedConcepts % 1000 == 0:
print("Computed upper neighbours of %d concepts" % numComputedConcepts,gc.collect())
sys.stdout.flush()
self.numberConceptsAndComputeIntroduced()
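    # For the example relation in the module docstring, computeLattice() yields 6
    # concepts, from the bottom concept (set(), {'neuron1','neuron2','neuron3'}) up to
    # the top concept ({'monkeyHand','monkeyFace','humanFace','spider'}, set()).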
def computeMinExtentLattice(self,minextent=0):
""" Computes concepts and lattice.
self.concepts contains lectically ordered list of concepts after completion."""
extent=self.context.attributesPrime(set())
intent=self.context.objectsPrime(extent)
curConcept=formalConcept(extent,intent,self.context.indexList(intent))
self.concepts=[curConcept]
self.extentToConceptDict[curConcept.extent]=curConcept
curConceptIndex=0
numComputedConcepts=0
while True:
lowerNeighbours=self.computeLowerNeighbours(curConcept,minextent)
for lowerNeighbour in lowerNeighbours:
lowerNeighbourIndex=bisect.bisect(self.concepts,lowerNeighbour)
if lowerNeighbourIndex==0 or self.concepts[lowerNeighbourIndex-1]!=lowerNeighbour:
self.concepts.insert(lowerNeighbourIndex,lowerNeighbour)
curConcept.lowerNeighbours+=[lowerNeighbour]
lowerNeighbour.upperNeighbours+=[curConcept]
curConceptIndex+=1
if curConceptIndex>=len(self.concepts):
break
curConcept=self.concepts[curConceptIndex]
numComputedConcepts+=1
if numComputedConcepts % 100 == 0:
print("Computed lower neighbours of %d concepts" % numComputedConcepts,gc.collect())
sys.stdout.flush()
self.numberConceptsAndComputeIntroduced()
def checkLowerNeighbours(self,concept,nonMembers):
"""Helper for checkDownset. Remove all elements from nonMembers which are in the downset of concept."""
if len(nonMembers)==0:
return
for ln in concept.lowerNeighbours:
if not ln.visited:
self.checkLowerNeighbours(ln,nonMembers)
if concept in nonMembers:
nonMembers.remove(concept)
concept.visited=True
def checkDownset(self,topConcept,nonMembers):
"""Remove all elements from nonMembers which are in the downset of topConcept."""
for con in self.concepts:
con.visited=False
self.checkLowerNeighbours(topConcept,nonMembers)
def enumerateConcepts(self):
"""Assigns numbers to concept based on lectic order."""
onum=0
for con in self.concepts:
con.cnum=onum
onum+=1
def delConceptFromDicts(self,concept):
if concept.intent in self.intentToConceptDict:
del self.intentToConceptDict[concept.intent]
if concept.extent in self.extentToConceptDict:
del self.extentToConceptDict[concept.extent]
def prune(self,concept,renumber=True):
"""Prune concept from lattice. Upper neighbours are connected to lower neighbours if no other
path through the lattice connects them. Returns True on success."""
if concept.intent not in self.intentToConceptDict and concept.extent not in self.extentToConceptDict:
return False
# remove concept from list of lower neighbours of its upper neighbours
for un in concept.upperNeighbours:
ci=bisect.bisect(un.lowerNeighbours,concept)-1
if ci>=0 and concept==un.lowerNeighbours[ci]:
del un.lowerNeighbours[ci]
# objects introduced in concept are now introduced in upper neighbours
un.introducedObjects.update(concept.introducedObjects)
# remove concept from list of upper neighbours of its lower neighbours
for ln in concept.lowerNeighbours:
ci=bisect.bisect(ln.upperNeighbours,concept)-1
if ci>=0 and concept==ln.upperNeighbours[ci]:
del ln.upperNeighbours[ci]
# attributes introduced in concept are now introduced in lower neighbours
ln.introducedAttributes.update(concept.introducedAttributes)
# delete the concepts
self.delConceptFromDicts(concept)
ci=bisect.bisect(self.concepts,concept)-1
if ci>=0 and self.concepts[ci]==concept:
del self.concepts[ci]
# find all lower neighbours of erased concept which are not in the downset of un
# and add them to the lower neighbours of un
# and vice versa
for un in concept.upperNeighbours:
lowerNeighbours=concept.lowerNeighbours[:]
self.checkDownset(un,lowerNeighbours)
un.lowerNeighbours+=lowerNeighbours
un.lowerNeighbours.sort()
for ln in lowerNeighbours:
ci=bisect.insort(ln.upperNeighbours,un)
# re-number concepts
if renumber:
self.enumerateConcepts()
return True
def pruneSmallerExtents(self,minNumObjects):
"""Prune all concepts at the bottom of the lattice whose |extent|<=minNumObjects.
This may lead to some attributes never being introduced! Return number of pruned concepts."""
oldConNum=len(self.concepts)
toUpdate=set() # all concepts that need updating of introduced objects after deletion
for con in self.concepts[:]:
if len(con.extent)<minNumObjects:
ci=bisect.bisect(self.concepts,con)-1
del self.concepts[ci]
self.delConceptFromDicts(con)
toUpdate.update(con.upperNeighbours) # every upper neighbour of a removed concept is a potential update candidate
toUpdate.intersection_update(self.concepts) # find all update candidates which are still in the set of concepts
# re-compute introduced objects
for con in toUpdate:
con.introducedObjects=set(con.extent)
for ln in con.lowerNeighbours[:]:
if ln.intent not in self.intentToConceptDict and ln.extent not in self.extentToConceptDict:
ci=bisect.bisect(con.lowerNeighbours,ln)-1
del con.lowerNeighbours[ci]
else:
con.introducedObjects.difference_update(ln.extent)
# re-number concepts
self.enumerateConcepts()
return oldConNum-len(self.concepts)
def getLowerNeighbours(self,con):
""" Get all lower neighbours of con. Concept must be in self.concepts!!!"""
# every concept which is < con in the lectic order is a potential lower neighbour
        lowerNeighbourCandidates=list(filter(lambda c:c.intent.issuperset(con.intent),self.concepts[self.concepts.index(con)+1:]))
lncs2=set()
for cc in reversed(lowerNeighbourCandidates):
for lnc in lncs2.copy():
if cc.intent.issubset(lnc.intent):
lncs2.remove(lnc)
lncs2.add(cc)
lowerNeighbours=list(lncs2)
lowerNeighbours.sort()
return lowerNeighbours
def getUpperNeighbours(self,con):
""" Get all upper neighbours of concept. Concept must be in self.concepts!!!"""
# every concept which is > con in the lectic order is a potential upper neighbour
upperNeighbourCandidates=filter(lambda c:c.intent.issubset(con.intent),self.concepts[:self.concepts.index(con)])
uncs2=set()
for cc in upperNeighbourCandidates:
for unc in uncs2.copy():
if cc.intent.issuperset(unc.intent):
uncs2.remove(unc)
uncs2.add(cc)
upperNeighbours=list(uncs2)
upperNeighbours.sort()
return upperNeighbours
def recomputeNeighbours(self):
print("recomputing concept order")
sys.stdout.flush()
numdone=0
for con in self.concepts:
con.lowerNeighbours=self.getLowerNeighbours(con)
con.upperNeighbours=[]
numdone+=1
if numdone % 100 == 0:
print(".",
sys.stdout.flush())
print()
print("%d lower neighbours done. Recomputing upper neighbours." % numdone)
sys.stdout.flush()
# recompute upper neighbours
for con in self.concepts:
for lcon in con.lowerNeighbours:
lcon.upperNeighbours+=[con]
self.numberConceptsAndComputeIntroduced()
def pruneNoIntroduced(self,noAttrib=True,noObject=True):
"""Starting from the bottom, prune all concepts that do not introduce at least one attribute (if noAttrib) and/or at least one object (if noObject)
Leaves top concept. Return number of pruned concepts"""
oldConNum=len(self.concepts)
numpruned=0
prunedConceptList=[]
for con in self.concepts:
if con.cnum==0:
prunedConceptList+=[con]
continue
nia=len(con.introducedAttributes)
nio=len(con.introducedObjects)
if (nia==0 or not noAttrib) and (nio==0 or not noObject):
self.delConceptFromDicts(con)
numpruned+=1
if numpruned % 100 == 0:
print(".",
sys.stdout.flush())
else:
prunedConceptList+=[con]
self.concepts=prunedConceptList
print()
print("Pruned %d concepts" % numpruned)
self.recomputeNeighbours()
return numpruned
def computeAttributeDownsets(self):
"""Iterate through all concepts and compute set of attributes which are introduced in the downset of each concept. Iteration is done in inverse lectic order, therefore each concept needs to check only its immediate subordinates."""
for con in reversed(self.concepts):
con.downsetAttributes=set(con.intent)
for ccon in con.lowerNeighbours:
con.downsetAttributes.update(ccon.downsetAttributes)
def computeClosestIntroducedAttributesConcept(self,con,num=5):
unlist=[]
#con.closestIntroducedAttributes=list(con.intent)
#return
con.closestIntroducedAttributes=set() #con.introducedAttributes.copy()
for uneigh in con.upperNeighbours:
unl=list(uneigh.introducedAttributes)+list(uneigh.closestIntroducedAttributes)
unlist+=[unl]
idx=0
foundAnother=len(con.closestIntroducedAttributes)<num
while foundAnother:
foundAnother=False
for unl in unlist:
if len(unl)>idx:
con.closestIntroducedAttributes.add(unl[idx])
foundAnother=True
if len(con.closestIntroducedAttributes)>=num:
break
idx+=1
if len(con.closestIntroducedAttributes)>=num:
break
def computeClosestIntroducedAttributes(self,num=5):
"""Iterate through all concepts and find at most num introduced attributes of closest upper neighbours of. These attributes can then serve as concept name."""
totnum=len(self.concepts)
i=0
for curCon in self.concepts:
self.computeClosestIntroducedAttributesConcept(curCon,num)
i+=1
if i%1000 == 0:
print("Named %d of %d concepts" % (i,totnum))
print("Named %d concepts" % totnum)
def findClosestIntroducedAttributes(self,concept,num):
"""Find at least num attributes that were introduced closest to concept in upward direction.
This is useful for naming concepts which introduce no attributes by which they could be named."""
for con in self.concepts:
con.visited=False
conceptDeque=collections.deque([concept])
attlist=[]
while len(conceptDeque)>0 and len(attlist)<=num:
curCon=conceptDeque.popleft()
if curCon.visited:
continue
conceptDeque.extend(curCon.upperNeighbours)
attlist+=list(curCon.introducedAttributes)
curCon.visited=True
return set(attlist)
def findLargestConcept_closure(self,attribList,startConcept):
"""find the largest concept which has all the attributes in attribList, starting at startConcept. Return None if no such concept exists."""
attSet=set(attribList)
objSet=self.context.attributesPrime(attSet)
if len(objSet)==0:
# empty extent -- no object matches search
print("EMPTY EXTENT")
return None
attSet=self.context.objectsPrime(objSet)
searchCon=formalConcept(objSet,attSet,self.context.indexList(attSet))
searchConIndex=bisect.bisect_left(self.concepts,searchCon)
print("Looking for ",attSet)
print("IDX ",searchConIndex)
if searchConIndex==len(self.concepts):
# not found in graph. Could insert instead?
return None
# look for next lower neighbour
for lnidx in range(searchConIndex,len(self.concepts)):
print("CMP ",self.concepts[lnidx].intent," to ",attSet)
if self.concepts[lnidx].intent.issuperset(attSet):
return self.concepts[lnidx]
# not found in graph. Could insert instead?
return None
def findLargestConcept(self,attribList,startConcept=None,nextLower=True):
"""find the largest concept which has all the attributes in attribList, starting at startConcept. Return None if no such concept exists."""
for att in attribList:
if att not in self.context.attributesToObjects:
return None
if startConcept is None:
startConcept=self.concepts[0]
attSet=set(attribList)
searchCon=formalConcept(frozenset([]),attSet,self.context.indexList(attSet))
searchConIndex=bisect.bisect_left(self.concepts,searchCon,startConcept.cnum)
#print "Looking for ",attSet
#print "IDX ",searchConIndex
if searchConIndex==len(self.concepts):
# not found in graph. Could insert instead?
return None
if not nextLower:
if self.concepts[searchConIndex].intent==attSet:
return self.concepts[searchConIndex]
else:
return None
# look for next lower neighbour
for lnidx in range(searchConIndex,len(self.concepts)):
#print "CMP ",self.concepts[lnidx].intent," to ",attSet
if self.concepts[lnidx].intent.issuperset(attSet):
return self.concepts[lnidx]
# not found in graph. Could insert instead?
return None
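    # Illustrative call (editor's sketch; the attribute names are assumptions, not from this module):
    #   con = fca.findLargestConcept(['attr1', 'attr2'])
    #   # con is the largest concept whose intent contains both attributes, or None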
def insertNewConcept(self,attribList,numNames=5):
"""Compute closure of attrib list and insert into graph if extent is not empty. Return new concept or None (if extent is empty). returns tuple (concept,isNew)"""
for att in attribList:
if att not in self.context.attributesToObjects:
return (None,False)
extent=self.context.attributesPrime(set(attribList))
if len(extent)==0:
return (None,False)
intent=self.context.objectsPrime(extent)
newCon=formalConcept(extent,intent,self.context.indexList(intent))
newConIndex=bisect.bisect_left(self.concepts,newCon)
if newConIndex<len(self.concepts) and self.concepts[newConIndex].intent==intent:
# concept already exists
print("FOUND ",self.concepts[newConIndex].intent,intent)
return (self.concepts[newConIndex],False)
self.concepts.insert(newConIndex,newCon)
# get upper and lower neighbours
newCon.lowerNeighbours=self.getLowerNeighbours(newCon)
newCon.upperNeighbours=self.getUpperNeighbours(newCon)
newCon.introducedAttributes=set(intent)
newCon.introducedObjects=set(extent)
# fix parents' lower neighbours and introduced Objects
for parent in newCon.upperNeighbours:
#print "UN ",parent.intent
lns=set(parent.lowerNeighbours)
lns.difference_update(newCon.lowerNeighbours)
lns.add(newCon)
parent.lowerNeighbours=list(lns)
parent.lowerNeighbours.sort()
parent.introducedObjects.difference_update(extent)
newCon.introducedAttributes.difference_update(parent.intent)
#for ln in parent.lowerNeighbours:
# print "UN-LN ",ln.cnum,ln.intent
# fix children's upper neighbours and introduced attributes
for child in newCon.lowerNeighbours:
#print "LN ",parent.intent
uns=set(child.upperNeighbours)
uns.difference_update(newCon.upperNeighbours)
uns.add(newCon)
child.upperNeighbours=list(uns)
child.upperNeighbours.sort()
child.introducedAttributes.difference_update(intent)
newCon.introducedObjects.difference_update(child.extent)
# fix concept numbers
curidx=0
for con in self.concepts[curidx:]:
con.cnum=curidx
curidx+=1
# fix names of new concept, parents and children
for con in [newCon]+newCon.lowerNeighbours+newCon.upperNeighbours:
self.computeClosestIntroducedAttributesConcept(con,numNames)
return (newCon,True)
def dotPrint(self,outStream=sys.stdout,extentView=None,title=None,showObjects="all",showAttributes="all",colorlist=None,withStability=False):
"""Print ordered concept set in dot style.
outStream: open, writeable stream to plot into.
if extentView(extent,intent) is supplied, it needs to be a function that takes the extent and intent as an argument and
returns an image filename for it, which will be plotted in the node.
showObjects,showAttributes= show {all|none|introduced} objects/attributes in the concept nodes.
colorlist: draw concept boundary in colors from that list, cycle."""
self.enumerateConcepts()
if colorlist is None: colorlist=["black"]
edges=""
print(outStream,"digraph lattice {")
if title is not None: print(outStream,"label=\""+title+"\"")
for con in self.concepts:
color=colorlist[con.cnum % len(colorlist)]
if extentView is not None:
extentImg=extentView(con.extent,con.intent)
print(outStream,"node{0:d} [shapefile=\"{1:s}\",label=\"\",color=\"{2:s}\"]".format(con.cnum,extentImg,color))
else:
if showAttributes=="all": intentStr="\\n".join(map(str,con.intent))
elif showAttributes=="introduced": intentStr="\\n".join(map(str,con.introducedAttributes))
else: intentStr=""
if intentStr[-2:]=="\\n": intentStr=intentStr[:-2]
if showObjects=="all": extentStr="\\n".join(map(str,con.extent))
elif showObjects=="introduced": extentStr="\\n".join(map(str,con.introducedObjects))
else: intentStr=""
if extentStr[-2:]=="\\n": extentStr=extentStr[:-2]
if not (withStability and hasattr(con,"stability")):
print(outStream,"node{0:d} [color={1:s}, shape=Mrecord, style=bold,label=\"{0:02d}|{2:s}|{3:s}\"]".format(con.cnum,color,extentStr,intentStr))
else:
print(outStream,"node{0:d} [color={1:s}, shape=Mrecord, style=bold,label=\"{0:02d}|{2:s}|{3:s}|{4:4.2f}\"]".format(con.cnum,color,extentStr,intentStr,con.stability))
for lneigh in con.lowerNeighbours:
edges+="node{0:d} -> node{1:d} [color={2:s}]\n".format(con.cnum,lneigh.cnum,colorlist[lneigh.cnum % len(colorlist)])
        print(edges[:-1], file=outStream)
        print("}", file=outStream)
def computeStability(self,extensional=True):
"""Compute stability of concepts. After calling this method, each concept has a member variable 'stability'
        uses the algorithm described in Roth, Obiedkov, Kourie (2008): on succinct representation of knowledge community taxonomies with FCA"""
if extensional: self._computeExtensionalStability()
else: self._computeIntensionalStability()
def _computeExtensionalStability(self):
concepts=set(self.concepts)
count=dict([(c.intent,len(c.lowerNeighbours)) for c in concepts])
subsets=dict([(c.intent,2**len(c.extent)) for c in concepts])
while len(concepts)>0:
curMin=set(filter(lambda c:count[c.intent]==0,concepts))
for cm in curMin:
#cm.stability=float(subsets[cm.intent])/2**len(cm.extent)
cm.stability=math.exp(math.log(subsets[cm.intent])-len(cm.extent)*math.log(2.0))
for cf in filter(lambda x:x.intent < cm.intent,concepts):
subsets[cf.intent]-=subsets[cm.intent]
if cf in cm.upperNeighbours: count[cf.intent]-=1
concepts -= curMin
def _computeIntensionalStability(self):
concepts=set(self.concepts)
count=dict([(c.intent,len(c.upperNeighbours)) for c in concepts])
subsets=dict([(c.intent,2**len(c.intent)) for c in concepts])
while len(concepts)>0:
curMax=set(filter(lambda c:count[c.intent]==0,concepts))
for cm in curMax:
                cm.stability=float(subsets[cm.intent])/2**len(cm.intent)
for cf in filter(lambda x:x.intent > cm.intent,concepts):
subsets[cf.intent]-=subsets[cm.intent]
if cf in cm.lowerNeighbours: count[cf.intent]-=1
concepts -= curMax
def getStableConcepts(self,minStability=None,quantile=None,nmost=None):
"""Return a formalConcept object with the most stable concepts.
computeStability() needs to be called before this method.
One of nmost,minStability,quantile must be supplied.
minStability supersedes quantile supersedes nmost.
nmost: return the n most stable concepts
minStability: return all concepts with at least minStability
quantile: return the most stable quantile <= 1."""
if not all(map(lambda x:hasattr(x,"stability"),self.concepts)): raise RuntimeError("Please compute stability for all concepts")
if minStability is not None:
nmost=sum(map(lambda x:x.stability>=minStability,self.concepts))
elif quantile is not None:
nmost=int(quantile*len(self.concepts))
elif nmost is None: raise RuntimeError("One of minStability,nmost or quantile needs to be supplied")
conceptsStabilitySorted=self.concepts[:]
        conceptsStabilitySorted.sort(key=lambda c: c.stability, reverse=True)
conceptsStabilitySorted=conceptsStabilitySorted[:nmost]
conceptsStabilitySorted.sort() # re-order lectically. important for neighbour computation
stabFCA=formalConcepts([(1,2)])
stabFCA.context=copy.deepcopy(self.context)
stabFCA.concepts=copy.deepcopy(conceptsStabilitySorted)
stabFCA.intentToConceptDict=dict([(c.intent,c) for c in stabFCA.concepts])
stabFCA.extentToConceptDict=dict([(c.extent,c) for c in stabFCA.concepts])
stabFCA.recomputeNeighbours()
return stabFCA
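    # Intended call order (sketch only; a quantile of 0.1 is an arbitrary example):
    #   fca.computeStability(extensional=True)       # attaches .stability to every concept
    #   core = fca.getStableConcepts(quantile=0.1)   # lattice of the most stable 10%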
def __repr__(self):
strrep="Number of concepts: "+str(len(self.concepts))+"\n"
for cnum in range(len(self.concepts)):
if cnum % 10 ==0: print("printing at concept %d of %d " % (cnum,len(self.concepts)))
strrep+="---------------------------\n"
strrep+=repr(self.concepts[cnum])
strrep+="naming suggestion:"+reduce(lambda x,y:str(x)+','+str(y),self.findClosestIntroducedAttributes(self.concepts[cnum],3),'')+"\n"
strrep+="---------------------------\n"
print("Returning string representation of lattice")
return strrep
def __getstate__(self):
"""Concepts contain references to parents/children. This may lead to a stack overflow during pickling if the lattice is large. Thus, translate concept references into concept numbers before pickling."""
dictcopy=self.__dict__.copy()
dictcopy["concepts"]=[]
dictcopy["intentToConceptDict"]=dict()
dictcopy["extentToConceptDict"]=dict()
itc=len(self.intentToConceptDict)>0
etc=len(self.extentToConceptDict)>0
for con in self.concepts:
ccopy=con.copy()
            unn=list(map(lambda x:x.cnum,ccopy.upperNeighbours))
            lnn=list(map(lambda x:x.cnum,ccopy.lowerNeighbours))
ccopy.upperNeighbours=unn
ccopy.lowerNeighbours=lnn
dictcopy["concepts"]+=[ccopy]
if itc:
dictcopy["intentToConceptDict"][ccopy.intent]=ccopy
if etc:
dictcopy["extentToConceptDict"][ccopy.extent]=ccopy
dictcopy["concepts"].sort()
return dictcopy
def __setstate__(self,thedict):
"""Concepts contain references to parents/children. This may lead to a stack overflow during pickling if the lattice is large. Thus, translate concept references into concept numbers before pickling and vice versa on unpickling."""
cnumToRefs=dict()
for con in thedict["concepts"]:
cnumToRefs[con.cnum]=con
for con in thedict["concepts"]:
            unn=list(map(lambda x:cnumToRefs[x],con.upperNeighbours))
            lnn=list(map(lambda x:cnumToRefs[x],con.lowerNeighbours))
con.upperNeighbours=unn
con.lowerNeighbours=lnn
self.__dict__=thedict
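def _pickleRoundtripExample(fca, filename="lattice.pkl"):
    """Editor's sketch, not part of the original module: shows that the __getstate__/__setstate__
    pair above makes large lattices picklable without deep recursion. 'fca' is assumed to be a
    formalConcepts instance whose lattice has already been computed; the file name is hypothetical."""
    import pickle
    with open(filename, "wb") as f:
        pickle.dump(fca, f)
    with open(filename, "rb") as f:
        return pickle.load(f)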
if __name__=="__main__":
import subprocess
# some FCA examples
# a simple neural code
relation=[]
relation+=[('monkeyHand','n2')]
relation+=[('monkeyFace','n1')]
relation+=[('monkeyFace','n2')]
relation+=[('humanFace','n1')]
relation+=[('dogFace','n1')]
relation+=[('spider','n3')]
concepts=formalConcepts(relation)
concepts.computeLattice()
print("Neural code example")
print(concepts)
print()
concepts.computeStability()
# write to dot-file, use colored edges.
dotfile=open("simpleNeuralCode.dot","w")
concepts.dotPrint(dotfile,colorlist=["black","red","blue","green"],withStability=True)
dotfile.close()
subprocess.call(["dot","-Tpng","-osimpleNeuralCode.png","simpleNeuralCode.dot"])
dotfile=open("simpleNeuralCodeStab.dot","w")
stableConcepts=concepts.getStableConcepts(minStability=0.51)
stableConcepts.dotPrint(dotfile,colorlist=["black","red","blue","green"],withStability=True)
dotfile.close()
subprocess.call(["dot","-Tpng","-osimpleNeuralCodeStab.png","simpleNeuralCodeStab.dot"])
sys.exit()
conlst=map(lambda x:concepts.concepts[x],[2,3,4,5,1,0])
for prune in conlst:
print("Pruning concept ",prune.cnum)
print("-------------------")
concepts.prune(prune)
print(concepts)
print()
for prunelimit in range(1,5):
concepts=formalConcepts(relation)
concepts.computeLattice()
print("Pruning all concepts with |extent|<=",prunelimit)
print("-------------------------------------")
concepts.pruneSmallerExtents(prunelimit)
print(concepts)
print()
print("Generating concepts with extent >=",prunelimit)
print("---------------------------------------")
conbyob=formalConcepts(relation)
    conbyob.computeMinExtentLattice(prunelimit+1)
    print(conbyob)
# the local code
relation=[]
for i in range(10):
relation+=[(i,i)]
concepts=formalConcepts(relation)
concepts.computeMinExtentLattice()
print("Local code")
print(concepts)
print()
# the 'anti-local' code. produces a boolean lattice
relation=[]
for i in range(10):
for j in range(10):
if i!=j:
relation+=[(i,j)]
concepts=formalConcepts(relation)
concepts.computeMinExtentLattice()
print("Anti-local code")
print(concepts)
print()
# a random sparse code
relation=[]
import random
import time
random.seed(123456)
# write context in colibri-concepts readable format for comparison
spf=open("sparse.context","w")
for i in range(1000):
print(spf,i,":",)
line=""
for j in range(1700):
if random.random()<10.0/1700:
relation+=[(i,j)]
line+=str(j)+" "
print(spf,line[:-1]+";")
spf.close()
concepts=formalConcepts(relation)
ct=time.clock()
concepts.computeLattice()
ct=time.clock()-ct
print("Sparse code")
#print concepts
print("runtime ",ct)
print("Lattice size before deletion of all concepts with less than 10 objects: ",len(concepts.concepts))
concepts.pruneSmallerExtents(10)
print("Graph size after deletion of all concepts with less than 10 objects: ",len(concepts.concepts))
if len(concepts.concepts)!=len(concepts.intentToConceptDict) and len(concepts.concepts)!=len(concepts.extentToConceptDict):
print("ERROR: concept list length does not match concept dictionary length",len(concepts.concepts),len(concepts.intentToConceptDict),len(concepts.extentToConceptDict))
for con in concepts.concepts:
if con.intent not in concepts.intentToConceptDict and con.extent not in concepts.extentToConceptDict:
print("ERROR: concept ",con.cnum," not in concept dictionary")
conceptsByObj=formalConcepts(relation)
ct=time.clock()
conceptsByObj.computeMinExtentLattice(10)
ct=time.clock()-ct
print("Sparse code by objects")
#print conceptsByObj
print("runtime ",ct)
print("Size of graph without objects having extent <10: ",len(conceptsByObj.concepts))
isOk=True
for extent,concept in conceptsByObj.extentToConceptDict.items():
if concepts.intentToConceptDict[concept.intent]!=concept:
print("ERROR:",concept)
isOk=False
if isOk:
print("Graphs equal!!!")
else:
print("ERROR: graphs NOT equal!!!")
| bsd-3-clause | 922,786,082,583,585,000 | 50.679396 | 247 | 0.510474 | false |
andrestr02/blender | archimesh/src/stairs_maker.py | 3 | 14685 | # ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
# PEP8 compliant (https://www.python.org/dev/peps/pep-0008)
# ----------------------------------------------------------
# File: stairs_maker.py
# Automatic generation of stairs
# Author: Antonio Vazquez (antonioya)
#
# ----------------------------------------------------------
# noinspection PyUnresolvedReferences
import bpy
import math
from arch_tools import *
# ------------------------------------------------------------------
# Define UI class
# Stairs
# ------------------------------------------------------------------
class STAIRS(bpy.types.Operator):
bl_idname = "mesh.archimesh_stairs"
bl_label = "Stairs"
bl_description = "Stairs Generator"
bl_category = 'Archimesh'
bl_options = {'REGISTER', 'UNDO'}
# Define properties
model = bpy.props.EnumProperty(items=(('1', "Rectangular", ""),
('2', "Rounded", "")),
name="Model",
description="Type of steps")
radio = bpy.props.FloatProperty(name='', min=0.001, max=0.500, default=0.20, precision=3,
description='Radius factor for rounded')
curve = bpy.props.BoolProperty(name="Include deformation handles",
description="Include a curve to modify the stairs curve.", default=False)
step_num = bpy.props.IntProperty(name='Number of steps', min=1, max=1000, default=3,
description='Number total of steps')
max_width = bpy.props.FloatProperty(name='Width', min=0.001, max=10, default=1, precision=3,
description='Step maximum width')
depth = bpy.props.FloatProperty(name='Depth', min=0.001, max=10, default=0.30, precision=3,
description='Depth of the step')
shift = bpy.props.FloatProperty(name='Shift', min=0.001, max=1, default=1, precision=3,
description='Step shift in Y axis')
thickness = bpy.props.FloatProperty(name='Thickness', min=0.001, max=10, default=0.03, precision=3,
description='Step thickness')
sizev = bpy.props.BoolProperty(name="Variable width", description="Steps are not equal in width.", default=False)
back = bpy.props.BoolProperty(name="Close sides", description="Close all steps side to make a solid structure.",
default=False)
min_width = bpy.props.FloatProperty(name='', min=0.001, max=10, default=1, precision=3,
description='Step minimum width')
height = bpy.props.FloatProperty(name='height', min=0.001, max=10, default=0.14, precision=3,
description='Step height')
front_gap = bpy.props.FloatProperty(name='Front', min=0, max=10, default=0.03, precision=3, description='Front gap')
side_gap = bpy.props.FloatProperty(name='Side', min=0, max=10, default=0, precision=3, description='Side gap')
crt_mat = bpy.props.BoolProperty(name="Create default Cycles materials",
description="Create default materials for Cycles render.", default=True)
# -----------------------------------------------------
# Draw (create UI interface)
# -----------------------------------------------------
# noinspection PyUnusedLocal
def draw(self, context):
layout = self.layout
space = bpy.context.space_data
if not space.local_view:
# Imperial units warning
if bpy.context.scene.unit_settings.system == "IMPERIAL":
row = layout.row()
row.label("Warning: Imperial units not supported", icon='COLOR_RED')
box = layout.box()
row = box.row()
row.prop(self, 'model')
if self.model == "2":
row.prop(self, 'radio')
box.prop(self, 'step_num')
row = box.row()
row.prop(self, 'max_width')
row.prop(self, 'depth')
row.prop(self, 'shift')
row = box.row()
row.prop(self, 'back')
row.prop(self, 'sizev')
row = box.row()
row.prop(self, 'curve')
# all equal
if self.sizev is True:
row.prop(self, 'min_width')
box = layout.box()
row = box.row()
row.prop(self, 'thickness')
row.prop(self, 'height')
row = box.row()
row.prop(self, 'front_gap')
if self.model == "1":
row.prop(self, 'side_gap')
box = layout.box()
box.prop(self, 'crt_mat')
else:
row = layout.row()
row.label("Warning: Operator does not work in local view mode", icon='ERROR')
# -----------------------------------------------------
# Execute
# -----------------------------------------------------
# noinspection PyUnusedLocal
def execute(self, context):
if bpy.context.mode == "OBJECT":
create_stairs_mesh(self)
return {'FINISHED'}
else:
self.report({'WARNING'}, "Archimesh: Option only valid in Object mode")
return {'CANCELLED'}
# ------------------------------------------------------------------------------
# Generate mesh data
# All custom values are passed using self container (self.myvariable)
# ------------------------------------------------------------------------------
def create_stairs_mesh(self):
# deactivate others
for o in bpy.data.objects:
if o.select is True:
o.select = False
bpy.ops.object.select_all(False)
# ------------------------
# Create stairs
# ------------------------
mydata = create_stairs(self, "Stairs")
mystairs = mydata[0]
mystairs.select = True
bpy.context.scene.objects.active = mystairs
remove_doubles(mystairs)
set_normals(mystairs)
set_modifier_mirror(mystairs, "X")
# ------------------------
# Create curve handles
# ------------------------
if self.curve:
x = mystairs.location.x
y = mystairs.location.y
z = mystairs.location.z
last = mydata[1]
x1 = last[1] # use y
myp = [((0, 0, 0), (- 0.25, 0, 0), (0.25, 0, 0)),
((x1, 0, 0), (x1 - 0.25, 0, 0), (x1 + 0.25, 0, 0))] # double element
mycurve = create_bezier("Stairs_handle", myp, (x, y, z))
set_modifier_curve(mystairs, mycurve)
# ------------------------
# Create materials
# ------------------------
if self.crt_mat:
# Stairs material
mat = create_diffuse_material("Stairs_material", False, 0.8, 0.8, 0.8)
set_material(mystairs, mat)
bpy.ops.object.select_all(False)
mystairs.select = True
bpy.context.scene.objects.active = mystairs
return
# ------------------------------------------------------------------------------
# Create rectangular Stairs
# ------------------------------------------------------------------------------
def create_stairs(self, objname):
myvertex = []
myfaces = []
index = 0
lastpoint = (0, 0, 0)
for s in range(0, self.step_num):
if self.model == "1":
mydata = create_rect_step(self, lastpoint, myvertex, myfaces, index, s)
if self.model == "2":
mydata = create_round_step(self, lastpoint, myvertex, myfaces, index, s)
index = mydata[0]
lastpoint = mydata[1]
mesh = bpy.data.meshes.new(objname)
myobject = bpy.data.objects.new(objname, mesh)
myobject.location = bpy.context.scene.cursor_location
bpy.context.scene.objects.link(myobject)
mesh.from_pydata(myvertex, [], myfaces)
mesh.update(calc_edges=True)
return myobject, lastpoint
# ------------------------------------------------------------------------------
# Create rectangular step
# ------------------------------------------------------------------------------
def create_rect_step(self, origin, myvertex, myfaces, index, step):
x = origin[0]
y = origin[1]
z = origin[2]
i = index
max_depth = y + self.depth
if self.back is True:
max_depth = self.depth * self.step_num
# calculate width (no side gap)
if self.sizev is False:
width = self.max_width / 2
else:
width = (self.max_width / 2) - (step * (((self.max_width - self.min_width) / 2) / self.step_num))
# Vertical Rectangle
myvertex.extend([(x, y, z), (x, y, z + self.height), (x + width, y, z + self.height), (x + width, y, z)])
val = y + self.thickness
myvertex.extend([(x, val, z), (x, val, z + self.height), (x + width, val, z + self.height), (x + width, val, z)])
myfaces.extend([(i + 0, i + 1, i + 2, i + 3), (i + 4, i + 5, i + 6, i + 7), (i + 0, i + 3, i + 7, i + 4),
(i + 1, i + 2, i + 6, i + 5), (i + 0, i + 1, i + 5, i + 4), (i + 3, i + 2, i + 6, i + 7)])
# Side plane
myvertex.extend([(x + width, max_depth, z + self.height), (x + width, max_depth, z)])
myfaces.extend([(i + 7, i + 6, i + 8, i + 9)])
i += 10
# calculate width (side gap)
width = width + self.side_gap
# Horizontal Rectangle
z = z + self.height
myvertex.extend([(x, y - self.front_gap, z), (x, max_depth, z), (x + width, max_depth, z),
(x + width, y - self.front_gap, z)])
z = z + self.thickness
myvertex.extend([(x, y - self.front_gap, z), (x, max_depth, z), (x + width, max_depth, z),
(x + width, y - self.front_gap, z)])
myfaces.extend([(i + 0, i + 1, i + 2, i + 3), (i + 4, i + 5, i + 6, i + 7), (i + 0, i + 3, i + 7, i + 4),
(i + 1, i + 2, i + 6, i + 5), (i + 3, i + 2, i + 6, i + 7)])
i += 8
# remap origin
y = y + (self.depth * self.shift)
return i, (x, y, z)
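# Note (editor's sketch): the returned (x, y, z) is fed back into the next call from
# create_stairs, so each new step starts shifted by depth*shift along Y and raised by
# height+thickness along Z relative to the previous one.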
# ------------------------------------------------------------------------------
# Create rounded step
# ------------------------------------------------------------------------------
def create_round_step(self, origin, myvertex, myfaces, index, step):
x = origin[0]
y = origin[1]
z = origin[2]
pos_x = None
i = index
li = [math.radians(270), math.radians(288), math.radians(306), math.radians(324), math.radians(342),
math.radians(0)]
max_width = self.max_width
max_depth = y + self.depth
if self.back is True:
max_depth = self.depth * self.step_num
# Resize for width
if self.sizev is True:
max_width = max_width - (step * ((self.max_width - self.min_width) / self.step_num))
half = max_width / 2
# ------------------------------------
# Vertical
# ------------------------------------
# calculate width
width = half - (half * self.radio)
myradio = half - width
myvertex.extend([(x, y, z), (x, y, z + self.height)])
# Round
for e in li:
pos_x = (math.cos(e) * myradio) + x + width - myradio
pos_y = (math.sin(e) * myradio) + y + myradio
myvertex.extend([(pos_x, pos_y, z), (pos_x, pos_y, z + self.height)])
# back point
myvertex.extend([(x + width, max_depth, z), (x + width, max_depth, z + self.height)])
myfaces.extend([(i, i + 1, i + 3, i + 2), (i + 2, i + 3, i + 5, i + 4), (i + 4, i + 5, i + 7, i + 6),
(i + 6, i + 7, i + 9, i + 8),
(i + 8, i + 9, i + 11, i + 10), (i + 10, i + 11, i + 13, i + 12), (i + 12, i + 13, i + 15, i + 14)])
i += 16
# ------------------------------------
# Horizontal
# ------------------------------------
# calculate width gap
width = half + self.front_gap - (half * self.radio)
z = z + self.height
# Vertical
myvertex.extend([(x, y - self.front_gap, z), (x, y - self.front_gap, z + self.thickness)])
# Round
for e in li:
pos_x = (math.cos(e) * myradio) + x + width - myradio
pos_y = (math.sin(e) * myradio) + y + myradio - self.front_gap
myvertex.extend([(pos_x, pos_y, z), (pos_x, pos_y, z + self.thickness)])
# back points
myvertex.extend([(pos_x, max_depth, z), (pos_x, max_depth, z + self.thickness),
(x, max_depth, z), (x, max_depth, z + self.thickness)])
myfaces.extend([(i, i + 1, i + 3, i + 2), (i + 2, i + 3, i + 5, i + 4), (i + 4, i + 5, i + 7, i + 6),
(i + 6, i + 7, i + 9, i + 8),
(i + 8, i + 9, i + 11, i + 10), (i + 10, i + 11, i + 13, i + 12), (i + 12, i + 13, i + 15, i + 14),
(i, i + 2, i + 4, i + 6, i + 8, i + 10, i + 12, i + 14, i + 16),
(i + 1, i + 3, i + 5, i + 7, i + 9, i + 11, i + 13, i + 15, i + 17),
(i + 14, i + 15, i + 17, i + 16)])
i += 18
z = z + self.thickness
# remap origin
y = y + (self.depth * self.shift)
return i, (x, y, z)
# ------------------------------------------------------------------------------
# Create bezier curve
# ------------------------------------------------------------------------------
def create_bezier(objname, points, origin):
curvedata = bpy.data.curves.new(name=objname, type='CURVE')
curvedata.dimensions = '3D'
myobject = bpy.data.objects.new(objname, curvedata)
myobject.location = origin
myobject.rotation_euler[2] = math.radians(90)
bpy.context.scene.objects.link(myobject)
polyline = curvedata.splines.new('BEZIER')
polyline.bezier_points.add(len(points)-1)
for idx, (knot, h1, h2) in enumerate(points):
point = polyline.bezier_points[idx]
point.co = knot
point.handle_left = h1
point.handle_right = h2
point.handle_left_type = 'FREE'
point.handle_right_type = 'FREE'
return myobject
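# Illustrative call (values assumed), matching how create_stairs_mesh uses it above:
#   myp = [((0, 0, 0), (-0.25, 0, 0), (0.25, 0, 0)),
#          ((2, 0, 0), (1.75, 0, 0), (2.25, 0, 0))]
#   handle = create_bezier("Stairs_handle", myp, (0.0, 0.0, 0.0))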
| gpl-2.0 | 9,122,731,853,134,806,000 | 38.055851 | 120 | 0.489275 | false |
anthropo-lab/XP | EPHEMER/EDHEC_Project/both_change_group/pages.py | 1 | 12932 | from otree.api import Currency as c, currency_range
from ._builtin import Page, WaitPage
from . import models
from .models import Constants
from otree.api import safe_json
import random
import itertools
from collections import defaultdict
import numpy as np
# For PresenterView
from django.views.generic import TemplateView
from django.shortcuts import render
from .models import DisplayManager
import time
#################################################
#################################################
class PresenterView(TemplateView):
template_name = "both_change_group/PresenterView.html"
def get(self, request, *args, **kwargs):
# Get the data of the displayer
displayers = DisplayManager.objects.all()
if len(displayers) != 1:
# Reinitialise the displayer
DisplayManager.objects.all().delete()
# Recreate
my_displayer = DisplayManager.objects.create(current_session_code="re-init")
else:
my_displayer = displayers[0]
# Get the data to be displayed
context = my_displayer.vars_for_presenter_view()
# Pass the data to the template
return render(request, self.template_name, context)
@classmethod
def set_session_code(cls, session_code):
# Get the data
displayers = DisplayManager.objects.all()
if len(displayers) != 1:
# Reinitialise the displayer
DisplayManager.objects.all().delete()
# Recreate with the proper value
DisplayManager.objects.create(current_session_code=session_code)
else:
my_displayer = displayers[0]
my_displayer.current_session_code = session_code
my_displayer.save()
@classmethod
def set_phase(cls, phase):
# Get the data
displayers = DisplayManager.objects.all()
if len(displayers) != 1:
# Reinitialise the displayer
DisplayManager.objects.all().delete()
# Recreate with the proper value
DisplayManager.objects.create(current_phase=phase)
else:
my_displayer = displayers[0]
my_displayer.current_phase = int(phase)
my_displayer.save()
######################
class GroupingWaitPage(WaitPage):
template_name = 'both_change_group/GroupingWaitPage.html'
group_by_arrival_time = True
def get_players_for_group(self, waiting_players):
if len(waiting_players) >= self.session.config['nb_of_players_per_group']:
##########################
# Select player A
random_A = waiting_players.pop()
random_A.Role = 'A'
random_A.Revenu_initial = 0
random_A.Revenu = 0
random_A.participant.vars['Role'] = random_A.Role
random_A.participant.vars['my_group_id'] = self.subsession.group_id_counter
random_A.Choix_groupe = self.subsession.group_id_counter
# Propagate the roles and other player variables
for subsession in self.subsession.in_rounds(2, Constants.num_rounds):
player = subsession.get_players()[random_A.id_in_subsession - 1]
player.Role = random_A.participant.vars['Role']
player.Choix_groupe = random_A.Choix_groupe
player.Revenu_initial = 0
player.Revenu = 0
# Select players B&C
type_habitant = itertools.cycle(['B', 'C'])
players_BC = []
for p in waiting_players:
p.Role = next(type_habitant)
players_BC.append(p)
p.participant.vars['Role'] = p.Role
p.participant.vars['my_group_id'] = self.subsession.group_id_counter
p.Choix_groupe = self.subsession.group_id_counter
# Propagate the roles and other player variables
for subsession in self.subsession.in_rounds(2, Constants.num_rounds):
player = subsession.get_players()[p.id_in_subsession - 1]
player.Role = p.participant.vars['Role']
player.Choix_groupe = p.Choix_groupe
# Create the new group
new_group = []
new_group.append(random_A)
for j in range(int((self.session.config['nb_of_players_per_group']-1)/2)):
new_group.append(players_BC.pop())
new_group.append(players_BC.pop())
# Prepare the next group
self.subsession.group_id_counter += 1
# Set all players of this group to active
for p in new_group:
p.participant.vars['active_flag'] = time.time()
return new_group
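    # Resulting composition (sketch, assuming an odd nb_of_players_per_group such as 5):
    # one player with Role 'A' plus (n-1)/2 players each with Roles 'B' and 'C'.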
def is_displayed(self):
return self.round_number == 1
######################
class Choix_Tx_imposition(Page):
timeout_seconds = Constants.c_timeout_inseconds
form_model = 'group'
form_fields = ['Taux_imposition']
def is_displayed(self):
return self.player.Role == 'A'
def vars_for_template(self):
self.group.my_group_id = self.player.participant.vars['my_group_id']
def before_next_page(self):
if self.timeout_happened:
self.group.Taux_imposition = np.random.randint(Constants.min_tx_imposition + 10, Constants.max_tx_imposition)
######################
class WaitPage_1(WaitPage):
template_name = 'both_change_group/WaitPage_1.html'
def is_displayed(self):
return self.session.vars['running_part_2'] == "False"
def vars_for_template(self):
pass
######################
class Stop_ForTx(Page):
def is_displayed(self):
return (self.player.Role == 'B') & (self.session.vars['running_part_2'] == "True")
def vars_for_template(self):
pass
######################
class Choix_groupe(Page):
timeout_seconds = Constants.c_timeout_inseconds
form_model = 'player'
form_fields = ['Choix_groupe']
def is_displayed(self):
return (self.player.Role == 'B') & (self.session.vars['running_part_2'] == "True")
def Choix_groupe_error_message(self, value):
if value == 999:
return 'Please select a valid group'
def vars_for_template(self):
all_tx = []
complete_tx = [(g.my_group_id, g.Taux_imposition) for g in self.subsession.get_groups() if g.my_group_id != 999]
if len(complete_tx) > Constants.c_max_migrations_choices:
# "Truncate"
my_tx = (self.group.my_group_id, self.group.Taux_imposition)
all_other_tx = [(g.my_group_id, g.Taux_imposition) for g in self.subsession.get_groups()
if((g.my_group_id != 999) & (g.my_group_id != self.group.my_group_id))]
random.shuffle(all_other_tx)
all_tx.append(my_tx)
for i in range(0, Constants.c_max_migrations_choices-1):
all_tx.append(all_other_tx[i])
else:
all_tx = complete_tx
# Shuffle the presentation
random.shuffle(all_tx)
return {
'all_tx': all_tx,
}
def before_next_page(self):
# Set the default value in case of a timeout
if self.timeout_happened:
self.player.Choix_groupe = self.group.my_group_id
######################
class Stop_ToGroup(Page):
def is_displayed(self):
return self.session.vars['running_part_2'] == "True"
######################
class WaitPage_Group(WaitPage):
    wait_for_all_groups = True  # We need everyone's migration choices before the regrouping can be done
def is_displayed(self):
return self.session.vars['running_part_2'] == "True"
def vars_for_template(self):
pass
def after_all_players_arrive(self):
self.subsession.groupe(self.subsession.pk)
######################
class Results(Page):
timeout_seconds = Constants.c_timeout_inseconds
def is_displayed(self):
return self.session.vars['running_part_2'] == "True"
def vars_for_template(self):
return {
'choix_du_groupe': self.player.Choix_groupe,
'nb_BCs_in_group': (len(self.group.get_players()) - 1),
}
######################
class Revenu(Page):
timeout_seconds = Constants.c_timeout_inseconds
form_model = 'player'
form_fields = ['Choix_declaration']
def is_displayed(self):
return self.player.Role != 'A'
def vars_for_template(self):
return{
'Revenu_pre_impot': self.player.Revenu_initial,
'Tx_Imposition': self.group.Taux_imposition
}
def Choix_declaration_max(self):
return self.player.Revenu_initial
def before_next_page(self):
if self.timeout_happened:
self.player.Choix_declaration = int((np.random.randint(10, 100) / 100) * self.player.Revenu_initial)
self.player.paiement_impots()
######################
class WaitPage_Declaration(WaitPage):
template_name = 'both_change_group/WaitPage_Declaration.html'
def vars_for_template(self):
pass
def after_all_players_arrive(self):
self.group.impots()
######################
class Resultats(Page):
timeout_seconds = Constants.c_timeout_inseconds
def is_displayed(self):
return self.player.Role != 'A'
def vars_for_template(self):
return{
'Montant_Impot' : self.player.Montant_impots,
'Recolte_impots': self.group.collecte,
'Role': self.player.Role,
'Revenu': self.player.Revenu,
'Control': self.player.Controle,
'Fraude': self.player.Declaration_plus_faible,
'Penalite': self.player.Penality,
'Diff': self.player.Difference_Revenu_Declaration * 1.3,
'Revenu_initial': self.player.Revenu_initial,
'Choix_decla': self.player.Choix_declaration,
'banqueroute': self.group.banqueroute,
}
######################
class Banqueroute_A(Page):
timeout_seconds = Constants.c_timeout_inseconds
def is_displayed(self):
return (self.player.Role == 'A') & (self.group.banqueroute is True)
def vars_for_template(self):
pass
######################
class Choix_Redistribution(Page):
timeout_seconds = Constants.c_timeout_inseconds
form_model = 'player'
form_fields = ['Montant_kept']
def is_displayed(self):
return (self.player.Role == 'A') & (self.group.banqueroute is False)
def vars_for_template(self):
return{
'Collecte': self.group.collecte,
'nb_BCs_in_group': (len(self.group.get_players()) - 1),
}
def Montant_kept_max(self):
return self.group.Total_collecte
def before_next_page(self):
if self.timeout_happened:
if self.group.Total_collecte > 0:
self.player.Montant_kept = np.random.randint(0, self.group.Total_collecte)
else:
self.player.Montant_kept = 0
self.group.Revenu_pour_distribution()
self.group.redistribution_and_payoff()
######################
class WaitPage_2(WaitPage):
template_name = 'both_change_group/WaitPage_2.html'
def is_displayed(self):
return self.group.banqueroute is False
######################
class Resultat_Redistribution(Page):
timeout_seconds = Constants.c_timeout_inseconds
def is_displayed(self):
return self.group.banqueroute is False
def vars_for_template(self):
categories = ['Période {}'.format(i) for i in range(1, self.subsession.round_number+1)]
return{
'Montant_garde_par_A': self.group.Total_collecte - self.group.Revenu_dispo_distribution,
'Revenu': self.player.Revenu,
'cumulative_results': self.player.participant.vars['mylist'],
'categories': categories,
'Role': self.player.Role,
}
######################
class Stop_Transition_Screen(Page):
def is_displayed(self):
return self.subsession.round_number == (Constants.num_rounds/2)
def vars_for_template(self):
return{
'jump': self.subsession.jump_2_next,
}
######################
class WinnerAnnouncement(Page):
def is_displayed(self):
return self.subsession.round_number == Constants.num_rounds
def vars_for_template(self):
return{
'winner': self.player.participant.vars['iswinner'],
}
######################
page_sequence = \
[
GroupingWaitPage,
Choix_Tx_imposition,
WaitPage_1,
Stop_ForTx,
Choix_groupe,
Stop_ToGroup,
#??? For automatic testing: WaitPage_Group,
Results,
Revenu,
WaitPage_Declaration,
Resultats,
Banqueroute_A,
Choix_Redistribution,
WaitPage_2,
Resultat_Redistribution,
Stop_Transition_Screen,
WinnerAnnouncement
]
| gpl-3.0 | -955,393,299,217,543,200 | 31.3225 | 127 | 0.588831 | false |
dbbhattacharya/kitsune | vendor/packages/logilab-common/logging_ext.py | 6 | 5983 | # -*- coding: utf-8 -*-
# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Extends the logging module from the standard library.
"""
__docformat__ = "restructuredtext en"
import os
import sys
import logging
from logilab.common.textutils import colorize_ansi
def set_log_methods(cls, logger):
"""bind standard logger's methods as methods on the class"""
cls.__logger = logger
for attr in ('debug', 'info', 'warning', 'error', 'critical', 'exception'):
setattr(cls, attr, getattr(logger, attr))
def xxx_cyan(record):
if 'XXX' in record.message:
return 'cyan'
class ColorFormatter(logging.Formatter):
"""
A color Formatter for the logging standard module.
By default, colorize CRITICAL and ERROR in red, WARNING in orange, INFO in
green and DEBUG in yellow.
self.colors is customizable via the 'color' constructor argument (dictionary).
self.colorfilters is a list of functions that get the LogRecord
and return a color name or None.
"""
def __init__(self, fmt=None, datefmt=None, colors=None):
logging.Formatter.__init__(self, fmt, datefmt)
self.colorfilters = []
self.colors = {'CRITICAL': 'red',
'ERROR': 'red',
'WARNING': 'magenta',
'INFO': 'green',
'DEBUG': 'yellow',
}
if colors is not None:
assert isinstance(colors, dict)
self.colors.update(colors)
def format(self, record):
msg = logging.Formatter.format(self, record)
if record.levelname in self.colors:
color = self.colors[record.levelname]
return colorize_ansi(msg, color)
else:
for cf in self.colorfilters:
color = cf(record)
if color:
return colorize_ansi(msg, color)
return msg
def set_color_formatter(logger=None, **kw):
"""
Install a color formatter on the 'logger'. If not given, it will
defaults to the default logger.
Any additional keyword will be passed as-is to the ColorFormatter
constructor.
"""
if logger is None:
logger = logging.getLogger()
if not logger.handlers:
logging.basicConfig()
format_msg = logger.handlers[0].formatter._fmt
fmt = ColorFormatter(format_msg, **kw)
fmt.colorfilters.append(xxx_cyan)
logger.handlers[0].setFormatter(fmt)
LOG_FORMAT = '%(asctime)s - (%(name)s) %(levelname)s: %(message)s'
LOG_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
def init_log(debug=False, syslog=False, logthreshold=None, logfile=None,
logformat=LOG_FORMAT, logdateformat=LOG_DATE_FORMAT,
rotation_parameters=None):
"""init the log service"""
if os.environ.get('APYCOT_ROOT'):
logthreshold = logging.CRITICAL
# redirect logs to stdout to avoid apycot output parsing failure
handler = logging.StreamHandler(sys.stdout)
else:
if debug:
handler = logging.StreamHandler()
elif logfile is None:
if syslog:
from logging import handlers
handler = handlers.SysLogHandler()
else:
handler = logging.StreamHandler()
else:
try:
if rotation_parameters is None:
handler = logging.FileHandler(logfile)
else:
from logging.handlers import TimedRotatingFileHandler
handler = TimedRotatingFileHandler(logfile,
**rotation_parameters)
except IOError:
handler = logging.StreamHandler()
if logthreshold is None:
logthreshold = logging.ERROR
elif isinstance(logthreshold, basestring):
logthreshold = getattr(logging, THRESHOLD_MAP.get(logthreshold,
logthreshold))
# configure the root logger
logger = logging.getLogger()
logger.setLevel(logthreshold)
# only addHandler and removeHandler method while I would like a
# setHandler method, so do it this way :$
logger.handlers = [handler]
isatty = hasattr(sys.__stdout__, 'isatty') and sys.__stdout__.isatty()
if debug and isatty and sys.platform != 'win32':
fmt = ColorFormatter(logformat, logdateformat)
def col_fact(record):
if 'XXX' in record.message:
return 'cyan'
if 'kick' in record.message:
return 'red'
fmt.colorfilters.append(col_fact)
else:
fmt = logging.Formatter(logformat, logdateformat)
handler.setFormatter(fmt)
return handler
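def _init_log_example():
    """A minimal sketch (not part of the original module): the log file path, threshold
    and rotation settings below are hypothetical values, chosen only to illustrate how
    init_log and set_color_formatter are meant to be combined."""
    handler = init_log(logfile='/tmp/myapp.log', logthreshold='LOG_INFO',
                       rotation_parameters={'when': 'midnight', 'backupCount': 7})
    set_color_formatter()
    return handler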
# map logilab.common.logger thresholds to logging thresholds
THRESHOLD_MAP = {'LOG_DEBUG': 'DEBUG',
'LOG_INFO': 'INFO',
'LOG_NOTICE': 'INFO',
'LOG_WARN': 'WARNING',
'LOG_WARNING': 'WARNING',
'LOG_ERR': 'ERROR',
'LOG_ERROR': 'ERROR',
'LOG_CRIT': 'CRITICAL',
}
| bsd-3-clause | -3,310,948,359,902,950,400 | 35.042169 | 82 | 0.601872 | false |
TouK/vumi | vumi/transports/parlayx/server.py | 4 | 7130 | # -*- test-case-name: vumi.transports.parlayx.tests.test_server -*-
import iso8601
from collections import namedtuple
from twisted.internet.defer import maybeDeferred, fail
from twisted.python import log
from twisted.python.constants import Values, ValueConstant
from twisted.web import http
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from vumi.transports.parlayx.client import PARLAYX_COMMON_NS
from vumi.transports.parlayx.soaputil import (
soap_envelope, unwrap_soap_envelope, soap_fault, SoapFault)
from vumi.transports.parlayx.xmlutil import (
Namespace, elemfind, gettext, split_qualified, parse_document, tostring)
from vumi.utils import normalize_msisdn
NOTIFICATION_NS = Namespace(
'http://www.csapi.org/schema/parlayx/sms/notification/v2_2/local', 'loc')
def normalize_address(address):
"""
Normalize a ParlayX address.
"""
if address.startswith('tel:'):
address = address[4:]
return normalize_msisdn(address)
class DeliveryStatus(Values):
"""
ParlayX `DeliveryStatus` enumeration type.
"""
DeliveredToNetwork = ValueConstant('delivered')
DeliveryUncertain = ValueConstant('pending')
DeliveryImpossible = ValueConstant('failed')
MessageWaiting = ValueConstant('pending')
DeliveredToTerminal = ValueConstant('delivered')
DeliveryNotificationNotSupported = ValueConstant('failed')
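# Usage sketch (editor's note): incoming ParlayX status names are mapped to vumi's
# delivery statuses via lookupByName, e.g.
#   DeliveryStatus.lookupByName('DeliveredToTerminal').value  # -> 'delivered'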
class SmsMessage(namedtuple('SmsMessage',
['message', 'sender_address',
'service_activation_number', 'timestamp'])):
"""
ParlayX `SmsMessage` complex type.
"""
@classmethod
def from_element(cls, root):
"""
Create an `SmsMessage` instance from an ElementTree element.
"""
return cls(
message=gettext(root, 'message'),
sender_address=gettext(
root, 'senderAddress', parse=normalize_address),
service_activation_number=gettext(
root, 'smsServiceActivationNumber', parse=normalize_address),
timestamp=gettext(root, 'dateTime', parse=iso8601.parse_date))
class DeliveryInformation(namedtuple('DeliveryInformation',
['address', 'delivery_status'])):
"""
ParlayX `DeliveryInformation` complex type.
"""
@classmethod
def from_element(cls, root):
"""
Create a `DeliveryInformation` instance from an ElementTree element.
"""
try:
delivery_status = gettext(
root, 'deliveryStatus', parse=DeliveryStatus.lookupByName)
except ValueError, e:
raise ValueError(
'No such delivery status enumeration value: %r' % (str(e),))
else:
return cls(
address=gettext(root, 'address', parse=normalize_address),
delivery_status=delivery_status)
class SmsNotificationService(Resource):
"""
Web resource to handle SOAP requests for ParlayX SMS deliveries and
delivery receipts.
"""
isLeaf = True
def __init__(self, callback_message_received, callback_message_delivered):
self.callback_message_received = callback_message_received
self.callback_message_delivered = callback_message_delivered
Resource.__init__(self)
def render_POST(self, request):
"""
Process a SOAP request and convert any exceptions into SOAP faults.
"""
def _writeResponse(response):
request.setHeader('Content-Type', 'text/xml; charset="utf-8"')
request.write(tostring(soap_envelope(response)))
request.finish()
def _handleSuccess(result):
request.setResponseCode(http.OK)
return result
def _handleError(f):
# XXX: Perhaps report this back to the transport somehow???
log.err(f, 'Failure processing SOAP request')
request.setResponseCode(http.INTERNAL_SERVER_ERROR)
faultcode = u'soapenv:Server'
if f.check(SoapFault):
return f.value.to_element()
return soap_fault(faultcode, f.getErrorMessage())
try:
tree = parse_document(request.content)
body, header = unwrap_soap_envelope(tree)
except:
d = fail()
else:
d = maybeDeferred(self.process, request, body, header)
d.addCallback(_handleSuccess)
d.addErrback(_handleError)
d.addCallback(_writeResponse)
return NOT_DONE_YET
def process(self, request, body, header=None):
"""
Process a SOAP request.
"""
for child in body.getchildren():
# Since there is no SOAPAction header, and these requests are not
# made to different endpoints, the only way to handle these is to
# switch on the root element's name. Yuck.
localname = split_qualified(child.tag)[1]
meth = getattr(self, 'process_' + localname, self.process_unknown)
return meth(child, header, localname)
raise SoapFault(u'soapenv:Client', u'No actionable items')
def process_unknown(self, root, header, name):
"""
Process unknown notification deliverables.
"""
raise SoapFault(u'soapenv:Server', u'No handler for %s' % (name,))
def process_notifySmsReception(self, root, header, name):
"""
Process a received text message.
"""
linkid = None
if header is not None:
linkid = gettext(header, './/' + str(PARLAYX_COMMON_NS.linkid))
correlator = gettext(root, NOTIFICATION_NS.correlator)
message = SmsMessage.from_element(
elemfind(root, NOTIFICATION_NS.message))
d = maybeDeferred(
self.callback_message_received, correlator, linkid, message)
d.addCallback(
lambda ignored: NOTIFICATION_NS.notifySmsReceptionResponse())
return d
def process_notifySmsDeliveryReceipt(self, root, header, name):
"""
Process a text message delivery receipt.
"""
correlator = gettext(root, NOTIFICATION_NS.correlator)
delivery_info = DeliveryInformation.from_element(
elemfind(root, NOTIFICATION_NS.deliveryStatus))
d = maybeDeferred(self.callback_message_delivered,
correlator, delivery_info.delivery_status.value)
d.addCallback(
lambda ignored: NOTIFICATION_NS.notifySmsDeliveryReceiptResponse())
return d
# XXX: Only used for debugging with SoapUI:
# twistd web --class=vumi.transports.parlayx.server.Root --port=9080
class Root(Resource):
def getChild(self, path, request):
from twisted.internet.defer import succeed
noop = lambda *a, **kw: succeed(None)
if request.postpath == ['services', 'SmsNotification']:
return SmsNotificationService(noop, noop)
return None
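# Illustrative wiring for the notification service above (not part of this
# module; callback names are placeholders). The first callback receives
# (correlator, linkid, message), the second (correlator, delivery_status);
# both may return Deferreds.
#
#     from twisted.web.server import Site
#     from twisted.internet import reactor
#
#     def message_received(correlator, linkid, message):
#         log.msg('Received %r from %s' % (message.message,
#                                          message.sender_address))
#
#     def message_delivered(correlator, status):
#         log.msg('Delivery receipt %s for %s' % (status, correlator))
#
#     site = Site(SmsNotificationService(message_received, message_delivered))
#     reactor.listenTCP(9080, site)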
__all__ = [
'normalize_address', 'DeliveryStatus', 'SmsMessage', 'DeliveryInformation',
'SmsNotificationService']
| bsd-3-clause | -4,572,806,064,318,097,400 | 35.010101 | 79 | 0.637588 | false |
tchx84/social-sugar | extensions/cpsection/keyboard/model.py | 4 | 6730 | # Copyright (C) 2009 OLPC
# Author: Sayamindu Dasgupta <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import xklavier
from gi.repository import GConf
_GROUP_NAME = 'grp' # The XKB name for group switch options
_LAYOUTS_KEY = '/desktop/sugar/peripherals/keyboard/layouts'
_OPTIONS_KEY = '/desktop/sugar/peripherals/keyboard/options'
_MODEL_KEY = '/desktop/sugar/peripherals/keyboard/model'
class KeyboardManager(object):
def __init__(self, display):
self._engine = xklavier.Engine(display)
self._configregistry = xklavier.ConfigRegistry(self._engine)
self._configregistry.load(False)
self._configrec = xklavier.ConfigRec()
self._configrec.get_from_server(self._engine)
self._gconf_client = GConf.Client.get_default()
def _populate_one(self, config_registry, item, store):
store.append([item.get_description(), item.get_name()])
def _populate_two(self, config_registry, item, subitem, store):
layout = item.get_name()
if subitem:
description = '%s, %s' % (subitem.get_description(), \
item.get_description())
variant = subitem.get_name()
else:
description = 'Default layout, %s' % item.get_description()
variant = ''
store.append([description, ('%s(%s)' % (layout, variant))])
def get_models(self):
"""Return list of supported keyboard models"""
models = []
self._configregistry.foreach_model(self._populate_one, models)
models.sort()
return models
def get_languages(self):
"""Return list of supported keyboard languages"""
languages = []
self._configregistry.foreach_language(self._populate_one, languages)
languages.sort()
return languages
def get_layouts_for_language(self, language):
"""Return list of supported keyboard layouts for a given language"""
layouts = []
self._configregistry.foreach_language_variant(language, \
self._populate_two, layouts)
layouts.sort()
return layouts
def get_options_group(self):
"""Return list of supported options for switching keyboard group"""
options = []
self._configregistry.foreach_option(_GROUP_NAME, self._populate_one,
options)
options.sort()
return options
def get_current_model(self):
"""Return the enabled keyboard model"""
model = self._gconf_client.get_string(_MODEL_KEY)
if model:
return model
else:
model = self._configrec.get_model()
self.set_model(model)
return model
def get_current_layouts(self):
"""Return the enabled keyboard layouts with variants"""
# FIXME, gconf_client_get_list not introspectable #681433
layouts_from_gconf = self._gconf_client.get(
'/desktop/sugar/peripherals/keyboard/layouts')
layouts = []
if layouts_from_gconf:
for gval in layouts_from_gconf.get_list():
layout = gval.get_string()
layouts.append(layout)
if layouts:
return layouts
layouts = self._configrec.get_layouts()
variants = self._configrec.get_variants()
layout_list = []
i = 0
for layout in layouts:
if len(variants) <= i or variants[i] == '':
layout_list.append('%s(%s)' % (layout, ''))
else:
layout_list.append('%s(%s)' % (layout, variants[i]))
i += 1
self.set_layouts(layout_list)
return layout_list
def get_current_option_group(self):
"""Return the enabled option for switching keyboard group"""
options = []
# FIXME, gconf_client_get_list not introspectable #681433
        options_from_gconf = self._gconf_client.get(
            '/desktop/sugar/peripherals/keyboard/options')
if options_from_gconf:
for gval in options_from_gconf.get_list():
option = gval.get_string()
options.append(option)
if not options:
options = self._configrec.get_options()
self.set_option_group(options)
for option in options:
if option.startswith(_GROUP_NAME):
return option
return None
def get_max_layouts(self):
"""Return the maximum number of layouts supported simultaneously"""
return self._engine.get_max_num_groups()
def set_model(self, model):
"""Sets the supplied keyboard model"""
if model is None or not model:
return
self._gconf_client.set_string(_MODEL_KEY, model)
self._configrec.set_model(model)
self._configrec.activate(self._engine)
def set_option_group(self, option_group):
"""Sets the supplied option for switching keyboard group"""
#XXX: Merge, not overwrite previous options
if not option_group:
options = ['']
elif isinstance(option_group, list):
options = option_group
else:
options = [option_group]
self._gconf_client.set_list(_OPTIONS_KEY, GConf.ValueType.STRING, options)
self._configrec.set_options(options)
self._configrec.activate(self._engine)
def set_layouts(self, layouts):
"""Sets the supplied keyboard layouts (with variants)"""
if layouts is None or not layouts:
return
self._gconf_client.set_list(_LAYOUTS_KEY, GConf.ValueType.STRING, layouts)
layouts_list = []
variants_list = []
for layout in layouts:
layouts_list.append(layout.split('(')[0])
variants_list.append(layout.split('(')[1][:-1])
self._configrec.set_layouts(layouts_list)
self._configrec.set_variants(variants_list)
self._configrec.activate(self._engine)
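# Rough usage sketch (illustrative only; how the X display handle is obtained
# depends on the calling code and is assumed here):
#
#     from gi.repository import GdkX11
#     display = GdkX11.X11Display.get_default()
#     manager = KeyboardManager(display)
#     print manager.get_current_model()
#     manager.set_layouts(['us(basic)', 'fr(oss)'])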
| gpl-2.0 | -2,193,670,078,052,347,000 | 35.775956 | 82 | 0.609064 | false |
gangadharkadam/sterp | erpnext/setup/doctype/company/company.py | 2 | 15316 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, os
from frappe import _
from frappe.utils import cint
import frappe.defaults
from frappe.model.document import Document
class Company(Document):
def onload(self):
self.get("__onload").transactions_exist = self.check_if_transactions_exist()
def check_if_transactions_exist(self):
exists = False
for doctype in ["Sales Invoice", "Delivery Note", "Sales Order", "Quotation",
"Purchase Invoice", "Purchase Receipt", "Purchase Order", "Supplier Quotation"]:
if frappe.db.sql("""select name from `tab%s` where company=%s and docstatus=1
limit 1""" % (doctype, "%s"), self.name):
exists = True
break
return exists
def validate(self):
if self.get('__islocal') and len(self.abbr) > 5:
frappe.throw(_("Abbreviation cannot have more than 5 characters"))
self.previous_default_currency = frappe.db.get_value("Company", self.name, "default_currency")
if self.default_currency and self.previous_default_currency and \
self.default_currency != self.previous_default_currency and \
self.check_if_transactions_exist():
frappe.throw(_("Cannot change company's default currency, because there are existing transactions. Transactions must be cancelled to change the default currency."))
self.validate_default_accounts()
def validate_default_accounts(self):
for field in ["default_bank_account", "default_cash_account", "receivables_group", "payables_group",
"default_expense_account", "default_income_account", "stock_received_but_not_billed",
"stock_adjustment_account", "expenses_included_in_valuation"]:
if self.get(field):
for_company = frappe.db.get_value("Account", self.get(field), "company")
if for_company != self.name:
frappe.throw(_("Account {0} does not belong to company: {1}")
.format(self.get(field), self.name))
def on_update(self):
if not frappe.db.sql("""select name from tabAccount
where company=%s and docstatus<2 limit 1""", self.name):
self.create_default_accounts()
self.create_default_warehouses()
self.install_country_fixtures()
if not frappe.db.get_value("Cost Center", {"group_or_ledger": "Ledger",
"company": self.name}):
self.create_default_cost_center()
self.set_default_accounts()
if self.default_currency:
frappe.db.set_value("Currency", self.default_currency, "enabled", 1)
def install_country_fixtures(self):
if os.path.exists(os.path.join(os.path.dirname(__file__), "fixtures", self.country.lower())):
frappe.get_attr("erpnext.setup.doctype.company.fixtures.{0}.install".format(self.country.lower()))(self)
def create_default_warehouses(self):
for whname in (_("Stores"), _("Work In Progress"), _("Finished Goods")):
if not frappe.db.exists("Warehouse", whname + " - " + self.abbr):
stock_group = frappe.db.get_value("Account", {"account_type": "Stock",
"group_or_ledger": "Group", "company": self.name})
if stock_group:
frappe.get_doc({
"doctype":"Warehouse",
"warehouse_name": whname,
"company": self.name,
"create_account_under": stock_group
}).insert()
def create_default_accounts(self):
if self.chart_of_accounts:
self.import_chart_of_account()
else:
self.create_standard_accounts()
frappe.db.set(self, "receivables_group", _("Accounts Receivable") + " - " + self.abbr)
frappe.db.set(self, "payables_group", _("Accounts Payable") + " - " + self.abbr)
def import_chart_of_account(self):
chart = frappe.get_doc("Chart of Accounts", self.chart_of_accounts)
chart.create_accounts(self.name)
def add_acc(self, lst):
account = frappe.get_doc({
"doctype": "Account",
"freeze_account": "No",
"master_type": "",
"company": self.name
})
for d in self.fld_dict.keys():
account.set(d, (d == 'parent_account' and lst[self.fld_dict[d]]) and lst[self.fld_dict[d]] +' - '+ self.abbr or lst[self.fld_dict[d]])
if not account.parent_account:
account.ignore_mandatory = True
account.insert()
def set_default_accounts(self):
def _set_default_account(fieldname, account_type):
if self.get(fieldname):
return
account = frappe.db.get_value("Account", {"account_type": account_type,
"group_or_ledger": "Ledger", "company": self.name})
if account:
self.db_set(fieldname, account)
_set_default_account("default_cash_account", "Cash")
_set_default_account("default_bank_account", "Bank")
if cint(frappe.db.get_value("Accounts Settings", None, "auto_accounting_for_stock")):
_set_default_account("stock_received_but_not_billed", "Stock Received But Not Billed")
_set_default_account("stock_adjustment_account", "Stock Adjustment")
_set_default_account("expenses_included_in_valuation", "Expenses Included In Valuation")
if not self.default_income_account:
self.db_set("default_income_account", frappe.db.get_value("Account",
{"account_name": _("Sales"), "company": self.name}))
def create_default_cost_center(self):
cc_list = [
{
'cost_center_name': self.name,
'company':self.name,
'group_or_ledger':'Group',
'parent_cost_center':None
},
{
'cost_center_name':_('Main'),
'company':self.name,
'group_or_ledger':'Ledger',
'parent_cost_center':self.name + ' - ' + self.abbr
},
]
for cc in cc_list:
cc.update({"doctype": "Cost Center"})
cc_doc = frappe.get_doc(cc)
cc_doc.ignore_permissions = True
if cc.get("cost_center_name") == self.name:
cc_doc.ignore_mandatory = True
cc_doc.insert()
frappe.db.set(self, "cost_center", _("Main") + " - " + self.abbr)
def on_trash(self):
"""
Trash accounts and cost centers for this company if no gl entry exists
"""
rec = frappe.db.sql("SELECT name from `tabGL Entry` where company = %s", self.name)
if not rec:
#delete tabAccount
frappe.db.sql("delete from `tabAccount` where company = %s order by lft desc, rgt desc", self.name)
#delete cost center child table - budget detail
frappe.db.sql("delete bd.* from `tabBudget Detail` bd, `tabCost Center` cc where bd.parent = cc.name and cc.company = %s", self.name)
#delete cost center
frappe.db.sql("delete from `tabCost Center` WHERE company = %s order by lft desc, rgt desc", self.name)
if not frappe.db.get_value("Stock Ledger Entry", {"company": self.name}):
frappe.db.sql("""delete from `tabWarehouse` where company=%s""", self.name)
frappe.defaults.clear_default("company", value=self.name)
frappe.db.sql("""update `tabSingles` set value=""
where doctype='Global Defaults' and field='default_company'
and value=%s""", self.name)
def before_rename(self, olddn, newdn, merge=False):
if merge:
frappe.throw(_("Sorry, companies cannot be merged"))
def after_rename(self, olddn, newdn, merge=False):
frappe.db.set(self, "company_name", newdn)
frappe.db.sql("""update `tabDefaultValue` set defvalue=%s
where defkey='Company' and defvalue=%s""", (newdn, olddn))
frappe.defaults.clear_cache()
def create_standard_accounts(self):
self.fld_dict = {
'account_name': 0,
'parent_account': 1,
'group_or_ledger': 2,
'account_type': 3,
'report_type': 4,
'tax_rate': 5,
'root_type': 6
}
acc_list_common = [
[_('Application of Funds (Assets)'), None,'Group', None,'Balance Sheet', None, 'Asset'],
[_('Current Assets'),_('Application of Funds (Assets)'),'Group', None,'Balance Sheet', None, 'Asset'],
[_('Accounts Receivable'),_('Current Assets'),'Group', None,'Balance Sheet', None, 'Asset'],
[_('Bank Accounts'),_('Current Assets'),'Group','Bank','Balance Sheet', None, 'Asset'],
[_('Cash In Hand'),_('Current Assets'),'Group','Cash','Balance Sheet', None, 'Asset'],
[_('Cash'),_('Cash In Hand'),'Ledger','Cash','Balance Sheet', None, 'Asset'],
[_('Loans and Advances (Assets)'),_('Current Assets'),'Group', None,'Balance Sheet', None, 'Asset'],
[_('Securities and Deposits'),_('Current Assets'),'Group', None,'Balance Sheet', None, 'Asset'],
[_('Earnest Money'),_('Securities and Deposits'),'Ledger', None,'Balance Sheet', None, 'Asset'],
[_('Stock Assets'),_('Current Assets'),'Group','Stock','Balance Sheet', None, 'Asset'],
[_('Tax Assets'),_('Current Assets'),'Group', None,'Balance Sheet', None, 'Asset'],
[_('Fixed Assets'),_('Application of Funds (Assets)'),'Group', None,'Balance Sheet', None, 'Asset'],
[_('Capital Equipments'),_('Fixed Assets'),'Ledger','Fixed Asset','Balance Sheet', None, 'Asset'],
[_('Computers'),_('Fixed Assets'),'Ledger','Fixed Asset','Balance Sheet', None, 'Asset'],
[_('Furniture and Fixture'),_('Fixed Assets'),'Ledger','Fixed Asset','Balance Sheet', None, 'Asset'],
[_('Office Equipments'),_('Fixed Assets'),'Ledger','Fixed Asset','Balance Sheet', None, 'Asset'],
[_('Plant and Machinery'),_('Fixed Assets'),'Ledger','Fixed Asset','Balance Sheet', None, 'Asset'],
[_('Investments'),_('Application of Funds (Assets)'),'Group', None,'Balance Sheet', None, 'Asset'],
[_('Temporary Accounts (Assets)'),_('Application of Funds (Assets)'),'Group', None,'Balance Sheet', None, 'Asset'],
[_('Temporary Assets'),_('Temporary Accounts (Assets)'),'Ledger', None,'Balance Sheet', None, 'Asset'],
[_('Expenses'), None,'Group','Expense Account','Profit and Loss', None, 'Expense'],
[_('Direct Expenses'),_('Expenses'),'Group','Expense Account','Profit and Loss', None, 'Expense'],
[_('Stock Expenses'),_('Direct Expenses'),'Group','Expense Account','Profit and Loss', None, 'Expense'],
[_('Cost of Goods Sold'),_('Stock Expenses'),'Ledger','Expense Account','Profit and Loss', None, 'Expense'],
[_('Stock Adjustment'),_('Stock Expenses'),'Ledger','Stock Adjustment','Profit and Loss', None, 'Expense'],
[_('Expenses Included In Valuation'), _("Stock Expenses"), 'Ledger', 'Expenses Included In Valuation', 'Profit and Loss', None, 'Expense'],
[_('Indirect Expenses'), _('Expenses'),'Group','Expense Account','Profit and Loss', None, 'Expense'],
[_('Marketing Expenses'), _('Indirect Expenses'),'Ledger','Chargeable','Profit and Loss', None, 'Expense'],
[_('Sales Expenses'), _('Indirect Expenses'),'Ledger','Expense Account','Profit and Loss', None, 'Expense'],
[_('Administrative Expenses'), _('Indirect Expenses'),'Ledger','Expense Account','Profit and Loss', None, 'Expense'],
[_('Charity and Donations'), _('Indirect Expenses'),'Ledger','Expense Account','Profit and Loss', None, 'Expense'],
[_('Commission on Sales'), _('Indirect Expenses'),'Ledger','Expense Account','Profit and Loss', None, 'Expense'],
[_('Travel Expenses'), _('Indirect Expenses'),'Ledger','Expense Account','Profit and Loss', None, 'Expense'],
[_('Entertainment Expenses'), _('Indirect Expenses'),'Ledger','Expense Account','Profit and Loss', None, 'Expense'],
[_('Depreciation'), _('Indirect Expenses'),'Ledger','Expense Account','Profit and Loss', None, 'Expense'],
[_('Freight and Forwarding Charges'), _('Indirect Expenses'),'Ledger','Chargeable','Profit and Loss', None, 'Expense'],
[_('Legal Expenses'), _('Indirect Expenses'),'Ledger','Expense Account','Profit and Loss', None, 'Expense'],
[_('Miscellaneous Expenses'), _('Indirect Expenses'),'Ledger','Chargeable','Profit and Loss', None, 'Expense'],
[_('Office Maintenance Expenses'), _('Indirect Expenses'),'Ledger','Expense Account','Profit and Loss', None, 'Expense'],
[_('Office Rent'), _('Indirect Expenses'),'Ledger','Expense Account','Profit and Loss', None, 'Expense'],
[_('Postal Expenses'), _('Indirect Expenses'),'Ledger','Expense Account','Profit and Loss', None, 'Expense'],
[_('Print and Stationary'), _('Indirect Expenses'),'Ledger','Expense Account','Profit and Loss', None, 'Expense'],
[_('Rounded Off'), _('Indirect Expenses'),'Ledger','Expense Account','Profit and Loss', None, 'Expense'],
[_('Salary') ,_('Indirect Expenses'),'Ledger','Expense Account','Profit and Loss', None, 'Expense'],
[_('Telephone Expenses') ,_('Indirect Expenses'),'Ledger','Expense Account','Profit and Loss', None, 'Expense'],
[_('Utility Expenses') ,_('Indirect Expenses'),'Ledger','Expense Account','Profit and Loss', None, 'Expense'],
[_('Income'), None,'Group', None,'Profit and Loss', None, 'Income'],
[_('Direct Income'),_('Income'),'Group','Income Account','Profit and Loss', None, 'Income'],
[_('Sales'),_('Direct Income'),'Ledger','Income Account','Profit and Loss', None, 'Income'],
[_('Service'),_('Direct Income'),'Ledger','Income Account','Profit and Loss', None, 'Income'],
[_('Indirect Income'),_('Income'),'Group','Income Account','Profit and Loss', None, 'Income'],
[_('Source of Funds (Liabilities)'), None,'Group', None,'Balance Sheet', None, 'Liability'],
[_('Capital Account'),_('Source of Funds (Liabilities)'),'Group', None,'Balance Sheet', None, 'Liability'],
[_('Reserves and Surplus'),_('Capital Account'),'Ledger', None,'Balance Sheet', None, 'Liability'],
[_('Shareholders Funds'),_('Capital Account'),'Ledger', None,'Balance Sheet', None, 'Liability'],
[_('Current Liabilities'),_('Source of Funds (Liabilities)'),'Group', None,'Balance Sheet', None, 'Liability'],
[_('Accounts Payable'),_('Current Liabilities'),'Group', None,'Balance Sheet', None, 'Liability'],
[_('Stock Liabilities'),_('Current Liabilities'),'Group', None,'Balance Sheet', None, 'Liability'],
[_('Stock Received But Not Billed'), _('Stock Liabilities'), 'Ledger', 'Stock Received But Not Billed', 'Balance Sheet', None, 'Liability'],
[_('Duties and Taxes'),_('Current Liabilities'),'Group', None,'Balance Sheet', None, 'Liability'],
[_('Loans (Liabilities)'),_('Current Liabilities'),'Group', None,'Balance Sheet', None, 'Liability'],
[_('Secured Loans'),_('Loans (Liabilities)'),'Group', None,'Balance Sheet', None, 'Liability'],
[_('Unsecured Loans'),_('Loans (Liabilities)'),'Group', None,'Balance Sheet', None, 'Liability'],
[_('Bank Overdraft Account'),_('Loans (Liabilities)'),'Group', None,'Balance Sheet', None, 'Liability'],
[_('Temporary Accounts (Liabilities)'),_('Source of Funds (Liabilities)'),'Group', None,'Balance Sheet', None, 'Liability'],
[_('Temporary Liabilities'),_('Temporary Accounts (Liabilities)'),'Ledger', None,'Balance Sheet', None, 'Liability']
]
# load common account heads
for d in acc_list_common:
self.add_acc(d)
@frappe.whitelist()
def replace_abbr(company, old, new):
frappe.db.set_value("Company", company, "abbr", new)
def _rename_record(dt):
for d in frappe.db.sql("select name from `tab%s` where company=%s" % (dt, '%s'), company):
parts = d[0].split(" - ")
if parts[-1].lower() == old.lower():
name_without_abbr = " - ".join(parts[:-1])
frappe.rename_doc(dt, d[0], name_without_abbr + " - " + new)
for dt in ["Account", "Cost Center", "Warehouse"]:
_rename_record(dt)
frappe.db.commit()
def get_name_with_abbr(name, company):
company_abbr = frappe.db.get_value("Company", company, "abbr")
parts = name.split(" - ")
if parts[-1].lower() != company_abbr.lower():
parts.append(company_abbr)
return " - ".join(parts)
| agpl-3.0 | 7,579,635,123,552,390,000 | 50.053333 | 168 | 0.654675 | false |
manmeetsaini/etools | code/scripts/ssh_dashboard.py | 2 | 1668 | from tkinter import *
from tkinter import ttk
#from etools import *
def calculate(*args):
pass
def test():
pass
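# The two handlers above are placeholders; a real "Connect" action might use
# paramiko along these lines (sketch only, host/credential handling assumed):
#
#     import paramiko
#
#     def connect_ssh(host, username, password):
#         client = paramiko.SSHClient()
#         client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
#         client.connect(host, username=username, password=password)
#         return client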
root = Tk()
# Title of the Window
root.title("Network Console")
mainframe = ttk.Frame(root, padding="3 3 12 12")
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0, weight=1)
feet = StringVar()
meters = StringVar()
ip = ""
user = ""
password = ""
# Entry Field in UI
feet_entry = ttk.Entry(mainframe, width=16, textvariable=ip)
feet_entry.grid(column=2, row=2, sticky=(W, E))
feet_entry = ttk.Entry(mainframe, width=16, textvariable=user)
feet_entry.grid(column=2, row=3, sticky=(W, E))
feet_entry = ttk.Entry(mainframe, width=16, textvariable=password)
feet_entry.grid(column=2, row=4, sticky=(W, E))
#To Print result in UI
ttk.Label(mainframe, textvariable=meters).grid(column=2, row=6, sticky=(W, E))
#To Create Button
ttk.Button(mainframe, text="Connect", command=test).grid(column=2, row=5, sticky=W)
ttk.Button(mainframe, text="Disconnect", command=calculate).grid(column=3, row=5, sticky=W)
#Labels in UI
ttk.Label(mainframe, text="Select Device type").grid(column=1, row=1, sticky=W)
ttk.Label(mainframe, text="Select Model").grid(Column1, row=2,
ttk.Label(mainframe, text="IP Address").grid(column=1, row=2, sticky=W)
ttk.Label(mainframe, text="User").grid(column=1, row=3, sticky=W)
ttk.Label(mainframe, text="Password").grid(column=1, row=4, sticky=W)
for child in mainframe.winfo_children(): child.grid_configure(padx=5, pady=5)
feet_entry.focus()
root.bind('<Return>', calculate)
root.mainloop()
| gpl-3.0 | 2,872,606,875,422,960,000 | 27.785714 | 91 | 0.692446 | false |
WillieMaddox/scipy | scipy/sparse/base.py | 4 | 28818 | """Base class for sparse matrices"""
from __future__ import division, print_function, absolute_import
__all__ = ['spmatrix', 'isspmatrix', 'issparse',
'SparseWarning','SparseEfficiencyWarning']
import sys
import numpy as np
from scipy._lib.six import xrange
from .sputils import isdense, isscalarlike, isintlike
class SparseWarning(Warning):
pass
class SparseFormatWarning(SparseWarning):
pass
class SparseEfficiencyWarning(SparseWarning):
pass
# The formats that we might potentially understand.
_formats = {'csc':[0, "Compressed Sparse Column"],
'csr':[1, "Compressed Sparse Row"],
'dok':[2, "Dictionary Of Keys"],
'lil':[3, "LInked List"],
'dod':[4, "Dictionary of Dictionaries"],
'sss':[5, "Symmetric Sparse Skyline"],
'coo':[6, "COOrdinate"],
'lba':[7, "Linpack BAnded"],
'egd':[8, "Ellpack-itpack Generalized Diagonal"],
'dia':[9, "DIAgonal"],
'bsr':[10, "Block Sparse Row"],
'msr':[11, "Modified compressed Sparse Row"],
'bsc':[12, "Block Sparse Column"],
'msc':[13, "Modified compressed Sparse Column"],
'ssk':[14, "Symmetric SKyline"],
'nsk':[15, "Nonsymmetric SKyline"],
'jad':[16, "JAgged Diagonal"],
'uss':[17, "Unsymmetric Sparse Skyline"],
'vbr':[18, "Variable Block Row"],
'und':[19, "Undefined"]
}
# These univariate ufuncs preserve zeros.
_ufuncs_with_fixed_point_at_zero = frozenset([
np.sin, np.tan, np.arcsin, np.arctan, np.sinh, np.tanh, np.arcsinh,
np.arctanh, np.rint, np.sign, np.expm1, np.log1p, np.deg2rad,
np.rad2deg, np.floor, np.ceil, np.trunc, np.sqrt])
MAXPRINT = 50
class spmatrix(object):
""" This class provides a base class for all sparse matrices. It
cannot be instantiated. Most of the work is provided by subclasses.
"""
__array_priority__ = 10.1
ndim = 2
def __init__(self, maxprint=MAXPRINT):
self.format = self.__class__.__name__[:3]
self._shape = None
if self.format == 'spm':
raise ValueError("This class is not intended"
" to be instantiated directly.")
self.maxprint = maxprint
def set_shape(self,shape):
shape = tuple(shape)
if len(shape) != 2:
raise ValueError("Only two-dimensional sparse arrays "
"are supported.")
try:
shape = int(shape[0]),int(shape[1]) # floats, other weirdness
except:
raise TypeError('invalid shape')
if not (shape[0] >= 0 and shape[1] >= 0):
raise ValueError('invalid shape')
if (self._shape != shape) and (self._shape is not None):
try:
self = self.reshape(shape)
except NotImplementedError:
raise NotImplementedError("Reshaping not implemented for %s." %
self.__class__.__name__)
self._shape = shape
def get_shape(self):
return self._shape
shape = property(fget=get_shape, fset=set_shape)
def reshape(self, shape):
raise NotImplementedError("Reshaping not implemented for %s." %
self.__class__.__name__)
def astype(self, t):
return self.tocsr().astype(t).asformat(self.format)
def asfptype(self):
"""Upcast matrix to a floating point format (if necessary)"""
fp_types = ['f','d','F','D']
if self.dtype.char in fp_types:
return self
else:
for fp_type in fp_types:
if self.dtype <= np.dtype(fp_type):
return self.astype(fp_type)
raise TypeError('cannot upcast [%s] to a floating '
'point format' % self.dtype.name)
def __iter__(self):
for r in xrange(self.shape[0]):
yield self[r,:]
def getmaxprint(self):
try:
maxprint = self.maxprint
except AttributeError:
maxprint = MAXPRINT
return maxprint
# def typecode(self):
# try:
# typ = self.dtype.char
# except AttributeError:
# typ = None
# return typ
def getnnz(self):
try:
return self.nnz
except AttributeError:
raise AttributeError("nnz not defined")
def getformat(self):
try:
format = self.format
except AttributeError:
format = 'und'
return format
def __repr__(self):
nnz = self.getnnz()
format = self.getformat()
return "<%dx%d sparse matrix of type '%s'\n" \
"\twith %d stored elements in %s format>" % \
(self.shape + (self.dtype.type, nnz, _formats[format][1]))
def __str__(self):
maxprint = self.getmaxprint()
A = self.tocoo()
nnz = self.getnnz()
# helper function, outputs "(i,j) v"
def tostr(row,col,data):
triples = zip(list(zip(row,col)),data)
return '\n'.join([(' %s\t%s' % t) for t in triples])
if nnz > maxprint:
half = maxprint // 2
out = tostr(A.row[:half], A.col[:half], A.data[:half])
out += "\n :\t:\n"
half = maxprint - maxprint//2
out += tostr(A.row[-half:], A.col[-half:], A.data[-half:])
else:
out = tostr(A.row, A.col, A.data)
return out
def __bool__(self): # Simple -- other ideas?
if self.shape == (1, 1):
return True if self.nnz == 1 else False
else:
raise ValueError("The truth value of an array with more than one "
"element is ambiguous. Use a.any() or a.all().")
__nonzero__ = __bool__
# What should len(sparse) return? For consistency with dense matrices,
# perhaps it should be the number of rows? But for some uses the number of
# non-zeros is more important. For now, raise an exception!
def __len__(self):
# return self.getnnz()
raise TypeError("sparse matrix length is ambiguous; use getnnz()"
" or shape[0]")
def asformat(self, format):
"""Return this matrix in a given sparse format
Parameters
----------
format : {string, None}
desired sparse matrix format
- None for no format conversion
- "csr" for csr_matrix format
- "csc" for csc_matrix format
- "lil" for lil_matrix format
- "dok" for dok_matrix format and so on
"""
if format is None or format == self.format:
return self
else:
return getattr(self,'to' + format)()
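    # Illustrative round trip through asformat (doctest-style sketch):
    #
    # >>> from scipy.sparse import csr_matrix
    # >>> A = csr_matrix([[1, 0], [0, 2]])
    # >>> A.asformat('coo').format
    # 'coo'
    # >>> A.asformat(None) is A
    # True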
###################################################################
# NOTE: All arithmetic operations use csr_matrix by default.
# Therefore a new sparse matrix format just needs to define a
# .tocsr() method to provide arithmetic support. Any of these
# methods can be overridden for efficiency.
####################################################################
def multiply(self, other):
"""Point-wise multiplication by another matrix
"""
return self.tocsr().multiply(other)
def maximum(self, other):
return self.tocsr().maximum(other)
def minimum(self, other):
return self.tocsr().minimum(other)
def dot(self, other):
"""Ordinary dot product
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
>>> v = np.array([1, 0, -1])
>>> A.dot(v)
array([ 1, -3, -1], dtype=int64)
"""
return self * other
def power(self, n, dtype=None):
return self.tocsr().power(n, dtype=dtype)
def __eq__(self, other):
return self.tocsr().__eq__(other)
def __ne__(self, other):
return self.tocsr().__ne__(other)
def __lt__(self,other):
return self.tocsr().__lt__(other)
def __gt__(self,other):
return self.tocsr().__gt__(other)
def __le__(self,other):
return self.tocsr().__le__(other)
def __ge__(self,other):
return self.tocsr().__ge__(other)
def __abs__(self):
return abs(self.tocsr())
def __add__(self, other): # self + other
return self.tocsr().__add__(other)
def __radd__(self, other): # other + self
return self.tocsr().__radd__(other)
def __sub__(self, other): # self - other
# note: this can't be replaced by self + (-other) for unsigned types
return self.tocsr().__sub__(other)
def __rsub__(self, other): # other - self
return self.tocsr().__rsub__(other)
def __mul__(self, other):
"""interpret other and call one of the following
self._mul_scalar()
self._mul_vector()
self._mul_multivector()
self._mul_sparse_matrix()
"""
M,N = self.shape
if other.__class__ is np.ndarray:
# Fast path for the most common case
if other.shape == (N,):
return self._mul_vector(other)
elif other.shape == (N, 1):
return self._mul_vector(other.ravel()).reshape(M, 1)
elif other.ndim == 2 and other.shape[0] == N:
return self._mul_multivector(other)
if isscalarlike(other):
# scalar value
return self._mul_scalar(other)
if issparse(other):
if self.shape[1] != other.shape[0]:
raise ValueError('dimension mismatch')
return self._mul_sparse_matrix(other)
try:
other.shape
except AttributeError:
# If it's a list or whatever, treat it like a matrix
other_a = np.asanyarray(other)
if other_a.ndim == 0 and other_a.dtype == np.object_:
# Not interpretable as an array; return NotImplemented so that
# other's __rmul__ can kick in if that's implemented.
return NotImplemented
other = other_a
if other.ndim == 1 or other.ndim == 2 and other.shape[1] == 1:
# dense row or column vector
if other.shape != (N,) and other.shape != (N,1):
raise ValueError('dimension mismatch')
result = self._mul_vector(np.ravel(other))
if isinstance(other, np.matrix):
result = np.asmatrix(result)
if other.ndim == 2 and other.shape[1] == 1:
# If 'other' was an (nx1) column vector, reshape the result
result = result.reshape(-1,1)
return result
elif other.ndim == 2:
##
# dense 2D array or matrix ("multivector")
if other.shape[0] != self.shape[1]:
raise ValueError('dimension mismatch')
result = self._mul_multivector(np.asarray(other))
if isinstance(other, np.matrix):
result = np.asmatrix(result)
return result
else:
raise ValueError('could not interpret dimensions')
# by default, use CSR for __mul__ handlers
def _mul_scalar(self, other):
return self.tocsr()._mul_scalar(other)
def _mul_vector(self, other):
return self.tocsr()._mul_vector(other)
def _mul_multivector(self, other):
return self.tocsr()._mul_multivector(other)
def _mul_sparse_matrix(self, other):
return self.tocsr()._mul_sparse_matrix(other)
def __rmul__(self, other): # other * self
if isscalarlike(other):
return self.__mul__(other)
else:
# Don't use asarray unless we have to
try:
tr = other.transpose()
except AttributeError:
tr = np.asarray(other).transpose()
return (self.transpose() * tr).transpose()
####################
# Other Arithmetic #
####################
def _divide(self, other, true_divide=False, rdivide=False):
if isscalarlike(other):
if rdivide:
if true_divide:
return np.true_divide(other, self.todense())
else:
return np.divide(other, self.todense())
if true_divide and np.can_cast(self.dtype, np.float_):
return self.astype(np.float_)._mul_scalar(1./other)
else:
r = self._mul_scalar(1./other)
scalar_dtype = np.asarray(other).dtype
if (np.issubdtype(self.dtype, np.integer)
and np.issubdtype(scalar_dtype, np.integer)):
return r.astype(self.dtype)
else:
return r
elif isdense(other):
if not rdivide:
if true_divide:
return np.true_divide(self.todense(), other)
else:
return np.divide(self.todense(), other)
else:
if true_divide:
return np.true_divide(other, self.todense())
else:
return np.divide(other, self.todense())
elif isspmatrix(other):
if rdivide:
return other._divide(self, true_divide, rdivide=False)
self_csr = self.tocsr()
if true_divide and np.can_cast(self.dtype, np.float_):
return self_csr.astype(np.float_)._divide_sparse(other)
else:
return self_csr._divide_sparse(other)
else:
return NotImplemented
def __truediv__(self, other):
return self._divide(other, true_divide=True)
def __div__(self, other):
# Always do true division
return self._divide(other, true_divide=True)
def __rtruediv__(self, other):
# Implementing this as the inverse would be too magical -- bail out
return NotImplemented
def __rdiv__(self, other):
# Implementing this as the inverse would be too magical -- bail out
return NotImplemented
def __neg__(self):
return -self.tocsr()
def __iadd__(self, other):
return NotImplemented
def __isub__(self, other):
return NotImplemented
def __imul__(self, other):
return NotImplemented
def __idiv__(self, other):
return self.__itruediv__(other)
def __itruediv__(self, other):
return NotImplemented
def __pow__(self, other):
if self.shape[0] != self.shape[1]:
raise TypeError('matrix is not square')
if isintlike(other):
other = int(other)
if other < 0:
raise ValueError('exponent must be >= 0')
if other == 0:
from .construct import eye
return eye(self.shape[0], dtype=self.dtype)
elif other == 1:
return self.copy()
else:
tmp = self.__pow__(other//2)
if (other % 2):
return self * tmp * tmp
else:
return tmp * tmp
elif isscalarlike(other):
raise ValueError('exponent must be an integer')
else:
return NotImplemented
def __getattr__(self, attr):
if attr == 'A':
return self.toarray()
elif attr == 'T':
return self.transpose()
elif attr == 'H':
return self.getH()
elif attr == 'real':
return self._real()
elif attr == 'imag':
return self._imag()
elif attr == 'size':
return self.getnnz()
else:
raise AttributeError(attr + " not found")
def transpose(self):
return self.tocsr().transpose()
def conj(self):
return self.tocsr().conj()
def conjugate(self):
return self.conj()
# Renamed conjtranspose() -> getH() for compatibility with dense matrices
def getH(self):
return self.transpose().conj()
def _real(self):
return self.tocsr()._real()
def _imag(self):
return self.tocsr()._imag()
def nonzero(self):
"""nonzero indices
Returns a tuple of arrays (row,col) containing the indices
of the non-zero elements of the matrix.
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[1,2,0],[0,0,3],[4,0,5]])
>>> A.nonzero()
(array([0, 0, 1, 2, 2]), array([0, 1, 2, 0, 2]))
"""
# convert to COOrdinate format
A = self.tocoo()
nz_mask = A.data != 0
return (A.row[nz_mask],A.col[nz_mask])
def getcol(self, j):
"""Returns a copy of column j of the matrix, as an (m x 1) sparse
matrix (column vector).
"""
# Spmatrix subclasses should override this method for efficiency.
# Post-multiply by a (n x 1) column vector 'a' containing all zeros
# except for a_j = 1
from .csc import csc_matrix
n = self.shape[1]
if j < 0:
j += n
if j < 0 or j >= n:
raise IndexError("index out of bounds")
col_selector = csc_matrix(([1], [[j], [0]]), shape=(n,1), dtype=self.dtype)
return self * col_selector
def getrow(self, i):
"""Returns a copy of row i of the matrix, as a (1 x n) sparse
matrix (row vector).
"""
# Spmatrix subclasses should override this method for efficiency.
# Pre-multiply by a (1 x m) row vector 'a' containing all zeros
# except for a_i = 1
from .csr import csr_matrix
m = self.shape[0]
if i < 0:
i += m
if i < 0 or i >= m:
raise IndexError("index out of bounds")
row_selector = csr_matrix(([1], [[0], [i]]), shape=(1,m), dtype=self.dtype)
return row_selector * self
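    # Sketch of the row/column extraction helpers above (illustrative):
    #
    # >>> from scipy.sparse import csr_matrix
    # >>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
    # >>> A.getcol(0).toarray().ravel()   # -> array([1, 0, 4])
    # >>> A.getrow(1).toarray().ravel()   # -> array([0, 0, 3])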
# def __array__(self):
# return self.toarray()
def todense(self, order=None, out=None):
"""
Return a dense matrix representation of this matrix.
Parameters
----------
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in C (row-major)
or Fortran (column-major) order in memory. The default
is 'None', indicating the NumPy default of C-ordered.
Cannot be specified in conjunction with the `out`
argument.
out : ndarray, 2-dimensional, optional
If specified, uses this array (or `numpy.matrix`) as the
output buffer instead of allocating a new array to
return. The provided array must have the same shape and
dtype as the sparse matrix on which you are calling the
method.
Returns
-------
arr : numpy.matrix, 2-dimensional
A NumPy matrix object with the same shape and containing
the same data represented by the sparse matrix, with the
requested memory order. If `out` was passed and was an
array (rather than a `numpy.matrix`), it will be filled
with the appropriate values and returned wrapped in a
`numpy.matrix` object that shares the same memory.
"""
return np.asmatrix(self.toarray(order=order, out=out))
def toarray(self, order=None, out=None):
"""
Return a dense ndarray representation of this matrix.
Parameters
----------
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in C (row-major)
or Fortran (column-major) order in memory. The default
is 'None', indicating the NumPy default of C-ordered.
Cannot be specified in conjunction with the `out`
argument.
out : ndarray, 2-dimensional, optional
If specified, uses this array as the output buffer
instead of allocating a new array to return. The provided
array must have the same shape and dtype as the sparse
matrix on which you are calling the method. For most
sparse types, `out` is required to be memory contiguous
(either C or Fortran ordered).
Returns
-------
arr : ndarray, 2-dimensional
An array with the same shape and containing the same
data represented by the sparse matrix, with the requested
memory order. If `out` was passed, the same object is
returned after being modified in-place to contain the
appropriate values.
"""
return self.tocoo().toarray(order=order, out=out)
def todok(self):
return self.tocoo().todok()
def tocoo(self):
return self.tocsr().tocoo()
def tolil(self):
return self.tocsr().tolil()
def todia(self):
return self.tocoo().todia()
def tobsr(self, blocksize=None):
return self.tocsr().tobsr(blocksize=blocksize)
def copy(self):
return self.__class__(self,copy=True)
def sum(self, axis=None):
"""Sum the matrix over the given axis. If the axis is None, sum
over both rows and columns, returning a scalar.
"""
# We use multiplication by an array of ones to achieve this.
# For some sparse matrix formats more efficient methods are
# possible -- these should override this function.
m, n = self.shape
# Mimic numpy's casting.
if np.issubdtype(self.dtype, np.float_):
res_dtype = np.float_
elif (np.issubdtype(self.dtype, np.int_) or
np.issubdtype(self.dtype, np.bool_)):
res_dtype = np.int_
elif np.issubdtype(self.dtype, np.complex_):
res_dtype = np.complex_
else:
res_dtype = self.dtype
if axis is None:
# sum over rows and columns
return (self * np.asmatrix(np.ones((n, 1), dtype=res_dtype))).sum()
if axis < 0:
axis += 2
if axis == 0:
# sum over columns
return np.asmatrix(np.ones((1, m), dtype=res_dtype)) * self
elif axis == 1:
# sum over rows
return self * np.asmatrix(np.ones((n, 1), dtype=res_dtype))
else:
raise ValueError("axis out of bounds")
def mean(self, axis=None):
"""Average the matrix over the given axis. If the axis is None,
average over both rows and columns, returning a scalar.
"""
# Mimic numpy's casting.
if (np.issubdtype(self.dtype, np.float_) or
np.issubdtype(self.dtype, np.integer) or
np.issubdtype(self.dtype, np.bool_)):
res_dtype = np.float_
elif np.issubdtype(self.dtype, np.complex_):
res_dtype = np.complex_
else:
res_dtype = self.dtype
if axis is None:
return self.sum(None) * 1.0 / (self.shape[0]*self.shape[1])
if axis < 0:
axis += 2
if axis == 0:
mean = self.astype(res_dtype).sum(0)
mean *= 1.0 / self.shape[0]
return mean
elif axis == 1:
mean = self.astype(res_dtype).sum(1)
mean *= 1.0 / self.shape[1]
return mean
else:
raise ValueError("axis out of bounds")
def diagonal(self):
"""Returns the main diagonal of the matrix
"""
# TODO support k != 0
return self.tocsr().diagonal()
def setdiag(self, values, k=0):
"""
Set diagonal or off-diagonal elements of the array.
Parameters
----------
values : array_like
New values of the diagonal elements.
Values may have any length. If the diagonal is longer than values,
            then the remaining diagonal entries will not be set. If values is
            longer than the diagonal, then the remaining values are ignored.
If a scalar value is given, all of the diagonal is set to it.
k : int, optional
Which off-diagonal to set, corresponding to elements a[i,i+k].
Default: 0 (the main diagonal).
"""
M, N = self.shape
if (k > 0 and k >= N) or (k < 0 and -k >= M):
raise ValueError("k exceeds matrix dimensions")
self._setdiag(np.asarray(values), k)
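    # Illustrative use of setdiag with the k offset (sketch):
    #
    # >>> from scipy.sparse import lil_matrix
    # >>> A = lil_matrix((3, 3))
    # >>> A.setdiag([1, 2, 3])   # main diagonal
    # >>> A.setdiag(9, k=1)      # broadcast 9 along the first super-diagonal
    # >>> A.toarray()            # -> [[1, 9, 0], [0, 2, 9], [0, 0, 3]]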
def _setdiag(self, values, k):
M, N = self.shape
if k < 0:
if values.ndim == 0:
# broadcast
max_index = min(M+k, N)
for i in xrange(max_index):
self[i - k, i] = values
else:
max_index = min(M+k, N, len(values))
if max_index <= 0:
return
for i,v in enumerate(values[:max_index]):
self[i - k, i] = v
else:
if values.ndim == 0:
# broadcast
max_index = min(M, N-k)
for i in xrange(max_index):
self[i, i + k] = values
else:
max_index = min(M, N-k, len(values))
if max_index <= 0:
return
for i,v in enumerate(values[:max_index]):
self[i, i + k] = v
def _process_toarray_args(self, order, out):
if out is not None:
if order is not None:
raise ValueError('order cannot be specified if out '
'is not None')
if out.shape != self.shape or out.dtype != self.dtype:
raise ValueError('out array must be same dtype and shape as '
'sparse matrix')
out[...] = 0.
return out
else:
return np.zeros(self.shape, dtype=self.dtype, order=order)
def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
"""Method for compatibility with NumPy's ufuncs and dot
functions.
"""
if any(not isinstance(x, spmatrix) and np.asarray(x).dtype == object
for x in inputs):
# preserve previous behavior with object arrays
with_self = list(inputs)
with_self[pos] = np.asarray(self, dtype=object)
return getattr(func, method)(*with_self, **kwargs)
out = kwargs.pop('out', None)
if method != '__call__' or kwargs:
return NotImplemented
without_self = list(inputs)
del without_self[pos]
without_self = tuple(without_self)
if func is np.multiply:
result = self.multiply(*without_self)
elif func is np.add:
result = self.__add__(*without_self)
elif func is np.dot:
if pos == 0:
result = self.__mul__(inputs[1])
else:
result = self.__rmul__(inputs[0])
elif func is np.subtract:
if pos == 0:
result = self.__sub__(inputs[1])
else:
result = self.__rsub__(inputs[0])
elif func is np.divide:
true_divide = (sys.version_info[0] >= 3)
rdivide = (pos == 1)
result = self._divide(*without_self,
true_divide=true_divide,
rdivide=rdivide)
elif func is np.true_divide:
rdivide = (pos == 1)
result = self._divide(*without_self, true_divide=True, rdivide=rdivide)
elif func is np.maximum:
result = self.maximum(*without_self)
elif func is np.minimum:
result = self.minimum(*without_self)
elif func is np.absolute:
result = abs(self)
elif func in _ufuncs_with_fixed_point_at_zero:
func_name = func.__name__
if hasattr(self, func_name):
result = getattr(self, func_name)()
else:
result = getattr(self.tocsr(), func_name)()
else:
return NotImplemented
if out is not None:
if not isinstance(out, spmatrix) and isinstance(result, spmatrix):
out[...] = result.todense()
else:
out[...] = result
result = out
return result
def isspmatrix(x):
return isinstance(x, spmatrix)
issparse = isspmatrix
| bsd-3-clause | 7,602,071,765,283,832,000 | 31.97254 | 83 | 0.526338 | false |
charbeljc/account-invoicing | account_invoice_validation_workflow/invoice.py | 15 | 11721 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Joël Grand-Guillaume
# Copyright 2010-2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, exceptions
from openerp.osv import orm
from openerp.tools.translate import _
from openerp import netsvc
class AccountInvoice(models.Model):
_inherit = "account.invoice"
@api.multi
def action_to_valid(self):
"""Check if analytic account of each lines is not closed"""
str_error_lines = ""
errors = False
for inv in self:
for line in inv.invoice_line:
if line.account_analytic_id and \
line.account_analytic_id.state in ['close',
'cancelled']:
str_error_lines += "\n- %s" % line.name
errors = True
if errors:
raise exceptions.Warning(
_("You are trying to validate invoice lines linked to a "
"closed or cancelled Analytic Account.\n\n"
"Check the following lines:") + str_error_lines)
self.write({'state': 'to_valid'})
return True
state = fields.Selection(
[
('draft', 'Draft'),
('to_send', 'To Send'),
('to_valid', 'To Validate'),
('proforma2', 'Pro-forma'),
('open', 'Open'),
('paid', 'Paid'),
('cancel', 'Canceled')
], 'State', select=True, readonly=True)
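    # The two extra states above extend the stock invoice workflow; a typical
    # transition sequence (illustrative) is:
    #     draft -> to_valid (action_to_valid) -> to_send -> open -> paid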
class AccountInvoiceRefund(orm.TransientModel):
_inherit = "account.invoice.refund"
def compute_refund(self, cr, uid, ids, mode='refund', context=None):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: the account invoice refund’s ID or list of IDs
"""
inv_obj = self.pool['account.invoice']
reconcile_obj = self.pool['account.move.reconcile']
account_m_line_obj = self.pool['account.move.line']
mod_obj = self.pool['ir.model.data']
act_obj = self.pool['ir.actions.act_window']
wf_service = netsvc.LocalService('workflow')
inv_tax_obj = self.pool['account.invoice.tax']
inv_line_obj = self.pool['account.invoice.line']
res_users_obj = self.pool['res.users']
if context is None:
context = {}
for form in self.read(cr, uid, ids, context=context):
created_inv = []
date = False
period = False
description = False
journal_id = False
company = res_users_obj.browse(
cr, uid, uid, context=context).company_id
if form.get('journal_id', False):
journal_id = form['journal_id'][0]
for inv in inv_obj.browse(cr, uid, context.get('active_ids'),
context=context):
if inv.state in ['draft', 'proforma2', 'cancel']:
raise orm.except_orm(
_('Error !'),
_('Can not %s draft/proforma/cancel invoice.') % mode)
if inv.reconciled and mode in ('cancel', 'modify'):
raise orm.except_orm(
_('Error !'),
_('Can not %s invoice which is already reconciled, '
'invoice should be unreconciled first. You can only '
'Refund this invoice') % mode)
if form.get('period'):
period = form['period'][0]
else:
period = inv.period_id and inv.period_id.id or False
if not journal_id:
journal_id = inv.journal_id.id
if form['date']:
date = form['date']
if not form['period']:
cr.execute("select name from ir_model_fields \
where model = 'account.period' \
and name = 'company_id'")
result_query = cr.fetchone()
if result_query:
cr.execute(
"""select p.id
from account_fiscalyear y,
account_period p
where y.id=p.fiscalyear_id
and date(%s) between p.date_start
AND p.date_stop and y.company_id = %s
limit 1""", (date, company.id,))
else:
cr.execute(
"""SELECT id
from account_period
where date(%s) between date_start AND date_stop
limit 1 """, (date,))
res = cr.fetchone()
if res:
period = res[0]
else:
date = inv.date_invoice
if form['description']:
description = form['description']
else:
description = inv.name
if not period:
raise orm.except_orm(_('Data Insufficient !'),
_('No Period found on Invoice!'))
refund_id = inv_obj.refund(
cr, uid, [inv.id], date, period, description, journal_id)
refund = inv_obj.browse(cr, uid, refund_id[0], context=context)
inv_obj.write(
cr, uid, [refund.id],
{'date_due': date, 'check_total': inv.check_total})
inv_obj.button_compute(cr, uid, refund_id)
created_inv.append(refund_id[0])
if mode in ('cancel', 'modify'):
movelines = inv.move_id.line_id
to_reconcile_ids = {}
for line in movelines:
if line.account_id.id == inv.account_id.id:
to_reconcile_ids[line.account_id.id] = [line.id]
if type(line.reconcile_id) != orm.orm.browse_null:
reconcile_obj.unlink(cr, uid, line.reconcile_id.id)
                    # Specific to c2c: trigger the specific workflow steps
                    # before creating the refund
wf_service.trg_validate(uid, 'account.invoice',
refund.id, 'invoice_to_valid', cr)
wf_service.trg_validate(uid, 'account.invoice',
refund.id, 'invoice_to_send', cr)
wf_service.trg_validate(uid, 'account.invoice',
refund.id, 'invoice_open', cr)
refund = inv_obj.browse(
cr, uid, refund_id[0], context=context)
for tmpline in refund.move_id.line_id:
if tmpline.account_id.id == inv.account_id.id:
to_reconcile_ids[
tmpline.account_id.id].append(tmpline.id)
for account in to_reconcile_ids:
account_m_line_obj.reconcile(
cr, uid, to_reconcile_ids[account],
writeoff_period_id=period,
writeoff_journal_id=inv.journal_id.id,
writeoff_acc_id=inv.account_id.id,
context={'date_p': date})
if mode == 'modify':
invoice = inv_obj.read(
cr, uid, [inv.id],
['name', 'type', 'number', 'reference',
'comment', 'date_due', 'partner_id',
'address_contact_id', 'address_invoice_id',
'partner_insite', 'partner_contact',
'partner_ref', 'payment_term', 'account_id',
'currency_id', 'invoice_line', 'tax_line',
'journal_id', 'period_id'], context=context)
invoice = invoice[0]
del invoice['id']
invoice_lines = inv_line_obj.read(
cr, uid, invoice['invoice_line'], context=context)
invoice_lines = inv_obj._refund_cleanup_lines(
cr, uid, invoice_lines)
tax_lines = inv_tax_obj.read(
cr, uid, invoice['tax_line'], context=context)
tax_lines = inv_obj._refund_cleanup_lines(
cr, uid, tax_lines)
invoice.update({
'type': inv.type,
'date_invoice': date,
'state': 'draft',
'number': False,
'invoice_line': invoice_lines,
'tax_line': tax_lines,
'period_id': period,
'name': description
})
for field in ('address_contact_id',
'address_invoice_id', 'partner_id',
'account_id', 'currency_id',
'payment_term', 'journal_id'):
invoice[field] = invoice[
field] and invoice[field][0]
inv_id = inv_obj.create(cr, uid, invoice, {})
if inv.payment_term.id:
data = inv_obj.onchange_payment_term_date_invoice(
cr, uid, [inv_id], inv.payment_term.id, date)
if 'value' in data and data['value']:
inv_obj.write(cr, uid, [inv_id], data['value'])
created_inv.append(inv_id)
if inv.type in ('out_invoice', 'out_refund'):
xml_id = 'action_invoice_tree3'
else:
xml_id = 'action_invoice_tree4'
result = mod_obj.get_object_reference(cr, uid, 'account', xml_id)
id = result and result[1] or False
result = act_obj.read(cr, uid, id, context=context)
invoice_domain = eval(result['domain'])
invoice_domain.append(('id', 'in', created_inv))
result['domain'] = invoice_domain
return result
| agpl-3.0 | 1,847,550,876,600,673,300 | 49.068376 | 79 | 0.44691 | false |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/linear_model/plot_logistic_multinomial.py | 1 | 2483 | """
====================================================
Plot multinomial and One-vs-Rest Logistic Regression
====================================================
Plot decision surface of multinomial and One-vs-Rest Logistic Regression.
The hyperplanes corresponding to the three One-vs-Rest (OVR) classifiers
are represented by the dashed lines.
"""
print(__doc__)
# Authors: Tom Dupre la Tour <[email protected]>
# Licence: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
# make 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)
for multi_class in ('multinomial', 'ovr'):
clf = LogisticRegression(solver='sag', max_iter=100, random_state=42,
multi_class=multi_class).fit(X, y)
# print the training scores
print("training score : %.3f (%s)" % (clf.score(X, y), multi_class))
# create a mesh to plot in
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.title("Decision surface of LogisticRegression (%s)" % multi_class)
plt.axis('tight')
# Plot also the training points
colors = "bry"
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired)
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.show()
| mit | -755,196,933,928,153,700 | 33.013699 | 74 | 0.588401 | false |
clongmore/program-with-objects_1 | entertainment_center.py | 1 | 2235 | import fresh_tomatoes
import media
blind_side = media.Movie("The Blind Side", "The remarkable true story of all-American football star Michael Oher",
"http://upload.wikimedia.org/wikipedia/en/thumb/6/60/Blind_side_poster.jpg/220px-Blind_side_poster.jpg",
"https://www.youtube.com/watch?v=I24d30buecw")
school_of_rock = media.Movie("School of Rock", "Using rock music to learn",
"http://upload.wikimedia.org/wikipedia/en/1/11/School_of_Rock_Poster.jpg",
"https://www.youtube.com/watch?v=3PsUJFEBC74")
accepted = media.Movie("Accepted", "Boy decides to make up a college after every college turns him down",
"http://upload.wikimedia.org/wikipedia/en/thumb/b/b6/Acceptedposter.jpg/220px-Acceptedposter.jpg",
"https://www.youtube.com/watch?v=41C-VouZ7NY")
i_married_who = media.Movie("I Married Who?", "Straight-laced girl wakes up married to a movie star instead of her fiance",
"http://upload.wikimedia.org/wikipedia/en/c/cf/I_Married_Who_poster.jpg",
"https://www.youtube.com/watch?v=GSQwMMg7HcU")
why_get_married = media.Movie("Why Did I Get Married?", "Eight married college friends begin to question their own marriage",
"http://upload.wikimedia.org/wikipedia/en/thumb/2/23/Why_did_i_get_married_ver2.jpg/215px-Why_did_i_get_married_ver2.jpg",
"https://www.youtube.com/watch?v=COjDsU0K0fw")
why_get_married_too = media.Movie("Why Did I Get Married Too?", "Four close couples eagerly reconnect, sharing news about their lives and relationships",
"http://upload.wikimedia.org/wikipedia/en/thumb/b/bb/WhyDidIGotMarried2Poster2.jpg/220px-WhyDidIGotMarried2Poster2.jpg",
"https://www.youtube.com/watch?v=xW1zSg6FLko")
movies = (blind_side, school_of_rock, accepted, i_married_who, why_get_married, why_get_married_too)
fresh_tomatoes.open_movies_page(movies)
#print (media.Movie.VALID_RATINGS)
print (media.Movie.__module__)
| mit | -333,464,672,204,078,850 | 60.083333 | 154 | 0.626398 | false |
alexston/calibre-webserver | src/calibre/gui2/actions/match_books.py | 5 | 1351 | #!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
from calibre.gui2 import error_dialog
from calibre.gui2.actions import InterfaceAction
from calibre.gui2.dialogs.match_books import MatchBooks
class MatchBookAction(InterfaceAction):
name = 'Match Books'
action_spec = (_('Match book to library'), 'book.png',
_('Match this book to a book in the library'),
())
dont_add_to = frozenset(['menubar', 'toolbar', 'context-menu', 'toolbar-child'])
action_type = 'current'
def genesis(self):
self.qaction.triggered.connect(self.match_books_in_library)
def location_selected(self, loc):
enabled = loc != 'library'
self.qaction.setEnabled(enabled)
def match_books_in_library(self, *args):
view = self.gui.current_view()
rows = view.selectionModel().selectedRows()
if not rows or len(rows) != 1:
d = error_dialog(self.gui, _('Match books'), _('You must select one book'))
d.exec_()
return
id_ = view.model().indices(rows)[0]
MatchBooks(self.gui, view, id_, rows[0]).exec_()
| gpl-3.0 | -1,667,387,852,331,383,800 | 33.641026 | 87 | 0.626203 | false |
ilvar/django-mailer | mailer/__init__.py | 2 | 3346 | VERSION = (0, 2, 0, "a", "1-ses") # following PEP 386
DEV_N = None
def get_version():
version = "%s.%s" % (VERSION[0], VERSION[1])
if VERSION[2]:
version = "%s.%s" % (version, VERSION[2])
if VERSION[3] != "f":
version = "%s%s%s" % (version, VERSION[3], VERSION[4])
if DEV_N:
version = "%s.dev%s" % (version, DEV_N)
return version
__version__ = get_version()
PRIORITY_MAPPING = {
"high": "1",
"medium": "2",
"low": "3",
"deferred": "4",
}
# replacement for django.core.mail.send_mail
def send_mail(subject, message, from_email, recipient_list, priority="medium",
fail_silently=False, auth_user=None, auth_password=None):
from django.utils.encoding import force_unicode
from mailer.models import make_message
priority = PRIORITY_MAPPING[priority]
# need to do this in case subject used lazy version of ugettext
subject = force_unicode(subject)
message = force_unicode(message)
make_message(subject=subject,
body=message,
from_email=from_email,
to=recipient_list,
priority=priority).save()
return 1
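# Illustrative usage sketch (added for clarity, not part of the original
# module). The addresses below are hypothetical placeholders; calling
# send_mail() only queues a Message row, actual delivery happens later when
# the mailer engine processes the queue.
def _example_queue_mail():
    return send_mail(
        subject="Weekly report",
        message="The report is ready on the dashboard.",
        from_email="noreply@example.com",
        recipient_list=["user@example.com"],
        priority="high",
    )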
def send_html_mail(subject, message, message_html, from_email, recipient_list,
priority="medium", fail_silently=False, auth_user=None,
auth_password=None):
"""
Function to queue HTML e-mails
"""
from django.utils.encoding import force_unicode
from django.core.mail import EmailMultiAlternatives
from mailer.models import make_message
priority = PRIORITY_MAPPING[priority]
# need to do this in case subject used lazy version of ugettext
subject = force_unicode(subject)
message = force_unicode(message)
msg = make_message(subject=subject,
body=message,
from_email=from_email,
to=recipient_list,
priority=priority)
email = msg.email
email = EmailMultiAlternatives(email.subject, email.body, email.from_email, email.to)
email.attach_alternative(message_html, "text/html")
msg.email = email
msg.save()
return 1
def send_mass_mail(datatuple, fail_silently=False, auth_user=None,
auth_password=None, connection=None):
from mailer.models import make_message
num_sent = 0
for subject, message, sender, recipient in datatuple:
num_sent += send_mail(subject, message, sender, recipient)
return num_sent
def mail_admins(subject, message, fail_silently=False, connection=None, priority="medium"):
from django.conf import settings
from django.utils.encoding import force_unicode
return send_mail(settings.EMAIL_SUBJECT_PREFIX + force_unicode(subject),
message,
settings.SERVER_EMAIL,
[a[1] for a in settings.ADMINS])
def mail_managers(subject, message, fail_silently=False, connection=None, priority="medium"):
from django.conf import settings
from django.utils.encoding import force_unicode
return send_mail(settings.EMAIL_SUBJECT_PREFIX + force_unicode(subject),
message,
settings.SERVER_EMAIL,
[a[1] for a in settings.MANAGERS])
| mit | 5,560,995,170,789,437,000 | 31.173077 | 93 | 0.615362 | false |
LukeCarrier/py3k-pexpect | tests/test_misc.py | 1 | 7706 | #!/usr/bin/env python
import pexpect
import unittest
from . import PexpectTestCase
import time
import os
import re
class TestCaseMisc(PexpectTestCase.PexpectTestCase):
def test_isatty (self):
child = pexpect.spawn('cat')
assert child.isatty(), "Not returning True. Should always be True."
def test_read (self):
child = pexpect.spawn('cat')
child.sendline ("abc")
child.sendeof()
assert child.read(0) == '', "read(0) did not return ''"
assert child.read(1) == 'a', "read(1) did not return 'a'"
assert child.read(1) == 'b', "read(1) did not return 'b'"
assert child.read(1) == 'c', "read(1) did not return 'c'"
assert child.read(2) == '\r\n', "read(2) did not return '\\r\\n'"
assert child.read() == 'abc\r\n', "read() did not return 'abc\\r\\n'"
def test_readline (self):
child = pexpect.spawn('cat')
child.sendline ("abc")
child.sendline ("123")
child.sendeof()
line1 = child.readline(0)
line2 = child.readline()
line3 = child.readline(2)
line4 = child.readline(1)
line5 = child.readline()
assert line1 == '', "readline(0) did not return ''. Returned: " + repr(line1)
assert line2 == 'abc\r\n', "readline() did not return 'abc\\r\\n'. Returned: " + repr(line2)
assert line3 == 'abc\r\n', "readline(2) did not return 'abc\\r\\n'. Returned: " + repr(line3)
assert line4 == '123\r\n', "readline(1) did not return '123\\r\\n'. Returned: " + repr(line4)
assert line5 == '123\r\n', "readline() did not return '123\\r\\n'. Returned: " + repr(line5)
def test_iter (self):
child = pexpect.spawn('cat')
child.sendline ("abc")
child.sendline ("123")
child.sendeof()
# Don't use "".join() because we want to test the ITERATOR.
page = ""
for line in child:
page = page + line
assert page == 'abc\r\nabc\r\n123\r\n123\r\n', "iterator did not work. page=%s"%repr(page)
def test_readlines(self):
child = pexpect.spawn('cat')
child.sendline ("abc")
child.sendline ("123")
child.sendeof()
page = child.readlines()
page = ''.join(page)
assert page == 'abc\r\nabc\r\n123\r\n123\r\n', "readlines() did not work. page=%s"%repr(page)
def test_write (self):
child = pexpect.spawn('cat')
child.write('a')
child.write('\r')
assert child.readline() == 'a\r\n', "write() did not work"
def test_writelines (self):
child = pexpect.spawn('cat')
child.writelines(['abc','123','xyz','\r'])
child.sendeof()
line = child.readline()
assert line == 'abc123xyz\r\n', "writelines() did not work. line=%s"%repr(line)
def test_eof(self):
child = pexpect.spawn('cat')
child.sendeof()
try:
child.expect ('the unexpected')
except:
pass
assert child.eof(), "child.eof() did not return True"
def test_terminate(self):
child = pexpect.spawn('cat')
child.terminate(force=1)
assert child.terminated, "child.terminated is not True"
def test_bad_child_pid(self):
child = pexpect.spawn('cat')
child.terminate(force=1)
child.terminated = 0 # Force invalid state to test code
try:
child.isalive()
except pexpect.ExceptionPexpect as e:
pass
else:
self.fail ("child.isalive() should have raised a pexpect.ExceptionPexpect")
child.terminated = 1 # Force back to valid state so __del__ won't complain
def test_bad_arguments (self):
"""This tests that we get a graceful error when passing bad arguments."""
try:
p = pexpect.spawn(1)
except pexpect.ExceptionPexpect as e:
pass
else:
self.fail ("pexpect.spawn(1) should have raised a pexpect.ExceptionPexpect.")
try:
            p = pexpect.spawn('ls', '-la') # should really use pexpect.spawn('ls', ['-la'])
except TypeError as e:
pass
else:
self.fail ("pexpect.spawn('ls', '-la') should have raised a TypeError.")
try:
p = pexpect.spawn('cat')
p.close()
p.read_nonblocking(size=1, timeout=3)
except ValueError as e:
pass
else:
self.fail ("read_nonblocking on closed spawn object should have raised a ValueError.")
def test_isalive(self):
child = pexpect.spawn('cat')
assert child.isalive(), "child.isalive() did not return True"
child.sendeof()
child.expect(pexpect.EOF)
assert not child.isalive(), "child.isalive() did not return False"
def test_bad_type_in_expect(self):
child = pexpect.spawn('cat')
try:
child.expect({}) # We don't support dicts yet. Should give TypeError
except TypeError as e:
pass
else:
self.fail ("child.expect({}) should have raised a TypeError")
def test_winsize(self):
child = pexpect.spawn('cat')
child.setwinsize(10,13)
assert child.getwinsize()==(10,13), "getwinsize() did not return (10,13)"
def test_env(self):
default = pexpect.run('env')
userenv = pexpect.run('env', env={'foo':'pexpect'})
assert default!=userenv, "'default' and 'userenv' should be different"
assert 'foo' in userenv and 'pexpect' in userenv, "'foo' and 'pexpect' should be in 'userenv'"
def test_cwd (self): # This assumes 'pwd' and '/tmp' exist on this platform.
default = pexpect.run('pwd')
tmpdir = pexpect.run('pwd', cwd='/tmp')
assert default!=tmpdir, "'default' and 'tmpdir' should be different"
assert ('tmp' in tmpdir), "'tmp' should be returned by 'pwd' command"
def test_which (self):
p = os.defpath
ep = os.environ['PATH']
os.defpath = ":/tmp"
os.environ['PATH'] = ":/tmp"
wp = pexpect.which ("ticker.py")
assert wp == 'ticker.py', "Should return a string. Returned %s" % wp
os.defpath = "/tmp"
os.environ['PATH'] = "/tmp"
wp = pexpect.which ("ticker.py")
assert wp == None, "Executable should not be found. Returned %s" % wp
os.defpath = p
os.environ['PATH'] = ep
def test_searcher_re (self):
ss = pexpect.searcher_re ([re.compile('this'),re.compile('that'),re.compile('and'),re.compile('the'),re.compile('other')])
assert ss.__str__() == 'searcher_re:\n 0: re.compile("this")\n 1: re.compile("that")\n 2: re.compile("and")\n 3: re.compile("the")\n 4: re.compile("other")'
ss = pexpect.searcher_re ([pexpect.TIMEOUT,re.compile('this'),re.compile('that'),re.compile('and'),pexpect.EOF,re.compile('other')])
assert ss.__str__() == 'searcher_re:\n 0: TIMEOUT\n 1: re.compile("this")\n 2: re.compile("that")\n 3: re.compile("and")\n 4: EOF\n 5: re.compile("other")'
def test_searcher_string (self):
ss = pexpect.searcher_string (['this','that','and','the','other'])
assert ss.__str__() == 'searcher_string:\n 0: "this"\n 1: "that"\n 2: "and"\n 3: "the"\n 4: "other"', repr(ss.__str__())
ss = pexpect.searcher_string (['this',pexpect.EOF,'that','and','the','other',pexpect.TIMEOUT])
assert ss.__str__() == 'searcher_string:\n 0: "this"\n 1: EOF\n 2: "that"\n 3: "and"\n 4: "the"\n 5: "other"\n 6: TIMEOUT'
if __name__ == '__main__':
unittest.main()
suite = unittest.makeSuite(TestCaseMisc,'test')
| mit | -2,376,140,708,706,264,600 | 44.597633 | 181 | 0.567869 | false |
srowen/spark | python/pyspark/pandas/data_type_ops/date_ops.py | 5 | 3954 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import warnings
from typing import Any, Union
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark.sql import functions as F
from pyspark.sql.types import BooleanType, DateType, StringType
from pyspark.pandas._typing import Dtype, IndexOpsLike, SeriesOrIndex
from pyspark.pandas.base import column_op, IndexOpsMixin
from pyspark.pandas.data_type_ops.base import (
DataTypeOps,
_as_bool_type,
_as_categorical_type,
_as_other_type,
_as_string_type,
)
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import pandas_on_spark_type
class DateOps(DataTypeOps):
"""
The class for binary operations of pandas-on-Spark objects with spark type: DateType.
"""
@property
def pretty_name(self) -> str:
return "dates"
def sub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
# Note that date subtraction casts arguments to integer. This is to mimic pandas's
# behaviors. pandas returns 'timedelta64[ns]' in days from date's subtraction.
msg = (
"Note that there is a behavior difference of date subtraction. "
"The date subtraction returns an integer in days, "
"whereas pandas returns 'timedelta64[ns]'."
)
if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, DateType):
warnings.warn(msg, UserWarning)
return column_op(F.datediff)(left, right).astype("long")
elif isinstance(right, datetime.date) and not isinstance(right, datetime.datetime):
warnings.warn(msg, UserWarning)
return column_op(F.datediff)(left, SF.lit(right)).astype("long")
else:
raise TypeError("date subtraction can only be applied to date series.")
def rsub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
# Note that date subtraction casts arguments to integer. This is to mimic pandas's
# behaviors. pandas returns 'timedelta64[ns]' in days from date's subtraction.
msg = (
"Note that there is a behavior difference of date subtraction. "
"The date subtraction returns an integer in days, "
"whereas pandas returns 'timedelta64[ns]'."
)
if isinstance(right, datetime.date) and not isinstance(right, datetime.datetime):
warnings.warn(msg, UserWarning)
return -column_op(F.datediff)(left, SF.lit(right)).astype("long")
else:
raise TypeError("date subtraction can only be applied to date series.")
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
dtype, spark_type = pandas_on_spark_type(dtype)
if isinstance(dtype, CategoricalDtype):
return _as_categorical_type(index_ops, dtype, spark_type)
elif isinstance(spark_type, BooleanType):
return _as_bool_type(index_ops, dtype)
elif isinstance(spark_type, StringType):
return _as_string_type(index_ops, dtype, null_str=str(pd.NaT))
else:
return _as_other_type(index_ops, dtype, spark_type)
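# Illustrative sketch (added for clarity, not part of the original module):
# demonstrates the behavior difference documented in sub()/rsub() above.
# Requires an active Spark session; the concrete dates are arbitrary examples.
def _example_date_subtraction():
    import pyspark.pandas as ps
    psser = ps.Series([datetime.date(2021, 1, 2), datetime.date(2021, 1, 10)])
    # Unlike pandas (which returns timedelta64[ns]), this returns the
    # difference in whole days as integers and emits the UserWarning above.
    return psser - datetime.date(2021, 1, 1)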
| apache-2.0 | -5,969,488,601,596,109,000 | 42.450549 | 94 | 0.691705 | false |
alberto-antonietti/nest-simulator | pynest/nest/tests/test_siegert_neuron.py | 3 | 4878 | # -*- coding: utf-8 -*-
#
# test_siegert_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# This script tests the siegert_neuron in NEST.
import nest
import unittest
import numpy as np
HAVE_GSL = nest.ll_api.sli_func("statusdict/have_gsl ::")
@nest.ll_api.check_stack
@unittest.skipIf(not HAVE_GSL, 'GSL is not available')
class SiegertNeuronTestCase(unittest.TestCase):
"""
Test siegert_neuron
Details
-------
Compares the rate of a Poisson-driven iaf_psc_delta neuron
with the prediction from the siegert neuron.
"""
def setUp(self):
# test parameter to compare analytic solution to simulation
self.rtol = 1.0
# test parameters
self.N = 100
self.rate_ex = 1.5 * 1e4
self.J = 0.1
# simulation parameters
self.simtime = 500.
self.dt = 0.1
self.start = 200.
nest.set_verbosity('M_WARNING')
nest.ResetKernel()
nest.SetKernelStatus({'resolution': self.dt, 'use_wfr': False})
# set up driven integrate-and-fire neuron
self.iaf_psc_delta = nest.Create(
'iaf_psc_delta', self.N) # , params={"C_m": 1.0})
self.poisson_generator = nest.Create(
'poisson_generator', params={'rate': self.rate_ex})
nest.Connect(self.poisson_generator, self.iaf_psc_delta,
syn_spec={'weight': self.J, 'delay': self.dt})
self.spike_detector = nest.Create(
"spike_detector", params={'start': self.start})
nest.Connect(
self.iaf_psc_delta, self.spike_detector)
# set up driven siegert neuron
neuron_status = nest.GetStatus(self.iaf_psc_delta)[0]
siegert_params = {'tau_m': neuron_status['tau_m'],
't_ref': neuron_status['t_ref'],
'theta': neuron_status['V_th'] -
neuron_status['E_L'],
'V_reset': neuron_status['V_reset'] -
neuron_status['E_L']}
self.siegert_neuron = nest.Create(
'siegert_neuron', params=siegert_params)
self.siegert_drive = nest.Create(
'siegert_neuron', 1,
params={'mean': self.rate_ex, 'theta': siegert_params['theta']})
J_mu_ex = neuron_status['tau_m'] * 1e-3 * self.J
J_sigma_ex = neuron_status['tau_m'] * 1e-3 * self.J ** 2
syn_dict = {'drift_factor': J_mu_ex, 'diffusion_factor':
J_sigma_ex, 'synapse_model': 'diffusion_connection'}
nest.Connect(
self.siegert_drive, self.siegert_neuron, syn_spec=syn_dict)
self.multimeter = nest.Create(
"multimeter", params={'record_from': ['rate'],
'interval': self.dt})
nest.Connect(
self.multimeter, self.siegert_neuron)
def test_RatePrediction(self):
"""Check the rate prediction of the siegert neuron"""
# simulate
nest.Simulate(self.simtime)
# get rate prediction from siegert neuron
events = nest.GetStatus(self.multimeter)[0]["events"]
senders = events['senders']
rate = events['rate'][np.where(
senders == self.siegert_neuron.get('global_id'))]
rate_prediction = rate[-1]
# get simulated rate of integrate-and-fire neuron
rate_iaf = nest.GetStatus(self.spike_detector)[0][
"n_events"] / ((self.simtime - self.start) * 1e-3) / self.N
# test rate prediction against simulated rate of
# integrate-and-fire neuron
self.assertTrue(np.isclose(rate_iaf, rate_prediction, rtol=self.rtol))
# test rate prediction against hard coded result
rate_prediction_test = 27.1095934379
self.assertTrue(np.isclose(rate_prediction_test, rate_prediction))
def suite():
# makeSuite is sort of obsolete http://bugs.python.org/issue2721
# using loadTestsFromTestCase instead.
suite1 = unittest.TestLoader().loadTestsFromTestCase(
SiegertNeuronTestCase)
return unittest.TestSuite([suite1])
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
| gpl-2.0 | 270,003,277,879,200,900 | 32.875 | 78 | 0.609471 | false |
imbstack/pasquino | migrations/env.py | 2 | 2643 | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
import logging
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')
# add your model's MetaData object here
# for 'autogenerate' support
from pasquino import app
target_metadata = app.db.metadata
from flask import current_app
config.set_main_option('sqlalchemy.url',
current_app.config.get('SQLALCHEMY_DATABASE_URI'))
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
# this callback is used to prevent an auto-migration from being generated
# when there are no changes to the schema
# reference: http://alembic.readthedocs.org/en/latest/cookbook.html
def process_revision_directives(context, revision, directives):
if getattr(config.cmd_opts, 'autogenerate', False):
script = directives[0]
if script.upgrade_ops.is_empty():
directives[:] = []
logger.info('No changes in schema detected.')
engine = engine_from_config(config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(connection=connection,
target_metadata=target_metadata,
process_revision_directives=process_revision_directives,
**current_app.extensions['migrate'].configure_args)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| mit | 3,909,830,976,631,564,000 | 31.62963 | 78 | 0.673477 | false |
aliasfalse/rowboat | rowboat/models/custcommands.py | 1 | 1500 | from peewee import (
BigIntegerField, TextField, DateTimeField, CompositeKey, IntegerField
)
from datetime import datetime
from holster.enum import Enum
from rowboat.sql import BaseModel
@BaseModel.register
class CustomCommands(BaseModel):
Types = Enum(
'CMD',
'LISTEN',
bitmask=False,
)
ListenTypes = Enum(
'GuildMemberAdd',
'GuildMemberRemove',
'GuildMemberUpdate',
'GuildMembersChunk',
'GuildRoleCreate',
'GuildRoleUpdate',
'GuildRoleDelete',
'GuildEmojisUpdate',
'ChannelCreate',
'ChannelUpdate',
'ChannelDelete',
'VoiceStateUpdate',
'MessageCreate',
'PresenceUpdate',
bitmask=False,
)
guild_id = BigIntegerField()
author_id = BigIntegerField()
type_ = IntegerField(db_column='type')
listen_type_ = IntegerField(db_column='listen_type',default=0)
name = TextField()
command = TextField()
times_used = IntegerField(default=0)
created_at = DateTimeField(default=datetime.utcnow)
class Meta:
db_table = 'custom_commands'
primary_key = CompositeKey('guild_id', 'name')
@classmethod
def create_cust(cls, plugin, event, name, cmdtype, ltype, content):
cls.create(
guild_id=event.guild.id,
author_id=event.author.id,
name=name,
command=content,
type_=cmdtype,
listen_type_=ltype
)
| mit | -4,021,120,537,533,538,300 | 23.590164 | 73 | 0.607333 | false |
raccoongang/socraticqs2 | mysite/fsm/fsm_base.py | 2 | 4839 | """Module contains FSMStack object - main interface to our current FSM if any
FSMStack methods:
* ``event`` - top-level interface for passing event to a running FSM instance
* ``push`` - start running a new FSM instance (layer)
* ``pop`` - pop current FSM state and pass event to next stack state if any
* ``resume`` - resume an orphaned activity
* ``get_current_url`` - get URL for resuming at current FSM state
"""
from fsm.models import FSM, FSMState, FSMBadUserError, FSMStackResumeError
class FSMStack(object):
"""
Main interface to our current FSM if any.
"""
CHAT_NAMES = ('chat', 'additional', 'courselet_preview', 'faq')
def __init__(self, request, **kwargs):
try:
fsmID = request.session['fsmID']
except KeyError:
self.state = None
return
try:
self.state = FSMState.objects.select_related('fsmNode')\
.prefetch_related('fsmNode__outgoing').get(pk=fsmID)
except FSMState.DoesNotExist:
del request.session['fsmID']
self.state = None
return
for edge in self.state.fsmNode.outgoing.all(): # detect selection edges
if edge.name.startswith('select_'):
setattr(self, edge.name, edge) # make available to HTML templates
def event(self, request, eventName='next', pageData=None, **kwargs):
"""Top-level interface for passing event to a running FSM instance
If FSM handles this event, return a redirect that over-rides
the generic UI behavior. Otherwise return None,
indicating NO over-ride of generic UI behavior.
"""
if self.state is None: # no ongoing activity
return
state = self.state # remember current state
path = self.state.event(
self, request, eventName, pageData=pageData, **kwargs
)
if self.state and self.state != state: # pushed or popped
path = self.state.path # use path of new FSM
if self.state.fsmNode.node_name_is_one_of('END'): # reached terminal state
pageData.set_refresh_timer(request, False) # reset timer if on
if self.state.fsmNode.help: # save its help message
request.session['statusMessage'] = self.state.fsmNode.help
parentPath = self.pop(request, pageData=pageData)
if parentPath: # let parent FSM redirect us to its current state
return parentPath
return path
def push(self, request, fsmName, stateData=None, startArgs=None,
activity=None, **kwargs):
"""
Start running a new FSM instance (layer).
"""
stateData = stateData or {}
startArgs = startArgs or {}
fsm = FSM.objects.select_related('startNode').get(name=fsmName)
activity = None
if not activity and self.state and fsmName not in self.CHAT_NAMES:
activity = self.state.activity
self.state = FSMState(
user=request.user,
fsmNode=fsm.startNode,
parentState=self.state,
activity=activity,
title=fsm.title,
hideTabs=fsm.hideTabs,
hideLinks=fsm.hideLinks,
hideNav=fsm.hideNav,
**kwargs
)
path = self.state.start_fsm(self, request, stateData, **startArgs)
if fsmName not in self.CHAT_NAMES:
request.session['fsmID'] = self.state.pk
return path
def pop(self, request, eventName='return', pageData=None, **kwargs):
"""
Pop current FSM state and pass event to next stack state if any.
"""
nextState = self.state.parentState
if '.live.' in self.state.fsmNode.funcName:
for child in self.state.linkChildren.all():
child.linkState = None
child.save()
self.state.delete()
self.state = nextState
if nextState is not None:
request.session['fsmID'] = nextState.pk
return self.event(request, eventName, pageData, **kwargs)
else:
del request.session['fsmID']
def resume(self, request, stateID):
"""
Resume an orphaned activity.
"""
state = FSMState.objects.get(pk=int(stateID))
if state.user_id != request.user.id:
raise FSMBadUserError('user mismatch!!')
elif state.children.count() > 0:
raise FSMStackResumeError('can only resume innermost stack level')
self.state = state
request.session['fsmID'] = self.state.pk
return self.get_current_url()
def get_current_url(self):
"""
Get URL for resuming at current FSM state.
"""
if self.state:
return self.state.path
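# Illustrative sketch (added for clarity, not part of the original module):
# a typical FSMStack lifecycle, assuming a Django request for a logged-in
# user and an FSM named 'chat' already loaded in the database. pageData is
# omitted for brevity; a real view would normally pass it to event().
def _example_fsm_lifecycle(request):
    fsm_stack = FSMStack(request)
    # start a new FSM instance (layer) and get the URL of its start node
    start_path = fsm_stack.push(request, 'chat')
    # pass a UI event to the running FSM; returns a redirect path when the
    # FSM overrides the generic behavior, or None otherwise
    redirect_path = fsm_stack.event(request, eventName='next')
    return start_path, redirect_path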
| apache-2.0 | -922,452,241,754,503,800 | 38.663934 | 83 | 0.596818 | false |
vhf/confusable_homoglyphs | confusable_homoglyphs/cli.py | 1 | 2745 | # -*- coding: utf-8 -*-
import re
from collections import defaultdict
from .utils import u, get, dump
try:
import click
except ImportError:
print('Install this package with the [cli] extras to enable the CLI.')
raise
@click.group()
def cli():
pass
@cli.command()
def update():
"""
Update the homoglyph data files from https://www.unicode.org
"""
generate_categories()
generate_confusables()
def generate_categories():
"""Generates the categories JSON data file from the unicode specification.
:return: True for success, raises otherwise.
:rtype: bool
"""
# inspired by https://gist.github.com/anonymous/2204527
code_points_ranges = []
iso_15924_aliases = []
categories = []
match = re.compile(r'([0-9A-F]+)(?:\.\.([0-9A-F]+))?\W+(\w+)\s*#\s*(\w+)',
re.UNICODE)
url = 'ftp://ftp.unicode.org/Public/UNIDATA/Scripts.txt'
file = get(url)
for line in file:
p = re.findall(match, line)
if p:
code_point_range_from, code_point_range_to, alias, category = p[0]
alias = u(alias.upper())
category = u(category)
if alias not in iso_15924_aliases:
iso_15924_aliases.append(alias)
if category not in categories:
categories.append(category)
code_points_ranges.append((
int(code_point_range_from, 16),
int(code_point_range_to or code_point_range_from, 16),
iso_15924_aliases.index(alias), categories.index(category))
)
code_points_ranges.sort()
categories_data = {
'iso_15924_aliases': iso_15924_aliases,
'categories': categories,
'code_points_ranges': code_points_ranges,
}
dump('categories.json', categories_data)
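# Illustrative sketch (added for clarity, not part of the original module):
# shows how a single Scripts.txt line is parsed by the regular expression
# used in generate_categories(). The sample line follows the published
# Unicode data format.
def _example_scripts_line():
    match = re.compile(r'([0-9A-F]+)(?:\.\.([0-9A-F]+))?\W+(\w+)\s*#\s*(\w+)',
                       re.UNICODE)
    line = '0030..0039    ; Common # Nd  [10] DIGIT ZERO..DIGIT NINE'
    # -> [('0030', '0039', 'Common', 'Nd')]
    return re.findall(match, line)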
def generate_confusables():
"""Generates the confusables JSON data file from the unicode specification.
:return: True for success, raises otherwise.
:rtype: bool
"""
url = 'ftp://ftp.unicode.org/Public/security/latest/confusables.txt'
file = get(url)
confusables_matrix = defaultdict(list)
match = re.compile(r'[0-9A-F ]+\s+;\s*[0-9A-F ]+\s+;\s*\w+\s*#'
r'\*?\s*\( (.+) → (.+) \) (.+) → (.+)\t#',
re.UNICODE)
for line in file:
p = re.findall(match, line)
if p:
char1, char2, name1, name2 = p[0]
confusables_matrix[char1].append({
'c': char2,
'n': name2,
})
confusables_matrix[char2].append({
'c': char1,
'n': name1,
})
dump('confusables.json', dict(confusables_matrix))
| mit | 1,355,311,340,272,766,200 | 27.852632 | 79 | 0.557826 | false |
mhenke/gerrit-topic-reviews | contrib/check-valid-commit.py | 20 | 2869 | #!/usr/bin/env python
import commands
import getopt
import sys
SSH_USER = 'bot'
SSH_HOST = 'localhost'
SSH_PORT = 29418
SSH_COMMAND = 'ssh %s@%s -p %d gerrit approve ' % (SSH_USER, SSH_HOST, SSH_PORT)
FAILURE_SCORE = '--code-review=-2'
FAILURE_MESSAGE = 'This commit message does not match the standard.' \
+ ' Please correct the commit message and upload a replacement patch.'
PASS_SCORE = '--code-review=0'
PASS_MESSAGE = ''
def main():
change = None
project = None
branch = None
commit = None
patchset = None
try:
opts, args = getopt.getopt(sys.argv[1:], '', \
['change=', 'project=', 'branch=', 'commit=', 'patchset='])
except getopt.GetoptError, err:
print 'Error: %s' % (err)
usage()
sys.exit(-1)
for arg, value in opts:
if arg == '--change':
change = value
elif arg == '--project':
project = value
elif arg == '--branch':
branch = value
elif arg == '--commit':
commit = value
elif arg == '--patchset':
patchset = value
else:
print 'Error: option %s not recognized' % (arg)
usage()
sys.exit(-1)
if change == None or project == None or branch == None \
or commit == None or patchset == None:
usage()
sys.exit(-1)
command = 'git cat-file commit %s' % (commit)
status, output = commands.getstatusoutput(command)
if status != 0:
print 'Error running \'%s\'. status: %s, output:\n\n%s' % \
(command, status, output)
sys.exit(-1)
commitMessage = output[(output.find('\n\n')+2):]
commitLines = commitMessage.split('\n')
if len(commitLines) > 1 and len(commitLines[1]) != 0:
fail(commit, 'Invalid commit summary. The summary must be ' \
+ 'one line followed by a blank line.')
i = 0
for line in commitLines:
i = i + 1
if len(line) > 80:
fail(commit, 'Line %d is over 80 characters.' % i)
passes(commit)
def usage():
print 'Usage:\n'
print sys.argv[0] + ' --change <change id> --project <project name> ' \
+ '--branch <branch> --commit <sha1> --patchset <patchset id>'
def fail( commit, message ):
command = SSH_COMMAND + FAILURE_SCORE + ' -m \\\"' \
+ _shell_escape( FAILURE_MESSAGE + '\n\n' + message) \
+ '\\\" ' + commit
commands.getstatusoutput(command)
sys.exit(1)
def passes( commit ):
command = SSH_COMMAND + PASS_SCORE + ' -m \\\"' \
+ _shell_escape(PASS_MESSAGE) + ' \\\" ' + commit
commands.getstatusoutput(command)
def _shell_escape(x):
s = ''
for c in x:
if c in '\n':
s = s + '\\\"$\'\\n\'\\\"'
else:
s = s + c
return s
if __name__ == '__main__':
main()
| apache-2.0 | -4,344,717,063,191,373,000 | 26.586538 | 80 | 0.534333 | false |
smainand/scapy | scapy/arch/common.py | 1 | 2718 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <[email protected]>
# This program is published under a GPLv2 license
"""
Functions common to different architectures
"""
import socket
from fcntl import ioctl
import os
import struct
import ctypes
from ctypes import POINTER, Structure
from ctypes import c_uint, c_uint32, c_ushort, c_ubyte
from scapy.config import conf
import scapy.modules.six as six
# UTILS
def get_if(iff, cmd):
"""Ease SIOCGIF* ioctl calls"""
sck = socket.socket()
ifreq = ioctl(sck, cmd, struct.pack("16s16x", iff.encode("utf8")))
sck.close()
return ifreq
# BPF HANDLERS
class bpf_insn(Structure):
""""The BPF instruction data structure"""
_fields_ = [("code", c_ushort),
("jt", c_ubyte),
("jf", c_ubyte),
("k", c_uint32)]
class bpf_program(Structure):
""""Structure for BIOCSETF"""
_fields_ = [("bf_len", c_uint),
("bf_insns", POINTER(bpf_insn))]
def _legacy_bpf_pointer(tcpdump_lines):
"""Get old-format BPF Pointer. Deprecated"""
X86_64 = os.uname()[4] in ['x86_64', 'aarch64']
size = int(tcpdump_lines[0])
bpf = b""
for l in tcpdump_lines[1:]:
if six.PY2:
int_type = long # noqa: F821
else:
int_type = int
bpf += struct.pack("HBBI", *map(int_type, l.split()))
# Thanks to http://www.netprojects.de/scapy-with-pypy-solved/ for the pypy trick # noqa: E501
if conf.use_pypy:
str_buffer = ctypes.create_string_buffer(bpf)
return struct.pack('HL', size, ctypes.addressof(str_buffer))
else:
# XXX. Argl! We need to give the kernel a pointer on the BPF,
# Python object header seems to be 20 bytes. 36 bytes for x86 64bits arch. # noqa: E501
if X86_64:
return struct.pack("HL", size, id(bpf) + 36)
else:
return struct.pack("HI", size, id(bpf) + 20)
def get_bpf_pointer(tcpdump_lines):
"""Create a BPF Pointer for TCPDump filter"""
if conf.use_pypy:
return _legacy_bpf_pointer(tcpdump_lines)
# Allocate BPF instructions
size = int(tcpdump_lines[0])
bpf_insn_a = bpf_insn * size
bip = bpf_insn_a()
# Fill the BPF instruction structures with the byte code
tcpdump_lines = tcpdump_lines[1:]
i = 0
for line in tcpdump_lines:
values = [int(v) for v in line.split()]
bip[i].code = c_ushort(values[0])
bip[i].jt = c_ubyte(values[1])
bip[i].jf = c_ubyte(values[2])
bip[i].k = c_uint(values[3])
i += 1
# Create the BPF program
return bpf_program(size, bip)
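# Illustrative sketch (added for clarity, not part of the original module):
# building a BPF program from tcpdump's "-ddd" output. The interface name
# ("eth0") and filter expression ("icmp") below are examples only; the
# tcpdump path comes from scapy's conf object.
def _example_bpf_program():
    import subprocess
    output = subprocess.check_output(
        [conf.prog.tcpdump, "-p", "-i", "eth0", "-ddd", "-s", "0", "icmp"])
    # the first line is the instruction count, the remaining lines are
    # "code jt jf k" tuples consumed by get_bpf_pointer()
    lines = output.decode(errors="ignore").strip().split("\n")
    return get_bpf_pointer(lines)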
| gpl-2.0 | -8,084,600,124,679,608,000 | 27.610526 | 98 | 0.608168 | false |
zhouhoo/wiki_zh_vec | wiki_token.py | 1 | 1187 | import io
import jieba
def cut_words(input_file, output_file):
count = 0
with io.open(output_file, mode = 'w', encoding = 'utf-8') as outfile:
with io.open(input_file, mode = 'r', encoding = 'utf-8') as infile:
for line in infile:
line = line.strip()
if len(line) < 1: # empty line
continue
if line.startswith('doc'): # start or end of a passage
if line == 'doc': # end of a passage
outfile.write(u'\n')
count = count + 1
if(count % 1000 == 0):
print('%s articles were finished.......' %count)
continue
for word in jieba.cut(line):
outfile.write(word + ' ')
print('%s articles were finished.......' %count)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
    parser.add_argument('input', help='extracted wiki text file to tokenize')
    parser.add_argument('output', help='output file for the tokenized wiki text')
args = parser.parse_args()
cut_words(args.input, args.output) | apache-2.0 | 5,425,672,249,229,324,000 | 36.125 | 76 | 0.509688 | false |
bigmlcom/python | bigml/deepnet.py | 2 | 16730 | # -*- coding: utf-8 -*-
#
# Copyright 2017-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A local Predictive Deepnet.
This module defines a Deepnet to make predictions locally or
embedded into your application without needing to send requests to
BigML.io.
This module can not only save you a few credits, but also enormously
reduce the latency for each prediction and let you use your models
offline.
You can also visualize your predictive model in IF-THEN rule format
and even generate a python function that implements the model.
Example usage (assuming that you have previously set up the BIGML_USERNAME
and BIGML_API_KEY environment variables and that you own the model/id below):
from bigml.api import BigML
from bigml.deepnet import Deepnet
api = BigML()
deepnet = Deepnet('deepnet/5026965515526876630001b2')
deepnet.predict({"petal length": 3, "petal width": 1})
"""
import logging
from functools import cmp_to_key
from bigml.api import FINISHED
from bigml.api import get_status, get_api_connection, get_deepnet_id
from bigml.util import cast, use_cache, load, PRECISION
from bigml.basemodel import get_resource_dict, extract_objective
from bigml.modelfields import ModelFields
from bigml.laminar.constants import NUMERIC
from bigml.model import parse_operating_point, sort_categories
import bigml.laminar.numpy_ops as net
import bigml.laminar.preprocess_np as pp
LOGGER = logging.getLogger('BigML')
MEAN = "mean"
STANDARD_DEVIATION = "stdev"
def moments(amap):
"""Extracts mean and stdev
"""
return amap[MEAN], amap[STANDARD_DEVIATION]
def expand_terms(terms_list, input_terms):
"""Builds a list of occurrences for all the available terms
"""
terms_occurrences = [0.0] * len(terms_list)
for term, occurrences in input_terms:
index = terms_list.index(term)
terms_occurrences[index] = occurrences
return terms_occurrences
class Deepnet(ModelFields):
""" A lightweight wrapper around Deepnet model.
Uses a BigML remote model to build a local version that can be used
to generate predictions locally.
"""
def __init__(self, deepnet, api=None, cache_get=None):
"""The Deepnet constructor can be given as first argument:
- a deepnet structure
- a deepnet id
- a path to a JSON file containing a deepnet structure
"""
if use_cache(cache_get):
# using a cache to store the model attributes
self.__dict__ = load(get_deepnet_id(deepnet), cache_get)
return
self.resource_id = None
self.regression = False
self.network = None
self.networks = None
self.input_fields = []
self.class_names = []
self.preprocess = []
self.optimizer = None
self.default_numeric_value = None
self.missing_numerics = False
api = get_api_connection(api)
self.resource_id, deepnet = get_resource_dict( \
deepnet, "deepnet", api=api)
if 'object' in deepnet and isinstance(deepnet['object'], dict):
deepnet = deepnet['object']
self.input_fields = deepnet['input_fields']
self.default_numeric_value = deepnet.get('default_numeric_value')
if 'deepnet' in deepnet and isinstance(deepnet['deepnet'], dict):
status = get_status(deepnet)
objective_field = deepnet['objective_fields']
deepnet = deepnet['deepnet']
if 'code' in status and status['code'] == FINISHED:
self.fields = deepnet['fields']
missing_tokens = deepnet.get('missing_tokens')
ModelFields.__init__(
self, self.fields,
objective_id=extract_objective(objective_field),
terms=True, categories=True, missing_tokens=missing_tokens)
self.regression = \
self.fields[self.objective_id]['optype'] == NUMERIC
if not self.regression:
self.class_names = [category for category, _ in \
self.fields[self.objective_id][ \
'summary']['categories']]
self.class_names.sort()
# order matters
self.objective_categories = [category for \
category, _ in self.fields[self.objective_id][ \
"summary"]["categories"]]
self.missing_numerics = deepnet.get('missing_numerics', False)
if 'network' in deepnet:
network = deepnet['network']
self.network = network
self.networks = network.get('networks', [])
self.preprocess = network.get('preprocess')
self.optimizer = network.get('optimizer', {})
else:
raise Exception("The deepnet isn't finished yet")
else:
raise Exception("Cannot create the Deepnet instance. Could not"
" find the 'deepnet' key in the resource:\n\n%s" %
deepnet)
def fill_array(self, input_data, unique_terms):
""" Filling the input array for the network with the data in the
input_data dictionary. Numeric missings are added as a new field
and texts/items are processed.
"""
columns = []
for field_id in self.input_fields:
# if the field is text or items, we need to expand the field
# in one field per term and get its frequency
if field_id in self.tag_clouds:
terms_occurrences = expand_terms(self.tag_clouds[field_id],
unique_terms.get(field_id,
[]))
columns.extend(terms_occurrences)
elif field_id in self.items:
terms_occurrences = expand_terms(self.items[field_id],
unique_terms.get(field_id,
[]))
columns.extend(terms_occurrences)
elif field_id in self.categories:
category = unique_terms.get(field_id)
if category is not None:
category = category[0][0]
columns.append([category])
else:
# when missing_numerics is True and the field had missings
# in the training data, then we add a new "is missing?" element
# whose value is 1 or 0 according to whether the field is
# missing or not in the input data
if self.missing_numerics \
and self.fields[field_id][\
"summary"]["missing_count"] > 0:
if field_id in input_data:
columns.extend([input_data[field_id], 0.0])
else:
columns.extend([0.0, 1.0])
else:
columns.append(input_data.get(field_id))
return pp.preprocess(columns, self.preprocess)
def predict(self, input_data, operating_point=None, operating_kind=None,
full=False):
"""Makes a prediction based on a number of field values.
input_data: Input data to be predicted
operating_point: In classification models, this is the point of the
ROC curve where the model will be used at. The
operating point can be defined in terms of:
- the positive_class, the class that is important to
predict accurately
- the probability_threshold,
                            the probability that is established
as minimum for the positive_class to be predicted.
The operating_point is then defined as a map with
two attributes, e.g.:
{"positive_class": "Iris-setosa",
"probability_threshold": 0.5}
operating_kind: "probability". Sets the
property that decides the prediction. Used only if
no operating_point is used
full: Boolean that controls whether to include the prediction's
attributes. By default, only the prediction is produced. If set
to True, the rest of available information is added in a
dictionary format. The dictionary keys can be:
- prediction: the prediction value
- probability: prediction's probability
- unused_fields: list of fields in the input data that
are not being used in the model
"""
# Checks and cleans input_data leaving the fields used in the model
unused_fields = []
norm_input_data = self.filter_input_data( \
input_data, add_unused_fields=full)
if full:
norm_input_data, unused_fields = norm_input_data
# Strips affixes for numeric values and casts to the final field type
cast(norm_input_data, self.fields)
# When operating_point is used, we need the probabilities
# of all possible classes to decide, so se use
# the `predict_probability` method
if operating_point:
if self.regression:
raise ValueError("The operating_point argument can only be"
" used in classifications.")
return self.predict_operating( \
norm_input_data, operating_point=operating_point)
if operating_kind:
if self.regression:
raise ValueError("The operating_point argument can only be"
" used in classifications.")
return self.predict_operating_kind( \
norm_input_data, operating_kind=operating_kind)
# Computes text and categorical field expansion
unique_terms = self.get_unique_terms(norm_input_data)
input_array = self.fill_array(norm_input_data, unique_terms)
if self.networks:
prediction = self.predict_list(input_array)
else:
prediction = self.predict_single(input_array)
if full:
if not isinstance(prediction, dict):
prediction = {"prediction": prediction}
prediction.update({"unused_fields": unused_fields})
else:
if isinstance(prediction, dict):
prediction = prediction["prediction"]
return prediction
def predict_single(self, input_array):
"""Makes a prediction with a single network
"""
if self.network['trees'] is not None:
input_array = pp.tree_transform(input_array, self.network['trees'])
return self.to_prediction(self.model_predict(input_array,
self.network))
def predict_list(self, input_array):
"""Makes predictions with a list of networks
"""
if self.network['trees'] is not None:
input_array_trees = pp.tree_transform(input_array,
self.network['trees'])
youts = []
for model in self.networks:
if model['trees']:
youts.append(self.model_predict(input_array_trees, model))
else:
youts.append(self.model_predict(input_array, model))
return self.to_prediction(net.sum_and_normalize(youts,
self.regression))
def model_predict(self, input_array, model):
"""Prediction with one model
"""
layers = net.init_layers(model['layers'])
y_out = net.propagate(input_array, layers)
if self.regression:
y_mean, y_stdev = moments(model['output_exposition'])
y_out = net.destandardize(y_out, y_mean, y_stdev)
return y_out[0][0]
return y_out
def to_prediction(self, y_out):
"""Structuring prediction in a dictionary output
"""
if self.regression:
return float(y_out)
prediction = sorted(enumerate(y_out[0]), key=lambda x: -x[1])[0]
prediction = {"prediction": self.class_names[prediction[0]],
"probability": round(prediction[1], PRECISION),
"distribution": [{"category": category,
"probability": round(y_out[0][i],
PRECISION)} \
for i, category in enumerate(self.class_names)]}
return prediction
def predict_probability(self, input_data, compact=False):
"""Predicts a probability for each possible output class,
based on input values. The input fields must be a dictionary
keyed by field name or field ID.
:param input_data: Input data to be predicted
:param compact: If False, prediction is returned as a list of maps, one
per class, with the keys "prediction" and "probability"
mapped to the name of the class and it's probability,
respectively. If True, returns a list of probabilities
ordered by the sorted order of the class names.
"""
if self.regression:
prediction = self.predict(input_data, full=not compact)
if compact:
return [prediction]
return prediction
distribution = self.predict(input_data, full=True)['distribution']
distribution.sort(key=lambda x: x['category'])
if compact:
return [category['probability'] for category in distribution]
return distribution
def _sort_predictions(self, a, b, criteria):
"""Sorts the categories in the predicted node according to the
given criteria
"""
if a[criteria] == b[criteria]:
return sort_categories(a, b, self.objective_categories)
return 1 if b[criteria] > a[criteria] else - 1
def predict_operating_kind(self, input_data, operating_kind=None):
"""Computes the prediction based on a user-given operating kind.
"""
kind = operating_kind.lower()
if kind == "probability":
predictions = self.predict_probability(input_data, False)
else:
raise ValueError("Only probability is allowed as operating kind"
" for deepnets.")
predictions.sort( \
key=cmp_to_key( \
lambda a, b: self._sort_predictions(a, b, kind)))
prediction = predictions[0]
prediction["prediction"] = prediction["category"]
del prediction["category"]
return prediction
def predict_operating(self, input_data, operating_point=None):
"""Computes the prediction based on a user-given operating point.
"""
kind, threshold, positive_class = parse_operating_point( \
operating_point, ["probability"], self.class_names)
predictions = self.predict_probability(input_data, False)
position = self.class_names.index(positive_class)
if predictions[position][kind] > threshold:
prediction = predictions[position]
else:
# if the threshold is not met, the alternative class with
# highest probability or confidence is returned
predictions.sort( \
key=cmp_to_key( \
lambda a, b: self._sort_predictions(a, b, kind)))
prediction = predictions[0 : 2]
if prediction[0]["category"] == positive_class:
prediction = prediction[1]
else:
prediction = prediction[0]
prediction["prediction"] = prediction["category"]
del prediction["category"]
return prediction
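# Illustrative usage sketch (added for clarity, not part of the original
# module). The deepnet id and input values reuse the placeholders from the
# module docstring; the operating point follows the format documented in
# Deepnet.predict().
def _example_operating_point_prediction():
    from bigml.api import BigML
    api = BigML()
    deepnet = Deepnet('deepnet/5026965515526876630001b2', api=api)
    return deepnet.predict({"petal length": 3, "petal width": 1},
                           operating_point={"positive_class": "Iris-setosa",
                                            "probability_threshold": 0.5},
                           full=True)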
| apache-2.0 | -4,185,756,928,750,375,400 | 40.308642 | 79 | 0.578004 | false |
NolanZhao/OpenBazaar | installers/windows/openbazaar.py | 13 | 1835 | #
# A short note on why this bootstrapper is needed:
# setup.py must specify a Python file to be inserted into openbazaar.exe
# Before starting node/openbazaar.py it needs to be bootstrapped properly.
# In a development environment all packages and dependent libraries will be
# easily accessible. However, in an installed environment we have to make sure
# that our exe file can locate its dependencies without problems.
#
import ctypes
import ctypes.util
import os
import sys
# Get the folder of the exe file.
EXE_FILE = sys.argv[0]
PATH = os.path.dirname(os.path.realpath(EXE_FILE))
# We need to add the folder of libeay32.dll and gpg.exe on the path.
# This is needed for some packages to be able to detect them and function properly.
# Modifying the PATH variable on the installed system is prone to errors
# One example is if a x64 version of libeay32.dll is in PATH *before*
# ours, OpenBazaar won't be able to load libeay32.dll and fail to run.
os.environ["PATH"] = ";".join((PATH, r"%s\gpg\gpg" % PATH, os.environ["PATH"]))
# Add the full path to the egg file containing pycountry.
# This needs to be done before importing node (which depends on pycountry)
sys.path.append(os.path.join(PATH, "pycountry-1.8-py2.7.egg"))
from node import openbazaar
def main():
# Try to locate gpg.exe
if not ctypes.util.find_library('gpg.exe'):
message_box = ctypes.windll.user32.MessageBoxA
message_box(
None,
'Gpg4win could not be detected.\n'
'Please download and install gpg4win from http://gpg4win.org/ before continuing.',
'Open Bazaar',
0
)
return
# Workaround to make it possible to double click on the exe file
sys.argv.append('start')
# Start OpenBazaar
openbazaar.main()
if __name__ == '__main__':
main()
| mit | 5,457,212,010,593,020,000 | 32.363636 | 94 | 0.699183 | false |
orione7/plugin.video.streamondemand-pureita | lib/jsbeautifier/tests/testjsbeautifier.py | 41 | 119070 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import unittest
import jsbeautifier
import six
class TestJSBeautifier(unittest.TestCase):
def test_unescape(self):
# Test cases contributed by <chrisjshull on GitHub.com>
test_fragment = self.decodesto
bt = self.bt
bt('"\\\\s"') # == "\\s" in the js source
bt("'\\\\s'") # == '\\s' in the js source
bt("'\\\\\\s'") # == '\\\s' in the js source
bt("'\\s'") # == '\s' in the js source
bt('"•"')
bt('"—"')
bt('"\\x41\\x42\\x43\\x01"', '"\\x41\\x42\\x43\\x01"')
bt('"\\u2022"', '"\\u2022"')
bt('a = /\s+/')
#bt('a = /\\x41/','a = /A/')
bt('"\\u2022";a = /\s+/;"\\x41\\x42\\x43\\x01".match(/\\x41/);','"\\u2022";\na = /\s+/;\n"\\x41\\x42\\x43\\x01".match(/\\x41/);')
bt('"\\x22\\x27",\'\\x22\\x27\',"\\x5c",\'\\x5c\',"\\xff and \\xzz","unicode \\u0000 \\u0022 \\u0027 \\u005c \\uffff \\uzzzz"', '"\\x22\\x27", \'\\x22\\x27\', "\\x5c", \'\\x5c\', "\\xff and \\xzz", "unicode \\u0000 \\u0022 \\u0027 \\u005c \\uffff \\uzzzz"')
self.options.unescape_strings = True
bt('"\\x41\\x42\\x43\\x01"', '"ABC\\x01"')
bt('"\\u2022"', '"\\u2022"')
bt('a = /\s+/')
bt('"\\u2022";a = /\s+/;"\\x41\\x42\\x43\\x01".match(/\\x41/);','"\\u2022";\na = /\s+/;\n"ABC\\x01".match(/\\x41/);')
bt('"\\x22\\x27",\'\\x22\\x27\',"\\x5c",\'\\x5c\',"\\xff and \\xzz","unicode \\u0000 \\u0022 \\u0027 \\u005c \\uffff \\uzzzz"', '"\\"\'", \'"\\\'\', "\\\\", \'\\\\\', "\\xff and \\xzz", "unicode \\u0000 \\" \' \\\\ \\uffff \\uzzzz"')
self.options.unescape_strings = False
def test_beautifier(self):
test_fragment = self.decodesto
bt = self.bt
true = True
false = False
def unicode_char(value):
return six.unichr(value)
self.options.indent_size = 4
self.options.indent_char = ' '
self.options.preserve_newlines = true
self.options.jslint_happy = false
self.options.keep_array_indentation = false
self.options.brace_style = 'collapse'
# Unicode Support
bt('var ' + unicode_char(3232) + '_' + unicode_char(3232) + ' = "hi";')
bt(
'var ' + unicode_char(228) + 'x = {\n' +
' ' + unicode_char(228) + 'rgerlich: true\n' +
'};')
# End With Newline - (eof = "\n")
self.options.end_with_newline = true
test_fragment('', '\n')
test_fragment(' return .5', ' return .5\n')
test_fragment(' \n\nreturn .5\n\n\n\n', ' return .5\n')
test_fragment('\n')
# End With Newline - (eof = "")
self.options.end_with_newline = false
test_fragment('')
test_fragment(' return .5')
test_fragment(' \n\nreturn .5\n\n\n\n', ' return .5')
test_fragment('\n', '')
# Comma-first option - (c0 = "\n, ", c1 = "\n , ", c2 = "\n , ", c3 = "\n , ")
self.options.comma_first = true
bt('{a:1, b:2}', '{\n a: 1\n , b: 2\n}')
bt('var a=1, b=c[d], e=6;', 'var a = 1\n , b = c[d]\n , e = 6;')
bt('for(var a=1,b=2,c=3;d<3;d++)\ne', 'for (var a = 1, b = 2, c = 3; d < 3; d++)\n e')
bt('for(var a=1,b=2,\nc=3;d<3;d++)\ne', 'for (var a = 1, b = 2\n , c = 3; d < 3; d++)\n e')
bt('function foo() {\n return [\n "one"\n , "two"\n ];\n}')
bt('a=[[1,2],[4,5],[7,8]]', 'a = [\n [1, 2]\n , [4, 5]\n , [7, 8]\n]')
bt('a=[[1,2],[4,5],[7,8],]', 'a = [\n [1, 2]\n , [4, 5]\n , [7, 8]\n, ]')
bt('a=[[1,2],[4,5],function(){},[7,8]]', 'a = [\n [1, 2]\n , [4, 5]\n , function() {}\n , [7, 8]\n]')
bt('a=[[1,2],[4,5],function(){},function(){},[7,8]]', 'a = [\n [1, 2]\n , [4, 5]\n , function() {}\n , function() {}\n , [7, 8]\n]')
bt('a=[[1,2],[4,5],function(){},[7,8]]', 'a = [\n [1, 2]\n , [4, 5]\n , function() {}\n , [7, 8]\n]')
bt('a=[b,c,function(){},function(){},d]', 'a = [b, c, function() {}, function() {}, d]')
bt('a=[b,c,\nfunction(){},function(){},d]', 'a = [b, c\n , function() {}\n , function() {}\n , d\n]')
bt('a=[a[1],b[4],c[d[7]]]', 'a = [a[1], b[4], c[d[7]]]')
bt('[1,2,[3,4,[5,6],7],8]', '[1, 2, [3, 4, [5, 6], 7], 8]')
bt('[[["1","2"],["3","4"]],[["5","6","7"],["8","9","0"]],[["1","2","3"],["4","5","6","7"],["8","9","0"]]]', '[\n [\n ["1", "2"]\n , ["3", "4"]\n ]\n , [\n ["5", "6", "7"]\n , ["8", "9", "0"]\n ]\n , [\n ["1", "2", "3"]\n , ["4", "5", "6", "7"]\n , ["8", "9", "0"]\n ]\n]')
# Comma-first option - (c0 = ",\n", c1 = ",\n ", c2 = ",\n ", c3 = ",\n ")
self.options.comma_first = false
bt('{a:1, b:2}', '{\n a: 1,\n b: 2\n}')
bt('var a=1, b=c[d], e=6;', 'var a = 1,\n b = c[d],\n e = 6;')
bt('for(var a=1,b=2,c=3;d<3;d++)\ne', 'for (var a = 1, b = 2, c = 3; d < 3; d++)\n e')
bt('for(var a=1,b=2,\nc=3;d<3;d++)\ne', 'for (var a = 1, b = 2,\n c = 3; d < 3; d++)\n e')
bt('function foo() {\n return [\n "one",\n "two"\n ];\n}')
bt('a=[[1,2],[4,5],[7,8]]', 'a = [\n [1, 2],\n [4, 5],\n [7, 8]\n]')
bt('a=[[1,2],[4,5],[7,8],]', 'a = [\n [1, 2],\n [4, 5],\n [7, 8],\n]')
bt('a=[[1,2],[4,5],function(){},[7,8]]', 'a = [\n [1, 2],\n [4, 5],\n function() {},\n [7, 8]\n]')
bt('a=[[1,2],[4,5],function(){},function(){},[7,8]]', 'a = [\n [1, 2],\n [4, 5],\n function() {},\n function() {},\n [7, 8]\n]')
bt('a=[[1,2],[4,5],function(){},[7,8]]', 'a = [\n [1, 2],\n [4, 5],\n function() {},\n [7, 8]\n]')
bt('a=[b,c,function(){},function(){},d]', 'a = [b, c, function() {}, function() {}, d]')
bt('a=[b,c,\nfunction(){},function(){},d]', 'a = [b, c,\n function() {},\n function() {},\n d\n]')
bt('a=[a[1],b[4],c[d[7]]]', 'a = [a[1], b[4], c[d[7]]]')
bt('[1,2,[3,4,[5,6],7],8]', '[1, 2, [3, 4, [5, 6], 7], 8]')
bt('[[["1","2"],["3","4"]],[["5","6","7"],["8","9","0"]],[["1","2","3"],["4","5","6","7"],["8","9","0"]]]', '[\n [\n ["1", "2"],\n ["3", "4"]\n ],\n [\n ["5", "6", "7"],\n ["8", "9", "0"]\n ],\n [\n ["1", "2", "3"],\n ["4", "5", "6", "7"],\n ["8", "9", "0"]\n ]\n]')
# New Test Suite
# Async / await tests
bt('async function foo() {}')
bt('let w = async function foo() {}')
bt('async function foo() {}\nvar x = await foo();')
# async function as an input to another function
bt('wrapper(async function foo() {})')
# await on inline anonymous function. should have a space after await
bt(
'async function() {\n var w = await(async function() {\n return await foo();\n })();\n}',
'async function() {\n var w = await (async function() {\n return await foo();\n })();\n}')
# ensure that this doesn't break anyone with the async library
bt('async.map(function(t) {})')
# e4x - Test that e4x literals passed through when e4x-option is enabled
self.options.e4x = true
bt('xml=<a b="c"><d/><e>\n foo</e>x</a>;', 'xml = <a b="c"><d/><e>\n foo</e>x</a>;')
bt('<a b=\'This is a quoted "c".\'/>')
bt('<a b="This is a quoted \'c\'."/>')
bt('<a b="A quote \' inside string."/>')
bt('<a b=\'A quote " inside string.\'/>')
bt('<a b=\'Some """ quotes "" inside string.\'/>')
# Handles inline expressions
bt('xml=<{a} b="c"><d/><e v={z}>\n foo</e>x</{a}>;', 'xml = <{a} b="c"><d/><e v={z}>\n foo</e>x</{a}>;')
bt('xml=<{a} b="c">\n <e v={z}>\n foo</e>x</{a}>;', 'xml = <{a} b="c">\n <e v={z}>\n foo</e>x</{a}>;')
# xml literals with special characters in elem names - see http://www.w3.org/TR/REC-xml/#NT-NameChar
bt('xml = <_:.valid.xml- _:.valid.xml-="123"/>;')
# Handles CDATA
bt('xml=<![CDATA[ b="c"><d/><e v={z}>\n foo</e>x/]]>;', 'xml = <![CDATA[ b="c"><d/><e v={z}>\n foo</e>x/]]>;')
bt('xml=<![CDATA[]]>;', 'xml = <![CDATA[]]>;')
bt('xml=<a b="c"><![CDATA[d/></a></{}]]></a>;', 'xml = <a b="c"><![CDATA[d/></a></{}]]></a>;')
# JSX - working jsx from http://prettydiff.com/unit_tests/beautification_javascript_jsx.txt
bt(
'var ListItem = React.createClass({\n' +
' render: function() {\n' +
' return (\n' +
' <li className="ListItem">\n' +
' <a href={ "/items/" + this.props.item.id }>\n' +
' this.props.item.name\n' +
' </a>\n' +
' </li>\n' +
' );\n' +
' }\n' +
'});')
bt(
'var List = React.createClass({\n' +
' renderList: function() {\n' +
' return this.props.items.map(function(item) {\n' +
' return <ListItem item={item} key={item.id} />;\n' +
' });\n' +
' },\n' +
'\n' +
' render: function() {\n' +
' return <ul className="List">\n' +
' this.renderList()\n' +
' </ul>\n' +
' }\n' +
'});')
bt(
'var Mist = React.createClass({\n' +
' renderList: function() {\n' +
' return this.props.items.map(function(item) {\n' +
' return <ListItem item={return <tag>{item}</tag>} key={item.id} />;\n' +
' });\n' +
' }\n' +
'});')
bt(
'// JSX\n' +
'var box = <Box>\n' +
' {shouldShowAnswer(user) ?\n' +
' <Answer value={false}>no</Answer> : <Box.Comment>\n' +
' Text Content\n' +
' </Box.Comment>}\n' +
' </Box>;\n' +
'var a = function() {\n' +
' return <tsdf>asdf</tsdf>;\n' +
'};\n' +
'\n' +
'var HelloMessage = React.createClass({\n' +
' render: function() {\n' +
' return <div>Hello {this.props.name}</div>;\n' +
' }\n' +
'});\n' +
'React.render(<HelloMessage name="John" />, mountNode);')
bt(
'var Timer = React.createClass({\n' +
' getInitialState: function() {\n' +
' return {\n' +
' secondsElapsed: 0\n' +
' };\n' +
' },\n' +
' tick: function() {\n' +
' this.setState({\n' +
' secondsElapsed: this.state.secondsElapsed + 1\n' +
' });\n' +
' },\n' +
' componentDidMount: function() {\n' +
' this.interval = setInterval(this.tick, 1000);\n' +
' },\n' +
' componentWillUnmount: function() {\n' +
' clearInterval(this.interval);\n' +
' },\n' +
' render: function() {\n' +
' return (\n' +
' <div>Seconds Elapsed: {this.state.secondsElapsed}</div>\n' +
' );\n' +
' }\n' +
'});\n' +
'React.render(<Timer />, mountNode);')
bt(
'var TodoList = React.createClass({\n' +
' render: function() {\n' +
' var createItem = function(itemText) {\n' +
' return <li>{itemText}</li>;\n' +
' };\n' +
' return <ul>{this.props.items.map(createItem)}</ul>;\n' +
' }\n' +
'});')
bt(
'var TodoApp = React.createClass({\n' +
' getInitialState: function() {\n' +
' return {\n' +
' items: [],\n' +
' text: \'\'\n' +
' };\n' +
' },\n' +
' onChange: function(e) {\n' +
' this.setState({\n' +
' text: e.target.value\n' +
' });\n' +
' },\n' +
' handleSubmit: function(e) {\n' +
' e.preventDefault();\n' +
' var nextItems = this.state.items.concat([this.state.text]);\n' +
' var nextText = \'\';\n' +
' this.setState({\n' +
' items: nextItems,\n' +
' text: nextText\n' +
' });\n' +
' },\n' +
' render: function() {\n' +
' return (\n' +
' <div>\n' +
' <h3>TODO</h3>\n' +
' <TodoList items={this.state.items} />\n' +
' <form onSubmit={this.handleSubmit}>\n' +
' <input onChange={this.onChange} value={this.state.text} />\n' +
' <button>{\'Add #\' + (this.state.items.length + 1)}</button>\n' +
' </form>\n' +
' </div>\n' +
' );\n' +
' }\n' +
'});\n' +
'React.render(<TodoApp />, mountNode);')
bt(
'var converter = new Showdown.converter();\n' +
'var MarkdownEditor = React.createClass({\n' +
' getInitialState: function() {\n' +
' return {value: \'Type some *markdown* here!\'};\n' +
' },\n' +
' handleChange: function() {\n' +
' this.setState({value: this.refs.textarea.getDOMNode().value});\n' +
' },\n' +
' render: function() {\n' +
' return (\n' +
' <div className="MarkdownEditor">\n' +
' <h3>Input</h3>\n' +
' <textarea\n' +
' onChange={this.handleChange}\n' +
' ref="textarea"\n' +
' defaultValue={this.state.value} />\n' +
' <h3>Output</h3>\n' +
' <div\n' +
' className="content"\n' +
' dangerouslySetInnerHTML=\n' +
' />\n' +
' </div>\n' +
' );\n' +
' }\n' +
'});\n' +
'React.render(<MarkdownEditor />, mountNode);',
'var converter = new Showdown.converter();\n' +
'var MarkdownEditor = React.createClass({\n' +
' getInitialState: function() {\n' +
' return {\n' +
' value: \'Type some *markdown* here!\'\n' +
' };\n' +
' },\n' +
' handleChange: function() {\n' +
' this.setState({\n' +
' value: this.refs.textarea.getDOMNode().value\n' +
' });\n' +
' },\n' +
' render: function() {\n' +
' return (\n' +
' <div className="MarkdownEditor">\n' +
' <h3>Input</h3>\n' +
' <textarea\n' +
' onChange={this.handleChange}\n' +
' ref="textarea"\n' +
' defaultValue={this.state.value} />\n' +
' <h3>Output</h3>\n' +
' <div\n' +
' className="content"\n' +
' dangerouslySetInnerHTML=\n' +
' />\n' +
' </div>\n' +
' );\n' +
' }\n' +
'});\n' +
'React.render(<MarkdownEditor />, mountNode);')
# JSX - Not quite correct jsx formatting that still works
bt(
'var content = (\n' +
' <Nav>\n' +
' {/* child comment, put {} around */}\n' +
' <Person\n' +
' /* multi\n' +
' line\n' +
' comment */\n' +
' //attr="test"\n' +
' name={window.isLoggedIn ? window.name : \'\'} // end of line comment\n' +
' />\n' +
' </Nav>\n' +
' );\n' +
'var qwer = <DropDown> A dropdown list <Menu> <MenuItem>Do Something</MenuItem> <MenuItem>Do Something Fun!</MenuItem> <MenuItem>Do Something Else</MenuItem> </Menu> </DropDown>;\n' +
'render(dropdown);',
'var content = (\n' +
' <Nav>\n' +
' {/* child comment, put {} around */}\n' +
' <Person\n' +
' /* multi\n' +
' line\n' +
' comment */\n' +
' //attr="test"\n' +
' name={window.isLoggedIn ? window.name : \'\'} // end of line comment\n' +
' />\n' +
' </Nav>\n' +
');\n' +
'var qwer = <DropDown> A dropdown list <Menu> <MenuItem>Do Something</MenuItem> <MenuItem>Do Something Fun!</MenuItem> <MenuItem>Do Something Else</MenuItem> </Menu> </DropDown>;\n' +
'render(dropdown);')
# Handles messed up tags, as long as it isn't the same name
# as the root tag. Also handles tags of same name as root tag
# as long as nesting matches.
bt(
'xml=<a x="jn"><c></b></f><a><d jnj="jnn"><f></a ></nj></a>;',
'xml = <a x="jn"><c></b></f><a><d jnj="jnn"><f></a ></nj></a>;')
# If xml is not terminated, the remainder of the file is treated
# as part of the xml-literal (passed through unaltered)
test_fragment(
'xml=<a></b>\nc<b;',
'xml = <a></b>\nc<b;')
# Issue #646 - whitespace is allowed in attribute declarations
bt(
'let a = React.createClass({\n' +
' render() {\n' +
' return (\n' +
' <p className=\'a\'>\n' +
' <span>c</span>\n' +
' </p>\n' +
' );\n' +
' }\n' +
'});')
bt(
'let a = React.createClass({\n' +
' render() {\n' +
' return (\n' +
' <p className = \'b\'>\n' +
' <span>c</span>\n' +
' </p>\n' +
' );\n' +
' }\n' +
'});')
bt(
'let a = React.createClass({\n' +
' render() {\n' +
' return (\n' +
' <p className = "c">\n' +
' <span>c</span>\n' +
' </p>\n' +
' );\n' +
' }\n' +
'});')
bt(
'let a = React.createClass({\n' +
' render() {\n' +
' return (\n' +
' <{e} className = {d}>\n' +
' <span>c</span>\n' +
' </{e}>\n' +
' );\n' +
' }\n' +
'});')
# e4x disabled
self.options.e4x = false
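# (Note: with e4x disabled, the same literal is tokenized as ordinary comparison and
# division operators, so the XML gets spaced apart, as the expected output below shows.)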
bt(
'xml=<a b="c"><d/><e>\n foo</e>x</a>;',
'xml = < a b = "c" > < d / > < e >\n foo < /e>x</a > ;')
# Multiple braces
bt('{{}/z/}', '{\n {}\n /z/\n}')
# Beautify preserve formatting
bt('/* beautify preserve:start */\n/* beautify preserve:end */')
bt('/* beautify preserve:start */\n var a = 1;\n/* beautify preserve:end */')
bt('var a = 1;\n/* beautify preserve:start */\n var a = 1;\n/* beautify preserve:end */')
bt('/* beautify preserve:start */ {asdklgh;y;;{}dd2d}/* beautify preserve:end */')
bt(
'var a = 1;\n/* beautify preserve:start */\n var a = 1;\n/* beautify preserve:end */',
'var a = 1;\n/* beautify preserve:start */\n var a = 1;\n/* beautify preserve:end */')
bt(
'var a = 1;\n /* beautify preserve:start */\n var a = 1;\n/* beautify preserve:end */',
'var a = 1;\n/* beautify preserve:start */\n var a = 1;\n/* beautify preserve:end */')
bt(
'var a = {\n' +
' /* beautify preserve:start */\n' +
' one : 1\n' +
' two : 2,\n' +
' three : 3,\n' +
' ten : 10\n' +
' /* beautify preserve:end */\n' +
'};')
bt(
'var a = {\n' +
'/* beautify preserve:start */\n' +
' one : 1,\n' +
' two : 2,\n' +
' three : 3,\n' +
' ten : 10\n' +
'/* beautify preserve:end */\n' +
'};',
'var a = {\n' +
' /* beautify preserve:start */\n' +
' one : 1,\n' +
' two : 2,\n' +
' three : 3,\n' +
' ten : 10\n' +
'/* beautify preserve:end */\n' +
'};')
# one space before and after required, only single spaces inside.
bt(
'var a = {\n' +
'/* beautify preserve:start */\n' +
' one : 1,\n' +
' two : 2,\n' +
' three : 3,\n' +
' ten : 10\n' +
'};',
'var a = {\n' +
' /* beautify preserve:start */\n' +
' one: 1,\n' +
' two: 2,\n' +
' three: 3,\n' +
' ten: 10\n' +
'};')
bt(
'var a = {\n' +
'/*beautify preserve:start*/\n' +
' one : 1,\n' +
' two : 2,\n' +
' three : 3,\n' +
' ten : 10\n' +
'};',
'var a = {\n' +
' /*beautify preserve:start*/\n' +
' one: 1,\n' +
' two: 2,\n' +
' three: 3,\n' +
' ten: 10\n' +
'};')
bt(
'var a = {\n' +
'/*beautify preserve:start*/\n' +
' one : 1,\n' +
' two : 2,\n' +
' three : 3,\n' +
' ten : 10\n' +
'};',
'var a = {\n' +
' /*beautify preserve:start*/\n' +
' one: 1,\n' +
' two: 2,\n' +
' three: 3,\n' +
' ten: 10\n' +
'};')
# Directive: ignore
bt('/* beautify ignore:start */\n/* beautify ignore:end */')
bt('/* beautify ignore:start */\n var a,,,{ 1;\n/* beautify ignore:end */')
bt('var a = 1;\n/* beautify ignore:start */\n var a = 1;\n/* beautify ignore:end */')
bt('/* beautify ignore:start */ {asdklgh;y;+++;dd2d}/* beautify ignore:end */')
bt(
'var a = 1;\n/* beautify ignore:start */\n var a,,,{ 1;\n/* beautify ignore:end */',
'var a = 1;\n/* beautify ignore:start */\n var a,,,{ 1;\n/* beautify ignore:end */')
bt(
'var a = 1;\n /* beautify ignore:start */\n var a,,,{ 1;\n/* beautify ignore:end */',
'var a = 1;\n/* beautify ignore:start */\n var a,,,{ 1;\n/* beautify ignore:end */')
bt(
'var a = {\n' +
' /* beautify ignore:start */\n' +
' one : 1\n' +
' two : 2,\n' +
' three : {\n' +
' ten : 10\n' +
' /* beautify ignore:end */\n' +
'};')
bt(
'var a = {\n' +
'/* beautify ignore:start */\n' +
' one : 1\n' +
' two : 2,\n' +
' three : {\n' +
' ten : 10\n' +
'/* beautify ignore:end */\n' +
'};',
'var a = {\n' +
' /* beautify ignore:start */\n' +
' one : 1\n' +
' two : 2,\n' +
' three : {\n' +
' ten : 10\n' +
'/* beautify ignore:end */\n' +
'};')
# Directives - multiple and interacting
bt(
'var a = {\n' +
'/* beautify preserve:start */\n' +
'/* beautify preserve:start */\n' +
' one : 1,\n' +
' /* beautify preserve:end */\n' +
' two : 2,\n' +
' three : 3,\n' +
'/* beautify preserve:start */\n' +
' ten : 10\n' +
'/* beautify preserve:end */\n' +
'};',
'var a = {\n' +
' /* beautify preserve:start */\n' +
'/* beautify preserve:start */\n' +
' one : 1,\n' +
' /* beautify preserve:end */\n' +
' two: 2,\n' +
' three: 3,\n' +
' /* beautify preserve:start */\n' +
' ten : 10\n' +
'/* beautify preserve:end */\n' +
'};')
bt(
'var a = {\n' +
'/* beautify ignore:start */\n' +
' one : 1\n' +
' /* beautify ignore:end */\n' +
' two : 2,\n' +
'/* beautify ignore:start */\n' +
' three : {\n' +
' ten : 10\n' +
'/* beautify ignore:end */\n' +
'};',
'var a = {\n' +
' /* beautify ignore:start */\n' +
' one : 1\n' +
' /* beautify ignore:end */\n' +
' two: 2,\n' +
' /* beautify ignore:start */\n' +
' three : {\n' +
' ten : 10\n' +
'/* beautify ignore:end */\n' +
'};')
# Starts can occur together, ignore:end must occur alone.
bt(
'var a = {\n' +
'/* beautify ignore:start */\n' +
' one : 1\n' +
' NOTE: ignore end block does not support starting other directives\n' +
' This does not match the ending the ignore...\n' +
' /* beautify ignore:end preserve:start */\n' +
' two : 2,\n' +
'/* beautify ignore:start */\n' +
' three : {\n' +
' ten : 10\n' +
' ==The next comment ends the starting ignore==\n' +
'/* beautify ignore:end */\n' +
'};',
'var a = {\n' +
' /* beautify ignore:start */\n' +
' one : 1\n' +
' NOTE: ignore end block does not support starting other directives\n' +
' This does not match the ending the ignore...\n' +
' /* beautify ignore:end preserve:start */\n' +
' two : 2,\n' +
'/* beautify ignore:start */\n' +
' three : {\n' +
' ten : 10\n' +
' ==The next comment ends the starting ignore==\n' +
'/* beautify ignore:end */\n' +
'};')
bt(
'var a = {\n' +
'/* beautify ignore:start preserve:start */\n' +
' one : {\n' +
' /* beautify ignore:end */\n' +
' two : 2,\n' +
' /* beautify ignore:start */\n' +
' three : {\n' +
'/* beautify ignore:end */\n' +
' ten : 10\n' +
' // This is all preserved\n' +
'};',
'var a = {\n' +
' /* beautify ignore:start preserve:start */\n' +
' one : {\n' +
' /* beautify ignore:end */\n' +
' two : 2,\n' +
' /* beautify ignore:start */\n' +
' three : {\n' +
'/* beautify ignore:end */\n' +
' ten : 10\n' +
' // This is all preserved\n' +
'};')
bt(
'var a = {\n' +
'/* beautify ignore:start preserve:start */\n' +
' one : {\n' +
' /* beautify ignore:end */\n' +
' two : 2,\n' +
' /* beautify ignore:start */\n' +
' three : {\n' +
'/* beautify ignore:end */\n' +
' ten : 10,\n' +
'/* beautify preserve:end */\n' +
' eleven: 11\n' +
'};',
'var a = {\n' +
' /* beautify ignore:start preserve:start */\n' +
' one : {\n' +
' /* beautify ignore:end */\n' +
' two : 2,\n' +
' /* beautify ignore:start */\n' +
' three : {\n' +
'/* beautify ignore:end */\n' +
' ten : 10,\n' +
'/* beautify preserve:end */\n' +
' eleven: 11\n' +
'};')
# Template Formatting
bt('<?=$view["name"]; ?>')
bt('a = <?= external() ?>;')
bt(
'<?php\n' +
'for($i = 1; $i <= 100; $i++;) {\n' +
' #count to 100!\n' +
' echo($i . "</br>");\n' +
'}\n' +
'?>')
bt('a = <%= external() %>;')
# jslint and space after anon function - (f = " ", c = "")
self.options.jslint_happy = true
self.options.space_after_anon_function = true
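# (In the "(f = ..., c = ...)" shorthand used for these sections, f appears to be the space
# emitted after an anonymous 'function' keyword and c the extra indent applied to switch
# 'case' labels; the four option combinations below run the same inputs.)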
bt(
'a=typeof(x)',
'a = typeof (x)')
bt(
'x();\n\nfunction(){}',
'x();\n\nfunction () {}')
bt(
'function () {\n var a, b, c, d, e = [],\n f;\n}')
bt(
'switch(x) {case 0: case 1: a(); break; default: break}',
'switch (x) {\ncase 0:\ncase 1:\n a();\n break;\ndefault:\n break\n}')
bt('switch(x){case -1:break;case !y:break;}', 'switch (x) {\ncase -1:\n break;\ncase !y:\n break;\n}')
# typical greasemonkey start
test_fragment('// comment 2\n(function ()')
bt(
'var a2, b2, c2, d2 = 0, c = function() {}, d = \'\';',
'var a2, b2, c2, d2 = 0,\n c = function () {},\n d = \'\';')
bt(
'var a2, b2, c2, d2 = 0, c = function() {},\nd = \'\';',
'var a2, b2, c2, d2 = 0,\n c = function () {},\n d = \'\';')
bt(
'var o2=$.extend(a);function(){alert(x);}',
'var o2 = $.extend(a);\n\nfunction () {\n alert(x);\n}')
bt('function*() {\n yield 1;\n}', 'function* () {\n yield 1;\n}')
bt('function* x() {\n yield 1;\n}')
# jslint and space after anon function - (f = " ", c = "")
self.options.jslint_happy = true
self.options.space_after_anon_function = false
bt(
'a=typeof(x)',
'a = typeof (x)')
bt(
'x();\n\nfunction(){}',
'x();\n\nfunction () {}')
bt(
'function () {\n var a, b, c, d, e = [],\n f;\n}')
bt(
'switch(x) {case 0: case 1: a(); break; default: break}',
'switch (x) {\ncase 0:\ncase 1:\n a();\n break;\ndefault:\n break\n}')
bt('switch(x){case -1:break;case !y:break;}', 'switch (x) {\ncase -1:\n break;\ncase !y:\n break;\n}')
# typical greasemonkey start
test_fragment('// comment 2\n(function ()')
bt(
'var a2, b2, c2, d2 = 0, c = function() {}, d = \'\';',
'var a2, b2, c2, d2 = 0,\n c = function () {},\n d = \'\';')
bt(
'var a2, b2, c2, d2 = 0, c = function() {},\nd = \'\';',
'var a2, b2, c2, d2 = 0,\n c = function () {},\n d = \'\';')
bt(
'var o2=$.extend(a);function(){alert(x);}',
'var o2 = $.extend(a);\n\nfunction () {\n alert(x);\n}')
bt('function*() {\n yield 1;\n}', 'function* () {\n yield 1;\n}')
bt('function* x() {\n yield 1;\n}')
# jslint and space after anon function - (f = " ", c = " ")
self.options.jslint_happy = false
self.options.space_after_anon_function = true
bt(
'a=typeof(x)',
'a = typeof (x)')
bt(
'x();\n\nfunction(){}',
'x();\n\nfunction () {}')
bt(
'function () {\n var a, b, c, d, e = [],\n f;\n}')
bt(
'switch(x) {case 0: case 1: a(); break; default: break}',
'switch (x) {\n case 0:\n case 1:\n a();\n break;\n default:\n break\n}')
bt('switch(x){case -1:break;case !y:break;}', 'switch (x) {\n case -1:\n break;\n case !y:\n break;\n}')
# typical greasemonkey start
test_fragment('// comment 2\n(function ()')
bt(
'var a2, b2, c2, d2 = 0, c = function() {}, d = \'\';',
'var a2, b2, c2, d2 = 0,\n c = function () {},\n d = \'\';')
bt(
'var a2, b2, c2, d2 = 0, c = function() {},\nd = \'\';',
'var a2, b2, c2, d2 = 0,\n c = function () {},\n d = \'\';')
bt(
'var o2=$.extend(a);function(){alert(x);}',
'var o2 = $.extend(a);\n\nfunction () {\n alert(x);\n}')
bt('function*() {\n yield 1;\n}', 'function* () {\n yield 1;\n}')
bt('function* x() {\n yield 1;\n}')
# jslint and space after anon function - (f = "", c = " ")
self.options.jslint_happy = false
self.options.space_after_anon_function = false
bt(
'a=typeof(x)',
'a = typeof(x)')
bt(
'x();\n\nfunction(){}',
'x();\n\nfunction() {}')
bt(
'function () {\n var a, b, c, d, e = [],\n f;\n}',
'function() {\n var a, b, c, d, e = [],\n f;\n}')
bt(
'switch(x) {case 0: case 1: a(); break; default: break}',
'switch (x) {\n case 0:\n case 1:\n a();\n break;\n default:\n break\n}')
bt('switch(x){case -1:break;case !y:break;}', 'switch (x) {\n case -1:\n break;\n case !y:\n break;\n}')
# typical greasemonkey start
test_fragment('// comment 2\n(function()')
bt(
'var a2, b2, c2, d2 = 0, c = function() {}, d = \'\';',
'var a2, b2, c2, d2 = 0,\n c = function() {},\n d = \'\';')
bt(
'var a2, b2, c2, d2 = 0, c = function() {},\nd = \'\';',
'var a2, b2, c2, d2 = 0,\n c = function() {},\n d = \'\';')
bt(
'var o2=$.extend(a);function(){alert(x);}',
'var o2 = $.extend(a);\n\nfunction() {\n alert(x);\n}')
bt('function*() {\n yield 1;\n}')
bt('function* x() {\n yield 1;\n}')
# Regression tests
# Issue 241
bt(
'obj\n' +
' .last({\n' +
' foo: 1,\n' +
' bar: 2\n' +
' });\n' +
'var test = 1;')
bt(
'obj\n' +
' .last(a, function() {\n' +
' var test;\n' +
' });\n' +
'var test = 1;')
bt(
'obj.first()\n' +
' .second()\n' +
' .last(function(err, response) {\n' +
' console.log(err);\n' +
' });')
# Issue 268 and 275
bt(
'obj.last(a, function() {\n' +
' var test;\n' +
'});\n' +
'var test = 1;')
bt(
'obj.last(a,\n' +
' function() {\n' +
' var test;\n' +
' });\n' +
'var test = 1;')
bt(
'(function() {if (!window.FOO) window.FOO || (window.FOO = function() {var b = {bar: "zort"};});})();',
'(function() {\n' +
' if (!window.FOO) window.FOO || (window.FOO = function() {\n' +
' var b = {\n' +
' bar: "zort"\n' +
' };\n' +
' });\n' +
'})();')
# Issue 281
bt(
'define(["dojo/_base/declare", "my/Employee", "dijit/form/Button",\n' +
' "dojo/_base/lang", "dojo/Deferred"\n' +
'], function(declare, Employee, Button, lang, Deferred) {\n' +
' return declare(Employee, {\n' +
' constructor: function() {\n' +
' new Button({\n' +
' onClick: lang.hitch(this, function() {\n' +
' new Deferred().then(lang.hitch(this, function() {\n' +
' this.salary * 0.25;\n' +
' }));\n' +
' })\n' +
' });\n' +
' }\n' +
' });\n' +
'});')
bt(
'define(["dojo/_base/declare", "my/Employee", "dijit/form/Button",\n' +
' "dojo/_base/lang", "dojo/Deferred"\n' +
' ],\n' +
' function(declare, Employee, Button, lang, Deferred) {\n' +
' return declare(Employee, {\n' +
' constructor: function() {\n' +
' new Button({\n' +
' onClick: lang.hitch(this, function() {\n' +
' new Deferred().then(lang.hitch(this, function() {\n' +
' this.salary * 0.25;\n' +
' }));\n' +
' })\n' +
' });\n' +
' }\n' +
' });\n' +
' });')
# Issue 459
bt(
'(function() {\n' +
' return {\n' +
' foo: function() {\n' +
' return "bar";\n' +
' },\n' +
' bar: ["bar"]\n' +
' };\n' +
'}());')
# Issue 505 - strings should end at newline unless continued by backslash
bt(
'var name = "a;\n' +
'name = "b";')
bt(
'var name = "a;\\\n' +
' name = b";')
# Issue 514 - some operators require spaces to distinguish them
bt('var c = "_ACTION_TO_NATIVEAPI_" + ++g++ + +new Date;')
bt('var c = "_ACTION_TO_NATIVEAPI_" - --g-- - -new Date;')
# Issue 440 - reserved words can be used as object property names
bt(
'a = {\n' +
' function: {},\n' +
' "function": {},\n' +
' throw: {},\n' +
' "throw": {},\n' +
' var: {},\n' +
' "var": {},\n' +
' set: {},\n' +
' "set": {},\n' +
' get: {},\n' +
' "get": {},\n' +
' if: {},\n' +
' "if": {},\n' +
' then: {},\n' +
' "then": {},\n' +
' else: {},\n' +
' "else": {},\n' +
' yay: {}\n' +
'};')
# Issue 331 - if-else with braces edge case
bt(
'if(x){a();}else{b();}if(y){c();}',
'if (x) {\n' +
' a();\n' +
'} else {\n' +
' b();\n' +
'}\n' +
'if (y) {\n' +
' c();\n' +
'}')
# Issue 485 - ensure function declarations behave the same in arrays as elsewhere
bt(
'var v = ["a",\n' +
' function() {\n' +
' return;\n' +
' }, {\n' +
' id: 1\n' +
' }\n' +
'];')
bt(
'var v = ["a", function() {\n' +
' return;\n' +
'}, {\n' +
' id: 1\n' +
'}];')
# Issue 382 - initial totally cursory support for es6 module export
bt(
'module "Even" {\n' +
' import odd from "Odd";\n' +
' export function sum(x, y) {\n' +
' return x + y;\n' +
' }\n' +
' export var pi = 3.141593;\n' +
' export default moduleName;\n' +
'}')
bt(
'module "Even" {\n' +
' export default function div(x, y) {}\n' +
'}')
# Issue 508
bt('set["name"]')
bt('get["name"]')
bt(
'a = {\n' +
' set b(x) {},\n' +
' c: 1,\n' +
' d: function() {}\n' +
'};')
bt(
'a = {\n' +
' get b() {\n' +
' retun 0;\n' +
' },\n' +
' c: 1,\n' +
' d: function() {}\n' +
'};')
# Issue 298 - do not under-indent if/while/for conditional expressions
bt(
'\'use strict\';\n' +
'if ([].some(function() {\n' +
' return false;\n' +
' })) {\n' +
' console.log("hello");\n' +
'}')
# Issue 298 - do not under-indent if/while/for conditional expressions
bt(
'\'use strict\';\n' +
'if ([].some(function() {\n' +
' return false;\n' +
' })) {\n' +
' console.log("hello");\n' +
'}')
# Issue 552 - Typescript? Okay... we didn't break it before, so try not to break it now.
bt(
'class Test {\n' +
' blah: string[];\n' +
' foo(): number {\n' +
' return 0;\n' +
' }\n' +
' bar(): number {\n' +
' return 0;\n' +
' }\n' +
'}')
bt(
'interface Test {\n' +
' blah: string[];\n' +
' foo(): number {\n' +
' return 0;\n' +
' }\n' +
' bar(): number {\n' +
' return 0;\n' +
' }\n' +
'}')
# Issue 583 - Functions with comments after them should still indent correctly.
bt(
'function exit(code) {\n' +
' setTimeout(function() {\n' +
' phantom.exit(code);\n' +
' }, 0);\n' +
' phantom.onError = function() {};\n' +
'}\n' +
'// Comment')
# Old tests
bt('')
test_fragment(' return .5')
test_fragment(' return .5;\n a();')
test_fragment(' return .5;\n a();')
test_fragment(' return .5;\n a();')
test_fragment(' < div')
bt('a = 1', 'a = 1')
bt('a=1', 'a = 1')
bt('(3) / 2')
bt('["a", "b"].join("")')
bt('a();\n\nb();')
bt('var a = 1 var b = 2', 'var a = 1\nvar b = 2')
bt('var a=1, b=c[d], e=6;', 'var a = 1,\n b = c[d],\n e = 6;')
bt('var a,\n b,\n c;')
bt('let a = 1 let b = 2', 'let a = 1\nlet b = 2')
bt('let a=1, b=c[d], e=6;', 'let a = 1,\n b = c[d],\n e = 6;')
bt('let a,\n b,\n c;')
bt('const a = 1 const b = 2', 'const a = 1\nconst b = 2')
bt('const a=1, b=c[d], e=6;', 'const a = 1,\n b = c[d],\n e = 6;')
bt('const a,\n b,\n c;')
bt('a = " 12345 "')
bt('a = \' 12345 \'')
bt('if (a == 1) b = 2;')
bt('if(1){2}else{3}', 'if (1) {\n 2\n} else {\n 3\n}')
bt('if(1||2);', 'if (1 || 2);')
bt('(a==1)||(b==2)', '(a == 1) || (b == 2)')
bt('var a = 1 if (2) 3;', 'var a = 1\nif (2) 3;')
bt('a = a + 1')
bt('a = a == 1')
bt('/12345[^678]*9+/.match(a)')
bt('a /= 5')
bt('a = 0.5 * 3')
bt('a *= 10.55')
bt('a < .5')
bt('a <= .5')
bt('a<.5', 'a < .5')
bt('a<=.5', 'a <= .5')
bt('a = 0xff;')
bt('a=0xff+4', 'a = 0xff + 4')
bt('a = [1, 2, 3, 4]')
bt('F*(g/=f)*g+b', 'F * (g /= f) * g + b')
bt('a.b({c:d})', 'a.b({\n c: d\n})')
bt('a.b\n(\n{\nc:\nd\n}\n)', 'a.b({\n c: d\n})')
bt('a.b({c:"d"})', 'a.b({\n c: "d"\n})')
bt('a.b\n(\n{\nc:\n"d"\n}\n)', 'a.b({\n c: "d"\n})')
bt('a=!b', 'a = !b')
bt('a=!!b', 'a = !!b')
bt('a?b:c', 'a ? b : c')
bt('a?1:2', 'a ? 1 : 2')
bt('a?(b):c', 'a ? (b) : c')
bt('x={a:1,b:w=="foo"?x:y,c:z}', 'x = {\n a: 1,\n b: w == "foo" ? x : y,\n c: z\n}')
bt('x=a?b?c?d:e:f:g;', 'x = a ? b ? c ? d : e : f : g;')
bt('x=a?b?c?d:{e1:1,e2:2}:f:g;', 'x = a ? b ? c ? d : {\n e1: 1,\n e2: 2\n} : f : g;')
bt('function void(void) {}')
bt('if(!a)foo();', 'if (!a) foo();')
bt('a=~a', 'a = ~a')
bt('a;/*comment*/b;', 'a; /*comment*/\nb;')
bt('a;/* comment */b;', 'a; /* comment */\nb;')
# simple comments don't get touched at all
test_fragment('a;/*\ncomment\n*/b;', 'a;\n/*\ncomment\n*/\nb;')
bt('a;/**\n* javadoc\n*/b;', 'a;\n/**\n * javadoc\n */\nb;')
test_fragment('a;/**\n\nno javadoc\n*/b;', 'a;\n/**\n\nno javadoc\n*/\nb;')
# comment blocks detected and reindented even w/o javadoc starter
bt('a;/*\n* javadoc\n*/b;', 'a;\n/*\n * javadoc\n */\nb;')
bt('if(a)break;', 'if (a) break;')
bt('if(a){break}', 'if (a) {\n break\n}')
bt('if((a))foo();', 'if ((a)) foo();')
bt('for(var i=0;;) a', 'for (var i = 0;;) a')
bt('for(var i=0;;)\na', 'for (var i = 0;;)\n a')
bt('a++;')
bt('for(;;i++)a()', 'for (;; i++) a()')
bt('for(;;i++)\na()', 'for (;; i++)\n a()')
bt('for(;;++i)a', 'for (;; ++i) a')
bt('return(1)', 'return (1)')
bt('try{a();}catch(b){c();}finally{d();}', 'try {\n a();\n} catch (b) {\n c();\n} finally {\n d();\n}')
# magic function call
bt('(xx)()')
# another magic function call
bt('a[1]()')
bt('if(a){b();}else if(c) foo();', 'if (a) {\n b();\n} else if (c) foo();')
bt('switch(x) {case 0: case 1: a(); break; default: break}', 'switch (x) {\n case 0:\n case 1:\n a();\n break;\n default:\n break\n}')
bt('switch(x){case -1:break;case !y:break;}', 'switch (x) {\n case -1:\n break;\n case !y:\n break;\n}')
bt('a !== b')
bt('if (a) b(); else c();', 'if (a) b();\nelse c();')
# typical greasemonkey start
bt('// comment\n(function something() {})')
# duplicating newlines
bt('{\n\n x();\n\n}')
bt('if (a in b) foo();')
bt('if(X)if(Y)a();else b();else c();', 'if (X)\n if (Y) a();\n else b();\nelse c();')
bt('if (foo) bar();\nelse break')
bt('var a, b;')
bt('var a = new function();')
test_fragment('new function')
bt('var a, b')
bt('{a:1, b:2}', '{\n a: 1,\n b: 2\n}')
bt('a={1:[-1],2:[+1]}', 'a = {\n 1: [-1],\n 2: [+1]\n}')
bt('var l = {\'a\':\'1\', \'b\':\'2\'}', 'var l = {\n \'a\': \'1\',\n \'b\': \'2\'\n}')
bt('if (template.user[n] in bk) foo();')
bt('return 45')
bt('return this.prevObject ||\n\n this.constructor(null);')
bt('If[1]')
bt('Then[1]')
bt('a = 1e10')
bt('a = 1.3e10')
bt('a = 1.3e-10')
bt('a = -1.3e-10')
bt('a = 1e-10')
bt('a = e - 10')
bt('a = 11-10', 'a = 11 - 10')
bt('a = 1;// comment', 'a = 1; // comment')
bt('a = 1; // comment')
bt('a = 1;\n // comment', 'a = 1;\n// comment')
bt('a = [-1, -1, -1]')
# The exact formatting these should have is open for discussion, but they are at least reasonable
bt('a = [ // comment\n -1, -1, -1\n]')
bt('var a = [ // comment\n -1, -1, -1\n]')
bt('a = [ // comment\n -1, // comment\n -1, -1\n]')
bt('var a = [ // comment\n -1, // comment\n -1, -1\n]')
bt('o = [{a:b},{c:d}]', 'o = [{\n a: b\n}, {\n c: d\n}]')
# was: extra space appended
bt('if (a) {\n do();\n}')
# if/else statement with empty body
bt('if (a) {\n// comment\n}else{\n// comment\n}', 'if (a) {\n // comment\n} else {\n // comment\n}')
# multiple comments indentation
bt('if (a) {\n// comment\n// comment\n}', 'if (a) {\n // comment\n // comment\n}')
bt('if (a) b() else c();', 'if (a) b()\nelse c();')
bt('if (a) b() else if c() d();', 'if (a) b()\nelse if c() d();')
bt('{}')
bt('{\n\n}')
bt('do { a(); } while ( 1 );', 'do {\n a();\n} while (1);')
bt('do {} while (1);')
bt('do {\n} while (1);', 'do {} while (1);')
bt('do {\n\n} while (1);')
bt('var a = x(a, b, c)')
bt('delete x if (a) b();', 'delete x\nif (a) b();')
bt('delete x[x] if (a) b();', 'delete x[x]\nif (a) b();')
bt('for(var a=1,b=2)d', 'for (var a = 1, b = 2) d')
bt('for(var a=1,b=2,c=3) d', 'for (var a = 1, b = 2, c = 3) d')
bt('for(var a=1,b=2,c=3;d<3;d++)\ne', 'for (var a = 1, b = 2, c = 3; d < 3; d++)\n e')
bt('function x(){(a||b).c()}', 'function x() {\n (a || b).c()\n}')
bt('function x(){return - 1}', 'function x() {\n return -1\n}')
bt('function x(){return ! a}', 'function x() {\n return !a\n}')
bt('x => x')
bt('(x) => x')
bt('x => { x }', 'x => {\n x\n}')
bt('(x) => { x }', '(x) => {\n x\n}')
# a common snippet in jQuery plugins
bt(
'settings = $.extend({},defaults,settings);',
'settings = $.extend({}, defaults, settings);')
bt('$http().then().finally().default()')
bt('$http()\n.then()\n.finally()\n.default()', '$http()\n .then()\n .finally()\n .default()')
bt('$http().when.in.new.catch().throw()')
bt('$http()\n.when\n.in\n.new\n.catch()\n.throw()', '$http()\n .when\n .in\n .new\n .catch()\n .throw()')
bt('{xxx;}()', '{\n xxx;\n}()')
bt('a = \'a\'\nb = \'b\'')
bt('a = /reg/exp')
bt('a = /reg/')
bt('/abc/.test()')
bt('/abc/i.test()')
bt('{/abc/i.test()}', '{\n /abc/i.test()\n}')
bt('var x=(a)/a;', 'var x = (a) / a;')
bt('x != -1')
bt('for (; s-->0;)t', 'for (; s-- > 0;) t')
bt('for (; s++>0;)u', 'for (; s++ > 0;) u')
bt('a = s++>s--;', 'a = s++ > s--;')
bt('a = s++>--s;', 'a = s++ > --s;')
bt('{x=#1=[]}', '{\n x = #1=[]\n}')
bt('{a:#1={}}', '{\n a: #1={}\n}')
bt('{a:#1#}', '{\n a: #1#\n}')
test_fragment('"incomplete-string')
test_fragment('\'incomplete-string')
test_fragment('/incomplete-regex')
test_fragment('`incomplete-template-string')
test_fragment('{a:1},{a:2}', '{\n a: 1\n}, {\n a: 2\n}')
test_fragment('var ary=[{a:1}, {a:2}];', 'var ary = [{\n a: 1\n}, {\n a: 2\n}];')
# incomplete
test_fragment('{a:#1', '{\n a: #1')
# incomplete
test_fragment('{a:#', '{\n a: #')
# incomplete
test_fragment('}}}', '}\n}\n}')
test_fragment('<!--\nvoid();\n// -->')
# incomplete regexp
test_fragment('a=/regexp', 'a = /regexp')
bt('{a:#1=[],b:#1#,c:#999999#}', '{\n a: #1=[],\n b: #1#,\n c: #999999#\n}')
bt('a = 1e+2')
bt('a = 1e-2')
bt('do{x()}while(a>1)', 'do {\n x()\n} while (a > 1)')
bt('x(); /reg/exp.match(something)', 'x();\n/reg/exp.match(something)')
test_fragment('something();(', 'something();\n(')
test_fragment('#!she/bangs, she bangs\nf=1', '#!she/bangs, she bangs\n\nf = 1')
test_fragment('#!she/bangs, she bangs\n\nf=1', '#!she/bangs, she bangs\n\nf = 1')
test_fragment('#!she/bangs, she bangs\n\n/* comment */')
test_fragment('#!she/bangs, she bangs\n\n\n/* comment */')
test_fragment('#')
test_fragment('#!')
bt('function namespace::something()')
test_fragment('<!--\nsomething();\n-->')
test_fragment('<!--\nif(i<0){bla();}\n-->', '<!--\nif (i < 0) {\n bla();\n}\n-->')
bt('{foo();--bar;}', '{\n foo();\n --bar;\n}')
bt('{foo();++bar;}', '{\n foo();\n ++bar;\n}')
bt('{--bar;}', '{\n --bar;\n}')
bt('{++bar;}', '{\n ++bar;\n}')
bt('if(true)++a;', 'if (true) ++a;')
bt('if(true)\n++a;', 'if (true)\n ++a;')
bt('if(true)--a;', 'if (true) --a;')
bt('if(true)\n--a;', 'if (true)\n --a;')
bt('elem[array]++;')
bt('elem++ * elem[array]++;')
bt('elem-- * -elem[array]++;')
bt('elem-- + elem[array]++;')
bt('elem-- - elem[array]++;')
bt('elem-- - -elem[array]++;')
bt('elem-- - +elem[array]++;')
# Handling of newlines around unary ++ and -- operators
bt('{foo\n++bar;}', '{\n foo\n ++bar;\n}')
bt('{foo++\nbar;}', '{\n foo++\n bar;\n}')
# This is invalid, but harder to guard against. Issue #203.
bt('{foo\n++\nbar;}', '{\n foo\n ++\n bar;\n}')
# regexps
bt('a(/abc\\/\\/def/);b()', 'a(/abc\\/\\/def/);\nb()')
bt('a(/a[b\\[\\]c]d/);b()', 'a(/a[b\\[\\]c]d/);\nb()')
# incomplete char class
test_fragment('a(/a[b\\[')
# allow unescaped / in char classes
bt('a(/[a/b]/);b()', 'a(/[a/b]/);\nb()')
bt('typeof /foo\\//;')
bt('yield /foo\\//;')
bt('throw /foo\\//;')
bt('do /foo\\//;')
bt('return /foo\\//;')
bt('switch (a) {\n case /foo\\//:\n b\n}')
bt('if (a) /foo\\//\nelse /foo\\//;')
bt('if (foo) /regex/.test();')
bt('for (index in [1, 2, 3]) /^test$/i.test(s)')
bt('result = yield pgClient.query_(queryString);')
bt('function foo() {\n return [\n "one",\n "two"\n ];\n}')
bt('a=[[1,2],[4,5],[7,8]]', 'a = [\n [1, 2],\n [4, 5],\n [7, 8]\n]')
bt('a=[[1,2],[4,5],function(){},[7,8]]', 'a = [\n [1, 2],\n [4, 5],\n function() {},\n [7, 8]\n]')
bt('a=[[1,2],[4,5],function(){},function(){},[7,8]]', 'a = [\n [1, 2],\n [4, 5],\n function() {},\n function() {},\n [7, 8]\n]')
bt('a=[[1,2],[4,5],function(){},[7,8]]', 'a = [\n [1, 2],\n [4, 5],\n function() {},\n [7, 8]\n]')
bt('a=[b,c,function(){},function(){},d]', 'a = [b, c, function() {}, function() {}, d]')
bt('a=[b,c,\nfunction(){},function(){},d]', 'a = [b, c,\n function() {},\n function() {},\n d\n]')
bt('a=[a[1],b[4],c[d[7]]]', 'a = [a[1], b[4], c[d[7]]]')
bt('[1,2,[3,4,[5,6],7],8]', '[1, 2, [3, 4, [5, 6], 7], 8]')
bt('[[["1","2"],["3","4"]],[["5","6","7"],["8","9","0"]],[["1","2","3"],["4","5","6","7"],["8","9","0"]]]', '[\n [\n ["1", "2"],\n ["3", "4"]\n ],\n [\n ["5", "6", "7"],\n ["8", "9", "0"]\n ],\n [\n ["1", "2", "3"],\n ["4", "5", "6", "7"],\n ["8", "9", "0"]\n ]\n]')
bt('{[x()[0]];indent;}', '{\n [x()[0]];\n indent;\n}')
bt('/*\n foo trailing space \n * bar trailing space \n**/')
bt('{\n /*\n foo \n * bar \n */\n}')
bt('return ++i')
bt('return !!x')
bt('return !x')
bt('return [1,2]', 'return [1, 2]')
bt('return;')
bt('return\nfunc')
bt('catch(e)', 'catch (e)')
bt('yield [1, 2]')
bt('var a=1,b={foo:2,bar:3},{baz:4,wham:5},c=4;', 'var a = 1,\n b = {\n foo: 2,\n bar: 3\n },\n {\n baz: 4,\n wham: 5\n }, c = 4;')
bt('var a=1,b={foo:2,bar:3},{baz:4,wham:5},\nc=4;', 'var a = 1,\n b = {\n foo: 2,\n bar: 3\n },\n {\n baz: 4,\n wham: 5\n },\n c = 4;')
# inline comment
bt(
'function x(/*int*/ start, /*string*/ foo)',
'function x( /*int*/ start, /*string*/ foo)')
# javadoc comment
bt('/**\n* foo\n*/', '/**\n * foo\n */')
bt('{\n/**\n* foo\n*/\n}', '{\n /**\n * foo\n */\n}')
# starless block comment
bt('/**\nfoo\n*/')
bt('/**\nfoo\n**/')
bt('/**\nfoo\nbar\n**/')
bt('/**\nfoo\n\nbar\n**/')
bt('/**\nfoo\n bar\n**/')
bt('{\n/**\nfoo\n*/\n}', '{\n /**\n foo\n */\n}')
bt('{\n/**\nfoo\n**/\n}', '{\n /**\n foo\n **/\n}')
bt('{\n/**\nfoo\nbar\n**/\n}', '{\n /**\n foo\n bar\n **/\n}')
bt('{\n/**\nfoo\n\nbar\n**/\n}', '{\n /**\n foo\n\n bar\n **/\n}')
bt('{\n/**\nfoo\n bar\n**/\n}', '{\n /**\n foo\n bar\n **/\n}')
bt('{\n /**\n foo\nbar\n **/\n}')
bt('var a,b,c=1,d,e,f=2;', 'var a, b, c = 1,\n d, e, f = 2;')
bt('var a,b,c=[],d,e,f=2;', 'var a, b, c = [],\n d, e, f = 2;')
bt('function() {\n var a, b, c, d, e = [],\n f;\n}')
bt('do/regexp/;\nwhile(1);', 'do /regexp/;\nwhile (1);')
bt('var a = a,\na;\nb = {\nb\n}', 'var a = a,\n a;\nb = {\n b\n}')
bt('var a = a,\n /* c */\n b;')
bt('var a = a,\n // c\n b;')
# weird element referencing
bt('foo.("bar");')
bt('if (a) a()\nelse b()\nnewline()')
bt('if (a) a()\nnewline()')
bt('a=typeof(x)', 'a = typeof(x)')
bt('var a = function() {\n return null;\n },\n b = false;')
bt('var a = function() {\n func1()\n}')
bt('var a = function() {\n func1()\n}\nvar b = function() {\n func2()\n}')
# code with and without semicolons
bt(
'var whatever = require("whatever");\nfunction() {\n a = 6;\n}',
'var whatever = require("whatever");\n\nfunction() {\n a = 6;\n}')
bt('var whatever = require("whatever")\nfunction() {\n a = 6\n}', 'var whatever = require("whatever")\n\nfunction() {\n a = 6\n}')
bt('{"x":[{"a":1,"b":3},\n7,8,8,8,8,{"b":99},{"a":11}]}', '{\n "x": [{\n "a": 1,\n "b": 3\n },\n 7, 8, 8, 8, 8, {\n "b": 99\n }, {\n "a": 11\n }\n ]\n}')
bt('{"x":[{"a":1,"b":3},7,8,8,8,8,{"b":99},{"a":11}]}', '{\n "x": [{\n "a": 1,\n "b": 3\n }, 7, 8, 8, 8, 8, {\n "b": 99\n }, {\n "a": 11\n }]\n}')
bt('{"1":{"1a":"1b"},"2"}', '{\n "1": {\n "1a": "1b"\n },\n "2"\n}')
bt('{a:{a:b},c}', '{\n a: {\n a: b\n },\n c\n}')
bt('{[y[a]];keep_indent;}', '{\n [y[a]];\n keep_indent;\n}')
bt('if (x) {y} else { if (x) {y}}', 'if (x) {\n y\n} else {\n if (x) {\n y\n }\n}')
bt('if (foo) one()\ntwo()\nthree()')
bt('if (1 + foo() && bar(baz()) / 2) one()\ntwo()\nthree()')
bt('if (1 + foo() && bar(baz()) / 2) one();\ntwo();\nthree();')
bt('var a=1,b={bang:2},c=3;', 'var a = 1,\n b = {\n bang: 2\n },\n c = 3;')
bt('var a={bing:1},b=2,c=3;', 'var a = {\n bing: 1\n },\n b = 2,\n c = 3;')
bt('{{}/z/}', "{\n {}\n /z/\n}")
self.options.indent_size = 1;
self.options.indent_char = ' ';
bt('{ one_char() }', "{\n one_char()\n}")
bt('var a,b=1,c=2', 'var a, b = 1,\n c = 2')
self.options.indent_size = 4;
self.options.indent_char = ' ';
bt('{ one_char() }', "{\n one_char()\n}")
self.options.indent_size = 1;
self.options.indent_char = "\t";
bt('{ one_char() }', "{\n\tone_char()\n}")
bt('x = a ? b : c; x;', 'x = a ? b : c;\nx;')
# set indent_size to a value other than the expected tab width; with indent_with_tabs on, tabs should still win
self.options.indent_size = 5;
self.options.indent_char = ' ';
self.options.indent_with_tabs = True;
bt('{ one_char() }', "{\n\tone_char()\n}")
bt('x = a ? b : c; x;', 'x = a ? b : c;\nx;')
self.options.indent_size = 4;
self.options.indent_char = ' ';
self.options.indent_with_tabs = False;
self.options.preserve_newlines = False;
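# (With preserve_newlines off, the author's own line breaks inside a statement are dropped,
# as the next test shows; deliberate blank lines between function definitions are still kept,
# per the tests that follow.)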
bt('var\na=dont_preserve_newlines;', 'var a = dont_preserve_newlines;')
# make sure the blank line between function definitions stays
# even when preserve_newlines = False
bt('function foo() {\n return 1;\n}\n\nfunction foo() {\n return 1;\n}')
bt('function foo() {\n return 1;\n}\nfunction foo() {\n return 1;\n}',
'function foo() {\n return 1;\n}\n\nfunction foo() {\n return 1;\n}'
)
bt('function foo() {\n return 1;\n}\n\n\nfunction foo() {\n return 1;\n}',
'function foo() {\n return 1;\n}\n\nfunction foo() {\n return 1;\n}'
)
self.options.preserve_newlines = True;
bt('var\na=do_preserve_newlines;', 'var\n a = do_preserve_newlines;')
bt('// a\n// b\n\n// c\n// d')
bt('if (foo) // comment\n{\n bar();\n}')
self.options.keep_array_indentation = False;
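# (With keep_array_indentation off, multi-line array literals are reflowed to the
# beautifier's own indentation, as the expected outputs below show.)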
bt("a = ['a', 'b', 'c',\n 'd', 'e', 'f']",
"a = ['a', 'b', 'c',\n 'd', 'e', 'f'\n]")
bt("a = ['a', 'b', 'c',\n 'd', 'e', 'f',\n 'g', 'h', 'i']",
"a = ['a', 'b', 'c',\n 'd', 'e', 'f',\n 'g', 'h', 'i'\n]")
bt("a = ['a', 'b', 'c',\n 'd', 'e', 'f',\n 'g', 'h', 'i']",
"a = ['a', 'b', 'c',\n 'd', 'e', 'f',\n 'g', 'h', 'i'\n]")
bt('var x = [{}\n]', 'var x = [{}]')
bt('var x = [{foo:bar}\n]', 'var x = [{\n foo: bar\n}]')
bt("a = ['something',\n 'completely',\n 'different'];\nif (x);",
"a = ['something',\n 'completely',\n 'different'\n];\nif (x);")
bt("a = ['a','b','c']", "a = ['a', 'b', 'c']")
bt("a = ['a', 'b','c']", "a = ['a', 'b', 'c']")
bt("x = [{'a':0}]",
"x = [{\n 'a': 0\n}]")
bt('{a([[a1]], {b;});}',
'{\n a([\n [a1]\n ], {\n b;\n });\n}')
bt("a();\n [\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ].toString();",
"a();\n[\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n].toString();")
bt("a();\na = [\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ].toString();",
"a();\na = [\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n].toString();")
bt("function() {\n Foo([\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ]);\n}",
"function() {\n Foo([\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ]);\n}")
bt('function foo() {\n return [\n "one",\n "two"\n ];\n}')
# input indented 4 spaces per level, processed with 4 spaces per indent
bt( "function foo() {\n" +
" return [\n" +
" {\n" +
" one: 'x',\n" +
" two: [\n" +
" {\n" +
" id: 'a',\n" +
" name: 'apple'\n" +
" }, {\n" +
" id: 'b',\n" +
" name: 'banana'\n" +
" }\n" +
" ]\n" +
" }\n" +
" ];\n" +
"}",
"function foo() {\n" +
" return [{\n" +
" one: 'x',\n" +
" two: [{\n" +
" id: 'a',\n" +
" name: 'apple'\n" +
" }, {\n" +
" id: 'b',\n" +
" name: 'banana'\n" +
" }]\n" +
" }];\n" +
"}")
# input indented 3 spaces per level, processed with 4 spaces per indent
bt( "function foo() {\n" +
" return [\n" +
" {\n" +
" one: 'x',\n" +
" two: [\n" +
" {\n" +
" id: 'a',\n" +
" name: 'apple'\n" +
" }, {\n" +
" id: 'b',\n" +
" name: 'banana'\n" +
" }\n" +
" ]\n" +
" }\n" +
" ];\n" +
"}",
"function foo() {\n" +
" return [{\n" +
" one: 'x',\n" +
" two: [{\n" +
" id: 'a',\n" +
" name: 'apple'\n" +
" }, {\n" +
" id: 'b',\n" +
" name: 'banana'\n" +
" }]\n" +
" }];\n" +
"}")
self.options.keep_array_indentation = True;
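# (With keep_array_indentation on, the author's original line breaks and indentation
# inside array literals are preserved instead of being reflowed.)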
bt("a = ['a', 'b', 'c',\n 'd', 'e', 'f']")
bt("a = ['a', 'b', 'c',\n 'd', 'e', 'f',\n 'g', 'h', 'i']")
bt("a = ['a', 'b', 'c',\n 'd', 'e', 'f',\n 'g', 'h', 'i']")
bt('var x = [{}\n]', 'var x = [{}\n]')
bt('var x = [{foo:bar}\n]', 'var x = [{\n foo: bar\n }\n]')
bt("a = ['something',\n 'completely',\n 'different'];\nif (x);")
bt("a = ['a','b','c']", "a = ['a', 'b', 'c']")
bt("a = ['a', 'b','c']", "a = ['a', 'b', 'c']")
bt("x = [{'a':0}]",
"x = [{\n 'a': 0\n}]")
bt('{a([[a1]], {b;});}',
'{\n a([[a1]], {\n b;\n });\n}')
bt("a();\n [\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ].toString();",
"a();\n [\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ].toString();")
bt("a();\na = [\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ].toString();",
"a();\na = [\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ].toString();")
bt("function() {\n Foo([\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ]);\n}",
"function() {\n Foo([\n ['sdfsdfsd'],\n ['sdfsdfsdf']\n ]);\n}")
bt('function foo() {\n return [\n "one",\n "two"\n ];\n}')
# input indented 4 spaces per level, processed with 4 spaces per indent
bt( "function foo() {\n" +
" return [\n" +
" {\n" +
" one: 'x',\n" +
" two: [\n" +
" {\n" +
" id: 'a',\n" +
" name: 'apple'\n" +
" }, {\n" +
" id: 'b',\n" +
" name: 'banana'\n" +
" }\n" +
" ]\n" +
" }\n" +
" ];\n" +
"}")
# input indented 3 spaces per level, processed with 4 spaces per indent
# Should be unchanged, but is not - #445
# bt( "function foo() {\n" +
# " return [\n" +
# " {\n" +
# " one: 'x',\n" +
# " two: [\n" +
# " {\n" +
# " id: 'a',\n" +
# " name: 'apple'\n" +
# " }, {\n" +
# " id: 'b',\n" +
# " name: 'banana'\n" +
# " }\n" +
# " ]\n" +
# " }\n" +
# " ];\n" +
# "}")
self.options.keep_array_indentation = False;
bt('a = //comment\n /regex/;')
bt('if (a)\n{\nb;\n}\nelse\n{\nc;\n}', 'if (a) {\n b;\n} else {\n c;\n}')
bt('var a = new function();')
test_fragment('new function')
# START tests for brace positioning
# If this is ever supported, update tests for each brace style.
# test_fragment('return\n{', 'return\n{') # can't support this (?), but that's an improbable and extreme case anyway.
self.options.brace_style = 'expand';
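# ('expand' places every opening brace on its own line, as the expected outputs below show.)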
bt('//case 1\nif (a == 1)\n{}\n//case 2\nelse if (a == 2)\n{}')
bt('if(1){2}else{3}', "if (1)\n{\n 2\n}\nelse\n{\n 3\n}")
bt('try{a();}catch(b){c();}catch(d){}finally{e();}',
"try\n{\n a();\n}\ncatch (b)\n{\n c();\n}\ncatch (d)\n{}\nfinally\n{\n e();\n}")
bt('if(a){b();}else if(c) foo();',
"if (a)\n{\n b();\n}\nelse if (c) foo();")
bt("if (a) {\n// comment\n}else{\n// comment\n}",
"if (a)\n{\n // comment\n}\nelse\n{\n // comment\n}") # if/else statement with empty body
bt('if (x) {y} else { if (x) {y}}',
'if (x)\n{\n y\n}\nelse\n{\n if (x)\n {\n y\n }\n}')
bt('if (a)\n{\nb;\n}\nelse\n{\nc;\n}',
'if (a)\n{\n b;\n}\nelse\n{\n c;\n}')
test_fragment(' /*\n* xx\n*/\n// xx\nif (foo) {\n bar();\n}',
' /*\n * xx\n */\n // xx\n if (foo)\n {\n bar();\n }')
bt('if (foo)\n{}\nelse /regex/.test();')
test_fragment('if (foo) {', 'if (foo)\n{')
test_fragment('foo {', 'foo\n{')
test_fragment('return {', 'return {') # return needs the brace.
test_fragment('return /* inline */ {', 'return /* inline */ {')
test_fragment('return;\n{', 'return;\n{')
bt("throw {}")
bt("throw {\n foo;\n}")
bt('var foo = {}')
bt('function x() {\n foo();\n}zzz', 'function x()\n{\n foo();\n}\nzzz')
test_fragment('a: do {} while (); xxx', 'a: do {} while ();\nxxx')
bt('{a: do {} while (); xxx}', '{\n a: do {} while ();xxx\n}')
bt('var a = new function() {};')
bt('var a = new function a() {};', 'var a = new function a()\n{};')
bt('var a = new function()\n{};', 'var a = new function() {};')
bt('var a = new function a()\n{};')
bt('var a = new function a()\n {},\n b = new function b()\n {};')
bt("foo({\n 'a': 1\n},\n10);",
"foo(\n {\n 'a': 1\n },\n 10);")
bt('(["foo","bar"]).each(function(i) {return i;});',
'(["foo", "bar"]).each(function(i)\n{\n return i;\n});')
bt('(function(i) {return i;})();',
'(function(i)\n{\n return i;\n})();')
bt( "test( /*Argument 1*/ {\n" +
" 'Value1': '1'\n" +
"}, /*Argument 2\n" +
" */ {\n" +
" 'Value2': '2'\n" +
"});",
# expected
"test( /*Argument 1*/\n" +
" {\n" +
" 'Value1': '1'\n" +
" },\n" +
" /*Argument 2\n" +
" */\n" +
" {\n" +
" 'Value2': '2'\n" +
" });")
bt( "test(\n" +
"/*Argument 1*/ {\n" +
" 'Value1': '1'\n" +
"},\n" +
"/*Argument 2\n" +
" */ {\n" +
" 'Value2': '2'\n" +
"});",
# expected
"test(\n" +
" /*Argument 1*/\n" +
" {\n" +
" 'Value1': '1'\n" +
" },\n" +
" /*Argument 2\n" +
" */\n" +
" {\n" +
" 'Value2': '2'\n" +
" });")
bt( "test( /*Argument 1*/\n" +
"{\n" +
" 'Value1': '1'\n" +
"}, /*Argument 2\n" +
" */\n" +
"{\n" +
" 'Value2': '2'\n" +
"});",
# expected
"test( /*Argument 1*/\n" +
" {\n" +
" 'Value1': '1'\n" +
" },\n" +
" /*Argument 2\n" +
" */\n" +
" {\n" +
" 'Value2': '2'\n" +
" });")
self.options.brace_style = 'collapse';
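# ('collapse' keeps opening braces on the same line as their statement and joins
# '} else {' / '} catch {' onto one line; this is the default style assumed by most
# tests elsewhere in this suite.)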
bt('//case 1\nif (a == 1) {}\n//case 2\nelse if (a == 2) {}')
bt('if(1){2}else{3}', "if (1) {\n 2\n} else {\n 3\n}")
bt('try{a();}catch(b){c();}catch(d){}finally{e();}',
"try {\n a();\n} catch (b) {\n c();\n} catch (d) {} finally {\n e();\n}")
bt('if(a){b();}else if(c) foo();',
"if (a) {\n b();\n} else if (c) foo();")
bt("if (a) {\n// comment\n}else{\n// comment\n}",
"if (a) {\n // comment\n} else {\n // comment\n}") # if/else statement with empty body
bt('if (x) {y} else { if (x) {y}}',
'if (x) {\n y\n} else {\n if (x) {\n y\n }\n}')
bt('if (a)\n{\nb;\n}\nelse\n{\nc;\n}',
'if (a) {\n b;\n} else {\n c;\n}')
test_fragment(' /*\n* xx\n*/\n// xx\nif (foo) {\n bar();\n}',
' /*\n * xx\n */\n // xx\n if (foo) {\n bar();\n }')
bt('if (foo) {} else /regex/.test();')
test_fragment('if (foo) {', 'if (foo) {')
test_fragment('foo {', 'foo {')
test_fragment('return {', 'return {') # return needs the brace.
test_fragment('return /* inline */ {', 'return /* inline */ {')
test_fragment('return;\n{', 'return; {')
bt("throw {}")
bt("throw {\n foo;\n}")
bt('var foo = {}')
bt('function x() {\n foo();\n}zzz', 'function x() {\n foo();\n}\nzzz')
test_fragment('a: do {} while (); xxx', 'a: do {} while ();\nxxx')
bt('{a: do {} while (); xxx}', '{\n a: do {} while ();xxx\n}')
bt('var a = new function() {};')
bt('var a = new function a() {};')
bt('var a = new function()\n{};', 'var a = new function() {};')
bt('var a = new function a()\n{};', 'var a = new function a() {};')
bt('var a = new function a()\n {},\n b = new function b()\n {};', 'var a = new function a() {},\n b = new function b() {};')
bt("foo({\n 'a': 1\n},\n10);",
"foo({\n 'a': 1\n },\n 10);")
bt('(["foo","bar"]).each(function(i) {return i;});',
'(["foo", "bar"]).each(function(i) {\n return i;\n});')
bt('(function(i) {return i;})();',
'(function(i) {\n return i;\n})();')
bt( "test( /*Argument 1*/ {\n" +
" 'Value1': '1'\n" +
"}, /*Argument 2\n" +
" */ {\n" +
" 'Value2': '2'\n" +
"});",
# expected
"test( /*Argument 1*/ {\n" +
" 'Value1': '1'\n" +
" },\n" +
" /*Argument 2\n" +
" */\n" +
" {\n" +
" 'Value2': '2'\n" +
" });")
bt( "test(\n" +
"/*Argument 1*/ {\n" +
" 'Value1': '1'\n" +
"},\n" +
"/*Argument 2\n" +
" */ {\n" +
" 'Value2': '2'\n" +
"});",
# expected
"test(\n" +
" /*Argument 1*/\n" +
" {\n" +
" 'Value1': '1'\n" +
" },\n" +
" /*Argument 2\n" +
" */\n" +
" {\n" +
" 'Value2': '2'\n" +
" });")
bt( "test( /*Argument 1*/\n" +
"{\n" +
" 'Value1': '1'\n" +
"}, /*Argument 2\n" +
" */\n" +
"{\n" +
" 'Value2': '2'\n" +
"});",
# expected
"test( /*Argument 1*/ {\n" +
" 'Value1': '1'\n" +
" },\n" +
" /*Argument 2\n" +
" */\n" +
" {\n" +
" 'Value2': '2'\n" +
" });")
self.options.brace_style = "end-expand";
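# ('end-expand' behaves like 'collapse' except that 'else', 'catch' and 'finally' start
# on a new line after the closing brace, as in '}\nelse {'.)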
bt('//case 1\nif (a == 1) {}\n//case 2\nelse if (a == 2) {}')
bt('if(1){2}else{3}', "if (1) {\n 2\n}\nelse {\n 3\n}")
bt('try{a();}catch(b){c();}catch(d){}finally{e();}',
"try {\n a();\n}\ncatch (b) {\n c();\n}\ncatch (d) {}\nfinally {\n e();\n}")
bt('if(a){b();}else if(c) foo();',
"if (a) {\n b();\n}\nelse if (c) foo();")
bt("if (a) {\n// comment\n}else{\n// comment\n}",
"if (a) {\n // comment\n}\nelse {\n // comment\n}") # if/else statement with empty body
bt('if (x) {y} else { if (x) {y}}',
'if (x) {\n y\n}\nelse {\n if (x) {\n y\n }\n}')
bt('if (a)\n{\nb;\n}\nelse\n{\nc;\n}',
'if (a) {\n b;\n}\nelse {\n c;\n}')
test_fragment(' /*\n* xx\n*/\n// xx\nif (foo) {\n bar();\n}',
' /*\n * xx\n */\n // xx\n if (foo) {\n bar();\n }')
bt('if (foo) {}\nelse /regex/.test();')
test_fragment('if (foo) {', 'if (foo) {')
test_fragment('foo {', 'foo {')
test_fragment('return {', 'return {') # return needs the brace.
test_fragment('return /* inline */ {', 'return /* inline */ {')
test_fragment('return;\n{', 'return; {')
bt("throw {}")
bt("throw {\n foo;\n}")
bt('var foo = {}')
bt('function x() {\n foo();\n}zzz', 'function x() {\n foo();\n}\nzzz')
test_fragment('a: do {} while (); xxx', 'a: do {} while ();\nxxx')
bt('{a: do {} while (); xxx}', '{\n a: do {} while ();xxx\n}')
bt('var a = new function() {};')
bt('var a = new function a() {};')
bt('var a = new function()\n{};', 'var a = new function() {};')
bt('var a = new function a()\n{};', 'var a = new function a() {};')
bt('var a = new function a()\n {},\n b = new function b()\n {};', 'var a = new function a() {},\n b = new function b() {};')
bt("foo({\n 'a': 1\n},\n10);",
"foo({\n 'a': 1\n },\n 10);")
bt('(["foo","bar"]).each(function(i) {return i;});',
'(["foo", "bar"]).each(function(i) {\n return i;\n});')
bt('(function(i) {return i;})();',
'(function(i) {\n return i;\n})();')
bt( "test( /*Argument 1*/ {\n" +
" 'Value1': '1'\n" +
"}, /*Argument 2\n" +
" */ {\n" +
" 'Value2': '2'\n" +
"});",
# expected
"test( /*Argument 1*/ {\n" +
" 'Value1': '1'\n" +
" },\n" +
" /*Argument 2\n" +
" */\n" +
" {\n" +
" 'Value2': '2'\n" +
" });")
bt( "test(\n" +
"/*Argument 1*/ {\n" +
" 'Value1': '1'\n" +
"},\n" +
"/*Argument 2\n" +
" */ {\n" +
" 'Value2': '2'\n" +
"});",
# expected
"test(\n" +
" /*Argument 1*/\n" +
" {\n" +
" 'Value1': '1'\n" +
" },\n" +
" /*Argument 2\n" +
" */\n" +
" {\n" +
" 'Value2': '2'\n" +
" });")
bt( "test( /*Argument 1*/\n" +
"{\n" +
" 'Value1': '1'\n" +
"}, /*Argument 2\n" +
" */\n" +
"{\n" +
" 'Value2': '2'\n" +
"});",
# expected
"test( /*Argument 1*/ {\n" +
" 'Value1': '1'\n" +
" },\n" +
" /*Argument 2\n" +
" */\n" +
" {\n" +
" 'Value2': '2'\n" +
" });")
self.options.brace_style = 'none';
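# ('none' keeps whatever brace placement the input already uses rather than normalizing it,
# as the mixed expected outputs below show.)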
bt('//case 1\nif (a == 1)\n{}\n//case 2\nelse if (a == 2)\n{}')
bt('if(1){2}else{3}', "if (1) {\n 2\n} else {\n 3\n}")
bt('try{a();}catch(b){c();}catch(d){}finally{e();}',
"try {\n a();\n} catch (b) {\n c();\n} catch (d) {} finally {\n e();\n}")
bt('if(a){b();}else if(c) foo();',
"if (a) {\n b();\n} else if (c) foo();")
bt("if (a) {\n// comment\n}else{\n// comment\n}",
"if (a) {\n // comment\n} else {\n // comment\n}") # if/else statement with empty body
bt('if (x) {y} else { if (x) {y}}',
'if (x) {\n y\n} else {\n if (x) {\n y\n }\n}')
bt('if (a)\n{\nb;\n}\nelse\n{\nc;\n}',
'if (a)\n{\n b;\n}\nelse\n{\n c;\n}')
test_fragment(' /*\n* xx\n*/\n// xx\nif (foo) {\n bar();\n}',
' /*\n * xx\n */\n // xx\n if (foo) {\n bar();\n }')
bt('if (foo)\n{}\nelse /regex/.test();')
test_fragment('if (foo) {')
test_fragment('foo {')
test_fragment('return {') # return needs the brace.
test_fragment('return /* inline */ {')
test_fragment('return;\n{')
bt("throw {}")
bt("throw {\n foo;\n}")
bt('var foo = {}')
bt('function x() {\n foo();\n}zzz', 'function x() {\n foo();\n}\nzzz')
test_fragment('a: do {} while (); xxx', 'a: do {} while ();\nxxx')
bt('{a: do {} while (); xxx}', '{\n a: do {} while ();xxx\n}')
bt('var a = new function() {};')
bt('var a = new function a() {};')
bt('var a = new function()\n{};', 'var a = new function() {};')
bt('var a = new function a()\n{};')
bt('var a = new function a()\n {},\n b = new function b()\n {};')
bt("foo({\n 'a': 1\n},\n10);",
"foo({\n 'a': 1\n },\n 10);")
bt('(["foo","bar"]).each(function(i) {return i;});',
'(["foo", "bar"]).each(function(i) {\n return i;\n});')
bt('(function(i) {return i;})();',
'(function(i) {\n return i;\n})();')
bt( "test( /*Argument 1*/ {\n" +
" 'Value1': '1'\n" +
"}, /*Argument 2\n" +
" */ {\n" +
" 'Value2': '2'\n" +
"});",
# expected
"test( /*Argument 1*/ {\n" +
" 'Value1': '1'\n" +
" },\n" +
" /*Argument 2\n" +
" */\n" +
" {\n" +
" 'Value2': '2'\n" +
" });")
bt( "test(\n" +
"/*Argument 1*/ {\n" +
" 'Value1': '1'\n" +
"},\n" +
"/*Argument 2\n" +
" */ {\n" +
" 'Value2': '2'\n" +
"});",
# expected
"test(\n" +
" /*Argument 1*/\n" +
" {\n" +
" 'Value1': '1'\n" +
" },\n" +
" /*Argument 2\n" +
" */\n" +
" {\n" +
" 'Value2': '2'\n" +
" });")
bt( "test( /*Argument 1*/\n" +
"{\n" +
" 'Value1': '1'\n" +
"}, /*Argument 2\n" +
" */\n" +
"{\n" +
" 'Value2': '2'\n" +
"});",
# expected
"test( /*Argument 1*/\n" +
" {\n" +
" 'Value1': '1'\n" +
" },\n" +
" /*Argument 2\n" +
" */\n" +
" {\n" +
" 'Value2': '2'\n" +
" });")
# END tests for brace position
self.options.brace_style = 'collapse';
test_fragment('roo = {\n /*\n ****\n FOO\n ****\n */\n BAR: 0\n};')
test_fragment("if (zz) {\n // ....\n}\n(function")
self.options.preserve_newlines = True;
bt('var a = 42; // foo\n\nvar b;')
bt('var a = 42; // foo\n\n\nvar b;')
bt("var a = 'foo' +\n 'bar';")
bt("var a = \"foo\" +\n \"bar\";")
bt('"foo""bar""baz"', '"foo"\n"bar"\n"baz"')
bt("'foo''bar''baz'", "'foo'\n'bar'\n'baz'")
bt("{\n get foo() {}\n}")
bt("{\n var a = get\n foo();\n}")
bt("{\n set foo() {}\n}")
bt("{\n var a = set\n foo();\n}")
bt("var x = {\n get function()\n}")
bt("var x = {\n set function()\n}")
# According to my current research get/set have no special meaning outside of an object literal
bt("var x = set\n\na() {}", "var x = set\n\na() {}")
bt("var x = set\n\nfunction() {}", "var x = set\n\nfunction() {}")
bt('<!-- foo\nbar();\n-->')
bt('<!-- dont crash') # -->
bt('for () /abc/.test()')
bt('if (k) /aaa/m.test(v) && l();')
bt('switch (true) {\n case /swf/i.test(foo):\n bar();\n}')
bt('createdAt = {\n type: Date,\n default: Date.now\n}')
bt('switch (createdAt) {\n case a:\n Date,\n default:\n Date.now\n}')
bt('return function();')
bt('var a = function();')
bt('var a = 5 + function();')
bt('{\n foo // something\n ,\n bar // something\n baz\n}')
bt('function a(a) {} function b(b) {} function c(c) {}', 'function a(a) {}\n\nfunction b(b) {}\n\nfunction c(c) {}')
bt('3.*7;', '3. * 7;')
bt('a = 1.e-64 * 0.5e+4 / 6e-23;')
bt('import foo.*;', 'import foo.*;') # actionscript's import
test_fragment('function f(a: a, b: b)') # actionscript
bt('foo(a, function() {})')
bt('foo(a, /regex/)')
bt('/* foo */\n"x"')
self.options.break_chained_methods = False
self.options.preserve_newlines = False
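# (The four true/false combinations of break_chained_methods and preserve_newlines are
# exercised below against the same chained-call inputs: break_chained_methods puts each
# chained .call() on its own line, while preserve_newlines keeps the author's existing breaks.)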
bt('foo\n.bar()\n.baz().cucumber(fat)', 'foo.bar().baz().cucumber(fat)')
bt('foo\n.bar()\n.baz().cucumber(fat); foo.bar().baz().cucumber(fat)', 'foo.bar().baz().cucumber(fat);\nfoo.bar().baz().cucumber(fat)')
bt('foo\n.bar()\n.baz().cucumber(fat)\n foo.bar().baz().cucumber(fat)', 'foo.bar().baz().cucumber(fat)\nfoo.bar().baz().cucumber(fat)')
bt('this\n.something = foo.bar()\n.baz().cucumber(fat)', 'this.something = foo.bar().baz().cucumber(fat)')
bt('this.something.xxx = foo.moo.bar()')
bt('this\n.something\n.xxx = foo.moo\n.bar()', 'this.something.xxx = foo.moo.bar()')
self.options.break_chained_methods = False
self.options.preserve_newlines = True
bt('foo\n.bar()\n.baz().cucumber(fat)', 'foo\n .bar()\n .baz().cucumber(fat)')
bt('foo\n.bar()\n.baz().cucumber(fat); foo.bar().baz().cucumber(fat)', 'foo\n .bar()\n .baz().cucumber(fat);\nfoo.bar().baz().cucumber(fat)')
bt('foo\n.bar()\n.baz().cucumber(fat)\n foo.bar().baz().cucumber(fat)', 'foo\n .bar()\n .baz().cucumber(fat)\nfoo.bar().baz().cucumber(fat)')
bt('this\n.something = foo.bar()\n.baz().cucumber(fat)', 'this\n .something = foo.bar()\n .baz().cucumber(fat)')
bt('this.something.xxx = foo.moo.bar()')
bt('this\n.something\n.xxx = foo.moo\n.bar()', 'this\n .something\n .xxx = foo.moo\n .bar()')
self.options.break_chained_methods = True
self.options.preserve_newlines = False
bt('foo\n.bar()\n.baz().cucumber(fat)', 'foo.bar()\n .baz()\n .cucumber(fat)')
bt('foo\n.bar()\n.baz().cucumber(fat); foo.bar().baz().cucumber(fat)', 'foo.bar()\n .baz()\n .cucumber(fat);\nfoo.bar()\n .baz()\n .cucumber(fat)')
bt('foo\n.bar()\n.baz().cucumber(fat)\n foo.bar().baz().cucumber(fat)', 'foo.bar()\n .baz()\n .cucumber(fat)\nfoo.bar()\n .baz()\n .cucumber(fat)')
bt('this\n.something = foo.bar()\n.baz().cucumber(fat)', 'this.something = foo.bar()\n .baz()\n .cucumber(fat)')
bt('this.something.xxx = foo.moo.bar()')
bt('this\n.something\n.xxx = foo.moo\n.bar()', 'this.something.xxx = foo.moo.bar()')
self.options.break_chained_methods = True
self.options.preserve_newlines = True
bt('foo\n.bar()\n.baz().cucumber(fat)', 'foo\n .bar()\n .baz()\n .cucumber(fat)')
bt('foo\n.bar()\n.baz().cucumber(fat); foo.bar().baz().cucumber(fat)', 'foo\n .bar()\n .baz()\n .cucumber(fat);\nfoo.bar()\n .baz()\n .cucumber(fat)')
bt('foo\n.bar()\n.baz().cucumber(fat)\n foo.bar().baz().cucumber(fat)', 'foo\n .bar()\n .baz()\n .cucumber(fat)\nfoo.bar()\n .baz()\n .cucumber(fat)')
bt('this\n.something = foo.bar()\n.baz().cucumber(fat)', 'this\n .something = foo.bar()\n .baz()\n .cucumber(fat)')
bt('this.something.xxx = foo.moo.bar()')
bt('this\n.something\n.xxx = foo.moo\n.bar()', 'this\n .something\n .xxx = foo.moo\n .bar()')
self.options.break_chained_methods = False
self.options.preserve_newlines = False
# Line wrap test inputs
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
wrap_input_1=('foo.bar().baz().cucumber((fat && "sassy") || (leans\n&& mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n.but_this_can\n' +
'if (wraps_can_occur && inside_an_if_block) that_is_\n.okay();\n' +
'object_literal = {\n' +
' propertx: first_token + 12345678.99999E-6,\n' +
' property: first_token_should_never_wrap + but_this_can,\n' +
' propertz: first_token_should_never_wrap + !but_this_can,\n' +
' proper: "first_token_should_never_wrap" + "but_this_can"\n' +
'}')
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
wrap_input_2=('{\n' +
' foo.bar().baz().cucumber((fat && "sassy") || (leans\n&& mean));\n' +
' Test_very_long_variable_name_this_should_never_wrap\n.but_this_can\n' +
' if (wraps_can_occur && inside_an_if_block) that_is_\n.okay();\n' +
' object_literal = {\n' +
' propertx: first_token + 12345678.99999E-6,\n' +
' property: first_token_should_never_wrap + but_this_can,\n' +
' propertz: first_token_should_never_wrap + !but_this_can,\n' +
' proper: "first_token_should_never_wrap" + "but_this_can"\n' +
' }' +
'}')
self.options.preserve_newlines = False
self.options.wrap_line_length = 0
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
test_fragment(wrap_input_1,
# expected #
'foo.bar().baz().cucumber((fat && "sassy") || (leans && mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap.but_this_can\n' +
'if (wraps_can_occur && inside_an_if_block) that_is_.okay();\n' +
'object_literal = {\n' +
' propertx: first_token + 12345678.99999E-6,\n' +
' property: first_token_should_never_wrap + but_this_can,\n' +
' propertz: first_token_should_never_wrap + !but_this_can,\n' +
' proper: "first_token_should_never_wrap" + "but_this_can"\n' +
'}')
self.options.wrap_line_length = 70
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
test_fragment(wrap_input_1,
# expected #
'foo.bar().baz().cucumber((fat && "sassy") || (leans && mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap.but_this_can\n' +
'if (wraps_can_occur && inside_an_if_block) that_is_.okay();\n' +
'object_literal = {\n' +
' propertx: first_token + 12345678.99999E-6,\n' +
' property: first_token_should_never_wrap + but_this_can,\n' +
' propertz: first_token_should_never_wrap + !but_this_can,\n' +
' proper: "first_token_should_never_wrap" + "but_this_can"\n' +
'}')
self.options.wrap_line_length = 40
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
test_fragment(wrap_input_1,
# expected #
'foo.bar().baz().cucumber((fat &&\n' +
' "sassy") || (leans && mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n' +
' .but_this_can\n' +
'if (wraps_can_occur &&\n' +
' inside_an_if_block) that_is_.okay();\n' +
'object_literal = {\n' +
' propertx: first_token +\n' +
' 12345678.99999E-6,\n' +
' property: first_token_should_never_wrap +\n' +
' but_this_can,\n' +
' propertz: first_token_should_never_wrap +\n' +
' !but_this_can,\n' +
' proper: "first_token_should_never_wrap" +\n' +
' "but_this_can"\n' +
'}')
self.options.wrap_line_length = 41
# NOTE: wrap is only best effort - line continues until next wrap point is found.
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
test_fragment(wrap_input_1,
# expected #
'foo.bar().baz().cucumber((fat && "sassy") ||\n' +
' (leans && mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n' +
' .but_this_can\n' +
'if (wraps_can_occur &&\n' +
' inside_an_if_block) that_is_.okay();\n' +
'object_literal = {\n' +
' propertx: first_token +\n' +
' 12345678.99999E-6,\n' +
' property: first_token_should_never_wrap +\n' +
' but_this_can,\n' +
' propertz: first_token_should_never_wrap +\n' +
' !but_this_can,\n' +
' proper: "first_token_should_never_wrap" +\n' +
' "but_this_can"\n' +
'}')
self.options.wrap_line_length = 45
# NOTE: wrap is only best effort - line continues until next wrap point is found.
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
test_fragment(wrap_input_2,
# expected #
'{\n' +
' foo.bar().baz().cucumber((fat && "sassy") ||\n' +
' (leans && mean));\n' +
' Test_very_long_variable_name_this_should_never_wrap\n' +
' .but_this_can\n' +
' if (wraps_can_occur &&\n' +
' inside_an_if_block) that_is_.okay();\n' +
' object_literal = {\n' +
' propertx: first_token +\n' +
' 12345678.99999E-6,\n' +
' property: first_token_should_never_wrap +\n' +
' but_this_can,\n' +
' propertz: first_token_should_never_wrap +\n' +
' !but_this_can,\n' +
' proper: "first_token_should_never_wrap" +\n' +
' "but_this_can"\n' +
' }\n'+
'}')
self.options.preserve_newlines = True
self.options.wrap_line_length = 0
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
test_fragment(wrap_input_1,
# expected #
'foo.bar().baz().cucumber((fat && "sassy") || (leans && mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n' +
' .but_this_can\n' +
'if (wraps_can_occur && inside_an_if_block) that_is_\n' +
' .okay();\n' +
'object_literal = {\n' +
' propertx: first_token + 12345678.99999E-6,\n' +
' property: first_token_should_never_wrap + but_this_can,\n' +
' propertz: first_token_should_never_wrap + !but_this_can,\n' +
' proper: "first_token_should_never_wrap" + "but_this_can"\n' +
'}')
self.options.wrap_line_length = 70
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
test_fragment(wrap_input_1,
# expected #
'foo.bar().baz().cucumber((fat && "sassy") || (leans && mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n' +
' .but_this_can\n' +
'if (wraps_can_occur && inside_an_if_block) that_is_\n' +
' .okay();\n' +
'object_literal = {\n' +
' propertx: first_token + 12345678.99999E-6,\n' +
' property: first_token_should_never_wrap + but_this_can,\n' +
' propertz: first_token_should_never_wrap + !but_this_can,\n' +
' proper: "first_token_should_never_wrap" + "but_this_can"\n' +
'}')
self.options.wrap_line_length = 40
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
test_fragment(wrap_input_1,
# expected #
'foo.bar().baz().cucumber((fat &&\n' +
' "sassy") || (leans && mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n' +
' .but_this_can\n' +
'if (wraps_can_occur &&\n' +
' inside_an_if_block) that_is_\n' +
' .okay();\n' +
'object_literal = {\n' +
' propertx: first_token +\n' +
' 12345678.99999E-6,\n' +
' property: first_token_should_never_wrap +\n' +
' but_this_can,\n' +
' propertz: first_token_should_never_wrap +\n' +
' !but_this_can,\n' +
' proper: "first_token_should_never_wrap" +\n' +
' "but_this_can"\n' +
'}')
self.options.wrap_line_length = 41
# NOTE: wrap is only best effort - line continues until next wrap point is found.
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
test_fragment(wrap_input_1,
# expected #
'foo.bar().baz().cucumber((fat && "sassy") ||\n' +
' (leans && mean));\n' +
'Test_very_long_variable_name_this_should_never_wrap\n' +
' .but_this_can\n' +
'if (wraps_can_occur &&\n' +
' inside_an_if_block) that_is_\n' +
' .okay();\n' +
'object_literal = {\n' +
' propertx: first_token +\n' +
' 12345678.99999E-6,\n' +
' property: first_token_should_never_wrap +\n' +
' but_this_can,\n' +
' propertz: first_token_should_never_wrap +\n' +
' !but_this_can,\n' +
' proper: "first_token_should_never_wrap" +\n' +
' "but_this_can"\n' +
'}')
self.options.wrap_line_length = 45
# NOTE: wrap is only best effort - line continues until next wrap point is found.
#..............---------1---------2---------3---------4---------5---------6---------7
#..............1234567890123456789012345678901234567890123456789012345678901234567890
test_fragment(wrap_input_2,
# expected #
'{\n' +
' foo.bar().baz().cucumber((fat && "sassy") ||\n' +
' (leans && mean));\n' +
' Test_very_long_variable_name_this_should_never_wrap\n' +
' .but_this_can\n' +
' if (wraps_can_occur &&\n' +
' inside_an_if_block) that_is_\n' +
' .okay();\n' +
' object_literal = {\n' +
' propertx: first_token +\n' +
' 12345678.99999E-6,\n' +
' property: first_token_should_never_wrap +\n' +
' but_this_can,\n' +
' propertz: first_token_should_never_wrap +\n' +
' !but_this_can,\n' +
' proper: "first_token_should_never_wrap" +\n' +
' "but_this_can"\n' +
' }\n'+
'}')
self.options.wrap_line_length = 0
self.options.preserve_newlines = False
bt('if (foo) // comment\n bar();')
bt('if (foo) // comment\n (bar());')
bt('if (foo) // comment\n (bar());')
bt('if (foo) // comment\n /asdf/;')
bt('this.oa = new OAuth(\n' +
' _requestToken,\n' +
' _accessToken,\n' +
' consumer_key\n' +
');',
'this.oa = new OAuth(_requestToken, _accessToken, consumer_key);')
bt('foo = {\n x: y, // #44\n w: z // #44\n}')
bt('switch (x) {\n case "a":\n // comment on newline\n break;\n case "b": // comment on same line\n break;\n}')
bt('this.type =\n this.options =\n // comment\n this.enabled null;',
'this.type = this.options =\n // comment\n this.enabled null;')
bt('someObj\n .someFunc1()\n // This comment should not break the indent\n .someFunc2();',
'someObj.someFunc1()\n // This comment should not break the indent\n .someFunc2();')
bt('if (true ||\n!true) return;', 'if (true || !true) return;')
# these aren't ready yet.
#bt('if (foo) // comment\n bar() /*i*/ + baz() /*j\n*/ + asdf();')
bt('if\n(foo)\nif\n(bar)\nif\n(baz)\nwhee();\na();',
'if (foo)\n if (bar)\n if (baz) whee();\na();')
bt('if\n(foo)\nif\n(bar)\nif\n(baz)\nwhee();\nelse\na();',
'if (foo)\n if (bar)\n if (baz) whee();\n else a();')
bt('if (foo)\nbar();\nelse\ncar();',
'if (foo) bar();\nelse car();')
bt('if (foo) if (bar) if (baz);\na();',
'if (foo)\n if (bar)\n if (baz);\na();')
bt('if (foo) if (bar) if (baz) whee();\na();',
'if (foo)\n if (bar)\n if (baz) whee();\na();')
bt('if (foo) a()\nif (bar) if (baz) whee();\na();',
'if (foo) a()\nif (bar)\n if (baz) whee();\na();')
bt('if (foo);\nif (bar) if (baz) whee();\na();',
'if (foo);\nif (bar)\n if (baz) whee();\na();')
bt('if (options)\n' +
' for (var p in options)\n' +
' this[p] = options[p];',
'if (options)\n'+
' for (var p in options) this[p] = options[p];')
bt('if (options) for (var p in options) this[p] = options[p];',
'if (options)\n for (var p in options) this[p] = options[p];')
bt('if (options) do q(); while (b());',
'if (options)\n do q(); while (b());')
bt('if (options) while (b()) q();',
'if (options)\n while (b()) q();')
bt('if (options) do while (b()) q(); while (a());',
'if (options)\n do\n while (b()) q(); while (a());')
bt('function f(a, b, c,\nd, e) {}',
'function f(a, b, c, d, e) {}')
bt('function f(a,b) {if(a) b()}function g(a,b) {if(!a) b()}',
'function f(a, b) {\n if (a) b()\n}\n\nfunction g(a, b) {\n if (!a) b()\n}')
bt('function f(a,b) {if(a) b()}\n\n\n\nfunction g(a,b) {if(!a) b()}',
'function f(a, b) {\n if (a) b()\n}\n\nfunction g(a, b) {\n if (!a) b()\n}')
# This is not valid syntax, but still want to behave reasonably and not side-effect
bt('(if(a) b())(if(a) b())',
'(\n if (a) b())(\n if (a) b())')
bt('(if(a) b())\n\n\n(if(a) b())',
'(\n if (a) b())\n(\n if (a) b())')
# space between functions
bt('/*\n * foo\n */\nfunction foo() {}')
bt('// a nice function\nfunction foo() {}')
bt('function foo() {}\nfunction foo() {}',
'function foo() {}\n\nfunction foo() {}'
)
bt('[\n function() {}\n]')
bt("if\n(a)\nb();", "if (a) b();")
bt('var a =\nfoo', 'var a = foo')
bt('var a = {\n"a":1,\n"b":2}', "var a = {\n \"a\": 1,\n \"b\": 2\n}")
bt("var a = {\n'a':1,\n'b':2}", "var a = {\n 'a': 1,\n 'b': 2\n}")
bt('var a = /*i*/ "b";')
bt('var a = /*i*/\n"b";', 'var a = /*i*/ "b";')
bt('var a = /*i*/\nb;', 'var a = /*i*/ b;')
bt('{\n\n\n"x"\n}', '{\n "x"\n}')
bt('if(a &&\nb\n||\nc\n||d\n&&\ne) e = f', 'if (a && b || c || d && e) e = f')
bt('if(a &&\n(b\n||\nc\n||d)\n&&\ne) e = f', 'if (a && (b || c || d) && e) e = f')
test_fragment('\n\n"x"', '"x"')
bt('a = 1;\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nb = 2;',
'a = 1;\nb = 2;')
self.options.preserve_newlines = True
bt('if (foo) // comment\n bar();')
bt('if (foo) // comment\n (bar());')
bt('if (foo) // comment\n (bar());')
bt('if (foo) // comment\n /asdf/;')
bt('this.oa = new OAuth(\n' +
' _requestToken,\n' +
' _accessToken,\n' +
' consumer_key\n' +
');')
bt('foo = {\n x: y, // #44\n w: z // #44\n}')
bt('switch (x) {\n case "a":\n // comment on newline\n break;\n case "b": // comment on same line\n break;\n}')
bt('this.type =\n this.options =\n // comment\n this.enabled null;')
bt('someObj\n .someFunc1()\n // This comment should not break the indent\n .someFunc2();')
bt('if (true ||\n!true) return;', 'if (true ||\n !true) return;')
# these aren't ready yet.
# bt('if (foo) // comment\n bar() /*i*/ + baz() /*j\n*/ + asdf();')
bt('if\n(foo)\nif\n(bar)\nif\n(baz)\nwhee();\na();',
'if (foo)\n if (bar)\n if (baz)\n whee();\na();')
bt('if\n(foo)\nif\n(bar)\nif\n(baz)\nwhee();\nelse\na();',
'if (foo)\n if (bar)\n if (baz)\n whee();\n else\n a();')
bt('if (foo) bar();\nelse\ncar();',
'if (foo) bar();\nelse\n car();')
bt('if (foo) if (bar) if (baz);\na();',
'if (foo)\n if (bar)\n if (baz);\na();')
bt('if (foo) if (bar) if (baz) whee();\na();',
'if (foo)\n if (bar)\n if (baz) whee();\na();')
bt('if (foo) a()\nif (bar) if (baz) whee();\na();',
'if (foo) a()\nif (bar)\n if (baz) whee();\na();')
bt('if (foo);\nif (bar) if (baz) whee();\na();',
'if (foo);\nif (bar)\n if (baz) whee();\na();')
bt('if (options)\n' +
' for (var p in options)\n' +
' this[p] = options[p];')
bt('if (options) for (var p in options) this[p] = options[p];',
'if (options)\n for (var p in options) this[p] = options[p];')
bt('if (options) do q(); while (b());',
'if (options)\n do q(); while (b());')
bt('if (options) do; while (b());',
'if (options)\n do; while (b());')
bt('if (options) while (b()) q();',
'if (options)\n while (b()) q();')
bt('if (options) do while (b()) q(); while (a());',
'if (options)\n do\n while (b()) q(); while (a());')
bt('function f(a, b, c,\nd, e) {}',
'function f(a, b, c,\n d, e) {}')
bt('function f(a,b) {if(a) b()}function g(a,b) {if(!a) b()}',
'function f(a, b) {\n if (a) b()\n}\n\nfunction g(a, b) {\n if (!a) b()\n}')
bt('function f(a,b) {if(a) b()}\n\n\n\nfunction g(a,b) {if(!a) b()}',
'function f(a, b) {\n if (a) b()\n}\n\n\n\nfunction g(a, b) {\n if (!a) b()\n}')
# This is not valid syntax, but still want to behave reasonably and not side-effect
bt('(if(a) b())(if(a) b())',
'(\n if (a) b())(\n if (a) b())')
bt('(if(a) b())\n\n\n(if(a) b())',
'(\n if (a) b())\n\n\n(\n if (a) b())')
bt("if\n(a)\nb();", "if (a)\n b();")
bt('var a =\nfoo', 'var a =\n foo')
bt('var a = {\n"a":1,\n"b":2}', "var a = {\n \"a\": 1,\n \"b\": 2\n}")
bt("var a = {\n'a':1,\n'b':2}", "var a = {\n 'a': 1,\n 'b': 2\n}")
bt('var a = /*i*/ "b";')
bt('var a = /*i*/\n"b";', 'var a = /*i*/\n "b";')
bt('var a = /*i*/\nb;', 'var a = /*i*/\n b;')
bt('{\n\n\n"x"\n}', '{\n\n\n "x"\n}')
bt('if(a &&\nb\n||\nc\n||d\n&&\ne) e = f', 'if (a &&\n b ||\n c || d &&\n e) e = f')
bt('if(a &&\n(b\n||\nc\n||d)\n&&\ne) e = f', 'if (a &&\n (b ||\n c || d) &&\n e) e = f')
test_fragment('\n\n"x"', '"x"')
        # this behavior differs between js and python; it defaults to unlimited in js, 10 in python
bt('a = 1;\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nb = 2;',
'a = 1;\n\n\n\n\n\n\n\n\n\nb = 2;')
self.options.max_preserve_newlines = 8;
bt('a = 1;\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nb = 2;',
'a = 1;\n\n\n\n\n\n\n\nb = 2;')
# Test the option to have spaces within parens
self.options.space_in_paren = False
self.options.space_in_empty_paren = False
bt('if(p) foo(a,b)', 'if (p) foo(a, b)')
bt('try{while(true){willThrow()}}catch(result)switch(result){case 1:++result }',
'try {\n while (true) {\n willThrow()\n }\n} catch (result) switch (result) {\n case 1:\n ++result\n}')
bt('((e/((a+(b)*c)-d))^2)*5;', '((e / ((a + (b) * c) - d)) ^ 2) * 5;')
bt('function f(a,b) {if(a) b()}function g(a,b) {if(!a) b()}',
'function f(a, b) {\n if (a) b()\n}\n\nfunction g(a, b) {\n if (!a) b()\n}')
bt('a=[];',
'a = [];')
bt('a=[b,c,d];',
'a = [b, c, d];')
bt('a= f[b];',
'a = f[b];')
self.options.space_in_paren = True
bt('if(p) foo(a,b)', 'if ( p ) foo( a, b )')
bt('try{while(true){willThrow()}}catch(result)switch(result){case 1:++result }',
'try {\n while ( true ) {\n willThrow()\n }\n} catch ( result ) switch ( result ) {\n case 1:\n ++result\n}')
bt('((e/((a+(b)*c)-d))^2)*5;', '( ( e / ( ( a + ( b ) * c ) - d ) ) ^ 2 ) * 5;')
bt('function f(a,b) {if(a) b()}function g(a,b) {if(!a) b()}',
'function f( a, b ) {\n if ( a ) b()\n}\n\nfunction g( a, b ) {\n if ( !a ) b()\n}')
bt('a=[ ];',
'a = [];')
bt('a=[b,c,d];',
'a = [ b, c, d ];')
bt('a= f[b];',
'a = f[ b ];')
self.options.space_in_empty_paren = True
bt('if(p) foo(a,b)', 'if ( p ) foo( a, b )')
bt('try{while(true){willThrow()}}catch(result)switch(result){case 1:++result }',
'try {\n while ( true ) {\n willThrow( )\n }\n} catch ( result ) switch ( result ) {\n case 1:\n ++result\n}')
bt('((e/((a+(b)*c)-d))^2)*5;', '( ( e / ( ( a + ( b ) * c ) - d ) ) ^ 2 ) * 5;')
bt('function f(a,b) {if(a) b()}function g(a,b) {if(!a) b()}',
'function f( a, b ) {\n if ( a ) b( )\n}\n\nfunction g( a, b ) {\n if ( !a ) b( )\n}')
bt('a=[ ];',
'a = [ ];')
bt('a=[b,c,d];',
'a = [ b, c, d ];')
bt('a= f[b];',
'a = f[ b ];')
self.options.space_in_paren = False
self.options.space_in_empty_paren = False
# Test template strings
bt('`This is a ${template} string.`', '`This is a ${template} string.`')
bt('`This\n is\n a\n ${template}\n string.`', '`This\n is\n a\n ${template}\n string.`')
bt('a = `This is a continuation\\\nstring.`', 'a = `This is a continuation\\\nstring.`');
bt('a = "This is a continuation\\\nstring."', 'a = "This is a continuation\\\nstring."');
def decodesto(self, input, expectation=None):
        if expectation is None:
expectation = input
self.assertMultiLineEqual(
jsbeautifier.beautify(input, self.options), expectation)
# if the expected is different from input, run it again
# expected output should be unchanged when run twice.
        if expectation is not None:
self.assertMultiLineEqual(
jsbeautifier.beautify(expectation, self.options), expectation)
# Everywhere we do newlines, they should be replaced with opts.eol
self.options.eol = '\r\\n';
expectation = expectation.replace('\n', '\r\n')
self.assertMultiLineEqual(
jsbeautifier.beautify(input, self.options), expectation)
input = input.replace('\n', '\r\n')
self.assertMultiLineEqual(
jsbeautifier.beautify(input, self.options), expectation)
self.options.eol = '\n'
def wrap(self, text):
return self.wrapregex.sub(' \\1', text)
def bt(self, input, expectation=None):
        if expectation is None:
expectation = input
self.decodesto(input, expectation)
# If we set raw, input should be unchanged
self.options.test_output_raw = True
if self.options.end_with_newline:
            self.decodesto(input, input)
self.options.test_output_raw = False
if self.options.indent_size == 4 and input:
wrapped_input = '{\n%s\n foo = bar;\n}' % self.wrap(input)
wrapped_expect = '{\n%s\n foo = bar;\n}' % self.wrap(expectation)
self.decodesto(wrapped_input, wrapped_expect)
# If we set raw, input should be unchanged
self.options.test_output_raw = True
if self.options.end_with_newline:
                self.decodesto(wrapped_input, wrapped_input)
self.options.test_output_raw = False
@classmethod
def setUpClass(cls):
options = jsbeautifier.default_options()
options.indent_size = 4
options.indent_char = ' '
options.preserve_newlines = True
options.jslint_happy = False
options.keep_array_indentation = False
options.brace_style = 'collapse'
options.indent_level = 0
options.break_chained_methods = False
options.eol = '\n'
cls.options = options
cls.wrapregex = re.compile('^(.+)$', re.MULTILINE)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 4,402,752,420,700,130,300 | 45.293157 | 347 | 0.364814 | false |
timabell/gpodder | src/gpodder/gtkui/interface/progress.py | 3 | 4066 | # -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2010 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import gtk
import gobject
import pango
import gpodder
_ = gpodder.gettext
from gpodder.gtkui.widgets import SpinningProgressIndicator
class ProgressIndicator(object):
# Delayed time until window is shown (for short operations)
DELAY = 500
# Time between GUI updates after window creation
INTERVAL = 100
def __init__(self, title, subtitle=None, cancellable=False, parent=None):
self.title = title
self.subtitle = subtitle
self.cancellable = cancellable
self.parent = parent
self.dialog = None
self.progressbar = None
self.indicator = None
self._initial_message = None
self._initial_progress = None
self._progress_set = False
self.source_id = gobject.timeout_add(self.DELAY, self._create_progress)
def _on_delete_event(self, window, event):
if self.cancellable:
self.dialog.response(gtk.RESPONSE_CANCEL)
return True
def _create_progress(self):
if gpodder.ui.fremantle:
self.dialog = gtk.Dialog(self.title, self.parent, 0, \
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))
import hildon
hildon.hildon_gtk_window_set_progress_indicator(self.dialog, True)
else:
self.dialog = gtk.MessageDialog(self.parent, \
0, 0, gtk.BUTTONS_CANCEL, self.subtitle or self.title)
self.dialog.label.set_selectable(False)
self.dialog.connect('delete-event', self._on_delete_event)
self.dialog.set_title(self.title)
self.dialog.set_deletable(self.cancellable)
self.dialog.set_response_sensitive(gtk.RESPONSE_CANCEL, \
self.cancellable)
self.progressbar = gtk.ProgressBar()
self.progressbar.set_ellipsize(pango.ELLIPSIZE_END)
# If the window is shown after the first update, set the progress
# info so that when the window appears, data is there already
if self._initial_progress is not None:
self.progressbar.set_fraction(self._initial_progress)
if self._initial_message is not None:
self.progressbar.set_text(self._initial_message)
self.dialog.vbox.add(self.progressbar)
if not gpodder.ui.fremantle:
self.indicator = SpinningProgressIndicator()
self.dialog.set_image(self.indicator)
self.dialog.show_all()
gobject.source_remove(self.source_id)
self.source_id = gobject.timeout_add(self.INTERVAL, self._update_gui)
return False
def _update_gui(self):
if self.indicator:
self.indicator.step_animation()
if not self._progress_set and self.progressbar:
self.progressbar.pulse()
return True
def on_message(self, message):
if self.progressbar:
self.progressbar.set_text(message)
else:
self._initial_message = message
def on_progress(self, progress):
self._progress_set = True
if self.progressbar:
self.progressbar.set_fraction(progress)
else:
self._initial_progress = progress
def on_finished(self):
if self.dialog is not None:
self.dialog.destroy()
gobject.source_remove(self.source_id)
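
# Illustrative usage sketch (an assumption added for documentation purposes,
# not part of gPodder itself): drive the indicator from a long-running task.
# "feeds" and "feed.title" below are hypothetical placeholders.
#
#     indicator = ProgressIndicator(_('Updating feeds'), cancellable=True)
#     for index, feed in enumerate(feeds):
#         indicator.on_message(feed.title)
#         indicator.on_progress(float(index) / len(feeds))
#     indicator.on_finished()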
| gpl-3.0 | -9,015,536,793,855,132,000 | 34.051724 | 79 | 0.655435 | false |
kennethreitz/ghsync | ghsync/core.py | 1 | 4791 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Kenneth Reitz's GitHub Syncer
This script uses the GitHub API to get a list of all forked, mirrored, public, and
private repos in your GitHub account. If the repo already exists locally, it will
update it via git-pull. Otherwise, it will properly clone the repo.
It will organize your repos into the following directory structure:
+ repos
├── forks (public fork repos)
├── mirrors (public mirror repos)
├── private (private repos)
├── public (public repos)
└── watched (public watched repos)
Requires Ask Solem's github2 (http://pypi.python.org/pypi/github2).
Inspired by Gisty (http://github.com/swdyh/gisty).
"""
import os
import sys
from clint import args
from clint.textui import puts, colored, indent
import requests
import json
from github2.client import Github
try:
# check_output is new in 2.7.
from subprocess import check_output
def cmd(command):
return check_output(command, shell=True).strip()
except ImportError:
# commands is deprecated and doesn't work on Windows
from commands import getoutput as cmd
__author__ = 'Kenneth Reitz'
__license__ = 'ISC'
__copyright__ = '2011 Kenneth Reitz'
__version__ = '0.3.1'
# GitHub configurations
GITHUB_USER = cmd('git config github.user')
GITHUB_TOKEN = cmd('git config github.token')
GHSYNC_DIR = os.environ.get('GHSYNC_DIR', '.')
def run():
# cli flags
upstream_on = args.flags.contains('--upstream')
only_type = args.grouped.get('--only', False)
organization = args[0]
os.chdir(GHSYNC_DIR)
# API Object
github = Github(username=GITHUB_USER, api_token=GITHUB_TOKEN)
# repo slots
repos = {}
if not organization:
repos['watched'] = [r for r in github.repos.watching(GITHUB_USER)]
repos['private'] = []
repos['mirrors'] = []
repos['public'] = []
repos['forks'] = []
# Collect GitHub repos via API
for repo in github.repos.list(organization):
if repo.private:
repos['private'].append(repo)
elif repo.fork:
repos['forks'].append(repo)
elif 'mirror' in repo.description.lower():
# mirrors owned by self if mirror in description...
repos['mirrors'].append(repo)
else:
repos['public'].append(repo)
for org, repos in repos.iteritems():
for repo in repos:
# create org directory (safely)
try:
os.makedirs(org)
except OSError:
pass
# enter org dir
os.chdir(org)
# I own the repo
is_private = (org in ('private', 'forks', 'mirror', 'public'))
is_fork = (org == 'forks')
if is_fork:
_url = 'http://github.com/api/v2/json/repos/show/{repo.owner}/{repo.name}'.format(repo=repo)
repo.parent = json.loads(requests.get(_url, ).content)['repository'].get('parent')
if not only_type or (org in only_type):
# just `git pull` if it's already there
if os.path.exists(repo.name):
os.chdir(repo.name)
puts(colored.red('Updating repo: {repo.name}'.format(repo=repo)))
os.system('git pull')
if is_fork and upstream_on:
print repo.__dict__
puts(colored.red('Adding upstream: {repo.parent}'.format(repo=repo)))
os.system('git remote add upstream [email protected]:{repo.parent}.git'.format(repo=repo))
os.chdir('..')
else:
if is_private:
puts(colored.red('Cloning private repo: {repo.name}'.format(repo=repo)))
os.system('git clone [email protected]:{repo.owner}/{repo.name}.git'.format(repo=repo))
print ('git clone [email protected]:%s/%s.git' % (repo.owner, repo.name))
if is_fork and upstream_on:
os.chdir(repo.name)
puts(colored.red('Adding upstream: {repo.parent}'.format(repo=repo)))
os.system('git remote add upstream [email protected]:{repo.parent}.git'.format(repo=repo))
os.chdir('..')
else:
puts(colored.red('Cloning repo: {repo.name}'.format(repo=repo)))
os.system('git clone git://github.com/%s/%s.git' % (repo.owner, repo.name))
print ('git clone git://github.com/%s/%s.git' % (repo.owner, repo.name))
# return to base
os.chdir('..')
if __name__ == '__main__':
run()
| isc | 302,543,416,765,623,040 | 30.529801 | 115 | 0.563327 | false |
jeedom/plugin-sms | resources/smsd/gsmmodem/serial_comms.py | 9 | 6451 | #!/usr/bin/env python
""" Low-level serial communications handling """
import sys, threading, logging
import re
import serial # pyserial: http://pyserial.sourceforge.net
from .exceptions import TimeoutException
from . import compat # For Python 2.6 compatibility
class SerialComms(object):
""" Wraps all low-level serial communications (actual read/write operations) """
log = logging.getLogger('gsmmodem.serial_comms.SerialComms')
# End-of-line read terminator
RX_EOL_SEQ = '\r\n'
# End-of-response terminator
RESPONSE_TERM = re.compile(r'^OK|ERROR|(\+CM[ES] ERROR: \d+)|(COMMAND NOT SUPPORT)$')
# Default timeout for serial port reads (in seconds)
timeout = 1
def __init__(self, port, baudrate=115200, notifyCallbackFunc=None, fatalErrorCallbackFunc=None, *args, **kwargs):
""" Constructor
:param fatalErrorCallbackFunc: function to call if a fatal error occurs in the serial device reading thread
:type fatalErrorCallbackFunc: func
"""
self.alive = False
self.port = port
self.baudrate = baudrate
self._responseEvent = None # threading.Event()
self._expectResponseTermSeq = None # expected response terminator sequence
self._response = None # Buffer containing response to a written command
self._notification = [] # Buffer containing lines from an unsolicited notification from the modem
# Reentrant lock for managing concurrent write access to the underlying serial port
self._txLock = threading.RLock()
self.notifyCallback = notifyCallbackFunc or self._placeholderCallback
self.fatalErrorCallback = fatalErrorCallbackFunc or self._placeholderCallback
def connect(self):
""" Connects to the device and starts the read thread """
self.serial = serial.Serial(port=self.port, baudrate=self.baudrate, timeout=self.timeout)
# Start read thread
self.alive = True
self.rxThread = threading.Thread(target=self._readLoop)
self.rxThread.daemon = True
self.rxThread.start()
def close(self):
""" Stops the read thread, waits for it to exit cleanly, then closes the underlying serial port """
self.alive = False
self.rxThread.join()
self.serial.close()
def _handleLineRead(self, line, checkForResponseTerm=True):
#print 'sc.hlineread:',line
if self._responseEvent and not self._responseEvent.is_set():
# A response event has been set up (another thread is waiting for this response)
self._response.append(line)
if not checkForResponseTerm or self.RESPONSE_TERM.match(line):
# End of response reached; notify waiting thread
#print 'response:', self._response
self.log.debug('response: %s', self._response)
self._responseEvent.set()
else:
# Nothing was waiting for this - treat it as a notification
self._notification.append(line)
if self.serial.inWaiting() == 0:
# No more chars on the way for this notification - notify higher-level callback
#print 'notification:', self._notification
self.log.debug('notification: %s', self._notification)
self.notifyCallback(self._notification)
self._notification = []
def _placeholderCallback(self, *args, **kwargs):
""" Placeholder callback function (does nothing) """
def _readLoop(self):
""" Read thread main loop
Reads lines from the connected device
"""
try:
readTermSeq = list(self.RX_EOL_SEQ)
readTermLen = len(readTermSeq)
rxBuffer = []
while self.alive:
data = self.serial.read(1)
if data != '': # check for timeout
#print >> sys.stderr, ' RX:', data,'({0})'.format(ord(data))
rxBuffer.append(data)
if rxBuffer[-readTermLen:] == readTermSeq:
# A line (or other logical segment) has been read
line = ''.join(rxBuffer[:-readTermLen])
rxBuffer = []
if len(line) > 0:
#print 'calling handler'
self._handleLineRead(line)
elif self._expectResponseTermSeq:
if rxBuffer[-len(self._expectResponseTermSeq):] == self._expectResponseTermSeq:
line = ''.join(rxBuffer)
rxBuffer = []
self._handleLineRead(line, checkForResponseTerm=False)
#else:
#' <RX timeout>'
except serial.SerialException as e:
self.alive = False
try:
self.serial.close()
except Exception: #pragma: no cover
pass
# Notify the fatal error handler
self.fatalErrorCallback(e)
def write(self, data, waitForResponse=True, timeout=5, expectedResponseTermSeq=None):
with self._txLock:
if waitForResponse:
if expectedResponseTermSeq:
self._expectResponseTermSeq = list(expectedResponseTermSeq)
self._response = []
self._responseEvent = threading.Event()
self.serial.write(data)
if self._responseEvent.wait(timeout):
self._responseEvent = None
self._expectResponseTermSeq = False
return self._response
else: # Response timed out
self._responseEvent = None
self._expectResponseTermSeq = False
if len(self._response) > 0:
# Add the partial response to the timeout exception
raise TimeoutException(self._response)
else:
raise TimeoutException()
else:
self.serial.write(data)
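
# Illustrative usage sketch (an assumption, not part of this module): open the
# modem port, issue an AT command and wait for its response lines, then close.
# "handle_notification" stands for any callable that accepts a list of lines.
#
#     comms = SerialComms('/dev/ttyUSB0', 115200,
#                         notifyCallbackFunc=handle_notification)
#     comms.connect()
#     response = comms.write('AT\r')  # e.g. ['OK'] on success
#     comms.close()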
| gpl-2.0 | 1,671,215,766,286,385,200 | 44.751773 | 130 | 0.554643 | false |
operasoftware/presto-testo | css/image-fit/reftests/img-svg-tall-viewBox/build.py | 4 | 4649 | #!/usr/bin/python
import sys
import os
sys.path.insert(0, os.path.abspath("../../include/"))
import allpairs
imgfilename = 'tall-viewBox.svg'
reffilename = 'tall-viewBox-none.svg'
imgwidth = 160.0
imgheight = 240.0
test_template = """<!doctype html>
<!-- This file is generated by build.py. -->
<title>img %s; %s</title>
<link rel="stylesheet" href="../../support/reftests.css">
<style>
#test > * { %s }
</style>
<div id="test">
<img src="../../support/%s">
</div>
"""
ref_template = """<!doctype html>
<!-- This file is generated by build.py. -->
<title>Reference for img %s; %s</title>
<link rel="stylesheet" href="../../support/reftests.css">
<style>
.helper { overflow:%s }
.helper > * { %s }
</style>
<div id="ref">
<span class="helper"><img src="../../support/%s"></span>
</div>
"""
reftest_list = ''
ref_hashes = {}
for overflow,fit,x,y in allpairs.tests:
refx = refy = ''
testx = x
testy = y
xx = x
if x.find('%') != -1:
xx = x[:-1]
yy = y
if y.find('%') != -1:
yy = y[:-1]
# reference dimensions
if fit == 'none':
refdims = 'width:160px; height:240px'
centerx = 100 - (imgwidth/2)
centery = 100 - (imgheight/2)
if fit == 'fill':
refdims = 'width:200px; height:200px'
centerx = 0.0
centery = 0.0
elif fit == 'contain' or fit == 'auto':
refdims = 'width:133.33333px; height:200px'
centerx = 100 - ((imgwidth * 200 / imgheight)/2)
centery = 0.0
elif fit == 'cover':
refdims = 'width:200px; height:300px'
centerx = 0.0
centery = 100 - ((imgheight * 200 / imgwidth)/2)
centerx = 'left:'+str(centerx)+'px'
centery = 'top:'+str(centery)+'px'
# reference position
invalid = False
# invalid cases use center center
if ((xx != x or x in ('1em', '30px', '2cm')) and (y in ('left', 'right')) or
(yy != y or y in ('1em', '30px', '2cm')) and (x in ('top', 'bottom')) or
(x == 'top' and y == 'bottom') or (x == 'bottom' and y == 'top') or
(x == 'left' and y == 'right') or (x == 'right' and y == 'left') or
x == y == 'left' or x == y == 'right' or x == y == 'top' or x == y == 'bottom'):
refx = centerx
refy = centery
invalid = True
# valid cases
elif fit == 'auto': # 'object-fit: auto' in SVG means ignore object-position
refx = centerx
refy = centery
else:
# normalize the order
if (x in ('top', 'center', 'bottom') and y in ('left' , 'center', 'right')):
x, y = y, x
# x
# center
if x == '50%' or x == 'center' or x == '':
refx = centerx
# left
elif x == '0%' or x == 'left':
refx = 'left:0'
# right
elif x == '100%' or x == 'right':
refx = 'right:0'
# lengths
elif x == '1em' or x == '30px' or x == '2cm':
refx = 'left:'+x
# y
# center
if y == '50%' or y == 'center':
refy = centery
# top
elif y == '0%' or y == 'top':
refy = 'top:0'
# bottom
elif y == '100%' or y == 'bottom':
refy = 'bottom:0'
# lengths
elif y == '1em' or y == '30px' or y == '2cm':
refy = 'top:'+y
# single keyword
elif y == '':
# y value in x
if x == 'top':
refx = centerx
refy = 'top:0'
elif x == 'bottom':
refx = centerx
refy = 'bottom:0'
# x value in x
else:
refy = centery
test_filename = "%s_%s_%s_%s.html" % (overflow, fit, xx, yy)
style = "overflow:%s; -o-object-fit:%s; -o-object-position:%s %s" % (overflow, fit, testx, testy)
if invalid:
style += " /* INVALID */"
test_file = open(test_filename, 'w')
test_file.write(test_template % (imgfilename, style, style, imgfilename))
test_file.close()
refstyle = "%s; %s; %s" % (refx, refy, refdims)
if [v for k, v in ref_hashes.iteritems() if k == overflow+refstyle] == []:
ref_filename = "%s_%s_%s_%s-ref.html" % (overflow, fit, xx, yy)
ref_hashes[overflow+refstyle] = ref_filename
ref_file = open(ref_filename, 'w')
ref_file.write(ref_template % (imgfilename, style, overflow, refstyle, reffilename))
ref_file.close()
else:
ref_filename = ref_hashes[overflow+refstyle]
reftest_list += '== ' + test_filename + ' ' + ref_filename + '\n'
list_file = open('reftest.list', 'w')
list_file.write(reftest_list)
list_file.close()
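
# Illustrative excerpt of a generated reftest.list line (an assumption -- the
# concrete overflow/fit/position values come from allpairs.tests, and '%' is
# stripped from percentages when building file names):
#
#   == hidden_contain_50_50.html hidden_contain_50_50-ref.html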
| bsd-3-clause | -4,559,569,907,050,102,300 | 29.993333 | 101 | 0.502259 | false |
fedora-conary/rbuild | rbuild_test/unit_test/pluginstest/rebasetest.py | 1 | 10982 | #!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from rbuild_test import rbuildhelp
from testutils import mock
class Rebase(rbuildhelp.RbuildHelper):
def testRebaseCommandParsing(self):
handle = self.getRbuildHandle(mock.MockObject())
handle.Rebase.registerCommands()
handle.Rebase.initialize()
cmd = handle.Commands.getCommandClass('rebase')()
mock.mockMethod(handle.Rebase.rebaseProduct)
cmd.runCommand(handle, {}, ['rbuild', 'rebase', 'localhost@rpl:1'])
handle.Rebase.rebaseProduct._mock.assertCalled(
interactive=False, label='localhost@rpl:1', test=False)
cmd.runCommand(handle, {}, ['rbuild', 'rebase'])
handle.Rebase.rebaseProduct._mock.assertCalled(
interactive=False, label=None, test=False)
def testRebaseCommandArgParsing(self):
self.getRbuildHandle()
self.checkRbuild('rebase --interactive',
'rbuild_plugins.rebase.RebaseCommand.runCommand',
[None, None, {'interactive' : True},
['rbuild', 'rebase']])
self.checkRbuild('rebase --test',
'rbuild_plugins.rebase.RebaseCommand.runCommand',
[None, None, {'test' : True},
['rbuild', 'rebase']])
def testRebaseProduct(self):
handle = self.getRbuildHandle(mock.MockObject())
mock.mock(handle, 'ui')
mock.mockMethod(handle.facade.conary._getConaryClient)
conaryClient = handle.facade.conary._getConaryClient()
handle.product = mock.MockObject()
handle.product._mock.set(preMigrateVersion='2.0')
platVers = [
'platform-definition=/conary.rpath.com@rpl:2/1.0-1',
'platform-definition=/conary.rpath.com@rpl:2/1.1-2']
handle.product.getPlatformSourceTrove._mock.setReturns(platVers)
mock.mockMethod(handle.Rebase._getrBuilderProductDefinitionSchemaVersion)
handle.Rebase._getrBuilderProductDefinitionSchemaVersion._mock.setDefaultReturn('4.0')
mock.mockMethod(handle.Rebase._raiseErrorIfModified)
handle.Rebase._raiseErrorIfModified._mock.setDefaultReturn(None)
mock.mockMethod(handle.Rebase._raiseErrorIfConflicts)
handle.Rebase._raiseErrorIfConflicts._mock.setDefaultReturn(None)
handle.productStore.getProductDefinitionDirectory._mock.setDefaultReturn('/proddir')
handle.Rebase.rebaseProduct(label='localhost@rpl:1')
handle.product.rebase._mock.assertCalled(conaryClient,
label='localhost@rpl:1')
handle.product.saveToRepository._mock.assertCalled(conaryClient,
version='4.0')
# should be called twice (RBLD-155)
handle.productStore.update._mock.assertCalled()
handle.productStore.update._mock.assertCalled()
handle.Rebase._getrBuilderProductDefinitionSchemaVersion._mock.assertCalled('2.0')
# should be called once (RBLD-164)
handle.Rebase._raiseErrorIfModified._mock.assertCalled('/proddir')
# should be called two times (RBLD-164)
handle.Rebase._raiseErrorIfConflicts._mock.assertCalled('/proddir')
handle.Rebase._raiseErrorIfConflicts._mock.assertCalled('/proddir')
handle.ui.info._mock.assertCalled(
'Update %s -> %s', platVers[0], platVers[1].split('/')[-1])
# test a rebase to a new platform
platVers = [
'platform-definition=/conary.rpath.com@rpl:2/1.0-1',
'platform-definition=/unrelated.foobar.com@rpl:2/1.1-2']
handle.product.getPlatformSourceTrove._mock.setReturns(platVers)
handle.Rebase.rebaseProduct(label='unrelated.foobar.com@rpl:2')
handle.ui.info._mock.assertCalled(
'Update %s -> %s', platVers[0], platVers[1].split('=')[-1][1:])
class sp:
def __init__(self, n,l,v):
self.troveName = n
self.label = l
self.version = v
# test searchPath change with no platdef change (RBLD-316)
handle.product.saveToRepository._mock.popCall()
platVers = [
'platform-definition=/conary.rpath.com@rpl:2/1.1-3',
'platform-definition=/conary.rpath.com@rpl:2/1.1-3']
handle.product.getPlatformSourceTrove._mock.setReturns(platVers)
searchPaths = [
(sp('group-foo', 'a@b:c', '1'), sp('group-bar', 'd@e:f', '1')),
(sp('group-foo', 'a@b:c', '2'), sp('group-bar', 'd@e:f', '3')),
]
handle.product.getSearchPaths._mock.setReturns(searchPaths)
handle.Rebase.rebaseProduct(test=True)
handle.ui.info._mock.assertCalled(
'Update search path from:\n%s\nto:\n%s',
' group-foo=a@b:c/1\n'
' group-bar=d@e:f/1',
' group-foo=a@b:c/2\n'
' group-bar=d@e:f/3'
)
handle.product.saveToRepository._mock.assertNotCalled()
# test searchPath change with platdef change (RBLD-316)
platVers = [
'platform-definition=/conary.rpath.com@rpl:2/1.1-3',
'platform-definition=/conary.rpath.com@rpl:2/1.1-4']
handle.product.getPlatformSourceTrove._mock.setReturns(platVers)
searchPaths = [
(sp('group-foo', 'a@b:c', '1'), sp('group-bar', 'd@e:f', '1')),
(sp('group-foo', 'a@b:c', '2'), sp('group-bar', 'd@e:f', '3')),
]
handle.product.getSearchPaths._mock.setReturns(searchPaths)
handle.ui.getYn._mock.setDefaultReturn(False)
handle.product.saveToRepository._mock.assertNotCalled()
handle.Rebase.rebaseProduct(interactive=True)
handle.ui.info._mock.assertCalled(
'Update search path from:\n%s\nto:\n%s',
' group-foo=a@b:c/1\n'
' group-bar=d@e:f/1',
' group-foo=a@b:c/2\n'
' group-bar=d@e:f/3'
)
handle.ui.info._mock.assertCalled(
'Update %s -> %s', platVers[0], platVers[1].split('/')[-1])
handle.ui.info._mock.assertNotCalled()
handle.product.saveToRepository._mock.assertNotCalled()
def testOldProddefSchemaHandling(self):
# This test will need to be removed when rpath-product-definition
# 4.0 is fully retired and the tested backward compatibility code
# is removed from rbuild.
# Lots of work required to avoid preMigrateVersion existing...
handle = self.getRbuildHandle(mock.MockObject())
class product: pass
handle.productStore = mock.MockObject()
handle.product = product()
handle.product.rebase = mock.MockObject()
handle.product.saveToRepository = mock.MockObject()
handle.product.getPlatformSourceTrove = mock.MockObject()
handle.product.getSearchPaths = mock.MockObject()
handle.facade = mock.MockObject()
conaryClient = handle.facade.conary._getConaryClient()
mock.mockMethod(handle.Rebase._raiseErrorIfConflicts)
handle.Rebase.rebaseProduct()
handle.product.saveToRepository._mock.assertCalled(conaryClient)
def testRaiseErrorIfProddefSchemaIncompatible(self):
handle = self.getRbuildHandle()
from rbuild_plugins.rebase import proddef
from rbuild_plugins.rebase import IncompatibleProductDefinitionError
from rbuild_plugins.rebase import OlderProductDefinitionError
rbuilder = handle.facade.rbuilder
mock.mockMethod(rbuilder.getProductDefinitionSchemaVersion)
mock.mock(proddef, 'ProductDefinition')
proddef.ProductDefinition._mock.set(version='4.0')
# client newer than server, no change in schema version
rbuilder.getProductDefinitionSchemaVersion._mock.setReturn('2.0')
self.failUnlessEqual('2.0',
handle.Rebase._getrBuilderProductDefinitionSchemaVersion('2.0'))
# client newer than server, change in schema version
self.failUnlessEqual('2.0',
handle.Rebase._getrBuilderProductDefinitionSchemaVersion('1.0'))
# client same version as server
rbuilder.getProductDefinitionSchemaVersion._mock.setReturn('4.0')
self.failUnlessEqual('4.0',
handle.Rebase._getrBuilderProductDefinitionSchemaVersion('4.0'))
# client older than server
rbuilder.getProductDefinitionSchemaVersion._mock.setReturn('5.0')
self.failUnlessRaises(OlderProductDefinitionError,
handle.Rebase._getrBuilderProductDefinitionSchemaVersion,
'4.0')
self.failUnlessRaises(IncompatibleProductDefinitionError,
handle.Rebase._getrBuilderProductDefinitionSchemaVersion,
'4.0')
def testRaiseErrorIfModified(self):
handle = self.getRbuildHandle(mock.MockObject())
from rbuild_plugins.rebase import ModifiedFilesError
mock.mockMethod(handle.Rebase._modifiedFileNames)
handle.Rebase._modifiedFileNames._mock.setDefaultReturn('/proddir/1')
self.assertRaises(ModifiedFilesError,
handle.Rebase._raiseErrorIfModified, '/proddir')
handle.Rebase._modifiedFileNames._mock.setDefaultReturn(None)
handle.Rebase._raiseErrorIfModified('/proddir')
def testRaiseErrorIfConflicts(self):
handle = self.getRbuildHandle(mock.MockObject())
from rbuild_plugins.rebase import FileConflictsError
mock.mockMethod(handle.Rebase._fileConflictNames)
handle.Rebase._fileConflictNames._mock.setDefaultReturn(
['/proddir/1.conflicts'])
self.assertRaises(FileConflictsError,
handle.Rebase._raiseErrorIfConflicts, '/proddir')
handle.Rebase._fileConflictNames._mock.setDefaultReturn(None)
handle.Rebase._raiseErrorIfConflicts('/proddir')
def testModifiedFileNames(self):
handle = self.getRbuildHandle()
cf = handle.facade.conary
mock.mockMethod(cf.getCheckoutStatus)
cf.getCheckoutStatus._mock.setReturn((('A', '/p/1'), ('M', '/p/2')),
'/p')
self.assertEquals(handle.Rebase._modifiedFileNames('/p'), ['/p/2'])
def testFileConflictNames(self):
handle = self.getRbuildHandle()
file(self.workDir+'/foo', 'w')
file(self.workDir+'/foo.conflicts', 'w')
self.assertEquals(handle.Rebase._fileConflictNames(self.workDir),
['foo.conflicts'])
| apache-2.0 | -6,248,334,375,915,877,000 | 45.533898 | 94 | 0.651521 | false |
ytjiang/django | tests/gis_tests/test_spatialrefsys.py | 21 | 4912 | import unittest
from django.contrib.gis.gdal import HAS_GDAL
from django.db import connection
from django.test import skipUnlessDBFeature
from django.utils import six
from .utils import SpatialRefSys, oracle, postgis, spatialite
test_srs = ({
'srid': 4326,
'auth_name': ('EPSG', True),
'auth_srid': 4326,
# Only the beginning, because there are differences depending on installed libs
'srtext': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84"',
# +ellps=WGS84 has been removed in the 4326 proj string in proj-4.8
'proj4_re': r'\+proj=longlat (\+ellps=WGS84 )?(\+datum=WGS84 |\+towgs84=0,0,0,0,0,0,0 )\+no_defs ',
'spheroid': 'WGS 84', 'name': 'WGS 84',
'geographic': True, 'projected': False, 'spatialite': True,
# From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'ellipsoid': (6378137.0, 6356752.3, 298.257223563),
'eprec': (1, 1, 9),
}, {
'srid': 32140,
'auth_name': ('EPSG', False),
'auth_srid': 32140,
'srtext': (
'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",'
'DATUM["North_American_Datum_1983",SPHEROID["GRS 1980"'
),
'proj4_re': r'\+proj=lcc \+lat_1=30.28333333333333 \+lat_2=28.38333333333333 \+lat_0=27.83333333333333 '
r'\+lon_0=-99 \+x_0=600000 \+y_0=4000000 (\+ellps=GRS80 )?'
r'(\+datum=NAD83 |\+towgs84=0,0,0,0,0,0,0 )?\+units=m \+no_defs ',
'spheroid': 'GRS 1980', 'name': 'NAD83 / Texas South Central',
'geographic': False, 'projected': True, 'spatialite': False,
# From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'ellipsoid': (6378137.0, 6356752.31414, 298.257222101),
'eprec': (1, 5, 10),
})
@unittest.skipUnless(HAS_GDAL, "SpatialRefSysTest needs gdal support")
@skipUnlessDBFeature("has_spatialrefsys_table")
class SpatialRefSysTest(unittest.TestCase):
def test_retrieve(self):
"""
Test retrieval of SpatialRefSys model objects.
"""
for sd in test_srs:
srs = SpatialRefSys.objects.get(srid=sd['srid'])
self.assertEqual(sd['srid'], srs.srid)
# Some of the authority names are borked on Oracle, e.g., SRID=32140.
            # Also, Oracle Spatial seems to add extraneous info to fields, hence
            # the testing with the 'startswith' flag.
auth_name, oracle_flag = sd['auth_name']
if postgis or (oracle and oracle_flag):
self.assertEqual(True, srs.auth_name.startswith(auth_name))
self.assertEqual(sd['auth_srid'], srs.auth_srid)
# No proj.4 and different srtext on oracle backends :(
if postgis:
self.assertTrue(srs.wkt.startswith(sd['srtext']))
six.assertRegex(self, srs.proj4text, sd['proj4_re'])
def test_osr(self):
"""
Test getting OSR objects from SpatialRefSys model objects.
"""
for sd in test_srs:
sr = SpatialRefSys.objects.get(srid=sd['srid'])
self.assertEqual(True, sr.spheroid.startswith(sd['spheroid']))
self.assertEqual(sd['geographic'], sr.geographic)
self.assertEqual(sd['projected'], sr.projected)
if not (spatialite and not sd['spatialite']):
# Can't get 'NAD83 / Texas South Central' from PROJ.4 string
# on SpatiaLite
self.assertEqual(True, sr.name.startswith(sd['name']))
# Testing the SpatialReference object directly.
if postgis or spatialite:
srs = sr.srs
six.assertRegex(self, srs.proj4, sd['proj4_re'])
# No `srtext` field in the `spatial_ref_sys` table in SpatiaLite < 4
if not spatialite or connection.ops.spatial_version[0] >= 4:
self.assertTrue(srs.wkt.startswith(sd['srtext']))
def test_ellipsoid(self):
"""
Test the ellipsoid property.
"""
for sd in test_srs:
# Getting the ellipsoid and precision parameters.
ellps1 = sd['ellipsoid']
prec = sd['eprec']
# Getting our spatial reference and its ellipsoid
srs = SpatialRefSys.objects.get(srid=sd['srid'])
ellps2 = srs.ellipsoid
for i in range(3):
self.assertAlmostEqual(ellps1[i], ellps2[i], prec[i])
@skipUnlessDBFeature('supports_add_srs_entry')
def test_add_entry(self):
"""
Test adding a new entry in the SpatialRefSys model using the
add_srs_entry utility.
"""
from django.contrib.gis.utils import add_srs_entry
add_srs_entry(3857)
self.assertTrue(
SpatialRefSys.objects.filter(srid=3857).exists()
)
srs = SpatialRefSys.objects.get(srid=3857)
self.assertTrue(
SpatialRefSys.get_spheroid(srs.wkt).startswith('SPHEROID[')
)
| bsd-3-clause | 1,315,265,438,731,829,000 | 39.262295 | 108 | 0.599145 | false |
ozgurturkiye/istihzapython | 000.JustForFunPrograms/008_decimal_to_binary/character_to_bit.py | 1 | 1865 | +r"""008_binary_kodlama_sistemi - Program hakkında açıklamalar -
+
+Türk alfabesindeki (sadece büyük harf) harfleri ve boşluk karakterini
+basit bir kodlama sistemine göre binary karşılıklarını yazan kodlar.
+
+Onlu sayma sistemindeki sayıların ikili sayma sistemindeki
+karşılıklarını yazdırmaktan başka birşey yapmıyoruz aslında :)
+
+Geliştirmek için hazır bir sözlük kullanmak yerine anlık olarak
+onlu sistemdeki sayıyı; ikili sistemdeki sayıya çevirilebilir.
+
+"""
+
+sayBitSoz = {" ": "00000000", ## Boşluk
+ "A": "00000001", ## 1
+ "B": "00000010", ## 2
+ "C": "00000011", ## 3
+ "Ç": "00000100", ## 4
+ "D": "00000101", ## 5
+ "E": "00000110", ## 6
+ "F": "00000111", ## 7
+ "G": "00001000", ## 8
+ "Ğ": "00001001", ## 9
+ "H": "00001010", ## 10
+ "I": "00001011", ## 11
+ "İ": "00001100", ## 12
+ "J": "00001101", ## 13
+ "K": "00001110", ## 14
+ "L": "00001111", ## 15
+ "M": "00010000", ## 16
+ "N": "00010001", ## 17
+ "O": "00010010", ## 18
+ "Ö": "00010011", ## 19
+ "P": "00010100", ## 20
+ "R": "00010101", ## 21
+ "S": "00010110", ## 22
+ "Ş": "00010111", ## 23
+ "T": "00011000", ## 24
+ "U": "00011001", ## 25
+ "Ü": "00011010", ## 26
+ "V": "00011011", ## 27
+ "Y": "00011100", ## 28
+ "Z": "00011101" ## 29
+
+ }
+
+kardiz = input("Ad Soyad...:")
+
+for karakter in kardiz:
+ print(karakter, sayBitSoz[karakter])
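
## Illustrative sketch of the improvement suggested in the docstring above
## (an assumption, not part of the original program): the position of each
## letter in an alphabet string can be converted to binary on the fly with
## format(), instead of maintaining the hand-written dictionary.
##
##     harfler = " ABCÇDEFGĞHIİJKLMNOÖPRSŞTUÜVYZ"
##     for karakter in kardiz:
##         print(karakter, format(harfler.index(karakter), "08b"))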
| gpl-3.0 | 2,501,016,351,503,522,000 | 35.46 | 71 | 0.429512 | false |
bumfo/sublime-real-javascript | libs/js-beautify/python/cssbeautifier/tests/test.py | 7 | 2850 | import unittest
import cssbeautifier
class CSSBeautifierTest(unittest.TestCase):
def resetOptions(self):
self.options = cssbeautifier.default_options()
self.options.indent_size = 1
self.options.indent_char = '\t'
self.options.selector_separator_newline = True
self.options.end_with_newline = True
def testBasics(self):
self.resetOptions()
t = self.decodesto
t("", "\n")
t(".tabs{}", ".tabs {}\n")
t(".tabs{color:red}", ".tabs {\n\tcolor: red\n}\n")
t(".tabs{color:rgb(255, 255, 0)}", ".tabs {\n\tcolor: rgb(255, 255, 0)\n}\n")
t(".tabs{background:url('back.jpg')}", ".tabs {\n\tbackground: url('back.jpg')\n}\n")
t("#bla, #foo{color:red}", "#bla,\n#foo {\n\tcolor: red\n}\n")
t("@media print {.tab{}}", "@media print {\n\t.tab {}\n}\n")
def testComments(self):
self.resetOptions()
t = self.decodesto
t("/* test */", "/* test */\n\n")
t(".tabs{/* test */}", ".tabs {\n\t/* test */\n}\n")
t("/* header */.tabs {}", "/* header */\n\n.tabs {}\n")
#single line comment support (less/sass)
t(".tabs{\n// comment\nwidth:10px;\n}", ".tabs {\n\t// comment\n\twidth: 10px;\n}\n")
t(".tabs{// comment\nwidth:10px;\n}", ".tabs {\n\t// comment\n\twidth: 10px;\n}\n")
t("//comment\n.tabs{width:10px;}", "//comment\n.tabs {\n\twidth: 10px;\n}\n")
t(".tabs{//comment\n//2nd single line comment\nwidth:10px;}", ".tabs {\n\t//comment\n\t//2nd single line comment\n\twidth: 10px;\n}\n")
t(".tabs{width:10px;//end of line comment\n}", ".tabs {\n\twidth: 10px;//end of line comment\n}\n")
t(".tabs{width:10px;//end of line comment\nheight:10px;}", ".tabs {\n\twidth: 10px;//end of line comment\n\theight: 10px;\n}\n")
t(".tabs{width:10px;//end of line comment\nheight:10px;//another\n}", ".tabs {\n\twidth: 10px;//end of line comment\n\theight: 10px;//another\n}\n")
def testSeperateSelectors(self):
self.resetOptions()
t = self.decodesto
t("#bla, #foo{color:red}", "#bla,\n#foo {\n\tcolor: red\n}\n")
t("a, img {padding: 0.2px}", "a,\nimg {\n\tpadding: 0.2px\n}\n")
def testOptions(self):
self.resetOptions()
self.options.indent_size = 2
self.options.indent_char = ' '
self.options.selector_separator_newline = False
t = self.decodesto
t("#bla, #foo{color:green}", "#bla, #foo {\n color: green\n}\n")
t("@media print {.tab{}}", "@media print {\n .tab {}\n}\n")
t("#bla, #foo{color:black}", "#bla, #foo {\n color: black\n}\n")
def decodesto(self, input, expectation=None):
self.assertEqual(
cssbeautifier.beautify(input, self.options), expectation or input)
if __name__ == '__main__':
unittest.main()
| mit | -934,259,057,452,923,000 | 40.304348 | 156 | 0.562456 | false |
chen0031/Dato-Core | src/unity/python/graphlab/test/test_connect.py | 13 | 7539 | '''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
import graphlab
import mock
import os
import random
import stat
import tempfile
import unittest
import logging
from graphlab.connect import main as glconnect
from graphlab.connect import server
from graphlab.test.util import SubstringMatcher
from graphlab.test.util import create_server, start_test_tcp_server
class ConnectLocalTests(unittest.TestCase):
def test_launch(self):
#default launch
glconnect.launch()
self.assertTrue(glconnect.is_connected())
glconnect.stop()
self.assertFalse(glconnect.is_connected())
#launch with server address
tmpname = tempfile.NamedTemporaryFile().name
tmpaddr = 'ipc://' + tmpname
glconnect.launch(tmpaddr)
self.assertTrue(glconnect.is_connected())
glconnect.stop()
self.assertFalse(glconnect.is_connected())
#check that the ipc file gets deleted
self.assertFalse(os.path.exists(tmpname))
#launch address and binary
graphlab_bin = os.getenv("GRAPHLAB_UNITY")
glconnect.launch(server_addr=tmpaddr,
server_bin=graphlab_bin)
self.assertTrue(glconnect.is_connected())
glconnect.stop()
self.assertFalse(glconnect.is_connected())
self.assertFalse(os.path.exists(tmpname))
@mock.patch('graphlab.connect.main.__LOGGER__')
def test_launch_with_exception(self, mock_logging):
# Assert warning logged when launching without stopping
glconnect.launch()
glconnect.launch()
self.assertTrue(mock_logging.warning.called_once_with(SubstringMatcher(containing="existing server")))
self.assertTrue(glconnect.is_connected())
glconnect.stop()
self.assertFalse(glconnect.is_connected())
# launch with bogus server binary (path is not executable)
with tempfile.NamedTemporaryFile() as f:
random_server_bin = f.name
glconnect.launch(server_bin=random_server_bin)
self.assertTrue(mock_logging.error.called_once_with(SubstringMatcher(containing="Invalid server binary")))
self.assertFalse(glconnect.is_connected())
#launch with server address without permission
tmpaddr = 'ipc:///root/bad_server'
glconnect.launch(tmpaddr)
self.assertTrue(mock_logging.warning.called_once_with(SubstringMatcher(containing="communication error")))
self.assertFalse(glconnect.is_connected())
glconnect.stop()
# launch with binary that does not exist
tmpname = tempfile.NamedTemporaryFile().name
glconnect.launch(server_bin=tmpname)
self.assertTrue(mock_logging.error.called_once_with(SubstringMatcher(containing="Invalid server binary")))
self.assertFalse(glconnect.is_connected())
# launch with bogus server binary (path is a faked executable)
with tempfile.NamedTemporaryFile() as f:
os.chmod(f.name, stat.S_IXUSR)
random_server_bin = f.name
glconnect.launch(server_bin=random_server_bin)
self.assertTrue(mock_logging.error.called_once_with(SubstringMatcher(containing="Invalid server binary")))
self.assertFalse(glconnect.is_connected())
# TODO:: launch with bad server binary (takes too long to start or does not connect)
class ConnectRemoteTests(unittest.TestCase):
def test_launch_to_ipc(self):
ipc_addr = 'ipc://' + tempfile.NamedTemporaryFile().name
auth_token = 'graphlab_awesome'
ipc_server = create_server(ipc_addr, auth_token)
ipc_server.start()
#default local launch
glconnect.launch()
self.assertTrue(glconnect.is_connected())
glconnect.stop()
self.assertFalse(glconnect.is_connected())
#launch with remote server ipc address
glconnect.launch(ipc_addr, auth_token=auth_token)
self.assertTrue(glconnect.is_connected())
self.assertTrue(isinstance(glconnect.get_server(), server.RemoteServer))
glconnect.stop()
self.assertFalse(glconnect.is_connected())
#launch with remote server addr, and server_bin(ignored)
glconnect.launch(ipc_addr, os.getenv("GRAPHLAB_UNITY"), auth_token=auth_token)
self.assertTrue(glconnect.is_connected())
self.assertTrue(isinstance(glconnect.get_server(), server.RemoteServer))
glconnect.stop()
self.assertFalse(glconnect.is_connected())
ipc_server.stop()
def test_launch_to_tcp(self):
auth_token = 'graphlab_awesome'
tcp_server = start_test_tcp_server(auth_token)
#launch with remote server tcp address
glconnect.launch(tcp_server.get_server_addr(), auth_token=auth_token)
self.assertTrue(glconnect.is_connected())
self.assertTrue(isinstance(glconnect.get_server(), server.RemoteServer))
glconnect.stop()
self.assertFalse(glconnect.is_connected())
tcp_server.stop()
@mock.patch('graphlab.connect.main.__LOGGER__')
def test_launch_with_exception(self, mock_logging):
ipc_addr = 'ipc://' + tempfile.NamedTemporaryFile().name
auth_token = 'graphlab_awesome'
ipc_server = create_server(ipc_addr, auth_token)
ipc_server.start()
#default launch without stopping
glconnect.launch(server_addr=ipc_addr)
glconnect.launch()
self.assertTrue(mock_logging.warning.called_once_with(SubstringMatcher(containing="existing server")))
self.assertTrue(glconnect.is_connected())
glconnect.stop()
self.assertFalse(glconnect.is_connected())
        # launch with bogus server path (path is not listened on by the server)
with tempfile.NamedTemporaryFile() as f:
glconnect.launch(server_addr=('ipc://' + f.name))
self.assertTrue(mock_logging.warning.called_once_with(SubstringMatcher(containing="communication failure")))
self.assertFalse(glconnect.is_connected())
@mock.patch('graphlab.connect.main.__LOGGER__')
def test_secure_communication(self, mock_logging):
SERVER_PUBLIC_KEY = "Ee4##T$OmI4]hzyKqZT@H&Fixt95^.72&%MK!UR."
SERVER_SECRET_KEY = "lIn2Szq0.mpPiB<N)t6fR2/4^4&wYnFs-x72HlTz"
# Non-error case
ipc_addr = 'ipc://' + tempfile.NamedTemporaryFile().name
server = create_server(ipc_addr, public_key=SERVER_PUBLIC_KEY, secret_key=SERVER_SECRET_KEY)
server.start()
glconnect.launch(server_addr=ipc_addr, server_public_key=SERVER_PUBLIC_KEY)
self.assertTrue(glconnect.is_connected())
glconnect.stop()
self.assertFalse(glconnect.is_connected())
# Tests with bogus key
BOGUS_KEY = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
ipc_addr = 'ipc://' + tempfile.NamedTemporaryFile().name
server = create_server(ipc_addr, public_key=BOGUS_KEY, secret_key=SERVER_SECRET_KEY)
try:
server.start()
except:
pass
else:
self.fail("Server started with bogus key.")
ipc_addr = 'ipc://' + tempfile.NamedTemporaryFile().name
server = create_server(ipc_addr, public_key=SERVER_PUBLIC_KEY, secret_key=BOGUS_KEY)
try:
server.start()
except:
pass
else:
self.fail("Server started with bogus key.")
| agpl-3.0 | 7,017,280,372,514,797,000 | 40.196721 | 120 | 0.666932 | false |
bernardokyotoku/skillplant | django/contrib/gis/geos/prototypes/geom.py | 12 | 4584 | from ctypes import c_char_p, c_int, c_size_t, c_ubyte, c_uint, POINTER
from django.contrib.gis.geos.libgeos import CS_PTR, GEOM_PTR, PREPGEOM_PTR, GEOS_PREPARE
from django.contrib.gis.geos.prototypes.errcheck import \
check_geom, check_minus_one, check_sized_string, check_string, check_zero
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
# This is the return type used by binary output (WKB, HEX) routines.
c_uchar_p = POINTER(c_ubyte)
# We create a simple subclass of c_char_p here because when the response
# type is set to c_char_p, you get a _Python_ string and there's no way
# to access the string's address inside the error checking function.
# In other words, you can't free the memory allocated inside GEOS. Previously,
# the return type would just be omitted and the integer address would be
# used -- but this allows us to be specific in the function definition and
# keeps the reference so it may be free'd.
class geos_char_p(c_char_p):
pass
### ctypes generation functions ###
def bin_constructor(func):
"Generates a prototype for binary construction (HEX, WKB) GEOS routines."
func.argtypes = [c_char_p, c_size_t]
func.restype = GEOM_PTR
func.errcheck = check_geom
return func
# HEX & WKB output
def bin_output(func):
"Generates a prototype for the routines that return a a sized string."
func.argtypes = [GEOM_PTR, POINTER(c_size_t)]
func.errcheck = check_sized_string
func.restype = c_uchar_p
return func
def geom_output(func, argtypes):
"For GEOS routines that return a geometry."
if argtypes: func.argtypes = argtypes
func.restype = GEOM_PTR
func.errcheck = check_geom
return func
def geom_index(func):
"For GEOS routines that return geometries from an index."
return geom_output(func, [GEOM_PTR, c_int])
def int_from_geom(func, zero=False):
"Argument is a geometry, return type is an integer."
func.argtypes = [GEOM_PTR]
func.restype = c_int
if zero:
func.errcheck = check_zero
else:
func.errcheck = check_minus_one
return func
def string_from_geom(func):
"Argument is a Geometry, return type is a string."
func.argtypes = [GEOM_PTR]
func.restype = geos_char_p
func.errcheck = check_string
return func
### ctypes prototypes ###
# Deprecated creation routines from WKB, HEX, WKT
from_hex = bin_constructor(GEOSFunc('GEOSGeomFromHEX_buf'))
from_wkb = bin_constructor(GEOSFunc('GEOSGeomFromWKB_buf'))
from_wkt = geom_output(GEOSFunc('GEOSGeomFromWKT'), [c_char_p])
# Deprecated output routines
to_hex = bin_output(GEOSFunc('GEOSGeomToHEX_buf'))
to_wkb = bin_output(GEOSFunc('GEOSGeomToWKB_buf'))
to_wkt = string_from_geom(GEOSFunc('GEOSGeomToWKT'))
# The GEOS geometry type, typeid, num_coordinates and number of geometries
geos_normalize = int_from_geom(GEOSFunc('GEOSNormalize'))
geos_type = string_from_geom(GEOSFunc('GEOSGeomType'))
geos_typeid = int_from_geom(GEOSFunc('GEOSGeomTypeId'))
get_dims = int_from_geom(GEOSFunc('GEOSGeom_getDimensions'), zero=True)
get_num_coords = int_from_geom(GEOSFunc('GEOSGetNumCoordinates'))
get_num_geoms = int_from_geom(GEOSFunc('GEOSGetNumGeometries'))
# Geometry creation factories
create_point = geom_output(GEOSFunc('GEOSGeom_createPoint'), [CS_PTR])
create_linestring = geom_output(GEOSFunc('GEOSGeom_createLineString'), [CS_PTR])
create_linearring = geom_output(GEOSFunc('GEOSGeom_createLinearRing'), [CS_PTR])
# Polygon and collection creation routines are special and will not
# have their argument types defined.
create_polygon = geom_output(GEOSFunc('GEOSGeom_createPolygon'), None)
create_collection = geom_output(GEOSFunc('GEOSGeom_createCollection'), None)
# Ring routines
get_extring = geom_output(GEOSFunc('GEOSGetExteriorRing'), [GEOM_PTR])
get_intring = geom_index(GEOSFunc('GEOSGetInteriorRingN'))
get_nrings = int_from_geom(GEOSFunc('GEOSGetNumInteriorRings'))
# Collection Routines
get_geomn = geom_index(GEOSFunc('GEOSGetGeometryN'))
# Cloning
geom_clone = GEOSFunc('GEOSGeom_clone')
geom_clone.argtypes = [GEOM_PTR]
geom_clone.restype = GEOM_PTR
# Destruction routine.
destroy_geom = GEOSFunc('GEOSGeom_destroy')
destroy_geom.argtypes = [GEOM_PTR]
destroy_geom.restype = None
# SRID routines
geos_get_srid = GEOSFunc('GEOSGetSRID')
geos_get_srid.argtypes = [GEOM_PTR]
geos_get_srid.restype = c_int
geos_set_srid = GEOSFunc('GEOSSetSRID')
geos_set_srid.argtypes = [GEOM_PTR, c_int]
geos_set_srid.restype = None
| bsd-3-clause | -7,688,265,712,280,169,000 | 36.521008 | 88 | 0.718368 | false |
msrb/freeipa | ipalib/plugins/hbacsvcgroup.py | 5 | 5417 | # Authors:
# Rob Crittenden <[email protected]>
#
# Copyright (C) 2010 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ipalib import api, errors
from ipalib.plugable import Registry
from ipalib.plugins.baseldap import *
from ipalib import _, ngettext
__doc__ = _("""
HBAC Service Groups
HBAC service groups can contain any number of individual services,
or "members". Every group must have a description.
EXAMPLES:
Add a new HBAC service group:
ipa hbacsvcgroup-add --desc="login services" login
Add members to an HBAC service group:
ipa hbacsvcgroup-add-member --hbacsvcs=sshd --hbacsvcs=login login
Display information about a named group:
ipa hbacsvcgroup-show login
Delete an HBAC service group:
ipa hbacsvcgroup-del login
""")
register = Registry()
topic = ('hbac', _('Host based access control commands'))
@register()
class hbacsvcgroup(LDAPObject):
"""
HBAC service group object.
"""
container_dn = api.env.container_hbacservicegroup
object_name = _('HBAC service group')
object_name_plural = _('HBAC service groups')
object_class = ['ipaobject', 'ipahbacservicegroup']
permission_filter_objectclasses = ['ipahbacservicegroup']
default_attributes = [ 'cn', 'description', 'member' ]
uuid_attribute = 'ipauniqueid'
attribute_members = {
'member': ['hbacsvc'],
}
managed_permissions = {
'System: Read HBAC Service Groups': {
'replaces_global_anonymous_aci': True,
'ipapermbindruletype': 'all',
'ipapermright': {'read', 'search', 'compare'},
'ipapermdefaultattr': {
'businesscategory', 'cn', 'description', 'ipauniqueid',
'member', 'o', 'objectclass', 'ou', 'owner', 'seealso',
'memberuser', 'memberhost',
},
},
'System: Add HBAC Service Groups': {
'ipapermright': {'add'},
'replaces': [
'(target = "ldap:///cn=*,cn=hbacservicegroups,cn=hbac,$SUFFIX")(version 3.0;acl "permission:Add HBAC service groups";allow (add) groupdn = "ldap:///cn=Add HBAC service groups,cn=permissions,cn=pbac,$SUFFIX";)',
],
'default_privileges': {'HBAC Administrator'},
},
'System: Delete HBAC Service Groups': {
'ipapermright': {'delete'},
'replaces': [
'(target = "ldap:///cn=*,cn=hbacservicegroups,cn=hbac,$SUFFIX")(version 3.0;acl "permission:Delete HBAC service groups";allow (delete) groupdn = "ldap:///cn=Delete HBAC service groups,cn=permissions,cn=pbac,$SUFFIX";)',
],
'default_privileges': {'HBAC Administrator'},
},
'System: Manage HBAC Service Group Membership': {
'ipapermright': {'write'},
'ipapermdefaultattr': {'member'},
'replaces': [
'(targetattr = "member")(target = "ldap:///cn=*,cn=hbacservicegroups,cn=hbac,$SUFFIX")(version 3.0;acl "permission:Manage HBAC service group membership";allow (write) groupdn = "ldap:///cn=Manage HBAC service group membership,cn=permissions,cn=pbac,$SUFFIX";)',
],
'default_privileges': {'HBAC Administrator'},
},
}
label = _('HBAC Service Groups')
label_singular = _('HBAC Service Group')
takes_params = (
Str('cn',
cli_name='name',
label=_('Service group name'),
primary_key=True,
normalizer=lambda value: value.lower(),
),
Str('description?',
cli_name='desc',
label=_('Description'),
doc=_('HBAC service group description'),
),
)
@register()
class hbacsvcgroup_add(LDAPCreate):
__doc__ = _('Add a new HBAC service group.')
msg_summary = _('Added HBAC service group "%(value)s"')
@register()
class hbacsvcgroup_del(LDAPDelete):
__doc__ = _('Delete an HBAC service group.')
msg_summary = _('Deleted HBAC service group "%(value)s"')
@register()
class hbacsvcgroup_mod(LDAPUpdate):
__doc__ = _('Modify an HBAC service group.')
msg_summary = _('Modified HBAC service group "%(value)s"')
@register()
class hbacsvcgroup_find(LDAPSearch):
__doc__ = _('Search for an HBAC service group.')
msg_summary = ngettext(
'%(count)d HBAC service group matched', '%(count)d HBAC service groups matched', 0
)
@register()
class hbacsvcgroup_show(LDAPRetrieve):
__doc__ = _('Display information about an HBAC service group.')
@register()
class hbacsvcgroup_add_member(LDAPAddMember):
__doc__ = _('Add members to an HBAC service group.')
@register()
class hbacsvcgroup_remove_member(LDAPRemoveMember):
__doc__ = _('Remove members from an HBAC service group.')
| gpl-3.0 | -4,154,120,432,591,282,000 | 31.244048 | 277 | 0.631715 | false |
edminer/pymodules | pythonsamp.py | 1 | 5581 | #!/usr/local/bin/python -u
import sys,os,logging,re,traceback
sys.path.append("/usr/local/bin/pymodules")
from emgenutil import EXENAME,EXEPATH,GeneralError
import emgenutil
#------------------------------------------------------------------------------
# GLOBALS
#------------------------------------------------------------------------------
logger=logging.getLogger(EXENAME)
G_myGlobalVar = 'abc'
#------------------------------------------------------------------------------
# USAGE
#------------------------------------------------------------------------------
def usage():
from string import Template
usagetext = """
$EXENAME
Function: Whatever
Syntax : $EXENAME {--debug #}
Note : Parm Description
---------- --------------------------------------------------------
--debug optionally specifies debug option
0=off 1=STDERR 2=FILE
Examples: $EXENAME
Change History:
em XX/XX/2016 first written
.
"""
template = Template(usagetext)
return(template.substitute({'EXENAME':EXENAME}))
#------------------------------------------------------------------------------
# Subroutine: main
# Function : Main routine
# Parms : none (in sys.argv)
# Returns : nothing
# Assumes : sys.argv has parms, if any
#------------------------------------------------------------------------------
def main():
##############################################################################
#
# Main - initialize
#
##############################################################################
initialize()
##############################################################################
#
# Logic
#
##############################################################################
try:
# Example use of config file options.
if 'simpleoption' in G_config:
print("simpleoption:", G_config['simpleoption'])
if 'listoption' in G_config:
print("listoption:", G_config['listoption'])
if 'dictoption' in G_config:
print("dictoption:", G_config['dictoption'])
(returncode,cmdoutput,cmderror) = emgenutil.execCommand("/bin/ls -l "+emgenutil.G_options.myrequiredarg)
if returncode == 0:
for line in cmdoutput.splitlines():
print("line:",line)
else:
raise GeneralError('execCommand non-Zero returncode: %d\nSTDERR:\n%s' % (returncode,cmderror))
except GeneralError as e:
if emgenutil.G_options.debug:
         # Fuller display of the Exception type and where the exception occurred in the code
(eType, eValue, eTraceback) = sys.exc_info()
tbprintable = ''.join(traceback.format_tb(eTraceback))
emgenutil.exitWithErrorMessage("%s Exception: %s\n%s" % (eType.__name__, eValue, tbprintable), errorCode=e.errorCode)
else:
emgenutil.exitWithErrorMessage(e.message, errorCode=e.errorCode)
except Exception as e:
if emgenutil.G_options.debug:
         # Fuller display of the Exception type and where the exception occurred in the code
(eType, eValue, eTraceback) = sys.exc_info()
tbprintable = ''.join(traceback.format_tb(eTraceback))
emgenutil.exitWithErrorMessage("%s Exception: %s\n%s" % (eType.__name__, eValue, tbprintable))
else:
emgenutil.exitWithErrorMessage(str(e))
##############################################################################
#
# Finish up
#
##############################################################################
logger.info(EXENAME+" exiting")
logging.shutdown()
exit()
#------------------------------------------------------------------------------
# Subroutine: initialize
# Function : performs initialization of variable, CONSTANTS, other
# Parms : none
# Returns : nothing
# Assumes : ARGV has parms, if any
#------------------------------------------------------------------------------
def initialize():
# PROCESS COMMAND LINE PARAMETERS
import argparse # http://www.pythonforbeginners.com/modules-in-python/argparse-tutorial/
parser = argparse.ArgumentParser(usage=usage())
parser.add_argument('myrequiredarg') # positional, required
parser.add_argument('myoptionalarg', nargs='?') # positional, optional
parser.add_argument('myremainingoptionalargs', nargs='*') # positional, optional, zero OR MORE
parser.add_argument('--debug', dest="debug", type=int, help='0=no debug, 1=STDERR, 2=log file')
parser.add_argument('-o', '--option1', action="store_true", dest="option1", help='help for this option')
emgenutil.G_options = parser.parse_args()
if emgenutil.G_options.debug == None or emgenutil.G_options.debug == 0:
logging.disable(logging.CRITICAL) # effectively disable all logging
else:
if emgenutil.G_options.debug == 9:
emgenutil.configureLogging(loglevel='DEBUG')
else:
emgenutil.configureLogging()
if emgenutil.G_options.option1:
logger.info("option1 is true")
if emgenutil.G_options.myoptionalarg: logger.info("myoptionalarg: "+emgenutil.G_options.myoptionalarg)
if emgenutil.G_options.myremainingoptionalargs: logger.info("myremainingoptionalargs"+str(emgenutil.G_options.myremainingoptionalargs))
global G_config
G_config = emgenutil.processConfigFile()
logger.info(EXENAME+" starting:"+__name__+" with these args:"+str(sys.argv))
# Standard boilerplate to call the main() function to begin the program.
if __name__ == "__main__":
main()
| gpl-3.0 | 1,954,630,745,844,918,800 | 33.88125 | 138 | 0.521949 | false |
monikagrabowska/osf.io | api_tests/registrations/views/test_registration_embeds.py | 12 | 2922 | from nose.tools import * # flake8: noqa
import functools
from framework.auth.core import Auth
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
RegistrationFactory
)
class TestRegistrationEmbeds(ApiTestCase):
def setUp(self):
super(TestRegistrationEmbeds, self).setUp()
self.user = AuthUserFactory()
self.auth = Auth(self.user)
make_public_node = functools.partial(ProjectFactory, is_public=False, creator=self.user)
self.root_node = make_public_node()
self.child1 = make_public_node(parent=self.root_node)
self.child2 = make_public_node(parent=self.root_node)
self.contribs = [AuthUserFactory() for i in range(2)]
for contrib in self.contribs:
self.root_node.add_contributor(contrib, ['read', 'write'], auth=self.auth, save=True)
self.child1.add_contributor(contrib, ['read', 'write'], auth=self.auth, save=True)
self.contrib1 = self.contribs[0]
self.contrib2 = self.contribs[1]
self.subchild = ProjectFactory(parent=self.child2, creator=self.contrib1)
self.registration = RegistrationFactory(project=self.root_node, is_public=True)
self.registration_child = RegistrationFactory(project=self.child1, is_public=True)
def test_embed_children(self):
url = '/{0}registrations/{1}/?embed=children'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth)
json = res.json
embeds = json['data']['embeds']
assert_equal(len(embeds['children']['data']), 2)
titles = [self.child1.title, self.child2.title]
for child in embeds['children']['data']:
assert_in(child['attributes']['title'], titles)
def test_embed_contributors(self):
url = '/{0}registrations/{1}/?embed=contributors'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth)
embeds = res.json['data']['embeds']
ids = [c._id for c in self.contribs] + [self.user._id]
ids = ['{}-{}'.format(self.registration._id, id_) for id_ in ids]
for contrib in embeds['contributors']['data']:
assert_in(contrib['id'], ids)
def test_embed_identifiers(self):
url = '/{0}registrations/{1}/?embed=identifiers'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_embed_attributes_not_relationships(self):
url = '/{}registrations/{}/?embed=title'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.contrib1.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "The following fields are not embeddable: title")
| apache-2.0 | 826,183,483,024,836,200 | 40.15493 | 103 | 0.656057 | false |
izrik/tudor | tests/persistence_t/sqlalchemy/layer/test_get_note.py | 1 | 1484 |
from tests.persistence_t.sqlalchemy.util import PersistenceLayerTestBase
class GetNoteTest(PersistenceLayerTestBase):
def setUp(self):
self.pl = self.generate_pl()
self.pl.create_all()
def test_get_note_none_raises(self):
# expect
self.assertRaises(ValueError, self.pl.get_note, None)
def test_get_note_non_existent_yields_none(self):
# expect
self.assertIsNone(self.pl.get_note(1))
def test_get_note_existing_yields_that_note(self):
# given
note = self.pl.create_note('note')
self.pl.add(note)
self.pl.commit()
# precondition
self.assertIsNotNone(note.id)
# when
result = self.pl.get_note(note.id)
# then
self.assertIsNotNone(result)
self.assertIs(note, result)
def test_get_db_note_none_raises(self):
# expect
self.assertRaises(ValueError, self.pl._get_db_note, None)
def test_get_db_note_non_existent_yields_none(self):
# expect
self.assertIsNone(self.pl._get_db_note(1))
def test_get_db_note_existing_yields_that_dbnote(self):
# given
dbnote = self.pl.DbNote('note')
self.pl.db.session.add(dbnote)
self.pl.db.session.commit()
# precondition
self.assertIsNotNone(dbnote.id)
# when
result = self.pl._get_db_note(dbnote.id)
# then
self.assertIsNotNone(result)
self.assertIs(dbnote, result)
| gpl-2.0 | 6,242,855,809,844,425,000 | 28.68 | 72 | 0.617251 | false |
elbeardmorez/quodlibet | quodlibet/quodlibet/plugins/gstelement.py | 1 | 1397 | # -*- coding: utf-8 -*-
# Copyright 2012 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
class GStreamerPlugin(object):
"""GStreamer Plugins define an element that gets inserted into the
GStreamer pipeline before the audio sink and after the playbin.
The method setup_element should return a new element instance or None:
self.setup_element()
One optional method can be implemented:
self.update_element(element)
update_element should apply all settings and will be called after
queue_update or on pipeline creation etc.
All plugin elements will be sorted by their priority attribute
(higher priority elements come first)
To notify setting changes, call queue_update.
"""
PLUGIN_ICON = "audio-volume-high"
_handler = None
priority = 0
@classmethod
def setup_element(cls):
"""Return a new element instance or None"""
return None
@classmethod
def update_element(cls, element):
"""Apply settings to the instance"""
pass
@classmethod
def queue_update(cls):
"""Call if you want to update settings"""
cls._handler._queue_update(cls)
| gpl-2.0 | -4,628,161,674,052,333,000 | 28.723404 | 74 | 0.692198 | false |
cyanogen/uchroma | uchroma/server/device_base.py | 1 | 17370 | #
# uchroma - Copyright (C) 2021 Stefanie Kondik
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
import asyncio
import functools
import re
from concurrent import futures
from contextlib import contextmanager
import hidapi
from wrapt import synchronized
from uchroma.log import Log
from uchroma.util import ensure_future, Signal, ValueAnimator
from uchroma.version import __version__
from .anim import AnimationManager
from .input import InputManager
from .hardware import Hardware, Quirks
from .prefs import PreferenceManager
from .report import RazerReport
from .types import BaseCommand
class BaseUChromaDevice:
"""
Base class for device objects
"""
class Command(BaseCommand):
"""
Standard commands used by all Chroma devices
"""
# info queries, class 0
GET_FIRMWARE_VERSION = (0x00, 0x81, 0x02)
GET_SERIAL = (0x00, 0x82, 0x16)
def __init__(self, hardware: Hardware, devinfo: hidapi.DeviceInfo, index: int,
sys_path: str, input_devices=None, *args, **kwargs):
self._hardware = hardware
self._devinfo = devinfo
self._devindex = index
self._sys_path = sys_path
self.logger = Log.get('uchroma.driver-%d' % index)
# needed for mixins
super(BaseUChromaDevice, self).__init__(*args, **kwargs)
self._dev = None
self._serial_number = None
self._firmware_version = None
self._last_cmd_time = None
self._prefs = None
self._offline = False
self._suspended = False
self.power_state_changed = Signal()
self.restore_prefs = Signal()
self._input_manager = None
if input_devices is not None:
self._input_manager = InputManager(self, input_devices)
self._animation_manager = None
if self.width > 0 and self.height > 0:
self._animation_manager = AnimationManager(self)
self._brightness_animator = ValueAnimator(self._update_brightness)
self._fx_manager = None
self._ref_count = 0
self._executor = futures.ThreadPoolExecutor(max_workers=1)
async def shutdown(self):
"""
Shuts down all services associated with the device and closes the HID instance.
"""
if asyncio.get_event_loop().is_running():
if hasattr(self, '_animation_manager') and self.animation_manager is not None:
await self.animation_manager.shutdown()
if hasattr(self, '_input_manager') and self._input_manager is not None:
await self._input_manager.shutdown()
self.close(True)
def close(self, force: bool = False):
if not force:
if self.animation_manager is not None and self.is_animating:
return
if self._ref_count > 0:
return
if hasattr(self, '_dev') and self._dev is not None:
try:
self._dev.close()
except Exception:
pass
self._dev = None
def has_fx(self, fx_type: str) -> bool:
"""
Test if the device supports a particular built-in effect
:param fx_type: the effect to test
:return: true if the effect is supported
"""
if self.fx_manager is None:
return False
return fx_type in self.fx_manager.available_fx
@property
def animation_manager(self):
"""
Animation manager for this device
"""
if hasattr(self, '_animation_manager'):
return self._animation_manager
return None
@property
def is_animating(self):
"""
True if an animation is currently running
"""
if self.animation_manager is not None:
return self.animation_manager.running
return False
@property
def fx_manager(self):
"""
Built-in effects manager for this device
"""
return self._fx_manager
@property
def input_manager(self):
"""
Input manager service for this device
"""
return self._input_manager
@property
def input_devices(self):
"""
Input devices associated with this instance
"""
if self._input_manager is None:
return None
return self._input_manager.input_devices
@property
def hid(self):
"""
The lower-layer hidapi device
"""
return self._dev
@property
def last_cmd_time(self):
"""
Timestamp of the last command sent to the hardware, used for delay enforcement
"""
return self._last_cmd_time
@last_cmd_time.setter
def last_cmd_time(self, last_cmd_time):
self._last_cmd_time = last_cmd_time
def _set_brightness(self, level: float) -> bool:
return False
def _get_brightness(self) -> float:
return 0.0
async def _update_brightness(self, level):
await ensure_future(asyncio.get_event_loop().run_in_executor( \
self._executor, functools.partial(self._set_brightness, level)))
suspended = self.suspended and level == 0
self.power_state_changed.fire(level, suspended)
@property
def suspended(self):
"""
The power state of the device, true if suspended
"""
return self._suspended
def suspend(self, fast=False):
"""
Suspend the device
Performs any actions necessary to suspend the device. By default,
the current brightness level is saved and set to zero.
"""
if self._suspended:
return
self.preferences.brightness = self.brightness
if fast:
self._set_brightness(0)
else:
if self._device_open():
self._brightness_animator.animate(self.brightness, 0,
done_cb=self._done_cb)
self._suspended = True
def resume(self):
"""
Resume the device
Performs any actions necessary to resume the device. By default,
the saved brightness level is restored.
"""
if not self._suspended:
return
self._suspended = False
self.brightness = self.preferences.brightness
@property
def brightness(self):
"""
The current brightness level of the device lighting
"""
if self._suspended:
return self.preferences.brightness
return self._get_brightness()
@brightness.setter
def brightness(self, level: float):
"""
Set the brightness level of the main device lighting
:param level: Brightness level, 0-100
"""
if not self._suspended:
if self._device_open():
self._brightness_animator.animate(self.brightness, level,
done_cb=self._done_cb)
self.preferences.brightness = level
def _ensure_open(self) -> bool:
try:
if self._dev is None:
self._dev = hidapi.Device(self._devinfo, blocking=False)
except Exception as err:
self.logger.exception("Failed to open connection", exc_info=err)
return False
return True
def get_report(self, command_class: int, command_id: int, data_size: int,
*args, transaction_id: int, remaining_packets: int = 0x00) -> RazerReport:
"""
Create and initialize a new RazerReport on this device
"""
if transaction_id is None:
if self.has_quirk(Quirks.TRANSACTION_CODE_3F):
transaction_id = 0x3F
else:
transaction_id = 0xFF
self.logger.debug('Transaction id: %d quirks: %s' % (transaction_id, self.hardware.quirks))
report = RazerReport(self, command_class, command_id, data_size,
transaction_id=transaction_id,
remaining_packets=remaining_packets)
if args is not None:
for arg in args:
if arg is not None:
report.args.put(arg)
return report
def _get_timeout_cb(self):
"""
Getter for report timeout handler
"""
return None
def run_with_result(self, command: BaseCommand, *args,
transaction_id: int = None, delay: float = None,
remaining_packets: int = 0x00) -> bytes:
"""
Run a command and return the result
Executes the given command with the provided list of arguments, returning
the result report.
Transaction id is only necessary for specialized commands or hardware.
The connection to the device will be automatically closed by default.
:param command: The command to run
:param args: The list of arguments to call the command with
:type args: varies
        :param transaction_id: Transaction identifier, defaults to 0xFF
:return: The result report from the hardware
"""
report = self.get_report(*command.value, *args, transaction_id=transaction_id,
remaining_packets=remaining_packets)
result = None
if self.run_report(report, delay=delay):
result = report.result
return result
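    # A minimal usage sketch for run_with_result (hypothetical: `device` stands
    # for an instance of a concrete subclass, not something defined here):
    #
    #   raw_serial = device.run_with_result(BaseUChromaDevice.Command.GET_SERIAL)
    #   if raw_serial is not None:
    #       print(raw_serial)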
@synchronized
def run_report(self, report: RazerReport, delay: float = None) -> bool:
"""
Runs a previously initialized RazerReport on the device
:param report: the report to run
:param delay: custom delay to enforce between commands
:return: True if successful
"""
with self.device_open():
return report.run(delay=delay, timeout_cb=self._get_timeout_cb())
def run_command(self, command: BaseCommand, *args, transaction_id: int = None,
delay: float = None, remaining_packets: int = 0x00) -> bool:
"""
Run a command
Executes the given command with the provided list of arguments.
Transaction id is only necessary for specialized commands or hardware.
The connection to the device will be automatically closed by default.
:param command: The command to run
:param args: The list of arguments to call the command with
:type args: varies
        :param transaction_id: Transaction identifier, defaults to 0xFF
:return: True if the command was successful
"""
report = self.get_report(*command.value, *args, transaction_id=transaction_id,
remaining_packets=remaining_packets)
return self.run_report(report, delay=delay)
def _decode_serial(self, value: bytes) -> str:
if value is not None:
try:
return value.decode()
except UnicodeDecodeError:
return self.key
return None
def _get_serial_number(self) -> str:
"""
Get the serial number from the hardware directly
Laptops don't return a serial number for their devices,
so we return the model name.
"""
value = self.run_with_result(BaseUChromaDevice.Command.GET_SERIAL)
return self._decode_serial(value)
@property
def serial_number(self) -> str:
"""
The hardware serial number of this device
On laptops, this is not available.
"""
if self._serial_number is not None:
return self._serial_number
serial = self._get_serial_number()
if serial is not None:
self._serial_number = re.sub(r'\W+', r'', serial)
return self._serial_number
def _get_firmware_version(self) -> str:
"""
Get the firmware version from the hardware directly
"""
return self.run_with_result(BaseUChromaDevice.Command.GET_FIRMWARE_VERSION)
@property
def firmware_version(self) -> str:
"""
The firmware version present on this device
"""
if self._firmware_version is None:
version = self._get_firmware_version()
if version is None:
self._firmware_version = '(unknown)'
else:
self._firmware_version = 'v%d.%d' % (int(version[0]), int(version[1]))
return self._firmware_version
@property
def is_offline(self) -> bool:
"""
Some devices (such as wireless models) might be "offline" in that
the dock or other USB receiver might be plugged in, but the actual
device is switched off. In this case, we can't interact with it
but it will still enumerate.
"""
return self._offline
@property
def name(self) -> str:
"""
The name of this device
"""
return self.hardware.name
@property
def device_index(self) -> int:
"""
The internal index of this device in the device manager
"""
return self._devindex
@property
def sys_path(self) -> str:
"""
The sysfs path of this device
"""
return self._sys_path
@property
def key(self) -> str:
"""
Unique key which identifies this device to the device manager
"""
return '%04x:%04x.%02d' % (self.vendor_id, self.product_id, self.device_index)
@property
def hardware(self) -> Hardware:
"""
The sub-enumeration of Hardware
"""
return self._hardware
@property
def product_id(self) -> int:
"""
The USB product identifier of this device
"""
return self._devinfo.product_id
@property
def vendor_id(self) -> int:
"""
The USB vendor identifier of this device
"""
return self._devinfo.vendor_id
@property
def manufacturer(self) -> str:
"""
The manufacturer of this device
"""
return self._hardware.manufacturer
@property
def device_type(self) -> Hardware.Type:
"""
The type of this device, from the Hardware.Type enumeration
"""
return self.hardware.type
@property
def driver_version(self):
"""
Get the uChroma version
"""
return __version__
@property
def width(self) -> int:
"""
Gets the width of the key matrix (if applicable)
"""
if self.hardware.dimensions is None:
return 0
return self.hardware.dimensions.x
@property
def height(self) -> int:
"""
Gets the height of the key matrix (if applicable)
"""
if self.hardware.dimensions is None:
return 0
return self.hardware.dimensions.y
@property
def has_matrix(self) -> bool:
"""
True if the device supports matrix control
"""
return self.hardware.has_matrix
def has_quirk(self, quirk) -> bool:
"""
True if the quirk is required for this device
"""
return self.hardware.has_quirk(quirk)
@property
def key_mapping(self):
"""
The mapping between keycodes and lighting matrix coordinates
"""
return self.hardware.key_mapping
@property
def preferences(self):
"""
Saved preferences for this device
"""
if self._prefs is None:
self._prefs = PreferenceManager().get(self.serial_number)
return self._prefs
def reset(self) -> bool:
"""
Reset effects and other configuration to defaults
"""
return True
def fire_restore_prefs(self):
"""
Restore saved preferences
"""
with self.preferences.observers_paused():
if hasattr(self, 'brightness') and self.preferences.brightness is not None:
setattr(self, 'brightness', self.preferences.brightness)
self.restore_prefs.fire(self.preferences)
def __repr__(self):
return "%s(name=%s, type=%s, product_id=0x%04x, index=%d, quirks=%s)" % \
(self.__class__.__name__, self.name, self.device_type.value,
self.product_id, self.device_index, self.hardware.quirks)
def _device_open(self):
self._ref_count += 1
return self._ensure_open()
def _device_close(self):
self._ref_count -= 1
self.close()
def _done_cb(self, future):
self._device_close()
@contextmanager
def device_open(self):
try:
if self._device_open():
yield
finally:
self._device_close()
def __del__(self):
self.close(force=True)
| lgpl-3.0 | -2,896,111,960,248,422,000 | 25.600306 | 99 | 0.581693 | false |
oarriaga/single_shot_multibox_detector | src/utils/tests/vis_bbox.py | 1 | 1726 | import pickle
import matplotlib.pyplot as plt
"""
from datasets import DataManager
from utils.generator import ImageGenerator
from utils.boxes import to_point_form
from utils.boxes import create_prior_boxes
from tqdm import tqdm
datasets = ['VOC2007', 'VOC2012']
splits = ['trainval', 'trainval']
class_names = 'all'
difficult_boxes = True
batch_size = 32
box_scale_factors = [.1, .1, .2, .2]
dataset_manager = DataManager(datasets, splits, class_names, difficult_boxes)
train_data = dataset_manager.load_data()
val_data = test_data = DataManager('VOC2007', 'test').load_data()
class_names = dataset_manager.class_names
num_classes = len(class_names)
# generator
prior_boxes = to_point_form(create_prior_boxes())
generator = ImageGenerator(train_data, val_data, prior_boxes, batch_size,
box_scale_factors, num_classes)
steps_per_epoch = int(len(train_data) / batch_size)
train_generator = generator.flow('train')
data = []
for step_arg in tqdm(range(steps_per_epoch)):
batch = next(train_generator)
sample = batch[-1]['predictions']
positive_mask = sample[:, :, 4] != 1
positive_samples = sample[positive_mask]
data.append(positive_samples)
"""
encoded_positive_boxes = pickle.load(open('encoded_positive_boxes.pkl', 'rb'))
encoded_cx = encoded_positive_boxes[:, 0]
plt.hist(encoded_cx, bins=25)
plt.title('encoded center x')
plt.show()
encoded_cy = encoded_positive_boxes[:, 1]
plt.hist(encoded_cy, bins=25)
plt.title('encoded center y')
plt.show()
encoded_w = encoded_positive_boxes[:, 2]
plt.hist(encoded_w, bins=50)
plt.title('encoded widths')
plt.show()
encoded_h = encoded_positive_boxes[:, 3]
plt.hist(encoded_h, bins=50)
plt.title('encoded heights')
plt.show()
| mit | -4,195,889,074,880,591,000 | 27.766667 | 78 | 0.71263 | false |
CohibAA/p2pool-doge1-8 | p2pool/bitcoin/networks/kittehcoin.py | 10 | 1210 | import os
import platform
from twisted.internet import defer
from .. import data, helper
from p2pool.util import pack
P2P_PREFIX = 'c0c0c0c0'.decode('hex') #pchmessagestart
P2P_PORT = 22566
ADDRESS_VERSION = 45 #pubkey_address
RPC_PORT = 22565
RPC_CHECK = defer.inlineCallbacks(lambda bitcoind: defer.returnValue(
'kittehcoinaddress' in (yield bitcoind.rpc_help()) and
not (yield bitcoind.rpc_getinfo())['testnet']
))
SUBSIDY_FUNC = lambda height: 1000*100000000
POW_FUNC = lambda data: pack.IntType(256).unpack(__import__('ltc_scrypt').getPoWHash(data))
BLOCK_PERIOD = 60 # s
SYMBOL = 'MEOW'
CONF_FILE_FUNC = lambda: os.path.join(os.path.join(os.environ['APPDATA'], 'kittehcoin')
if platform.system() == 'Windows' else os.path.expanduser('~/Library/Application Support/kittehcoin/')
if platform.system() == 'Darwin' else os.path.expanduser('~/.kittehcoin'), 'kittehcoin.conf')
BLOCK_EXPLORER_URL_PREFIX = 'http://kitexplorer.tk/block/'
ADDRESS_EXPLORER_URL_PREFIX = 'http://kitexplorer.tk/address/'
TX_EXPLORER_URL_PREFIX = 'http://kitexplorer.tk/tx/'
SANE_TARGET_RANGE = (2**256//1000000000 - 1, 2**256//1000 - 1)
DUMB_SCRYPT_DIFF = 2**16
DUST_THRESHOLD = 0.00001e8
| gpl-3.0 | -1,299,162,831,974,034,200 | 39.333333 | 105 | 0.71405 | false |
tensorflow/tpu | models/official/mask_rcnn/object_detection/tf_example_decoder.py | 1 | 7436 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow Example proto decoder for object detection.
A decoder to decode string tensors containing serialized tensorflow.Example
protos for object detection.
"""
import tensorflow.compat.v1 as tf
def _get_source_id_from_encoded_image(parsed_tensors):
return tf.strings.as_string(
tf.strings.to_hash_bucket_fast(parsed_tensors['image/encoded'],
2**63 - 1))
class TfExampleDecoder(object):
"""Tensorflow Example proto decoder."""
def __init__(self, use_instance_mask=False, regenerate_source_id=False):
self._use_instance_mask = use_instance_mask
self._regenerate_source_id = regenerate_source_id
self._keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string),
'image/source_id': tf.FixedLenFeature((), tf.string, ''),
'image/height': tf.FixedLenFeature((), tf.int64),
'image/width': tf.FixedLenFeature((), tf.int64),
'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
'image/object/class/label': tf.VarLenFeature(tf.int64),
'image/object/area': tf.VarLenFeature(tf.float32),
'image/object/is_crowd': tf.VarLenFeature(tf.int64),
'image/object/polygon': tf.VarLenFeature(tf.float32),
'image/object/attribute/label': tf.VarLenFeature(tf.int64),
'image/object/difficult': tf.VarLenFeature(tf.int64),
'image/object/group_of': tf.VarLenFeature(tf.int64),
}
if use_instance_mask:
self._keys_to_features.update({
'image/object/mask':
tf.VarLenFeature(tf.string),
})
def _decode_image(self, parsed_tensors):
"""Decodes the image and set its static shape."""
image = tf.io.decode_image(parsed_tensors['image/encoded'], channels=3)
image.set_shape([None, None, 3])
return image
def _decode_boxes(self, parsed_tensors):
"""Concat box coordinates in the format of [ymin, xmin, ymax, xmax]."""
xmin = parsed_tensors['image/object/bbox/xmin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymin = parsed_tensors['image/object/bbox/ymin']
ymax = parsed_tensors['image/object/bbox/ymax']
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
def _decode_masks(self, parsed_tensors):
"""Decode a set of PNG masks to the tf.float32 tensors."""
def _decode_png_mask(png_bytes):
mask = tf.squeeze(
tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)
mask = tf.cast(mask, dtype=tf.float32)
mask.set_shape([None, None])
return mask
height = parsed_tensors['image/height']
width = parsed_tensors['image/width']
masks = parsed_tensors['image/object/mask']
return tf.cond(
tf.greater(tf.size(masks), 0),
lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32),
lambda: tf.zeros([0, height, width], dtype=tf.float32))
def _decode_areas(self, parsed_tensors):
xmin = parsed_tensors['image/object/bbox/xmin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymin = parsed_tensors['image/object/bbox/ymin']
ymax = parsed_tensors['image/object/bbox/ymax']
return tf.cond(
tf.greater(tf.shape(parsed_tensors['image/object/area'])[0], 0),
lambda: parsed_tensors['image/object/area'],
lambda: (xmax - xmin) * (ymax - ymin))
def decode(self, serialized_example):
"""Decode the serialized example.
Args:
serialized_example: a single serialized tf.Example string.
Returns:
decoded_tensors: a dictionary of tensors with the following fields:
- image: a uint8 tensor of shape [None, None, 3].
- source_id: a string scalar tensor.
- height: an integer scalar tensor.
- width: an integer scalar tensor.
- groundtruth_classes: a int64 tensor of shape [None].
- groundtruth_is_crowd: a bool tensor of shape [None].
- groundtruth_area: a float32 tensor of shape [None].
- groundtruth_boxes: a float32 tensor of shape [None, 4].
- groundtruth_attributes - 1D int64 tensor of shape [None].
Optional:
- groundtruth_difficult - 1D bool tensor of shape
[None] indicating if the boxes represent `difficult` instances.
- groundtruth_group_of - 1D bool tensor of shape
[None] indicating if the boxes represent `group_of` instances.
- groundtruth_instance_masks - 3D float32 tensor of
shape [None, None, None] containing instance masks.
- groundtruth_polygons - 1D float tensor of shape [None]
"""
parsed_tensors = tf.io.parse_single_example(
serialized_example, self._keys_to_features)
for k in parsed_tensors:
if isinstance(parsed_tensors[k], tf.SparseTensor):
if parsed_tensors[k].dtype == tf.string:
parsed_tensors[k] = tf.sparse_tensor_to_dense(
parsed_tensors[k], default_value='')
else:
parsed_tensors[k] = tf.sparse_tensor_to_dense(
parsed_tensors[k], default_value=0)
image = self._decode_image(parsed_tensors)
boxes = self._decode_boxes(parsed_tensors)
areas = self._decode_areas(parsed_tensors)
if self._regenerate_source_id:
source_id = _get_source_id_from_encoded_image(parsed_tensors)
else:
source_id = tf.cond(
tf.greater(tf.strings.length(parsed_tensors['image/source_id']),
0), lambda: parsed_tensors['image/source_id'],
lambda: _get_source_id_from_encoded_image(parsed_tensors))
if self._use_instance_mask:
masks = self._decode_masks(parsed_tensors)
decoded_tensors = {
'image': image,
'source_id': source_id,
'height': parsed_tensors['image/height'],
'width': parsed_tensors['image/width'],
'groundtruth_classes': parsed_tensors['image/object/class/label'],
'groundtruth_is_crowd': tf.cast(parsed_tensors['image/object/is_crowd'],
dtype=tf.bool),
'groundtruth_area': areas,
'groundtruth_boxes': boxes,
'groundtruth_attributes': parsed_tensors[
'image/object/attribute/label'],
}
if self._use_instance_mask:
decoded_tensors.update({
'groundtruth_instance_masks': masks,
'groundtruth_polygons': parsed_tensors['image/object/polygon'],
'groundtruth_difficult':
parsed_tensors['image/object/difficult'],
'groundtruth_group_of':
parsed_tensors['image/object/group_of'],
})
return decoded_tensors
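# A minimal usage sketch, assuming a hypothetical TFRecord file of serialized
# tf.Example protos; the decoder maps each record to the tensor dictionary
# documented in decode() above.
if __name__ == '__main__':
  example_decoder = TfExampleDecoder(use_instance_mask=True)
  example_dataset = tf.data.TFRecordDataset('/path/to/train.tfrecord')
  example_dataset = example_dataset.map(example_decoder.decode)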
| apache-2.0 | 3,985,326,566,496,668,700 | 41.982659 | 80 | 0.642684 | false |
tailhook/sqlparse | examples/extract_table_names.py | 9 | 1799 | # This example illustrates how to extract table names from nested
# SELECT statements.
# See:
# http://groups.google.com/group/sqlparse/browse_thread/thread/b0bd9a022e9d4895
sql = """
select K.a,K.b from (select H.b from (select G.c from (select F.d from
(select E.e from A, B, C, D, E), F), G), H), I, J, K order by 1,2;
"""
import sqlparse
from sqlparse.sql import IdentifierList, Identifier
from sqlparse.tokens import Keyword, DML
def is_subselect(parsed):
if not parsed.is_group():
return False
for item in parsed.tokens:
if item.ttype is DML and item.value.upper() == 'SELECT':
return True
return False
def extract_from_part(parsed):
from_seen = False
for item in parsed.tokens:
if from_seen:
if is_subselect(item):
for x in extract_from_part(item):
yield x
elif item.ttype is Keyword:
raise StopIteration
else:
yield item
elif item.ttype is Keyword and item.value.upper() == 'FROM':
from_seen = True
def extract_table_identifiers(token_stream):
for item in token_stream:
if isinstance(item, IdentifierList):
for identifier in item.get_identifiers():
yield identifier.get_name()
elif isinstance(item, Identifier):
yield item.get_name()
# It's a bug to check for Keyword here, but in the example
# above some tables names are identified as keywords...
elif item.ttype is Keyword:
yield item.value
def extract_tables():
stream = extract_from_part(sqlparse.parse(sql)[0])
return list(extract_table_identifiers(stream))
if __name__ == '__main__':
print 'Tables: %s' % ', '.join(extract_tables())
| bsd-3-clause | 3,245,527,873,267,118,000 | 28.983333 | 79 | 0.617565 | false |
AlexAkulov/worker | moira/graphite/grammar.py | 2 | 3871 | """Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from pyparsing import (
ParserElement, Forward, Combine, Optional, Word, Literal, CaselessKeyword,
CaselessLiteral, Group, FollowedBy, LineEnd, OneOrMore, ZeroOrMore,
nums, alphas, alphanums, printables, delimitedList, quotedString,
__version__,
)
ParserElement.enablePackrat()
grammar = Forward()
expression = Forward()
# Literals
intNumber = Combine(
Optional('-') + Word(nums)
)('integer')
floatNumber = Combine(
Optional('-') + Word(nums) + Literal('.') + Word(nums)
)('float')
sciNumber = Combine(
(floatNumber | intNumber) + CaselessLiteral('e') + intNumber
)('scientific')
aString = quotedString('string')
# Use lookahead to match only numbers in a list (can't remember why this
# is necessary)
afterNumber = FollowedBy(",") ^ FollowedBy(")") ^ FollowedBy(LineEnd())
number = Group(
(sciNumber + afterNumber) |
(floatNumber + afterNumber) |
(intNumber + afterNumber)
)('number')
boolean = Group(
CaselessKeyword("true") |
CaselessKeyword("false")
)('boolean')
argname = Word(alphas + '_', alphanums + '_')('argname')
funcname = Word(alphas + '_', alphanums + '_')('funcname')
# Symbols
leftParen = Literal('(').suppress()
rightParen = Literal(')').suppress()
comma = Literal(',').suppress()
equal = Literal('=').suppress()
# Function calls
# Symbols
leftBrace = Literal('{')
rightBrace = Literal('}')
leftParen = Literal('(').suppress()
rightParen = Literal(')').suppress()
comma = Literal(',').suppress()
equal = Literal('=').suppress()
backslash = Literal('\\').suppress()
symbols = '''(){},=.'"\\'''
arg = Group(
boolean |
number |
aString |
expression
)('args*')
kwarg = Group(argname + equal + arg)('kwargs*')
args = delimitedList(~kwarg + arg) # lookahead to prevent failing on equals
kwargs = delimitedList(kwarg)
call = Group(
funcname + leftParen +
Optional(
args + Optional(
comma + kwargs
)
) + rightParen
)('call')
# Metric pattern (aka. pathExpression)
validMetricChars = ''.join((set(printables) - set(symbols)))
escapedChar = backslash + Word(symbols, exact=1)
partialPathElem = Combine(
OneOrMore(
escapedChar | Word(validMetricChars)
)
)
matchEnum = Combine(
leftBrace +
delimitedList(partialPathElem, combine=True) +
rightBrace
)
pathElement = Combine(
Group(partialPathElem | matchEnum) +
ZeroOrMore(matchEnum | partialPathElem)
)
pathExpression = delimitedList(
pathElement,
delim='.',
combine=True)('pathExpression')
litarg = Group(
number | aString
)('args*')
litkwarg = Group(argname + equal + litarg)('kwargs*')
# lookahead to prevent failing on equals
litargs = delimitedList(~litkwarg + litarg)
litkwargs = delimitedList(litkwarg)
template = Group(
Literal('template') + leftParen +
(call | pathExpression) +
Optional(comma + (litargs | litkwargs)) +
rightParen
)('template')
if __version__.startswith('1.'):
expression << Group(template | call | pathExpression)('expression')
grammar << expression
else:
expression <<= Group(template | call | pathExpression)('expression')
grammar <<= expression
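# A minimal usage sketch (the target string below is only an illustration;
# `grammar` is the pyparsing expression assembled above).
if __name__ == '__main__':
    example_tokens = grammar.parseString("sumSeries(server.*.cpu.load)")
    print(example_tokens)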
def enableDebug():
for name, obj in globals().items():
try:
obj.setName(name)
obj.setDebug(True)
except:
pass
| gpl-3.0 | 1,981,972,231,787,074,600 | 24.806667 | 78 | 0.673986 | false |
TeskaLabs/SeaCat-Client-Python3 | seacat/spdy/alx1_http.py | 1 | 2080 | import struct
from .spdy import *
from .vle import spdy_add_vle_string, spdy_read_vle_string
forbidden_fields = frozenset([
"Host",
"Connection"
])
def build_syn_stream_frame(frame, stream_id, host, method, path, headers, fin_flag):
hdr_len = struct.calcsize('!HH4BIIBB')
assert((frame.position + hdr_len) <= frame.capacity)
struct.pack_into('!HH4BIIBB', frame.data, frame.position,
CNTL_FRAME_VERSION_ALX1, CNTL_FRAME_TYPE_SYN_STREAM,
0, # Flags
0xFF, 0xFE, 0xFD, # Placeholder for real length
stream_id,
0,
0b00100000, # Priority
0 # Slot
)
frame.position += hdr_len
spdy_add_vle_string(frame, host)
spdy_add_vle_string(frame, method)
spdy_add_vle_string(frame, path)
for hdr, value in headers.items():
if (hdr in forbidden_fields): continue
spdy_add_vle_string(frame, hdr)
spdy_add_vle_string(frame, value)
# Calculate length
lenb = struct.pack('!I', frame.position - SPDY_HEADER_SIZE)
frame.data[5:8] = lenb[1:]
frame.data[4] = SPDY_FLAG_FIN if fin_flag else 0
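# A minimal usage sketch for the builder above (hypothetical: `frame` is the
# buffer object provided by the .spdy module, exposing the data/position/capacity
# attributes used here):
#
#   build_syn_stream_frame(frame, stream_id=1, host='example.com', method='GET',
#                          path='/', headers={}, fin_flag=True)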
def build_data_frame(frame, stream_id, data, fin_flag):
data_len = len(data)
frame_len = struct.calcsize('!II') + data_len
assert((frame.position + frame_len) <= frame.capacity)
struct.pack_into('!II', frame.data, frame.position,
stream_id,
((SPDY_FLAG_FIN if fin_flag else 0) << 24 ) | data_len
)
	# Not sure whether this copy is efficient
frame.data[SPDY_HEADER_SIZE:SPDY_HEADER_SIZE+data_len] = data[0:data_len]
frame.position += frame_len
def parse_rst_stream_frame(frame):
'''returns (stream_id, status_code) '''
assert(frame.limit == frame.position + 8)
	result = struct.unpack_from("!II", frame.data, frame.position)
	frame.position += 8
	return result
def parse_alx1_syn_reply_frame(frame):
assert(frame.limit >= frame.position + 8)
stream_id, status_code, _ = struct.unpack_from("!Ihh", frame.data, frame.position)
frame.position += 8
kv = []
while frame.position < frame.limit:
hname = spdy_read_vle_string(frame)
vname = spdy_read_vle_string(frame)
kv.append((hname.decode('utf-8'), vname.decode('utf-8')))
return stream_id, status_code, kv
| bsd-3-clause | -7,424,434,996,138,187,000 | 26.012987 | 84 | 0.69375 | false |
wkrzemien/DIRAC | Core/Utilities/TimeLeft/test/Test_LSFTimeLeft.py | 8 | 2567 | """ Test class for LSFTimeLeft utility
"""
import os
import unittest
from mock import MagicMock, patch
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.TimeLeft.LSFTimeLeft import LSFTimeLeft
LSF_KEK_BQUEUES = """ CPULIMIT
720.0 min
RUNLIMIT
1440.0 min
"""
LSF_LSHOSTS = """ HOST_NAME type model cpuf ncpus maxmem maxswp server RESOURCES
b6688e710f SLC6_64 i6_16_63f2h24_266 2.5 16 29999M 19999M Yes (intel share aishare cvmfs wan exe lcg wigner slot15)
"""
LSF_CERN_BQUEUES = """ CPULIMIT
10080.0 min of KSI2K
RUNLIMIT
30240.0 min of KSI2K
"""
#returns with S_ERROR
LSF_CERN_LSHOSTS_1= """KSI2K: unknown host name.
"""
##shortened
LSF_CERN_LSINFO= """MODEL_NAME CPU_FACTOR ARCHITECTURE
i6_12_62d7h20_266 3.06
ai_intel_8 2.44
"""
class LSFTimeLeftTest( unittest.TestCase ):
""" test LSFTimeLeft """
def setUp( self ):
gLogger.setLevel( 'DEBUG' )
def test_init( self ):
rcMock = MagicMock()
retValues = ( LSF_KEK_BQUEUES, LSF_LSHOSTS )
rcMock.side_effect = ( S_OK( retValue ) for retValue in retValues )
with patch( "DIRAC.Core.Utilities.TimeLeft.LSFTimeLeft.runCommand", new=rcMock ), \
patch.dict( os.environ, {'LSB_HOSTS': 'b6688e710f'} ):
lsfTimeLeft = LSFTimeLeft()
self.assertEqual( lsfTimeLeft.cpuLimit, 720 * 60 / 2.5 )
self.assertEqual( lsfTimeLeft.wallClockLimit, 1440 * 60 / 2.5 )
def test_init_cern( self ):
rcMock = MagicMock()
retValues = ( S_OK(LSF_CERN_BQUEUES), S_ERROR(LSF_CERN_LSHOSTS_1), S_OK(LSF_CERN_LSINFO), S_OK(LSF_LSHOSTS) )
rcMock.side_effect = retValues
sourceMock = MagicMock( return_value=S_ERROR( "no lsf.sh" ) )
with patch( "DIRAC.Core.Utilities.TimeLeft.LSFTimeLeft.runCommand", new=rcMock ), \
patch.dict( os.environ, {'LSB_HOSTS': 'b6688e710f', 'LSF_ENVDIR': "/dev/null"} ), \
patch( "os.path.isfile", new=MagicMock( return_value=True ) ), \
patch( "DIRAC.Core.Utilities.TimeLeft.LSFTimeLeft.sourceEnv", new=sourceMock ):
lsfTimeLeft = LSFTimeLeft()
normrefExpected = 1.0
hostnormExpected = 2.5
self.assertEqual( lsfTimeLeft.cpuLimit, 10080 * 60 / hostnormExpected / normrefExpected )
self.assertEqual( lsfTimeLeft.wallClockLimit, 30240 * 60 / hostnormExpected / normrefExpected )
self.assertEqual( lsfTimeLeft.cpuRef, "KSI2K" )
self.assertEqual( lsfTimeLeft.normRef, normrefExpected )
self.assertEqual( lsfTimeLeft.hostNorm, hostnormExpected )
| gpl-3.0 | 7,858,330,069,552,106,000 | 31.910256 | 141 | 0.667316 | false |
thkoch2001/rur-ple | rur_py/robot_factory.py | 4 | 17165 | # -*- coding: utf-8
""" RUR-PLE: Roberge's Used Robot - a Python Learning Environment
robot_factory.py - see description below
Version 0.8.7
Author: Andre Roberge Copyright 2005, 2006
[email protected]
robot_factory includes four classes:
Robot_brain1(), which incorporates the basic logic of the robot, including
a limited ability to sense the outside world.
Robot_brain2(), which incorporates an "advanced" version, capable of more
remote sensing ability, akin to Pattis's original Karel as well as
turning right directly.
Used_robot() subclasses Robot_brain1 and adds a body (i.e. physical
representation: images for visual display, time delay for execution
of movements, etc.)
New_improved_robot() subclasses both Used_robot (for physical display) and
Robot_brain2 for the improved logic.
"""
import dialogs
import images
from images import getImage
import random
from translation import _
import conf
from sound import play
import os
#---------------------------------------------------------------------------
class Robot_brain1(object):
_directions = [ (0, 1), (-1, 0), (0, -1), (1, 0) ]
_orient_dict = { 'E': 3, 'S': 2, 'W': 1, 'N': 0}
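    # _directions is indexed by self._facing: 0 = North (0, 1), 1 = West (-1, 0),
    # 2 = South (0, -1), 3 = East (1, 0), matching the values in _orient_dict.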
def __init__(self, parent=None, avenues=1, streets=1,
orient_key = 'E', beepers=0):
# East; value by default - tied to lessons
self.parent = parent
#--- Basic variables
self._beeper_bag = beepers
self._x = avenues
self._y = streets
self._facing = self._orient_dict[orient_key.upper()]
#--- "public" getter
def getPos(self):
""" returns current robot position; intended to be
accessed by user-defined program."""
return self._x, self._y
#--- "private" getters and setters
def _setPos(self, x, y):
""" sets robot position (teleport); intended to be
accessed only through GUI, not by user-defined program."""
self._x = x
self._y = y
def _getOrientation(self):
""" returns orientation (0, 1, 2, 3); needed for drawing trace.
Not intended to be accessed by user-defined program."""
return self._facing
def _getOrientationKey(self):
""" returns orientation key ('N', 'E', 'S', W').
Not intended to be accessed by user-defined program."""
for key in self._orient_dict.keys():
if self._orient_dict[key] == self._facing:
return key
def _getInfoTuple(self):
""" returns (avenue, street, orientation, beepers).
Not intended to be accessed by user-defined program."""
return self._x, self._y, self._getOrientationKey(), self._beeper_bag
def _getInfoString(self):
""" returns (avenue, street, orientation, beepers).
Not intended to be accessed by user-defined program."""
return str(self._getInfoTuple())
#--- built-in tests
def front_is_clear(self):
''' True if no wall or border in front of robot'''
col = 2*self._x - 1
row = 2*self._y - 1
xx, yy = self._directions[self._facing]
return self.parent.isClear(col+xx, row+yy)
def facing_north(self):
''' True if Robot facing north'''
if self._facing == 0:
return True
else:
return False
def any_beepers_in_beeper_bag(self):
'''True if some beepers are left in Robot's bag'''
if self._beeper_bag == 0:
return False
else:
return True
def next_to_a_beeper(self):
'''True if beepers are present at current robot position.'''
if (self._x, self._y) in self.parent.beepers_dict:
return True
else:
return False
def left_is_clear(self):
'''Returns True if no walls or borders are to the immediate left
of the robot.'''
col = 2*self._x - 1
row = 2*self._y - 1
facing = self._facing + 1
facing %= 4
xx, yy = self._directions[facing]
if (col+xx, row+yy) in self.parent.walls_list:
return False
if (col+xx, row+yy) in self.parent.borders:
return False
else:
return True
def right_is_clear(self):
'''Returns True if no walls or borders are to the immediate
right of the robot.'''
col = 2*self._x - 1
row = 2*self._y - 1
facing = self._facing + 3
facing %= 4
xx, yy = self._directions[facing]
if (col+xx, row+yy) in self.parent.walls_list:
return False
if (col+xx, row+yy) in self.parent.borders:
return False
else:
return True
#--- Actions
def move(self):
'''Robot moves one street/avenue in direction where it is facing'''
if self.front_is_clear():
xx, yy = self._directions[self._facing]
self._x += xx
self._y += yy
if self.next_to_a_beeper():
self.at_beeper(self._x, self._y)
else:
mesg = _("""That move really hurt!
Please, make sure that there is no wall in front of me!""")
raise dialogs.HitWallException(mesg)
def turn_off(self):
mesg = _("I obey your command:\n turning myself off.")
raise dialogs.NormalEnd(mesg)
def turn_left(self):
'''Robot turns left by 90 degrees.'''
self._facing += 1
self._facing %= 4
def put_beeper(self):
'''Robot put one beeper down at current location.'''
if self.any_beepers_in_beeper_bag():
self._beeper_bag -= 1
self.parent.addOneBeeper(self._x, self._y)
else:
mesg = _("put_beeper() failed.\n I am not carrying any beepers.")
raise dialogs.PutBeeperException(mesg)
def pick_beeper(self):
'''Robot picks one beeper up at current location.'''
if self.next_to_a_beeper():
self.parent.removeOneBeeper(self._x, self._y)
self._beeper_bag += 1
else:
mesg = _("""pick_beeper failed.
I must be next to a beeper before I can pick it up.""")
raise dialogs.PickBeeperException(mesg)
def at_beeper(self, x, y):
'''Notifies interested parties about robot
being at a beeper.
'''
onbeepersound = os.path.join(conf.getSettings().SOUNDS_DIR, 'beep.wav')
if os.path.isfile(onbeepersound):
play(onbeepersound)
class Robot_brain2(Robot_brain1):
def __init__(self, parent=None, avenues=1, streets=1, orient_key = 'E',
beepers=0):
Robot_brain1.__init__(self, parent, avenues, streets,
orient_key, beepers)
#--- Additional built-in tests
def facing_east(self):
if self._facing == 3:
return True
else:
return False
def facing_south(self):
if self._facing == 2:
return True
else:
return False
def facing_west(self):
if self._facing == 1:
return True
else:
return False
#--- Additional action
def turn_right(self):
self._facing += 3
self._facing %= 4
def roll_dice(self, n=6):
return random.randint(1, n)
class Used_robot(Robot_brain1):
""" Adds physical attributes """
def __init__(self, avenues=1, streets=1, orient_key = 'E',
beepers=0, name = 'robot', colour = 'grey', parent=None):
Robot_brain1.__init__(self, parent, avenues, streets,
orient_key, beepers)
settings = conf.getSettings()
self._delay = 0.3
self.name = name
self.colour = colour.lower()
# The following are used to follow the robot trail
self.line_trace = []
self.set_trace_style(1, "sea green") # default
#--- Robot images
# create a list of four objects
if self.colour == 'yellow':
self._image = [getImage(images.YELLOW_ROBOT_N), getImage(images.YELLOW_ROBOT_W),
getImage(images.YELLOW_ROBOT_S), getImage(images.YELLOW_ROBOT_E)]
elif self.colour == 'blue':
self._image = [getImage(images.BLUE_ROBOT_N), getImage(images.BLUE_ROBOT_W),
getImage(images.BLUE_ROBOT_S), getImage(images.BLUE_ROBOT_E)]
elif self.colour == 'light blue':
self._image = [getImage(images.LIGHT_BLUE_ROBOT_N), getImage(images.LIGHT_BLUE_ROBOT_W),
getImage(images.LIGHT_BLUE_ROBOT_S), getImage(images.LIGHT_BLUE_ROBOT_E)]
elif self.colour == 'purple':
self._image = [getImage(images.PURPLE_ROBOT_N), getImage(images.PURPLE_ROBOT_W),
getImage(images.PURPLE_ROBOT_S), getImage(images.PURPLE_ROBOT_E)]
elif self.colour == 'green':
self._image = [getImage(images.GREEN_ROBOT_N), getImage(images.GREEN_ROBOT_W),
getImage(images.GREEN_ROBOT_S), getImage(images.GREEN_ROBOT_E)]
else:
self._image = [getImage(images.GREY_ROBOT_N), getImage(images.GREY_ROBOT_W),
getImage(images.GREY_ROBOT_S), getImage(images.GREY_ROBOT_E)]
self.imageOffset = (settings.SCREEN[7], settings.SCREEN[8])
# image size (x, y) [all images equal]; for use in automatic scrolling
self._image_size = self._image[0].GetWidth(), \
self._image[0].GetHeight()
## Note: for some reason, GetSize() did not work using
## wxPython 2.4, which is why I used GetWidth() and GetHeight()
# selecting the right image based on initial orientation
self.robot_image = self._image[self._facing]
#--- Action over-riden to handle images
def turn_left(self):
'''Robot turns left by 90 degrees, and image is updated.'''
Robot_brain1.turn_left(self)
self.robot_image = self._image[self._facing]
def set_trace_style(self, style, colour = "sea green"):
if style == 1:
self.trace_offset = [(3, 3), (3, -3), (-3, -3), (-3, 3)]
self.trace_width = 1
elif style == 2:
self.trace_offset = [(5, 5), (5, -5), (-5, -5), (-5, 5)]
self.trace_width = 1
elif style == 3:
self.trace_offset = [(3, 3), (3, -3), (-3, -3), (-3, 3)]
self.trace_width = 3
elif style == 4:
self.trace_offset = [(5, 5), (5, -5), (-5, -5), (-5, 5)]
self.trace_width = 3
elif style == 5:
self.trace_offset = [(0, 0), (0, 0), (0, 0), (0, 0)]
self.trace_width = 3
else:
self.trace_offset = [(0, 0), (0, 0), (0, 0), (0, 0)]
self.trace_width = 0
self.trace_style = style
self.trace_colour = colour
def get_trace_style(self, style):
if style == 1:
trace_offset = [(3, 3), (3, -3), (-3, -3), (-3, 3)]
trace_width = 1
elif style == 2:
trace_offset = [(5, 5), (5, -5), (-5, -5), (-5, 5)]
trace_width = 1
elif style == 3:
trace_offset = [(3, 3), (3, -3), (-3, -3), (-3, 3)]
trace_width = 3
elif style == 4:
trace_offset = [(5, 5), (5, -5), (-5, -5), (-5, 5)]
trace_width = 3
elif style == 5:
trace_offset = [(0, 0), (0, 0), (0, 0), (0, 0)]
trace_width = 3
else:
trace_offset = [(0, 0), (0, 0), (0, 0), (0, 0)]
trace_width = 0
return trace_width, trace_offset
def set_delay(self, delay):
'''Sets the delay value between robot actions.'''
if delay >= 0 and delay <= 10:
self._delay = delay
else:
mesg = _("""Setting delay failed.
Accepted values are between 0 and 10.""")
dialogs.messageDialog(mesg, _("Error"))
def get_delay(self):
return self._delay
delay = property(get_delay, set_delay, None, "Time between robot actions")
class New_improved_robot(Robot_brain2):
""" Adds physical attributes and better logic."""
def __init__(self, avenues=1, streets=1, orient_key = 'E',
beepers=0, name = 'robot', colour = 'grey', parent=None):
Robot_brain2.__init__(self, parent, avenues, streets,
orient_key, beepers)
        settings = conf.getSettings()
        self._delay = 0.3
self.name = name
self.colour = colour.lower()
# The following are used to follow the robot trail
self.line_trace = []
self.set_trace_style(1, "sea green") # default
#--- Robot images
# create a list of four objects
if self.colour == 'yellow':
self._image = [getImage(images.YELLOW_ROBOT_N), getImage(images.YELLOW_ROBOT_W),
getImage(images.YELLOW_ROBOT_S), getImage(images.YELLOW_ROBOT_E)]
elif self.colour == 'blue':
self._image = [getImage(images.BLUE_ROBOT_N), getImage(images.BLUE_ROBOT_W),
getImage(images.BLUE_ROBOT_S), getImage(images.BLUE_ROBOT_E)]
elif self.colour == 'light blue':
self._image = [getImage(images.LIGHT_BLUE_ROBOT_N), getImage(images.LIGHT_BLUE_ROBOT_W),
getImage(images.LIGHT_BLUE_ROBOT_S), getImage(images.LIGHT_BLUE_ROBOT_E)]
elif self.colour == 'purple':
self._image = [getImage(images.PURPLE_ROBOT_N), getImage(images.PURPLE_ROBOT_W),
getImage(images.PURPLE_ROBOT_S), getImage(images.PURPLE_ROBOT_E)]
elif self.colour == 'green':
self._image = [getImage(images.GREEN_ROBOT_N), getImage(images.GREEN_ROBOT_W),
getImage(images.GREEN_ROBOT_S), getImage(images.GREEN_ROBOT_E)]
else:
self._image = [getImage(images.GREY_ROBOT_N), getImage(images.GREY_ROBOT_W),
getImage(images.GREY_ROBOT_S), getImage(images.GREY_ROBOT_E)]
self.imageOffset = (settings.SCREEN[7], settings.SCREEN[8])
# image size (x, y) [all images equal]; for use in automatic scrolling
self._image_size = self._image[0].GetWidth(), \
self._image[0].GetHeight()
## Note: for some reason, GetSize() did not work using
## wxPython 2.4, which is why I used GetWidth() and GetHeight()
# selecting the right image based on initial orientation
self.robot_image = self._image[self._facing]
#--- Action over-riden to handle images
def turn_left(self):
'''Robot turns left by 90 degrees, and image is updated.'''
Robot_brain1.turn_left(self)
self.robot_image = self._image[self._facing]
def set_trace_style(self, style, colour = "sea green"):
if style == 1:
self.trace_offset = [(3, 3), (3, -3), (-3, -3), (-3, 3)]
self.trace_width = 1
elif style == 2:
self.trace_offset = [(5, 5), (5, -5), (-5, -5), (-5, 5)]
self.trace_width = 1
elif style == 3:
self.trace_offset = [(3, 3), (3, -3), (-3, -3), (-3, 3)]
self.trace_width = 3
elif style == 4:
self.trace_offset = [(5, 5), (5, -5), (-5, -5), (-5, 5)]
self.trace_width = 3
elif style == 5:
self.trace_offset = [(0, 0), (0, 0), (0, 0), (0, 0)]
self.trace_width = 3
else:
self.trace_offset = [(0, 0), (0, 0), (0, 0), (0, 0)]
self.trace_width = 0
self.trace_style = style
self.trace_colour = colour
def get_trace_style(self, style):
if style == 1:
trace_offset = [(3, 3), (3, -3), (-3, -3), (-3, 3)]
trace_width = 1
elif style == 2:
trace_offset = [(5, 5), (5, -5), (-5, -5), (-5, 5)]
trace_width = 1
elif style == 3:
trace_offset = [(3, 3), (3, -3), (-3, -3), (-3, 3)]
trace_width = 3
elif style == 4:
trace_offset = [(5, 5), (5, -5), (-5, -5), (-5, 5)]
trace_width = 3
elif style == 5:
trace_offset = [(0, 0), (0, 0), (0, 0), (0, 0)]
trace_width = 3
else:
trace_offset = [(0, 0), (0, 0), (0, 0), (0, 0)]
trace_width = 0
return trace_width, trace_offset
def set_delay(self, delay):
'''Sets the delay value between robot actions.'''
if delay >= 0 and delay <= 10:
self._delay = delay
else:
mesg = _("""Setting delay failed.
Accepted values are between 0 and 10.""")
dialogs.messageDialog(mesg, _("Error"))
def get_delay(self):
return self._delay
delay = property(get_delay, set_delay, None, "Time between robot actions")
# TODO: design "better looking" images for this robot.
#--- Action over-riden to handle images
def turn_right(self):
'''Robot turns right by 90 degrees, and image is updated.'''
Robot_brain2.turn_right(self)
self.robot_image = self._image[self._facing]
| gpl-2.0 | 2,743,357,906,677,990,000 | 36.15368 | 100 | 0.543431 | false |
xzturn/tensorflow | tensorflow/python/keras/engine/data_adapter_test.py | 2 | 42927 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DataAdapter tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class DummyArrayLike(object):
"""Dummy array-like object."""
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, key):
return self.data[key]
@property
def shape(self):
return self.data.shape
@property
def dtype(self):
return self.data.dtype
def fail_on_convert(x, **kwargs):
_ = x
_ = kwargs
raise TypeError('Cannot convert DummyArrayLike to a tensor')
ops.register_tensor_conversion_function(DummyArrayLike, fail_on_convert)
class DataAdapterTestBase(keras_parameterized.TestCase):
def setUp(self):
super(DataAdapterTestBase, self).setUp()
self.batch_size = 5
self.numpy_input = np.zeros((50, 10))
self.numpy_target = np.ones(50)
self.tensor_input = constant_op.constant(2.0, shape=(50, 10))
self.tensor_target = array_ops.ones((50,))
self.arraylike_input = DummyArrayLike(self.numpy_input)
self.arraylike_target = DummyArrayLike(self.numpy_target)
self.dataset_input = dataset_ops.DatasetV2.from_tensor_slices(
(self.numpy_input, self.numpy_target)).shuffle(50).batch(
self.batch_size)
def generator():
while True:
yield (np.zeros((self.batch_size, 10)), np.ones(self.batch_size))
self.generator_input = generator()
self.iterator_input = data_utils.threadsafe_generator(generator)()
self.sequence_input = TestSequence(batch_size=self.batch_size,
feature_shape=10)
self.model = keras.models.Sequential(
[keras.layers.Dense(8, input_shape=(10,), activation='softmax')])
class TestSequence(data_utils.Sequence):
def __init__(self, batch_size, feature_shape):
self.batch_size = batch_size
self.feature_shape = feature_shape
def __getitem__(self, item):
return (np.zeros((self.batch_size, self.feature_shape)),
np.ones((self.batch_size,)))
def __len__(self):
return 10
class TensorLikeDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super(TensorLikeDataAdapterTest, self).setUp()
self.adapter_cls = data_adapter.TensorLikeDataAdapter
def test_can_handle_numpy(self):
self.assertTrue(self.adapter_cls.can_handle(self.numpy_input))
self.assertTrue(
self.adapter_cls.can_handle(self.numpy_input, self.numpy_target))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
def test_size_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
def test_batch_size_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5)
self.assertEqual(adapter.batch_size(), 5)
def test_partial_batch_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=4)
self.assertEqual(adapter.get_size(), 13) # 50/4
self.assertTrue(adapter.has_partial_batch())
self.assertEqual(adapter.partial_batch_size(), 2)
def test_epochs(self):
num_epochs = 3
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5, epochs=num_epochs)
ds_iter = iter(adapter.get_dataset())
num_batches_per_epoch = self.numpy_input.shape[0] // 5
for _ in range(num_batches_per_epoch * num_epochs):
next(ds_iter)
with self.assertRaises(StopIteration):
next(ds_iter)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_numpy(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.numpy_input, self.numpy_target, batch_size=5)
def test_can_handle_pandas(self):
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
self.skipTest('Skipping test because pandas is not installed.')
self.assertTrue(self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)))
self.assertTrue(
self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)[0]))
self.assertTrue(
self.adapter_cls.can_handle(
pd.DataFrame(self.numpy_input),
pd.DataFrame(self.numpy_input)[0]))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_pandas(self):
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
self.skipTest('Skipping test because pandas is not installed.')
input_a = keras.Input(shape=(3,), name='input_a')
input_b = keras.Input(shape=(3,), name='input_b')
input_c = keras.Input(shape=(1,), name='input_b')
x = keras.layers.Dense(4, name='dense_1')(input_a)
y = keras.layers.Dense(3, name='dense_2')(input_b)
z = keras.layers.Dense(1, name='dense_3')(input_c)
model_1 = keras.Model(inputs=input_a, outputs=x)
model_2 = keras.Model(inputs=[input_a, input_b], outputs=[x, y])
model_3 = keras.Model(inputs=input_c, outputs=z)
model_1.compile(optimizer='rmsprop', loss='mse')
model_2.compile(optimizer='rmsprop', loss='mse')
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
input_a_df = pd.DataFrame(input_a_np)
input_b_df = pd.DataFrame(input_b_np)
output_a_df = pd.DataFrame(np.random.random((10, 4)))
output_b_df = pd.DataFrame(np.random.random((10, 3)))
model_1.fit(input_a_df,
output_a_df)
model_2.fit([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.fit([input_a_df],
[output_a_df])
model_1.fit({'input_a': input_a_df},
output_a_df)
model_2.fit({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
model_1.evaluate(input_a_df,
output_a_df)
model_2.evaluate([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.evaluate([input_a_df],
[output_a_df])
model_1.evaluate({'input_a': input_a_df},
output_a_df)
model_2.evaluate({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
# Verify predicting on pandas vs numpy returns the same result
predict_1_pandas = model_1.predict(input_a_df)
predict_2_pandas = model_2.predict([input_a_df, input_b_df])
predict_3_pandas = model_3.predict(input_a_df[0])
predict_1_numpy = model_1.predict(input_a_np)
predict_2_numpy = model_2.predict([input_a_np, input_b_np])
predict_3_numpy = model_3.predict(np.asarray(input_a_df[0]))
self.assertAllClose(predict_1_numpy, predict_1_pandas)
self.assertAllClose(predict_2_numpy, predict_2_pandas)
self.assertAllClose(predict_3_numpy, predict_3_pandas)
# Extra ways to pass in dataframes
model_1.predict([input_a_df])
model_1.predict({'input_a': input_a_df})
model_2.predict({'input_a': input_a_df, 'input_b': input_b_df})
def test_can_handle(self):
self.assertTrue(self.adapter_cls.can_handle(self.tensor_input))
self.assertTrue(
self.adapter_cls.can_handle(self.tensor_input, self.tensor_target))
self.assertFalse(self.adapter_cls.can_handle(self.arraylike_input))
self.assertFalse(
self.adapter_cls.can_handle(self.arraylike_input,
self.arraylike_target))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.tensor_input, self.tensor_target, batch_size=5)
def test_size(self):
adapter = self.adapter_cls(
self.tensor_input, self.tensor_target, batch_size=5)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
def test_shuffle_correctness(self):
with context.eager_mode():
num_samples = 100
batch_size = 32
x = np.arange(num_samples)
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle=True, epochs=2)
def _get_epoch(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter)[0].numpy())
return np.concatenate(ds_data)
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(second_epoch_data))
def test_batch_shuffle_correctness(self):
with context.eager_mode():
num_samples = 100
batch_size = 6
x = np.arange(num_samples)
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle='batch', epochs=2)
def _get_epoch_batches(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter)[0].numpy())
return ds_data
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_batch_data = _get_epoch_batches(ds_iter)
epoch_data = np.concatenate(epoch_batch_data)
def _verify_batch(batch):
# Verify that a batch contains only contiguous data, and that it has
# been shuffled.
shuffled_batch = np.sort(batch)
self.assertNotAllClose(batch, shuffled_batch)
for i in range(1, len(batch)):
self.assertEqual(shuffled_batch[i-1] + 1, shuffled_batch[i])
# Assert that the data within each batch remains contiguous
for batch in epoch_batch_data:
_verify_batch(batch)
# Check that individual batches are unshuffled
# Check that shuffling occurred.
self.assertNotAllClose(x, epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_batch_data = _get_epoch_batches(ds_iter)
second_epoch_data = np.concatenate(second_epoch_batch_data)
# Assert that the data within each batch remains contiguous
for batch in second_epoch_batch_data:
_verify_batch(batch)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(second_epoch_data))
@parameterized.named_parameters(
('batch_size_5', 5, None, 5),
('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence
('steps_1', None, 1, 50),
('steps_4', None, 4, 13),
)
def test_batch_size(self, batch_size_in, steps, batch_size_out):
adapter = self.adapter_cls(
self.tensor_input, self.tensor_target, batch_size=batch_size_in,
steps=steps)
self.assertEqual(adapter.batch_size(), batch_size_out)
@parameterized.named_parameters(
('batch_size_5', 5, None, 10, 0),
('batch_size_4', 4, None, 13, 2),
('steps_1', None, 1, 1, 0),
('steps_5', None, 5, 5, 0),
('steps_4', None, 4, 4, 11),
)
def test_partial_batch(
self, batch_size_in, steps, size, partial_batch_size):
adapter = self.adapter_cls(
self.tensor_input, self.tensor_target, batch_size=batch_size_in,
steps=steps)
self.assertEqual(adapter.get_size(), size) # 50/steps
self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size))
self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None)
class GenericArrayLikeDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super(GenericArrayLikeDataAdapterTest, self).setUp()
self.adapter_cls = data_adapter.GenericArrayLikeDataAdapter
def test_can_handle_some_numpy(self):
self.assertTrue(self.adapter_cls.can_handle(
self.arraylike_input))
self.assertTrue(
self.adapter_cls.can_handle(self.arraylike_input,
self.arraylike_target))
# Because adapters are mutually exclusive, don't handle cases
# where all the data is numpy or an eagertensor
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(
self.adapter_cls.can_handle(self.numpy_input,
self.numpy_target))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertFalse(
self.adapter_cls.can_handle(self.tensor_input, self.tensor_target))
# But do handle mixes that include generic arraylike data
self.assertTrue(
self.adapter_cls.can_handle(self.numpy_input,
self.arraylike_target))
self.assertTrue(
self.adapter_cls.can_handle(self.arraylike_input,
self.numpy_target))
self.assertTrue(
self.adapter_cls.can_handle(self.arraylike_input,
self.tensor_target))
self.assertTrue(
self.adapter_cls.can_handle(self.tensor_input,
self.arraylike_target))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
def test_size(self):
adapter = self.adapter_cls(
self.arraylike_input,
self.arraylike_target, batch_size=5)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
def test_epochs(self):
num_epochs = 3
adapter = self.adapter_cls(
self.arraylike_input,
self.numpy_target, batch_size=5, epochs=num_epochs)
ds_iter = iter(adapter.get_dataset())
num_batches_per_epoch = self.numpy_input.shape[0] // 5
for _ in range(num_batches_per_epoch * num_epochs):
next(ds_iter)
with self.assertRaises(StopIteration):
next(ds_iter)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
# First verify that DummyArrayLike can't be converted to a Tensor
with self.assertRaises(TypeError):
ops.convert_to_tensor_v2(self.arraylike_input)
# Then train on the array like.
# It should not be converted to a tensor directly (which would force it into
# memory), only the sliced data should be converted.
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.arraylike_input,
self.arraylike_target, batch_size=5)
self.model.fit(self.arraylike_input,
self.arraylike_target,
shuffle=True, batch_size=5)
self.model.fit(self.arraylike_input,
self.arraylike_target,
shuffle='batch', batch_size=5)
self.model.evaluate(self.arraylike_input,
self.arraylike_target, batch_size=5)
self.model.predict(self.arraylike_input, batch_size=5)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_numpy_target(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.arraylike_input,
self.numpy_target, batch_size=5)
self.model.fit(self.arraylike_input,
self.numpy_target, shuffle=True,
batch_size=5)
self.model.fit(self.arraylike_input,
self.numpy_target, shuffle='batch',
batch_size=5)
self.model.evaluate(self.arraylike_input,
self.numpy_target, batch_size=5)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_tensor_target(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.arraylike_input,
self.tensor_target, batch_size=5)
self.model.fit(self.arraylike_input,
self.tensor_target, shuffle=True,
batch_size=5)
self.model.fit(self.arraylike_input,
self.tensor_target, shuffle='batch',
batch_size=5)
self.model.evaluate(self.arraylike_input,
self.tensor_target, batch_size=5)
def test_shuffle_correctness(self):
with context.eager_mode():
num_samples = 100
batch_size = 32
x = DummyArrayLike(np.arange(num_samples))
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle=True, epochs=2)
def _get_epoch(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter)[0].numpy())
return np.concatenate(ds_data)
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(second_epoch_data))
def test_batch_shuffle_correctness(self):
with context.eager_mode():
num_samples = 100
batch_size = 6
x = DummyArrayLike(np.arange(num_samples))
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle='batch', epochs=2)
def _get_epoch_batches(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter)[0].numpy())
return ds_data
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_batch_data = _get_epoch_batches(ds_iter)
epoch_data = np.concatenate(epoch_batch_data)
def _verify_batch(batch):
# Verify that a batch contains only contiguous data, but that it has
# been shuffled.
shuffled_batch = np.sort(batch)
self.assertNotAllClose(batch, shuffled_batch)
for i in range(1, len(batch)):
self.assertEqual(shuffled_batch[i-1] + 1, shuffled_batch[i])
# Assert that the data within each batch is shuffled contiguous data
for batch in epoch_batch_data:
_verify_batch(batch)
# Check that individual batches are unshuffled
# Check that shuffling occurred.
self.assertNotAllClose(x, epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_batch_data = _get_epoch_batches(ds_iter)
second_epoch_data = np.concatenate(second_epoch_batch_data)
# Assert that the data within each batch remains contiguous
for batch in second_epoch_batch_data:
_verify_batch(batch)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(second_epoch_data))
@parameterized.named_parameters(
('batch_size_5', 5, None, 5),
('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence
('steps_1', None, 1, 50),
('steps_4', None, 4, 13),
)
def test_batch_size(self, batch_size_in, steps, batch_size_out):
adapter = self.adapter_cls(
self.arraylike_input,
self.arraylike_target, batch_size=batch_size_in,
steps=steps)
self.assertEqual(adapter.batch_size(), batch_size_out)
@parameterized.named_parameters(
('batch_size_5', 5, None, 10, 0),
('batch_size_4', 4, None, 13, 2),
('steps_1', None, 1, 1, 0),
('steps_5', None, 5, 5, 0),
('steps_4', None, 4, 4, 11),
)
def test_partial_batch(
self, batch_size_in, steps, size, partial_batch_size):
adapter = self.adapter_cls(
self.arraylike_input, self.arraylike_target,
batch_size=batch_size_in,
steps=steps)
self.assertEqual(adapter.get_size(), size) # 50/steps
self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size))
self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None)
class DatasetAdapterTest(DataAdapterTestBase):
def setUp(self):
super(DatasetAdapterTest, self).setUp()
self.adapter_cls = data_adapter.DatasetAdapter
def test_can_handle(self):
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertTrue(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
dataset = self.adapter_cls(self.dataset_input).get_dataset()
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(dataset)
def test_size(self):
adapter = self.adapter_cls(self.dataset_input)
self.assertIsNone(adapter.get_size())
def test_batch_size(self):
adapter = self.adapter_cls(self.dataset_input)
self.assertIsNone(adapter.batch_size())
def test_partial_batch(self):
adapter = self.adapter_cls(self.dataset_input)
self.assertFalse(adapter.has_partial_batch())
self.assertIsNone(adapter.partial_batch_size())
def test_invalid_targets_argument(self):
with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'):
self.adapter_cls(self.dataset_input, y=self.dataset_input)
def test_invalid_sample_weights_argument(self):
with self.assertRaisesRegexp(ValueError,
r'`sample_weight` argument is not supported'):
self.adapter_cls(self.dataset_input, sample_weights=self.dataset_input)
class GeneratorDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super(GeneratorDataAdapterTest, self).setUp()
self.adapter_cls = data_adapter.GeneratorDataAdapter
def test_can_handle(self):
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertTrue(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.generator_input, steps_per_epoch=10)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@test_util.run_v2_only
@data_utils.dont_use_multiprocessing_pool
def test_with_multiprocessing_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.iterator_input, workers=1, use_multiprocessing=True,
max_queue_size=10, steps_per_epoch=10)
# Fit twice to ensure there isn't any duplication that prevent the worker
# from starting.
self.model.fit(self.iterator_input, workers=1, use_multiprocessing=True,
max_queue_size=10, steps_per_epoch=10)
def test_size(self):
adapter = self.adapter_cls(self.generator_input)
self.assertIsNone(adapter.get_size())
def test_batch_size(self):
adapter = self.adapter_cls(self.generator_input)
self.assertEqual(adapter.batch_size(), None)
self.assertEqual(adapter.representative_batch_size(), 5)
def test_partial_batch(self):
adapter = self.adapter_cls(self.generator_input)
self.assertFalse(adapter.has_partial_batch())
self.assertIsNone(adapter.partial_batch_size())
def test_invalid_targets_argument(self):
with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'):
self.adapter_cls(self.generator_input, y=self.generator_input)
def test_invalid_sample_weights_argument(self):
with self.assertRaisesRegexp(ValueError,
r'`sample_weight` argument is not supported'):
self.adapter_cls(
self.generator_input, sample_weights=self.generator_input)
def test_not_shuffled(self):
def generator():
for i in range(10):
yield np.ones((1, 1)) * i
adapter = self.adapter_cls(generator(), shuffle=True)
with context.eager_mode():
for i, data in enumerate(adapter.get_dataset()):
self.assertEqual(i, data[0].numpy().flatten())
class KerasSequenceAdapterTest(DataAdapterTestBase):
def setUp(self):
super(KerasSequenceAdapterTest, self).setUp()
self.adapter_cls = data_adapter.KerasSequenceAdapter
def test_can_handle(self):
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertTrue(self.adapter_cls.can_handle(self.sequence_input))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.sequence_input)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@test_util.run_v2_only
@data_utils.dont_use_multiprocessing_pool
def test_with_multiprocessing_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True,
max_queue_size=10, steps_per_epoch=10)
# Fit twice to ensure there isn't any duplication that prevent the worker
# from starting.
self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True,
max_queue_size=10, steps_per_epoch=10)
def test_size(self):
adapter = self.adapter_cls(self.sequence_input)
self.assertEqual(adapter.get_size(), 10)
def test_batch_size(self):
adapter = self.adapter_cls(self.sequence_input)
self.assertEqual(adapter.batch_size(), None)
self.assertEqual(adapter.representative_batch_size(), 5)
def test_partial_batch(self):
adapter = self.adapter_cls(self.sequence_input)
self.assertFalse(adapter.has_partial_batch())
self.assertIsNone(adapter.partial_batch_size())
def test_invalid_targets_argument(self):
with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'):
self.adapter_cls(self.sequence_input, y=self.sequence_input)
def test_invalid_sample_weights_argument(self):
with self.assertRaisesRegexp(ValueError,
r'`sample_weight` argument is not supported'):
self.adapter_cls(self.sequence_input, sample_weights=self.sequence_input)
class DataHandlerTest(keras_parameterized.TestCase):
def test_finite_dataset_with_steps_per_epoch(self):
data = dataset_ops.Dataset.from_tensor_slices([0, 1, 2, 3]).batch(1)
# User can choose to only partially consume `Dataset`.
data_handler = data_adapter.DataHandler(
data, initial_epoch=0, epochs=2, steps_per_epoch=2)
self.assertEqual(data_handler.inferred_steps, 2)
self.assertFalse(data_handler._adapter.should_recreate_iterator())
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator).numpy())
returned_data.append(epoch_data)
self.assertEqual(returned_data, [[0, 1], [2, 3]])
def test_finite_dataset_without_steps_per_epoch(self):
data = dataset_ops.Dataset.from_tensor_slices([0, 1, 2]).batch(1)
data_handler = data_adapter.DataHandler(data, initial_epoch=0, epochs=2)
self.assertEqual(data_handler.inferred_steps, 3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator).numpy())
returned_data.append(epoch_data)
self.assertEqual(returned_data, [[0, 1, 2], [0, 1, 2]])
def test_finite_dataset_with_steps_per_epoch_exact_size(self):
data = dataset_ops.Dataset.from_tensor_slices([0, 1, 2, 3]).batch(1)
# If user specifies exact size of `Dataset` as `steps_per_epoch`,
# create a new iterator each epoch.
data_handler = data_adapter.DataHandler(
data, initial_epoch=0, epochs=2, steps_per_epoch=4)
self.assertTrue(data_handler._adapter.should_recreate_iterator())
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator).numpy())
returned_data.append(epoch_data)
self.assertEqual(returned_data, [[0, 1, 2, 3], [0, 1, 2, 3]])
def test_infinite_dataset_with_steps_per_epoch(self):
data = dataset_ops.Dataset.from_tensor_slices([0, 1, 2]).batch(1).repeat()
data_handler = data_adapter.DataHandler(
data, initial_epoch=0, epochs=2, steps_per_epoch=3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator).numpy())
returned_data.append(epoch_data)
self.assertEqual(returned_data, [[0, 1, 2], [0, 1, 2]])
def test_unknown_cardinality_dataset_with_steps_per_epoch(self):
ds = dataset_ops.DatasetV2.from_tensor_slices([0, 1, 2, 3, 4, 5, 6])
filtered_ds = ds.filter(lambda x: x < 4)
self.assertEqual(
cardinality.cardinality(filtered_ds).numpy(), cardinality.UNKNOWN)
# User can choose to only partially consume `Dataset`.
data_handler = data_adapter.DataHandler(
filtered_ds, initial_epoch=0, epochs=2, steps_per_epoch=2)
self.assertFalse(data_handler._adapter.should_recreate_iterator())
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(returned_data, [[0, 1], [2, 3]])
self.assertEqual(data_handler.inferred_steps, 2)
def test_unknown_cardinality_dataset_without_steps_per_epoch(self):
ds = dataset_ops.DatasetV2.from_tensor_slices([0, 1, 2, 3, 4, 5, 6])
filtered_ds = ds.filter(lambda x: x < 4)
self.assertEqual(
cardinality.cardinality(filtered_ds).numpy(), cardinality.UNKNOWN)
data_handler = data_adapter.DataHandler(
filtered_ds, initial_epoch=0, epochs=2)
self.assertEqual(data_handler.inferred_steps, None)
self.assertTrue(data_handler._adapter.should_recreate_iterator())
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
with data_handler.catch_stop_iteration():
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(returned_data, [[0, 1, 2, 3], [0, 1, 2, 3]])
self.assertEqual(data_handler.inferred_steps, 4)
def test_insufficient_data(self):
ds = dataset_ops.DatasetV2.from_tensor_slices([0, 1])
ds = ds.filter(lambda *args, **kwargs: True)
data_handler = data_adapter.DataHandler(
ds, initial_epoch=0, epochs=2, steps_per_epoch=3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
with data_handler.catch_stop_iteration():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertTrue(data_handler._insufficient_data)
self.assertEqual(returned_data, [[0, 1]])
def test_numpy(self):
x = np.array([0, 1, 2])
y = np.array([0, 2, 4])
sw = np.array([0, 4, 8])
data_handler = data_adapter.DataHandler(
x=x, y=y, sample_weight=sw, batch_size=1, epochs=2)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(returned_data,
[[(0, 0, 0), (1, 2, 4),
(2, 4, 8)], [(0, 0, 0), (1, 2, 4), (2, 4, 8)]])
def test_generator(self):
def generator():
for _ in range(2):
for step in range(3):
yield (ops.convert_to_tensor_v2([step]),)
data_handler = data_adapter.DataHandler(
generator(), epochs=2, steps_per_epoch=3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(returned_data, [[([0],), ([1],),
([2],)], [([0],), ([1],), ([2],)]])
def test_composite_tensor(self):
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 0], [2, 0]], values=[0, 1, 2], dense_shape=[3, 1])
data_handler = data_adapter.DataHandler(st, epochs=2, steps_per_epoch=3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(
nest.map_structure(sparse_ops.sparse_tensor_to_dense, returned_data))
self.assertEqual(returned_data, [[([0],), ([1],),
([2],)], [([0],), ([1],), ([2],)]])
def test_list_of_scalars(self):
data_handler = data_adapter.DataHandler([[0], [1], [2]],
epochs=2,
steps_per_epoch=3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(returned_data, [[([0],), ([1],),
([2],)], [([0],), ([1],), ([2],)]])
def test_class_weight_user_errors(self):
with self.assertRaisesRegexp(ValueError, 'to be a dict with keys'):
data_adapter.DataHandler(
x=[[0], [1], [2]],
y=[[2], [1], [0]],
batch_size=1,
sample_weight=[[1.], [2.], [4.]],
class_weight={
0: 0.5,
1: 1.,
3: 1.5 # Skips class `2`.
})
with self.assertRaisesRegexp(ValueError, 'with a single output'):
data_adapter.DataHandler(
x=np.ones((10, 1)),
y=[np.ones((10, 1)), np.zeros((10, 1))],
batch_size=2,
class_weight={
0: 0.5,
1: 1.,
2: 1.5
})
class TestValidationSplit(keras_parameterized.TestCase):
@parameterized.named_parameters(('numpy_arrays', True), ('tensors', False))
def test_validation_split_shuffled(self, use_numpy):
if use_numpy:
x = np.array([0, 1, 2, 3, 4])
y = np.array([0, 2, 4, 6, 8])
sw = np.array([0, 4, 8, 12, 16])
else:
x = ops.convert_to_tensor_v2([0, 1, 2, 3, 4])
y = ops.convert_to_tensor_v2([0, 2, 4, 6, 8])
sw = ops.convert_to_tensor_v2([0, 4, 8, 12, 16])
(train_x, train_y, train_sw), (val_x, val_y, val_sw) = (
data_adapter.train_validation_split((x, y, sw), validation_split=0.2))
self.assertEqual(int(train_x.shape[0]), 4)
self.assertEqual(int(train_y.shape[0]), 4)
self.assertEqual(int(train_sw.shape[0]), 4)
for i in range(4):
# Check that all arrays were shuffled in identical order.
self.assertEqual(2 * train_x[i].numpy(), train_y[i].numpy())
self.assertEqual(2 * train_y[i].numpy(), train_sw[i].numpy())
self.assertEqual(int(val_x.shape[0]), 1)
self.assertEqual(int(val_y.shape[0]), 1)
self.assertEqual(int(val_sw.shape[0]), 1)
for i in range(1):
# Check that all arrays were shuffled in identical order.
self.assertEqual(2 * train_x[i].numpy(), train_y[i].numpy())
self.assertEqual(2 * train_y[i].numpy(), train_sw[i].numpy())
# Check that arrays contain expected values.
self.assertEqual(
sorted(array_ops.concat([train_x, val_x], axis=0).numpy().tolist()),
sorted(ops.convert_to_tensor_v2(x).numpy().tolist()))
self.assertEqual(
sorted(array_ops.concat([train_y, val_y], axis=0).numpy().tolist()),
sorted(ops.convert_to_tensor_v2(y).numpy().tolist()))
self.assertEqual(
sorted(array_ops.concat([train_sw, val_sw], axis=0).numpy().tolist()),
sorted(ops.convert_to_tensor_v2(sw).numpy().tolist()))
@parameterized.named_parameters(('numpy_arrays', True), ('tensors', False))
def test_validation_split_unshuffled(self, use_numpy):
if use_numpy:
x = np.array([0, 1, 2, 3, 4])
y = np.array([0, 2, 4, 6, 8])
sw = np.array([0, 4, 8, 12, 16])
else:
x = ops.convert_to_tensor_v2([0, 1, 2, 3, 4])
y = ops.convert_to_tensor_v2([0, 2, 4, 6, 8])
sw = ops.convert_to_tensor_v2([0, 4, 8, 12, 16])
(train_x, train_y, train_sw), (val_x, val_y, val_sw) = (
data_adapter.train_validation_split((x, y, sw),
validation_split=0.2,
shuffle=False))
self.assertEqual(train_x.numpy().tolist(), [0, 1, 2, 3])
self.assertEqual(train_y.numpy().tolist(), [0, 2, 4, 6])
self.assertEqual(train_sw.numpy().tolist(), [0, 4, 8, 12])
self.assertEqual(val_x.numpy().tolist(), [4])
self.assertEqual(val_y.numpy().tolist(), [8])
self.assertEqual(val_sw.numpy().tolist(), [16])
def test_validation_split_user_error(self):
with self.assertRaisesRegexp(ValueError, 'is only supported for Tensors'):
data_adapter.train_validation_split(
lambda: np.ones((10, 1)), validation_split=0.2)
def test_validation_split_none(self):
train_sw, val_sw = data_adapter.train_validation_split(
None, validation_split=0.2)
self.assertIsNone(train_sw)
self.assertIsNone(val_sw)
(_, train_sw), (_, val_sw) = data_adapter.train_validation_split(
(np.ones((10, 1)), None), validation_split=0.2)
self.assertIsNone(train_sw)
self.assertIsNone(val_sw)
class TestUtils(keras_parameterized.TestCase):
def test_expand_1d_sparse_tensors_untouched(self):
st = sparse_tensor.SparseTensor(
indices=[[0], [10]], values=[1, 2], dense_shape=[10])
st = data_adapter.expand_1d(st)
self.assertEqual(st.shape.rank, 1)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| apache-2.0 | 5,481,293,635,844,655,000 | 38.747222 | 80 | 0.649451 | false |
ethanhlc/streamlink | src/streamlink/plugins/dogus.py | 2 | 3327 | from __future__ import print_function
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http
from streamlink.plugin.api import validate
from streamlink.stream import HDSStream
from streamlink.stream import HLSStream
class Dogus(Plugin):
"""
    Support for live streams from Dogus sites including startv, ntv, ntvspor, and kralmuzik
"""
url_re = re.compile(r"""https?://(?:www.)?
(?:
startv.com.tr/canli-yayin|
ntv.com.tr/canli-yayin/ntv|
ntvspor.net/canli-yayin|
kralmuzik.com.tr/tv/kral-tv|
kralmuzik.com.tr/tv/kral-pop-tv
)/?""", re.VERBOSE)
mobile_url_re = re.compile(r"""(?P<q>[\"'])(?P<url>https?://[^'"]*?/live/hls/[^'"]*?\?token=)
(?P<token>[^'"]*?)(?P=q)""", re.VERBOSE)
desktop_url_re = re.compile(r"""(?P<q>[\"'])(?P<url>https?://[^'"]*?/live/hds/[^'"]*?\?token=)
(?P<token>[^'"]*?)(?P=q)""", re.VERBOSE)
token_re = re.compile(r"""token=(?P<q>[\"'])(?P<token>[^'"]*?)(?P=q)""")
hds_schema = validate.Schema(validate.all(
{
"success": True,
"xtra": {
"url": validate.url(),
"use_akamai": bool
}
},
validate.get("xtra")
))
SWF_URL = "http://dygassets.akamaized.net/player2/plugins/flowplayer/flowplayer.httpstreaming-3.2.11.swf"
@classmethod
def can_handle_url(cls, url):
return cls.url_re.match(url) is not None
def _get_star_streams(self, desktop_url, mobile_url, token=""):
if token:
self.logger.debug("Opening stream with token: {}", token)
if mobile_url:
for _, s in HLSStream.parse_variant_playlist(self.session,
mobile_url + token,
headers={"Referer": self.url}).items():
yield "live", s
if desktop_url:
# get the HDS stream URL
res = http.get(desktop_url + token)
stream_data = http.json(res, schema=self.hds_schema)
for _, s in HDSStream.parse_manifest(self.session,
stream_data["url"],
pvswf=self.SWF_URL,
is_akamai=stream_data["use_akamai"],
headers={"Referer": self.url}).items():
yield "live", s
def _get_streams(self):
res = http.get(self.url)
mobile_url_m = self.mobile_url_re.search(res.text)
desktop_url_m = self.desktop_url_re.search(res.text)
desktop_url = desktop_url_m and desktop_url_m.group("url")
mobile_url = mobile_url_m and mobile_url_m.group("url")
token = (desktop_url_m and desktop_url_m.group("token")) or (mobile_url_m and mobile_url_m.group("token"))
if not token:
# if no token is in the url, try to find it else where in the page
token_m = self.token_re.search(res.text)
token = token_m and token_m.group("token")
return self._get_star_streams(desktop_url, mobile_url, token=token)
__plugin__ = Dogus
| bsd-2-clause | -1,471,256,863,183,394,000 | 38.141176 | 114 | 0.513375 | false |
VeNoMouS/Sick-Beard | lib/transmissionrpc/constants.py | 3 | 28010 | # -*- coding: utf-8 -*-
# Copyright (c) 2008-2013 Erik Svensson <[email protected]>
# Licensed under the MIT license.
import logging
from six import iteritems
LOGGER = logging.getLogger('transmissionrpc')
LOGGER.setLevel(logging.ERROR)
def mirror_dict(source):
"""
Creates a dictionary with all values as keys and all keys as values.
"""
source.update(dict((value, key) for key, value in iteritems(source)))
return source
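# Illustrative example: mirror_dict({'low': -1}) returns {'low': -1, -1: 'low'},
# so the constant maps below can be looked up by name or by numeric value.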
DEFAULT_PORT = 9091
DEFAULT_TIMEOUT = 30.0
TR_PRI_LOW = -1
TR_PRI_NORMAL = 0
TR_PRI_HIGH = 1
PRIORITY = mirror_dict({
'low' : TR_PRI_LOW,
'normal' : TR_PRI_NORMAL,
'high' : TR_PRI_HIGH
})
TR_RATIOLIMIT_GLOBAL = 0 # follow the global settings
TR_RATIOLIMIT_SINGLE = 1 # override the global settings, seeding until a certain ratio
TR_RATIOLIMIT_UNLIMITED = 2 # override the global settings, seeding regardless of ratio
RATIO_LIMIT = mirror_dict({
'global' : TR_RATIOLIMIT_GLOBAL,
'single' : TR_RATIOLIMIT_SINGLE,
'unlimited' : TR_RATIOLIMIT_UNLIMITED
})
TR_IDLELIMIT_GLOBAL = 0 # follow the global settings
TR_IDLELIMIT_SINGLE = 1 # override the global settings, seeding until a certain idle time
TR_IDLELIMIT_UNLIMITED = 2 # override the global settings, seeding regardless of activity
IDLE_LIMIT = mirror_dict({
'global' : TR_RATIOLIMIT_GLOBAL,
'single' : TR_RATIOLIMIT_SINGLE,
'unlimited' : TR_RATIOLIMIT_UNLIMITED
})
# A note on argument maps
# These maps are used to verify *-set methods. The information is structured in
# a tree.
# set +- <argument1> - [<type>, <added version>, <removed version>, <previous argument name>, <next argument name>, <description>]
# | +- <argument2> - [<type>, <added version>, <removed version>, <previous argument name>, <next argument name>, <description>]
# |
# get +- <argument1> - [<type>, <added version>, <removed version>, <previous argument name>, <next argument name>, <description>]
# +- <argument2> - [<type>, <added version>, <removed version>, <previous argument name>, <next argument name>, <description>]
# Arguments for torrent methods
TORRENT_ARGS = {
'get' : {
'activityDate': ('number', 1, None, None, None, 'Last time of upload or download activity.'),
'addedDate': ('number', 1, None, None, None, 'The date when this torrent was first added.'),
'announceResponse': ('string', 1, 7, None, None, 'The announce message from the tracker.'),
'announceURL': ('string', 1, 7, None, None, 'Current announce URL.'),
'bandwidthPriority': ('number', 5, None, None, None, 'Bandwidth priority. Low (-1), Normal (0) or High (1).'),
'comment': ('string', 1, None, None, None, 'Torrent comment.'),
'corruptEver': ('number', 1, None, None, None, 'Number of bytes of corrupt data downloaded.'),
'creator': ('string', 1, None, None, None, 'Torrent creator.'),
'dateCreated': ('number', 1, None, None, None, 'Torrent creation date.'),
'desiredAvailable': ('number', 1, None, None, None, 'Number of bytes avalable and left to be downloaded.'),
'doneDate': ('number', 1, None, None, None, 'The date when the torrent finished downloading.'),
'downloadDir': ('string', 4, None, None, None, 'The directory path where the torrent is downloaded to.'),
'downloadedEver': ('number', 1, None, None, None, 'Number of bytes of good data downloaded.'),
'downloaders': ('number', 4, 7, None, None, 'Number of downloaders.'),
'downloadLimit': ('number', 1, None, None, None, 'Download limit in Kbps.'),
'downloadLimited': ('boolean', 5, None, None, None, 'Download limit is enabled'),
'downloadLimitMode': ('number', 1, 5, None, None, 'Download limit mode. 0 means global, 1 means signle, 2 unlimited.'),
'error': ('number', 1, None, None, None, 'Kind of error. 0 means OK, 1 means tracker warning, 2 means tracker error, 3 means local error.'),
'errorString': ('number', 1, None, None, None, 'Error message.'),
'eta': ('number', 1, None, None, None, 'Estimated number of seconds left when downloading or seeding. -1 means not available and -2 means unknown.'),
'etaIdle': ('number', 15, None, None, None, 'Estimated number of seconds left until the idle time limit is reached. -1 means not available and -2 means unknown.'),
'files': ('array', 1, None, None, None, 'Array of file object containing key, bytesCompleted, length and name.'),
        'fileStats': ('array', 5, None, None, None, 'Array of file statistics containing bytesCompleted, wanted and priority.'),
'hashString': ('string', 1, None, None, None, 'Hashstring unique for the torrent even between sessions.'),
'haveUnchecked': ('number', 1, None, None, None, 'Number of bytes of partial pieces.'),
'haveValid': ('number', 1, None, None, None, 'Number of bytes of checksum verified data.'),
'honorsSessionLimits': ('boolean', 5, None, None, None, 'True if session upload limits are honored'),
'id': ('number', 1, None, None, None, 'Session unique torrent id.'),
'isFinished': ('boolean', 9, None, None, None, 'True if the torrent is finished. Downloaded and seeded.'),
'isPrivate': ('boolean', 1, None, None, None, 'True if the torrent is private.'),
'isStalled': ('boolean', 14, None, None, None, 'True if the torrent has stalled (been idle for a long time).'),
'lastAnnounceTime': ('number', 1, 7, None, None, 'The time of the last announcement.'),
        'lastScrapeTime': ('number', 1, 7, None, None, 'The time of the last successful scrape.'),
'leechers': ('number', 1, 7, None, None, 'Number of leechers.'),
'leftUntilDone': ('number', 1, None, None, None, 'Number of bytes left until the download is done.'),
'magnetLink': ('string', 7, None, None, None, 'The magnet link for this torrent.'),
'manualAnnounceTime': ('number', 1, None, None, None, 'The time until you manually ask for more peers.'),
'maxConnectedPeers': ('number', 1, None, None, None, 'Maximum of connected peers.'),
'metadataPercentComplete': ('number', 7, None, None, None, 'Download progress of metadata. 0.0 to 1.0.'),
'name': ('string', 1, None, None, None, 'Torrent name.'),
'nextAnnounceTime': ('number', 1, 7, None, None, 'Next announce time.'),
'nextScrapeTime': ('number', 1, 7, None, None, 'Next scrape time.'),
'peer-limit': ('number', 5, None, None, None, 'Maximum number of peers.'),
'peers': ('array', 2, None, None, None, 'Array of peer objects.'),
'peersConnected': ('number', 1, None, None, None, 'Number of peers we are connected to.'),
'peersFrom': ('object', 1, None, None, None, 'Object containing download peers counts for different peer types.'),
'peersGettingFromUs': ('number', 1, None, None, None, 'Number of peers we are sending data to.'),
'peersKnown': ('number', 1, 13, None, None, 'Number of peers that the tracker knows.'),
'peersSendingToUs': ('number', 1, None, None, None, 'Number of peers sending to us'),
'percentDone': ('double', 5, None, None, None, 'Download progress of selected files. 0.0 to 1.0.'),
'pieces': ('string', 5, None, None, None, 'String with base64 encoded bitfield indicating finished pieces.'),
'pieceCount': ('number', 1, None, None, None, 'Number of pieces.'),
'pieceSize': ('number', 1, None, None, None, 'Number of bytes in a piece.'),
'priorities': ('array', 1, None, None, None, 'Array of file priorities.'),
'queuePosition': ('number', 14, None, None, None, 'The queue position.'),
'rateDownload': ('number', 1, None, None, None, 'Download rate in bps.'),
'rateUpload': ('number', 1, None, None, None, 'Upload rate in bps.'),
'recheckProgress': ('double', 1, None, None, None, 'Progress of recheck. 0.0 to 1.0.'),
'secondsDownloading': ('number', 15, None, None, None, ''),
'secondsSeeding': ('number', 15, None, None, None, ''),
'scrapeResponse': ('string', 1, 7, None, None, 'Scrape response message.'),
'scrapeURL': ('string', 1, 7, None, None, 'Current scrape URL'),
'seeders': ('number', 1, 7, None, None, 'Number of seeders reported by the tracker.'),
'seedIdleLimit': ('number', 10, None, None, None, 'Idle limit in minutes.'),
'seedIdleMode': ('number', 10, None, None, None, 'Use global (0), torrent (1), or unlimited (2) limit.'),
'seedRatioLimit': ('double', 5, None, None, None, 'Seed ratio limit.'),
'seedRatioMode': ('number', 5, None, None, None, 'Use global (0), torrent (1), or unlimited (2) limit.'),
'sizeWhenDone': ('number', 1, None, None, None, 'Size of the torrent download in bytes.'),
'startDate': ('number', 1, None, None, None, 'The date when the torrent was last started.'),
'status': ('number', 1, None, None, None, 'Current status, see source'),
'swarmSpeed': ('number', 1, 7, None, None, 'Estimated speed in Kbps in the swarm.'),
'timesCompleted': ('number', 1, 7, None, None, 'Number of successful downloads reported by the tracker.'),
'trackers': ('array', 1, None, None, None, 'Array of tracker objects.'),
'trackerStats': ('object', 7, None, None, None, 'Array of object containing tracker statistics.'),
'totalSize': ('number', 1, None, None, None, 'Total size of the torrent in bytes'),
'torrentFile': ('string', 5, None, None, None, 'Path to .torrent file.'),
'uploadedEver': ('number', 1, None, None, None, 'Number of bytes uploaded, ever.'),
'uploadLimit': ('number', 1, None, None, None, 'Upload limit in Kbps'),
        'uploadLimitMode': ('number', 1, 5, None, None, 'Upload limit mode. 0 means global, 1 means single, 2 unlimited.'),
'uploadLimited': ('boolean', 5, None, None, None, 'Upload limit enabled.'),
'uploadRatio': ('double', 1, None, None, None, 'Seed ratio.'),
        'wanted': ('array', 1, None, None, None, 'Array of booleans indicating wanted files.'),
'webseeds': ('array', 1, None, None, None, 'Array of webseeds objects'),
'webseedsSendingToUs': ('number', 1, None, None, None, 'Number of webseeds seeding to us.'),
},
'set': {
'bandwidthPriority': ('number', 5, None, None, None, 'Priority for this transfer.'),
'downloadLimit': ('number', 5, None, 'speed-limit-down', None, 'Set the speed limit for download in Kib/s.'),
'downloadLimited': ('boolean', 5, None, 'speed-limit-down-enabled', None, 'Enable download speed limiter.'),
'files-wanted': ('array', 1, None, None, None, "A list of file id's that should be downloaded."),
'files-unwanted': ('array', 1, None, None, None, "A list of file id's that shouldn't be downloaded."),
        'honorsSessionLimits': ('boolean', 5, None, None, None, "Enables or disables honoring of the session upload limit for this transfer."),
'location': ('array', 1, None, None, None, 'Local download location.'),
'peer-limit': ('number', 1, None, None, None, 'The peer limit for the torrents.'),
'priority-high': ('array', 1, None, None, None, "A list of file id's that should have high priority."),
        'priority-low': ('array', 1, None, None, None, "A list of file id's that should have low priority."),
        'priority-normal': ('array', 1, None, None, None, "A list of file id's that should have normal priority."),
'queuePosition': ('number', 14, None, None, None, 'Position of this transfer in its queue.'),
'seedIdleLimit': ('number', 10, None, None, None, 'Seed inactivity limit in minutes.'),
'seedIdleMode': ('number', 10, None, None, None, 'Seed inactivity mode. 0 = Use session limit, 1 = Use transfer limit, 2 = Disable limit.'),
'seedRatioLimit': ('double', 5, None, None, None, 'Seeding ratio.'),
'seedRatioMode': ('number', 5, None, None, None, 'Which ratio to use. 0 = Use session limit, 1 = Use transfer limit, 2 = Disable limit.'),
'speed-limit-down': ('number', 1, 5, None, 'downloadLimit', 'Set the speed limit for download in Kib/s.'),
'speed-limit-down-enabled': ('boolean', 1, 5, None, 'downloadLimited', 'Enable download speed limiter.'),
'speed-limit-up': ('number', 1, 5, None, 'uploadLimit', 'Set the speed limit for upload in Kib/s.'),
'speed-limit-up-enabled': ('boolean', 1, 5, None, 'uploadLimited', 'Enable upload speed limiter.'),
'trackerAdd': ('array', 10, None, None, None, 'Array of string with announce URLs to add.'),
'trackerRemove': ('array', 10, None, None, None, 'Array of ids of trackers to remove.'),
'trackerReplace': ('array', 10, None, None, None, 'Array of (id, url) tuples where the announce URL should be replaced.'),
'uploadLimit': ('number', 5, None, 'speed-limit-up', None, 'Set the speed limit for upload in Kib/s.'),
'uploadLimited': ('boolean', 5, None, 'speed-limit-up-enabled', None, 'Enable upload speed limiter.'),
},
'add': {
'bandwidthPriority': ('number', 8, None, None, None, 'Priority for this transfer.'),
'download-dir': ('string', 1, None, None, None, 'The directory where the downloaded contents will be saved in.'),
'cookies': ('string', 13, None, None, None, 'One or more HTTP cookie(s).'),
'filename': ('string', 1, None, None, None, "A file path or URL to a torrent file or a magnet link."),
'files-wanted': ('array', 1, None, None, None, "A list of file id's that should be downloaded."),
'files-unwanted': ('array', 1, None, None, None, "A list of file id's that shouldn't be downloaded."),
'metainfo': ('string', 1, None, None, None, 'The content of a torrent file, base64 encoded.'),
'paused': ('boolean', 1, None, None, None, 'If True, does not start the transfer when added.'),
'peer-limit': ('number', 1, None, None, None, 'Maximum number of peers allowed.'),
'priority-high': ('array', 1, None, None, None, "A list of file id's that should have high priority."),
'priority-low': ('array', 1, None, None, None, "A list of file id's that should have low priority."),
'priority-normal': ('array', 1, None, None, None, "A list of file id's that should have normal priority."),
}
}
# Arguments for session methods
SESSION_ARGS = {
'get': {
"alt-speed-down": ('number', 5, None, None, None, 'Alternate session download speed limit (in Kib/s).'),
"alt-speed-enabled": ('boolean', 5, None, None, None, 'True if alternate global download speed limiter is ebabled.'),
"alt-speed-time-begin": ('number', 5, None, None, None, 'Time when alternate speeds should be enabled. Minutes after midnight.'),
"alt-speed-time-enabled": ('boolean', 5, None, None, None, 'True if alternate speeds scheduling is enabled.'),
"alt-speed-time-end": ('number', 5, None, None, None, 'Time when alternate speeds should be disabled. Minutes after midnight.'),
"alt-speed-time-day": ('number', 5, None, None, None, 'Days alternate speeds scheduling is enabled.'),
"alt-speed-up": ('number', 5, None, None, None, 'Alternate session upload speed limit (in Kib/s)'),
"blocklist-enabled": ('boolean', 5, None, None, None, 'True when blocklist is enabled.'),
"blocklist-size": ('number', 5, None, None, None, 'Number of rules in the blocklist'),
"blocklist-url": ('string', 11, None, None, None, 'Location of the block list. Updated with blocklist-update.'),
"cache-size-mb": ('number', 10, None, None, None, 'The maximum size of the disk cache in MB'),
"config-dir": ('string', 8, None, None, None, 'location of transmissions configuration directory'),
"dht-enabled": ('boolean', 6, None, None, None, 'True if DHT enabled.'),
"download-dir": ('string', 1, None, None, None, 'The download directory.'),
"download-dir-free-space": ('number', 12, None, None, None, 'Free space in the download directory, in bytes'),
"download-queue-size": ('number', 14, None, None, None, 'Number of slots in the download queue.'),
"download-queue-enabled": ('boolean', 14, None, None, None, 'True if the download queue is enabled.'),
"encryption": ('string', 1, None, None, None, 'Encryption mode, one of ``required``, ``preferred`` or ``tolerated``.'),
"idle-seeding-limit": ('number', 10, None, None, None, 'Seed inactivity limit in minutes.'),
"idle-seeding-limit-enabled": ('boolean', 10, None, None, None, 'True if the seed activity limit is enabled.'),
"incomplete-dir": ('string', 7, None, None, None, 'The path to the directory for incomplete torrent transfer data.'),
"incomplete-dir-enabled": ('boolean', 7, None, None, None, 'True if the incomplete dir is enabled.'),
"lpd-enabled": ('boolean', 9, None, None, None, 'True if local peer discovery is enabled.'),
"peer-limit": ('number', 1, 5, None, 'peer-limit-global', 'Maximum number of peers.'),
"peer-limit-global": ('number', 5, None, 'peer-limit', None, 'Maximum number of peers.'),
"peer-limit-per-torrent": ('number', 5, None, None, None, 'Maximum number of peers per transfer.'),
"pex-allowed": ('boolean', 1, 5, None, 'pex-enabled', 'True if PEX is allowed.'),
"pex-enabled": ('boolean', 5, None, 'pex-allowed', None, 'True if PEX is enabled.'),
"port": ('number', 1, 5, None, 'peer-port', 'Peer port.'),
"peer-port": ('number', 5, None, 'port', None, 'Peer port.'),
"peer-port-random-on-start": ('boolean', 5, None, None, None, 'Enables randomized peer port on start of Transmission.'),
"port-forwarding-enabled": ('boolean', 1, None, None, None, 'True if port forwarding is enabled.'),
"queue-stalled-minutes": ('number', 14, None, None, None, 'Number of minutes of idle that marks a transfer as stalled.'),
"queue-stalled-enabled": ('boolean', 14, None, None, None, 'True if stalled tracking of transfers is enabled.'),
"rename-partial-files": ('boolean', 8, None, None, None, 'True if ".part" is appended to incomplete files'),
"rpc-version": ('number', 4, None, None, None, 'Transmission RPC API Version.'),
"rpc-version-minimum": ('number', 4, None, None, None, 'Minimum accepted RPC API Version.'),
"script-torrent-done-enabled": ('boolean', 9, None, None, None, 'True if the done script is enabled.'),
"script-torrent-done-filename": ('string', 9, None, None, None, 'Filename of the script to run when the transfer is done.'),
"seedRatioLimit": ('double', 5, None, None, None, 'Seed ratio limit. 1.0 means 1:1 download and upload ratio.'),
"seedRatioLimited": ('boolean', 5, None, None, None, 'True if seed ration limit is enabled.'),
"seed-queue-size": ('number', 14, None, None, None, 'Number of slots in the upload queue.'),
"seed-queue-enabled": ('boolean', 14, None, None, None, 'True if upload queue is enabled.'),
"speed-limit-down": ('number', 1, None, None, None, 'Download speed limit (in Kib/s).'),
"speed-limit-down-enabled": ('boolean', 1, None, None, None, 'True if the download speed is limited.'),
"speed-limit-up": ('number', 1, None, None, None, 'Upload speed limit (in Kib/s).'),
"speed-limit-up-enabled": ('boolean', 1, None, None, None, 'True if the upload speed is limited.'),
"start-added-torrents": ('boolean', 9, None, None, None, 'When true uploaded torrents will start right away.'),
"trash-original-torrent-files": ('boolean', 9, None, None, None, 'When true added .torrent files will be deleted.'),
'units': ('object', 10, None, None, None, 'An object containing units for size and speed.'),
'utp-enabled': ('boolean', 13, None, None, None, 'True if Micro Transport Protocol (UTP) is enabled.'),
"version": ('string', 3, None, None, None, 'Transmission version.'),
},
'set': {
"alt-speed-down": ('number', 5, None, None, None, 'Alternate session download speed limit (in Kib/s).'),
"alt-speed-enabled": ('boolean', 5, None, None, None, 'Enables alternate global download speed limiter.'),
"alt-speed-time-begin": ('number', 5, None, None, None, 'Time when alternate speeds should be enabled. Minutes after midnight.'),
"alt-speed-time-enabled": ('boolean', 5, None, None, None, 'Enables alternate speeds scheduling.'),
"alt-speed-time-end": ('number', 5, None, None, None, 'Time when alternate speeds should be disabled. Minutes after midnight.'),
"alt-speed-time-day": ('number', 5, None, None, None, 'Enables alternate speeds scheduling these days.'),
"alt-speed-up": ('number', 5, None, None, None, 'Alternate session upload speed limit (in Kib/s).'),
"blocklist-enabled": ('boolean', 5, None, None, None, 'Enables the block list'),
"blocklist-url": ('string', 11, None, None, None, 'Location of the block list. Updated with blocklist-update.'),
"cache-size-mb": ('number', 10, None, None, None, 'The maximum size of the disk cache in MB'),
"dht-enabled": ('boolean', 6, None, None, None, 'Enables DHT.'),
"download-dir": ('string', 1, None, None, None, 'Set the session download directory.'),
"download-queue-size": ('number', 14, None, None, None, 'Number of slots in the download queue.'),
"download-queue-enabled": ('boolean', 14, None, None, None, 'Enables download queue.'),
"encryption": ('string', 1, None, None, None, 'Set the session encryption mode, one of ``required``, ``preferred`` or ``tolerated``.'),
"idle-seeding-limit": ('number', 10, None, None, None, 'The default seed inactivity limit in minutes.'),
"idle-seeding-limit-enabled": ('boolean', 10, None, None, None, 'Enables the default seed inactivity limit'),
"incomplete-dir": ('string', 7, None, None, None, 'The path to the directory of incomplete transfer data.'),
"incomplete-dir-enabled": ('boolean', 7, None, None, None, 'Enables the incomplete transfer data directory. Otherwise data for incomplete transfers are stored in the download target.'),
"lpd-enabled": ('boolean', 9, None, None, None, 'Enables local peer discovery for public torrents.'),
"peer-limit": ('number', 1, 5, None, 'peer-limit-global', 'Maximum number of peers.'),
"peer-limit-global": ('number', 5, None, 'peer-limit', None, 'Maximum number of peers.'),
"peer-limit-per-torrent": ('number', 5, None, None, None, 'Maximum number of peers per transfer.'),
"pex-allowed": ('boolean', 1, 5, None, 'pex-enabled', 'Allowing PEX in public torrents.'),
"pex-enabled": ('boolean', 5, None, 'pex-allowed', None, 'Allowing PEX in public torrents.'),
"port": ('number', 1, 5, None, 'peer-port', 'Peer port.'),
"peer-port": ('number', 5, None, 'port', None, 'Peer port.'),
"peer-port-random-on-start": ('boolean', 5, None, None, None, 'Enables randomized peer port on start of Transmission.'),
"port-forwarding-enabled": ('boolean', 1, None, None, None, 'Enables port forwarding.'),
"rename-partial-files": ('boolean', 8, None, None, None, 'Appends ".part" to incomplete files'),
"queue-stalled-minutes": ('number', 14, None, None, None, 'Number of minutes of idle that marks a transfer as stalled.'),
"queue-stalled-enabled": ('boolean', 14, None, None, None, 'Enable tracking of stalled transfers.'),
"script-torrent-done-enabled": ('boolean', 9, None, None, None, 'Whether or not to call the "done" script.'),
"script-torrent-done-filename": ('string', 9, None, None, None, 'Filename of the script to run when the transfer is done.'),
"seed-queue-size": ('number', 14, None, None, None, 'Number of slots in the upload queue.'),
"seed-queue-enabled": ('boolean', 14, None, None, None, 'Enables upload queue.'),
"seedRatioLimit": ('double', 5, None, None, None, 'Seed ratio limit. 1.0 means 1:1 download and upload ratio.'),
"seedRatioLimited": ('boolean', 5, None, None, None, 'Enables seed ration limit.'),
"speed-limit-down": ('number', 1, None, None, None, 'Download speed limit (in Kib/s).'),
"speed-limit-down-enabled": ('boolean', 1, None, None, None, 'Enables download speed limiting.'),
"speed-limit-up": ('number', 1, None, None, None, 'Upload speed limit (in Kib/s).'),
"speed-limit-up-enabled": ('boolean', 1, None, None, None, 'Enables upload speed limiting.'),
"start-added-torrents": ('boolean', 9, None, None, None, 'Added torrents will be started right away.'),
"trash-original-torrent-files": ('boolean', 9, None, None, None, 'The .torrent file of added torrents will be deleted.'),
'utp-enabled': ('boolean', 13, None, None, None, 'Enables Micro Transport Protocol (UTP).'),
},
}
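# Illustrative helper (not part of the original module): list the session
# arguments usable at a given RPC protocol version, based on the 'added' and
# 'removed' fields described in the argument-map note above. The helper name
# is hypothetical.
def _example_supported_session_args(rpc_version, method='get'):
    return sorted(
        name for name, info in SESSION_ARGS[method].items()
        if rpc_version >= info[1] and (info[2] is None or rpc_version < info[2])
    )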
| gpl-3.0 | 44,125,306,050,202,640 | 93.949153 | 200 | 0.56387 | false |
ehdsouza/python-sdk | examples/tone_analyzer_v3.py | 1 | 1714 | import json
import os
from os.path import join, dirname
from watson_developer_cloud import ToneAnalyzerV3
tone_analyzer = ToneAnalyzerV3(
username='YOUR SERVICE USERNAME',
password='YOUR SERVICE PASSWORD',
version='2016-05-19')
print("\ntone_chat() example 1:\n")
utterances = [{'text': 'I am very happy.', 'user': 'glenn'},
{'text': 'It is a good day.', 'user': 'glenn'}]
print(json.dumps(tone_analyzer.tone_chat(utterances), indent=2))
print("\ntone() example 1:\n")
print(json.dumps(tone_analyzer.tone(text='I am very happy. It is a good day.'),
indent=2))
print("\ntone() example 2:\n")
with open(join(dirname(__file__),
'../resources/tone-example.json')) as tone_json:
tone = tone_analyzer.tone(json.load(tone_json)['text'], 'emotion')
print(json.dumps(tone, indent=2))
print("\ntone() example 3:\n")
with open(join(dirname(__file__),
'../resources/tone-example.json')) as tone_json:
tone = tone_analyzer.tone(json.load(tone_json)['text'], 'emotion',
True, 'text/plain')
print(json.dumps(tone, indent=2))
print("\ntone() example 4:\n")
with open(join(dirname(__file__),
'../resources/tone-example.json')) as tone_json:
tone = tone_analyzer.tone(json.load(tone_json), 'emotion',
content_type='application/json', )
print(json.dumps(tone, indent=2))
print("\ntone() example 5:\n")
with open(join(dirname(__file__),
'../resources/tone-example-html.json')) as tone_json:
tone = tone_analyzer.tone(json.load(tone_json)['text'], 'emotion',
content_type='text/html')
print(json.dumps(tone, indent=2))
| apache-2.0 | -6,885,231,069,875,425,000 | 37.088889 | 79 | 0.613769 | false |
saradbowman/osf.io | osf/migrations/0175_pagecounter_schema.py | 10 | 1213 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-10 18:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('osf', '0174_add_ab_testing_home_page_version_b_flag'),
]
operations = [
migrations.AddField(
model_name='pagecounter',
name='action',
field=models.CharField(blank=True, null=True, max_length=128),
),
migrations.AddField(
model_name='pagecounter',
name='file',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='pagecounters', to='osf.BaseFileNode'),
),
migrations.AddField(
model_name='pagecounter',
name='resource',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='pagecounters', to='osf.Guid'),
),
migrations.AddField(
model_name='pagecounter',
name='version',
field=models.IntegerField(blank=True, null=True),
),
]
| apache-2.0 | 566,671,735,279,718,600 | 32.694444 | 156 | 0.607585 | false |
glaubitz/fs-uae-debian | arcade/OpenGL/arrays/arraydatatype.py | 9 | 13543 | """Array data-type implementations (abstraction points for GL array types"""
import ctypes
import OpenGL
from OpenGL.raw.GL import _types
from OpenGL import plugins
from OpenGL.arrays import formathandler, _arrayconstants as GL_1_1
from OpenGL import logs
_log = logs.getLog( 'OpenGL.arrays.arraydatatype' )
from OpenGL import acceleratesupport
ADT = None
if acceleratesupport.ACCELERATE_AVAILABLE:
try:
from OpenGL_accelerate.arraydatatype import ArrayDatatype as ADT
except ImportError as err:
_log.warn(
"Unable to load ArrayDatatype accelerator from OpenGL_accelerate"
)
if ADT is None:
# Python-coded version
class HandlerRegistry( dict ):
GENERIC_OUTPUT_PREFERENCES = ['numpy','ctypesarrays']
def __init__( self, plugin_match ):
self.match = plugin_match
self.output_handler = None
self.preferredOutput = None
self.all_output_handlers = []
def __call__( self, value ):
"""Lookup of handler for given value"""
try:
typ = value.__class__
except AttributeError as err:
typ = type(value)
handler = self.get( typ )
if not handler:
if hasattr( typ, '__mro__' ):
for base in typ.__mro__:
handler = self.get( base )
if not handler:
handler = self.match( base )
if handler:
handler = handler.load()
if handler:
handler = handler()
if handler:
self[ typ ] = handler
if hasattr( handler, 'registerEquivalent' ):
handler.registerEquivalent( typ, base )
return handler
raise TypeError(
"""No array-type handler for type %s.%s (value: %s) registered"""%(
                        typ.__module__, typ.__name__, repr(value)[:50]
)
)
return handler
def handler_by_plugin_name( self, name ):
plugin = plugins.FormatHandler.by_name( name )
if plugin:
try:
return plugin.load()
except ImportError as err:
return None
else:
raise RuntimeError( 'No handler of name %s found'%(name,))
def get_output_handler( self ):
"""Fast-path lookup for output handler object"""
if self.output_handler is None:
if self.preferredOutput is not None:
self.output_handler = self.handler_by_plugin_name( self.preferredOutput )
if not self.output_handler:
for preferred in self.GENERIC_OUTPUT_PREFERENCES:
self.output_handler = self.handler_by_plugin_name( preferred )
if self.output_handler:
break
if not self.output_handler:
raise RuntimeError(
"""Unable to find any output handler at all (not even ctypes/numpy ones!)"""
)
return self.output_handler
def register( self, handler, types=None ):
"""Register this class as handler for given set of types"""
if not isinstance( types, (list,tuple)):
types = [ types ]
for type in types:
self[ type ] = handler
if handler.isOutput:
self.all_output_handlers.append( handler )
def registerReturn( self, handler ):
"""Register this handler as the default return-type handler"""
if isinstance( handler, (str,unicode)):
self.preferredOutput = handler
self.output_handler = None
else:
self.preferredOutput = None
self.output_handler = handler
GLOBAL_REGISTRY = HandlerRegistry( plugins.FormatHandler.match)
formathandler.FormatHandler.TYPE_REGISTRY = GLOBAL_REGISTRY
class ArrayDatatype( object ):
"""Mix-in for array datatype classes
The ArrayDatatype marker essentially is used to mark a particular argument
as having an "array" type, which means that it is eligible for handling
via the arrays sub-package and its registered handlers.
"""
typeConstant = None
handler = GLOBAL_REGISTRY
getHandler = GLOBAL_REGISTRY.__call__
returnHandler = GLOBAL_REGISTRY.get_output_handler
isAccelerated = False
@classmethod
def getRegistry( cls ):
"""Get our handler registry"""
return cls.handler
def from_param( cls, value, typeConstant=None ):
"""Given a value in a known data-pointer type, convert to a ctypes pointer"""
return cls.getHandler(value).from_param( value, cls.typeConstant )
from_param = classmethod( logs.logOnFail( from_param, _log ) )
def dataPointer( cls, value ):
"""Given a value in a known data-pointer type, return long for pointer"""
try:
return cls.getHandler(value).dataPointer( value )
except Exception as err:
_log.warn(
"""Failure in dataPointer for %s instance %s""", type(value), value,
)
raise
dataPointer = classmethod( logs.logOnFail( dataPointer, _log ) )
def voidDataPointer( cls, value ):
"""Given value in a known data-pointer type, return void_p for pointer"""
pointer = cls.dataPointer( value )
try:
return ctypes.c_void_p(pointer)
except TypeError as err:
return pointer
voidDataPointer = classmethod( logs.logOnFail( voidDataPointer, _log ) )
def typedPointer( cls, value ):
"""Return a pointer-to-base-type pointer for given value"""
return ctypes.cast( cls.dataPointer(value), ctypes.POINTER( cls.baseType ))
typedPointer = classmethod( typedPointer )
def asArray( cls, value, typeCode=None ):
"""Given a value, convert to preferred array representation"""
return cls.getHandler(value).asArray( value, typeCode or cls.typeConstant )
asArray = classmethod( logs.logOnFail( asArray, _log ) )
def arrayToGLType( cls, value ):
"""Given a data-value, guess the OpenGL type of the corresponding pointer
Note: this is not currently used in PyOpenGL and may be removed
eventually.
"""
return cls.getHandler(value).arrayToGLType( value )
arrayToGLType = classmethod( logs.logOnFail( arrayToGLType, _log ) )
def arraySize( cls, value, typeCode = None ):
"""Given a data-value, calculate dimensions for the array (number-of-units)"""
return cls.getHandler(value).arraySize( value, typeCode or cls.typeConstant )
arraySize = classmethod( logs.logOnFail( arraySize, _log ) )
def unitSize( cls, value, typeCode=None ):
"""Determine unit size of an array (if possible)
Uses our local type if defined, otherwise asks the handler to guess...
"""
return cls.getHandler(value).unitSize( value, typeCode or cls.typeConstant )
unitSize = classmethod( logs.logOnFail( unitSize, _log ) )
def zeros( cls, dims, typeCode=None ):
"""Allocate a return array of the given dimensions filled with zeros"""
return cls.returnHandler().zeros( dims, typeCode or cls.typeConstant )
zeros = classmethod( logs.logOnFail( zeros, _log ) )
def dimensions( cls, value ):
"""Given a data-value, get the dimensions (assumes full structure info)"""
return cls.getHandler(value).dimensions( value )
dimensions = classmethod( logs.logOnFail( dimensions, _log ) )
def arrayByteCount( cls, value ):
"""Given a data-value, try to determine number of bytes it's final form occupies
For most data-types this is arraySize() * atomic-unit-size
"""
return cls.getHandler(value).arrayByteCount( value )
arrayByteCount = classmethod( logs.logOnFail( arrayByteCount, _log ) )
# the final array data-type classes...
class GLclampdArray( ArrayDatatype, ctypes.POINTER(_types.GLclampd )):
"""Array datatype for GLclampd types"""
baseType = _types.GLclampd
typeConstant = _types.GL_DOUBLE
class GLclampfArray( ArrayDatatype, ctypes.POINTER(_types.GLclampf )):
"""Array datatype for GLclampf types"""
baseType = _types.GLclampf
typeConstant = _types.GL_FLOAT
class GLfloatArray( ArrayDatatype, ctypes.POINTER(_types.GLfloat )):
"""Array datatype for GLfloat types"""
baseType = _types.GLfloat
typeConstant = _types.GL_FLOAT
class GLdoubleArray( ArrayDatatype, ctypes.POINTER(_types.GLdouble )):
"""Array datatype for GLdouble types"""
baseType = _types.GLdouble
typeConstant = _types.GL_DOUBLE
class GLbyteArray( ArrayDatatype, ctypes.POINTER(_types.GLbyte )):
"""Array datatype for GLbyte types"""
baseType = _types.GLbyte
typeConstant = _types.GL_BYTE
class GLcharArray( ArrayDatatype, ctypes.c_char_p):
"""Array datatype for ARB extension pointers-to-arrays"""
baseType = _types.GLchar
typeConstant = _types.GL_BYTE
GLcharARBArray = GLcharArray
class GLshortArray( ArrayDatatype, ctypes.POINTER(_types.GLshort )):
"""Array datatype for GLshort types"""
baseType = _types.GLshort
typeConstant = _types.GL_SHORT
class GLintArray( ArrayDatatype, ctypes.POINTER(_types.GLint )):
"""Array datatype for GLint types"""
baseType = _types.GLint
typeConstant = _types.GL_INT
class GLubyteArray( ArrayDatatype, ctypes.POINTER(_types.GLubyte )):
"""Array datatype for GLubyte types"""
baseType = _types.GLubyte
typeConstant = _types.GL_UNSIGNED_BYTE
GLbooleanArray = GLubyteArray
class GLushortArray( ArrayDatatype, ctypes.POINTER(_types.GLushort )):
"""Array datatype for GLushort types"""
baseType = _types.GLushort
typeConstant = _types.GL_UNSIGNED_SHORT
class GLuintArray( ArrayDatatype, ctypes.POINTER(_types.GLuint )):
"""Array datatype for GLuint types"""
baseType = _types.GLuint
typeConstant = _types.GL_UNSIGNED_INT
class GLint64Array( ArrayDatatype, ctypes.POINTER(_types.GLint64 )):
"""Array datatype for GLuint types"""
baseType = _types.GLint64
typeConstant = None # TODO: find out what this should be!
class GLuint64Array( ArrayDatatype, ctypes.POINTER(_types.GLuint64 )):
"""Array datatype for GLuint types"""
baseType = _types.GLuint64
typeConstant = _types.GL_UNSIGNED_INT64
class GLenumArray( ArrayDatatype, ctypes.POINTER(_types.GLenum )):
"""Array datatype for GLenum types"""
baseType = _types.GLenum
typeConstant = _types.GL_UNSIGNED_INT
class GLsizeiArray( ArrayDatatype, ctypes.POINTER(_types.GLsizei )):
"""Array datatype for GLsizei types"""
baseType = _types.GLsizei
typeConstant = _types.GL_INT
class GLvoidpArray( ArrayDatatype, ctypes.POINTER(_types.GLvoid )):
"""Array datatype for GLenum types"""
baseType = _types.GLvoidp
typeConstant = _types.GL_VOID_P
else:
# Cython-coded array handler
_log.info( 'Using accelerated ArrayDatatype' )
ArrayDatatype = ADT( None, None )
GLclampdArray = ADT( GL_1_1.GL_DOUBLE, _types.GLclampd )
GLclampfArray = ADT( GL_1_1.GL_FLOAT, _types.GLclampf )
GLdoubleArray = ADT( GL_1_1.GL_DOUBLE, _types.GLdouble )
GLfloatArray = ADT( GL_1_1.GL_FLOAT, _types.GLfloat )
GLbyteArray = ADT( GL_1_1.GL_BYTE, _types.GLbyte )
GLcharArray = GLcharARBArray = ADT( GL_1_1.GL_BYTE, _types.GLchar )
GLshortArray = ADT( GL_1_1.GL_SHORT, _types.GLshort )
GLintArray = ADT( GL_1_1.GL_INT, _types.GLint )
GLubyteArray = GLbooleanArray = ADT( GL_1_1.GL_UNSIGNED_BYTE, _types.GLubyte )
GLushortArray = ADT( GL_1_1.GL_UNSIGNED_SHORT, _types.GLushort )
GLuintArray = ADT( GL_1_1.GL_UNSIGNED_INT, _types.GLuint )
GLint64Array = ADT( None, _types.GLint64 )
GLuint64Array = ADT( GL_1_1.GL_UNSIGNED_INT64, _types.GLuint64 )
GLenumArray = ADT( GL_1_1.GL_UNSIGNED_INT, _types.GLenum )
GLsizeiArray = ADT( GL_1_1.GL_INT, _types.GLsizei )
GLvoidpArray = ADT( _types.GL_VOID_P, _types.GLvoidp )
GL_CONSTANT_TO_ARRAY_TYPE = {
GL_1_1.GL_DOUBLE : GLclampdArray,
GL_1_1.GL_FLOAT : GLclampfArray,
GL_1_1.GL_FLOAT : GLfloatArray,
GL_1_1.GL_DOUBLE : GLdoubleArray,
GL_1_1.GL_BYTE : GLbyteArray,
GL_1_1.GL_SHORT : GLshortArray,
GL_1_1.GL_INT : GLintArray,
GL_1_1.GL_UNSIGNED_BYTE : GLubyteArray,
GL_1_1.GL_UNSIGNED_SHORT : GLushortArray,
GL_1_1.GL_UNSIGNED_INT : GLuintArray,
#GL_1_1.GL_UNSIGNED_INT : GLenumArray,
}
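# Illustrative sketch (not part of the original module): typical use of the
# array datatype helpers above when marshalling a Python sequence for a GL
# entry point. The nested-tuple vertices and the helper name are assumptions
# made purely for the example.
def _example_marshal_vertices(vertices=((0.0, 1.0, 0.0), (1.0, 0.0, 0.0))):
    array = GLfloatArray.asArray(vertices)          # preferred array representation
    count = GLfloatArray.arraySize(array)           # total number of GLfloat units
    pointer = GLfloatArray.voidDataPointer(array)   # void pointer for GL entry points
    return array, count, pointer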
| gpl-2.0 | 8,271,836,533,093,973,000 | 43.993355 | 100 | 0.595954 | false |
red-hood/calendarserver | twistedcaldav/sharing.py | 1 | 37736 | # -*- test-case-name: twistedcaldav.test.test_sharing -*-
# #
# Copyright (c) 2010-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# #
"""
Sharing behavior
"""
__all__ = [
"SharedResourceMixin",
"SharedHomeMixin",
]
from twext.who.idirectory import RecordType
from twisted.internet.defer import succeed, inlineCallbacks, DeferredList, \
returnValue
from twistedcaldav import customxml, caldavxml
from twistedcaldav.config import config
from twistedcaldav.customxml import calendarserver_namespace
from twistedcaldav.linkresource import LinkFollowerMixIn
from txdav.common.datastore.sql_tables import _ABO_KIND_GROUP, \
_BIND_MODE_DIRECT, _BIND_MODE_INDIRECT, _BIND_MODE_OWN, _BIND_MODE_READ, \
_BIND_MODE_WRITE, _BIND_STATUS_ACCEPTED, _BIND_STATUS_DECLINED, \
_BIND_STATUS_DELETED, _BIND_STATUS_INVALID, _BIND_STATUS_INVITED
from txdav.common.idirectoryservice import DirectoryRecordNotFoundError
from txdav.xml import element
from txdav.who.wiki import RecordType as WikiRecordType, WikiAccessLevel
from txweb2 import responsecode
from txweb2.dav.http import ErrorResponse, MultiStatusResponse
from txweb2.dav.resource import TwistedACLInheritable
from txweb2.dav.util import allDataFromStream, joinURL
from txweb2.http import HTTPError, Response, XMLResponse
class SharedResourceMixin(object):
"""
A mix-in for calendar/addressbook resources that implements sharing-related
functionality.
"""
@inlineCallbacks
def inviteProperty(self, request):
"""
Calculate the customxml.Invite property (for readProperty) from the
invites database.
"""
if config.Sharing.Enabled:
@inlineCallbacks
def invitePropertyElement(invitation, includeUID=True):
userid = "urn:x-uid:" + invitation.shareeUID
principal = yield self.principalForUID(invitation.shareeUID)
cn = principal.displayName() if principal else invitation.shareeUID
returnValue(customxml.InviteUser(
customxml.UID.fromString(invitation.uid) if includeUID else None,
element.HRef.fromString(userid),
customxml.CommonName.fromString(cn),
customxml.InviteAccess(invitationBindModeToXMLMap[invitation.mode]()),
invitationBindStatusToXMLMap[invitation.status](),
))
# See if this property is on the shared calendar
if self.isShared():
invitations = yield self.validateInvites(request)
returnValue(customxml.Invite(
*[(yield invitePropertyElement(invitation)) for invitation in invitations]
))
# See if it is on the sharee calendar
if self.isShareeResource():
original = yield self._newStoreObject.ownerView()
if original is not None:
invitations = yield original.allInvitations()
invitations = yield self.validateInvites(request, invitations)
ownerPrincipal = yield self.principalForUID(self._newStoreObject.ownerHome().uid())
if ownerPrincipal is None:
owner = "invalid"
ownerCN = "Invalid"
else:
# FIXME: use urn:x-uid in all cases
if self.isCalendarCollection():
owner = ownerPrincipal.principalURL()
else:
owner = "urn:x-uid:" + ownerPrincipal.principalUID()
ownerCN = ownerPrincipal.displayName()
returnValue(customxml.Invite(
customxml.Organizer(
element.HRef.fromString(owner),
customxml.CommonName.fromString(ownerCN),
),
*[(yield invitePropertyElement(invitation, includeUID=False)) for invitation in invitations]
))
returnValue(None)
@inlineCallbacks
def upgradeToShare(self):
"""
Set the resource-type property on this resource to indicate that this
is the owner's version of a resource which has been shared.
"""
# Change status on store object
yield self._newStoreObject.setShared(True)
@inlineCallbacks
def downgradeFromShare(self, request):
# Change status on store object
yield self._newStoreObject.setShared(False)
# Remove all invitees
for invitation in (yield self._newStoreObject.allInvitations()):
yield self._newStoreObject.uninviteUIDFromShare(invitation.shareeUID)
returnValue(True)
@inlineCallbacks
def directShare(self, request):
"""
Directly bind an accessible calendar/address book collection into the
current principal's calendar/addressbook home.
@param request: the request triggering this action
@type request: L{IRequest}
@return: the (asynchronous) HTTP result to respond to the direct-share
request.
@rtype: L{Deferred} firing L{txweb2.http.Response}, failing with
L{HTTPError}
"""
# Need to have at least DAV:read to do this
yield self.authorize(request, (element.Read(),))
# Find current principal
authz_principal = self.currentPrincipal(request).children[0]
if not isinstance(authz_principal, element.HRef):
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(calendarserver_namespace, "valid-principal"),
"Current user principal not a DAV:href",
))
principalURL = str(authz_principal)
if not principalURL:
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(calendarserver_namespace, "valid-principal"),
"Current user principal not specified",
))
sharee = (yield request.locateResource(principalURL))
# Check enabled for service
from twistedcaldav.directory.principal import DirectoryCalendarPrincipalResource
if not isinstance(sharee, DirectoryCalendarPrincipalResource):
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(calendarserver_namespace, "invalid-principal"),
"Current user principal is not a calendar/addressbook enabled principal",
))
# Get the home collection
if self.isCalendarCollection():
shareeHomeResource = yield sharee.calendarHome(request)
elif self.isAddressBookCollection() or self.isGroup():
shareeHomeResource = yield sharee.addressBookHome(request)
else:
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(calendarserver_namespace, "invalid-principal"),
"No calendar/addressbook home for principal",
))
# TODO: Make sure principal is not sharing back to themselves
hostURL = (yield self.canonicalURL(request))
shareeHomeURL = shareeHomeResource.url()
if hostURL.startswith(shareeHomeURL):
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(calendarserver_namespace, "invalid-share"),
"Can't share your own calendar or addressbook",
))
# Accept it
shareeView = yield self._newStoreObject.directShareWithUser(
sharee.principalUID(),
displayName=self.displayName()
)
# Return the URL of the shared calendar
sharedAsURL = joinURL(shareeHomeResource.url(), shareeView.name())
returnValue(XMLResponse(
code=responsecode.OK,
element=customxml.SharedAs(
element.HRef.fromString(sharedAsURL)
)
))
def isShared(self):
"""
Return True if this is an owner shared calendar collection.
"""
try:
return self._newStoreObject.isShared() if self._newStoreObject else False
except AttributeError:
return False
def setShare(self, share_url):
"""
Set the URL associated with this L{SharedResourceMixin}. (This
is only invoked on the sharee's resource, not the owner's.)
"""
self._isShareeResource = True
self._share_url = share_url
def isShareeResource(self):
"""
Return True if this is a sharee view of a shared collection.
"""
return (
hasattr(self, "_newStoreObject") and
hasattr(self._newStoreObject, "owned") and
not self._newStoreObject.owned() and
getattr(self._newStoreObject, "_bindMode", None) is not None
)
def removeShareeResource(self, request):
"""
Called when the sharee DELETEs a shared collection.
"""
return self._newStoreObject.deleteShare()
@inlineCallbacks
def _checkAccessControl(self):
"""
Check the shared access mode of this resource, potentially consulting
an external access method if necessary.
@return: a L{Deferred} firing a L{bytes} or L{None}, with one of the
potential values: C{"own"}, which means that the home is the owner
of the collection and it is not shared; C{"read-only"}, meaning
that the home that this collection is bound into has only read
access to this collection; C{"read-write"}, which means that the
home has both read and write access; C{"original"}, which means
that it should inherit the ACLs of the owner's collection, whatever
those happen to be, or C{None}, which means that the external
access control mechanism has dictate the home should no longer have
any access at all.
"""
if self._newStoreObject.direct():
owner = yield self.principalForUID(self._newStoreObject.ownerHome().uid())
sharee = yield self.principalForUID(self._newStoreObject.viewerHome().uid())
if owner.record.recordType == WikiRecordType.macOSXServerWiki:
# Access level comes from what the wiki has granted to the
# sharee
access = (yield owner.record.accessForRecord(sharee.record))
if access == WikiAccessLevel.read:
returnValue("read-only")
elif access == WikiAccessLevel.write:
returnValue("read-write")
else:
returnValue(None)
else:
# Check proxy access
proxy_mode = yield sharee.proxyMode(owner)
if proxy_mode == "none":
returnValue("original")
else:
returnValue("read-write" if proxy_mode == "write" else "read-only")
else:
# Invited shares use access mode from the invite
# Get the access for self
bindMode = yield self._newStoreObject.effectiveShareMode()
returnValue(invitationAccessFromBindModeMap.get(bindMode))
@inlineCallbacks
def shareeAccessControlList(self, request, *args, **kwargs):
"""
Return WebDAV ACLs appropriate for the current user accessing the
shared collection. For an "invite" share we take the privilege granted
to the sharee in the invite and map that to WebDAV ACLs. For a
"direct" share, if it is a wiki collection we map the wiki privileges
into WebDAV ACLs, otherwise we use whatever privileges exist on the
underlying shared collection.
@param request: the request used to locate the owner resource.
@type request: L{txweb2.iweb.IRequest}
@param args: The arguments for
L{txweb2.dav.idav.IDAVResource.accessControlList}
@param kwargs: The keyword arguments for
L{txweb2.dav.idav.IDAVResource.accessControlList}, plus
keyword-only arguments.
@return: the appropriate WebDAV ACL for the sharee
@rtype: L{davxml.ACL}
"""
assert self._isShareeResource, "Only call this for a sharee resource"
assert self.isCalendarCollection() or self.isAddressBookCollection(), "Only call this for a address book or calendar resource"
sharee = yield self.principalForUID(self._newStoreObject.viewerHome().uid())
access = yield self._checkAccessControl()
if access == "original" and not self._newStoreObject.ownerHome().external():
original = (yield request.locateResource(self._share_url))
result = (yield original.accessControlList(request, *args, **kwargs))
returnValue(result)
# Direct shares use underlying privileges of shared collection
userprivs = [
]
if access in ("read-only", "read-write",):
userprivs.append(element.Privilege(element.Read()))
userprivs.append(element.Privilege(element.ReadACL()))
userprivs.append(element.Privilege(element.ReadCurrentUserPrivilegeSet()))
if access in ("read-only",):
userprivs.append(element.Privilege(element.WriteProperties()))
if access in ("read-write",):
userprivs.append(element.Privilege(element.Write()))
proxyprivs = list(userprivs)
try:
proxyprivs.remove(element.Privilege(element.ReadACL()))
except ValueError:
# If wiki says no-access then ReadACL won't be in the list
pass
aces = (
# Inheritable specific access for the resource's associated principal.
element.ACE(
element.Principal(element.HRef(sharee.principalURL())),
element.Grant(*userprivs),
element.Protected(),
TwistedACLInheritable(),
),
)
if self.isCalendarCollection():
aces += (
# Inheritable CALDAV:read-free-busy access for authenticated users.
element.ACE(
element.Principal(element.Authenticated()),
element.Grant(element.Privilege(caldavxml.ReadFreeBusy())),
TwistedACLInheritable(),
),
)
# Give read access to config.ReadPrincipals
aces += config.ReadACEs
# Give all access to config.AdminPrincipals
aces += config.AdminACEs
if self.isCalendarCollection() and config.EnableProxyPrincipals:
aces += (
# DAV:read/DAV:read-current-user-privilege-set access for this principal's calendar-proxy-read users.
element.ACE(
element.Principal(element.HRef(joinURL(sharee.principalURL(), "calendar-proxy-read/"))),
element.Grant(
element.Privilege(element.Read()),
element.Privilege(element.ReadCurrentUserPrivilegeSet()),
element.Privilege(element.WriteProperties()),
),
element.Protected(),
TwistedACLInheritable(),
),
# DAV:read/DAV:read-current-user-privilege-set/DAV:write access for this principal's calendar-proxy-write users.
element.ACE(
element.Principal(element.HRef(joinURL(sharee.principalURL(), "calendar-proxy-write/"))),
element.Grant(*proxyprivs),
element.Protected(),
TwistedACLInheritable(),
),
)
returnValue(element.ACL(*aces))
@inlineCallbacks
def validUserIDForShare(self, userid, request=None):
"""
Test the user id to see if it is a valid identifier for sharing and
return a "normalized" form for our own use (e.g. convert mailto: to
urn:uuid).
@param userid: the userid to test
@type userid: C{str}
@return: C{str} of normalized userid or C{None} if
userid is not allowed.
"""
# First try to resolve as a calendar principal
principal = yield self.principalForCalendarUserAddress(userid)
if principal is None:
principal = yield self.principalForCalendarGroupAddress(userid)
if principal:
if request:
ownerPrincipal = (yield self.ownerPrincipal(request))
if ownerPrincipal is None or ownerPrincipal.principalURL() == principal.principalURL():
returnValue(None)
returnValue(principal.principalURL())
# TODO: we do not support external users right now so this is being hard-coded
# off in spite of the config option.
# elif config.Sharing.AllowExternalUsers:
# return userid
else:
returnValue(None)
@inlineCallbacks
def principalForCalendarGroupAddress(self, groupid):
"""
Get principal for group address if extant
"""
if (
config.Sharing.Enabled and
config.Sharing.Calendars.Enabled and
config.Sharing.Calendars.Groups.Enabled
):
# see if group
for principalCollection in self.principalCollections():
record = yield principalCollection.directory.recordWithCalendarUserAddress(groupid)
if record is not None and record.recordType == RecordType.group:
groupPrincipal = yield principalCollection.principalForRecord(record)
if groupPrincipal is not None:
returnValue(groupPrincipal)
returnValue(None)
@inlineCallbacks
def validateInvites(self, request, invitations=None):
"""
Make sure each userid in an invite is valid - if not re-write status.
"""
# assert request
if invitations is None:
invitations = yield self._newStoreObject.allInvitations()
adjusted_invitations = []
for invitation in invitations:
if invitation.status != _BIND_STATUS_INVALID:
if not (yield self.validUserIDForShare("urn:x-uid:" + invitation.shareeUID, request)):
self.log.error("Invalid sharee detected: {uid}", uid=invitation.shareeUID)
invitation = invitation._replace(status=_BIND_STATUS_INVALID)
invitation = invitation._replace(
mode=(
yield self._newStoreObject._effectiveShareMode(
invitation.mode, invitation.shareeUID, self._newStoreObject._txn
)
)
)
adjusted_invitations.append(invitation)
returnValue(adjusted_invitations)
def inviteUIDToShare(self, userid, cn, ace, summary, request):
""" Send out in invite first, and then add this user to the share list
@param userid:
@param ace: Must be one of customxml.ReadWriteAccess or customxml.ReadAccess
"""
# TODO: Check if this collection is shared, and error out if it isn't
resultIsList = True
if type(userid) is not list:
userid = [userid]
resultIsList = False
if type(cn) is not list:
cn = [cn]
dl = [self.inviteSingleUserToShare(_user, _cn, ace, summary, request) for _user, _cn in zip(userid, cn)]
return self._processShareActionList(dl, resultIsList)
def uninviteUIDFromShare(self, userid, ace, request):
"""
        Send out an uninvite first, and then remove this user from the share list.
"""
# Do not validate the userid - we want to allow invalid users to be removed because they
# may have been valid when added, but no longer valid now. Clients should be able to clear out
# anything known to be invalid.
# TODO: Check if this collection is shared, and error out if it isn't
resultIsList = True
if type(userid) is not list:
userid = [userid]
resultIsList = False
dl = [self.uninviteSingleUserFromShare(user, ace, request) for user in userid]
return self._processShareActionList(dl, resultIsList)
def inviteUserUpdateToShare(self, userid, cn, aceOLD, aceNEW, summary, request):
resultIsList = True
if type(userid) is not list:
userid = [userid]
resultIsList = False
if type(cn) is not list:
cn = [cn]
dl = [self.inviteSingleUserUpdateToShare(_user, _cn, aceOLD, aceNEW, summary, request) for _user, _cn in zip(userid, cn)]
return self._processShareActionList(dl, resultIsList)
def _processShareActionList(self, dl, resultIsList):
def _defer(resultset):
results = [result if success else False for success, result in resultset]
return results if resultIsList else results[0]
return DeferredList(dl).addCallback(_defer)
@inlineCallbacks
def inviteSingleUserToShare(self, userid, cn, ace, summary, request): #@UnusedVariable
# We currently only handle local users
sharee = yield self.principalForCalendarUserAddress(userid)
if sharee is None:
sharee = yield self.principalForCalendarGroupAddress(userid)
if sharee is None:
returnValue(False)
result = (yield self._newStoreObject.inviteUIDToShare(
sharee.principalUID(),
invitationBindModeFromXMLMap[type(ace)],
summary,
))
returnValue(result)
@inlineCallbacks
def uninviteSingleUserFromShare(self, userid, aces, request): #@UnusedVariable
# Cancel invites - we'll just use whatever userid we are given. However, if we
# cannot find a matching principal, try to extract the uid from the userid
# and use that (to allow invalid principals to be removed).
sharee = yield self.principalForCalendarUserAddress(userid)
if sharee is not None:
uid = sharee.principalUID()
elif userid.startswith("urn:x-uid:"):
uid = userid[10:]
else:
returnValue(False)
result = (yield self._newStoreObject.uninviteUIDFromShare(uid))
returnValue(result)
@inlineCallbacks
def uninviteFromShare(self, invitation, request):
yield self._newStoreObject.uninviteFromShare(invitation)
returnValue(True)
def inviteSingleUserUpdateToShare(self, userid, commonName, acesOLD, aceNEW, summary, request): #@UnusedVariable
# Just update existing
return self.inviteSingleUserToShare(userid, commonName, aceNEW, summary, request)
@inlineCallbacks
def _xmlHandleInvite(self, request, docroot):
# Sharing must be enabled for this collection
if not self.canBeShared():
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(customxml.calendarserver_namespace, "valid-request"),
"Sharing not supported on this resource",
))
yield self.authorize(request, (element.Read(), element.Write()))
result = (yield self._handleInvite(request, docroot))
returnValue(result)
@inlineCallbacks
def _handleInvite(self, request, invitedoc):
def _handleInviteSet(inviteset):
userid = None
cn = None
access = None
summary = None
for item in inviteset.children:
if isinstance(item, element.HRef):
userid = str(item)
continue
if isinstance(item, customxml.CommonName):
cn = str(item)
continue
if isinstance(item, customxml.InviteSummary):
summary = str(item)
continue
if isinstance(item, customxml.ReadAccess) or isinstance(item, customxml.ReadWriteAccess):
access = item
continue
if userid and access and summary:
return (userid, cn, access, summary)
else:
error_text = []
if userid is None:
error_text.append("missing href")
if access is None:
error_text.append("missing access")
if summary is None:
error_text.append("missing summary")
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(customxml.calendarserver_namespace, "valid-request"),
"%s: %s" % (", ".join(error_text), inviteset,),
))
def _handleInviteRemove(inviteremove):
userid = None
access = []
for item in inviteremove.children:
if isinstance(item, element.HRef):
userid = str(item)
continue
if isinstance(item, customxml.ReadAccess) or isinstance(item, customxml.ReadWriteAccess):
access.append(item)
continue
if userid is None:
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(customxml.calendarserver_namespace, "valid-request"),
"Missing href: %s" % (inviteremove,),
))
if len(access) == 0:
access = None
else:
access = set(access)
return (userid, access)
setDict, removeDict, updateinviteDict = {}, {}, {}
okusers = set()
badusers = set()
for item in invitedoc.children:
if isinstance(item, customxml.InviteSet):
userid, cn, access, summary = _handleInviteSet(item)
setDict[userid] = (cn, access, summary)
# Validate each userid on add only
uid = (yield self.validUserIDForShare(userid, request))
if uid is None:
uid = yield self.principalForCalendarGroupAddress(userid)
(badusers if uid is None else okusers).add(userid)
elif isinstance(item, customxml.InviteRemove):
userid, access = _handleInviteRemove(item)
removeDict[userid] = access
# Treat removed userids as valid as we will fail invalid ones silently
okusers.add(userid)
# Only make changes if all OK
if len(badusers) == 0:
okusers = set()
badusers = set()
# Special case removing and adding the same user and treat that as an add
sameUseridInRemoveAndSet = [u for u in removeDict.keys() if u in setDict]
for u in sameUseridInRemoveAndSet:
removeACL = removeDict[u]
cn, newACL, summary = setDict[u]
updateinviteDict[u] = (cn, removeACL, newACL, summary)
del removeDict[u]
del setDict[u]
for userid, access in removeDict.iteritems():
result = (yield self.uninviteUIDFromShare(userid, access, request))
# If result is False that means the user being removed was not
# actually invited, but let's not return an error in this case.
okusers.add(userid)
for userid, (cn, access, summary) in setDict.iteritems():
result = (yield self.inviteUIDToShare(userid, cn, access, summary, request))
(okusers if result else badusers).add(userid)
for userid, (cn, removeACL, newACL, summary) in updateinviteDict.iteritems():
result = (yield self.inviteUserUpdateToShare(userid, cn, removeACL, newACL, summary, request))
(okusers if result else badusers).add(userid)
# In this case bad items do not prevent ok items from being processed
ok_code = responsecode.OK
else:
# In this case a bad item causes all ok items not to be processed so failed dependency is returned
ok_code = responsecode.FAILED_DEPENDENCY
# Do a final validation of the entire set of invites
invites = (yield self.validateInvites(request))
numRecords = len(invites)
# Set the sharing state on the collection
shared = self.isShared()
if shared and numRecords == 0:
yield self.downgradeFromShare(request)
elif not shared and numRecords != 0:
yield self.upgradeToShare()
# Create the multistatus response - only needed if some are bad
if badusers:
xml_responses = []
xml_responses.extend([
element.StatusResponse(element.HRef(userid), element.Status.fromResponseCode(ok_code))
for userid in sorted(okusers)
])
xml_responses.extend([
element.StatusResponse(element.HRef(userid), element.Status.fromResponseCode(responsecode.FORBIDDEN))
for userid in sorted(badusers)
])
#
# Return response
#
returnValue(MultiStatusResponse(xml_responses))
else:
returnValue(responsecode.OK)
@inlineCallbacks
def _xmlHandleInviteReply(self, request, docroot):
# Sharing must be enabled for this collection
if not self.canShare():
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(customxml.calendarserver_namespace, "valid-request"),
"Sharing not supported on this resource",
))
yield self.authorize(request, (element.Read(), element.Write()))
result = (yield self._handleInviteReply(request, docroot))
returnValue(result)
def _handleInviteReply(self, request, docroot):
raise NotImplementedError
@inlineCallbacks
def xmlRequestHandler(self, request):
# Need to read the data and get the root element first
xmldata = (yield allDataFromStream(request.stream))
try:
doc = element.WebDAVDocument.fromString(xmldata)
except ValueError, e:
self.log.error("Error parsing doc (%s) Doc:\n %s" % (str(e), xmldata,))
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(customxml.calendarserver_namespace, "valid-request"),
"Invalid XML",
))
root = doc.root_element
if type(root) in self.xmlDocHandlers:
result = (yield self.xmlDocHandlers[type(root)](self, request, root))
returnValue(result)
else:
self.log.error("Unsupported XML (%s)" % (root,))
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(customxml.calendarserver_namespace, "valid-request"),
"Unsupported XML",
))
xmlDocHandlers = {
customxml.InviteShare: _xmlHandleInvite,
customxml.InviteReply: _xmlHandleInviteReply,
}
def isGroup(self):
try:
return self._newStoreObject._kind == _ABO_KIND_GROUP
except AttributeError:
return False
def POST_handler_content_type(self, request, contentType):
if self.isCollection() or self.isGroup():
if contentType:
if contentType in self._postHandlers:
return self._postHandlers[contentType](self, request)
else:
self.log.info("Got a POST on collection or group with an unsupported content type: %s" % (contentType,))
else:
self.log.info("Got a POST on collection or group with no content type")
return succeed(responsecode.FORBIDDEN)
_postHandlers = {
("application", "xml") : xmlRequestHandler,
("text", "xml") : xmlRequestHandler,
}
invitationBindStatusToXMLMap = {
_BIND_STATUS_INVITED : customxml.InviteStatusNoResponse,
_BIND_STATUS_ACCEPTED : customxml.InviteStatusAccepted,
_BIND_STATUS_DECLINED : customxml.InviteStatusDeclined,
_BIND_STATUS_INVALID : customxml.InviteStatusInvalid,
_BIND_STATUS_DELETED : customxml.InviteStatusDeleted,
}
invitationBindStatusFromXMLMap = dict((v, k) for k, v in invitationBindStatusToXMLMap.iteritems())
invitationBindModeToXMLMap = {
_BIND_MODE_READ : customxml.ReadAccess,
_BIND_MODE_WRITE : customxml.ReadWriteAccess,
}
invitationBindModeFromXMLMap = dict((v, k) for k, v in invitationBindModeToXMLMap.iteritems())
invitationAccessFromBindModeMap = {
_BIND_MODE_OWN: "own",
_BIND_MODE_READ: "read-only",
_BIND_MODE_WRITE: "read-write",
_BIND_MODE_DIRECT: "read-write",
_BIND_MODE_INDIRECT: "read-write",
}
class SharedHomeMixin(LinkFollowerMixIn):
"""
A mix-in for calendar/addressbook homes that defines the operations for
manipulating a sharee's set of shared calendars.
"""
@inlineCallbacks
def provisionShare(self, child, request=None):
"""
Set shared state and check access control.
"""
if child._newStoreObject is not None and not child._newStoreObject.owned():
ownerHomeURL = (yield self._otherPrincipalHomeURL(child._newStoreObject.ownerHome().uid()))
ownerView = yield child._newStoreObject.ownerView()
child.setShare(joinURL(ownerHomeURL, ownerView.name()) if ownerHomeURL else None)
access = yield child._checkAccessControl()
if access is None:
returnValue(None)
returnValue(child)
def _otherPrincipalHomeURL(self, otherUID):
# Is this only meant to be overridden?
pass
@inlineCallbacks
def acceptShare(self, request, inviteUID, summary):
# Accept the share
try:
shareeView = yield self._newStoreHome.acceptShare(inviteUID, summary)
except DirectoryRecordNotFoundError:
# Missing sharer record => fail request
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(calendarserver_namespace, "invalid-share"),
"Invite UID not valid",
))
if shareeView is None:
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(calendarserver_namespace, "invalid-share"),
"Invite UID not valid",
))
# Return the URL of the shared collection
sharedAsURL = joinURL(self.url(), shareeView.shareName())
returnValue(XMLResponse(
code=responsecode.OK,
element=customxml.SharedAs(
element.HRef.fromString(sharedAsURL)
)
))
@inlineCallbacks
def declineShare(self, request, inviteUID):
# Remove it if it is in the DB
try:
result = yield self._newStoreHome.declineShare(inviteUID)
except DirectoryRecordNotFoundError:
# Missing sharer record => just treat decline as success
result = True
if not result:
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(calendarserver_namespace, "invalid-share"),
"Invite UID not valid",
))
returnValue(Response(code=responsecode.NO_CONTENT))
def _handleInviteReply(self, request, invitereplydoc):
"""
Handle a user accepting or declining a sharing invite
"""
hostUrl = None
accepted = None
summary = None
replytoUID = None
for item in invitereplydoc.children:
if isinstance(item, customxml.InviteStatusAccepted):
accepted = True
elif isinstance(item, customxml.InviteStatusDeclined):
accepted = False
elif isinstance(item, customxml.InviteSummary):
summary = str(item)
elif isinstance(item, customxml.HostURL):
for hosturlItem in item.children:
if isinstance(hosturlItem, element.HRef):
hostUrl = str(hosturlItem)
elif isinstance(item, customxml.InReplyTo):
replytoUID = str(item)
if accepted is None or hostUrl is None or replytoUID is None:
raise HTTPError(ErrorResponse(
responsecode.FORBIDDEN,
(customxml.calendarserver_namespace, "valid-request"),
"Missing required XML elements",
))
if accepted:
return self.acceptShare(request, replytoUID, summary=summary)
else:
return self.declineShare(request, replytoUID)
| apache-2.0 | 902,628,120,102,176,500 | 38.472803 | 134 | 0.604436 | false |
tpsatish95/gensim | setup.py | 4 | 5463 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Run with:
sudo python ./setup.py install
"""
import os
import sys
import warnings
if sys.version_info[:2] < (2, 6):
raise Exception('This version of gensim needs Python 2.6 or later.')
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
# the following code is adapted from tornado's setup.py:
# https://github.com/tornadoweb/tornado/blob/master/setup.py
# to support installing without the extension on platforms where
# no compiler is available.
class custom_build_ext(build_ext):
"""Allow C extension building to fail.
The C extension speeds up word2vec and doc2vec training, but is not essential.
"""
warning_message = """
********************************************************************
WARNING: %s could not
be compiled. No C extensions are essential for gensim to run,
although they do result in significant speed improvements for some modules.
%s
Here are some hints for popular operating systems:
If you are seeing this message on Linux you probably need to
install GCC and/or the Python development package for your
version of Python.
Debian and Ubuntu users should issue the following command:
$ sudo apt-get install build-essential python-dev
RedHat, CentOS, and Fedora users should issue the following command:
$ sudo yum install gcc python-devel
If you are seeing this message on OSX please read the documentation
here:
http://api.mongodb.org/python/current/installation.html#osx
********************************************************************
"""
def run(self):
try:
build_ext.run(self)
except Exception:
e = sys.exc_info()[1]
sys.stdout.write('%s\n' % str(e))
            warnings.warn(self.warning_message % (
                "Extension modules",
                "There was an issue with your platform configuration - see above."))
def build_extension(self, ext):
name = ext.name
try:
build_ext.build_extension(self, ext)
except Exception:
e = sys.exc_info()[1]
sys.stdout.write('%s\n' % str(e))
            warnings.warn(self.warning_message % (
                "The %s extension module" % (name,),
                "The output above this warning shows how the compilation failed."))
# the following is needed to be able to add numpy's include dirs... without
# importing numpy directly in this script, before it's actually installed!
# http://stackoverflow.com/questions/19919905/how-to-bootstrap-numpy-installation-in-setup-py
def finalize_options(self):
build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
# https://docs.python.org/2/library/__builtin__.html#module-__builtin__
if isinstance(__builtins__, dict):
__builtins__["__NUMPY_SETUP__"] = False
else:
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
def readfile(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
model_dir = os.path.join(os.path.dirname(__file__), 'gensim', 'models')
setup(
name='gensim',
version='0.12.0',
description='Python framework for fast Vector Space Modelling',
long_description=readfile('README.rst'),
ext_modules=[
Extension('gensim.models.word2vec_inner',
sources=['./gensim/models/word2vec_inner.c'],
include_dirs=[model_dir]),
Extension('gensim.models.doc2vec_inner',
sources=['./gensim/models/doc2vec_inner.c'],
include_dirs=[model_dir]),
],
cmdclass={'build_ext': custom_build_ext},
packages=find_packages(),
author=u'Radim Řehůřek',
author_email='[email protected]',
url='http://radimrehurek.com/gensim',
download_url='http://pypi.python.org/pypi/gensim',
keywords='Singular Value Decomposition, SVD, Latent Semantic Indexing, '
'LSA, LSI, Latent Dirichlet Allocation, LDA, '
'Hierarchical Dirichlet Process, HDP, Random Projections, '
'TFIDF, word2vec',
license='LGPL',
platforms='any',
zip_safe=False,
classifiers=[ # from http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 3.4',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Text Processing :: Linguistic',
],
test_suite="gensim.test",
setup_requires=[
'numpy >= 1.3'
],
install_requires=[
'numpy >= 1.3',
'scipy >= 0.7.0',
'six >= 1.2.0',
'smart_open >= 1.2.1',
],
extras_require={
'distributed': ['Pyro4 >= 4.27'],
},
include_package_data=True,
)
| lgpl-3.0 | 8,696,871,435,351,643,000 | 31.694611 | 97 | 0.623993 | false |
vinilios/synnefo | snf-astakos-app/astakos/synnefo_settings.py | 10 | 2655 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Django settings metadata. To be used in setup.py snf-webproject entry points.
"""
installed_apps = [
{'before': 'django.contrib.admin',
'insert': 'astakos.im', },
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django_tables2',
'astakos.quotaholder_app',
'synnefo_branding',
'astakos.oa2',
]
context_processors = [
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.core.context_processors.csrf',
'django.contrib.messages.context_processors.messages',
'astakos.im.context_processors.media',
'astakos.im.context_processors.im_modules',
'astakos.im.context_processors.auth_providers',
'astakos.im.context_processors.next',
'astakos.im.context_processors.code',
'astakos.im.context_processors.invitations',
'astakos.im.context_processors.menu',
'astakos.im.context_processors.custom_messages',
'astakos.im.context_processors.last_login_method',
'astakos.im.context_processors.membership_policies',
'synnefo.webproject.context_processors.cloudbar'
]
middlware_classes = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'synnefo.webproject.middleware.LoggingConfigMiddleware',
'synnefo.webproject.middleware.SecureMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
]
static_files = {'astakos.im': ''}
# The following settings will replace the default django settings
AUTHENTICATION_BACKENDS = (
'astakos.im.auth_backends.EmailBackend',
'astakos.im.auth_backends.TokenBackend')
CUSTOM_USER_MODEL = 'astakos.im.AstakosUser'
#SOUTH_TESTS_MIGRATE = False
BROKER_URL = ''
# INTERNAL_IPS = ('127.0.0.1',)
| gpl-3.0 | 8,899,553,753,741,158,000 | 33.934211 | 77 | 0.734463 | false |
Pal3love/otRebuilder | Package/otRebuilder/Dep/fontTools/ttLib/tables/F_F_T_M_.py | 3 | 1287 | from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from fontTools.misc.timeTools import timestampFromString, timestampToString
from . import DefaultTable
FFTMFormat = """
> # big endian
version: I
FFTimeStamp: Q
sourceCreated: Q
sourceModified: Q
"""
class table_F_F_T_M_(DefaultTable.DefaultTable):
def decompile(self, data, ttFont):
dummy, rest = sstruct.unpack2(FFTMFormat, data, self)
def compile(self, ttFont):
data = sstruct.pack(FFTMFormat, self)
return data
def toXML(self, writer, ttFont):
writer.comment("FontForge's timestamp, font source creation and modification dates")
writer.newline()
formatstring, names, fixes = sstruct.getformat(FFTMFormat)
for name in names:
value = getattr(self, name)
if name in ("FFTimeStamp", "sourceCreated", "sourceModified"):
value = timestampToString(value)
writer.simpletag(name, value=value)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
value = attrs["value"]
if name in ("FFTimeStamp", "sourceCreated", "sourceModified"):
value = timestampFromString(value)
else:
value = safeEval(value)
setattr(self, name, value)
| mit | -8,940,748,176,325,591,000 | 29.642857 | 86 | 0.731935 | false |
flennerhag/mlens | mlens/parallel/tests/test_b3_layer_temporal.py | 1 | 3745 | """"ML-ENSEMBLE
Testing suite for Layer and Transformer
"""
from mlens.testing import Data, EstimatorContainer, get_layer, run_layer
def test_fit():
"""[Parallel | Layer | Multiprocessing | Temporal | No Proba | No Prep] test fit"""
args = get_layer('fit', 'multiprocessing', 'temporal', False, False, window=2, step_size=3)
run_layer(*args)
def test_predict():
"""[Parallel | Layer | Multiprocessing | Temporal | No Proba | No Prep] test predict"""
args = get_layer('predict', 'multiprocessing', 'temporal', False, False, window=2, step_size=3)
run_layer(*args)
def test_transform():
"""[Parallel | Layer | Multiprocessing | Temporal | No Proba | No Prep] test transform"""
args = get_layer('transform', 'multiprocessing', 'temporal', False, False, window=2, step_size=3)
run_layer(*args)
def test_fit_prep():
"""[Parallel | Layer | Multiprocessing | Temporal | No Proba | Prep] test fit"""
args = get_layer('fit', 'multiprocessing', 'temporal', False, True, window=2, step_size=3)
run_layer(*args)
def test_predict_prep():
"""[Parallel | Layer | Multiprocessing | Temporal | No Proba | Prep] test predict"""
args = get_layer('predict', 'multiprocessing', 'temporal', False, True, window=2, step_size=3)
run_layer(*args)
def test_transform_prep():
"""[Parallel | Layer | Multiprocessing | Temporal | No Proba | Prep] test transform"""
args = get_layer('transform', 'multiprocessing', 'temporal', False, True, window=2, step_size=3)
run_layer(*args)
def test_fit_proba():
"""[Parallel | Layer | Multiprocessing | Temporal | Proba | No Prep] test fit"""
args = get_layer('fit', 'multiprocessing', 'temporal', True, False, window=2, step_size=3)
run_layer(*args)
def test_predict_proba():
"""[Parallel | Layer | Multiprocessing | Temporal | Proba | No Prep] test predict"""
args = get_layer('predict', 'multiprocessing', 'temporal', True, False, window=2, step_size=3)
run_layer(*args)
def test_transform_proba():
"""[Parallel | Layer | Multiprocessing | Temporal | Proba | No Prep] test transform"""
args = get_layer('transform', 'multiprocessing', 'temporal', True, False, window=2, step_size=3)
run_layer(*args)
def test_fit_prep_proba():
"""[Parallel | Layer | Multiprocessing | Temporal | Proba | Prep] test fit"""
args = get_layer('fit', 'multiprocessing', 'temporal', True, True, window=2, step_size=3)
run_layer(*args)
def test_predict_prep_proba():
"""[Parallel | Layer | Multiprocessing | Temporal | Proba | No Prep] test predict"""
args = get_layer('predict', 'multiprocessing', 'temporal', True, True, window=2, step_size=3)
run_layer(*args)
def test_transform_prep_proba():
"""[Parallel | Layer | Multiprocessing | Temporal | Proba | Prep] test transform"""
args = get_layer('transform', 'multiprocessing', 'temporal', True, True, window=2, step_size=3)
run_layer(*args)
def test_fit_fp():
"""[Parallel | Layer | Multiprocessing | Temporal | No Proba | No Prep] test feature prop fit"""
args = get_layer('fit', 'multiprocessing', 'temporal', False, False, feature_prop=2, window=2, step_size=3)
run_layer(*args)
def test_predict_fp():
"""[Parallel | Layer | Multiprocessing | Temporal | No Proba | No Prep] test feature prop predict"""
args = get_layer('predict', 'multiprocessing', 'temporal', False, False, feature_prop=2, window=2, step_size=3)
run_layer(*args)
def test_transform_fp():
"""[Parallel | Layer | Multiprocessing | Temporal | No Proba | No Prep] test feature prop transform"""
args = get_layer('transform', 'multiprocessing', 'temporal', False, False, feature_prop=2, window=2, step_size=3)
run_layer(*args)
| mit | -7,368,481,038,749,782,000 | 38.840426 | 117 | 0.661949 | false |
Alexander-M-Waldman/local_currency_site | lib/python2.7/site-packages/allauth/socialaccount/providers/foursquare/provider.py | 5 | 1026 | from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class FoursquareAccount(ProviderAccount):
def get_profile_url(self):
return 'https://foursquare.com/user/' \
+ self.account.extra_data.get('id')
def get_avatar_url(self):
return self.account.extra_data.get('photo')
def to_str(self):
dflt = super(FoursquareAccount, self).to_str()
return self.account.extra_data.get('name', dflt)
class FoursquareProvider(OAuth2Provider):
id = 'foursquare'
name = 'Foursquare'
account_class = FoursquareAccount
def extract_uid(self, data):
return str(data['id'])
def extract_common_fields(self, data):
return dict(first_name=data.get('firstname'),
last_name=data.get('lastname'),
email=data.get('contact').get('email'))
providers.registry.register(FoursquareProvider)
| gpl-3.0 | -3,992,791,272,591,432,000 | 30.090909 | 74 | 0.679337 | false |
lihui7115/ChromiumGStreamerBackend | components/cronet/tools/cr_cronet.py | 9 | 3357 | #!/usr/bin/python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
cr_cronet.py - cr - like helper tool for cronet developers
"""
import argparse
import os
import sys
def run(command, extra_options=''):
command = command + ' ' + extra_options
print command
return os.system(command)
def build(out_dir, extra_options=''):
return run('ninja -C ' + out_dir + ' cronet_test_instrumentation_apk',
extra_options)
def install(release_arg):
return run('build/android/adb_install_apk.py ' + release_arg + \
' --apk=CronetTest.apk')
def test(release_arg, extra_options):
return run('build/android/test_runner.py instrumentation '+ \
release_arg + ' --test-apk=CronetTestInstrumentation',
extra_options)
def debug(extra_options):
return run('build/android/adb_gdb --start ' + \
'--activity=.CronetTestActivity ' + \
'--program-name=CronetTest ' + \
'--package-name=org.chromium.net',
extra_options)
def stack(out_dir):
return run('adb logcat -d | third_party/android_tools/ndk/ndk-stack ' + \
'-sym ' + out_dir + '/lib')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('command',
choices=['gyp',
'sync',
'build',
'install',
'proguard',
'test',
'build-test',
'stack',
'debug',
'build-debug'])
parser.add_argument('-r', '--release', action='store_true',
help='use release configuration')
options, extra_options_list = parser.parse_known_args()
print options
print extra_options_list
gyp_defines = 'GYP_DEFINES="OS=android run_findbugs=1 enable_websockets=0 '+ \
'disable_file_support=1 disable_ftp_support=1 '+ \
'use_icu_alternatives_on_android=1" '
out_dir = 'out/Debug'
release_arg = ''
extra_options = ' '.join(extra_options_list)
if options.release:
out_dir = 'out/Release'
release_arg = ' --release'
if (options.command=='gyp'):
return run (gyp_defines + ' gclient runhooks')
if (options.command=='sync'):
return run ('git pull --rebase && ' + gyp_defines + ' gclient sync')
if (options.command=='build'):
return build(out_dir, extra_options)
if (options.command=='install'):
return install(release_arg)
if (options.command=='proguard'):
return run ('ninja -C ' + out_dir + ' cronet_sample_proguard_apk')
if (options.command=='test'):
return install(release_arg) or test(release_arg, extra_options)
if (options.command=='build-test'):
return build(out_dir) or install(release_arg) or \
test(release_arg, extra_options)
if (options.command=='stack'):
return stack(out_dir)
if (options.command=='debug'):
return install(release_arg) or debug(extra_options)
if (options.command=='build-debug'):
return build(out_dir) or install(release_arg) or debug(extra_options)
parser.print_help()
return 1
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 1,914,110,942,877,224,400 | 30.669811 | 80 | 0.587727 | false |
zouyapeng/horizon | openstack_dashboard/dashboards/project/data_processing/clusters/tabs.py | 7 | 6387 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from horizon import tabs
from openstack_dashboard.dashboards.project. \
data_processing.utils import workflow_helpers as helpers
from openstack_dashboard.api import glance
from openstack_dashboard.api import network
from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
from openstack_dashboard.api import sahara as saharaclient
LOG = logging.getLogger(__name__)
class GeneralTab(tabs.Tab):
name = _("General Info")
slug = "cluster_details_tab"
template_name = "project/data_processing.clusters/_details.html"
def get_context_data(self, request):
cluster_id = self.tab_group.kwargs['cluster_id']
cluster_info = {}
try:
sahara = saharaclient.client(request)
cluster = sahara.clusters.get(cluster_id)
for info_key, info_val in cluster.info.items():
for key, val in info_val.items():
if str(val).startswith(('http://', 'https://')):
cluster.info[info_key][key] = build_link(val)
base_image = glance.image_get(request,
cluster.default_image_id)
if getattr(cluster, 'cluster_template_id', None):
cluster_template = helpers.safe_call(
sahara.cluster_templates.get,
cluster.cluster_template_id)
else:
cluster_template = None
if getattr(cluster, 'neutron_management_network', None):
net_id = cluster.neutron_management_network
network = neutron.network_get(request, net_id)
network.set_id_as_name_if_empty()
net_name = network.name
else:
net_name = None
cluster_info.update({"cluster": cluster,
"base_image": base_image,
"cluster_template": cluster_template,
"network": net_name})
except Exception as e:
LOG.error("Unable to fetch cluster details: %s" % str(e))
return cluster_info
def build_link(url):
return "<a href='" + url + "' target=\"_blank\">" + url + "</a>"
class NodeGroupsTab(tabs.Tab):
name = _("Node Groups")
slug = "cluster_nodegroups_tab"
template_name = (
"project/data_processing.clusters/_nodegroups_details.html")
def get_context_data(self, request):
cluster_id = self.tab_group.kwargs['cluster_id']
try:
sahara = saharaclient.client(request)
cluster = sahara.clusters.get(cluster_id)
for ng in cluster.node_groups:
if ng["flavor_id"]:
ng["flavor_name"] = (
nova.flavor_get(request, ng["flavor_id"]).name)
if ng["floating_ip_pool"]:
ng["floating_ip_pool_name"] = (
self._get_floating_ip_pool_name(
request, ng["floating_ip_pool"]))
ng["node_group_template"] = helpers.safe_call(
sahara.node_group_templates.get,
ng.get("node_group_template_id", None))
except Exception:
cluster = {}
exceptions.handle(request,
_("Unable to get node group details."))
return {"cluster": cluster}
def _get_floating_ip_pool_name(self, request, pool_id):
pools = [pool for pool in network.floating_ip_pools_list(
request) if pool.id == pool_id]
return pools[0].name if pools else pool_id
class Instance(object):
def __init__(self, name=None, id=None, internal_ip=None,
management_ip=None):
self.name = name
self.id = id
self.internal_ip = internal_ip
self.management_ip = management_ip
class InstancesTable(tables.DataTable):
name = tables.Column("name",
link=("horizon:project:instances:detail"),
verbose_name=_("Name"))
internal_ip = tables.Column("internal_ip",
verbose_name=_("Internal IP"))
management_ip = tables.Column("management_ip",
verbose_name=_("Management IP"))
class Meta:
name = "cluster_instances"
# Just ignoring the name.
verbose_name = _(" ")
class InstancesTab(tabs.TableTab):
name = _("Instances")
slug = "cluster_instances_tab"
template_name = "project/data_processing.clusters/_instances_details.html"
table_classes = (InstancesTable, )
def get_cluster_instances_data(self):
cluster_id = self.tab_group.kwargs['cluster_id']
try:
sahara = saharaclient.client(self.request)
cluster = sahara.clusters.get(cluster_id)
instances = []
for ng in cluster.node_groups:
for instance in ng["instances"]:
instances.append(Instance(
name=instance["instance_name"],
id=instance["instance_id"],
internal_ip=instance.get("internal_ip",
"Not assigned"),
management_ip=instance.get("management_ip",
"Not assigned")))
except Exception:
instances = []
exceptions.handle(self.request,
_("Unable to fetch instance details."))
return instances
class ClusterDetailsTabs(tabs.TabGroup):
slug = "cluster_details"
tabs = (GeneralTab, NodeGroupsTab, InstancesTab, )
sticky = True
| apache-2.0 | -5,357,498,103,764,889,000 | 34.287293 | 78 | 0.573352 | false |
Nelestya/0X000M001 | Download.py | 1 | 1618 | #!/usr/bin/python3.4
#-*- coding: utf-8 -*-
from __future__ import unicode_literals
import youtube_dl
import os
class MyLogger(object):
def debug(self, msg):
pass
def warning(self, msg):
pass
def error(self, msg):
print(msg)
class Downloadmp3():
"""Class for download webm and convert mp3 in youtube"""
def __init__(self, url):
self.options = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
'logger': MyLogger(),
}
with youtube_dl.YoutubeDL(self.options) as ydl:
ydl.download([url])
def movefile(directory):
"""Method for move the mp3 file in Directory Download"""
directory = os.path.dirname(directory + "/")
####################################################
        # Create the target directory if it does not already exist
if not os.path.exists(directory):
os.makedirs(directory)
dirs = os.listdir()
for file in dirs:
####################################################
            # If the file ends with .mp3, move it into the target directory
if file[(len(file)-3):] == "mp3":
path = directory + "/" + file
os.rename(file, path)
###########################################################
#TEST PHASE
if __name__ == "__main__":
Downloadmp3("https://www.youtube.com/watch?v=sB8H-lyegUc")
Downloadmp3.movefile("Test")
| gpl-3.0 | 2,695,171,229,128,586,000 | 25.52459 | 64 | 0.485785 | false |
pshchelo/ironic | ironic/tests/unit/drivers/modules/irmc/test_management.py | 4 | 22864 | # Copyright 2015 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for iRMC Management Driver
"""
import os
import xml.etree.ElementTree as ET
import mock
from ironic.common import boot_devices
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import ipmitool
from ironic.drivers.modules.irmc import common as irmc_common
from ironic.drivers.modules.irmc import management as irmc_management
from ironic.drivers.modules.irmc import power as irmc_power
from ironic.drivers import utils as driver_utils
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.drivers import third_party_driver_mock_specs \
as mock_specs
from ironic.tests.unit.objects import utils as obj_utils
INFO_DICT = db_utils.get_test_irmc_info()
@mock.patch.object(irmc_management.irmc, 'elcm',
spec_set=mock_specs.SCCICLIENT_IRMC_ELCM_SPEC)
@mock.patch.object(manager_utils, 'node_power_action',
                   spec_set=True, autospec=True)
@mock.patch.object(irmc_power.IRMCPower, 'get_power_state',
return_value=states.POWER_ON,
                   spec_set=True, autospec=True)
class IRMCManagementFunctionsTestCase(db_base.DbTestCase):
def setUp(self):
super(IRMCManagementFunctionsTestCase, self).setUp()
driver_info = INFO_DICT
mgr_utils.mock_the_extension_manager(driver="fake_irmc")
self.driver = driver_factory.get_driver("fake_irmc")
self.node = obj_utils.create_test_node(self.context,
driver='fake_irmc',
driver_info=driver_info)
self.info = irmc_common.parse_driver_info(self.node)
irmc_management.irmc.scci.SCCIError = Exception
irmc_management.irmc.scci.SCCIInvalidInputError = ValueError
def test_backup_bios_config(self, mock_get_power, mock_power_action,
mock_elcm):
self.config(clean_priority_restore_irmc_bios_config=10, group='irmc')
bios_config = {'Server': {'System': {'BiosConfig': {'key1': 'val1'}}}}
mock_elcm.backup_bios_config.return_value = {
'bios_config': bios_config}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
irmc_management.backup_bios_config(task)
self.assertEqual(bios_config, task.node.driver_internal_info[
'irmc_bios_config'])
self.assertEqual(1, mock_elcm.backup_bios_config.call_count)
def test_backup_bios_config_skipped(self, mock_get_power,
mock_power_action, mock_elcm):
self.config(clean_priority_restore_irmc_bios_config=0, group='irmc')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
irmc_management.backup_bios_config(task)
self.assertNotIn('irmc_bios_config',
task.node.driver_internal_info)
self.assertFalse(mock_elcm.backup_bios_config.called)
def test_backup_bios_config_failed(self, mock_get_power,
mock_power_action, mock_elcm):
self.config(clean_priority_restore_irmc_bios_config=10, group='irmc')
mock_elcm.backup_bios_config.side_effect = Exception
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.IRMCOperationError,
irmc_management.backup_bios_config,
task)
self.assertNotIn('irmc_bios_config',
task.node.driver_internal_info)
self.assertEqual(1, mock_elcm.backup_bios_config.call_count)
def test__restore_bios_config(self, mock_get_power, mock_power_action,
mock_elcm):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
# Set bios data for the node info
task.node.driver_internal_info['irmc_bios_config'] = 'data'
irmc_management._restore_bios_config(task)
self.assertEqual(1, mock_elcm.restore_bios_config.call_count)
def test__restore_bios_config_failed(self, mock_get_power,
mock_power_action,
mock_elcm):
mock_elcm.restore_bios_config.side_effect = Exception
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
# Set bios data for the node info
task.node.driver_internal_info['irmc_bios_config'] = 'data'
self.assertRaises(exception.IRMCOperationError,
irmc_management._restore_bios_config,
task)
# Backed up BIOS config is still in the node object
self.assertEqual('data', task.node.driver_internal_info[
'irmc_bios_config'])
self.assertTrue(mock_elcm.restore_bios_config.called)
def test__restore_bios_config_corrupted(self, mock_get_power,
mock_power_action,
mock_elcm):
mock_elcm.restore_bios_config.side_effect = \
irmc_management.irmc.scci.SCCIInvalidInputError
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
# Set bios data for the node info
task.node.driver_internal_info['irmc_bios_config'] = 'data'
self.assertRaises(exception.IRMCOperationError,
irmc_management._restore_bios_config,
task)
# Backed up BIOS config is removed from the node object
self.assertNotIn('irmc_bios_config',
task.node.driver_internal_info)
self.assertTrue(mock_elcm.restore_bios_config.called)
class IRMCManagementTestCase(db_base.DbTestCase):
def setUp(self):
super(IRMCManagementTestCase, self).setUp()
driver_info = INFO_DICT
mgr_utils.mock_the_extension_manager(driver="fake_irmc")
self.driver = driver_factory.get_driver("fake_irmc")
self.node = obj_utils.create_test_node(self.context,
driver='fake_irmc',
driver_info=driver_info)
self.info = irmc_common.parse_driver_info(self.node)
def test_get_properties(self):
expected = irmc_common.COMMON_PROPERTIES
expected.update(ipmitool.COMMON_PROPERTIES)
expected.update(ipmitool.CONSOLE_PROPERTIES)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(expected, task.driver.get_properties())
@mock.patch.object(irmc_common, 'parse_driver_info', spec_set=True,
autospec=True)
def test_validate(self, mock_drvinfo):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.management.validate(task)
mock_drvinfo.assert_called_once_with(task.node)
@mock.patch.object(irmc_common, 'parse_driver_info', spec_set=True,
autospec=True)
def test_validate_fail(self, mock_drvinfo):
side_effect = exception.InvalidParameterValue("Invalid Input")
mock_drvinfo.side_effect = side_effect
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.management.validate,
task)
def test_management_interface_get_supported_boot_devices(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
expected = [boot_devices.PXE, boot_devices.DISK,
boot_devices.CDROM, boot_devices.BIOS,
boot_devices.SAFE]
self.assertEqual(sorted(expected), sorted(task.driver.management.
get_supported_boot_devices(task)))
@mock.patch.object(irmc_management.ipmitool, "send_raw", spec_set=True,
autospec=True)
def _test_management_interface_set_boot_device_ok(
self, boot_mode, params, expected_raw_code, send_raw_mock):
send_raw_mock.return_value = [None, None]
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.properties['capabilities'] = ''
if boot_mode:
driver_utils.add_node_capability(task, 'boot_mode', boot_mode)
self.driver.management.set_boot_device(task, **params)
send_raw_mock.assert_has_calls([
mock.call(task, "0x00 0x08 0x03 0x08"),
mock.call(task, expected_raw_code)])
def test_management_interface_set_boot_device_ok_pxe(self):
params = {'device': boot_devices.PXE, 'persistent': False}
self._test_management_interface_set_boot_device_ok(
None,
params,
"0x00 0x08 0x05 0x80 0x04 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'bios',
params,
"0x00 0x08 0x05 0x80 0x04 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'uefi',
params,
"0x00 0x08 0x05 0xa0 0x04 0x00 0x00 0x00")
params['persistent'] = True
self._test_management_interface_set_boot_device_ok(
None,
params,
"0x00 0x08 0x05 0xc0 0x04 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'bios',
params,
"0x00 0x08 0x05 0xc0 0x04 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'uefi',
params,
"0x00 0x08 0x05 0xe0 0x04 0x00 0x00 0x00")
def test_management_interface_set_boot_device_ok_disk(self):
params = {'device': boot_devices.DISK, 'persistent': False}
self._test_management_interface_set_boot_device_ok(
None,
params,
"0x00 0x08 0x05 0x80 0x08 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'bios',
params,
"0x00 0x08 0x05 0x80 0x08 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'uefi',
params,
"0x00 0x08 0x05 0xa0 0x08 0x00 0x00 0x00")
params['persistent'] = True
self._test_management_interface_set_boot_device_ok(
None,
params,
"0x00 0x08 0x05 0xc0 0x08 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'bios',
params,
"0x00 0x08 0x05 0xc0 0x08 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'uefi',
params,
"0x00 0x08 0x05 0xe0 0x08 0x00 0x00 0x00")
def test_management_interface_set_boot_device_ok_cdrom(self):
params = {'device': boot_devices.CDROM, 'persistent': False}
self._test_management_interface_set_boot_device_ok(
None,
params,
"0x00 0x08 0x05 0x80 0x20 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'bios',
params,
"0x00 0x08 0x05 0x80 0x20 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'uefi',
params,
"0x00 0x08 0x05 0xa0 0x20 0x00 0x00 0x00")
params['persistent'] = True
self._test_management_interface_set_boot_device_ok(
None,
params,
"0x00 0x08 0x05 0xc0 0x20 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'bios',
params,
"0x00 0x08 0x05 0xc0 0x20 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'uefi',
params,
"0x00 0x08 0x05 0xe0 0x20 0x00 0x00 0x00")
def test_management_interface_set_boot_device_ok_bios(self):
params = {'device': boot_devices.BIOS, 'persistent': False}
self._test_management_interface_set_boot_device_ok(
None,
params,
"0x00 0x08 0x05 0x80 0x18 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'bios',
params,
"0x00 0x08 0x05 0x80 0x18 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'uefi',
params,
"0x00 0x08 0x05 0xa0 0x18 0x00 0x00 0x00")
params['persistent'] = True
self._test_management_interface_set_boot_device_ok(
None,
params,
"0x00 0x08 0x05 0xc0 0x18 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'bios',
params,
"0x00 0x08 0x05 0xc0 0x18 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'uefi',
params,
"0x00 0x08 0x05 0xe0 0x18 0x00 0x00 0x00")
def test_management_interface_set_boot_device_ok_safe(self):
params = {'device': boot_devices.SAFE, 'persistent': False}
self._test_management_interface_set_boot_device_ok(
None,
params,
"0x00 0x08 0x05 0x80 0x0c 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'bios',
params,
"0x00 0x08 0x05 0x80 0x0c 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'uefi',
params,
"0x00 0x08 0x05 0xa0 0x0c 0x00 0x00 0x00")
params['persistent'] = True
self._test_management_interface_set_boot_device_ok(
None,
params,
"0x00 0x08 0x05 0xc0 0x0c 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'bios',
params,
"0x00 0x08 0x05 0xc0 0x0c 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'uefi',
params,
"0x00 0x08 0x05 0xe0 0x0c 0x00 0x00 0x00")
@mock.patch.object(irmc_management.ipmitool, "send_raw", spec_set=True,
autospec=True)
def test_management_interface_set_boot_device_ng(self, send_raw_mock):
"""uefi mode, next boot only, unknown device."""
send_raw_mock.return_value = [None, None]
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_utils.add_node_capability(task, 'boot_mode', 'uefi')
self.assertRaises(exception.InvalidParameterValue,
self.driver.management.set_boot_device,
task,
"unknown")
@mock.patch.object(irmc_management.irmc, 'scci',
spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
@mock.patch.object(irmc_common, 'get_irmc_report', spec_set=True,
autospec=True)
def test_management_interface_get_sensors_data_scci_ok(
self, mock_get_irmc_report, mock_scci):
"""'irmc_sensor_method' = 'scci' specified and OK data."""
with open(os.path.join(os.path.dirname(__file__),
'fake_sensors_data_ok.xml'), "r") as report:
fake_txt = report.read()
fake_xml = ET.fromstring(fake_txt)
mock_get_irmc_report.return_value = fake_xml
mock_scci.get_sensor_data.return_value = fake_xml.find(
"./System/SensorDataRecords")
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.driver_info['irmc_sensor_method'] = 'scci'
sensor_dict = self.driver.management.get_sensors_data(task)
expected = {
'Fan (4)': {
'FAN1 SYS (29)': {
'Units': 'RPM',
'Sensor ID': 'FAN1 SYS (29)',
'Sensor Reading': '600 RPM'
},
'FAN2 SYS (29)': {
'Units': 'None',
'Sensor ID': 'FAN2 SYS (29)',
'Sensor Reading': 'None None'
}
},
'Temperature (1)': {
'Systemboard 1 (7)': {
'Units': 'degree C',
'Sensor ID': 'Systemboard 1 (7)',
'Sensor Reading': '80 degree C'
},
'Ambient (55)': {
'Units': 'degree C',
'Sensor ID': 'Ambient (55)',
'Sensor Reading': '42 degree C'
}
}
}
self.assertEqual(expected, sensor_dict)
@mock.patch.object(irmc_management.irmc, 'scci',
spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
@mock.patch.object(irmc_common, 'get_irmc_report', spec_set=True,
autospec=True)
def test_management_interface_get_sensors_data_scci_ng(
self, mock_get_irmc_report, mock_scci):
"""'irmc_sensor_method' = 'scci' specified and NG data."""
with open(os.path.join(os.path.dirname(__file__),
'fake_sensors_data_ng.xml'), "r") as report:
fake_txt = report.read()
fake_xml = ET.fromstring(fake_txt)
mock_get_irmc_report.return_value = fake_xml
mock_scci.get_sensor_data.return_value = fake_xml.find(
"./System/SensorDataRecords")
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.driver_info['irmc_sensor_method'] = 'scci'
sensor_dict = self.driver.management.get_sensors_data(task)
self.assertEqual(len(sensor_dict), 0)
@mock.patch.object(ipmitool.IPMIManagement, 'get_sensors_data',
spec_set=True, autospec=True)
def test_management_interface_get_sensors_data_ipmitool_ok(
self,
get_sensors_data_mock):
"""'irmc_sensor_method' = 'ipmitool' specified."""
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.driver_info['irmc_sensor_method'] = 'ipmitool'
task.driver.management.get_sensors_data(task)
get_sensors_data_mock.assert_called_once_with(
task.driver.management, task)
@mock.patch.object(irmc_common, 'get_irmc_report', spec_set=True,
autospec=True)
def test_management_interface_get_sensors_data_exception(
self,
get_irmc_report_mock):
"""'FailedToGetSensorData Exception."""
get_irmc_report_mock.side_effect = exception.InvalidParameterValue(
"Fake Error")
irmc_management.irmc.scci.SCCIInvalidInputError = Exception
irmc_management.irmc.scci.SCCIClientError = Exception
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.driver_info['irmc_sensor_method'] = 'scci'
e = self.assertRaises(exception.FailedToGetSensorData,
self.driver.management.get_sensors_data,
task)
self.assertEqual("Failed to get sensor data for node 1be26c0b-" +
"03f2-4d2e-ae87-c02d7f33c123. Error: Fake Error",
str(e))
@mock.patch.object(irmc_management.LOG, 'error', spec_set=True,
autospec=True)
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
autospec=True)
def test_management_interface_inject_nmi_ok(self, mock_get_irmc_client,
mock_log):
irmc_client = mock_get_irmc_client.return_value
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.management.inject_nmi(task)
irmc_client.assert_called_once_with(
irmc_management.irmc.scci.POWER_RAISE_NMI)
self.assertFalse(mock_log.called)
@mock.patch.object(irmc_management.LOG, 'error', spec_set=True,
autospec=True)
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
autospec=True)
def test_management_interface_inject_nmi_fail(self, mock_get_irmc_client,
mock_log):
irmc_client = mock_get_irmc_client.return_value
irmc_client.side_effect = Exception()
irmc_management.irmc.scci.SCCIClientError = Exception
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.IRMCOperationError,
self.driver.management.inject_nmi,
task)
irmc_client.assert_called_once_with(
irmc_management.irmc.scci.POWER_RAISE_NMI)
self.assertTrue(mock_log.called)
@mock.patch.object(irmc_management, '_restore_bios_config',
spec_set=True, autospec=True)
def test_management_interface_restore_irmc_bios_config(self,
mock_restore_bios):
with task_manager.acquire(self.context, self.node.uuid) as task:
result = task.driver.management.restore_irmc_bios_config(task)
self.assertIsNone(result)
mock_restore_bios.assert_called_once_with(task)
| apache-2.0 | 2,663,143,609,079,919,600 | 43.138996 | 78 | 0.578989 | false |
eskibars/domoticz | plugins/AwoxSMP/plugin.py | 23 | 7982 | # Awox SmartPlug Plugin
#
# Author: zaraki673, 2017
#
"""
<plugin key="AwoxSMP" name="Awox SmartPlug" author="zaraki673" version="1.0.0">
<params>
<param field="Address" label="MAC Address" width="150px" required="true"/>
<param field="Mode6" label="Debug" width="75px">
<options>
<option label="True" value="Debug"/>
<option label="False" value="Normal" default="true" />
</options>
</param>
</params>
</plugin>
"""
import Domoticz
import binascii
import struct
import lib.pySmartPlugSmpB16
from bluepy import btle
START_OF_MESSAGE = b'\x0f'
END_OF_MESSAGE = b'\xff\xff'
SMPstate = 0
SMPconso = 0
class BasePlugin:
enabled = False
pluginState = "Not Ready"
sessionCookie = ""
privateKey = b""
socketOn = "FALSE"
def __init__(self):
return
def onStart(self):
global SMPstate, SMPconso
if Parameters["Mode6"] == "Debug":
Domoticz.Debugging(1)
if (len(Devices) == 0):
Domoticz.Device(Name="Status", Unit=1, Type=17, Switchtype=0).Create()
Domoticz.Device(Name="Conso", Unit=2, TypeName="Usage").Create()
Domoticz.Log("Devices created.")
else:
if (1 in Devices): SMPstate = Devices[1].nValue
if (2 in Devices): SMPconso = Devices[2].nValue
DumpConfigToLog()
Domoticz.Log("Plugin is started.")
Domoticz.Heartbeat(20)
def onStop(self):
Domoticz.Log("Plugin is stopping.")
def onConnect(self, Status, Description):
return
def onMessage(self, Data, Status, Extra):
return
def onCommand(self, Unit, Command, Level, Hue):
Domoticz.Debug("onCommand called for Unit " + str(Unit) + ": Parameter '" + str(Command) + "', Level: " + str(Level))
Command = Command.strip()
action, sep, params = Command.partition(' ')
action = action.capitalize()
if (action == 'On'):
try:
plug = SmartPlug(Parameters["Address"])
plug.on()
UpdateDevice(1,1,'On')
plug.disconnect()
except btle.BTLEException as err:
Domoticz.Log('error when setting plug %s on (code %d)' % (Parameters["Address"], err.code))
elif (action == 'Off'):
try:
plug = SmartPlug(Parameters["Address"])
plug.off()
UpdateDevice(1,0,'Off')
plug.disconnect()
except btle.BTLEException as err:
Domoticz.Log('error when setting plug %s on (code %d)' % (Parameters["Address"], err.code))
return True
def onDisconnect(self):
return
def onHeartbeat(self):
global SMPstate, SMPconso
try:
plug = SmartPlug(Parameters["Address"])
(SMPstate, SMPconso) = plug.status_request()
plug.disconnect()
SMPstate = 'on' if SMPstate else 'off'
Domoticz.Log('plug state = %s' % SMPstate)
if (SMPstate == 'off'): UpdateDevice(1,0,'Off')
else: UpdateDevice(1,1,'On')
Domoticz.Log('plug power = %d W' % SMPconso)
UpdateDevice(2,0,str(SMPconso))
except btle.BTLEException as err:
Domoticz.Log('error when requesting stat to plug %s (code %d)' % (Parameters["Address"], err.code))
return True
def SetSocketSettings(self, power):
return
def GetSocketSettings(self):
return
def genericPOST(self, commandName):
return
global _plugin
_plugin = BasePlugin()
def onStart():
global _plugin
_plugin.onStart()
def onStop():
global _plugin
_plugin.onStop()
def onConnect(Status, Description):
global _plugin
_plugin.onConnect(Status, Description)
def onMessage(Data, Status, Extra):
global _plugin
_plugin.onMessage(Data, Status, Extra)
def onCommand(Unit, Command, Level, Hue):
global _plugin
_plugin.onCommand(Unit, Command, Level, Hue)
def onDisconnect():
global _plugin
_plugin.onDisconnect()
def onHeartbeat():
global _plugin
_plugin.onHeartbeat()
# xml built in parser threw import error on expat so just do it manually
def extractTagValue(tagName, XML):
startPos = XML.find(tagName)
endPos = XML.find(tagName, startPos+1)
if ((startPos == -1) or (endPos == -1)): Domoticz.Error("'"+tagName+"' not found in supplied XML")
return XML[startPos+len(tagName)+1:endPos-2]
# Generic helper functions
def DumpConfigToLog():
for x in Parameters:
if Parameters[x] != "":
Domoticz.Debug( "'" + x + "':'" + str(Parameters[x]) + "'")
Domoticz.Debug("Device count: " + str(len(Devices)))
for x in Devices:
Domoticz.Debug("Device: " + str(x) + " - " + str(Devices[x]))
Domoticz.Debug("Device ID: '" + str(Devices[x].ID) + "'")
Domoticz.Debug("Device Name: '" + Devices[x].Name + "'")
Domoticz.Debug("Device nValue: " + str(Devices[x].nValue))
Domoticz.Debug("Device sValue: '" + Devices[x].sValue + "'")
Domoticz.Debug("Device LastLevel: " + str(Devices[x].LastLevel))
return
def UpdateDevice(Unit, nValue, sValue):
# Make sure that the Domoticz device still exists (they can be deleted) before updating it
if (Unit in Devices):
if (Devices[Unit].nValue != nValue) or (Devices[Unit].sValue != sValue):
Devices[Unit].Update(nValue, str(sValue))
Domoticz.Log("Update "+str(nValue)+":'"+str(sValue)+"' ("+Devices[Unit].Name+")")
return
class SmartPlug(btle.Peripheral):
def __init__(self, addr):
btle.Peripheral.__init__(self, addr)
self.delegate = NotificationDelegate()
self.setDelegate(self.delegate)
self.plug_svc = self.getServiceByUUID('0000fff0-0000-1000-8000-00805f9b34fb')
self.plug_cmd_ch = self.plug_svc.getCharacteristics('0000fff3-0000-1000-8000-00805f9b34fb')[0]
def on(self):
self.delegate.chg_is_ok = False
self.plug_cmd_ch.write(self.get_buffer(binascii.unhexlify('0300010000')))
self.wait_data(0.5)
return self.delegate.chg_is_ok
def off(self):
self.delegate.chg_is_ok = False
self.plug_cmd_ch.write(self.get_buffer(binascii.unhexlify('0300000000')))
self.wait_data(0.5)
return self.delegate.chg_is_ok
def status_request(self):
self.plug_cmd_ch.write(self.get_buffer(binascii.unhexlify('04000000')))
self.wait_data(2.0)
return self.delegate.state, self.delegate.power
def program_request(self):
self.plug_cmd_ch.write(self.get_buffer(binascii.unhexlify('07000000')))
self.wait_data(2.0)
return self.delegate.programs
def calculate_checksum(self, message):
return (sum(bytearray(message)) + 1) & 0xff
def get_buffer(self, message):
return START_OF_MESSAGE + struct.pack("b",len(message) + 1) + message + struct.pack("b",self.calculate_checksum(message)) + END_OF_MESSAGE
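    # Worked example of the framing implemented by get_buffer() above, assumed
    # from the constants and checksum rule rather than a published spec: the
    # "switch on" payload 0x03 0x00 0x01 0x00 0x00 is wrapped as
    #   0x0f 0x06 0x03 0x00 0x01 0x00 0x00 0x05 0xff 0xff
    # where 0x06 is len(payload) + 1 and 0x05 is (sum(payload) + 1) & 0xff.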
def wait_data(self, timeout):
self.delegate.need_data = True
while self.delegate.need_data and self.waitForNotifications(timeout):
pass
class NotificationDelegate(btle.DefaultDelegate):
def __init__(self):
btle.DefaultDelegate.__init__(self)
self.state = False
self.power = 0
self.chg_is_ok = False
self.programs = []
self._buffer = b''
self.need_data = True
def handleNotification(self, cHandle, data):
        # 0x0f appears to mark the start of a new message, so begin a fresh buffer here
if data[:1] == START_OF_MESSAGE:
self._buffer = data
else:
self._buffer = self._buffer + data
if self._buffer[-2:] == END_OF_MESSAGE:
self.handle_data(self._buffer)
self._buffer = b''
self.need_data = False
def handle_data(self, bytes_data):
# it's a state change confirm notification ?
if bytes_data[0:3] == b'\x0f\x04\x03':
self.chg_is_ok = True
# it's a state/power notification ?
if bytes_data[0:3] == b'\x0f\x0f\x04':
(state, dummy, power) = struct.unpack_from(">?BI", bytes_data, offset=4)
self.state = state
self.power = power / 1000
# it's a 0x0a notif ?
if bytes_data[0:3] == b'\x0f\x33\x0a':
print ("0A notif %s" % bytes_data)
# it's a programs notif ?
if bytes_data[0:3] == b'\x0f\x71\x07' :
program_offset = 4
self.programs = []
while program_offset + 21 < len(bytes_data):
(present, name, flags, start_hour, start_minute, end_hour, end_minute) = struct.unpack_from(">?16sbbbbb", bytes_data, program_offset)
#TODO interpret flags (day of program ?)
if present:
self.programs.append({ "name" : name.decode('iso-8859-1').strip('\0'), "flags":flags, "start":"{0:02d}:{1:02d}".format(start_hour, start_minute), "end":"{0:02d}:{1:02d}".format(end_hour, end_minute)})
program_offset += 22
| gpl-3.0 | -6,923,799,045,652,868,000 | 29.7 | 205 | 0.678401 | false |
petewarden/tensorflow | tensorflow/python/eager/context.py | 3 | 83450 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""State management for eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import os
import random
import threading
from absl import logging
import numpy as np
import six
from tensorflow.core.framework import function_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tfe
from tensorflow.python import tf2
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.eager import executor
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import tfrt_utils
from tensorflow.python.util import compat
from tensorflow.python.util import is_in_graph_mode
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
GRAPH_MODE = 0
EAGER_MODE = 1
default_execution_mode = EAGER_MODE if tf2.enabled() else GRAPH_MODE
# Cache from (old_device_name, partial_new_device_name) -> (new_device_name,
# new_device_spec).
# Note that we do not protect this with a lock and instead rely on python's GIL
# and the idempotent nature of writes to provide thread safety.
_device_parsing_cache = {}
_starting_device_spec = pydev.DeviceSpec.from_string("")
_MAXINT32 = 2**31 - 1
DEVICE_PLACEMENT_EXPLICIT = pywrap_tfe.TFE_DEVICE_PLACEMENT_EXPLICIT
DEVICE_PLACEMENT_WARN = pywrap_tfe.TFE_DEVICE_PLACEMENT_WARN
DEVICE_PLACEMENT_SILENT = pywrap_tfe.TFE_DEVICE_PLACEMENT_SILENT
DEVICE_PLACEMENT_SILENT_FOR_INT32 = (
pywrap_tfe.TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32)
SYNC = 0
ASYNC = 1
_KEEP_ALIVE_SECS = 600
_python_eager_context_create_counter = monitoring.Counter(
"/tensorflow/api/python/eager_context_create_counter",
"Counter for number of eager contexts created in Python.")
# Re-exporting through context.
is_tfrt_enabled = tfrt_utils.enabled
# Expose it as internally public APIs for Keras use cases in b/171080602.
tf_export("__internal__.is_tfrt_enabled", v1=[])(is_tfrt_enabled)
class _EagerTensorCache(object):
"""Simple cache which evicts items based on length in a FIFO manner."""
__slots__ = ["_data", "_max_items", "_max_tensor_size"]
def __init__(self, max_items=256, max_tensor_size=10000):
self._data = collections.OrderedDict()
self._max_items = max_items
self._max_tensor_size = max_tensor_size
def put(self, key, value):
if value._num_elements() > self._max_tensor_size: # pylint: disable=protected-access
return
self._data[key] = value
if len(self._data) > self._max_items:
self._data.popitem(last=False)
def get(self, key):
return self._data.get(key, None)
def flush(self):
self._data.clear()
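# Minimal illustration of the FIFO eviction described above (not executed here;
# assumes t1..t3 are eager tensors small enough to pass the _num_elements()
# size check in put()):
#
#   cache = _EagerTensorCache(max_items=2)
#   cache.put("a", t1); cache.put("b", t2); cache.put("c", t3)
#   cache.get("a")  # -> None: "a" was evicted when the third item arrived
#   cache.get("c")  # -> t3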
class FunctionCallOptions(object):
"""Options applied at call sites of eager functions.
Eager functions are functions decorated with tf.contrib.eager.defun.
"""
__slots__ = ["_config_proto_serialized", "_executor_type"]
def __init__(self, executor_type=None, config_proto=None):
"""Constructor.
Args:
executor_type: (optional) name of the executor to be used to execute the
eager function. If None or an empty string, the default Tensorflow
executor will be used.
config_proto: (optional) a `config_pb2.ConfigProto` proto or
a serialized string of that proto.
The config used by Grappler when optimizing the function graph.
        Each concrete function is optimized the first time it is called. Changing
config_proto after the first call has no effect.
If config_proto is None, an empty RewriterConfig will be used.
"""
self.config_proto_serialized = config_proto
self.executor_type = executor_type
@property
def executor_type(self):
return self._executor_type
@executor_type.setter
def executor_type(self, executor_type):
self._executor_type = executor_type
@property
def config_proto_serialized(self):
return self._config_proto_serialized
@config_proto_serialized.setter
def config_proto_serialized(self, config):
if isinstance(config, config_pb2.ConfigProto):
self._config_proto_serialized = config.SerializeToString(
deterministic=True)
elif isinstance(config, str):
self._config_proto_serialized = config
elif config is None:
self._config_proto_serialized = (
config_pb2.ConfigProto().SerializeToString())
else:
raise ValueError("the rewriter config must be either a "
"config_pb2.ConfigProto, or a serialized string of that "
"proto or None. got: {}".format(type(config)))
# Map from context_id (an int) to _TensorCaches.
# Dicts are thread safe in CPython.
# TODO(iga): Remove this once TensorCaches are moved to C++.
_tensor_caches_map = {}
class _TensorCaches(threading.local):
"""Thread local tensor caches."""
__slots__ = ["_ones_rank_cache", "_zeros_cache"]
def __init__(self):
super(_TensorCaches, self).__init__()
self._ones_rank_cache = None
self._zeros_cache = None
@property
def ones_rank_cache(self):
if not self._ones_rank_cache:
self._ones_rank_cache = _EagerTensorCache()
return self._ones_rank_cache
@property
def zeros_cache(self):
if not self._zeros_cache:
self._zeros_cache = _EagerTensorCache()
return self._zeros_cache
ContextSwitch = collections.namedtuple(
"ContextSwitch", ["is_building_function", "enter_context_fn",
"device_stack"])
# `_ContextSwitchStack` is a `threading.local` to match the semantics of
# `DefaultGraphStack`, which is also a `threading.local`.
class _ContextSwitchStack(threading.local):
"""A thread-local stack of context switches."""
def __init__(self, eager):
super(_ContextSwitchStack, self).__init__()
self.stack = []
if eager:
# Initialize the stack with a pointer to enter the eager context; this
# ensures that the fact that eager execution was enabled is propagated
# across threads, since (1) `enable_eager_execution` modifies a
# process-level flag (`default_execution_mode`) and (2) `__init__` is
# called each time a threading.local object is used in a separate thread.
self.push(is_building_function=False, enter_context_fn=eager_mode,
device_stack=None)
def push(self, is_building_function, enter_context_fn, device_stack):
"""Push metadata about a context switch onto the stack.
    A context switch can take one of two forms: installing a graph as
the default graph, or entering the eager context. For each context switch,
we record whether or not the entered context is building a function.
Args:
is_building_function: (bool.) Whether the context is building a function.
enter_context_fn: (function.) A callable that executes the context switch.
For example, `graph.as_default` or `eager_mode`.
device_stack: If applicable, the device function stack for this
graph. When breaking out of graphs in init_scope, the innermost nonempty
device stack is used. Eager contexts put `None` here and the value is
never used.
"""
self.stack.append(
ContextSwitch(is_building_function, enter_context_fn, device_stack))
def pop(self):
"""Pop the stack."""
self.stack.pop()
@tf_export("config.LogicalDevice")
class LogicalDevice(
collections.namedtuple("LogicalDevice", ["name", "device_type"])):
"""Abstraction for a logical device initialized by the runtime.
A `tf.config.LogicalDevice` corresponds to an initialized logical device on a
`tf.config.PhysicalDevice` or a remote device visible to the cluster. Tensors
and operations can be placed on a specific logical device by calling
`tf.device` with a specified `tf.config.LogicalDevice`.
Fields:
name: The fully qualified name of the device. Can be used for Op or function
placement.
device_type: String declaring the type of device such as "CPU" or "GPU".
"""
pass
@tf_export("config.LogicalDeviceConfiguration",
"config.experimental.VirtualDeviceConfiguration")
class LogicalDeviceConfiguration(
collections.namedtuple("LogicalDeviceConfiguration",
["memory_limit", "experimental_priority"])):
"""Configuration class for a logical devices.
The class specifies the parameters to configure a `tf.config.PhysicalDevice`
as it is initialized to a `tf.config.LogicalDevice` during runtime
initialization. Not all fields are valid for all device types.
See `tf.config.get_logical_device_configuration` and
`tf.config.set_logical_device_configuration` for usage examples.
Fields:
memory_limit: (optional) Maximum memory (in MB) to allocate on the virtual
device. Currently only supported for GPUs.
experimental_priority: (optional) Priority to assign to a virtual device.
Lower values have higher priorities and 0 is the default.
Within a physical GPU, the GPU scheduler will prioritize ops on virtual
devices with higher priority. Currently only supported for Nvidia GPUs.
"""
def __new__(cls, memory_limit=None, experimental_priority=None):
return super(LogicalDeviceConfiguration,
cls).__new__(cls, memory_limit, experimental_priority)
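# --- Illustrative sketch (not part of the original module) -------------------
# Typical use of `LogicalDeviceConfiguration` through the public `tf.config`
# API: splitting one physical GPU into two logical devices with fixed memory
# limits. This must happen before the runtime is initialized. The function is
# illustrative only and is never called from this file.
def _example_logical_device_configuration():
  """Hypothetical usage; assumes at least one GPU and an uninitialized runtime."""
  import tensorflow as tf  # Local import for the sketch only.
  gpus = tf.config.list_physical_devices("GPU")
  if gpus:
    tf.config.set_logical_device_configuration(
        gpus[0],
        [tf.config.LogicalDeviceConfiguration(memory_limit=1024),
         tf.config.LogicalDeviceConfiguration(memory_limit=1024)])
    logical_gpus = tf.config.list_logical_devices("GPU")
    assert len(logical_gpus) >= 2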
@tf_export("config.PhysicalDevice")
class PhysicalDevice(
collections.namedtuple("PhysicalDevice", ["name", "device_type"])):
"""Abstraction for a locally visible physical device.
TensorFlow can utilize various devices such as the CPU or multiple GPUs
for computation. Before initializing a local device for use, the user can
  customize certain properties of the device such as its visibility or memory
configuration.
Once a visible `tf.config.PhysicalDevice` is initialized one or more
`tf.config.LogicalDevice` objects are created. Use
`tf.config.set_visible_devices` to configure the visibility of a physical
device and `tf.config.set_logical_device_configuration` to configure multiple
`tf.config.LogicalDevice` objects for a `tf.config.PhysicalDevice`. This is
useful when separation between models is needed or to simulate a multi-device
environment.
Fields:
name: Unique identifier for device.
device_type: String declaring the type of device such as "CPU" or "GPU".
"""
pass
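# --- Illustrative sketch (not part of the original module) -------------------
# `PhysicalDevice` values are what the visibility APIs operate on. The function
# below sketches hiding all but the first GPU before the runtime initializes;
# it is illustrative only and is never called from this file.
def _example_physical_device_visibility():
  import tensorflow as tf  # Local import for the sketch only.
  gpus = tf.config.list_physical_devices("GPU")
  if gpus:
    tf.config.set_visible_devices(gpus[:1], "GPU")
    visible = tf.config.get_visible_devices("GPU")
    assert visible == gpus[:1]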
class _AtomicCounter(object):
"""A simple atomic counter."""
__slots__ = ["_value", "_lock"]
def __init__(self):
self._value = 0
self._lock = threading.Lock()
def increment_and_get(self):
with self._lock:
self._value += 1
return self._value
_context_id_counter = _AtomicCounter()
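# --- Illustrative sketch (not part of the original module) -------------------
# `_AtomicCounter` serializes increments across threads; the hypothetical
# helper below shows that concurrent increments never produce duplicate ids.
def _example_atomic_counter_usage():
  counter = _AtomicCounter()
  results = []
  def work():
    for _ in range(1000):
      results.append(counter.increment_and_get())
  threads = [threading.Thread(target=work) for _ in range(4)]
  for t in threads:
    t.start()
  for t in threads:
    t.join()
  assert len(set(results)) == 4000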
class _TensorCacheDeleter(object):
"""Deletes tensor caches for a given context."""
__slots__ = ["_context_id"]
def __init__(self, context_id):
self._context_id = context_id
def __del__(self):
if _tensor_caches_map is None:
return
if self._context_id in _tensor_caches_map:
del _tensor_caches_map[self._context_id]
# TODO(agarwal): rename to EagerContext / EagerRuntime ?
# TODO(agarwal): consider keeping the corresponding Graph here.
class Context(object):
"""Environment in which eager operations execute."""
# TODO(agarwal): create and link in some documentation for `execution_mode`.
# pylint: disable=redefined-outer-name
def __init__(self,
config=None,
device_policy=None,
execution_mode=None,
server_def=None):
"""Creates a new Context.
Args:
config: (Optional.) A `ConfigProto` protocol buffer with configuration
options for the Context. Note that a lot of these options may be
currently unimplemented or irrelevant when eager execution is enabled.
device_policy: (Optional.) What policy to use when trying to run an
operation on a device with inputs which are not on that device.
When set to None, an appropriate value will be picked automatically.
The value picked may change between TensorFlow releases.
Defaults to DEVICE_PLACEMENT_SILENT.
Valid values:
- DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is
not correct.
- DEVICE_PLACEMENT_WARN: copies the tensors which are not on the
right device but raises a warning.
- DEVICE_PLACEMENT_SILENT: silently copies the tensors. This might
hide performance problems.
- DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors,
raising errors on the other ones.
execution_mode: (Optional.) Policy controlling how operations dispatched
are actually executed. When set to None, an appropriate value will be
picked automatically. The value picked may change between TensorFlow
releases.
Valid values:
- SYNC: executes each operation synchronously.
- ASYNC: executes each operation asynchronously. These
operations may return "non-ready" handles.
server_def: (Optional.) A tensorflow::ServerDef proto.
Enables execution on remote devices. GrpcServers need to be started by
creating an identical server_def to this, and setting the appropriate
task_indexes, so that the servers can communicate. It will then be
possible to execute operations on remote devices.
Raises:
ValueError: If execution_mode is not valid.
"""
# This _id is used only to index the tensor caches.
# TODO(iga): Remove this when tensor caches are moved to C++.
self._id = _context_id_counter.increment_and_get()
self._tensor_cache_deleter = _TensorCacheDeleter(self._id)
_tensor_caches_map[self._id] = _TensorCaches()
self._config = config
self._thread_local_data = pywrap_tfe.EagerContextThreadLocalData(
self,
is_eager=lambda: default_execution_mode == EAGER_MODE,
device_spec=_starting_device_spec)
self._context_switches = _ContextSwitchStack(self.executing_eagerly())
self._context_handle = None
self._context_devices = None
self._seed = None
self._initialize_lock = threading.Lock()
self._initialized = False
if device_policy is None:
device_policy = DEVICE_PLACEMENT_SILENT
self._device_policy = device_policy
self._mirroring_policy = None
if execution_mode not in (None, SYNC, ASYNC):
raise ValueError(
"execution_mode should be None/SYNC/ASYNC. Got %s" % execution_mode)
if execution_mode is None:
execution_mode = SYNC
self._default_is_async = execution_mode == ASYNC
self._use_tfrt = is_tfrt_enabled()
self._server_def = server_def
self._collective_ops_server_def = None
self._collective_leader = None
self._collective_scoped_allocator_enabled_ops = None
self._collective_use_nccl_communication = None
self._collective_device_filters = None
self._device_lock = threading.Lock()
self._physical_devices = None
self._physical_device_to_index = None
self._visible_device_list = []
self._memory_growth_map = None
self._virtual_device_map = {}
# Values set after construction
self._optimizer_jit = None
self._intra_op_parallelism_threads = None
self._inter_op_parallelism_threads = None
self._soft_device_placement = None
self._log_device_placement = None
self._enable_mlir_graph_optimization = None
self._optimizer_experimental_options = {}
_python_eager_context_create_counter.get_cell().increase_by(1)
# pylint: enable=redefined-outer-name
def _set_global_seed(self, seed):
"""Set a global eager mode seed for random ops."""
self._seed = seed
# `random.Random(seed)` needs `seed` to be hashable, while values of type
# e.g. `np.int64` or `np.ndarray` are not. We use `int(...)` to convert them
# to int.
try:
hash(seed)
except TypeError:
seed = int(np.array(seed))
self._rng = random.Random(seed)
# Also clear the kernel cache, to reset any existing seeds
if self._context_handle is not None:
pywrap_tfe.TFE_ContextClearCaches(self._context_handle)
def _internal_operation_seed(self):
"""Returns a fake operation seed.
    In eager mode, users shouldn't set or depend on operation seed.
    Here, we generate a random seed based on the global seed so that an
    operation's randomness differs between operations while still depending on
    the global seed.
Returns:
A fake operation seed based on global seed.
"""
return self._rng.randint(0, _MAXINT32)
def _initialize_logical_devices(self):
"""Helper to initialize devices."""
# Store list of devices
logical_devices = []
context_devices = []
device_list = pywrap_tfe.TFE_ContextListDevices(self._context_handle)
try:
self._num_gpus = 0
for i in range(pywrap_tfe.TF_DeviceListCount(device_list)):
dev_name = pywrap_tfe.TF_DeviceListName(device_list, i)
context_devices.append(pydev.canonical_name(dev_name))
spec = pydev.DeviceSpec.from_string(dev_name)
# If the job is localhost, we assume that the cluster has not yet been
# configured and thus clear the job, replica & task.
if spec.job == "localhost":
spec = spec.replace(job=None, replica=None, task=None)
logical_devices.append(
LogicalDevice(name=spec.to_string(), device_type=spec.device_type))
dev_type = pywrap_tfe.TF_DeviceListType(device_list, i)
if dev_type == "GPU":
self._num_gpus += 1
finally:
self._logical_devices = logical_devices
self._context_devices = context_devices
pywrap_tfe.TF_DeleteDeviceList(device_list)
def ensure_initialized(self):
"""Initialize handle and devices if not already done so."""
if self._initialized:
return
with self._initialize_lock:
if self._initialized:
return
assert self._context_devices is None
opts = pywrap_tfe.TFE_NewContextOptions()
try:
config_str = self.config.SerializeToString()
pywrap_tfe.TFE_ContextOptionsSetConfig(opts, config_str)
if self._device_policy is not None:
pywrap_tfe.TFE_ContextOptionsSetDevicePlacementPolicy(
opts, self._device_policy)
if self._mirroring_policy is not None:
pywrap_tfe.TFE_ContextOptionsSetMirroringPolicy(
opts, self._mirroring_policy)
if self._default_is_async == ASYNC:
pywrap_tfe.TFE_ContextOptionsSetAsync(opts, True)
if self._use_tfrt is not None:
pywrap_tfe.TFE_ContextOptionsSetTfrt(opts, self._use_tfrt)
context_handle = pywrap_tfe.TFE_NewContext(opts)
finally:
pywrap_tfe.TFE_DeleteContextOptions(opts)
assert not (self._server_def and self._collective_ops_server_def), (
"Cannot enable remote execution as well as collective ops at the "
"moment. If this is important to you, please file an issue.")
if self._server_def is not None:
server_def_str = self._server_def.SerializeToString()
pywrap_tfe.TFE_ContextSetServerDef(context_handle, _KEEP_ALIVE_SECS,
server_def_str)
elif self._collective_ops_server_def is not None:
server_def_str = self._collective_ops_server_def.SerializeToString()
pywrap_tfe.TFE_EnableCollectiveOps(context_handle, server_def_str)
self._context_handle = context_handle
self._initialize_logical_devices()
self._initialized = True
def _clear_caches(self):
self.ones_rank_cache().flush()
self.zeros_cache().flush()
pywrap_tfe.TFE_ClearScalarCache()
def get_server_def(self):
return self._server_def
def set_server_def(self, server_def, keep_alive_secs=_KEEP_ALIVE_SECS):
"""Allow setting a server_def on the context.
When a server def is replaced, it effectively clears a bunch of caches
within the context. If you attempt to use a tensor object that was pointing
to a tensor on the remote device, it will raise an error.
Args:
server_def: A tensorflow::ServerDef proto.
Enables execution on remote devices.
keep_alive_secs: Num. seconds after which the remote end will hang up.
As long as the client is still alive, the server state for the context
will be kept alive. If the client is killed (or there is some failure),
the server will clean up its context keep_alive_secs after the final RPC
it receives.
Raises:
ValueError: if server_def is None.
"""
if not server_def:
raise ValueError("server_def is None.")
self._server_def = server_def
if self._context_handle:
server_def_str = server_def.SerializeToString()
pywrap_tfe.TFE_ContextSetServerDef(self._context_handle, keep_alive_secs,
server_def_str)
self._initialize_logical_devices()
# Clear all the caches in case there are remote tensors in them.
self._clear_caches()
def update_server_def(self, server_def, keep_alive_secs=_KEEP_ALIVE_SECS):
"""Update a server_def on the context.
Args:
server_def: A tensorflow::ServerDef proto. Enables execution on remote
devices.
keep_alive_secs: Num. seconds after which the remote end will hang up. As
long as the client is still alive, the server state for the context will
be kept alive. If the client is killed (or there is some failure), the
server will clean up its context keep_alive_secs after the final RPC it
receives.
Raises:
ValueError: if server_def is None.
"""
if not server_def:
raise ValueError("server_def is None.")
self._server_def = server_def
if self._context_handle:
server_def_str = server_def.SerializeToString()
pywrap_tfe.TFE_ContextUpdateServerDef(self._context_handle,
keep_alive_secs, server_def_str)
self._initialize_logical_devices()
self._clear_caches()
def check_alive(self, worker_name):
"""Checks whether a remote worker is alive or not.
Args:
worker_name: a string representing the remote worker. It must be a fully
specified name like "/job:worker/replica:0/task:0".
Returns:
a boolean indicating whether the remote worker is alive or not.
Raises:
ValueError: if context is not initialized.
"""
# TODO(yuefengz): support checking multiple workers.
if self._context_handle:
return pywrap_tfe.TFE_ContextCheckAlive(self._context_handle, worker_name)
else:
raise ValueError("Context is not initialized.")
def sync_executors(self):
"""Sync both local executors and the ones on remote workers.
In async execution mode, local function calls can return before the
corresponding remote op/function execution requests are completed. Calling
this method creates a synchronization barrier for remote executors. It only
returns when all remote pending nodes are finished, potentially with errors
if any remote executors are in error state.
Raises:
ValueError: if context is not initialized.
"""
if self._context_handle:
pywrap_tfe.TFE_ContextSyncExecutors(self._context_handle)
else:
raise ValueError("Context is not initialized.")
def clear_executor_errors(self):
"""Clear errors in both local executors and remote workers.
After receiving errors from remote workers, additional requests on the fly
could further taint the status on the remote workers due to the async nature
    of remote execution. Calling this method blocks while waiting for all
    pending nodes in remote executors to finish and clears their error
    statuses.
Raises:
ValueError: if context is not initialized.
"""
if self._context_handle:
pywrap_tfe.TFE_ContextClearExecutors(self._context_handle)
else:
raise ValueError("Context is not initialized.")
def clear_kernel_cache(self):
"""Clear kernel cache and reset all stateful kernels.
Raises:
ValueError: if context is not initialized.
"""
if self._context_handle is not None:
pywrap_tfe.TFE_ContextClearCaches(self._context_handle)
else:
raise ValueError("Context is not initialized.")
def enable_collective_ops(self, server_def):
"""Enable distributed collective ops with an appropriate server_def.
Args:
server_def: A tensorflow::ServerDef proto. Enables execution on remote
devices.
Raises:
ValueError: if server_def is None.
RuntimeError: if this method is not called at program startup.
"""
if not server_def:
raise ValueError("server_def is None.")
self._collective_ops_server_def = server_def
# TODO(b/129298253): Allow creating datasets/tensors before enabling
# collective ops.
if self._context_handle is not None:
logging.warning("Enabling collective ops after program startup may cause "
"error when accessing previously created tensors.")
with self._initialize_lock:
assert self._initialized
server_def_str = self._collective_ops_server_def.SerializeToString()
pywrap_tfe.TFE_EnableCollectiveOps(self._context_handle, server_def_str)
self._initialize_logical_devices()
self._clear_caches()
def configure_collective_ops(
self,
collective_leader="",
scoped_allocator_enabled_ops=("CollectiveReduce",),
use_nccl_communication=False,
device_filters=None):
"""Configure collective ops.
    A collective group leader is necessary for collective ops to run; the other
    configurations are mainly for performance.
Args:
collective_leader: a device string for collective leader, e.g.
"/job:worker/replica:0/task:0"; empty string means local execution of
collective ops.
scoped_allocator_enabled_ops: a tuple or a list of op names for scoped
allocator to run with.
use_nccl_communication: whether to use nccl communication for collective
ops.
device_filters: a tuple or a list of device strings. If set, corresponding
task can only see the devices filtered by these device filters.
Raises:
RuntimeError: if this method is not called at program startup.
"""
if self._collective_leader is not None:
if (self._collective_leader != collective_leader or
self._collective_scoped_allocator_enabled_ops !=
scoped_allocator_enabled_ops or
self._collective_use_nccl_communication != use_nccl_communication or
self._collective_device_filters != device_filters):
raise ValueError("Collective ops are already configured.")
else:
return
if self._context_handle is not None:
raise RuntimeError("Collective ops must be configured at program startup")
self._collective_leader = collective_leader
self._collective_scoped_allocator_enabled_ops = scoped_allocator_enabled_ops
self._collective_use_nccl_communication = use_nccl_communication
self._collective_device_filters = device_filters
def abort_collective_ops(self, code, message):
"""Abort the collective ops.
This is intended to be used when a peer failure is detected, which allows
the user to handle the case instead of hanging. This aborts all on-going
    collectives. After this, all subsequent collectives error immediately, and
    you need to reset_context() to use collectives again.
Args:
code: a `tf.errors` error code.
message: a string. The error message.
"""
self.ensure_initialized()
pywrap_tfe.TFE_AbortCollectiveOps(self._handle, code, message)
def check_collective_ops_peer_health(self, task, timeout_in_ms):
"""Check collective peer health.
    This probes each task to see if it's still alive. Note that a restarted
    task is considered a different task and is treated as not healthy.
This should only be used in multi client multi worker training.
Args:
task: a task string, must be in the format of /job:xxx/replica:0/task:N.
timeout_in_ms: an integer, the timeout. If zero, there's no timeout.
Raises:
tf.errors.UnavailableError: when a peer is down.
tf.errors.FailedPreconditionError: when a peer is a different one from the
one this task has talked to, e.g. the peer has restarted.
tf.errors.InvalidArgumentError: when the task string is invalid.
"""
self.ensure_initialized()
pywrap_tfe.TFE_CollectiveOpsCheckPeerHealth(self._handle, task,
timeout_in_ms)
@property
def _handle(self):
if self._context_handle is None:
raise AssertionError("Context must be initialized first.")
return self._context_handle
@property
def _devices(self):
if self._context_devices is None:
raise AssertionError("Context must be initialized first.")
return self._context_devices
def __str__(self):
if self._context_handle is None:
return "Eager TensorFlow Context. Devices currently uninitialized."
else:
devices = self._devices
lines = ["Eager TensorFlow Context with %d devices" % (len(devices))]
for i, d in enumerate(devices):
lines.append(" Device %d: %s" % (i, d))
return "\n".join(lines)
@tf_contextlib.contextmanager
def _mode(self, mode):
"""A context manager to allow setting the mode to EAGER/GRAPH."""
ctx = self._thread_local_data
old_is_eager = ctx.is_eager
ctx.is_eager = mode == EAGER_MODE
if mode == EAGER_MODE:
# Entering graph mode does not provide us with sufficient information to
# record a context switch; graph-based context switches are only logged
# when a graph is registered as the default graph.
self.context_switches.push(False, eager_mode, None)
try:
yield
finally:
ctx.is_eager = old_is_eager
if mode == EAGER_MODE:
self.context_switches.pop()
def executing_eagerly(self):
"""Returns True if current thread has eager executing enabled."""
return self._thread_local_data.is_eager
def ones_rank_cache(self):
"""Per-device cache for scalars."""
return _tensor_caches_map[self._id].ones_rank_cache
def zeros_cache(self):
"""Per-device cache for scalars."""
return _tensor_caches_map[self._id].zeros_cache
@property
def scope_name(self):
"""Returns scope name for the current thread."""
return self._thread_local_data.scope_name
@scope_name.setter
def scope_name(self, s):
"""Sets scope name for the current thread."""
self._thread_local_data.scope_name = s
@property
def device_name(self):
"""Returns the device name for the current thread."""
return self._thread_local_data.device_name
@property
def device_spec(self):
"""Returns the device spec for the current thread."""
return self._thread_local_data.device_spec
def _set_device(self, device_name, device_spec):
self._thread_local_data.device_name = device_name
self._thread_local_data.device_spec = device_spec
def device(self, name):
"""Context-manager to force placement of operations and Tensors on a device.
Args:
name: Name of the device or None to get default placement.
Returns:
Context manager that forces device placement.
Raises:
ValueError: If name is not a string or is an invalid device name.
RuntimeError: If device scopes are not properly nested.
"""
if isinstance(name, LogicalDevice):
name = name.name
elif pydev.is_device_spec(name):
name = name.to_string()
return _EagerDeviceContext(self, name)
def devices(self):
"""List of the names of devices available to execute operations."""
return self._devices
def host_address_space(self):
self.ensure_initialized()
with c_api_util.tf_buffer() as buffer_:
pywrap_tfe.TFE_HostAddressSpace(self._context_handle, buffer_)
address_space = pywrap_tf_session.TF_GetBuffer(buffer_).decode("utf-8")
return address_space
# TODO(fishx): remove this property.
@property
def execution_mode(self):
"""Gets execution mode for current thread."""
return ASYNC if self.is_async() else SYNC
@execution_mode.setter
def execution_mode(self, mode):
"""Sets execution mode for current thread."""
if mode not in (None, SYNC, ASYNC):
raise ValueError(
"Execution mode should be None/SYNC/ASYNC. Got %s" % mode)
if mode is None:
mode = SYNC
enable_async = (mode == ASYNC)
if self.is_async() != enable_async:
# Only set the execution mode if the context has already been initialized
if self._context_handle is not None:
self.executor.wait()
executor_new = executor.new_executor(enable_async)
self._thread_local_data.executor = executor_new
pywrap_tfe.TFE_ContextSetExecutorForThread(self._context_handle,
executor_new.handle())
else:
self._default_is_async = enable_async
def is_async(self):
if self._context_handle is not None:
return self.executor.is_async()
else:
return self._default_is_async
@property
def executor(self):
self.ensure_initialized()
return executor.Executor(
pywrap_tfe.TFE_ContextGetExecutorForThread(self._context_handle))
@executor.setter
def executor(self, e):
self.ensure_initialized()
pywrap_tfe.TFE_ContextSetExecutorForThread(self._context_handle, e.handle())
@property
def config(self):
"""Return the ConfigProto with all runtime deltas applied."""
# Ensure physical devices have been discovered and config has been imported
self._initialize_physical_devices()
config = config_pb2.ConfigProto()
if self._config is not None:
config.CopyFrom(self._config)
if self._optimizer_jit is not None:
config.graph_options.optimizer_options.global_jit_level = (
config_pb2.OptimizerOptions.ON_1
if self._optimizer_jit else config_pb2.OptimizerOptions.OFF)
if self._intra_op_parallelism_threads is not None:
config.intra_op_parallelism_threads = self._intra_op_parallelism_threads
if self._inter_op_parallelism_threads is not None:
config.inter_op_parallelism_threads = self._inter_op_parallelism_threads
if self._soft_device_placement is not None:
config.allow_soft_placement = self._soft_device_placement
else:
config.allow_soft_placement = self.executing_eagerly()
if self._log_device_placement is not None:
config.log_device_placement = self._log_device_placement
is_mlir_bridge_enabled = pywrap_tfe.TF_IsMlirBridgeEnabled()
config.experimental.mlir_bridge_rollout = is_mlir_bridge_enabled
if (is_mlir_bridge_enabled ==
config_pb2.ConfigProto.Experimental.MLIR_BRIDGE_ROLLOUT_ENABLED):
config.experimental.enable_mlir_bridge = True
if self._enable_mlir_graph_optimization is not None:
config.experimental.enable_mlir_graph_optimization = (
self._enable_mlir_graph_optimization)
def rewriter_toggle(option):
toggle = self._optimizer_experimental_options.get(option, None)
if toggle is None:
return
setattr(config.graph_options.rewrite_options,
option,
(rewriter_config_pb2.RewriterConfig.ON
if toggle else rewriter_config_pb2.RewriterConfig.OFF))
def rewriter_bool(option):
toggle = self._optimizer_experimental_options.get(option, None)
if toggle is None:
return
setattr(config.graph_options.rewrite_options,
option,
toggle)
rewriter_toggle("layout_optimizer")
rewriter_toggle("constant_folding")
rewriter_toggle("shape_optimization")
rewriter_toggle("remapping")
rewriter_toggle("arithmetic_optimization")
rewriter_toggle("dependency_optimization")
rewriter_toggle("loop_optimization")
rewriter_toggle("function_optimization")
rewriter_toggle("debug_stripper")
rewriter_bool("disable_model_pruning")
rewriter_toggle("scoped_allocator_optimization")
rewriter_toggle("pin_to_host_optimization")
rewriter_toggle("implementation_selector")
rewriter_toggle("auto_mixed_precision")
rewriter_bool("disable_meta_optimizer")
nodes = self._optimizer_experimental_options.get("min_graph_nodes", None)
if nodes is not None:
config.graph_options.rewrite_options.min_graph_nodes = nodes
# Compute device counts
config.device_count["CPU"] = 0
config.device_count["GPU"] = 0
for dev in self._physical_devices:
if dev not in self._visible_device_list:
continue
virtual_devices = self._virtual_device_map.get(dev)
if virtual_devices is None:
config.device_count[dev.device_type] += 1
else:
config.device_count[dev.device_type] += len(virtual_devices)
# Configure gpu_options
gpu_options = self._compute_gpu_options()
config.gpu_options.MergeFrom(gpu_options)
# Configure collective ops
if self._collective_leader:
config.experimental.collective_group_leader = self._collective_leader
if self._collective_scoped_allocator_enabled_ops:
rewrite_options = config.graph_options.rewrite_options
rewrite_options.scoped_allocator_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
del rewrite_options.scoped_allocator_opts.enable_op[:]
for op in self._collective_scoped_allocator_enabled_ops:
rewrite_options.scoped_allocator_opts.enable_op.append(op)
if self._collective_use_nccl_communication:
config.experimental.collective_nccl = True
if self._collective_device_filters:
del config.device_filters[:]
for f in self._collective_device_filters:
config.device_filters.append(f)
return config
def _compute_gpu_options(self):
"""Build the GPUOptions proto."""
visible_device_list = []
virtual_devices = []
gpu_index = -1
memory_growths = set()
for dev in self.list_physical_devices("GPU"):
gpu_index += 1
if dev not in self._visible_device_list:
continue
growth = self._memory_growth_map[dev]
memory_growths.add(growth)
visible_device_list.append(str(gpu_index))
if self._virtual_device_map:
vdevs = self._virtual_device_map.get(dev, [])
device_limits = []
priority = []
for virt_dev in vdevs:
device_limits.append(virt_dev.memory_limit)
if virt_dev.experimental_priority is not None:
priority.append(virt_dev.experimental_priority)
# If priority is specified, it must be specified for all virtual
# devices.
if priority and len(device_limits) != len(priority):
raise ValueError("priority must be specified for all virtual devices")
virtual_devices.append(
config_pb2.GPUOptions.Experimental.VirtualDevices(
memory_limit_mb=device_limits, priority=priority))
# Only compute growth if virtual devices have not been configured and we
# have GPUs
if not virtual_devices and memory_growths:
if len(memory_growths) > 1:
raise ValueError("Memory growth cannot differ between GPU devices")
allow_growth = memory_growths.pop()
else:
allow_growth = None
return config_pb2.GPUOptions(
allow_growth=allow_growth,
visible_device_list=",".join(visible_device_list),
experimental=config_pb2.GPUOptions.Experimental(
virtual_devices=virtual_devices))
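  # Illustrative note (not part of the original module): for a single visible
  # GPU configured with two virtual devices of 1024 MB each, the proto built
  # above is roughly equivalent to:
  #
  #   config_pb2.GPUOptions(
  #       allow_growth=None,
  #       visible_device_list="0",
  #       experimental=config_pb2.GPUOptions.Experimental(virtual_devices=[
  #           config_pb2.GPUOptions.Experimental.VirtualDevices(
  #               memory_limit_mb=[1024, 1024], priority=[])]))
  #
  # Memory growth is only honored when no virtual devices are configured.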
@property
def function_call_options(self):
"""Returns function call options for current thread.
Note that the returned object is still referenced by the eager context.
Returns: the FunctionCallOptions for current thread.
"""
if self._thread_local_data.function_call_options is None:
config = self.config
# Default to soft placement for functions unless specified
if self._soft_device_placement is None:
config.allow_soft_placement = True
self._thread_local_data.function_call_options = FunctionCallOptions(
config_proto=config)
return self._thread_local_data.function_call_options
@function_call_options.setter
def function_call_options(self, options):
"""Returns function call options for current thread."""
self._thread_local_data.function_call_options = options
def num_gpus(self):
"""The number of GPUs available to execute operations."""
self.ensure_initialized()
return self._num_gpus
def add_function(self, fn):
"""Add a function definition to the context.
Once added, the function (identified by its name) can be executed like any
other operation.
Args:
fn: A wrapped TF_Function (returned from TF_GraphToFunction_wrapper).
"""
self.ensure_initialized()
pywrap_tfe.TFE_ContextAddFunction(self._handle, fn)
def add_function_def(self, fdef):
"""Add a function definition to the context.
Once added, the function (identified by its name) can be executed like any
other operation.
Args:
fdef: A FunctionDef protocol buffer message.
"""
self.ensure_initialized()
fdef_string = fdef.SerializeToString()
pywrap_tfe.TFE_ContextAddFunctionDef(self._handle, fdef_string,
len(fdef_string))
def get_function_def(self, name):
"""Get a function definition from the context.
Args:
name: function signature name.
Returns:
The requested FunctionDef.
Raises:
tf.errors.NotFoundError: if name is not the name of a registered function.
"""
with c_api_util.tf_buffer() as buffer_:
pywrap_tfe.TFE_ContextGetFunctionDef(self._handle, name, buffer_)
proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)
function_def = function_pb2.FunctionDef()
function_def.ParseFromString(proto_data)
return function_def
def register_custom_device(self, device_capsule, device_name,
device_info_capsule):
"""Calls TFE_RegisterCustomDevice. See the non-member function."""
self.ensure_initialized()
pywrap_tfe.TFE_Py_RegisterCustomDevice(self._handle, device_capsule,
device_name, device_info_capsule)
def pack_eager_tensors(self, tensors):
"""Pack multiple `EagerTensor`s of the same dtype and shape.
Args:
tensors: a list of EagerTensors to pack.
Returns:
A packed EagerTensor.
"""
self.ensure_initialized()
return pywrap_tfe.TFE_Py_PackEagerTensors(self._handle, tensors)
def list_function_names(self):
"""Get a list of names of registered functions.
Returns:
A set of names of all registered functions for the context.
"""
self.ensure_initialized()
return set(pywrap_tfe.TFE_ContextListFunctionNames(self._handle))
def remove_function(self, name):
"""Remove a function from the context.
Once removed, the function cannot be executed anymore.
Args:
name: function signature name.
"""
self.ensure_initialized()
pywrap_tfe.TFE_ContextRemoveFunction(self._handle, name)
def has_function(self, name):
"""Check if a function `name` is registered."""
self.ensure_initialized()
return bool(pywrap_tfe.TFE_ContextHasFunction(self._handle, name))
def add_op_callback(self, callback):
"""Add a post-op callback to the context.
A post-op callback is invoked immediately after an eager operation or
    function has finished execution or after an op has been added to a graph,
    providing access to the op's type, name, input and output tensors. Multiple
op callbacks can be added, in which case the callbacks will be invoked in
the order in which they are added.
Args:
callback: a callable of the signature
`f(op_type, inputs, attrs, outputs, op_name=None, graph=None)`.
See doc strings in `op_callbacks.py` for details on the function
signature and its semantics.
"""
if callback not in self._thread_local_data.op_callbacks:
self._thread_local_data.op_callbacks.append(callback)
def remove_op_callback(self, callback):
"""Remove an already-registered op callback.
Args:
callback: The op callback to be removed.
Raises:
KeyError: If `callback` is not already registered.
"""
if callback not in self._thread_local_data.op_callbacks:
raise KeyError(
"The specified op callback has not been registered, "
"and hence cannot be removed.")
del self._thread_local_data.op_callbacks[
self._thread_local_data.op_callbacks.index(callback)]
@property
def op_callbacks(self):
return self._thread_local_data.op_callbacks
@property
def invoking_op_callbacks(self):
return self._thread_local_data.invoking_op_callbacks
@invoking_op_callbacks.setter
def invoking_op_callbacks(self, value):
self._thread_local_data.invoking_op_callbacks = value
def _initialize_physical_devices(self, reinitialize=False):
"""Gets local devices visible to the system.
Args:
reinitialize: If True, reinitializes self._physical_devices so that
        dynamically registered devices will also be visible to the python
        front-end.
"""
    # We lazily initialize self._physical_devices since we do not want to do
    # this in the constructor, as the backend may not be initialized yet.
with self._device_lock:
if not reinitialize and self._physical_devices is not None:
return
devs = pywrap_tfe.TF_ListPhysicalDevices()
self._physical_devices = [
PhysicalDevice(name=d.decode(),
device_type=d.decode().split(":")[1]) for d in devs]
self._physical_device_to_index = {
p: i for i, p in enumerate(self._physical_devices)
}
self._visible_device_list = list(self._physical_devices)
self._memory_growth_map = {
d: None for d in self._physical_devices if d.device_type == "GPU"
}
# Import device settings that may have been passed into the constructor
self._import_config()
def reinitialize_physical_devices(self):
"""Gets local devices visible to the system."""
# Reinitialize the physical device list after registering
# the pluggable device.
self._initialize_physical_devices(True)
def list_physical_devices(self, device_type=None):
"""List local devices visible to the system.
This API allows a client to query the devices before they have been
    initialized by the eager runtime. Additionally, a user can filter by
    device type to get only CPUs or GPUs.
Args:
device_type: Optional device type to limit results to
Returns:
List of PhysicalDevice objects.
"""
self._initialize_physical_devices()
if device_type is None:
return list(self._physical_devices)
return [d for d in self._physical_devices if d.device_type == device_type]
def get_device_details(self, device): # pylint: disable=redefined-outer-name
"""Returns details about a physical devices.
Args:
device: A `tf.config.PhysicalDevice` returned by
`tf.config.list_physical_devices` or `tf.config.get_visible_devices`.
Returns:
A dict with string keys.
"""
if not isinstance(device, PhysicalDevice):
raise ValueError("device must be a tf.config.PhysicalDevice, but got: "
"%s" % (device,))
if (self._physical_device_to_index is None or
device not in self._physical_device_to_index):
raise ValueError("The PhysicalDevice must be one obtained from "
"calling `tf.config.list_physical_devices`, but got: "
"%s" % (device,))
index = self._physical_device_to_index[device]
details = pywrap_tfe.TF_GetDeviceDetails(index)
# Change compute_capability from a string to a tuple
if "compute_capability" in details:
try:
major, minor = details["compute_capability"].split(".")
details["compute_capability"] = (int(major), int(minor))
except ValueError:
raise RuntimeError("Device returned compute capability an in invalid "
"format: %s" % details["compute_capability"])
return details
def _import_config(self):
"""Import config if passed in during construction.
If Context was created with a ConfigProto such as when calling
tf.compat.v1.enable_eager_execution(), then we need to pull out the
    various pieces we might be replacing and import them into our internal
class representation.
"""
if self._config is None:
return
num_cpus = self._config.device_count.get("CPU", 1)
if num_cpus != 1:
cpus = [d for d in self._physical_devices if d.device_type == "CPU"]
if num_cpus == 0:
self.set_visible_devices([], "CPU")
elif num_cpus > 1:
self.set_logical_device_configuration(
cpus[0], [LogicalDeviceConfiguration() for _ in range(num_cpus)])
# Parse GPU options
gpus = [d for d in self._physical_devices if d.device_type == "GPU"]
# If there are no GPUs detected, simply ignore all the GPU options passed in
# rather than doing any validation checks.
if not gpus:
return
gpu_count = self._config.device_count.get("GPU", None)
visible_gpus = []
# TODO(gjn): Handle importing existing virtual GPU configuration
visible_indices = self._config.gpu_options.visible_device_list
if visible_indices:
for index in visible_indices.split(","):
if int(index) >= len(gpus):
raise ValueError("Invalid visible device index: %s" % index)
visible_gpus.append(gpus[int(index)])
else:
visible_gpus = gpus
if gpu_count is not None:
visible_gpus = visible_gpus[:gpu_count]
self.set_visible_devices(visible_gpus, "GPU")
def list_logical_devices(self, device_type=None):
"""Return logical devices."""
self.ensure_initialized()
if device_type is None:
return list(self._logical_devices)
return [d for d in self._logical_devices if d.device_type == device_type]
def get_visible_devices(self, device_type=None):
"""Get the list of visible devices."""
self._initialize_physical_devices()
if device_type is None:
return list(self._visible_device_list)
return [
d for d in self._visible_device_list if d.device_type == device_type
]
def set_visible_devices(self, devices, device_type=None):
"""Set the list of visible devices."""
self._initialize_physical_devices()
if not isinstance(devices, list):
devices = [devices]
for d in devices:
if d not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(d))
if device_type is not None and d.device_type != device_type:
raise ValueError("Unrecognized device: %s" % repr(d))
visible_device_list = []
if device_type is not None:
visible_device_list = [
d for d in self._visible_device_list if d.device_type != device_type
]
visible_device_list += devices
if self._visible_device_list == visible_device_list:
return
if self._context_handle is not None:
raise RuntimeError(
"Visible devices cannot be modified after being initialized")
self._visible_device_list = visible_device_list
def get_memory_info(self, dev):
"""Returns a dict of memory info for the device."""
self._initialize_physical_devices()
self.ensure_initialized()
return pywrap_tfe.TFE_GetMemoryInfo(self._context_handle, dev)
# TODO(reedwm): Remove this function
def get_total_memory_usage(self, dev):
"""Returns total memory usage in bytes for the current device."""
return self.get_memory_info(dev)["current"]
def get_memory_growth(self, dev):
"""Get if memory growth is enabled for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
return self._memory_growth_map[dev]
def set_memory_growth(self, dev, enable):
"""Set if memory growth should be enabled for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
if dev in self._virtual_device_map:
raise ValueError(
"Cannot set memory growth on device when virtual devices configured")
if dev.device_type != "GPU":
raise ValueError("Cannot set memory growth on non-GPU devices")
if self._memory_growth_map.get(dev) == enable:
return
if self._context_handle is not None:
raise RuntimeError(
"Physical devices cannot be modified after being initialized")
self._memory_growth_map[dev] = enable
def get_logical_device_configuration(self, dev):
"""Get the virtual device configuration for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
return self._virtual_device_map.get(dev)
def set_logical_device_configuration(self, dev, virtual_devices):
"""Set the virtual device configuration for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
if dev.device_type == "CPU":
for vdev in virtual_devices:
if vdev.memory_limit is not None:
raise ValueError("Setting memory limit on CPU virtual devices is "
"currently not supported")
if vdev.experimental_priority is not None:
raise ValueError("Setting experimental_priority on CPU virtual "
" devices is currently not supported")
elif dev.device_type == "GPU":
for vdev in virtual_devices:
if vdev.memory_limit is None:
raise ValueError(
"Setting memory limit is required for GPU virtual devices")
else:
raise ValueError("Virtual devices are not supported for %s" %
dev.device_type)
if self._virtual_device_map.get(dev) == virtual_devices:
return
if self._context_handle is not None:
raise RuntimeError(
"Virtual devices cannot be modified after being initialized")
self._virtual_device_map[dev] = virtual_devices
def get_compiler_ir(self, device_name, function_name, args, stage="hlo"):
return pywrap_tfe.TF_GetCompilerIr(self._context_handle, function_name,
stage, device_name, args)
@deprecated(
None, "XLA:CPU and XLA:GPU devices are deprecated", warn_once=True)
def enable_xla_devices(self):
"""Enables XLA:CPU and XLA:GPU devices registration."""
pywrap_tfe.TF_EnableXlaDevices()
@property
def enable_mlir_bridge(self):
return pywrap_tfe.TF_IsMlirBridgeEnabled()
@property
def enable_mlir_graph_optimization(self):
return self._enable_mlir_graph_optimization
@enable_mlir_bridge.setter
def enable_mlir_bridge(self, enabled):
pywrap_tfe.TF_EnableMlirBridge(enabled)
self._thread_local_data.function_call_options = None
@enable_mlir_graph_optimization.setter
def enable_mlir_graph_optimization(self, enabled):
self._enable_mlir_graph_optimization = enabled
self._thread_local_data.function_call_options = None
@property
def optimizer_jit(self):
level = self.config.graph_options.optimizer_options.global_jit_level
return (level == config_pb2.OptimizerOptions.ON_1 or
level == config_pb2.OptimizerOptions.ON_2)
@optimizer_jit.setter
def optimizer_jit(self, enabled):
self._optimizer_jit = enabled
self._thread_local_data.function_call_options = None
def get_optimizer_experimental_options(self):
"""Get experimental options for the optimizer.
Returns:
Dictionary of current option values
"""
rewrite_options = self.config.graph_options.rewrite_options
options = {}
def rewriter_toggle(option):
attr = getattr(rewrite_options, option)
if attr != 0:
options[option] = (attr == rewriter_config_pb2.RewriterConfig.ON)
def rewriter_bool(option):
options[option] = getattr(rewrite_options, option)
rewriter_toggle("layout_optimizer")
rewriter_toggle("constant_folding")
rewriter_toggle("shape_optimization")
rewriter_toggle("remapping")
rewriter_toggle("arithmetic_optimization")
rewriter_toggle("dependency_optimization")
rewriter_toggle("loop_optimization")
rewriter_toggle("function_optimization")
rewriter_toggle("debug_stripper")
rewriter_bool("disable_model_pruning")
rewriter_toggle("scoped_allocator_optimization")
rewriter_toggle("pin_to_host_optimization")
rewriter_toggle("implementation_selector")
rewriter_toggle("auto_mixed_precision")
rewriter_bool("disable_meta_optimizer")
if rewrite_options.min_graph_nodes != 0:
options["min_graph_nodes"] = rewrite_options.min_graph_nodes
return options
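  # Illustrative note (not part of the original module): the options dictionary
  # returned by `get_optimizer_experimental_options` above and accepted by
  # `set_optimizer_experimental_options` below looks like, e.g.:
  #
  #   {"layout_optimizer": True,
  #    "constant_folding": False,
  #    "disable_meta_optimizer": False,
  #    "min_graph_nodes": 4}
  #
  # Boolean toggles map to RewriterConfig.ON/OFF, while "min_graph_nodes" is
  # passed through as an integer.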
def set_optimizer_experimental_options(self, options):
"""Set experimental options for the optimizer.
Args:
options: Dictionary of options to modify
"""
self._optimizer_experimental_options.update(options)
self._thread_local_data.function_call_options = None
@property
def intra_op_parallelism_threads(self):
return self.config.intra_op_parallelism_threads
@intra_op_parallelism_threads.setter
def intra_op_parallelism_threads(self, num_threads):
if self._intra_op_parallelism_threads == num_threads:
return
if self._context_handle is not None:
raise RuntimeError(
"Intra op parallelism cannot be modified after initialization.")
self._intra_op_parallelism_threads = num_threads
@property
def inter_op_parallelism_threads(self):
return self.config.inter_op_parallelism_threads
@inter_op_parallelism_threads.setter
def inter_op_parallelism_threads(self, num_threads):
if self._inter_op_parallelism_threads == num_threads:
return
if self._context_handle is not None:
raise RuntimeError(
"Inter op parallelism cannot be modified after initialization.")
self._inter_op_parallelism_threads = num_threads
@property
def soft_device_placement(self):
return self.config.allow_soft_placement
@soft_device_placement.setter
def soft_device_placement(self, enable):
if self._context_handle is not None:
pywrap_tfe.TFE_ContextSetSoftDevicePlacement(self._handle, enable)
self._soft_device_placement = enable
self._thread_local_data.function_call_options = None
@property
def log_device_placement(self):
return self.config.log_device_placement
@log_device_placement.setter
def log_device_placement(self, enable):
if self._context_handle is not None:
pywrap_tfe.TFE_ContextSetLogDevicePlacement(self._handle, enable)
self._log_device_placement = enable
self._thread_local_data.function_call_options = None
@property
def device_policy(self):
# Only get the policy from the context if it has already been initialized
if self._context_handle is not None:
return pywrap_tfe.TFE_ContextGetDevicePlacementPolicy(self._handle)
return self._device_policy
@device_policy.setter
def device_policy(self, policy):
if policy is None:
policy = DEVICE_PLACEMENT_SILENT
if self._device_policy != policy:
self._device_policy = policy
# Only set the policy if the context has already been initialized
if self._context_handle is not None:
pywrap_tfe.TFE_ContextSetThreadLocalDevicePlacementPolicy(
self._handle, self._device_policy)
@property
def use_tfrt(self):
return self._use_tfrt
@use_tfrt.setter
def use_tfrt(self, tfrt):
"""Sets whether to use TFRT."""
if not isinstance(tfrt, bool):
raise ValueError("Expecting a boolean but got %s" % type(tfrt))
if self._use_tfrt != tfrt:
if self._initialized:
raise ValueError("use_tfrt should be set before being initialized.")
self._use_tfrt = tfrt
def enable_run_metadata(self):
"""Enables tracing of op execution via RunMetadata.
To retrieve the accumulated metadata call context.export_run_metadata()
and to stop tracing call context.disable_run_metadata().
"""
self.ensure_initialized()
pywrap_tfe.TFE_ContextEnableRunMetadata(self._handle)
def disable_run_metadata(self):
"""Disables tracing of op execution via RunMetadata."""
if not self._context_handle:
return
pywrap_tfe.TFE_ContextDisableRunMetadata(self._context_handle)
def enable_graph_collection(self):
"""Enables graph collection of executed functions.
To retrieve the accumulated graphs call context.export_run_metadata()
and to stop collecting graphs call context.disable_graph_collection().
"""
self.ensure_initialized()
pywrap_tfe.TFE_ContextEnableGraphCollection(self._handle)
def disable_graph_collection(self):
"""Disables graph collection of executed functions."""
if not self._context_handle:
return
pywrap_tfe.TFE_ContextDisableGraphCollection(self._context_handle)
def export_run_metadata(self):
"""Returns a RunMetadata proto with accumulated information.
The returned protocol buffer contains information since the most recent call
to either enable_run_metadata or export_run_metadata.
Returns:
A RunMetadata protocol buffer. Or None if not enabled.
"""
if not self._context_handle:
return None
with c_api_util.tf_buffer() as buffer_:
pywrap_tfe.TFE_ContextExportRunMetadata(self._context_handle, buffer_)
proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)
run_metadata = config_pb2.RunMetadata()
run_metadata.ParseFromString(compat.as_bytes(proto_data))
return run_metadata
@property
def context_switches(self):
"""Returns a stack of context switches."""
return self._context_switches
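# --- Illustrative sketch (not part of the original module) -------------------
# A minimal end-to-end use of `Context` through the module-level helpers
# defined further below: obtain the singleton, force initialization, and
# inspect its devices. Hypothetical and never called from this file.
def _example_context_bootstrap():
  ctx = context()           # Lazily creates the singleton Context.
  ctx.ensure_initialized()  # Builds the underlying TFE context handle.
  # Canonical device names look like
  # "/job:localhost/replica:0/task:0/device:CPU:0".
  for device_name in ctx.devices():
    assert "/device:" in device_name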
class _EagerDeviceContext(object):
"""Context-manager forcing placement of ops and Tensors on a device."""
__slots__ = ["_device_name", "_ctx", "_stack"]
def __init__(self, ctx, device_name):
self._device_name = device_name
self._ctx = ctx
self._stack = []
def __enter__(self):
ctx = self._ctx
old_device_name = ctx.device_name
old_device_spec = ctx.device_spec
new_device_name = self._device_name
cache_key = (old_device_name, new_device_name)
try:
new_device_name, new_device_spec = _device_parsing_cache[cache_key]
except TypeError:
# Error while trying to compute the cache key.
raise ValueError("Expecting a string device name. Got %s(%s)" %
(type(new_device_name), new_device_name))
except KeyError:
# Handle a cache miss.
if new_device_name is not None:
if not isinstance(new_device_name, six.string_types):
raise ValueError("Expecting a string device name. Got %s(%s)" %
(type(new_device_name), new_device_name))
device_spec = pydev.DeviceSpec.from_string(new_device_name)
if old_device_name:
new_device_spec = copy.copy(old_device_spec)
else:
ctx.ensure_initialized()
new_device_spec = pydev.DeviceSpec.from_string(
ctx._context_devices[0]) # pylint: disable=protected-access
new_device_spec = new_device_spec.make_merged_spec(device_spec)
else:
new_device_spec = pydev.DeviceSpec.from_string("")
new_device_name = new_device_spec.to_string()
_device_parsing_cache[cache_key] = (new_device_name, new_device_spec)
ctx._set_device(new_device_name, new_device_spec) # pylint: disable=protected-access
self._stack.append((old_device_name, old_device_spec, new_device_spec))
def __exit__(self, *ex_info):
ctx = self._ctx
old_device_name, old_device_spec, new_device_spec = self._stack[-1]
if ctx.device_spec is not new_device_spec:
raise RuntimeError(
"Exiting device scope without proper scope nesting")
del self._stack[-1]
ctx._set_device(old_device_name, old_device_spec) # pylint: disable=protected-access
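# --- Illustrative sketch (not part of the original module) -------------------
# Shows how a `Context.device` scope merges a partial device string against the
# context's default device and restores the previous placement on exit.
# Requires an initialized context; hypothetical and never called from this
# file.
def _example_eager_device_scopes():
  ctx = context()
  ctx.ensure_initialized()
  outer_name = ctx.device_name
  with ctx.device("/device:CPU:0"):
    assert ctx.device_name.endswith("CPU:0")
  assert ctx.device_name == outer_name  # Previous placement restored on exit.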
# Do not set directly. Use _set_context.
_context = None
_context_lock = threading.Lock()
def _set_context_locked(ctx):
global _context
pywrap_tfe.TFE_Py_SetEagerContext(ctx)
_context = ctx
def _set_context(ctx):
with _context_lock:
_set_context_locked(ctx)
def _create_context():
with _context_lock:
if _context is None:
ctx = Context()
_set_context_locked(ctx)
def _reset_context():
"""Clears and re-initializes the singleton context.
Should only be used for testing.
"""
global _context
global _device_parsing_cache
with _context_lock:
if _context is not None:
_context._clear_caches()
_context = None
_create_context()
_device_parsing_cache = {}
pywrap_tfe.TFE_ClearScalarCache()
def context():
"""Returns a singleton context object."""
if _context is None:
_create_context()
return _context
def context_safe():
"""Returns current context (or None if one hasn't been initialized)."""
return _context
def ensure_initialized():
"""Initialize the context."""
context().ensure_initialized()
def set_global_seed(seed):
"""Sets the eager mode seed."""
context()._set_global_seed(seed) # pylint: disable=protected-access
def global_seed():
"""Returns the eager mode seed."""
return context()._seed # pylint: disable=protected-access
def internal_operation_seed():
"""Returns the operation seed generated based on global seed."""
return context()._internal_operation_seed() # pylint: disable=protected-access
@tf_export("executing_eagerly", v1=[])
def executing_eagerly():
"""Checks whether the current thread has eager execution enabled.
Eager execution is enabled by default and this API returns `True`
  in most cases. However, this API might return `False` in the following use
cases.
* Executing inside `tf.function`, unless under `tf.init_scope` or
`tf.config.run_functions_eagerly(True)` is previously called.
* Executing inside a transformation function for `tf.dataset`.
* `tf.compat.v1.disable_eager_execution()` is called.
General case:
>>> print(tf.executing_eagerly())
True
Inside `tf.function`:
>>> @tf.function
... def fn():
... with tf.init_scope():
... print(tf.executing_eagerly())
... print(tf.executing_eagerly())
>>> fn()
True
False
Inside `tf.function` after `tf.config.run_functions_eagerly(True)` is called:
>>> tf.config.run_functions_eagerly(True)
>>> @tf.function
... def fn():
... with tf.init_scope():
... print(tf.executing_eagerly())
... print(tf.executing_eagerly())
>>> fn()
True
True
>>> tf.config.run_functions_eagerly(False)
Inside a transformation function for `tf.dataset`:
>>> def data_fn(x):
... print(tf.executing_eagerly())
... return x
>>> dataset = tf.data.Dataset.range(100)
>>> dataset = dataset.map(data_fn)
False
Returns:
`True` if the current thread has eager execution enabled.
"""
ctx = context_safe()
if ctx is None:
return default_execution_mode == EAGER_MODE
return ctx.executing_eagerly()
@tf_export(v1=["executing_eagerly"])
def executing_eagerly_v1():
"""Checks whether the current thread has eager execution enabled.
Eager execution is typically enabled via
`tf.compat.v1.enable_eager_execution`, but may also be enabled within the
context of a Python function via tf.contrib.eager.py_func.
When eager execution is enabled, returns `True` in most cases. However,
this API might return `False` in the following use cases.
* Executing inside `tf.function`, unless under `tf.init_scope` or
`tf.config.run_functions_eagerly(True)` is previously called.
* Executing inside a transformation function for `tf.dataset`.
* `tf.compat.v1.disable_eager_execution()` is called.
>>> tf.compat.v1.enable_eager_execution()
General case:
>>> print(tf.executing_eagerly())
True
Inside `tf.function`:
>>> @tf.function
... def fn():
... with tf.init_scope():
... print(tf.executing_eagerly())
... print(tf.executing_eagerly())
>>> fn()
True
False
Inside `tf.function`
after `tf.config.run_functions_eagerly(True)` is called:
>>> tf.config.run_functions_eagerly(True)
>>> @tf.function
... def fn():
... with tf.init_scope():
... print(tf.executing_eagerly())
... print(tf.executing_eagerly())
>>> fn()
True
True
>>> tf.config.run_functions_eagerly(False)
Inside a transformation function for `tf.dataset`:
>>> def data_fn(x):
... print(tf.executing_eagerly())
... return x
>>> dataset = tf.data.Dataset.range(100)
>>> dataset = dataset.map(data_fn)
False
Returns:
`True` if the current thread has eager execution enabled.
"""
return executing_eagerly()
def in_eager_mode():
"""Use executing_eagerly() instead. This function will be removed."""
return executing_eagerly()
def shared_name(name=None):
"""Returns the anonymous shared name GUID if no shared name is specified.
In eager mode we need to use a unique shared name to avoid spurious sharing
issues. The runtime generates a unique name on our behalf when the reserved
GUID is used as a shared name.
Args:
name: Optional shared name
Returns:
Eager compatible shared name.
"""
if name or not executing_eagerly():
return name
# Ensure a unique name when eager execution is enabled to avoid spurious
# sharing issues.
return "cd2c89b7-88b7-44c8-ad83-06c2a9158347"
def graph_mode():
"""Context-manager to disable eager execution for the current thread."""
return context()._mode(GRAPH_MODE) # pylint: disable=protected-access
# Used by b/167638505 for keras backend API and Lambda layer.
@tf_export("__internal__.eager_context.eager_mode", v1=[])
def eager_mode():
"""Context-manager to enable eager execution for the current thread."""
return context()._mode(EAGER_MODE) # pylint: disable=protected-access
def scope_name():
"""Name of the current scope."""
return context().scope_name
def device(name):
"""Context-manager to force placement of operations and Tensors on a device.
Example:
```python
with tf.device('gpu:0'):
with tf.device('cpu:0'):
shape = tf.constant([], dtype=tf.int32)
x = tf.random.truncated_normal(shape, tf.float32)
```
will ensure that the `shape` Tensor is on CPU but the `truncated_normal`
operation runs on GPU 0.
Args:
name: Name of the device (see context().devices()), or None to
perform automatic placement.
Returns:
Context manager for setting the device.
"""
ensure_initialized()
return context().device(name)
# Expose some properties of Context as internally public APIs (b/160348781).
@tf_export("__internal__.eager_context.get_config", v1=[])
def get_config():
"""Get the ConfigProto of Context.
Returns:
The ConfigProto of Context.
"""
return context().config
@tf_export("__internal__.eager_context.get_device_name", v1=[])
def get_device_name():
"""Get the device name for the current thread.
Returns:
The device name for the current thread.
"""
return context().device_name
@tf_export("__internal__.eager_context.set_soft_device_placement", v1=[])
def set_soft_device_placement(enabled):
"""Set if soft device placements should be allowed.
Args:
enabled: Whether to enable soft device placement.
"""
context().soft_device_placement = enabled
@tf_export("__internal__.eager_context.get_executor", v1=[])
def get_executor():
"""Get the Executor of the current thread.
Returns:
The Executor of the current thread.
"""
return context().executor
@tf_export("debugging.get_log_device_placement")
def get_log_device_placement():
"""Get if device placements are logged.
Returns:
If device placements are logged.
"""
return context().log_device_placement
@tf_export("debugging.set_log_device_placement")
def set_log_device_placement(enabled):
"""Set if device placements should be logged.
Args:
    enabled: Whether to enable device placement logging.
"""
context().log_device_placement = enabled
@tf_contextlib.contextmanager
def device_policy(policy):
"""Context manager for setting device placement policy for current thread."""
ctx = context()
old_policy = ctx.device_policy
try:
ctx.device_policy = policy
yield
finally:
ctx.device_policy = old_policy
def set_execution_mode(mode):
"""Sets execution mode for the current thread."""
context().execution_mode = mode
# TODO(fishx): remove this method.
@tf_contextlib.contextmanager
def execution_mode(mode):
"""Context manager for setting execution mode for current thread."""
if mode is None:
yield
else:
ctx = context()
executor_new = executor.new_executor(mode == ASYNC)
executor_old = ctx.executor
try:
executor_old.wait()
ctx.executor = executor_new
yield
finally:
ctx.executor = executor_old
executor_new.wait()
@tf_contextlib.contextmanager
def executor_scope(e):
"""Context manager for changing executor for current thread.
Args:
    e: An Executor to execute eager ops under this scope. Setting it to None will
      switch back to using the default executor for the context.
Yields:
Context manager for setting the executor for current thread.
"""
ctx = context()
executor_old = ctx.executor
try:
ctx.executor = e
yield
finally:
ctx.executor = executor_old
@tf_export("experimental.function_executor_type")
@tf_contextlib.contextmanager
def function_executor_type(executor_type):
"""Context manager for setting the executor of eager defined functions.
Eager defined functions are functions decorated by tf.contrib.eager.defun.
Args:
executor_type: a string for the name of the executor to be used to execute
functions defined by tf.contrib.eager.defun.
Yields:
Context manager for setting the executor of eager defined functions.
"""
current_options = context().function_call_options
old_options = copy.copy(current_options)
try:
current_options.executor_type = executor_type
yield
finally:
context().function_call_options = old_options
def is_async():
"""Returns true if current thread is in async mode."""
return context().is_async()
def num_gpus():
"""Get the number of available GPU devices.
Returns:
The number of available GPU devices.
"""
return context().num_gpus()
def enable_run_metadata():
"""Enables tracing of op execution via RunMetadata.
To retrieve the accumulated metadata call context.export_run_metadata()
and to stop tracing call context.disable_run_metadata().
"""
context().enable_run_metadata()
def disable_run_metadata():
"""Disables tracing of op execution via RunMetadata."""
context().disable_run_metadata()
def enable_graph_collection():
"""Enables graph collection of executed functions.
To retrieve the accumulated graphs call context.export_run_metadata()
and to stop collecting graphs call context.disable_graph_collection().
"""
context().enable_graph_collection()
def disable_graph_collection():
"""Disables graph collection of executed functions."""
context().disable_graph_collection()
def export_run_metadata():
"""Returns a RunMetadata proto with accumulated information.
The returned protocol buffer contains information since the most recent call
to either enable_run_metadata or export_run_metadata.
Returns:
A RunMetadata protocol buffer.
"""
return context().export_run_metadata()
@contextlib.contextmanager
def collect_graphs(optimized=True):
"""Collects a flat list of pre- or post-optimization graphs.
The collected graphs include device placements, which can be useful for
testing.
Usage:
```
@def_function.function
def f(x):
return x + constant_op.constant(1.)
with context.collect_graphs() as graphs:
with ops.device("CPU:0"):
f(constant_op.constant(1.))
graph, = graphs # `graph` contains a single GraphDef for inspection
```
Args:
optimized: whether to collect optimized graphs or non-optimized graphs
Yields:
A list of GraphDefs, populated when the context manager exits.
"""
ctx = context()
ctx.enable_graph_collection()
try:
graphs = []
yield graphs
metadata = ctx.export_run_metadata()
finally:
ctx.disable_graph_collection()
for graph in metadata.function_graphs:
if optimized:
graphs.append(graph.post_optimization_graph)
else:
graphs.append(graph.pre_optimization_graph)
def get_server_def():
return context().get_server_def()
def set_server_def(server_def):
context().set_server_def(server_def)
def update_server_def(server_def):
context().update_server_def(server_def)
def check_alive(worker_name):
return context().check_alive(worker_name)
@tf_export("experimental.async_scope")
@tf_contextlib.contextmanager
def async_scope():
"""Context manager for grouping async operations.
Ops/function calls inside the scope can return before finishing the actual
execution. When exiting the async scope, a synchronization barrier will be
automatically added to ensure the completion of all async op and function
execution, potentially raising exceptions if async execution results in
an error state.
Users may write the following code to asynchronously invoke `train_step_fn`
and log the `loss` metric for every `num_steps` steps in a training loop.
`train_step_fn` internally consumes data using `iterator.get_next()`, and may
  throw OutOfRangeError when running out of data. In that case:
```
try:
with tf.experimental.async_scope():
for _ in range(num_steps):
# Step function updates the metric `loss` internally
train_step_fn()
except tf.errors.OutOfRangeError:
tf.experimental.async_clear_error()
logging.info('loss = %s', loss.numpy())
```
Yields:
Context manager for grouping async operations.
"""
# TODO(haoyuzhang): replace env var once we have a config method to turn on
# and off async streaming RPC
remote_async_env_var = "TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE"
old_policy = os.environ.get(remote_async_env_var)
try:
os.environ[remote_async_env_var] = str(True)
yield
# Note: sync local and remote executors iff the async block does not raise
# an exception. Triggering sync after an exception may lead to derived
# runtime errors and unexpected exception types.
context().sync_executors()
finally:
if old_policy is None:
del os.environ[remote_async_env_var]
else:
os.environ[remote_async_env_var] = old_policy
def async_wait():
"""Sync all async operations and raise any errors during execution.
In async execution mode, an op/function call can return before finishing the
actual execution. Calling this method creates a synchronization barrier for
all async op and function execution. It only returns when all pending nodes
are finished, potentially raising exceptions if async execution results in
an error state.
"""
context().sync_executors()
@tf_export("experimental.async_clear_error")
def async_clear_error():
"""Clear pending operations and error statuses in async execution.
In async execution mode, an error in op/function execution can lead to errors
in subsequent ops/functions that are scheduled but not yet executed. Calling
  this method clears all pending operations and resets the async execution state.
Example:
```
while True:
try:
# Step function updates the metric `loss` internally
train_step_fn()
except tf.errors.OutOfRangeError:
tf.experimental.async_clear_error()
break
logging.info('loss = %s', loss.numpy())
```
"""
context().clear_executor_errors()
def add_function(fdef):
"""Add a function definition to the context."""
context().add_function(fdef)
def remove_function(name):
"""Remove a function from the context."""
context().remove_function(name)
def get_function_def(name):
return context().get_function_def(name)
def register_custom_device(device_capsule, device_name, device_info_capsule):
"""Calls TFE_RegisterCustomDevice to register a custom device with Python.
Enables using C extensions specifying a custom device from Python. See the
experimental eager C API in tensorflow/c/eager/c_api_experimental.h for
details.
Note that custom devices are not currently supported inside `tf.function`s.
Args:
device_capsule: A PyCapsule with the name set to 'TFE_CustomDevice'
containing a pointer to a TFE_CustomDevice struct. The capsule retains
ownership of the memory.
device_name: A string indicating the name to register the custom device
under, e.g. '/job:localhost/replica:0/task:0/device:CUSTOM:0'. It may
subsequently be passed to `with tf.device(...):`.
device_info_capsule: A PyCapsule with the name set to
'TFE_CustomDevice_DeviceInfo' containing a pointer to a device-specific
struct with the initial state of the custom device (the void* device_info
argument to TFE_RegisterCustomDevice). This method takes ownership of the
memory and clears the capsule destructor.
"""
context().register_custom_device(device_capsule, device_name,
device_info_capsule)
# Not every user creates a Context via context.context()
# (for example, enable_eager_execution in python/framework/ops.py),
# but they do all import this file. Note that IS_IN_GRAPH_MODE and
# in_graph_mode are both parameterless functions.
def _tmp_in_graph_mode():
if context_safe() is None:
# Context not yet initialized. Assume graph mode following the
# default implementation in `is_in_graph_mode`.
return True
return not executing_eagerly()
is_in_graph_mode.IS_IN_GRAPH_MODE = _tmp_in_graph_mode
| apache-2.0 | 4,775,819,562,434,817,000 | 32.90898 | 89 | 0.687969 | false |
SamiHiltunen/invenio-redirector | invenio_redirector/views.py | 4 | 2933 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Implement redirection URLs."""
import inspect
from flask import Blueprint, abort, current_app, make_response, redirect, \
render_template, request, url_for
from flask_login import current_user
from .api import get_redirection_data
from .registry import get_redirect_method
blueprint = Blueprint('goto', __name__, url_prefix="/goto",
template_folder='templates', static_folder='static')
@blueprint.route('/<path:component>', methods=['GET', 'POST'])
def index(component):
"""Handle /goto set of pages."""
redirection_data = get_redirection_data(component)
goto_plugin = get_redirect_method(redirection_data['plugin'])
args, dummy_varargs, dummy_varkw, defaults = inspect.getargspec(
goto_plugin)
args = args and list(args) or []
args.reverse()
defaults = defaults and list(defaults) or []
defaults.reverse()
params_to_pass = {}
for (arg, default) in zip(args, defaults):
params_to_pass[arg] = default
# Let's put what is in the GET query
for key, value in request.args.items():
if key in params_to_pass:
params_to_pass[key] = str(value)
# Let's override the params_to_pass to the call with the
# arguments in the configuration
configuration_parameters = redirection_data['parameters'] or {}
params_to_pass.update(configuration_parameters)
# Let's add default parameters if the plugin expects them
if 'component' in params_to_pass:
params_to_pass['component'] = component
if 'path' in params_to_pass:
params_to_pass['path'] = request.path
if 'user_info' in params_to_pass:
params_to_pass['user_info'] = current_user._get_current_object()
if 'req' in params_to_pass:
params_to_pass['req'] = request._get_current_object()
try:
new_url = goto_plugin(**params_to_pass)
except Exception:
current_app.logger.exception("Redirection handler problem.")
abort(404)
if new_url:
if new_url.startswith('/'):
new_url = current_app.config['CFG_SITE_URL'] + new_url
return redirect(new_url)
abort(404)
| gpl-2.0 | -3,288,096,179,801,401,300 | 36.602564 | 75 | 0.679168 | false |
yashu-seth/BinPy | BinPy/tests/test_signal_generator.py | 5 | 3621 | import time
import math
from BinPy import *
def test_sin_signal():
sig1 = SignalGenerator(typ=0, freq=1000, ampl=1)
time.sleep(0.5) # To allow setup time.
max = -1
min = 2
start_time = time.time()
while (time.time() - start_time) < sig1.time_period:
# sample when the run method is in sleep
while sig1.updating:
pass
t = sig1.last_updated_time
sig1_output = sig1.outputs[0].voltage - sig1.outputs[1].voltage
sin_t = 0.5 * math.sin(2 * 3.145926 * sig1._frequency * t) + 0.5
if (round(sig1_output, 2) != round(sin_t, 2)):
sig1.kill()
assert False
time.sleep(sig1.sampling_time_interval)
sig1.kill()
def test_square_signal():
sig1 = SignalGenerator(typ=1, freq=1000, ampl=2)
sig1.set_offset(-1)
time.sleep(0.5)
# To allow setup time.
# To make range [ -1 to 1 ]
max = -1
min = 2
start_time = time.time()
while (time.time() - start_time) < sig1.time_period:
# sample when the run method is in sleep
while sig1.updating:
pass
t = sig1.last_updated_time
sig1_output = sig1.outputs[0].voltage - sig1.outputs[1].voltage
sq_t = 1 if t < (sig1.time_period / float(2)) else -1
if (round(sig1_output, 2) != round(sq_t, 2)):
sig1.kill()
assert False
time.sleep(sig1.sampling_time_interval)
sig1.kill()
def test_ramp_signal():
sig1 = SignalGenerator(typ=2, freq=1000, ampl=2)
sig1.set_offset(-1)
time.sleep(0.5) # To allow setup time.
# To make range [ -1 to 1 ]
max = -1
min = 2
start_time = time.time()
while (time.time() - start_time) < sig1.time_period:
# sample when the run method is in sleep
while sig1.updating:
pass
t = sig1.last_updated_time
sig1_output = sig1.outputs[0].voltage - sig1.outputs[1].voltage
r_t = 2 * ((t / sig1.time_period) - 0.5)
if (round(sig1_output, 2) != round(r_t, 2)):
sig1.kill()
assert False
time.sleep(sig1.sampling_time_interval)
sig1.kill()
def test_triangle_signal():
sig1 = SignalGenerator(typ=3, freq=1000, ampl=1)
time.sleep(0.5) # To allow setup time.
max = -1
min = 2
start_time = time.time()
while (time.time() - start_time) < sig1.time_period:
# sample when the run method is in sleep
while sig1.updating:
pass
t = sig1.last_updated_time
sig1_output = sig1.outputs[0].voltage - sig1.outputs[1].voltage
if (t < (sig1.time_period / 2.0)):
tr_t = (2 * t / sig1.time_period)
else:
tr_t = (2 * (sig1.time_period - t) / sig1.time_period)
if (round(sig1_output, 2) != round(tr_t, 2)):
sig1.kill()
assert False
time.sleep(sig1.sampling_time_interval)
sig1.kill()
def test_ttl_signal():
sig1 = SignalGenerator(typ=4, freq=1000, ampl=2)
time.sleep(0.5) # To allow setup time.
max = -1
min = 2
start_time = time.time()
while (time.time() - start_time) < sig1.time_period:
# sample when the run method is in sleep
while sig1.updating:
pass
t = sig1.last_updated_time
sig1_output = sig1.outputs[0].voltage - sig1.outputs[1].voltage
ttl_t = 5 if t < (sig1.time_period / 2.0) else 0
if (round(sig1_output, 2) != round(ttl_t, 2)):
sig1.kill()
assert False
time.sleep(sig1.sampling_time_interval)
sig1.kill()
| bsd-3-clause | -3,346,815,174,515,235,300 | 24.5 | 72 | 0.560895 | false |
kragniz/hexogen | Hexogen/ProgramData.py | 1 | 1257 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Louis Taylor <[email protected]>
#
# This file is part of Hexogen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/gpl-3.0.txt
NAME = 'Hexogen'
VERSION = '0.01-alpha'
| gpl-3.0 | -6,364,007,642,810,695,000 | 56.136364 | 81 | 0.483691 | false |
iwvelando/conky_wunderground_scripts | wunderground_hourly_forecast.py | 1 | 2546 | import urllib2
import json
import sys
import os
import datetime
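# Usage sketch (assumed invocation; the location format follows the Wunderground
# API query syntax, e.g. "TX/Austin" or a ZIP code, and the second argument is the
# number of forecast hours to print):
#   python wunderground_hourly_forecast.py TX/Austin 12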
# Extract command line arguments and grab the wunderground API key
location = sys.argv[1]
nHours = int(sys.argv[2])
apiKey = open(os.getenv('HOME') + '/.api/wunderground','r').readline().split('\n')[0]
# Wunderground API call for astronomy data (for sunrise and sunset)
g = urllib2.urlopen('http://api.wunderground.com/api/' + apiKey + '/astronomy/q/' + location + '.json')
json_string = g.read()
g.close()
parsed_json = json.loads(json_string)
parsedSunrise = parsed_json['moon_phase']['sunrise']
parsedSunset = parsed_json['moon_phase']['sunset']
# Wunderground API call for hourly weather data
f = urllib2.urlopen('http://api.wunderground.com/api/' + apiKey + '/hourly/q/' + location + '.json')
json_string = f.read()
f.close()
parsed_json = json.loads(json_string)
hourly = parsed_json['hourly_forecast']
hours = len(hourly)
if nHours > hours:
nHours = hours
# If sunrise or sunset hasn't happened yet, print them on the first line
now = datetime.datetime.today().replace(second=0,microsecond=0)
sunrise = now.replace(hour=int(parsedSunrise['hour']),minute=int(parsedSunrise['minute']))
sunset = now.replace(hour=int(parsedSunset['hour']),minute=int(parsedSunset['minute']))
if sunrise > now:
sun = 'Sunrise: ' + sunrise.strftime('%H:%M') + ', Sunset: ' + sunset.strftime('%H:%M')
print sun
elif sunset > now:
sun = 'Sunset: ' + sunset.strftime('%H:%M')
print sun
# Set a dictionary for mapping the icon field to shorter display text
icons = {'chanceflurries' : 'flurry?',
'chancerain' : 'rain?',
'chancesleet' : 'sleet?',
'chancesnow' : 'snow?',
'chancetstorms' : 'tstorm?',
'clear' : 'clear',
'cloudy' : 'cloudy',
'flurries' : 'flurry',
'fog' : 'fog',
'hazy' : 'hazy',
'mostlycloudy' : 'cloudy',
'mostlysunny' : 'sunny',
'partlycloudy' : 'cloudy~',
'partlysunny' : 'sunny~',
'rain' : 'rain',
'sleet' : 'sleet',
'snow' : 'snow',
'sunny' : 'sunny',
'tstorms' : 'tstorm',
'unknown' : '???'}
# Iterate through nHours and print their data
for i in range(nHours):
#hour = datetime.strptime(str(hourly[i]['FCTTIME']['civil']),"%I:%M %p").strftime("%I")
hour = int(hourly[i]['FCTTIME']['hour'])
#ampm = str(hourly[i]['FCTTIME']['ampm'])
temp = int(hourly[i]['temp']['english'])
cond = icons[str(hourly[i]['icon'])]
wspd = int(hourly[i]['wspd']['english'])
wdir = int(hourly[i]['wdir']['degrees'])
rain_chance = int(hourly[i]['pop'])
print '{:2d} | {:3d} | {:2d} @ {:3d} | {:3d} | {:7s}'.format(hour,temp,wspd,wdir,rain_chance,cond)
| bsd-2-clause | 5,041,621,537,292,661,000 | 33.876712 | 103 | 0.653967 | false |
maestro-hybrid-cloud/keystone | keystone/contrib/oauth1/validator.py | 17 | 6574 | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oAuthlib request validator."""
from oslo_log import log
import six
from keystone.common import dependency
from keystone.contrib.oauth1 import core as oauth1
from keystone import exception
METHOD_NAME = 'oauth_validator'
LOG = log.getLogger(__name__)
@dependency.requires('oauth_api')
class OAuthValidator(oauth1.RequestValidator):
# TODO(mhu) set as option probably?
@property
def enforce_ssl(self):
return False
@property
def safe_characters(self):
# oauth tokens are generated from a uuid hex value
return set("abcdef0123456789")
def _check_token(self, token):
# generic token verification when they're obtained from a uuid hex
return (set(token) <= self.safe_characters and
len(token) == 32)
def check_client_key(self, client_key):
return self._check_token(client_key)
def check_request_token(self, request_token):
return self._check_token(request_token)
def check_access_token(self, access_token):
return self._check_token(access_token)
def check_nonce(self, nonce):
# Assuming length is not a concern
return set(nonce) <= self.safe_characters
def check_verifier(self, verifier):
return (all(i in oauth1.VERIFIER_CHARS for i in verifier) and
len(verifier) == 8)
def get_client_secret(self, client_key, request):
client = self.oauth_api.get_consumer_with_secret(client_key)
return client['secret']
def get_request_token_secret(self, client_key, token, request):
token_ref = self.oauth_api.get_request_token(token)
return token_ref['request_secret']
def get_access_token_secret(self, client_key, token, request):
access_token = self.oauth_api.get_access_token(token)
return access_token['access_secret']
def get_default_realms(self, client_key, request):
# realms weren't implemented with the previous library
return []
def get_realms(self, token, request):
return []
def get_redirect_uri(self, token, request):
# OOB (out of band) is supposed to be the default value to use
return 'oob'
def get_rsa_key(self, client_key, request):
# HMAC signing is used, so return a dummy value
return ''
def invalidate_request_token(self, client_key, request_token, request):
# this method is invoked when an access token is generated out of a
# request token, to make sure that request token cannot be consumed
# anymore. This is done in the backend, so we do nothing here.
pass
def validate_client_key(self, client_key, request):
try:
return self.oauth_api.get_consumer(client_key) is not None
except exception.NotFound:
return False
def validate_request_token(self, client_key, token, request):
try:
return self.oauth_api.get_request_token(token) is not None
except exception.NotFound:
return False
def validate_access_token(self, client_key, token, request):
try:
return self.oauth_api.get_access_token(token) is not None
except exception.NotFound:
return False
def validate_timestamp_and_nonce(self,
client_key,
timestamp,
nonce,
request,
request_token=None,
access_token=None):
return True
def validate_redirect_uri(self, client_key, redirect_uri, request):
# we expect OOB, we don't really care
return True
def validate_requested_realms(self, client_key, realms, request):
# realms are not used
return True
def validate_realms(self,
client_key,
token,
request,
uri=None,
realms=None):
return True
def validate_verifier(self, client_key, token, verifier, request):
try:
req_token = self.oauth_api.get_request_token(token)
return req_token['verifier'] == verifier
except exception.NotFound:
return False
def verify_request_token(self, token, request):
# there aren't strong expectations on the request token format
return isinstance(token, six.string_types)
def verify_realms(self, token, realms, request):
return True
# The following save_XXX methods are called to create tokens. I chose to
# keep the original logic, but the comments below show how that could be
# implemented. The real implementation logic is in the backend.
def save_access_token(self, token, request):
pass
# token_duration = CONF.oauth1.request_token_duration
# request_token_id = request.client_key
# self.oauth_api.create_access_token(request_token_id,
# token_duration,
# token["oauth_token"],
# token["oauth_token_secret"])
def save_request_token(self, token, request):
pass
# project_id = request.headers.get('Requested-Project-Id')
# token_duration = CONF.oauth1.request_token_duration
# self.oauth_api.create_request_token(request.client_key,
# project_id,
# token_duration,
# token["oauth_token"],
# token["oauth_token_secret"])
def save_verifier(self, token, verifier, request):
# keep the old logic for this, as it is done in two steps and requires
# information that the request validator has no access to
pass
| apache-2.0 | 9,042,448,513,270,215,000 | 35.726257 | 78 | 0.606632 | false |
AdrieleD/gr-mac1 | examples/transceiver_CSS_USRP.py | 1 | 13355 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: IEEE 802.15.4 Transceiver using CSS PHY
# Description: IEEE 802.15.4 Transceiver using CSS PHY
# Generated: Wed Jun 22 12:23:47 2016
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
import os
import sys
sys.path.append(os.environ.get('GRC_HIER_PATH', os.path.expanduser('~/.grc_gnuradio')))
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import uhd
from gnuradio import wxgui
from gnuradio.eng_option import eng_option
from gnuradio.fft import window
from gnuradio.filter import firdes
from gnuradio.wxgui import fftsink2
from gnuradio.wxgui import forms
from gnuradio.wxgui import scopesink2
from gnuradio.wxgui import waterfallsink2
from grc_gnuradio import wxgui as grc_wxgui
from ieee802_15_4_css_phy import ieee802_15_4_css_phy # grc-generated hier_block
from optparse import OptionParser
import ieee802_15_4
import pmt
import time
import trafficGenerator
import wx
class transceiver_CSS_USRP(grc_wxgui.top_block_gui):
def __init__(self):
grc_wxgui.top_block_gui.__init__(self, title="IEEE 802.15.4 Transceiver using CSS PHY")
_icon_path = "/usr/share/icons/hicolor/32x32/apps/gnuradio-grc.png"
self.SetIcon(wx.Icon(_icon_path, wx.BITMAP_TYPE_ANY))
##################################################
# Variables
##################################################
self.text_msg = text_msg = "Hello World, this is GNU Radio using the IEEE 802.15.4 CSS PHY!"
self.freq = freq = 2480000000
self.samp_rate = samp_rate = 1e6
self.msg_interval = msg_interval = 1000
self.gain = gain = 20
self.cur_freq = cur_freq = freq
self.c = c = ieee802_15_4.css_phy(chirp_number=4, phy_packetsize_bytes=len(text_msg)+15)
##################################################
# Blocks
##################################################
self.nb = self.nb = wx.Notebook(self.GetWin(), style=wx.NB_TOP)
self.nb.AddPage(grc_wxgui.Panel(self.nb), "RX Waterfall")
self.nb.AddPage(grc_wxgui.Panel(self.nb), "RX FFT")
self.nb.AddPage(grc_wxgui.Panel(self.nb), "RX Time")
self.nb.AddPage(grc_wxgui.Panel(self.nb), "RX Symbols")
self.Add(self.nb)
_msg_interval_sizer = wx.BoxSizer(wx.VERTICAL)
self._msg_interval_text_box = forms.text_box(
parent=self.GetWin(),
sizer=_msg_interval_sizer,
value=self.msg_interval,
callback=self.set_msg_interval,
label="Message interval [ms]",
converter=forms.float_converter(),
proportion=0,
)
self._msg_interval_slider = forms.slider(
parent=self.GetWin(),
sizer=_msg_interval_sizer,
value=self.msg_interval,
callback=self.set_msg_interval,
minimum=1,
maximum=5000,
num_steps=1000,
style=wx.SL_HORIZONTAL,
cast=float,
proportion=1,
)
self.Add(_msg_interval_sizer)
_gain_sizer = wx.BoxSizer(wx.VERTICAL)
self._gain_text_box = forms.text_box(
parent=self.GetWin(),
sizer=_gain_sizer,
value=self.gain,
callback=self.set_gain,
label="TX/RX Gain",
converter=forms.int_converter(),
proportion=0,
)
self._gain_slider = forms.slider(
parent=self.GetWin(),
sizer=_gain_sizer,
value=self.gain,
callback=self.set_gain,
minimum=1,
maximum=100,
num_steps=100,
style=wx.SL_HORIZONTAL,
cast=int,
proportion=1,
)
self.Add(_gain_sizer)
self._freq_chooser = forms.radio_buttons(
parent=self.GetWin(),
value=self.freq,
callback=self.set_freq,
label="Channel",
choices=[1000000 * (2400 + 5 * (i - 10)) for i in range(11, 27)],
labels=[i for i in range(11, 27)],
style=wx.RA_HORIZONTAL,
)
self.Add(self._freq_chooser)
self.wxgui_waterfallsink2_0 = waterfallsink2.waterfall_sink_c(
self.nb.GetPage(0).GetWin(),
baseband_freq=0,
dynamic_range=100,
ref_level=0,
ref_scale=2.0,
sample_rate=samp_rate,
fft_size=512,
fft_rate=15,
average=False,
avg_alpha=None,
title="RX Waterfall",
)
self.nb.GetPage(0).Add(self.wxgui_waterfallsink2_0.win)
self.wxgui_scopesink2_3 = scopesink2.scope_sink_c(
self.nb.GetPage(3).GetWin(),
title="RX Correlator Output",
sample_rate=samp_rate,
v_scale=0,
v_offset=0,
t_scale=0,
ac_couple=False,
xy_mode=False,
num_inputs=1,
trig_mode=wxgui.TRIG_MODE_AUTO,
y_axis_label="Counts",
)
self.nb.GetPage(3).Add(self.wxgui_scopesink2_3.win)
self.wxgui_scopesink2_2 = scopesink2.scope_sink_c(
self.nb.GetPage(2).GetWin(),
title="RX Time Signal",
sample_rate=samp_rate,
v_scale=0,
v_offset=0,
t_scale=0,
ac_couple=False,
xy_mode=False,
num_inputs=1,
trig_mode=wxgui.TRIG_MODE_AUTO,
y_axis_label="Counts",
)
self.nb.GetPage(2).Add(self.wxgui_scopesink2_2.win)
self.wxgui_fftsink2_0 = fftsink2.fft_sink_c(
self.nb.GetPage(1).GetWin(),
baseband_freq=freq,
y_per_div=10,
y_divs=10,
ref_level=0,
ref_scale=2.0,
sample_rate=samp_rate,
fft_size=1024,
fft_rate=15,
average=True,
avg_alpha=None,
title="RX FFT",
peak_hold=False,
)
self.nb.GetPage(1).Add(self.wxgui_fftsink2_0.win)
self.uhd_usrp_source_0 = uhd.usrp_source(
",".join(("addr=192.168.10.4", "")),
uhd.stream_args(
cpu_format="fc32",
channels=range(1),
),
)
self.uhd_usrp_source_0.set_samp_rate(samp_rate)
self.uhd_usrp_source_0.set_center_freq(freq, 0)
self.uhd_usrp_source_0.set_gain(gain, 0)
self.uhd_usrp_source_0.set_antenna("J1", 0)
self.uhd_usrp_sink_1 = uhd.usrp_sink(
",".join(("addr=192.168.10.6", "")),
uhd.stream_args(
cpu_format="fc32",
channels=range(1),
),
)
self.uhd_usrp_sink_1.set_samp_rate(samp_rate)
self.uhd_usrp_sink_1.set_center_freq(freq, 0)
self.uhd_usrp_sink_1.set_gain(gain, 0)
self.trafficGenerator_Distribution_0 = trafficGenerator.Distribution(0, 100, 4, 5, int)
self.ieee802_15_4_rime_stack_0 = ieee802_15_4.rime_stack(([129]), ([131]), ([132]), ([23,42]))
self.ieee802_15_4_mac_0 = ieee802_15_4.mac(True)
self.ieee802_15_4_css_phy_1 = ieee802_15_4_css_phy(
bits_per_cw=c.bits_per_symbol,
chirp_seq=c.chirp_seq,
codewords=c.codewords,
intlv_seq=c.intlv_seq,
len_sub=38,
nbytes_payload=c.phy_packetsize_bytes,
nsamp_frame=c.nsamp_frame,
num_subchirps=c.n_subchirps,
nzeros_padding=c.padded_zeros,
phr=c.PHR,
preamble=c.preamble,
sfd=c.SFD,
sym_per_frame=c.nsym_frame,
threshold=0.95,
time_gap_1=c.time_gap_1,
time_gap_2=c.time_gap_2,
)
self._cur_freq_static_text = forms.static_text(
parent=self.GetWin(),
value=self.cur_freq,
callback=self.set_cur_freq,
label="Current center frequency",
converter=forms.float_converter(),
)
self.Add(self._cur_freq_static_text)
self.blocks_socket_pdu_0_0 = blocks.socket_pdu("UDP_SERVER", "", "52001", 10000, False)
self.blocks_null_sink_0 = blocks.null_sink(gr.sizeof_int*1)
self.blocks_message_strobe_0 = blocks.message_strobe(pmt.intern(text_msg), msg_interval)
##################################################
# Connections
##################################################
self.msg_connect((self.blocks_message_strobe_0, 'strobe'), (self.ieee802_15_4_rime_stack_0, 'bcin'))
self.msg_connect((self.blocks_socket_pdu_0_0, 'pdus'), (self.ieee802_15_4_rime_stack_0, 'bcin'))
self.msg_connect((self.ieee802_15_4_css_phy_1, 'rxout'), (self.ieee802_15_4_mac_0, 'pdu in'))
self.msg_connect((self.ieee802_15_4_mac_0, 'pdu out'), (self.ieee802_15_4_css_phy_1, 'txin'))
self.msg_connect((self.ieee802_15_4_mac_0, 'app out'), (self.ieee802_15_4_rime_stack_0, 'fromMAC'))
self.msg_connect((self.ieee802_15_4_rime_stack_0, 'bcout'), (self.blocks_socket_pdu_0_0, 'pdus'))
self.msg_connect((self.ieee802_15_4_rime_stack_0, 'toMAC'), (self.ieee802_15_4_mac_0, 'app in'))
self.connect((self.ieee802_15_4_css_phy_1, 0), (self.uhd_usrp_sink_1, 0))
self.connect((self.ieee802_15_4_css_phy_1, 1), (self.wxgui_scopesink2_3, 0))
self.connect((self.trafficGenerator_Distribution_0, 0), (self.blocks_null_sink_0, 0))
self.connect((self.uhd_usrp_source_0, 0), (self.ieee802_15_4_css_phy_1, 0))
self.connect((self.uhd_usrp_source_0, 0), (self.wxgui_fftsink2_0, 0))
self.connect((self.uhd_usrp_source_0, 0), (self.wxgui_scopesink2_2, 0))
self.connect((self.uhd_usrp_source_0, 0), (self.wxgui_waterfallsink2_0, 0))
def get_text_msg(self):
return self.text_msg
def set_text_msg(self, text_msg):
self.text_msg = text_msg
self.set_c(ieee802_15_4.css_phy(chirp_number=4, phy_packetsize_bytes=len(self.text_msg)+15))
self.blocks_message_strobe_0.set_msg(pmt.intern(self.text_msg))
def get_freq(self):
return self.freq
def set_freq(self, freq):
self.freq = freq
self.set_cur_freq(self.freq)
self._freq_chooser.set_value(self.freq)
self.uhd_usrp_sink_1.set_center_freq(self.freq, 0)
self.uhd_usrp_source_0.set_center_freq(self.freq, 0)
self.wxgui_fftsink2_0.set_baseband_freq(self.freq)
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.uhd_usrp_sink_1.set_samp_rate(self.samp_rate)
self.uhd_usrp_source_0.set_samp_rate(self.samp_rate)
self.wxgui_fftsink2_0.set_sample_rate(self.samp_rate)
self.wxgui_scopesink2_2.set_sample_rate(self.samp_rate)
self.wxgui_scopesink2_3.set_sample_rate(self.samp_rate)
self.wxgui_waterfallsink2_0.set_sample_rate(self.samp_rate)
def get_msg_interval(self):
return self.msg_interval
def set_msg_interval(self, msg_interval):
self.msg_interval = msg_interval
self._msg_interval_slider.set_value(self.msg_interval)
self._msg_interval_text_box.set_value(self.msg_interval)
self.blocks_message_strobe_0.set_period(self.msg_interval)
def get_gain(self):
return self.gain
def set_gain(self, gain):
self.gain = gain
self._gain_slider.set_value(self.gain)
self._gain_text_box.set_value(self.gain)
self.uhd_usrp_sink_1.set_gain(self.gain, 0)
self.uhd_usrp_source_0.set_gain(self.gain, 0)
def get_cur_freq(self):
return self.cur_freq
def set_cur_freq(self, cur_freq):
self.cur_freq = cur_freq
self._cur_freq_static_text.set_value(self.cur_freq)
def get_c(self):
return self.c
def set_c(self, c):
self.c = c
self.ieee802_15_4_css_phy_1.set_bits_per_cw(self.c.bits_per_symbol)
self.ieee802_15_4_css_phy_1.set_chirp_seq(self.c.chirp_seq)
self.ieee802_15_4_css_phy_1.set_codewords(self.c.codewords)
self.ieee802_15_4_css_phy_1.set_intlv_seq(self.c.intlv_seq)
self.ieee802_15_4_css_phy_1.set_nbytes_payload(self.c.phy_packetsize_bytes)
self.ieee802_15_4_css_phy_1.set_nsamp_frame(self.c.nsamp_frame)
self.ieee802_15_4_css_phy_1.set_num_subchirps(self.c.n_subchirps)
self.ieee802_15_4_css_phy_1.set_nzeros_padding(self.c.padded_zeros)
self.ieee802_15_4_css_phy_1.set_phr(self.c.PHR)
self.ieee802_15_4_css_phy_1.set_preamble(self.c.preamble)
self.ieee802_15_4_css_phy_1.set_sfd(self.c.SFD)
self.ieee802_15_4_css_phy_1.set_sym_per_frame(self.c.nsym_frame)
self.ieee802_15_4_css_phy_1.set_time_gap_1(self.c.time_gap_1)
self.ieee802_15_4_css_phy_1.set_time_gap_2(self.c.time_gap_2)
def main(top_block_cls=transceiver_CSS_USRP, options=None):
if gr.enable_realtime_scheduling() != gr.RT_OK:
print "Error: failed to enable real-time scheduling."
tb = top_block_cls()
tb.Start(True)
tb.Wait()
if __name__ == '__main__':
main()
| gpl-3.0 | 9,201,087,214,878,913,000 | 37.266476 | 112 | 0.579633 | false |
mozilla/badges.mozilla.org | badgus/badger_api/views.py | 2 | 3957 | import logging
import json
try:
from cStringIO import cStringIO as StringIO
except:
from StringIO import StringIO
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.http import (HttpResponseRedirect, HttpResponse,
HttpResponseForbidden, HttpResponseNotFound, Http404)
from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.views.decorators.csrf import csrf_exempt
from django.http.multipartparser import MultiPartParser
from django.utils.translation import ugettext_lazy as _
from valet_keys.decorators import accepts_valet_key
import badger.views
from badger.models import (Badge, Award, Nomination, Progress, DeferredAward,
BadgerException,
NominationApproveNotAllowedException,
NominationAcceptNotAllowedException,
BadgeAlreadyAwardedException,
BadgeAwardNotAllowedException)
from badger.utils import get_badge, award_badge
@accepts_valet_key
@csrf_exempt
def awards_list(request, *args, **kwargs):
"""Extend the django-badger awards list URL to offer a POST API"""
slug = kwargs.get('slug', None)
if "GET" == request.method or not slug:
# If GET or missing a slug, bail out to the original view
return badger.views.awards_list(request, *args, **kwargs)
if not request.valet_key:
return HttpResponseForbidden('Valid key required')
badge = get_object_or_404(Badge, slug=slug)
if not badge.allows_award_to(request.user):
return HttpResponseForbidden('Award forbidden')
(data, files, response) = _parse_request_data(request)
if response: return response
description = data.get('description', '')
emails = data.get('emails', [])
if not emails:
return _bad_request(_("email list is required"))
errors, successes = {}, {}
for email in emails:
try:
validate_email(email)
result = badge.award_to(email=email, awarder=request.user,
description=description,
raise_already_awarded=True)
if not result:
errors[email] = 'FAILED'
else:
if isinstance(result, Award):
successes[email] = 'AWARDED'
else:
successes[email] = 'INVITED'
except ValidationError, e:
errors[email] = "INVALID"
except BadgeAlreadyAwardedException, e:
errors[email] = "ALREADYAWARDED"
except Exception, e:
errors[email] = "EXCEPTION %s" % e
return _json_response(errors=errors, successes=successes)
def _parse_request_data(request):
# Try parsing one of the supported content types from the request
try:
content_type = request.META.get('CONTENT_TYPE', '')
if content_type.startswith('application/json'):
return (json.loads(request.body), None, None)
elif content_type.startswith('multipart/form-data'):
parser = MultiPartParser(request.META,
StringIO(request.body),
request.upload_handlers,
request.encoding)
data, files = parser.parse()
return (data, files, None)
else:
return (None, None,
_bad_request(_("Unsupported content-type: %s") %
content_type))
except Exception, e:
return (None, None,
_bad_request(_("Request parsing error: %s") % e))
def _bad_request(msg):
resp = HttpResponse()
resp.status_code = 400
resp.content = unicode(msg).encode('utf-8')
return resp
def _json_response(**out):
resp = HttpResponse(json.dumps(out))
resp['Content-Type'] = 'application/json'
return resp
| bsd-3-clause | -6,858,415,196,554,576,000 | 31.434426 | 77 | 0.625727 | false |
Distrotech/scons | build/scons/engine/SCons/Tool/ipkg.py | 2 | 2509 | """SCons.Tool.ipkg
Tool-specific initialization for ipkg.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
The ipkg tool calls ipkg-build. Its only argument should be the
package's fake_root.
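A minimal SConscript usage sketch (the target name and fake_root path are
illustrative assumptions, not part of this module):
    env = Environment(tools=['ipkg'])
    env.Ipkg('mypackage_1.0_arm.ipk', 'build/fake_root')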
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/ipkg.py 2014/01/04 01:12:18 root"
import os
import SCons.Builder
def generate(env):
"""Add Builders and construction variables for ipkg to an Environment."""
try:
bld = env['BUILDERS']['Ipkg']
except KeyError:
bld = SCons.Builder.Builder( action = '$IPKGCOM',
suffix = '$IPKGSUFFIX',
source_scanner = None,
target_scanner = None)
env['BUILDERS']['Ipkg'] = bld
env['IPKG'] = 'ipkg-build'
env['IPKGCOM'] = '$IPKG $IPKGFLAGS ${SOURCE}'
env['IPKGUSER'] = os.popen('id -un').read().strip()
env['IPKGGROUP'] = os.popen('id -gn').read().strip()
env['IPKGFLAGS'] = SCons.Util.CLVar('-o $IPKGUSER -g $IPKGGROUP')
env['IPKGSUFFIX'] = '.ipk'
def exists(env):
return env.Detect('ipkg-build')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | -6,297,349,418,761,261,000 | 36.447761 | 113 | 0.682742 | false |
jestapinski/oppia | core/controllers/incoming_emails_test.py | 5 | 4102 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the incoming email handler."""
from core.domain import feedback_services
from core.platform import models
from core.tests import test_utils
import feconf
(feedback_models, email_models) = models.Registry.import_models([
models.NAMES.feedback, models.NAMES.email])
class IncomingReplyEmailTests(test_utils.GenericTestBase):
USER_A_EMAIL = '[email protected]'
USER_B_EMAIL = '[email protected]'
def setUp(self):
super(IncomingReplyEmailTests, self).setUp()
self.signup(self.USER_A_EMAIL, 'A')
self.user_id_a = self.get_user_id_from_email(self.USER_A_EMAIL)
self.signup(self.USER_B_EMAIL, 'B')
self.user_id_b = self.get_user_id_from_email(self.USER_B_EMAIL)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.exploration = self.save_new_default_exploration(
'A', self.editor_id, 'Title')
self.can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
self.can_send_feedback_email_ctx = self.swap(
feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', True)
def test_that_reply_emails_are_added_to_thread(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
# Create thread.
feedback_services.create_thread(
self.exploration.id, 'a_state_name', self.user_id_a,
'a subject', 'some text')
threadlist = feedback_services.get_all_threads(
self.exploration.id, False)
thread_id = threadlist[0].get_thread_id()
# Create another message.
feedback_services.create_message(
self.exploration.id, thread_id, self.user_id_b, None, None,
'user b message')
# Check that there are 2 messages in thread.
messages = feedback_services.get_messages(
self.exploration.id, thread_id)
self.assertEqual(len(messages), 2)
# Check that received_via_email is set to False.
self.assertFalse(messages[0].received_via_email)
# Get reply_to id for user A.
model = email_models.FeedbackEmailReplyToIdModel.get(
self.user_id_a, self.exploration.id, thread_id)
recipient_email = 'reply+%s@%s' % (
model.reply_to_id, feconf.INCOMING_EMAILS_DOMAIN_NAME)
# Send email to Oppia.
self.post_email(
str(recipient_email), self.USER_A_EMAIL, 'feedback email reply',
'New reply')
# Check that new message is added.
messages = feedback_services.get_messages(
self.exploration.id, thread_id)
self.assertEqual(len(messages), 3)
# Check content of message is correct.
msg = messages[-1]
self.assertEqual(msg.text, 'New reply')
self.assertEqual(msg.author_id, self.user_id_a)
self.assertTrue(msg.received_via_email)
def test_that_assertion_is_raised_for_fake_reply_to_id(self):
# Generate reply email.
recipient_email = 'reply+%s@%s' % (
'fake_id', feconf.INCOMING_EMAILS_DOMAIN_NAME)
# Send email to Oppia.
self.post_email(
recipient_email, self.USER_A_EMAIL, 'feedback email reply',
'New reply', expect_errors=True, expected_status_int=404)
| apache-2.0 | -2,695,697,959,131,080,700 | 40.02 | 80 | 0.630912 | false |
mateuszmidor/GumtreeOnMap | src/geocoderwithcache.py | 1 | 1376 | '''
Created on 03-08-2014
@author: mateusz
'''
from geocoder import Geocoder
from injectdependency import InjectDependency, Inject
@InjectDependency('logger')
class GeocoderWithCache():
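    """Wraps a geocoder with an in-memory cache: coordinates are stored per
    normalized (lower-cased) address, so repeated lookups skip the remote geocoder call."""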
logger = Inject
def __init__(self, geocoder=Geocoder, storage=dict()):
self.geocoder = geocoder
self.addressCache = storage
def getCoordinates(self, address):
address = self.normalize(address)
if (self.cachedAddress(address)):
return self.getCachedCoordsForAddress(address)
else:
coordinates = self.geocoder.getCoordinates(address)
self.addressCache[address] = coordinates
return coordinates
def registerAddress(self, address, coordinates):
address = self.normalize(address)
self.addressCache[address] = coordinates
self.logger.info("Registered new address: " + address + " " + str(coordinates))
def normalize(self, address):
return address.lower()
def cachedAddress(self, address):
return (address in self.addressCache)
def getCachedCoordsForAddress(self, address):
coordinates = self.addressCache[address]
self.logger.info("Fetched address from cache: " + address + " " + str(coordinates))
return coordinates
| gpl-2.0 | 2,080,086,836,232,755,000 | 30.809524 | 91 | 0.62936 | false |
dajohnso/cfme_tests | artifactor/__init__.py | 2 | 12258 | """
Artifactor
Artifactor is used to collect artifacts from a number of different plugins and put them into
one place. Artifactor works around a series of events and is geared towards unit testing, though
it is extensible and customizable enough that it can be used for a variety of purposes.
The main guts of Artifactor is around the plugins. Before Artifactor can do anything it must have
a configured plugin. This plugin is then configured to bind certain functions inside itself
to certain events. When Artifactor is triggered to handle a certain event, it will tell the plugin
that that particular event has happened and the plugin will respond accordingly.
In addition to the plugins, Artifactor can also run certain callback functions before and after
the hook function itself. These are call pre and post hook callbacks. Artifactor allows multiple
pre and post hook callbacks to be defined per event, but does not guarantee the order that they
are executed in.
To allow data to be passed to and from hooks, Artifactor has the idea of global and event local
values. The global values persist in the Artifactor instance for its lifetime, but the event local
values are destroyed at the end of each event.
Let's take the example of using the unit testing suite py.test as an example for Artifactor.
Suppose we have a number of tests that run as part of a test suite and we wish to store a text
file that holds the time the test was run and its result. This information is required to reside
in a folder that is relevant to the test itself. This type of job is what Artifactor was designed
for.
To begin with, we need to create a plugin for Artifactor. Consider the following piece of code::
from artifactor import ArtifactorBasePlugin
import time
class Test(ArtifactorBasePlugin):
def plugin_initialize(self):
self.register_plugin_hook('start_test', self.start_test)
self.register_plugin_hook('finish_test', self.finish_test)
def start_test(self, test_name, test_location, artifact_path):
filename = artifact_path + "-" + self.ident + ".log"
with open(filename, "w") as f:
f.write(test_name + "\n")
f.write(str(time.time()) + "\n")
def finish_test(self, test_name, artifact_path, test_result):
filename = artifact_path + "-" + self.ident + ".log"
with open(filename, "w+") as f:
f.write(test_result)
This is a typical plugin in Artifactor, it consists of 2 things. The first item is
the special function called ``plugin_initialize()``. This is important
and is equivilent to the ``__init__()`` that would usually be found in a class definition.
Artifactor calls ``plugin_initialize()`` for each plugin as it loads it.
Inside this section we register the hook functions to their associated events. Each event
can only have a single function associated with it. Event names are able to be freely assigned
so you can customize plugins to work to specific events for your use case.
The ``register_plugin_hook()`` takes an event name as a string and a function to callback when
that event is experienced.
Next we have the hook functions themselves, ``start_test()`` and ``finish_test()``. These
have arguments in their prototypes and these arguments are supplied by Artifactor and are
created either as arguments to the ``fire_hook()`` function, which is responsible for actually
telling Artifactor that an event has occurred, or they are created in the pre hook script.
Artifactor uses the global and local values referenced earlier to store these argument values.
When a pre, post or hook callback finishes, it has the opportunity to supply updates to both
the global and local values dictionaries. In doing this, a pre-hook script can prepare data,
which could be stored in the locals dictionary and then passed to the actual plugin hook
as a keyword argument. Local values override global values.
We need to look at an example of this, but first we must configure artifactor and the plugin::
log_dir: /home/me/artiout
per_run: run #test, run, None
overwrite: True
artifacts:
test:
enabled: True
plugin: test
Here we have defined a ``log_dir`` which will be the root of all of our artifacts. We have asked
Artifactor to group the artifacts by run, which means that it will try to create a directory
under the ``log_dir`` which indicates which test "run" this was. We can also specify a value of
"test" here, which will move the test run identifying folder up to the leaf in the tree.
The ``log_dir`` and contents of the config are stored in global values as ``log_dir`` and
``artifactor_config`` respectively. These are the only two global values which are setup by
Artifactor.
This data is then passed to artifactor as a dict, we will assume a variable name of ``config`` here.
Let's consider how we would run this test::
    art = artifactor.artifactor
    art.set_config(config)
    art.register_plugin(test.Test, "test")
    artifactor.initialize(art)
    art.fire_hook('start_session', run_id=2235)
    art.fire_hook('start_test', test_name="my_test", test_location="tests/mytest.py")
    art.fire_hook('finish_test', test_name="my_test", test_location="tests/mytest.py",
                  test_result="FAILED")
    art.fire_hook('finish_session')
The ``art.register_plugin()`` call is used to bind a plugin name to a class definition. Notice in the config
section earlier, we have a ``plugin: test`` field. This name ``test`` is what Artifactor will
look for when trying to find the appropriate plugin. When we register the plugin with the
``register_plugin`` function, we take the ``test.Test`` class and essentially give it the name
``test`` so that the names will tie up and the plugin will be used.
Notice that we have sent some information along with the request to fire the hook. Ignoring the
``start_session`` event for a minute, the ``start_test`` event sends a ``test_name`` and a
``test_location``. However, the ``start_test`` hook also requires an argument called
``artifact_path``. This is not supplied by the hook, and isn't set up as a global value, so how does
it get there?
Inside Artifactor, by default, a pre_hook callback called ``start_test()`` is bound to the
``start_test`` event. This callback returns a local values update which includes ``artifact_path``.
This is how the artifact_path is returned. This hook can be removed, by running a
``unregister_hook_callback`` with the name of the hook callback.
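As a rough sketch of this callback convention (the callback name and the log file
suffix here are made up for illustration), a pre hook callback returns a pair of
dict updates, mirroring the built-in ``parse_setup_dir`` callback defined later in
this module::
    def my_pre_callback(test_name, log_dir):
        # Prepare the value that the plugin hook expects as a keyword argument.
        return {'artifact_path': os.path.join(log_dir, test_name + '.log')}, None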
"""
import logging
import os
import re
import sys
from py.path import local
from riggerlib import Rigger, RiggerBasePlugin, RiggerClient
from utils.net import random_port
from utils.path import log_path
class Artifactor(Rigger):
"""A sub from Rigger"""
def set_config(self, config):
self.config = config
def parse_config(self):
"""
Reads the config data and sets up values
"""
if not self.config:
return False
self.log_dir = local(self.config.get('log_dir', log_path))
self.log_dir.ensure(dir=True)
self.artifact_dir = local(self.config.get('artifact_dir', log_path.join('artifacts')))
self.artifact_dir.ensure(dir=True)
self.logger = create_logger('artifactor', self.log_dir.join('artifactor.log').strpath)
self.squash_exceptions = self.config.get('squash_exceptions', False)
if not self.log_dir:
print("!!! Log dir must be specified in yaml")
sys.exit(127)
if not self.artifact_dir:
print("!!! Artifact dir must be specified in yaml")
sys.exit(127)
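        # Store a ZMQ socket address on a random free local port in the config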
self.config['zmq_socket_address'] = 'tcp://127.0.0.1:{}'.format(random_port())
self.setup_plugin_instances()
self.start_server()
self.global_data = {
'artifactor_config': self.config,
'log_dir': self.log_dir.strpath,
'artifact_dir': self.artifact_dir.strpath,
'artifacts': dict(),
'old_artifacts': dict()
}
def handle_failure(self, exc):
self.logger.error("exception", exc_info=exc)
def log_message(self, message):
self.logger.debug(message)
class ArtifactorClient(RiggerClient):
pass
class ArtifactorBasePlugin(RiggerBasePlugin):
"""A sub from RiggerBasePlugin"""
@property
def store(self):
if not hasattr(self, '_store'):
self._store = {}
return self._store
def initialize(artifactor):
artifactor.parse_config()
artifactor.register_hook_callback('pre_start_test', 'pre', parse_setup_dir,
name="default_start_test")
artifactor.register_hook_callback('start_test', 'pre', parse_setup_dir,
name="default_start_test")
artifactor.register_hook_callback('finish_test', 'pre', parse_setup_dir,
name="default_finish_test")
artifactor.register_hook_callback('start_session', 'pre', start_session,
name="default_start_session")
artifactor.register_hook_callback('build_report', 'pre', merge_artifacts,
name="merge_artifacts")
artifactor.register_hook_callback('finish_session', 'pre', merge_artifacts,
name="merge_artifacts")
artifactor.initialized = True
def start_session(run_id=None):
"""
Convenience fire_hook for built in hook
"""
return None, {'run_id': run_id}
def merge_artifacts(old_artifacts, artifacts):
"""
    This is extremely important and merges the old_artifacts from a composite-uncollect build
    with the new artifacts for this run
"""
old_artifacts.update(artifacts)
return {'old_artifacts': old_artifacts}, None
def parse_setup_dir(test_name, test_location, artifactor_config, artifact_dir, run_id):
"""
Convenience fire_hook for built in hook
"""
if test_name and test_location:
run_type = artifactor_config.get('per_run')
overwrite = artifactor_config.get('reuse_dir', False)
path = setup_artifact_dir(root_dir=artifact_dir, test_name=test_name,
test_location=test_location, run_type=run_type,
run_id=run_id, overwrite=overwrite)
else:
raise Exception('Not enough information to create artifact')
return {'artifact_path': path}, None
def setup_artifact_dir(root_dir=None, test_name=None, test_location=None,
run_type=None, run_id=None, overwrite=True):
"""
Sets up the artifact dir and returns it.
"""
test_name = re.sub(r"[^a-zA-Z0-9_.\-\[\]]", "_", test_name)
test_name = re.sub(r"[/]", "_", test_name)
test_name = re.sub(r"__+", "_", test_name)
orig_path = os.path.abspath(root_dir)
if run_id:
run_id = str(run_id)
if run_type == "run" and run_id:
path = os.path.join(orig_path, run_id, test_location, test_name)
elif run_type == "test" and run_id:
path = os.path.join(orig_path, test_location, test_name, run_id)
else:
path = os.path.join(orig_path, test_location, test_name)
try:
os.makedirs(path)
except OSError as e:
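        # errno 17 is EEXIST: the directory already exists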
if e.errno == 17:
if overwrite:
pass
else:
print("Directories already existed and overwrite is set to False")
sys.exit(127)
else:
raise
return path
def create_logger(logger_name, filename):
"""Creates and returns the named logger
    If the logger already exists, its handlers will be removed and recreated
    with the current config in env.yaml
"""
# If the logger already exists, reset its handlers
logger = logging.getLogger(logger_name)
    for handler in logger.handlers[:]:
logger.removeHandler(handler)
log_file = filename
file_formatter = logging.Formatter('%(asctime)-15s [%(levelname).1s] %(message)s')
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(file_formatter)
logger.addHandler(file_handler)
logger.setLevel('DEBUG')
return logger
| gpl-2.0 | -3,901,482,880,227,899,000 | 40.412162 | 100 | 0.678006 | false |