# Source: aarsan/azure-quickstart-templates (hortonworks-on-centos/scripts/vm-bootstrap.py)
#
# vm-bootstrap.py
#
# This script is used to prepare VMs launched via HDP Cluster Install Blade on Azure.
#
# Parameters passed from the bootstrap script invocation by the controller (shown in the parameter order).
# Required parameters:
# action: "bootstrap" to set up VM and initiate cluster deployment. "check" for checking on cluster deployment status.
# cluster_id: user-specified name of the cluster
# admin_password: password for the Ambari "admin" user
# Required parameters for "bootstrap" action:
# scenario_id: "evaluation" or "standard"
# num_masters: number of masters in the cluster
# num_workers: number of workers in the cluster
# master_prefix: hostname prefix for master hosts (master hosts are named <cluster_id>-<master_prefix>-<id>)
# worker_prefix: hostname prefix for worker hosts (worker hosts are named <cluster_id>-<worker_prefix>-<id>)
# domain_name: the domain name part of the hosts, starting with a period (e.g., .cloudapp.net)
# id_padding: number of digits for the host <id> (e.g., 2 uses <id> like 01, 02, .., 10, 11)
# masters_iplist: comma-delimited list of the masters' local IPv4 addresses, sorted from master_01 to master_XX
# workers_iplist: comma-delimited list of the workers' local IPv4 addresses, sorted from worker_01 to worker_XX
# Required parameters for "check" action:
# --check_timeout_seconds:
# the number of seconds after which the script is required to exit
# --report_timeout_fail:
# if "true", exit code 1 is returned in case deployment has failed, or deployment has not finished after
# check_timeout_seconds
# if "false", exit code 0 is returned if deployment has finished successfully, or deployment has not finished after
# check_timeout_seconds
# Optional:
# protocol: if "https" (default), https:8443 is used for Ambari. Otherwise, Ambari uses http:8080
from optparse import OptionParser
import base64
import json
import logging
import os
import pprint
import re
import socket
import sys
import time
import urllib2
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler('/tmp/vm-bootstrap.log')
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info('Starting VM Bootstrap...')
parser = OptionParser()
parser.add_option("--cluster_id", type="string", dest="cluster_id")
parser.add_option("--scenario_id", type="string", dest="scenario_id", default="evaluation")
parser.add_option("--num_masters", type="int", dest="num_masters")
parser.add_option("--num_workers", type="int", dest="num_workers")
parser.add_option("--master_prefix", type="string", dest="master_prefix")
parser.add_option("--worker_prefix", type="string", dest="worker_prefix")
parser.add_option("--domain_name", type="string", dest="domain_name")
parser.add_option("--id_padding", type="int", dest="id_padding", default=2)
parser.add_option("--admin_password", type="string", dest="admin_password", default="admin")
parser.add_option("--masters_iplist", type="string", dest="masters_iplist")
parser.add_option("--workers_iplist", type="string", dest="workers_iplist")
parser.add_option("--protocol", type="string", dest="protocol", default="https")
parser.add_option("--action", type="string", dest="action", default="bootstrap")
parser.add_option("--check_timeout_seconds", type="int", dest="check_timeout_seconds", default="250")
parser.add_option("--report_timeout_fail", type="string", dest="report_timeout_fail", default="false")
(options, args) = parser.parse_args()
cluster_id = options.cluster_id
scenario_id = options.scenario_id.lower()
num_masters = options.num_masters
num_workers = options.num_workers
master_prefix = options.master_prefix
worker_prefix = options.worker_prefix
domain_name = options.domain_name
id_padding = options.id_padding
admin_password = options.admin_password
masters_iplist = options.masters_iplist
workers_iplist = options.workers_iplist
protocol = options.protocol
action = options.action
check_timeout_seconds = options.check_timeout_seconds
report_timeout_fail = options.report_timeout_fail.lower() == "true"
logger.info('action=' + action)
admin_username = 'admin'
current_admin_password = 'admin'
request_timeout = 30
port = '8443' if (protocol == 'https') else '8080'
http_handler = urllib2.HTTPHandler(debuglevel=1)
opener = urllib2.build_opener(http_handler)
urllib2.install_opener(opener)
class TimeoutException(Exception):
pass
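# Builds the value of an HTTP Basic "Authorization" header from the current Ambari admin
# credentials (base64 of "user:password", with the newline added by encodestring stripped).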
def get_ambari_auth_string():
return 'Basic ' + base64.encodestring('%s:%s' % (admin_username, current_admin_password)).replace('\n', '')
def run_system_command(command):
os.system(command)
def get_hostname(id):
if id <= num_masters:
return master_prefix + str(id).zfill(id_padding)
else:
return worker_prefix + str(id - num_masters).zfill(id_padding)
def get_fqdn(id):
return get_hostname(id) + domain_name
def get_host_ip(hostname):
if (hostname.startswith(master_prefix)):
return masters_iplist[int(hostname.split('-')[-1]) -1]
else:
return workers_iplist[int(hostname.split('-')[-1]) -1]
def get_host_ip_map(hostnames):
host_ip_map = {}
for hostname in hostnames:
num_tries = 0
ip = None
while ip is None and num_tries < 5:
try:
ip = get_host_ip(hostname)
# ip = socket.gethostbyname(hostname)
except:
time.sleep(1)
num_tries = num_tries + 1
continue
if ip is None:
logger.error('Failed to look up ip address for ' + hostname)
raise Exception('Failed to look up ip address for ' + hostname)
else:
logger.info(hostname + ' resolved to ' + ip)
host_ip_map[hostname] = ip
return host_ip_map
def update_etc_hosts(host_ip_map):
logger.info('Adding entries to /etc/hosts file...')
with open("/etc/hosts", "a") as file:
for host in sorted(host_ip_map):
file.write('%s\t%s\t%s\n' % (host_ip_map[host], host + domain_name, host))
logger.info('Finished updating /etc/hosts')
def update_ambari_agent_ini(ambari_server_hostname):
logger.info('Updating ambari-agent.ini file...')
command = 'sed -i s/hostname=localhost/hostname=%s/ /etc/ambari-agent/conf/ambari-agent.ini' % ambari_server_hostname
logger.info('Executing command: ' + command)
run_system_command(command)
logger.info('Finished updating ambari-agent.ini file')
def patch_ambari_agent():
logger.info('Patching ambari-agent to prevent rpmdb corruption...')
logger.info('Finished patching ambari-agent')
def enable_https():
command = """
printf 'api.ssl=true\nclient.api.ssl.cert_name=https.crt\nclient.api.ssl.key_name=https.key\nclient.api.ssl.port=8443' >> /etc/ambari-server/conf/ambari.properties
mkdir /root/ambari-cert
cd /root/ambari-cert
# create server.crt and server.key (self-signed)
openssl genrsa -out server.key 2048
openssl req -new -key server.key -out server.csr -batch
openssl x509 -req -days 365 -in server.csr -signkey server.key -out server.crt
echo PulUuMWPp0o4Lq6flGA0NGDKNRZQGffW2mWmJI3klSyspS7mUl > pass.txt
cp pass.txt passin.txt
# encrypts server.key with des3 as server.key.secured with the specified password
openssl rsa -in server.key -des3 -out server.key.secured -passout file:pass.txt
# creates /tmp/https.keystore.p12
openssl pkcs12 -export -in 'server.crt' -inkey 'server.key.secured' -certfile 'server.crt' -out '/var/lib/ambari-server/keys/https.keystore.p12' -password file:pass.txt -passin file:passin.txt
mv pass.txt /var/lib/ambari-server/keys/https.pass.txt
cd ..
rm -rf /root/ambari-cert
"""
run_system_command(command)
def set_admin_password(new_password, timeout):
logger.info('Setting admin password...')
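# NOTE: this function only logs; the password change itself is not performed here.
# A real implementation would likely call the Ambari REST API (an assumption, not taken
# from this script), e.g. PUT %s://localhost:%s/api/v1/users/admin with a JSON body
# {"Users": {"password": new_password, "old_password": current_admin_password}},
# retrying until the server accepts the request or `timeout` seconds elapse.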
def poll_until_all_agents_registered(num_hosts, timeout):
url = '%s://localhost:%s/api/v1/hosts' % (protocol, port)
logger.info('poll until all agents')
all_hosts_registered = False
start_time = time.time()
while time.time() - start_time < timeout:
request = urllib2.Request(url)
request.add_header("Authorization", get_ambari_auth_string())
try:
result = urllib2.urlopen(request, timeout=request_timeout).read()
pprint.pprint(result)
if (result is not None):
jsonResult = json.loads(result)
if len(jsonResult['items']) >= num_hosts:
all_hosts_registered = True
break
except :
logger.exception('Could not poll agent status from the server.')
time.sleep(5)
if not all_hosts_registered:
raise Exception('Timed out while waiting for all agents to register')
def is_ambari_server_host():
hostname = socket.getfqdn()
hostname = hostname.split('.')[0]
logger.info(hostname)
logger.info('Checking ambari host')
logger.info(ambari_server_hostname)
return hostname == ambari_server_hostname
def create_blueprint(scenario_id):
blueprint_name = 'myblueprint'
logger.info('Creating blueprint for scenario %s' % scenario_id)
url = '%s://localhost:%s/api/v1/blueprints/%s' % (protocol, port, blueprint_name)
evaluation_host_groups = [
{
"name" : "master_1",
"components" : [
{
"name" : "AMBARI_SERVER"
},
{
"name" : "DRPC_SERVER"
},
{
"name" : "HIVE_SERVER"
},
{
"name" : "MYSQL_SERVER"
},
{
"name" : "NIMBUS"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "SPARK_JOBHISTORYSERVER"
},
{
"name" : "STORM_UI_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_2",
"components" : [
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "FALCON_SERVER"
},
{
"name" : "HBASE_MASTER"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "HIVE_METASTORE"
},
{
"name" : "KAFKA_BROKER"
},
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "NAMENODE"
},
{
"name" : "OOZIE_SERVER"
},
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "WEBHCAT_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "workers",
"components" : [
{
"name" : "DATANODE"
},
{
"name" : "HBASE_REGIONSERVER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "SUPERVISOR"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "3"
}
]
small_host_groups = [
{
"name" : "master_1",
"components" : [
{
"name" : "AMBARI_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_2",
"components" : [
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "NAMENODE"
},
{
"name" : "NIMBUS"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_3",
"components" : [
{
"name" : "DRPC_SERVER"
},
{
"name" : "FALCON_SERVER"
},
{
"name" : "HBASE_MASTER"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "HIVE_METASTORE"
},
{
"name" : "KAFKA_BROKER"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_SERVER"
},
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "WEBHCAT_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_4",
"components" : [
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "HIVE_SERVER"
},
{
"name" : "MYSQL_SERVER"
},
{
"name" : "SPARK_JOBHISTORYSERVER"
},
{
"name" : "STORM_UI_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "workers",
"components" : [
{
"name" : "DATANODE"
},
{
"name" : "HBASE_REGIONSERVER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "SUPERVISOR"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "9"
}
]
medium_host_groups = [
{
"name" : "master_1",
"components" : [
{
"name" : "AMBARI_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_2",
"components" : [
{
"name" : "DRPC_SERVER"
},
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "NAMENODE"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_3",
"components" : [
{
"name" : "HIVE_SERVER"
},
{
"name" : "SUPERVISOR"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_4",
"components" : [
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "HIVE_SERVER"
},
{
"name" : "MYSQL_SERVER"
},
{
"name" : "SPARK_JOBHISTORYSERVER"
},
{
"name" : "STORM_UI_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "workers",
"components" : [
{
"name" : "DATANODE"
},
{
"name" : "HBASE_REGIONSERVER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "SUPERVISOR"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "99"
}
]
large_host_groups = [
{
"name" : "master_1",
"components" : [
{
"name" : "AMBARI_SERVER"
},
{
"name" : "KAFKA_BROKER"
},
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_2",
"components" : [
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "NAMENODE"
},
{
"name" : "NIMBUS"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_3",
"components" : [
{
"name" : "DRPC_SERVER"
},
{
"name" : "FALCON_SERVER"
},
{
"name" : "HBASE_MASTER"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "HIVE_METASTORE"
},
{
"name" : "KAFKA_BROKER"
},
{
"name" : "OOZIE_SERVER"
},
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "WEBHCAT_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_4",
"components" : [
{
"name" : "HIVE_METASTORE"
},
{
"name" : "MYSQL_SERVER"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "SPARK_JOBHISTORYSERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_5",
"components" : [
{
"name" : "NODEMANAGER"
},
{
"name" : "OOZIE_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_6",
"components" : [
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "WEBHCAT_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_7",
"components" : [
{
"name" : "HBASE_MASTER"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_8",
"components" : [
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "FALCON_SERVER"
},
{
"name" : "NIMBUS"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "workers",
"components" : [
{
"name" : "DATANODE"
},
{
"name" : "HBASE_REGIONSERVER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "200"
}
]
if scenario_id == 'evaluation':
host_groups = evaluation_host_groups
elif scenario_id == 'small':
host_groups = small_host_groups
elif scenario_id == 'medium':
host_groups = medium_host_groups
elif scenario_id == 'large':
host_groups = large_host_groups
host_groups = evaluation_host_groups if scenario_id == 'evaluation' else small_host_groups
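# NOTE: the assignment above unconditionally overrides the if/elif selection: any scenario
# other than 'evaluation' (e.g. the documented 'standard' scenario) ends up with the
# small host group layout.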
evaluation_configurations = [
{
"ams-hbase-env" : {
"properties" : {
"hbase_master_heapsize" : "512m",
"hbase_regionserver_heapsize" : "512m",
"hbase_regionserver_xmn_max" : "256m",
"hbase_regionserver_xmn_ratio" : "0.2"
}
}
},
{
"capacity-scheduler" : {
"yarn.scheduler.capacity.root.default.maximum-am-resource-percent" : "0.5",
"yarn.scheduler.capacity.maximum-am-resource-percent" : "0.5"
}
},
{
"cluster-env": {
"cluster_name": "sandbox",
"smokeuser": "ambari-qa",
"user_group": "hadoop",
"security_enabled": "false"
}
},
{
"core-site" : {
"hadoop.proxyuser.hue.hosts" : "*",
"hadoop.proxyuser.hue.groups" : "*",
"hadoop.proxyuser.root.hosts" : "*",
"hadoop.proxyuser.root.groups" : "*",
"hadoop.proxyuser.oozie.hosts" : "*",
"hadoop.proxyuser.oozie.groups" : "*",
"hadoop.proxyuser.hcat.hosts" : "*",
"hadoop.proxyuser.hcat.groups" : "*"
}
},
{
"hadoop-env": {
"dtnode_heapsize" : "250",
"hadoop_heapsize" : "250",
"namenode_heapsize" : "250",
"namenode_opt_newsize": "50",
"namenode_opt_maxnewsize": "100"
}
},
{
"hbase-site" : {
"hbase.security.authorization": "true",
"hbase.rpc.engine": "org.apache.hadoop.hbase.ipc.SecureRpcEngine",
"hbase_master_heapsize": "250",
"hbase_regionserver_heapsize": "250",
"hbase.rpc.protection": "PRIVACY"
}
},
{
"hdfs-site" : {
"dfs.block.size" : "34217472",
"dfs.replication" : "1",
"dfs.namenode.accesstime.precision" : "3600000",
"dfs.nfs3.dump.dir" : "/tmp/.hdfs-nfs",
"dfs.nfs.exports.allowed.hosts" : "* rw",
"dfs.datanode.max.xcievers" : "1024",
"dfs.block.access.token.enable" : "false",
"dfs.datanode.data.dir": "/disks/0/hadoop/hdfs/data,/disks/1/hadoop/hdfs/data,/disks/2/hadoop/hdfs/data,/disks/3/hadoop/hdfs/data,/disks/4/hadoop/hdfs/data,/disks/5/hadoop/hdfs/data,/disks/6/hadoop/hdfs/data,/disks/7/hadoop/hdfs/data,/disks/8/hadoop/hdfs/data,/disks/9/hadoop/hdfs/data,/disks/10/hadoop/hdfs/data,/disks/11/hadoop/hdfs/data,/disks/12/hadoop/hdfs/data,/disks/13/hadoop/hdfs/data,/disks/14/hadoop/hdfs/data,/disks/15/hadoop/hdfs/data",
"dfs.namenode.checkpoint.dir": "/disks/0/hadoop/hdfs/namesecondary",
"dfs.namenode.name.dir": "/disks/0/hadoop/hdfs/namenode,/disks/1/hadoop/hdfs/namenode,/disks/2/hadoop/hdfs/namenode,/disks/3/hadoop/hdfs/namenode,/disks/4/hadoop/hdfs/namenode,/disks/5/hadoop/hdfs/namenode,/disks/6/hadoop/hdfs/namenode,/disks/7/hadoop/hdfs/namenode",
"dfs.datanode.failed.volumes.tolerated": "6"
}
},
{
"global": {
"oozie_data_dir": "/disks/0/hadoop/oozie/data",
"zk_data_dir": "/disks/0/hadoop/zookeeper",
"falcon.embeddedmq.data": "/disks/0/hadoop/falcon/embeddedmq/data",
"falcon_local_dir": "/disks/0/hadoop/falcon",
"namenode_heapsize" : "16384m"
}
},
{
"hive-site" : {
"javax.jdo.option.ConnectionPassword" : "hive",
"hive.tez.container.size" : "250",
"hive.tez.java.opts" : "-server -Xmx200m -Djava.net.preferIPv4Stack=true",
"hive.heapsize" : "250",
"hive.users.in.admin.role" : "hue,hive",
"hive_metastore_user_passwd" : "hive",
"hive.server2.enable.impersonation": "true",
"hive.compactor.check.interval": "300s",
"hive.compactor.initiator.on": "true",
"hive.compactor.worker.timeout": "86400s",
"hive.enforce.bucketing": "true",
"hive.support.concurrency": "true",
"hive.exec.dynamic.partition.mode": "nonstrict",
"hive.server2.enable.doAs": "true",
"hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager",
"hive.txn.max.open.batch": "1000",
"hive.txn.timeout": "300",
"hive.security.authorization.enabled": "false",
"hive.users.in.admin.role": "hue,hive",
"hive.metastore.uris" : "thrift://%HOSTGROUP::master_2%:9083"
}
},
{
"mapred-env": {
"jobhistory_heapsize" : "250"
}
},
{
"mapred-site" : {
"mapreduce.map.memory.mb" : "250",
"mapreduce.reduce.memory.mb" : "250",
"mapreduce.task.io.sort.mb" : "64",
"yarn.app.mapreduce.am.resource.mb" : "250",
"yarn.app.mapreduce.am.command-opts" : "-Xmx200m",
"mapred.job.reduce.memory.mb" : "250",
"mapred.child.java.opts" : "-Xmx200m",
"mapred.job.map.memory.mb" : "250",
"io.sort.mb" : "64",
"mapreduce.map.java.opts" : "-Xmx200m",
"mapreduce.reduce.java.opts" : "-Xmx200m"
}
},
{
"oozie-site" : {
"oozie.service.ProxyUserService.proxyuser.hue.hosts" : "*",
"oozie.service.ProxyUserService.proxyuser.hue.groups" : "*",
"oozie.service.ProxyUserService.proxyuser.falcon.hosts": "*",
"oozie.service.ProxyUserService.proxyuser.falcon.groups": "*",
"oozie.service.JPAService.jdbc.password" : "oozie"
}
},
{
"storm-site" : {
"logviewer.port" : 8005,
"nimbus.childopts" : "-Xmx220m -javaagent:/usr/hdp/current/storm-client/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=sandbox.hortonworks.com,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-client/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM",
"ui.childopts" : "-Xmx220m",
"drpc.childopts" : "-Xmx220m"
}
},
{
"tez-site" : {
"tez.am.java.opts" : "-server -Xmx200m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC",
"tez.am.resource.memory.mb" : "250",
"tez.dag.am.resource.memory.mb" : "250",
"yarn.app.mapreduce.am.command-opts" : "-Xmx200m"
}
},
{
"webhcat-site" : {
"webhcat.proxyuser.hue.hosts" : "*",
"webhcat.proxyuser.hue.groups" : "*",
"webhcat.proxyuser.hcat.hosts" : "*",
"webhcat.proxyuser.hcat.groups" : "*",
"templeton.hive.properties" : "hive.metastore.local=false,hive.metastore.uris=thrift://sandbox.hortonworks.com:9083,hive.metastore.sasl.enabled=false,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse"
}
},
{
"yarn-env": {
"apptimelineserver_heapsize" : "250",
"resourcemanager_heapsize" : "250",
"nodemanager_heapsize" : "250",
"yarn_heapsize" : "250"
}
},
{
"yarn-site" : {
"yarn.nodemanager.resource.memory-mb": "2250",
"yarn.nodemanager.vmem-pmem-ratio" : "10",
"yarn.scheduler.minimum-allocation-mb" : "250",
"yarn.scheduler.maximum-allocation-mb" : "2250",
"yarn.nodemanager.pmem-check-enabled" : "false",
"yarn.acl.enable" : "false",
"yarn.resourcemanager.webapp.proxyuser.hcat.groups" : "*",
"yarn.resourcemanager.webapp.proxyuser.hcat.hosts" : "*",
"yarn.resourcemanager.webapp.proxyuser.oozie.groups" : "*",
"yarn.resourcemanager.webapp.proxyuser.oozie.hosts" : "*"
}
}
]
standard_configurations = [
{
"ams-hbase-env" : {
"properties" : {
"hbase_master_heapsize" : "512m",
"hbase_regionserver_heapsize" : "512m",
"hbase_regionserver_xmn_max" : "256m",
"hbase_regionserver_xmn_ratio" : "0.2"
}
}
},
{
"capacity-scheduler" : {
"yarn.scheduler.capacity.root.default.maximum-am-resource-percent" : "0.5",
"yarn.scheduler.capacity.maximum-am-resource-percent" : "0.5"
}
},
{
"cluster-env": {
"cluster_name": "hdp",
"smokeuser": "ambari-qa",
"user_group": "hadoop",
"security_enabled": "false"
}
},
{
"core-site" : {
"hadoop.proxyuser.hue.hosts" : "*",
"hadoop.proxyuser.hue.groups" : "*",
"hadoop.proxyuser.root.hosts" : "*",
"hadoop.proxyuser.root.groups" : "*",
"hadoop.proxyuser.oozie.hosts" : "*",
"hadoop.proxyuser.oozie.groups" : "*",
"hadoop.proxyuser.hcat.hosts" : "*",
"hadoop.proxyuser.hcat.groups" : "*"
}
},
{
"hadoop-env": {
"dtnode_heapsize" : "250",
"hadoop_heapsize" : "250",
"namenode_heapsize" : "250",
"namenode_opt_newsize": "50",
"namenode_opt_maxnewsize": "100"
}
},
{
"yarn-site": {
"yarn.nodemanager.local-dirs": "/disks/0/hadoop/yarn/local,/disks/1/hadoop/yarn/local,/disks/2/hadoop/yarn/local,/disks/3/hadoop/yarn/local,/disks/4/hadoop/yarn/local,/disks/5/hadoop/yarn/local,/disks/6/hadoop/yarn/local,/disks/7/hadoop/yarn/local",
"yarn.nodemanager.log-dirs": "/disks/0/hadoop/yarn/log,/disks/1/hadoop/yarn/log,/disks/2/hadoop/yarn/log,/disks/3/hadoop/yarn/log,/disks/4/hadoop/yarn/log,/disks/5/hadoop/yarn/log,/disks/6/hadoop/yarn/log,/disks/7/hadoop/yarn/log,/disks/8/hadoop/yarn/log,/disks/9/hadoop/yarn/log,/disks/10/hadoop/yarn/log,/disks/11/hadoop/yarn/log,/disks/12/hadoop/yarn/log,/disks/13/hadoop/yarn/log,/disks/14/hadoop/yarn/log,/disks/15/hadoop/yarn/log",
"yarn.timeline-service.leveldb-timeline-store.path": "/disks/0/hadoop/yarn/timeline",
"yarn.nodemanager.resource.memory-mb" : "32768",
"yarn.scheduler.maximum-allocation-mb" : "32768",
"yarn.scheduler.minimum-allocation-mb" : "2048"
}
},
{
"tez-site": {
"tez.am.resource.memory.mb" : "2048",
"tez.am.java.opts" : "-server -Xmx1638m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC"
}
},
{
"mapred-site": {
"mapreduce.map.java.opts" : "-Xmx1638m",
"mapreduce.map.memory.mb" : "2048",
"mapreduce.reduce.java.opts" : "-Xmx1638m",
"mapreduce.reduce.memory.mb" : "2048",
"mapreduce.task.io.sort.mb" : "819",
"yarn.app.mapreduce.am.command-opts" : "-Xmx1638m",
"yarn.app.mapreduce.am.resource.mb" : "2048"
}
},
{
"hdfs-site": {
"dfs.datanode.data.dir": "/disks/0/hadoop/hdfs/data,/disks/1/hadoop/hdfs/data,/disks/2/hadoop/hdfs/data,/disks/3/hadoop/hdfs/data,/disks/4/hadoop/hdfs/data,/disks/5/hadoop/hdfs/data,/disks/6/hadoop/hdfs/data,/disks/7/hadoop/hdfs/data,/disks/8/hadoop/hdfs/data,/disks/9/hadoop/hdfs/data,/disks/10/hadoop/hdfs/data,/disks/11/hadoop/hdfs/data,/disks/12/hadoop/hdfs/data,/disks/13/hadoop/hdfs/data,/disks/14/hadoop/hdfs/data,/disks/15/hadoop/hdfs/data",
"dfs.namenode.checkpoint.dir": "/disks/0/hadoop/hdfs/namesecondary",
"dfs.namenode.name.dir": "/disks/0/hadoop/hdfs/namenode,/disks/1/hadoop/hdfs/namenode,/disks/2/hadoop/hdfs/namenode,/disks/3/hadoop/hdfs/namenode,/disks/4/hadoop/hdfs/namenode,/disks/5/hadoop/hdfs/namenode,/disks/6/hadoop/hdfs/namenode,/disks/7/hadoop/hdfs/namenode",
"dfs.datanode.failed.volumes.tolerated": "6",
"dfs.nfs3.dump.dir" : "/tmp/.hdfs-nfs"
}
},
{
"global": {
"oozie_data_dir": "/disks/0/hadoop/oozie/data",
"zk_data_dir": "/disks/0/hadoop/zookeeper",
"falcon.embeddedmq.data": "/disks/0/hadoop/falcon/embeddedmq/data",
"falcon_local_dir": "/disks/0/hadoop/falcon",
"namenode_heapsize" : "16384m"
}
},
{
"hbase-site" : {
"hbase.security.authorization": "true",
"hbase.rpc.engine": "org.apache.hadoop.hbase.ipc.SecureRpcEngine",
"hbase_master_heapsize": "250",
"hbase_regionserver_heapsize": "250",
"hbase.rpc.protection": "PRIVACY",
"hbase.tmp.dir": "/disks/0/hadoop/hbase"
}
},
{
"storm-site": {
"storm.local.dir": "/disks/0/hadoop/storm"
}
},
{
"falcon-startup.properties": {
"*.config.store.uri": "file:///disks/0/hadoop/falcon/store"
}
},
{
"hive-site": {
"hive.auto.convert.join.noconditionaltask.size" : "716177408",
"hive.tez.container.size" : "2048",
"hive.tez.java.opts" : "-server -Xmx1638m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC",
"hive.metastore.uris" : "thrift://%HOSTGROUP::master_3%:9083"
}
}
]
configurations = evaluation_configurations if scenario_id == 'evaluation' else standard_configurations
data = {
"configurations" : configurations,
"host_groups": host_groups,
"Blueprints" : {
"blueprint_name" : blueprint_name,
"stack_name" : "HDP",
"stack_version" : "2.2"
}
}
data = json.dumps(data)
request = urllib2.Request(url, data)
request.add_header('Authorization', get_ambari_auth_string())
request.add_header('X-Requested-By', 'ambari')
request.add_header('Content-Type', 'text/plain')
try:
response = urllib2.urlopen(request, timeout=request_timeout)
pprint.pprint(response.read())
except urllib2.HTTPError as e:
logger.error('Cluster deployment failed: ' + e.read())
raise e
return 'myblueprint'
def initiate_cluster_deploy(blueprint_name, cluster_id, num_masters, num_workers):
logger.info('Deploying cluster...')
url = '%s://localhost:%s/api/v1/clusters/%s' % (protocol, port, cluster_id)
if num_masters + num_workers < 4:
raise Exception('Cluster size must be 4 or greater')
data = {
"blueprint": blueprint_name,
"default_password": "admin",
"host_groups": [
]
}
for i in range(1, num_masters + 1):
data['host_groups'].append({
"name": "master_%d" % i,
"hosts": [{
"fqdn": get_fqdn(i)
}]
})
worker_hosts = []
for i in range(num_masters + 1, num_masters + num_workers + 1):
worker_hosts.append({
"fqdn": get_fqdn(i)
})
data['host_groups'].append({
"name": "workers",
"hosts": worker_hosts
})
data = json.dumps(data)
pprint.pprint('data=' + data)
request = urllib2.Request(url, data)
request.add_header('Authorization', get_ambari_auth_string())
request.add_header('X-Requested-By', 'ambari')
request.add_header('Content-Type', 'text/plain')
try:
response = urllib2.urlopen(request, timeout=120)
pprint.pprint(response.read())
except urllib2.HTTPError as e:
logger.error('Cluster deployment failed: ' + e.read())
raise e
def poll_until_cluster_deployed(cluster_id, timeout):
url = '%s://localhost:%s/api/v1/clusters/%s/requests/1?fields=Requests/progress_percent,Requests/request_status' % (protocol, port, cluster_id)
deploy_success = False
deploy_finished = False
start_time = time.time()
logger.info('poll until function')
while time.time() - start_time < timeout:
request = urllib2.Request(url)
request.add_header("Authorization", get_ambari_auth_string())
try:
result = urllib2.urlopen(request, timeout=request_timeout).read()
pprint.pprint(result)
if (result is not None):
jsonResult = json.loads(result)
if jsonResult['Requests']['request_status'] == 'COMPLETED':
deploy_success = True
if int(jsonResult['Requests']['progress_percent']) == 100 or jsonResult['Requests']['request_status'] == 'FAILED':
deploy_finished = True
break
except:
logger.info('Could not poll deploy status from the server.')
time.sleep(5)
if not deploy_finished:
raise TimeoutException('Timed out while waiting for cluster deployment to finish')
elif not deploy_success:
raise Exception('Cluster deploy failed')
if action == 'bootstrap':
masters_iplist = masters_iplist.split(',')
workers_iplist = workers_iplist.split(',')
ambari_server_hostname = get_hostname(1)
all_hostnames = map((lambda i: get_hostname(i)), range(1, num_masters + num_workers + 1))
logger.info(all_hostnames)
host_ip_map = get_host_ip_map(all_hostnames)
update_etc_hosts(host_ip_map)
update_ambari_agent_ini(ambari_server_hostname)
patch_ambari_agent()
run_system_command('chkconfig ambari-agent on')
logger.info('Starting ambari-agent...')
run_system_command('ambari-agent start')
logger.info('ambari-agent started')
if is_ambari_server_host():
run_system_command('chkconfig ambari-server on')
logger.info('Running ambari-server setup...')
run_system_command('ambari-server setup -s -j /usr/jdk64/jdk1.7.0_45')
logger.info('ambari-server setup finished')
if protocol == 'https':
logger.info('Enabling HTTPS...')
enable_https()
logger.info('HTTPS enabled')
logger.info('Starting ambari-server...')
run_system_command('ambari-server start')
logger.info('ambari-server started')
try:
set_admin_password(admin_password, 60 * 2)
# set current_admin_password so that HTTP requests to Ambari start using the new user-specified password
current_admin_password = admin_password
poll_until_all_agents_registered(num_masters + num_workers, 60 * 4)
blueprint_name = create_blueprint(scenario_id)
initiate_cluster_deploy(blueprint_name, cluster_id, num_masters, num_workers)
except:
logger.error('Failed VM Bootstrap')
sys.exit(1)
else:
try:
current_admin_password = admin_password
poll_until_cluster_deployed(cluster_id, check_timeout_seconds)
except TimeoutException as e:
logger.info(e)
if report_timeout_fail:
logger.error('Failed cluster deployment')
sys.exit(1)
else:
logger.info('Cluster deployment has not completed')
sys.exit(0)
except:
logger.error('Failed cluster deployment')
sys.exit(1)
logger.info('Finished VM Bootstrap successfully')
sys.exit(0)
# License: MIT
#!/usr/bin/python
# Source: c7zero/chipsec (chipsec/hal/uefi_common.py)
#CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2016, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#[email protected]
#
# -------------------------------------------------------------------------------
#
# CHIPSEC: Platform Hardware Security Assessment Framework
# (c) 2010-2012 Intel Corporation
#
# -------------------------------------------------------------------------------
"""
Common UEFI/EFI functionality including UEFI variables, Firmware Volumes, Secure Boot variables, S3 boot-script, UEFI tables, etc.
"""
import os
import struct
from collections import namedtuple
from chipsec.file import *
from chipsec.logger import *
#from chipsec.helper.oshelper import helper
################################################################################################
#
# EFI Variable and Variable Store Defines
#
################################################################################################
# UDK2010.SR1\MdeModulePkg\Include\Guid\VariableFormat.h
#
# #ifndef __VARIABLE_FORMAT_H__
# #define __VARIABLE_FORMAT_H__
#
# #define EFI_VARIABLE_GUID \
# { 0xddcf3616, 0x3275, 0x4164, { 0x98, 0xb6, 0xfe, 0x85, 0x70, 0x7f, 0xfe, 0x7d } }
#
# extern EFI_GUID gEfiVariableGuid;
#
# ///
# /// Alignment of variable name and data, according to the architecture:
# /// * For IA-32 and Intel(R) 64 architectures: 1.
# /// * For IA-64 architecture: 8.
# ///
# #if defined (MDE_CPU_IPF)
# #define ALIGNMENT 8
# #else
# #define ALIGNMENT 1
# #endif
#
# //
# // GET_PAD_SIZE calculates the minimal pad bytes needed to make the current pad size satisfy the alignment requirement.
# //
# #if (ALIGNMENT == 1)
# #define GET_PAD_SIZE(a) (0)
# #else
# #define GET_PAD_SIZE(a) (((~a) + 1) & (ALIGNMENT - 1))
# #endif
#
# ///
# /// Alignment of Variable Data Header in Variable Store region.
# ///
# #define HEADER_ALIGNMENT 4
# #define HEADER_ALIGN(Header) (((UINTN) (Header) + HEADER_ALIGNMENT - 1) & (~(HEADER_ALIGNMENT - 1)))
#
# ///
# /// Status of Variable Store Region.
# ///
# typedef enum {
# EfiRaw,
# EfiValid,
# EfiInvalid,
# EfiUnknown
# } VARIABLE_STORE_STATUS;
#
# #pragma pack(1)
#
# #define VARIABLE_STORE_SIGNATURE EFI_VARIABLE_GUID
#
# ///
# /// Variable Store Header Format and State.
# ///
# #define VARIABLE_STORE_FORMATTED 0x5a
# #define VARIABLE_STORE_HEALTHY 0xfe
#
# ///
# /// Variable Store region header.
# ///
# typedef struct {
# ///
# /// Variable store region signature.
# ///
# EFI_GUID Signature;
# ///
# /// Size of entire variable store,
# /// including size of variable store header but not including the size of FvHeader.
# ///
# UINT32 Size;
# ///
# /// Variable region format state.
# ///
# UINT8 Format;
# ///
# /// Variable region healthy state.
# ///
# UINT8 State;
# UINT16 Reserved;
# UINT32 Reserved1;
# } VARIABLE_STORE_HEADER;
#
# ///
# /// Variable data start flag.
# ///
# #define VARIABLE_DATA 0x55AA
#
# ///
# /// Variable State flags.
# ///
# #define VAR_IN_DELETED_TRANSITION 0xfe ///< Variable is in obsolete transition.
# #define VAR_DELETED 0xfd ///< Variable is obsolete.
# #define VAR_HEADER_VALID_ONLY 0x7f ///< Variable header has been valid.
# #define VAR_ADDED 0x3f ///< Variable has been completely added.
#
# ///
# /// Single Variable Data Header Structure.
# ///
# typedef struct {
# ///
# /// Variable Data Start Flag.
# ///
# UINT16 StartId;
# ///
# /// Variable State defined above.
# ///
# UINT8 State;
# UINT8 Reserved;
# ///
# /// Attributes of variable defined in UEFI specification.
# ///
# UINT32 Attributes;
# ///
# /// Size of variable null-terminated Unicode string name.
# ///
# UINT32 NameSize;
# ///
# /// Size of the variable data without this header.
# ///
# UINT32 DataSize;
# ///
# /// A unique identifier for the vendor that produces and consumes this variable.
# ///
# EFI_GUID VendorGuid;
# } VARIABLE_HEADER;
#
# #pragma pack()
#
# typedef struct _VARIABLE_INFO_ENTRY VARIABLE_INFO_ENTRY;
#
# ///
# /// This structure contains the variable list that is put in EFI system table.
# /// The variable driver collects all variables that were used at boot service time and produces this list.
# /// This is an optional feature to dump all used variables in shell environment.
# ///
# struct _VARIABLE_INFO_ENTRY {
# VARIABLE_INFO_ENTRY *Next; ///< Pointer to next entry.
# EFI_GUID VendorGuid; ///< Guid of Variable.
# CHAR16 *Name; ///< Name of Variable.
# UINT32 Attributes; ///< Attributes of variable defined in UEFI specification.
# UINT32 ReadCount; ///< Number of times to read this variable.
# UINT32 WriteCount; ///< Number of times to write this variable.
# UINT32 DeleteCount; ///< Number of times to delete this variable.
# UINT32 CacheCount; ///< Number of times that cache hits this variable.
# BOOLEAN Volatile; ///< TRUE if volatile, FALSE if non-volatile.
# };
#
# #endif // _EFI_VARIABLE_H_
#
# Variable Store Header Format and State.
#
VARIABLE_STORE_FORMATTED = 0x5a
VARIABLE_STORE_HEALTHY = 0xfe
#
# Variable Store region header.
#
#typedef struct {
# ///
# /// Variable store region signature.
# ///
# EFI_GUID Signature;
# ///
# /// Size of entire variable store,
# /// including size of variable store header but not including the size of FvHeader.
# ///
# UINT32 Size;
# ///
# /// Variable region format state.
# ///
# UINT8 Format;
# ///
# /// Variable region healthy state.
# ///
# UINT8 State;
# UINT16 Reserved;
# UINT32 Reserved1;
#} VARIABLE_STORE_HEADER;
#
# Signature is EFI_GUID (guid0 guid1 guid2 guid3)
VARIABLE_STORE_HEADER_FMT = '<8sIBBHI'
VARIABLE_STORE_HEADER_SIZE = struct.calcsize( VARIABLE_STORE_HEADER_FMT )
class VARIABLE_STORE_HEADER( namedtuple('VARIABLE_STORE_HEADER', 'guid0 guid1 guid2 guid3 Size Format State Reserved Reserved1') ):
__slots__ = ()
def __str__(self):
return """
EFI Variable Store
-----------------------------
Signature : {%08X-%04X-%04X-%04s-%06s}
Size : 0x%08X bytes
Format : 0x%02X
State : 0x%02X
Reserved : 0x%04X
Reserved1 : 0x%08X
""" % ( self.guid0, self.guid1, self.guid2, self.guid3[:2].encode('hex').upper(), self.guid3[-6::].encode('hex').upper(), self.Size, self.Format, self.State, self.Reserved, self.Reserved1 )
#
# Variable data start flag.
#
VARIABLE_DATA = 0x55aa
VARIABLE_DATA_SIGNATURE = struct.pack('=H', VARIABLE_DATA )
#
# Variable Attributes
#
EFI_VARIABLE_NON_VOLATILE = 0x00000001 # Variable is non volatile
EFI_VARIABLE_BOOTSERVICE_ACCESS = 0x00000002 # Variable is boot time accessible
EFI_VARIABLE_RUNTIME_ACCESS = 0x00000004 # Variable is run-time accessible
EFI_VARIABLE_HARDWARE_ERROR_RECORD = 0x00000008 #
EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS = 0x00000010 # Variable is authenticated
EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS = 0x00000020 # Variable is time based authenticated
EFI_VARIABLE_APPEND_WRITE = 0x00000040 # Variable allows append
UEFI23_1_AUTHENTICATED_VARIABLE_ATTRIBUTES = (EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS | EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS)
def IS_VARIABLE_ATTRIBUTE(_c, _Mask):
return ( (_c & _Mask) != 0 )
def IS_EFI_VARIABLE_AUTHENTICATED( attr ):
return ( IS_VARIABLE_ATTRIBUTE( attr, EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS ) or IS_VARIABLE_ATTRIBUTE( attr, EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS ) )
MAX_VARIABLE_SIZE = 1024
MAX_NVRAM_SIZE = 1024*1024
def get_nvar_name(nvram, name_offset, isAscii):
if isAscii:
nend = nvram.find('\x00', name_offset)
name_size = nend - name_offset + 1 # add trailing zero symbol
name = nvram[name_offset:nend]
return (name, name_size)
else:
nend = nvram.find('\x00\x00', name_offset)
while (nend & 1) == 1:
nend = nend + 1
nend = nvram.find('\x00\x00', nend)
name_size = nend - name_offset + 2 # add trailing zero symbol
name = unicode(nvram[name_offset:nend], "utf-16-le")
return (name, name_size)
VARIABLE_SIGNATURE_VSS = VARIABLE_DATA_SIGNATURE
################################################################################################
#
# EFI Firmware Volume Defines
#
################################################################################################
FFS_ATTRIB_FIXED = 0x04
FFS_ATTRIB_DATA_ALIGNMENT = 0x38
FFS_ATTRIB_CHECKSUM = 0x40
EFI_FILE_HEADER_CONSTRUCTION = 0x01
EFI_FILE_HEADER_VALID = 0x02
EFI_FILE_DATA_VALID = 0x04
EFI_FILE_MARKED_FOR_UPDATE = 0x08
EFI_FILE_DELETED = 0x10
EFI_FILE_HEADER_INVALID = 0x20
FFS_FIXED_CHECKSUM = 0xAA
EFI_FVB2_ERASE_POLARITY = 0x00000800
EFI_FV_FILETYPE_ALL = 0x00
EFI_FV_FILETYPE_RAW = 0x01
EFI_FV_FILETYPE_FREEFORM = 0x02
EFI_FV_FILETYPE_SECURITY_CORE = 0x03
EFI_FV_FILETYPE_PEI_CORE = 0x04
EFI_FV_FILETYPE_DXE_CORE = 0x05
EFI_FV_FILETYPE_PEIM = 0x06
EFI_FV_FILETYPE_DRIVER = 0x07
EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER = 0x08
EFI_FV_FILETYPE_APPLICATION = 0x09
EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE = 0x0b
EFI_FV_FILETYPE_FFS_PAD = 0xf0
FILE_TYPE_NAMES = {0x00: 'FV_ALL', 0x01: 'FV_RAW', 0x02: 'FV_FREEFORM', 0x03: 'FV_SECURITY_CORE', 0x04: 'FV_PEI_CORE', 0x05: 'FV_DXE_CORE', 0x06: 'FV_PEIM', 0x07: 'FV_DRIVER', 0x08: 'FV_COMBINED_PEIM_DRIVER', 0x09: 'FV_APPLICATION', 0x0B: 'FV_FVIMAGE', 0x0F: 'FV_FFS_PAD'}
EFI_SECTION_ALL = 0x00
EFI_SECTION_COMPRESSION = 0x01
EFI_SECTION_GUID_DEFINED = 0x02
EFI_SECTION_PE32 = 0x10
EFI_SECTION_PIC = 0x11
EFI_SECTION_TE = 0x12
EFI_SECTION_DXE_DEPEX = 0x13
EFI_SECTION_VERSION = 0x14
EFI_SECTION_USER_INTERFACE = 0x15
EFI_SECTION_COMPATIBILITY16 = 0x16
EFI_SECTION_FIRMWARE_VOLUME_IMAGE = 0x17
EFI_SECTION_FREEFORM_SUBTYPE_GUID = 0x18
EFI_SECTION_RAW = 0x19
EFI_SECTION_PEI_DEPEX = 0x1B
EFI_SECTION_SMM_DEPEX = 0x1C
SECTION_NAMES = {0x00: 'S_ALL', 0x01: 'S_COMPRESSION', 0x02: 'S_GUID_DEFINED', 0x10: 'S_PE32', 0x11: 'S_PIC', 0x12: 'S_TE', 0x13: 'S_DXE_DEPEX', 0x14: 'S_VERSION', 0x15: 'S_USER_INTERFACE', 0x16: 'S_COMPATIBILITY16', 0x17: 'S_FV_IMAGE', 0x18: 'S_FREEFORM_SUBTYPE_GUID', 0x19: 'S_RAW', 0x1B: 'S_PEI_DEPEX', 0x1C: 'S_SMM_DEPEX'}
EFI_SECTIONS_EXE = [EFI_SECTION_PE32, EFI_SECTION_TE, EFI_SECTION_PIC, EFI_SECTION_COMPATIBILITY16]
GUID = "<IHH8s"
guid_size = struct.calcsize(GUID)
EFI_COMPRESSION_SECTION = "<IB"
EFI_COMPRESSION_SECTION_size = struct.calcsize(EFI_COMPRESSION_SECTION)
EFI_GUID_DEFINED_SECTION = "<IHH8sHH"
EFI_GUID_DEFINED_SECTION_size = struct.calcsize(EFI_GUID_DEFINED_SECTION)
EFI_CRC32_GUIDED_SECTION_EXTRACTION_PROTOCOL_GUID = "FC1BCDB0-7D31-49AA-936A-A4600D9DD083"
VARIABLE_STORE_FV_GUID = 'FFF12B8D-7696-4C8B-A985-2747075B4F50'
EFI_FIRMWARE_FILE_SYSTEM_GUID = "7A9354D9-0468-444A-81CE-0BF617D890DF"
EFI_FIRMWARE_FILE_SYSTEM2_GUID = "8C8CE578-8A3D-4F1C-9935-896185C32DD3"
EFI_FIRMWARE_FILE_SYSTEM3_GUID = "5473C07A-3DCB-4DCA-BD6F-1E9689E7349A"
EFI_FS_GUIDS = [EFI_FIRMWARE_FILE_SYSTEM3_GUID, EFI_FIRMWARE_FILE_SYSTEM2_GUID, EFI_FIRMWARE_FILE_SYSTEM_GUID]
LZMA_CUSTOM_DECOMPRESS_GUID = "EE4E5898-3914-4259-9D6E-DC7BD79403CF"
FIRMWARE_VOLUME_GUID = "24400798-3807-4A42-B413-A1ECEE205DD8"
TIANO_DECOMPRESSED_GUID = "A31280AD-481E-41B6-95E8-127F4C984779"
VOLUME_SECTION_GUID = "367AE684-335D-4671-A16D-899DBFEA6B88"
#
# Compression Types
#
COMPRESSION_TYPE_NONE = 0
COMPRESSION_TYPE_TIANO = 1
COMPRESSION_TYPE_LZMA = 2
COMPRESSION_TYPES = [COMPRESSION_TYPE_NONE, COMPRESSION_TYPE_TIANO, COMPRESSION_TYPE_LZMA]
################################################################################################
#
# Misc Defines
#
################################################################################################
#
# Status codes
# edk2: MdePkg/Include/Base.h
#
# @TODO
#define ENCODE_ERROR(StatusCode) ((RETURN_STATUS)(MAX_BIT | (StatusCode)))
#define ENCODE_WARNING(a) (a)
class StatusCode:
EFI_SUCCESS = 0
EFI_LOAD_ERROR = 1
EFI_INVALID_PARAMETER = 2
EFI_UNSUPPORTED = 3
EFI_BAD_BUFFER_SIZE = 4
EFI_BUFFER_TOO_SMALL = 5
EFI_NOT_READY = 6
EFI_DEVICE_ERROR = 7
EFI_WRITE_PROTECTED = 8
EFI_OUT_OF_RESOURCES = 9
EFI_VOLUME_CORRUPTED = 10
EFI_VOLUME_FULL = 11
EFI_NO_MEDIA = 12
EFI_MEDIA_CHANGED = 13
EFI_NOT_FOUND = 14
EFI_ACCESS_DENIED = 15
EFI_NO_RESPONSE = 16
EFI_NO_MAPPING = 17
EFI_TIMEOUT = 18
EFI_NOT_STARTED = 19
EFI_ALREADY_STARTED = 20
EFI_ABORTED = 21
EFI_ICMP_ERROR = 22
EFI_TFTP_ERROR = 23
EFI_PROTOCOL_ERROR = 24
EFI_INCOMPATIBLE_VERSION = 25
EFI_SECURITY_VIOLATION = 26
EFI_CRC_ERROR = 27
EFI_END_OF_MEDIA = 28
EFI_END_OF_FILE = 31
EFI_INVALID_LANGUAGE = 32
EFI_COMPROMISED_DATA = 33
EFI_HTTP_ERROR = 35
'''
EFI_WARN_UNKNOWN_GLYPH = 1
EFI_WARN_DELETE_FAILURE = 2
EFI_WARN_WRITE_FAILURE = 3
EFI_WARN_BUFFER_TOO_SMALL = 4
EFI_WARN_STALE_DATA = 5
EFI_WARN_FILE_SYSTEM = 6
'''
EFI_STATUS_DICT = {
StatusCode.EFI_SUCCESS :"EFI_SUCCESS",
StatusCode.EFI_LOAD_ERROR :"EFI_LOAD_ERROR",
StatusCode.EFI_INVALID_PARAMETER :"EFI_INVALID_PARAMETER",
StatusCode.EFI_UNSUPPORTED :"EFI_UNSUPPORTED",
StatusCode.EFI_BAD_BUFFER_SIZE :"EFI_BAD_BUFFER_SIZE",
StatusCode.EFI_BUFFER_TOO_SMALL :"EFI_BUFFER_TOO_SMALL",
StatusCode.EFI_NOT_READY :"EFI_NOT_READY",
StatusCode.EFI_DEVICE_ERROR :"EFI_DEVICE_ERROR",
StatusCode.EFI_WRITE_PROTECTED :"EFI_WRITE_PROTECTED",
StatusCode.EFI_OUT_OF_RESOURCES :"EFI_OUT_OF_RESOURCES",
StatusCode.EFI_VOLUME_CORRUPTED :"EFI_VOLUME_CORRUPTED",
StatusCode.EFI_VOLUME_FULL :"EFI_VOLUME_FULL",
StatusCode.EFI_NO_MEDIA :"EFI_NO_MEDIA",
StatusCode.EFI_MEDIA_CHANGED :"EFI_MEDIA_CHANGED",
StatusCode.EFI_NOT_FOUND :"EFI_NOT_FOUND",
StatusCode.EFI_ACCESS_DENIED :"EFI_ACCESS_DENIED",
StatusCode.EFI_NO_RESPONSE :"EFI_NO_RESPONSE",
StatusCode.EFI_NO_MAPPING :"EFI_NO_MAPPING",
StatusCode.EFI_TIMEOUT :"EFI_TIMEOUT",
StatusCode.EFI_NOT_STARTED :"EFI_NOT_STARTED",
StatusCode.EFI_ALREADY_STARTED :"EFI_ALREADY_STARTED",
StatusCode.EFI_ABORTED :"EFI_ABORTED",
StatusCode.EFI_ICMP_ERROR :"EFI_ICMP_ERROR",
StatusCode.EFI_TFTP_ERROR :"EFI_TFTP_ERROR",
StatusCode.EFI_PROTOCOL_ERROR :"EFI_PROTOCOL_ERROR",
StatusCode.EFI_INCOMPATIBLE_VERSION:"EFI_INCOMPATIBLE_VERSION",
StatusCode.EFI_SECURITY_VIOLATION :"EFI_SECURITY_VIOLATION",
StatusCode.EFI_CRC_ERROR :"EFI_CRC_ERROR",
StatusCode.EFI_END_OF_MEDIA :"EFI_END_OF_MEDIA",
StatusCode.EFI_END_OF_FILE :"EFI_END_OF_FILE",
StatusCode.EFI_INVALID_LANGUAGE :"EFI_INVALID_LANGUAGE",
StatusCode.EFI_COMPROMISED_DATA :"EFI_COMPROMISED_DATA",
StatusCode.EFI_HTTP_ERROR :"EFI_HTTP_ERROR"
}
EFI_GUID_FMT = "IHH8s"
def EFI_GUID( guid0, guid1, guid2, guid3 ):
return ("%08X-%04X-%04X-%04s-%06s" % (guid0, guid1, guid2, guid3[:2].encode('hex').upper(), guid3[-6::].encode('hex').upper()) )
def align(of, size):
of = (((of + size - 1)/size) * size)
return of
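# align() rounds an offset up to the next multiple of size, e.g. align(0x11, 8) == 0x18
# and align(0x18, 8) == 0x18.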
def bit_set(value, mask, polarity = False):
if polarity: value = ~value
return ( (value & mask) == mask )
def get_3b_size(s):
return (ord(s[0]) + (ord(s[1]) << 8) + (ord(s[2]) << 16))
def guid_str(guid0, guid1, guid2, guid3):
guid = "%08X-%04X-%04X-%04s-%06s" % (guid0, guid1, guid2, guid3[:2].encode('hex').upper(), guid3[-6::].encode('hex').upper())
return guid
# #################################################################################################
#
# UEFI Firmware Volume Parsing/Modification Functionality
#
# #################################################################################################
def align_image(image, size=8, fill='\x00'):
return image.ljust(((len(image) + size - 1) / size) * size, fill)
def get_guid_bin(guid):
values = guid.split('-')
if [len(x) for x in values] == [8, 4, 4, 4, 12]:
values = values[0:3] + [values[3][0:2], values[3][2:4]] + [values[4][x:x+2] for x in xrange(0, 12, 2)]
values = [int(x, 16) for x in values]
return struct.pack('<LHHBBBBBBBB', *tuple(values))
return ''
def assemble_uefi_file(guid, image):
EFI_FFS_FILE_HEADER = "<16sHBBL"
FileHeaderSize = struct.calcsize(EFI_FFS_FILE_HEADER)
Type = EFI_FV_FILETYPE_FREEFORM
CheckSum = 0x0000;
Attributes = 0x40
Size = FileHeaderSize + len(image)
State = 0xF8
SizeState = (Size & 0x00FFFFFF) | (State << 24)
FileHeader = struct.pack(EFI_FFS_FILE_HEADER, get_guid_bin(guid), CheckSum, Type, Attributes, (Size & 0x00FFFFFF))
hsum = FvChecksum8(FileHeader)
if (Attributes & FFS_ATTRIB_CHECKSUM):
fsum = FvChecksum8(image)
else:
fsum = FFS_FIXED_CHECKSUM
CheckSum = (hsum | (fsum << 8))
return struct.pack(EFI_FFS_FILE_HEADER, get_guid_bin(guid), CheckSum, Type, Attributes, SizeState) + image
def assemble_uefi_section(image, uncomressed_size, compression_type):
EFI_COMPRESSION_SECTION_HEADER = "<LLB"
SectionType = EFI_SECTION_COMPRESSION
SectionSize = struct.calcsize(EFI_COMPRESSION_SECTION_HEADER) + len(image)
SectionHeader = struct.pack(EFI_COMPRESSION_SECTION_HEADER, (SectionSize & 0x00FFFFFF) | (SectionType << 24), uncomressed_size, compression_type)
return SectionHeader + image
def assemble_uefi_raw(image):
return align_image(struct.pack('<L', ((len(image) + 4) & 0x00FFFFFF) + (EFI_SECTION_RAW << 24)) + image)
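# Illustrative use of the helpers above (the payload variables and GUID are hypothetical):
# wrap a compressed image in a compression section, then in a freeform FFS file:
#   section  = assemble_uefi_section(compressed_image, len(uncompressed_image), COMPRESSION_TYPE_LZMA)
#   ffs_file = assemble_uefi_file('AABBCCDD-EEFF-0011-2233-445566778899', section)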
def FvSum8(buffer):
sum8 = 0
for b in buffer:
sum8 = (sum8 + ord(b)) & 0xff
return sum8
def FvChecksum8(buffer):
return ((0x100 - FvSum8(buffer)) & 0xff)
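# The checksum is chosen so that a buffer plus its checksum sums to 0 modulo 0x100,
# e.g. FvSum8('\x01\x02\x03') == 0x06 and FvChecksum8('\x01\x02\x03') == 0xFA.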
def FvSum16(buffer):
sum16 = 0
blen = len(buffer)/2
i = 0
while i < blen:
el16 = ord(buffer[2*i]) | (ord(buffer[2*i+1]) << 8)
sum16 = (sum16 + el16) & 0xffff
i = i + 1
return sum16
def FvChecksum16(buffer):
return ((0x10000 - FvSum16(buffer)) & 0xffff)
def ValidateFwVolumeHeader(ZeroVector, FsGuid, FvLength, Attributes, HeaderLength, Checksum, ExtHeaderOffset, Reserved, CalcSum, size):
zero_vector = (ZeroVector == '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
fv_rsvd = (Reserved == 0)
fs_guid = (FsGuid in (EFI_FS_GUIDS + [VARIABLE_STORE_FV_GUID]))
fv_len = (FvLength <= size)
fv_header_len = (ExtHeaderOffset < FvLength) and (HeaderLength < FvLength)
#sum = (Checksum == CalcSum)
return fv_rsvd and fv_len and fv_header_len
def NextFwVolume(buffer, off = 0):
fof = off
EFI_FIRMWARE_VOLUME_HEADER = "<16sIHH8sQIIHHHBB"
vf_header_size = struct.calcsize(EFI_FIRMWARE_VOLUME_HEADER)
EFI_FV_BLOCK_MAP_ENTRY = "<II"
size = len(buffer)
res = (None, None, None, None, None, None, None, None, None)
while ((fof + vf_header_size) < size):
fof = buffer.find("_FVH", fof)
if fof < 0x28: return res
fof = fof - 0x28
ZeroVector, FileSystemGuid0, FileSystemGuid1,FileSystemGuid2,FileSystemGuid3, \
FvLength, Signature, Attributes, HeaderLength, Checksum, ExtHeaderOffset, \
Reserved, Revision = struct.unpack(EFI_FIRMWARE_VOLUME_HEADER, buffer[fof:fof+vf_header_size])
'''
print "\nFV volume offset: 0x%08X" % fof
print "\tFvLength: 0x%08X" % FvLength
print "\tAttributes: 0x%08X" % Attributes
print "\tHeaderLength: 0x%04X" % HeaderLength
print "\tChecksum: 0x%04X" % Checksum
print "\tRevision: 0x%02X" % Revision
print "\tExtHeaderOffset: 0x%02X" % ExtHeaderOffset
print "\tReserved: 0x%02X" % Reserved
'''
#print "FFS Guid: %s" % guid_str(FileSystemGuid0, FileSystemGuid1,FileSystemGuid2, FileSystemGuid3)
#print "FV Checksum: 0x%04X (0x%04X)" % (Checksum, FvChecksum16(buffer[fof:fof+HeaderLength]))
#'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
fvh = struct.pack(EFI_FIRMWARE_VOLUME_HEADER, ZeroVector, \
FileSystemGuid0, FileSystemGuid1,FileSystemGuid2,FileSystemGuid3, \
FvLength, Signature, Attributes, HeaderLength, 0, ExtHeaderOffset, \
Reserved, Revision)
if (len(fvh) < HeaderLength):
#print "len(fvh)=%d, HeaderLength=%d" % (len(fvh), HeaderLength)
tail = buffer[fof+len(fvh):fof+HeaderLength]
fvh = fvh + tail
CalcSum = FvChecksum16(fvh)
FsGuid = guid_str(FileSystemGuid0, FileSystemGuid1,FileSystemGuid2,FileSystemGuid3)
if (ValidateFwVolumeHeader(ZeroVector, FsGuid, FvLength, Attributes, HeaderLength, Checksum, ExtHeaderOffset, Reserved, CalcSum, size)):
res = (fof, FsGuid, FvLength, Attributes, HeaderLength, Checksum, ExtHeaderOffset, buffer[fof:fof+FvLength], CalcSum)
return res
else:
fof += 0x2C
return res
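# Illustrative usage sketch: walk all firmware volumes in a raw BIOS/SPI image by
# repeatedly calling NextFwVolume() just past the previous hit. 'fw_image' is assumed
# to be the raw image contents read elsewhere (e.g. via read_file()).
def _example_iter_fw_volumes(fw_image):
    volumes = []
    off = 0
    while True:
        fof, fs_guid, fv_length, attributes, header_length, checksum, ext_header_offset, fv_image, calc_sum = NextFwVolume(fw_image, off)
        if fof is None:
            break
        volumes.append((fof, fs_guid, fv_length))
        off = fof + fv_length
    return volumes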
EFI_FFS_FILE_HEADER = "<IHH8sHBB3sB"
file_header_size = struct.calcsize(EFI_FFS_FILE_HEADER)
def NextFwFile(FvImage, FvLength, fof, polarity):
fof = align(fof, 8)
cur_offset = fof
next_offset = None
res = None
update_or_deleted = False
if (fof + file_header_size) <= min(FvLength, len(FvImage)):
if ('\xff\xff\xff\xff' == FvImage[fof+file_header_size-4:fof+file_header_size]):
next_offset = fof + 8
return (cur_offset, next_offset, None, None, None, None, None, None, None, None, update_or_deleted, None)
Name0, Name1, Name2, Name3, IntegrityCheck, Type, Attributes, Size, State = struct.unpack(EFI_FFS_FILE_HEADER, FvImage[fof:fof+file_header_size])
        fsize = get_3b_size(Size)
update_or_deleted = (bit_set(State, EFI_FILE_MARKED_FOR_UPDATE, polarity)) or (bit_set(State, EFI_FILE_DELETED, polarity))
if (not bit_set(State, EFI_FILE_HEADER_VALID, polarity)) or (bit_set(State, EFI_FILE_HEADER_INVALID, polarity)):
next_offset = align(fof + 1, 8)
elif (not bit_set(State, EFI_FILE_DATA_VALID, polarity)):
next_offset = align(fof + 1, 8)
elif fsize == 0:
next_offset = align(fof + 1, 8)
else:
next_offset = fof + fsize
next_offset = align(next_offset, 8)
Name = guid_str(Name0, Name1, Name2, Name3)
fheader = struct.pack(EFI_FFS_FILE_HEADER, Name0, Name1, Name2, Name3, 0, Type, Attributes, Size, 0)
hsum = FvChecksum8(fheader)
if (Attributes & FFS_ATTRIB_CHECKSUM):
fsum = FvChecksum8(FvImage[fof+file_header_size:fof+fsize])
else:
fsum = FFS_FIXED_CHECKSUM
CalcSum = (hsum | (fsum << 8))
res = (cur_offset, next_offset, Name, Type, Attributes, State, IntegrityCheck, fsize, FvImage[fof:fof+fsize], file_header_size, update_or_deleted, CalcSum)
if res is None: return (cur_offset, next_offset, None, None, None, None, None, None, None, None, update_or_deleted, None)
else: return res
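# Illustrative usage sketch: enumerate FFS files inside one firmware-volume image
# returned by NextFwVolume(). Erase polarity is normally derived from the FV
# attributes; True is assumed here purely for illustration.
def _example_iter_fw_files(fv_image, fv_length, header_length, polarity=True):
    files = []
    foff = header_length
    while foff is not None and foff < fv_length:
        cur_offset, next_offset, name, ftype, attributes, state, integrity, fsize, file_image, hdr_size, update_or_deleted, calc_sum = NextFwFile(fv_image, fv_length, foff, polarity)
        if name is not None:
            files.append((name, ftype, fsize))
        foff = next_offset
    return files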
EFI_COMMON_SECTION_HEADER = "<3sB"
EFI_COMMON_SECTION_HEADER_size = struct.calcsize(EFI_COMMON_SECTION_HEADER)
def NextFwFileSection(sections, ssize, sof, polarity):
# offset, next_offset, SecName, SecType, SecBody, SecHeaderSize
cur_offset = sof
if (sof + EFI_COMMON_SECTION_HEADER_size) < ssize:
header = sections[sof:sof+EFI_COMMON_SECTION_HEADER_size]
if len(header) < EFI_COMMON_SECTION_HEADER_size: return (None, None, None, None, None, None)
Size, Type = struct.unpack(EFI_COMMON_SECTION_HEADER, header)
Size = get_3b_size(Size)
sec_name = "S_UNKNOWN_%02X" % Type
if Type in SECTION_NAMES.keys():
sec_name = SECTION_NAMES[Type]
if (Size == 0xffffff and Type == 0xff) or (Size == 0):
sof = align(sof + 4, 4)
return (cur_offset, sof, None, None, None, None)
sec_body = sections[sof:sof+Size]
sof = align(sof + Size, 4)
return (cur_offset, sof, sec_name, Type, sec_body, EFI_COMMON_SECTION_HEADER_size)
return (None, None, None, None, None, None)
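# Illustrative usage sketch: walk the sections of a single FFS file body (the bytes
# following its file header) with NextFwFileSection().
def _example_iter_file_sections(file_body, polarity=True):
    sections = []
    soff = 0
    while soff is not None:
        cur_offset, soff, sec_name, sec_type, sec_body, sec_hdr_size = NextFwFileSection(file_body, len(file_body), soff, polarity)
        if cur_offset is None:
            break
        if sec_name is not None:
            sections.append((sec_name, sec_type, len(sec_body)))
    return sections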
def DecodeSection(SecType, SecBody, SecHeaderSize):
pass
# #################################################################################################
#
# UEFI Variable (NVRAM) Parsing Functionality
#
# #################################################################################################
# typedef struct {
# ///
# /// Type of the signature. GUID signature types are defined in below.
# ///
# EFI_GUID SignatureType;
# ///
# /// Total size of the signature list, including this header.
# ///
# UINT32 SignatureListSize;
# ///
# /// Size of the signature header which precedes the array of signatures.
# ///
# UINT32 SignatureHeaderSize;
# ///
# /// Size of each signature.
# ///
# UINT32 SignatureSize;
# ///
# /// Header before the array of signatures. The format of this header is specified
# /// by the SignatureType.
# /// UINT8 SignatureHeader[SignatureHeaderSize];
# ///
# /// An array of signatures. Each signature is SignatureSize bytes in length.
# /// EFI_SIGNATURE_DATA Signatures[][SignatureSize];
# ///
# } EFI_SIGNATURE_LIST;
SIGNATURE_LIST = "<IHH8sIII"
SIGNATURE_LIST_size = struct.calcsize(SIGNATURE_LIST)
def parse_sha256(data):
return
def parse_rsa2048(data):
return
def parse_rsa2048_sha256(data):
return
def parse_sha1(data):
return
def parse_rsa2048_sha1(data):
return
def parse_x509(data):
return
def parse_sha224(data):
return
def parse_sha384(data):
return
def parse_sha512(data):
return
def parse_pkcs7(data):
return
sig_types = {"C1C41626-504C-4092-ACA9-41F936934328": ("EFI_CERT_SHA256_GUID", parse_sha256, 0x30, "SHA256"), \
"3C5766E8-269C-4E34-AA14-ED776E85B3B6": ("EFI_CERT_RSA2048_GUID", parse_rsa2048, 0x110, "RSA2048"), \
"E2B36190-879B-4A3D-AD8D-F2E7BBA32784": ("EFI_CERT_RSA2048_SHA256_GUID", parse_rsa2048_sha256, 0x110, "RSA2048_SHA256"), \
"826CA512-CF10-4AC9-B187-BE01496631BD": ("EFI_CERT_SHA1_GUID", parse_sha1, 0x24, "SHA1"), \
"67F8444F-8743-48F1-A328-1EAAB8736080": ("EFI_CERT_RSA2048_SHA1_GUID", parse_rsa2048_sha1, 0x110, "RSA2048_SHA1"), \
"A5C059A1-94E4-4AA7-87B5-AB155C2BF072": ("EFI_CERT_X509_GUID", parse_x509, 0, "X509"), \
"0B6E5233-A65C-44C9-9407-D9AB83BFC8BD": ("EFI_CERT_SHA224_GUID", parse_sha224, 0x2c, "SHA224"), \
"FF3E5307-9FD0-48C9-85F1-8AD56C701E01": ("EFI_CERT_SHA384_GUID", parse_sha384, 0x40, "SHA384"), \
"093E0FAE-A6C4-4F50-9F1B-D41E2B89C19A": ("EFI_CERT_SHA512_GUID", parse_sha512, 0x50, "SHA512"), \
"4AAFD29D-68DF-49EE-8AA9-347D375665A7": ("EFI_CERT_TYPE_PKCS7_GUID", parse_pkcs7, 0, "PKCS7") }
#def parse_db(db, var_name, path):
def parse_db( db, decode_dir ):
db_size = len(db)
if 0 == db_size:
return
dof = 0
nsig = 0
entries = []
    # some platforms have 0's in the beginning, skip all 0 (no known SignatureType starts with 0x00):
while (dof < db_size and db[dof] == '\x00'): dof = dof + 1
while (dof + SIGNATURE_LIST_size) < db_size:
SignatureType0, SignatureType1, SignatureType2, SignatureType3, SignatureListSize, SignatureHeaderSize, SignatureSize \
= struct.unpack(SIGNATURE_LIST, db[dof:dof+SIGNATURE_LIST_size])
# prevent infinite loop when parsing malformed var
if SignatureListSize == 0:
logger().log_bad("db parsing failed!")
return entries
SignatureType = guid_str(SignatureType0, SignatureType1, SignatureType2, SignatureType3)
short_name = "UNKNOWN"
sig_parse_f = None
sig_size = 0
if (SignatureType in sig_types.keys()):
sig_name, sig_parse_f, sig_size, short_name = sig_types[SignatureType]
#logger().log( "SignatureType : %s (%s)" % (SignatureType, sig_name) )
#logger().log( "SignatureListSize : 0x%08X" % SignatureListSize )
#logger().log( "SignatureHeaderSize : 0x%08X" % SignatureHeaderSize )
#logger().log( "SignatureSize : 0x%08X" % SignatureSize )
#logger().log( "Parsing..." )
if (((sig_size > 0) and (sig_size == SignatureSize)) or ((sig_size == 0) and (SignatureSize >= 0x10))):
sof = 0
sig_list = db[dof+SIGNATURE_LIST_size+SignatureHeaderSize:dof+SignatureListSize]
sig_list_size = len(sig_list)
while ((sof + guid_size) < sig_list_size):
sig_data = sig_list[sof:sof+SignatureSize]
owner0, owner1, owner2, owner3 = struct.unpack(GUID, sig_data[:guid_size])
owner = guid_str(owner0, owner1, owner2, owner3)
data = sig_data[guid_size:]
#logger().log( "owner: %s" % owner )
entries.append( data )
sig_file_name = "%s-%s-%02d.bin" % (short_name, owner, nsig)
sig_file_name = os.path.join(decode_dir, sig_file_name)
write_file(sig_file_name, data)
if (sig_parse_f != None):
sig_parse_f(data)
sof = sof + SignatureSize
nsig = nsig + 1
else:
err_str = "Wrong SignatureSize for %s type: 0x%X." % (SignatureType, SignatureSize)
if (sig_size > 0): err_str = err_str + " Must be 0x%X." % (sig_size)
else: err_str = err_str + " Must be >= 0x10."
logger().error( err_str )
            # no individual signature could be parsed here; capture the raw signature
            # list body instead so 'data' is defined and the malformed entry is still recorded
            data = db[dof + SIGNATURE_LIST_size:dof + SignatureListSize]
            entries.append( data )
sig_file_name = "%s-%s-%02d.bin" % (short_name, SignatureType, nsig)
sig_file_name = os.path.join(decode_dir, sig_file_name)
write_file(sig_file_name, data)
nsig = nsig + 1
dof = dof + SignatureListSize
return entries
def parse_efivar_file( fname, var=None ):
if not var:
var = read_file( fname )
#path, var_name = os.path.split( fname )
#var_name, ext = os.path.splitext( var_name )
var_path = fname + '.dir'
if not os.path.exists( var_path ):
os.makedirs( var_path )
parse_db( var, var_path )
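# Illustrative usage sketch: decode a dumped Secure Boot variable (PK/KEK/db/dbx) into
# individual signature blobs. The file name below is only an example;
# parse_efivar_file() writes the decoded entries under '<fname>.dir/'.
def _example_decode_secureboot_db(fname='db.bin'):
    parse_efivar_file(fname)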
########################################################################################################
#
# S3 Resume Boot-Script Parsing Functionality
#
########################################################################################################
BOOTSCRIPT_TABLE_OFFSET = 24
RUNTIME_SCRIPT_TABLE_BASE_OFFSET = 32
ACPI_VARIABLE_SET_STRUCT_SIZE = 0x48
S3_BOOTSCRIPT_VARIABLES = [ 'AcpiGlobalVariable' ]
MAX_S3_BOOTSCRIPT_ENTRY_LENGTH = 0x200
#
# MdePkg\Include\Pi\PiS3BootScript.h
#
#//*******************************************
#// EFI Boot Script Opcode definitions
#//*******************************************
#define EFI_BOOT_SCRIPT_IO_WRITE_OPCODE 0x00
#define EFI_BOOT_SCRIPT_IO_READ_WRITE_OPCODE 0x01
#define EFI_BOOT_SCRIPT_MEM_WRITE_OPCODE 0x02
#define EFI_BOOT_SCRIPT_MEM_READ_WRITE_OPCODE 0x03
#define EFI_BOOT_SCRIPT_PCI_CONFIG_WRITE_OPCODE 0x04
#define EFI_BOOT_SCRIPT_PCI_CONFIG_READ_WRITE_OPCODE 0x05
#define EFI_BOOT_SCRIPT_SMBUS_EXECUTE_OPCODE 0x06
#define EFI_BOOT_SCRIPT_STALL_OPCODE 0x07
#define EFI_BOOT_SCRIPT_DISPATCH_OPCODE 0x08
#define EFI_BOOT_SCRIPT_DISPATCH_2_OPCODE 0x09
#define EFI_BOOT_SCRIPT_INFORMATION_OPCODE 0x0A
#define EFI_BOOT_SCRIPT_PCI_CONFIG2_WRITE_OPCODE 0x0B
#define EFI_BOOT_SCRIPT_PCI_CONFIG2_READ_WRITE_OPCODE 0x0C
#define EFI_BOOT_SCRIPT_IO_POLL_OPCODE 0x0D
#define EFI_BOOT_SCRIPT_MEM_POLL_OPCODE 0x0E
#define EFI_BOOT_SCRIPT_PCI_CONFIG_POLL_OPCODE 0x0F
#define EFI_BOOT_SCRIPT_PCI_CONFIG2_POLL_OPCODE 0x10
class S3BootScriptOpcode:
EFI_BOOT_SCRIPT_IO_WRITE_OPCODE = 0x00
EFI_BOOT_SCRIPT_IO_READ_WRITE_OPCODE = 0x01
EFI_BOOT_SCRIPT_MEM_WRITE_OPCODE = 0x02
EFI_BOOT_SCRIPT_MEM_READ_WRITE_OPCODE = 0x03
EFI_BOOT_SCRIPT_PCI_CONFIG_WRITE_OPCODE = 0x04
EFI_BOOT_SCRIPT_PCI_CONFIG_READ_WRITE_OPCODE = 0x05
EFI_BOOT_SCRIPT_SMBUS_EXECUTE_OPCODE = 0x06
EFI_BOOT_SCRIPT_STALL_OPCODE = 0x07
EFI_BOOT_SCRIPT_DISPATCH_OPCODE = 0x08
#EFI_BOOT_SCRIPT_DISPATCH_2_OPCODE = 0x09
#EFI_BOOT_SCRIPT_INFORMATION_OPCODE = 0x0A
#EFI_BOOT_SCRIPT_PCI_CONFIG2_WRITE_OPCODE = 0x0B
#EFI_BOOT_SCRIPT_PCI_CONFIG2_READ_WRITE_OPCODE = 0x0C
#EFI_BOOT_SCRIPT_IO_POLL_OPCODE = 0x0D
#EFI_BOOT_SCRIPT_MEM_POLL_OPCODE = 0x0E
#EFI_BOOT_SCRIPT_PCI_CONFIG_POLL_OPCODE = 0x0F
#EFI_BOOT_SCRIPT_PCI_CONFIG2_POLL_OPCODE = 0x10
#EFI_BOOT_SCRIPT_TABLE_OPCODE = 0xAA
EFI_BOOT_SCRIPT_TERMINATE_OPCODE = 0xFF
class S3BootScriptOpcode_MDE (S3BootScriptOpcode):
EFI_BOOT_SCRIPT_DISPATCH_2_OPCODE = 0x09
EFI_BOOT_SCRIPT_INFORMATION_OPCODE = 0x0A
EFI_BOOT_SCRIPT_PCI_CONFIG2_WRITE_OPCODE = 0x0B
EFI_BOOT_SCRIPT_PCI_CONFIG2_READ_WRITE_OPCODE = 0x0C
EFI_BOOT_SCRIPT_IO_POLL_OPCODE = 0x0D
EFI_BOOT_SCRIPT_MEM_POLL_OPCODE = 0x0E
EFI_BOOT_SCRIPT_PCI_CONFIG_POLL_OPCODE = 0x0F
EFI_BOOT_SCRIPT_PCI_CONFIG2_POLL_OPCODE = 0x10
#
# EdkCompatibilityPkg\Foundation\Framework\Include\EfiBootScript.h
#
#define EFI_BOOT_SCRIPT_IO_WRITE_OPCODE 0x00
#define EFI_BOOT_SCRIPT_IO_READ_WRITE_OPCODE 0x01
#define EFI_BOOT_SCRIPT_MEM_WRITE_OPCODE 0x02
#define EFI_BOOT_SCRIPT_MEM_READ_WRITE_OPCODE 0x03
#define EFI_BOOT_SCRIPT_PCI_CONFIG_WRITE_OPCODE 0x04
#define EFI_BOOT_SCRIPT_PCI_CONFIG_READ_WRITE_OPCODE 0x05
#define EFI_BOOT_SCRIPT_SMBUS_EXECUTE_OPCODE 0x06
#define EFI_BOOT_SCRIPT_STALL_OPCODE 0x07
#define EFI_BOOT_SCRIPT_DISPATCH_OPCODE 0x08
#
#//
#// Extensions to boot script definitions
#//
#define EFI_BOOT_SCRIPT_MEM_POLL_OPCODE 0x09
#define EFI_BOOT_SCRIPT_INFORMATION_OPCODE 0x0A
#define EFI_BOOT_SCRIPT_PCI_CONFIG2_WRITE_OPCODE 0x0B
#define EFI_BOOT_SCRIPT_PCI_CONFIG2_READ_WRITE_OPCODE 0x0C
#
#define EFI_BOOT_SCRIPT_TABLE_OPCODE 0xAA
#define EFI_BOOT_SCRIPT_TERMINATE_OPCODE 0xFF
class S3BootScriptOpcode_EdkCompat (S3BootScriptOpcode):
EFI_BOOT_SCRIPT_MEM_POLL_OPCODE = 0x09
EFI_BOOT_SCRIPT_INFORMATION_OPCODE = 0x0A
EFI_BOOT_SCRIPT_PCI_CONFIG2_WRITE_OPCODE = 0x0B
EFI_BOOT_SCRIPT_PCI_CONFIG2_READ_WRITE_OPCODE = 0x0C
EFI_BOOT_SCRIPT_TABLE_OPCODE = 0xAA
#
# Names of S3 Boot Script Opcodes
#
script_opcodes = {
S3BootScriptOpcode.EFI_BOOT_SCRIPT_IO_WRITE_OPCODE: "S3_BOOTSCRIPT_IO_WRITE",
S3BootScriptOpcode.EFI_BOOT_SCRIPT_IO_READ_WRITE_OPCODE: "S3_BOOTSCRIPT_IO_READ_WRITE",
S3BootScriptOpcode.EFI_BOOT_SCRIPT_MEM_WRITE_OPCODE: "S3_BOOTSCRIPT_MEM_WRITE",
S3BootScriptOpcode.EFI_BOOT_SCRIPT_MEM_READ_WRITE_OPCODE: "S3_BOOTSCRIPT_MEM_READ_WRITE",
S3BootScriptOpcode.EFI_BOOT_SCRIPT_PCI_CONFIG_WRITE_OPCODE: "S3_BOOTSCRIPT_PCI_CONFIG_WRITE",
S3BootScriptOpcode.EFI_BOOT_SCRIPT_PCI_CONFIG_READ_WRITE_OPCODE: "S3_BOOTSCRIPT_PCI_CONFIG_READ_WRITE",
S3BootScriptOpcode.EFI_BOOT_SCRIPT_SMBUS_EXECUTE_OPCODE: "S3_BOOTSCRIPT_SMBUS_EXECUTE",
S3BootScriptOpcode.EFI_BOOT_SCRIPT_STALL_OPCODE: "S3_BOOTSCRIPT_STALL",
S3BootScriptOpcode.EFI_BOOT_SCRIPT_DISPATCH_OPCODE: "S3_BOOTSCRIPT_DISPATCH",
#S3BootScriptOpcode.EFI_BOOT_SCRIPT_DISPATCH_2_OPCODE: "S3_BOOTSCRIPT_DISPATCH_2",
#S3BootScriptOpcode.EFI_BOOT_SCRIPT_INFORMATION_OPCODE: "S3_BOOTSCRIPT_INFORMATION",
#S3BootScriptOpcode.EFI_BOOT_SCRIPT_PCI_CONFIG2_WRITE_OPCODE: "S3_BOOTSCRIPT_PCI_CONFIG2_WRITE",
#S3BootScriptOpcode.EFI_BOOT_SCRIPT_PCI_CONFIG2_READ_WRITE_OPCODE: "S3_BOOTSCRIPT_PCI_CONFIG2_READ_WRITE",
#S3BootScriptOpcode.EFI_BOOT_SCRIPT_IO_POLL_OPCODE: "S3_BOOTSCRIPT_IO_POLL",
#S3BootScriptOpcode.EFI_BOOT_SCRIPT_MEM_POLL_OPCODE: "S3_BOOTSCRIPT_MEM_POLL",
#S3BootScriptOpcode.EFI_BOOT_SCRIPT_PCI_CONFIG_POLL_OPCODE: "S3_BOOTSCRIPT_PCI_CONFIG_POLL",
#S3BootScriptOpcode.EFI_BOOT_SCRIPT_PCI_CONFIG2_POLL_OPCODE: "S3_BOOTSCRIPT_PCI_CONFIG2_POLL",
#S3BootScriptOpcode.EFI_BOOT_SCRIPT_TABLE_OPCODE: "S3_BOOTSCRIPT_TABLE",
S3BootScriptOpcode.EFI_BOOT_SCRIPT_TERMINATE_OPCODE: "S3_BOOTSCRIPT_TERMINATE"
}
# //*******************************************
# // EFI_BOOT_SCRIPT_WIDTH
# //*******************************************
# typedef enum {
# EfiBootScriptWidthUint8,
# EfiBootScriptWidthUint16,
# EfiBootScriptWidthUint32,
# EfiBootScriptWidthUint64,
# EfiBootScriptWidthFifoUint8,
# EfiBootScriptWidthFifoUint16,
# EfiBootScriptWidthFifoUint32,
# EfiBootScriptWidthFifoUint64,
# EfiBootScriptWidthFillUint8,
# EfiBootScriptWidthFillUint16,
# EfiBootScriptWidthFillUint32,
# EfiBootScriptWidthFillUint64,
# EfiBootScriptWidthMaximum
# } EFI_BOOT_SCRIPT_WIDTH;
class S3BootScriptWidth:
EFI_BOOT_SCRIPT_WIDTH_UINT8 = 0x00
EFI_BOOT_SCRIPT_WIDTH_UINT16 = 0x01
EFI_BOOT_SCRIPT_WIDTH_UINT32 = 0x02
EFI_BOOT_SCRIPT_WIDTH_UINT64 = 0x03
script_width_sizes = {
S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT8 : 1,
S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT16 : 2,
S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT32 : 4,
S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT64 : 8
}
script_width_values = {
1 : S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT8,
2 : S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT16,
4 : S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT32,
8 : S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT64
}
script_width_formats = {
S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT8 : 'B',
S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT16 : 'H',
S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT32 : 'I',
S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT64 : 'Q'
}
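# Illustrative sketch: the three tables above let the boot-script decoder turn an
# entry's width field into a byte count and a struct format character, e.g. a UINT32
# value occupies 4 bytes and is unpacked with '<I'.
def _example_unpack_bootscript_value(width, raw):
    return struct.unpack('<%c' % script_width_formats[width], raw[:script_width_sizes[width]])[0]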
# //************************************************
# // EFI_SMBUS_DEVICE_ADDRESS
# //************************************************
# typedef struct _EFI_SMBUS_DEVICE_ADDRESS {
# UINTN SmbusDeviceAddress:7;
# } EFI_SMBUS_DEVICE_ADDRESS;
# //************************************************
# // EFI_SMBUS_DEVICE_COMMAND
# //************************************************
# typedef UINTN EFI_SMBUS_DEVICE_COMMAND;
#
# //************************************************
# // EFI_SMBUS_OPERATION
# //************************************************
# typedef enum _EFI_SMBUS_OPERATION {
# EfiSmbusQuickRead,
# EfiSmbusQuickWrite,
# EfiSmbusReceiveByte,
# EfiSmbusSendByte,
# EfiSmbusReadByte,
# EfiSmbusWriteByte,
# EfiSmbusReadWord,
# EfiSmbusWriteWord,
# EfiSmbusReadBlock,
# EfiSmbusWriteBlock,
# EfiSmbusProcessCall,
# EfiSmbusBWBRProcessCall
# } EFI_SMBUS_OPERATION;
class S3BootScriptSmbusOperation:
QUICK_READ = 0x00
QUICK_WRITE = 0x01
RECEIVE_BYTE = 0x02
SEND_BYTE = 0x03
READ_BYTE = 0x04
WRITE_BYTE = 0x05
READ_WORD = 0x06
WRITE_WORD = 0x07
READ_BLOCK = 0x08
WRITE_BLOCK = 0x09
PROCESS_CALL = 0x0A
BWBR_PROCESS_CALL = 0x0B
class op_io_pci_mem():
def __init__(self, opcode, size, width, address, unknown, count, buffer, value=None, mask=None):
self.opcode = opcode
self.size = size
self.width = width
self.address = address
self.unknown = unknown
self.count = count
self.value = value
self.mask = mask
self.name = script_opcodes[ opcode ]
self.buffer = buffer # data[ self.size : ]
self.values = None
if self.count is not None and self.count > 0 and self.buffer is not None:
sz = self.count * script_width_sizes[ self.width ]
if len(self.buffer) != sz:
logger().log( '[?] buffer size (0x%X) != Width x Count (0x%X)' % (len(self.buffer), sz) )
else:
self.values = list( struct.unpack( ('<%d%c' % (self.count,script_width_formats[self.width])), self.buffer ) )
def __str__(self):
str_r = " Opcode : %s (0x%04X)\n" % (self.name, self.opcode)
str_r += " Width : 0x%02X (%X bytes)\n" % (self.width, script_width_sizes[self.width])
str_r += " Address: 0x%08X\n" % self.address
if self.value is not None: str_r += " Value : 0x%08X\n" % self.value
if self.mask is not None: str_r += " Mask : 0x%08X\n" % self.mask
if self.unknown is not None: str_r += " Unknown: 0x%04X\n" % self.unknown
if self.count is not None: str_r += " Count : 0x%X\n" % self.count
if self.values is not None:
fmt = '0x%0' + ( '%dX' % (script_width_sizes[self.width]*2) )
str_r += " Values : %s\n" % (" ".join( [fmt % v for v in self.values] ))
elif self.buffer is not None:
str_r += (" Buffer (size = 0x%X):\n" % len(self.buffer)) + dump_buffer( self.buffer, 16 )
return str_r
class op_smbus_execute():
def __init__(self, opcode, size, slave_address, command, operation, peccheck):
self.opcode = opcode
self.size = size
self.slave_address = slave_address
self.command = command
self.operation = operation
self.peccheck = peccheck
self.name = script_opcodes[ opcode ]
def __str__(self):
str_r = " Opcode : %s (0x%04X)\n" % (self.name, self.opcode)
str_r += " Slave Address: 0x%02X\n" % self.slave_address
str_r += " Command : 0x%08X\n" % self.command
str_r += " Operation : 0x%02X\n" % self.operation
str_r += " PEC Check : %d\n" % self.peccheck
return str_r
#typedef struct {
# UINT16 OpCode;
# UINT8 Length;
# UINT64 Duration;
#} EFI_BOOT_SCRIPT_STALL;
class op_stall():
def __init__(self, opcode, size, duration):
self.opcode = opcode
self.size = size
self.duration = duration
self.name = script_opcodes[ self.opcode ]
def __str__(self):
str_r = " Opcode : %s (0x%04X)\n" % (self.name, self.opcode)
str_r += " Duration: 0x%08X (us)\n" % self.duration
return str_r
#typedef struct {
# UINT16 OpCode;
# UINT8 Length;
# EFI_PHYSICAL_ADDRESS EntryPoint;
#} EFI_BOOT_SCRIPT_DISPATCH;
class op_dispatch():
def __init__(self, opcode, size, entrypoint, context=None):
self.opcode = opcode
self.size = size
self.entrypoint = entrypoint
self.context = context
self.name = script_opcodes[ self.opcode ]
def __str__(self):
str_r = " Opcode : %s (0x%04X)\n" % (self.name, self.opcode)
str_r += " Entry Point: 0x%016X\n" % self.entrypoint
if self.context is not None: str_r += " Context : 0x%016X\n" % self.context
return str_r
#typedef struct {
# UINT16 OpCode;
# UINT8 Length;
# UINT32 Width;
# UINT64 Address;
# UINT64 Duration;
# UINT64 LoopTimes;
#} EFI_BOOT_SCRIPT_MEM_POLL;
class op_mem_poll():
def __init__(self, opcode, size, width, address, duration, looptimes):
self.opcode = opcode
self.size = size
self.width = width
self.address = address
self.duration = duration
self.looptimes = looptimes
self.name = 'S3_BOOTSCRIPT_MEM_POLL'
def __str__(self):
str_r = " Opcode : %s (0x%04X)\n" % (self.name, self.opcode)
str_r += " Width : 0x%02X (%X bytes)\n" % (self.width, script_width_sizes[self.width])
str_r += " Address : 0x%016X\n" % self.address
str_r += " Duration? : 0x%016X\n" % self.duration
str_r += " LoopTimes?: 0x%016X\n" % self.looptimes
return str_r
#typedef struct {
# UINT16 OpCode;
# UINT8 Length;
#} EFI_BOOT_SCRIPT_TERMINATE;
class op_terminate():
def __init__(self, opcode, size):
self.opcode = opcode
self.size = size
self.name = script_opcodes[ self.opcode ]
def __str__(self):
return " Opcode : %s (0x%02X)\n" % (self.name, self.opcode)
class op_unknown():
def __init__(self, opcode, size):
self.opcode = opcode
self.size = size
def __str__(self):
return " Opcode : unknown (0x%02X)\n" % self.opcode
class S3BOOTSCRIPT_ENTRY():
def __init__( self, script_type, index, offset_in_script, length, data=None ):
self.script_type = script_type
self.index = index
self.offset_in_script = offset_in_script
self.length = length
self.data = data
self.decoded_opcode = None
self.header_length = 0
def __str__(self):
entry_str = '' if self.index is None else ('[%03d] ' % self.index)
entry_str += ( 'Entry at offset 0x%04X (len = 0x%X, header len = 0x%X):' % (self.offset_in_script, self.length, self.header_length) )
if self.data: entry_str = entry_str + '\nData:\n' + dump_buffer(self.data, 16)
if self.decoded_opcode: entry_str = entry_str + 'Decoded:\n' + str(self.decoded_opcode)
return entry_str
# #################################################################################################
#
# UEFI Table Parsing Functionality
#
# #################################################################################################
MAX_EFI_TABLE_SIZE = 0x1000
# typedef struct {
# UINT64 Signature;
# UINT32 Revision;
# UINT32 HeaderSize;
# UINT32 CRC32;
# UINT32 Reserved;
# } EFI_TABLE_HEADER;
EFI_TABLE_HEADER_FMT = '=8sIIII'
EFI_TABLE_HEADER_SIZE = 0x18
class EFI_TABLE_HEADER( namedtuple('EFI_TABLE_HEADER', 'Signature Revision HeaderSize CRC32 Reserved') ):
__slots__ = ()
def __str__(self):
return """Header:
Signature : %s
Revision : %s
HeaderSize : 0x%08X
CRC32 : 0x%08X
Reserved : 0x%08X""" % ( self.Signature, EFI_SYSTEM_TABLE_REVISION(self.Revision), self.HeaderSize, self.CRC32, self.Reserved )
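# Illustrative usage sketch: 'buf' is assumed to hold at least EFI_TABLE_HEADER_SIZE
# bytes read from physical memory elsewhere; the common table header is unpacked and
# wrapped in the namedtuple defined above.
def _example_read_table_header(buf):
    return EFI_TABLE_HEADER(*struct.unpack_from(EFI_TABLE_HEADER_FMT, buf))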
# #################################################################################################
# EFI System Table
# #################################################################################################
#
# \MdePkg\Include\Uefi\UefiSpec.h
# -------------------------------
#
# //
# // EFI Runtime Services Table
# //
# #define EFI_SYSTEM_TABLE_SIGNATURE SIGNATURE_64 ('I','B','I',' ','S','Y','S','T')
# #define EFI_2_31_SYSTEM_TABLE_REVISION ((2 << 16) | (31))
# #define EFI_2_30_SYSTEM_TABLE_REVISION ((2 << 16) | (30))
# #define EFI_2_20_SYSTEM_TABLE_REVISION ((2 << 16) | (20))
# #define EFI_2_10_SYSTEM_TABLE_REVISION ((2 << 16) | (10))
# #define EFI_2_00_SYSTEM_TABLE_REVISION ((2 << 16) | (00))
# #define EFI_1_10_SYSTEM_TABLE_REVISION ((1 << 16) | (10))
# #define EFI_1_02_SYSTEM_TABLE_REVISION ((1 << 16) | (02))
# #define EFI_SYSTEM_TABLE_REVISION EFI_2_31_SYSTEM_TABLE_REVISION
#
# \EdkCompatibilityPkg\Foundation\Efi\Include\EfiApi.h
# ----------------------------------------------------
#
# //
# // EFI Configuration Table
# //
# typedef struct {
# EFI_GUID VendorGuid;
# VOID *VendorTable;
# } EFI_CONFIGURATION_TABLE;
#
#
# #define EFI_SYSTEM_TABLE_SIGNATURE 0x5453595320494249ULL
# struct _EFI_SYSTEM_TABLE {
# EFI_TABLE_HEADER Hdr;
#
# CHAR16 *FirmwareVendor;
# UINT32 FirmwareRevision;
#
# EFI_HANDLE ConsoleInHandle;
# EFI_SIMPLE_TEXT_IN_PROTOCOL *ConIn;
#
# EFI_HANDLE ConsoleOutHandle;
# EFI_SIMPLE_TEXT_OUT_PROTOCOL *ConOut;
#
# EFI_HANDLE StandardErrorHandle;
# EFI_SIMPLE_TEXT_OUT_PROTOCOL *StdErr;
#
# EFI_RUNTIME_SERVICES *RuntimeServices;
# EFI_BOOT_SERVICES *BootServices;
#
# UINTN NumberOfTableEntries;
# EFI_CONFIGURATION_TABLE *ConfigurationTable;
#
# };
EFI_SYSTEM_TABLE_SIGNATURE = 'IBI SYST'
EFI_2_50_SYSTEM_TABLE_REVISION = ((2 << 16) | (50))
EFI_2_40_SYSTEM_TABLE_REVISION = ((2 << 16) | (40))
EFI_2_31_SYSTEM_TABLE_REVISION = ((2 << 16) | (31))
EFI_2_30_SYSTEM_TABLE_REVISION = ((2 << 16) | (30))
EFI_2_20_SYSTEM_TABLE_REVISION = ((2 << 16) | (20))
EFI_2_10_SYSTEM_TABLE_REVISION = ((2 << 16) | (10))
EFI_2_00_SYSTEM_TABLE_REVISION = ((2 << 16) | (00))
EFI_1_10_SYSTEM_TABLE_REVISION = ((1 << 16) | (10))
EFI_1_02_SYSTEM_TABLE_REVISION = ((1 << 16) | (02))
EFI_REVISIONS = [EFI_2_50_SYSTEM_TABLE_REVISION, EFI_2_40_SYSTEM_TABLE_REVISION, EFI_2_31_SYSTEM_TABLE_REVISION, EFI_2_30_SYSTEM_TABLE_REVISION, EFI_2_20_SYSTEM_TABLE_REVISION, EFI_2_10_SYSTEM_TABLE_REVISION, EFI_2_00_SYSTEM_TABLE_REVISION, EFI_1_10_SYSTEM_TABLE_REVISION, EFI_1_02_SYSTEM_TABLE_REVISION ]
def EFI_SYSTEM_TABLE_REVISION(revision):
return ('%d.%d' % (revision>>16,revision&0xFFFF) )
EFI_SYSTEM_TABLE_FMT = '=12Q'
class EFI_SYSTEM_TABLE( namedtuple('EFI_SYSTEM_TABLE', 'FirmwareVendor FirmwareRevision ConsoleInHandle ConIn ConsoleOutHandle ConOut StandardErrorHandle StdErr RuntimeServices BootServices NumberOfTableEntries ConfigurationTable') ):
__slots__ = ()
def __str__(self):
return """EFI System Table:
FirmwareVendor : 0x%016X
FirmwareRevision : 0x%016X
ConsoleInHandle : 0x%016X
ConIn : 0x%016X
ConsoleOutHandle : 0x%016X
ConOut : 0x%016X
StandardErrorHandle : 0x%016X
StdErr : 0x%016X
RuntimeServices : 0x%016X
BootServices : 0x%016X
NumberOfTableEntries: 0x%016X
ConfigurationTable : 0x%016X
""" % ( self.FirmwareVendor, self.FirmwareRevision, self.ConsoleInHandle, self.ConIn, self.ConsoleOutHandle, self.ConOut, self.StandardErrorHandle, self.StdErr, self.RuntimeServices, self.BootServices, self.NumberOfTableEntries, self.ConfigurationTable )
# #################################################################################################
# EFI Runtime Services Table
# #################################################################################################
#
# \MdePkg\Include\Uefi\UefiSpec.h
# -------------------------------
#
# #define EFI_RUNTIME_SERVICES_SIGNATURE SIGNATURE_64 ('R','U','N','T','S','E','R','V')
# #define EFI_RUNTIME_SERVICES_REVISION EFI_2_31_SYSTEM_TABLE_REVISION
#
# ///
# /// EFI Runtime Services Table.
# ///
# typedef struct {
# ///
# /// The table header for the EFI Runtime Services Table.
# ///
# EFI_TABLE_HEADER Hdr;
#
# //
# // Time Services
# //
# EFI_GET_TIME GetTime;
# EFI_SET_TIME SetTime;
# EFI_GET_WAKEUP_TIME GetWakeupTime;
# EFI_SET_WAKEUP_TIME SetWakeupTime;
#
# //
# // Virtual Memory Services
# //
# EFI_SET_VIRTUAL_ADDRESS_MAP SetVirtualAddressMap;
# EFI_CONVERT_POINTER ConvertPointer;
#
# //
# // Variable Services
# //
# EFI_GET_VARIABLE GetVariable;
# EFI_GET_NEXT_VARIABLE_NAME GetNextVariableName;
# EFI_SET_VARIABLE SetVariable;
#
# //
# // Miscellaneous Services
# //
# EFI_GET_NEXT_HIGH_MONO_COUNT GetNextHighMonotonicCount;
# EFI_RESET_SYSTEM ResetSystem;
#
# //
# // UEFI 2.0 Capsule Services
# //
# EFI_UPDATE_CAPSULE UpdateCapsule;
# EFI_QUERY_CAPSULE_CAPABILITIES QueryCapsuleCapabilities;
#
# //
# // Miscellaneous UEFI 2.0 Service
# //
# EFI_QUERY_VARIABLE_INFO QueryVariableInfo;
# } EFI_RUNTIME_SERVICES;
EFI_RUNTIME_SERVICES_SIGNATURE = 'RUNTSERV'
EFI_RUNTIME_SERVICES_REVISION = EFI_2_31_SYSTEM_TABLE_REVISION
EFI_RUNTIME_SERVICES_TABLE_FMT = '=14Q'
class EFI_RUNTIME_SERVICES_TABLE( namedtuple('EFI_RUNTIME_SERVICES_TABLE', 'GetTime SetTime GetWakeupTime SetWakeupTime SetVirtualAddressMap ConvertPointer GetVariable GetNextVariableName SetVariable GetNextHighMonotonicCount ResetSystem UpdateCapsule QueryCapsuleCapabilities QueryVariableInfo') ):
__slots__ = ()
def __str__(self):
return """Runtime Services:
GetTime : 0x%016X
SetTime : 0x%016X
GetWakeupTime : 0x%016X
SetWakeupTime : 0x%016X
SetVirtualAddressMap : 0x%016X
ConvertPointer : 0x%016X
GetVariable : 0x%016X
GetNextVariableName : 0x%016X
SetVariable : 0x%016X
GetNextHighMonotonicCount: 0x%016X
ResetSystem : 0x%016X
UpdateCapsule : 0x%016X
QueryCapsuleCapabilities : 0x%016X
QueryVariableInfo : 0x%016X
""" % ( self.GetTime, self.SetTime, self.GetWakeupTime, self.SetWakeupTime, self.SetVirtualAddressMap, self.ConvertPointer, self.GetVariable, self.GetNextVariableName, self.SetVariable, self.GetNextHighMonotonicCount, self.ResetSystem, self.UpdateCapsule, self.QueryCapsuleCapabilities, self.QueryVariableInfo )
# #################################################################################################
# EFI Boot Services Table
# #################################################################################################
#
# \MdePkg\Include\Uefi\UefiSpec.h
# -------------------------------
#
# #define EFI_BOOT_SERVICES_SIGNATURE SIGNATURE_64 ('B','O','O','T','S','E','R','V')
# #define EFI_BOOT_SERVICES_REVISION EFI_2_31_SYSTEM_TABLE_REVISION
#
# ///
# /// EFI Boot Services Table.
# ///
# typedef struct {
# ///
# /// The table header for the EFI Boot Services Table.
# ///
# EFI_TABLE_HEADER Hdr;
#
# //
# // Task Priority Services
# //
# EFI_RAISE_TPL RaiseTPL;
# EFI_RESTORE_TPL RestoreTPL;
#
# //
# // Memory Services
# //
# EFI_ALLOCATE_PAGES AllocatePages;
# EFI_FREE_PAGES FreePages;
# EFI_GET_MEMORY_MAP GetMemoryMap;
# EFI_ALLOCATE_POOL AllocatePool;
# EFI_FREE_POOL FreePool;
#
# //
# // Event & Timer Services
# //
# EFI_CREATE_EVENT CreateEvent;
# EFI_SET_TIMER SetTimer;
# EFI_WAIT_FOR_EVENT WaitForEvent;
# EFI_SIGNAL_EVENT SignalEvent;
# EFI_CLOSE_EVENT CloseEvent;
# EFI_CHECK_EVENT CheckEvent;
#
# //
# // Protocol Handler Services
# //
# EFI_INSTALL_PROTOCOL_INTERFACE InstallProtocolInterface;
# EFI_REINSTALL_PROTOCOL_INTERFACE ReinstallProtocolInterface;
# EFI_UNINSTALL_PROTOCOL_INTERFACE UninstallProtocolInterface;
# EFI_HANDLE_PROTOCOL HandleProtocol;
# VOID *Reserved;
# EFI_REGISTER_PROTOCOL_NOTIFY RegisterProtocolNotify;
# EFI_LOCATE_HANDLE LocateHandle;
# EFI_LOCATE_DEVICE_PATH LocateDevicePath;
# EFI_INSTALL_CONFIGURATION_TABLE InstallConfigurationTable;
#
# //
# // Image Services
# //
# EFI_IMAGE_LOAD LoadImage;
# EFI_IMAGE_START StartImage;
# EFI_EXIT Exit;
# EFI_IMAGE_UNLOAD UnloadImage;
# EFI_EXIT_BOOT_SERVICES ExitBootServices;
#
# //
# // Miscellaneous Services
# //
# EFI_GET_NEXT_MONOTONIC_COUNT GetNextMonotonicCount;
# EFI_STALL Stall;
# EFI_SET_WATCHDOG_TIMER SetWatchdogTimer;
#
# //
# // DriverSupport Services
# //
# EFI_CONNECT_CONTROLLER ConnectController;
# EFI_DISCONNECT_CONTROLLER DisconnectController;
#
# //
# // Open and Close Protocol Services
# //
# EFI_OPEN_PROTOCOL OpenProtocol;
# EFI_CLOSE_PROTOCOL CloseProtocol;
# EFI_OPEN_PROTOCOL_INFORMATION OpenProtocolInformation;
#
# //
# // Library Services
# //
# EFI_PROTOCOLS_PER_HANDLE ProtocolsPerHandle;
# EFI_LOCATE_HANDLE_BUFFER LocateHandleBuffer;
# EFI_LOCATE_PROTOCOL LocateProtocol;
# EFI_INSTALL_MULTIPLE_PROTOCOL_INTERFACES InstallMultipleProtocolInterfaces;
# EFI_UNINSTALL_MULTIPLE_PROTOCOL_INTERFACES UninstallMultipleProtocolInterfaces;
#
# //
# // 32-bit CRC Services
# //
# EFI_CALCULATE_CRC32 CalculateCrc32;
#
# //
# // Miscellaneous Services
# //
# EFI_COPY_MEM CopyMem;
# EFI_SET_MEM SetMem;
# EFI_CREATE_EVENT_EX CreateEventEx;
# } EFI_BOOT_SERVICES;
EFI_BOOT_SERVICES_SIGNATURE = 'BOOTSERV'
EFI_BOOT_SERVICES_REVISION = EFI_2_31_SYSTEM_TABLE_REVISION
EFI_BOOT_SERVICES_TABLE_FMT = '=44Q'
class EFI_BOOT_SERVICES_TABLE( namedtuple('EFI_BOOT_SERVICES_TABLE', 'RaiseTPL RestoreTPL AllocatePages FreePages GetMemoryMap AllocatePool FreePool CreateEvent SetTimer WaitForEvent SignalEvent CloseEvent CheckEvent InstallProtocolInterface ReinstallProtocolInterface UninstallProtocolInterface HandleProtocol Reserved RegisterProtocolNotify LocateHandle LocateDevicePath InstallConfigurationTable LoadImage StartImage Exit UnloadImage ExitBootServices GetNextMonotonicCount Stall SetWatchdogTimer ConnectController DisconnectController OpenProtocol CloseProtocol OpenProtocolInformation ProtocolsPerHandle LocateHandleBuffer LocateProtocol InstallMultipleProtocolInterfaces UninstallMultipleProtocolInterfaces CalculateCrc32 CopyMem SetMem CreateEventEx') ):
__slots__ = ()
def __str__(self):
return """Boot Services:
RaiseTPL : 0x%016X
RestoreTPL : 0x%016X
AllocatePages : 0x%016X
FreePages : 0x%016X
GetMemoryMap : 0x%016X
AllocatePool : 0x%016X
FreePool : 0x%016X
CreateEvent : 0x%016X
SetTimer : 0x%016X
WaitForEvent : 0x%016X
SignalEvent : 0x%016X
CloseEvent : 0x%016X
CheckEvent : 0x%016X
InstallProtocolInterface : 0x%016X
ReinstallProtocolInterface : 0x%016X
UninstallProtocolInterface : 0x%016X
HandleProtocol : 0x%016X
Reserved : 0x%016X
RegisterProtocolNotify : 0x%016X
LocateHandle : 0x%016X
LocateDevicePath : 0x%016X
InstallConfigurationTable : 0x%016X
LoadImage : 0x%016X
StartImage : 0x%016X
Exit : 0x%016X
UnloadImage : 0x%016X
ExitBootServices : 0x%016X
GetNextMonotonicCount : 0x%016X
Stall : 0x%016X
SetWatchdogTimer : 0x%016X
ConnectController : 0x%016X
DisconnectController : 0x%016X
OpenProtocol : 0x%016X
CloseProtocol : 0x%016X
OpenProtocolInformation : 0x%016X
ProtocolsPerHandle : 0x%016X
LocateHandleBuffer : 0x%016X
LocateProtocol : 0x%016X
InstallMultipleProtocolInterfaces : 0x%016X
UninstallMultipleProtocolInterfaces: 0x%016X
CalculateCrc32 : 0x%016X
CopyMem : 0x%016X
SetMem : 0x%016X
CreateEventEx : 0x%016X
""" % ( self.RaiseTPL, self.RestoreTPL, self.AllocatePages, self.FreePages, self.GetMemoryMap, self.AllocatePool, self.FreePool, self.CreateEvent, self.SetTimer, self.WaitForEvent, self.SignalEvent, self.CloseEvent, self.CheckEvent, self.InstallProtocolInterface, self.ReinstallProtocolInterface, self.UninstallProtocolInterface, self.HandleProtocol, self.Reserved, self.RegisterProtocolNotify, self.LocateHandle, self.LocateDevicePath, self.InstallConfigurationTable, self.LoadImage, self.StartImage, self.Exit, self.UnloadImage, self.ExitBootServices, self.GetNextMonotonicCount, self.Stall, self.SetWatchdogTimer, self.ConnectController, self.DisconnectController, self.OpenProtocol, self.CloseProtocol, self.OpenProtocolInformation, self.ProtocolsPerHandle, self.LocateHandleBuffer, self.LocateProtocol, self.InstallMultipleProtocolInterfaces, self.UninstallMultipleProtocolInterfaces, self.CalculateCrc32, self.CopyMem, self.SetMem, self.CreateEventEx )
# #################################################################################################
# EFI System Configuration Table
# #################################################################################################
#
# \MdePkg\Include\Uefi\UefiSpec.h
# -------------------------------
#
#///
#/// Contains a set of GUID/pointer pairs comprised of the ConfigurationTable field in the
#/// EFI System Table.
#///
#typedef struct {
# ///
# /// The 128-bit GUID value that uniquely identifies the system configuration table.
# ///
# EFI_GUID VendorGuid;
# ///
# /// A pointer to the table associated with VendorGuid.
# ///
# VOID *VendorTable;
#} EFI_CONFIGURATION_TABLE;
#
EFI_VENDOR_TABLE_FORMAT = '<' + EFI_GUID_FMT + 'Q'
EFI_VENDOR_TABLE_SIZE = struct.calcsize(EFI_VENDOR_TABLE_FORMAT)
class EFI_VENDOR_TABLE( namedtuple('EFI_VENDOR_TABLE', 'VendorGuid0 VendorGuid1 VendorGuid2 VendorGuid3 VendorTable') ):
__slots__ = ()
def VendorGuid(self):
return EFI_GUID(self.VendorGuid0,self.VendorGuid1,self.VendorGuid2,self.VendorGuid3)
class EFI_CONFIGURATION_TABLE():
def __init__( self ):
self.VendorTables = {}
def __str__(self):
return ( 'Vendor Tables:\n%s' % (''.join( ['{%s} : 0x%016X\n' % (vt,self.VendorTables[vt]) for vt in self.VendorTables])) )
# #################################################################################################
# EFI DXE Services Table
# #################################################################################################
#
# \MdePkg\Include\Pi\PiDxeCis.h
# -----------------------------
#
# //
# // DXE Services Table
# //
# #define DXE_SERVICES_SIGNATURE 0x565245535f455844ULL
# #define DXE_SPECIFICATION_MAJOR_REVISION 1
# #define DXE_SPECIFICATION_MINOR_REVISION 20
# #define DXE_SERVICES_REVISION ((DXE_SPECIFICATION_MAJOR_REVISION<<16) | (DXE_SPECIFICATION_MINOR_REVISION))
#
# typedef struct {
# ///
# /// The table header for the DXE Services Table.
# /// This header contains the DXE_SERVICES_SIGNATURE and DXE_SERVICES_REVISION values.
# ///
# EFI_TABLE_HEADER Hdr;
#
# //
# // Global Coherency Domain Services
# //
# EFI_ADD_MEMORY_SPACE AddMemorySpace;
# EFI_ALLOCATE_MEMORY_SPACE AllocateMemorySpace;
# EFI_FREE_MEMORY_SPACE FreeMemorySpace;
# EFI_REMOVE_MEMORY_SPACE RemoveMemorySpace;
# EFI_GET_MEMORY_SPACE_DESCRIPTOR GetMemorySpaceDescriptor;
# EFI_SET_MEMORY_SPACE_ATTRIBUTES SetMemorySpaceAttributes;
# EFI_GET_MEMORY_SPACE_MAP GetMemorySpaceMap;
# EFI_ADD_IO_SPACE AddIoSpace;
# EFI_ALLOCATE_IO_SPACE AllocateIoSpace;
# EFI_FREE_IO_SPACE FreeIoSpace;
# EFI_REMOVE_IO_SPACE RemoveIoSpace;
# EFI_GET_IO_SPACE_DESCRIPTOR GetIoSpaceDescriptor;
# EFI_GET_IO_SPACE_MAP GetIoSpaceMap;
#
# //
# // Dispatcher Services
# //
# EFI_DISPATCH Dispatch;
# EFI_SCHEDULE Schedule;
# EFI_TRUST Trust;
# //
# // Service to process a single firmware volume found in a capsule
# //
# EFI_PROCESS_FIRMWARE_VOLUME ProcessFirmwareVolume;
# } DXE_SERVICES;
#DXE_SERVICES_SIGNATURE = 0x565245535f455844
#DXE_SPECIFICATION_MAJOR_REVISION = 1
#DXE_SPECIFICATION_MINOR_REVISION = 20
#DXE_SERVICES_REVISION = ((DXE_SPECIFICATION_MAJOR_REVISION<<16) | (DXE_SPECIFICATION_MINOR_REVISION))
EFI_DXE_SERVICES_TABLE_SIGNATURE = 'DXE_SERV' # 0x565245535f455844
EFI_DXE_SERVICES_TABLE_FMT = '=17Q'
class EFI_DXE_SERVICES_TABLE( namedtuple('EFI_DXE_SERVICES_TABLE', 'AddMemorySpace AllocateMemorySpace FreeMemorySpace RemoveMemorySpace GetMemorySpaceDescriptor SetMemorySpaceAttributes GetMemorySpaceMap AddIoSpace AllocateIoSpace FreeIoSpace RemoveIoSpace GetIoSpaceDescriptor GetIoSpaceMap Dispatch Schedule Trust ProcessFirmwareVolume') ):
__slots__ = ()
def __str__(self):
return """DXE Services:
AddMemorySpace : 0x%016X
AllocateMemorySpace : 0x%016X
FreeMemorySpace : 0x%016X
RemoveMemorySpace : 0x%016X
GetMemorySpaceDescriptor: 0x%016X
SetMemorySpaceAttributes: 0x%016X
GetMemorySpaceMap : 0x%016X
AddIoSpace : 0x%016X
AllocateIoSpace : 0x%016X
FreeIoSpace : 0x%016X
RemoveIoSpace : 0x%016X
GetIoSpaceDescriptor : 0x%016X
GetIoSpaceMap : 0x%016X
Dispatch : 0x%016X
Schedule : 0x%016X
Trust : 0x%016X
ProcessFirmwareVolume : 0x%016X
""" % ( self.AddMemorySpace, self.AllocateMemorySpace, self.FreeMemorySpace, self.RemoveMemorySpace, self.GetMemorySpaceDescriptor, self.SetMemorySpaceAttributes, self.GetMemorySpaceMap, self.AddIoSpace, self.AllocateIoSpace, self.FreeIoSpace, self.RemoveIoSpace, self.GetIoSpaceDescriptor, self.GetIoSpaceMap, self.Dispatch, self.Schedule, self.Trust, self.ProcessFirmwareVolume )
# #################################################################################################
# EFI PEI Services Table
# #################################################################################################
#
# //
# // Framework PEI Specification Revision information
# //
# #define FRAMEWORK_PEI_SPECIFICATION_MAJOR_REVISION 0
# #define FRAMEWORK_PEI_SPECIFICATION_MINOR_REVISION 91
#
#
# //
# // PEI services signature and Revision defined in Framework PEI spec
# //
# #define FRAMEWORK_PEI_SERVICES_SIGNATURE 0x5652455320494550ULL
# #define FRAMEWORK_PEI_SERVICES_REVISION ((FRAMEWORK_PEI_SPECIFICATION_MAJOR_REVISION<<16) | (FRAMEWORK_PEI_SPECIFICATION_MINOR_REVISION))
#
# ///
# /// FRAMEWORK_EFI_PEI_SERVICES is a collection of functions whose implementation is provided by the PEI
# /// Foundation. The table may be located in the temporary or permanent memory, depending upon the capabilities
# /// and phase of execution of PEI.
# ///
# /// These services fall into various classes, including the following:
# /// - Managing the boot mode.
# /// - Allocating both early and permanent memory.
# /// - Supporting the Firmware File System (FFS).
# /// - Abstracting the PPI database abstraction.
# /// - Creating Hand-Off Blocks (HOBs).
# ///
# struct _FRAMEWORK_EFI_PEI_SERVICES {
# EFI_TABLE_HEADER Hdr;
# //
# // PPI Functions
# //
# EFI_PEI_INSTALL_PPI InstallPpi;
# EFI_PEI_REINSTALL_PPI ReInstallPpi;
# EFI_PEI_LOCATE_PPI LocatePpi;
# EFI_PEI_NOTIFY_PPI NotifyPpi;
# //
# // Boot Mode Functions
# //
# EFI_PEI_GET_BOOT_MODE GetBootMode;
# EFI_PEI_SET_BOOT_MODE SetBootMode;
# //
# // HOB Functions
# //
# EFI_PEI_GET_HOB_LIST GetHobList;
# EFI_PEI_CREATE_HOB CreateHob;
# //
# // Firmware Volume Functions
# //
# EFI_PEI_FFS_FIND_NEXT_VOLUME FfsFindNextVolume;
# EFI_PEI_FFS_FIND_NEXT_FILE FfsFindNextFile;
# EFI_PEI_FFS_FIND_SECTION_DATA FfsFindSectionData;
# //
# // PEI Memory Functions
# //
# EFI_PEI_INSTALL_PEI_MEMORY InstallPeiMemory;
# EFI_PEI_ALLOCATE_PAGES AllocatePages;
# EFI_PEI_ALLOCATE_POOL AllocatePool;
# EFI_PEI_COPY_MEM CopyMem;
# EFI_PEI_SET_MEM SetMem;
# //
# // (the following interfaces are installed by publishing PEIM)
# // Status Code
# //
# EFI_PEI_REPORT_STATUS_CODE ReportStatusCode;
# //
# // Reset
# //
# EFI_PEI_RESET_SYSTEM ResetSystem;
# ///
# /// Inconsistent with specification here:
# /// In Framework Spec, PeiCis0.91, CpuIo and PciCfg are NOT pointers.
# ///
#
# //
# // I/O Abstractions
# //
# EFI_PEI_CPU_IO_PPI *CpuIo;
# EFI_PEI_PCI_CFG_PPI *PciCfg;
# };
EFI_FRAMEWORK_PEI_SERVICES_TABLE_SIGNATURE = 0x5652455320494550
#FRAMEWORK_PEI_SERVICES_SIGNATURE = 0x5652455320494550
FRAMEWORK_PEI_SPECIFICATION_MAJOR_REVISION = 0
FRAMEWORK_PEI_SPECIFICATION_MINOR_REVISION = 91
FRAMEWORK_PEI_SERVICES_REVISION = ((FRAMEWORK_PEI_SPECIFICATION_MAJOR_REVISION<<16) | (FRAMEWORK_PEI_SPECIFICATION_MINOR_REVISION))
# #################################################################################################
# EFI System Management System Table
# #################################################################################################
#
#define SMM_SMST_SIGNATURE EFI_SIGNATURE_32 ('S', 'M', 'S', 'T')
#define EFI_SMM_SYSTEM_TABLE_REVISION (0 << 16) | (0x09)
# //
# // System Management System Table (SMST)
# //
# struct _EFI_SMM_SYSTEM_TABLE {
# ///
# /// The table header for the System Management System Table (SMST).
# ///
# EFI_TABLE_HEADER Hdr;
#
# ///
# /// A pointer to a NULL-terminated Unicode string containing the vendor name. It is
# /// permissible for this pointer to be NULL.
# ///
# CHAR16 *SmmFirmwareVendor;
# ///
# /// The particular revision of the firmware.
# ///
# UINT32 SmmFirmwareRevision;
#
# ///
# /// Adds, updates, or removes a configuration table entry from the SMST.
# ///
# EFI_SMM_INSTALL_CONFIGURATION_TABLE SmmInstallConfigurationTable;
#
# //
# // I/O Services
# //
# ///
# /// A GUID that designates the particular CPU I/O services.
# ///
# EFI_GUID EfiSmmCpuIoGuid;
# ///
# /// Provides the basic memory and I/O interfaces that are used to abstract accesses to
# /// devices.
# ///
# EFI_SMM_CPU_IO_INTERFACE SmmIo;
#
# //
# // Runtime memory service
# //
# ///
# ///
# /// Allocates pool memory from SMRAM for IA-32 or runtime memory for the
# /// Itanium processor family.
# ///
# EFI_SMMCORE_ALLOCATE_POOL SmmAllocatePool;
# ///
# /// Returns pool memory to the system.
# ///
# EFI_SMMCORE_FREE_POOL SmmFreePool;
# ///
# /// Allocates memory pages from the system.
# ///
# EFI_SMMCORE_ALLOCATE_PAGES SmmAllocatePages;
# ///
# /// Frees memory pages for the system.
# ///
# EFI_SMMCORE_FREE_PAGES SmmFreePages;
#
# //
# // MP service
# //
#
# /// Inconsistent with specification here:
# /// In Framework Spec, this definition does not exist. This method is introduced in PI1.1 specification for
# /// the implementation needed.
# EFI_SMM_STARTUP_THIS_AP SmmStartupThisAp;
#
# //
# // CPU information records
# //
# ///
# /// A 1-relative number between 1 and the NumberOfCpus field. This field designates
# /// which processor is executing the SMM infrastructure. This number also serves as an
# /// index into the CpuSaveState and CpuOptionalFloatingPointState
# /// fields.
# ///
# UINTN CurrentlyExecutingCpu;
# ///
# /// The number of EFI Configuration Tables in the buffer
# /// SmmConfigurationTable.
# ///
# UINTN NumberOfCpus;
# ///
# /// A pointer to the EFI Configuration Tables. The number of entries in the table is
# /// NumberOfTableEntries.
# ///
# EFI_SMM_CPU_SAVE_STATE *CpuSaveState;
# ///
# /// A pointer to a catenation of the EFI_SMM_FLOATING_POINT_SAVE_STATE.
# /// The size of this entire table is NumberOfCpus* size of the
# /// EFI_SMM_FLOATING_POINT_SAVE_STATE. These fields are populated only if
# /// there is at least one SMM driver that has registered for a callback with the
# /// FloatingPointSave field in EFI_SMM_BASE_PROTOCOL.RegisterCallback() set to TRUE.
# ///
# EFI_SMM_FLOATING_POINT_SAVE_STATE *CpuOptionalFloatingPointState;
#
# //
# // Extensibility table
# //
# ///
# /// The number of EFI Configuration Tables in the buffer
# /// SmmConfigurationTable.
# ///
# UINTN NumberOfTableEntries;
# ///
# /// A pointer to the EFI Configuration Tables. The number of entries in the table is
# /// NumberOfTableEntries.
# ///
# EFI_CONFIGURATION_TABLE *SmmConfigurationTable;
# };
EFI_SMM_SYSTEM_TABLE_SIGNATURE = 'SMST'
EFI_SMM_SYSTEM_TABLE_REVISION = (0 << 16) | (0x09)
EFI_TABLES = {
EFI_SYSTEM_TABLE_SIGNATURE : {'name' : 'EFI System Table', 'struct' : EFI_SYSTEM_TABLE, 'fmt' : EFI_SYSTEM_TABLE_FMT },
EFI_RUNTIME_SERVICES_SIGNATURE : {'name' : 'EFI Runtime Services Table', 'struct' : EFI_RUNTIME_SERVICES_TABLE, 'fmt' : EFI_RUNTIME_SERVICES_TABLE_FMT },
EFI_BOOT_SERVICES_SIGNATURE : {'name' : 'EFI Boot Services Table', 'struct' : EFI_BOOT_SERVICES_TABLE, 'fmt' : EFI_BOOT_SERVICES_TABLE_FMT },
EFI_DXE_SERVICES_TABLE_SIGNATURE : {'name' : 'EFI DXE Services Table', 'struct' : EFI_DXE_SERVICES_TABLE, 'fmt' : EFI_DXE_SERVICES_TABLE_FMT }
#EFI_FRAMEWORK_PEI_SERVICES_TABLE_SIGNATURE : {'name' : 'EFI Framework PEI Services Table', 'struct' : EFI_FRAMEWORK_PEI_SERVICES_TABLE, 'fmt' : EFI_FRAMEWORK_PEI_SERVICES_TABLE_FMT },
#EFI_SMM_SYSTEM_TABLE_SIGNATURE : {'name' : 'EFI SMM System Table', 'struct' : EFI_SMM_SYSTEM_TABLE, 'fmt' : EFI_SMM_SYSTEM_TABLE_FMT },
#EFI_CONFIG_TABLE_SIGNATURE : {'name' : 'EFI Configuration Table', 'struct' : EFI_CONFIG_TABLE, 'fmt' : EFI_CONFIG_TABLE_FMT }
}
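# Illustrative usage sketch: once a known signature string is found while scanning
# memory, EFI_TABLES supplies the matching namedtuple class and struct format for
# decoding the table body that follows the header.
def _example_decode_efi_table(signature, body):
    entry = EFI_TABLES.get(signature)
    if entry is None:
        return None
    return entry['struct']._make(struct.unpack_from(entry['fmt'], body))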
| gpl-2.0 | -3,650,301,885,846,886,000 | 38.831601 | 958 | 0.594186 | false |
proppy/appengine-try-python-flask | lib/werkzeug/testapp.py | 303 | 9398 | # -*- coding: utf-8 -*-
"""
werkzeug.testapp
~~~~~~~~~~~~~~~~
Provide a small test application that can be used to test a WSGI server
and check it for WSGI compliance.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import werkzeug
from textwrap import wrap
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.utils import escape
import base64
logo = Response(base64.b64decode(
'''R0lGODlhoACgAOMIAAEDACwpAEpCAGdgAJaKAM28AOnVAP3rAP/////////
//////////////////////yH5BAEKAAgALAAAAACgAKAAAAT+EMlJq704680R+F0ojmRpnuj0rWnrv
nB8rbRs33gu0bzu/0AObxgsGn3D5HHJbCUFyqZ0ukkSDlAidctNFg7gbI9LZlrBaHGtzAae0eloe25
7w9EDOX2fst/xenyCIn5/gFqDiVVDV4aGeYiKkhSFjnCQY5OTlZaXgZp8nJ2ekaB0SQOjqphrpnOiq
ncEn65UsLGytLVmQ6m4sQazpbtLqL/HwpnER8bHyLrLOc3Oz8PRONPU1crXN9na263dMt/g4SzjMeX
m5yDpLqgG7OzJ4u8lT/P69ej3JPn69kHzN2OIAHkB9RUYSFCFQYQJFTIkCDBiwoXWGnowaLEjRm7+G
p9A7Hhx4rUkAUaSLJlxHMqVMD/aSycSZkyTplCqtGnRAM5NQ1Ly5OmzZc6gO4d6DGAUKA+hSocWYAo
SlM6oUWX2O/o0KdaVU5vuSQLAa0ADwQgMEMB2AIECZhVSnTno6spgbtXmHcBUrQACcc2FrTrWS8wAf
78cMFBgwIBgbN+qvTt3ayikRBk7BoyGAGABAdYyfdzRQGV3l4coxrqQ84GpUBmrdR3xNIDUPAKDBSA
ADIGDhhqTZIWaDcrVX8EsbNzbkvCOxG8bN5w8ly9H8jyTJHC6DFndQydbguh2e/ctZJFXRxMAqqPVA
tQH5E64SPr1f0zz7sQYjAHg0In+JQ11+N2B0XXBeeYZgBZFx4tqBToiTCPv0YBgQv8JqA6BEf6RhXx
w1ENhRBnWV8ctEX4Ul2zc3aVGcQNC2KElyTDYyYUWvShdjDyMOGMuFjqnII45aogPhz/CodUHFwaDx
lTgsaOjNyhGWJQd+lFoAGk8ObghI0kawg+EV5blH3dr+digkYuAGSaQZFHFz2P/cTaLmhF52QeSb45
Jwxd+uSVGHlqOZpOeJpCFZ5J+rkAkFjQ0N1tah7JJSZUFNsrkeJUJMIBi8jyaEKIhKPomnC91Uo+NB
yyaJ5umnnpInIFh4t6ZSpGaAVmizqjpByDegYl8tPE0phCYrhcMWSv+uAqHfgH88ak5UXZmlKLVJhd
dj78s1Fxnzo6yUCrV6rrDOkluG+QzCAUTbCwf9SrmMLzK6p+OPHx7DF+bsfMRq7Ec61Av9i6GLw23r
idnZ+/OO0a99pbIrJkproCQMA17OPG6suq3cca5ruDfXCCDoS7BEdvmJn5otdqscn+uogRHHXs8cbh
EIfYaDY1AkrC0cqwcZpnM6ludx72x0p7Fo/hZAcpJDjax0UdHavMKAbiKltMWCF3xxh9k25N/Viud8
ba78iCvUkt+V6BpwMlErmcgc502x+u1nSxJSJP9Mi52awD1V4yB/QHONsnU3L+A/zR4VL/indx/y64
gqcj+qgTeweM86f0Qy1QVbvmWH1D9h+alqg254QD8HJXHvjQaGOqEqC22M54PcftZVKVSQG9jhkv7C
JyTyDoAJfPdu8v7DRZAxsP/ky9MJ3OL36DJfCFPASC3/aXlfLOOON9vGZZHydGf8LnxYJuuVIbl83y
Az5n/RPz07E+9+zw2A2ahz4HxHo9Kt79HTMx1Q7ma7zAzHgHqYH0SoZWyTuOLMiHwSfZDAQTn0ajk9
YQqodnUYjByQZhZak9Wu4gYQsMyEpIOAOQKze8CmEF45KuAHTvIDOfHJNipwoHMuGHBnJElUoDmAyX
c2Qm/R8Ah/iILCCJOEokGowdhDYc/yoL+vpRGwyVSCWFYZNljkhEirGXsalWcAgOdeAdoXcktF2udb
qbUhjWyMQxYO01o6KYKOr6iK3fE4MaS+DsvBsGOBaMb0Y6IxADaJhFICaOLmiWTlDAnY1KzDG4ambL
cWBA8mUzjJsN2KjSaSXGqMCVXYpYkj33mcIApyhQf6YqgeNAmNvuC0t4CsDbSshZJkCS1eNisKqlyG
cF8G2JeiDX6tO6Mv0SmjCa3MFb0bJaGPMU0X7c8XcpvMaOQmCajwSeY9G0WqbBmKv34DsMIEztU6Y2
KiDlFdt6jnCSqx7Dmt6XnqSKaFFHNO5+FmODxMCWBEaco77lNDGXBM0ECYB/+s7nKFdwSF5hgXumQe
EZ7amRg39RHy3zIjyRCykQh8Zo2iviRKyTDn/zx6EefptJj2Cw+Ep2FSc01U5ry4KLPYsTyWnVGnvb
UpyGlhjBUljyjHhWpf8OFaXwhp9O4T1gU9UeyPPa8A2l0p1kNqPXEVRm1AOs1oAGZU596t6SOR2mcB
Oco1srWtkaVrMUzIErrKri85keKqRQYX9VX0/eAUK1hrSu6HMEX3Qh2sCh0q0D2CtnUqS4hj62sE/z
aDs2Sg7MBS6xnQeooc2R2tC9YrKpEi9pLXfYXp20tDCpSP8rKlrD4axprb9u1Df5hSbz9QU0cRpfgn
kiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs
='''), mimetype='image/png')
TEMPLATE = u'''\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<title>WSGI Information</title>
<style type="text/css">
@import url(http://fonts.googleapis.com/css?family=Ubuntu);
body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
'Verdana', sans-serif; background-color: white; color: #000;
font-size: 15px; text-align: center; }
#logo { float: right; padding: 0 0 10px 10px; }
div.box { text-align: left; width: 45em; margin: auto; padding: 50px 0;
background-color: white; }
h1, h2 { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
'Geneva', 'Verdana', sans-serif; font-weight: normal; }
h1 { margin: 0 0 30px 0; }
h2 { font-size: 1.4em; margin: 1em 0 0.5em 0; }
table { width: 100%%; border-collapse: collapse; border: 1px solid #AFC5C9 }
table th { background-color: #AFC1C4; color: white; font-size: 0.72em;
font-weight: normal; width: 18em; vertical-align: top;
padding: 0.5em 0 0.1em 0.5em; }
table td { border: 1px solid #AFC5C9; padding: 0.1em 0 0.1em 0.5em; }
code { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
monospace; font-size: 0.7em; }
ul li { line-height: 1.5em; }
ul.path { font-size: 0.7em; margin: 0 -30px; padding: 8px 30px;
list-style: none; background: #E8EFF0; }
ul.path li { line-height: 1.6em; }
li.virtual { color: #999; text-decoration: underline; }
li.exp { background: white; }
</style>
<div class="box">
<img src="?resource=logo" id="logo" alt="[The Werkzeug Logo]" />
<h1>WSGI Information</h1>
<p>
This page displays all available information about the WSGI server and
the underlying Python interpreter.
<h2 id="python-interpreter">Python Interpreter</h2>
<table>
<tr>
<th>Python Version
<td>%(python_version)s
<tr>
<th>Platform
<td>%(platform)s [%(os)s]
<tr>
<th>API Version
<td>%(api_version)s
<tr>
<th>Byteorder
<td>%(byteorder)s
<tr>
<th>Werkzeug Version
<td>%(werkzeug_version)s
</table>
<h2 id="wsgi-environment">WSGI Environment</h2>
<table>%(wsgi_env)s</table>
<h2 id="installed-eggs">Installed Eggs</h2>
<p>
The following python packages were installed on the system as
Python eggs:
<ul>%(python_eggs)s</ul>
<h2 id="sys-path">System Path</h2>
<p>
The following paths are the current contents of the load path. The
following entries are looked up for Python packages. Note that not
all items in this path are folders. Gray and underlined items are
entries pointing to invalid resources or used by custom import hooks
such as the zip importer.
<p>
Items with a bright background were expanded for display from a relative
path. If you encounter such paths in the output you might want to check
your setup as relative paths are usually problematic in multithreaded
environments.
<ul class="path">%(sys_path)s</ul>
</div>
'''
def iter_sys_path():
if os.name == 'posix':
def strip(x):
prefix = os.path.expanduser('~')
if x.startswith(prefix):
x = '~' + x[len(prefix):]
return x
else:
strip = lambda x: x
cwd = os.path.abspath(os.getcwd())
for item in sys.path:
path = os.path.join(cwd, item or os.path.curdir)
yield strip(os.path.normpath(path)), \
not os.path.isdir(path), path != item
def render_testapp(req):
try:
import pkg_resources
except ImportError:
eggs = ()
else:
eggs = sorted(pkg_resources.working_set,
key=lambda x: x.project_name.lower())
python_eggs = []
for egg in eggs:
try:
version = egg.version
except (ValueError, AttributeError):
version = 'unknown'
python_eggs.append('<li>%s <small>[%s]</small>' % (
escape(egg.project_name),
escape(version)
))
wsgi_env = []
sorted_environ = sorted(req.environ.items(),
key=lambda x: repr(x[0]).lower())
for key, value in sorted_environ:
wsgi_env.append('<tr><th>%s<td><code>%s</code>' % (
escape(str(key)),
' '.join(wrap(escape(repr(value))))
))
sys_path = []
for item, virtual, expanded in iter_sys_path():
class_ = []
if virtual:
class_.append('virtual')
if expanded:
class_.append('exp')
sys_path.append('<li%s>%s' % (
class_ and ' class="%s"' % ' '.join(class_) or '',
escape(item)
))
return (TEMPLATE % {
'python_version': '<br>'.join(escape(sys.version).splitlines()),
'platform': escape(sys.platform),
'os': escape(os.name),
'api_version': sys.api_version,
'byteorder': sys.byteorder,
'werkzeug_version': werkzeug.__version__,
'python_eggs': '\n'.join(python_eggs),
'wsgi_env': '\n'.join(wsgi_env),
'sys_path': '\n'.join(sys_path)
}).encode('utf-8')
def test_app(environ, start_response):
"""Simple test application that dumps the environment. You can use
it to check if Werkzeug is working properly:
.. sourcecode:: pycon
>>> from werkzeug.serving import run_simple
>>> from werkzeug.testapp import test_app
>>> run_simple('localhost', 3000, test_app)
* Running on http://localhost:3000/
The application displays important information from the WSGI environment,
the Python interpreter and the installed libraries.
"""
req = Request(environ, populate_request=False)
if req.args.get('resource') == 'logo':
response = logo
else:
response = Response(render_testapp(req), mimetype='text/html')
return response(environ, start_response)
if __name__ == '__main__':
from werkzeug.serving import run_simple
run_simple('localhost', 5000, test_app, use_reloader=True)
| apache-2.0 | 4,733,074,001,145,304,000 | 39.86087 | 83 | 0.694084 | false |
jazkarta/edx-platform-for-isc | common/lib/xmodule/xmodule/modulestore/tests/test_split_modulestore_bulk_operations.py | 8 | 33066 | import copy
import ddt
import unittest
from bson.objectid import ObjectId
from mock import MagicMock, Mock, call
from xmodule.modulestore.split_mongo.split import SplitBulkWriteMixin
from xmodule.modulestore.split_mongo.mongo_connection import MongoConnection
from opaque_keys.edx.locator import CourseLocator
class TestBulkWriteMixin(unittest.TestCase):
def setUp(self):
super(TestBulkWriteMixin, self).setUp()
self.bulk = SplitBulkWriteMixin()
self.bulk.SCHEMA_VERSION = 1
self.clear_cache = self.bulk._clear_cache = Mock(name='_clear_cache')
self.conn = self.bulk.db_connection = MagicMock(name='db_connection', spec=MongoConnection)
self.conn.get_course_index.return_value = {'initial': 'index'}
self.course_key = CourseLocator('org', 'course', 'run-a', branch='test')
self.course_key_b = CourseLocator('org', 'course', 'run-b', branch='test')
self.structure = {'this': 'is', 'a': 'structure', '_id': ObjectId()}
self.definition = {'this': 'is', 'a': 'definition', '_id': ObjectId()}
self.index_entry = {'this': 'is', 'an': 'index'}
def assertConnCalls(self, *calls):
self.assertEqual(list(calls), self.conn.mock_calls)
def assertCacheNotCleared(self):
self.assertFalse(self.clear_cache.called)
class TestBulkWriteMixinPreviousTransaction(TestBulkWriteMixin):
"""
Verify that opening and closing a transaction doesn't affect later behaviour.
"""
def setUp(self):
super(TestBulkWriteMixinPreviousTransaction, self).setUp()
self.bulk._begin_bulk_operation(self.course_key)
self.bulk.insert_course_index(self.course_key, MagicMock('prev-index-entry'))
self.bulk.update_structure(self.course_key, {'this': 'is', 'the': 'previous structure', '_id': ObjectId()})
self.bulk._end_bulk_operation(self.course_key)
self.conn.reset_mock()
self.clear_cache.reset_mock()
@ddt.ddt
class TestBulkWriteMixinClosed(TestBulkWriteMixin):
"""
Tests of the bulk write mixin when bulk operations aren't active.
"""
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_no_bulk_read_structure(self, version_guid):
# Reading a structure when no bulk operation is active should just call
# through to the db_connection
result = self.bulk.get_structure(self.course_key, version_guid)
self.assertConnCalls(call.get_structure(self.course_key.as_object_id(version_guid)))
self.assertEqual(result, self.conn.get_structure.return_value)
self.assertCacheNotCleared()
def test_no_bulk_write_structure(self):
# Writing a structure when no bulk operation is active should just
# call through to the db_connection. It should also clear the
# system cache
self.bulk.update_structure(self.course_key, self.structure)
self.assertConnCalls(call.insert_structure(self.structure))
self.clear_cache.assert_called_once_with(self.structure['_id'])
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_no_bulk_read_definition(self, version_guid):
# Reading a definition when no bulk operation is active should just call
# through to the db_connection
result = self.bulk.get_definition(self.course_key, version_guid)
self.assertConnCalls(call.get_definition(self.course_key.as_object_id(version_guid)))
self.assertEqual(result, self.conn.get_definition.return_value)
def test_no_bulk_write_definition(self):
# Writing a definition when no bulk operation is active should just
# call through to the db_connection.
self.bulk.update_definition(self.course_key, self.definition)
self.assertConnCalls(call.insert_definition(self.definition))
@ddt.data(True, False)
def test_no_bulk_read_index(self, ignore_case):
# Reading a course index when no bulk operation is active should just call
# through to the db_connection
result = self.bulk.get_course_index(self.course_key, ignore_case=ignore_case)
self.assertConnCalls(call.get_course_index(self.course_key, ignore_case))
self.assertEqual(result, self.conn.get_course_index.return_value)
self.assertCacheNotCleared()
def test_no_bulk_write_index(self):
# Writing a course index when no bulk operation is active should just call
# through to the db_connection
self.bulk.insert_course_index(self.course_key, self.index_entry)
self.assertConnCalls(call.insert_course_index(self.index_entry))
self.assertCacheNotCleared()
def test_out_of_order_end(self):
# Calling _end_bulk_operation without a corresponding _begin...
# is a noop
self.bulk._end_bulk_operation(self.course_key)
def test_write_new_index_on_close(self):
self.conn.get_course_index.return_value = None
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.insert_course_index(self.course_key, self.index_entry)
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.conn.insert_course_index.assert_called_once_with(self.index_entry)
def test_write_updated_index_on_close(self):
old_index = {'this': 'is', 'an': 'old index'}
self.conn.get_course_index.return_value = old_index
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.insert_course_index(self.course_key, self.index_entry)
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.conn.update_course_index.assert_called_once_with(self.index_entry, from_index=old_index)
def test_write_structure_on_close(self):
self.conn.get_course_index.return_value = None
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_structure(self.course_key, self.structure)
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.assertConnCalls(call.insert_structure(self.structure))
def test_write_multiple_structures_on_close(self):
self.conn.get_course_index.return_value = None
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_structure(self.course_key.replace(branch='a'), self.structure)
other_structure = {'another': 'structure', '_id': ObjectId()}
self.bulk.update_structure(self.course_key.replace(branch='b'), other_structure)
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.assertItemsEqual(
[call.insert_structure(self.structure), call.insert_structure(other_structure)],
self.conn.mock_calls
)
def test_write_index_and_definition_on_close(self):
original_index = {'versions': {}}
self.conn.get_course_index.return_value = copy.deepcopy(original_index)
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_definition(self.course_key, self.definition)
self.bulk.insert_course_index(self.course_key, {'versions': {self.course_key.branch: self.definition['_id']}})
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.assertConnCalls(
call.insert_definition(self.definition),
call.update_course_index(
{'versions': {self.course_key.branch: self.definition['_id']}},
from_index=original_index
)
)
def test_write_index_and_multiple_definitions_on_close(self):
original_index = {'versions': {'a': ObjectId(), 'b': ObjectId()}}
self.conn.get_course_index.return_value = copy.deepcopy(original_index)
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_definition(self.course_key.replace(branch='a'), self.definition)
other_definition = {'another': 'definition', '_id': ObjectId()}
self.bulk.update_definition(self.course_key.replace(branch='b'), other_definition)
self.bulk.insert_course_index(self.course_key, {'versions': {'a': self.definition['_id'], 'b': other_definition['_id']}})
self.bulk._end_bulk_operation(self.course_key)
self.assertItemsEqual(
[
call.insert_definition(self.definition),
call.insert_definition(other_definition),
call.update_course_index(
{'versions': {'a': self.definition['_id'], 'b': other_definition['_id']}},
from_index=original_index
)
],
self.conn.mock_calls
)
def test_write_definition_on_close(self):
self.conn.get_course_index.return_value = None
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_definition(self.course_key, self.definition)
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.assertConnCalls(call.insert_definition(self.definition))
def test_write_multiple_definitions_on_close(self):
self.conn.get_course_index.return_value = None
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_definition(self.course_key.replace(branch='a'), self.definition)
other_definition = {'another': 'definition', '_id': ObjectId()}
self.bulk.update_definition(self.course_key.replace(branch='b'), other_definition)
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.assertItemsEqual(
[call.insert_definition(self.definition), call.insert_definition(other_definition)],
self.conn.mock_calls
)
def test_write_index_and_structure_on_close(self):
original_index = {'versions': {}}
self.conn.get_course_index.return_value = copy.deepcopy(original_index)
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_structure(self.course_key, self.structure)
self.bulk.insert_course_index(self.course_key, {'versions': {self.course_key.branch: self.structure['_id']}})
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.assertConnCalls(
call.insert_structure(self.structure),
call.update_course_index(
{'versions': {self.course_key.branch: self.structure['_id']}},
from_index=original_index
)
)
def test_write_index_and_multiple_structures_on_close(self):
original_index = {'versions': {'a': ObjectId(), 'b': ObjectId()}}
self.conn.get_course_index.return_value = copy.deepcopy(original_index)
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_structure(self.course_key.replace(branch='a'), self.structure)
other_structure = {'another': 'structure', '_id': ObjectId()}
self.bulk.update_structure(self.course_key.replace(branch='b'), other_structure)
self.bulk.insert_course_index(self.course_key, {'versions': {'a': self.structure['_id'], 'b': other_structure['_id']}})
self.bulk._end_bulk_operation(self.course_key)
self.assertItemsEqual(
[
call.insert_structure(self.structure),
call.insert_structure(other_structure),
call.update_course_index(
{'versions': {'a': self.structure['_id'], 'b': other_structure['_id']}},
from_index=original_index
)
],
self.conn.mock_calls
)
def test_version_structure_creates_new_version(self):
self.assertNotEquals(
self.bulk.version_structure(self.course_key, self.structure, 'user_id')['_id'],
self.structure['_id']
)
def test_version_structure_new_course(self):
self.conn.get_course_index.return_value = None
self.bulk._begin_bulk_operation(self.course_key)
version_result = self.bulk.version_structure(self.course_key, self.structure, 'user_id')
get_result = self.bulk.get_structure(self.course_key, version_result['_id'])
self.assertEquals(version_result, get_result)
class TestBulkWriteMixinClosedAfterPrevTransaction(TestBulkWriteMixinClosed, TestBulkWriteMixinPreviousTransaction):
"""
    Test that operations with a closed transaction aren't affected by a previously executed transaction
"""
pass
@ddt.ddt
class TestBulkWriteMixinFindMethods(TestBulkWriteMixin):
"""
Tests of BulkWriteMixin methods for finding many structures or indexes
"""
def test_no_bulk_find_matching_course_indexes(self):
branch = Mock(name='branch')
search_targets = MagicMock(name='search_targets')
self.conn.find_matching_course_indexes.return_value = [Mock(name='result')]
result = self.bulk.find_matching_course_indexes(branch, search_targets)
self.assertConnCalls(call.find_matching_course_indexes(branch, search_targets))
self.assertEqual(result, self.conn.find_matching_course_indexes.return_value)
self.assertCacheNotCleared()
@ddt.data(
(None, None, [], []),
(
'draft',
None,
[{'versions': {'draft': '123'}}],
[
{'versions': {'published': '123'}},
{}
],
),
(
'draft',
{'f1': 'v1'},
[{'versions': {'draft': '123'}, 'search_targets': {'f1': 'v1'}}],
[
{'versions': {'draft': '123'}, 'search_targets': {'f1': 'value2'}},
{'versions': {'published': '123'}, 'search_targets': {'f1': 'v1'}},
{'search_targets': {'f1': 'v1'}},
{'versions': {'draft': '123'}},
],
),
(
None,
{'f1': 'v1'},
[
{'versions': {'draft': '123'}, 'search_targets': {'f1': 'v1'}},
{'versions': {'published': '123'}, 'search_targets': {'f1': 'v1'}},
{'search_targets': {'f1': 'v1'}},
],
[
{'versions': {'draft': '123'}, 'search_targets': {'f1': 'v2'}},
{'versions': {'draft': '123'}, 'search_targets': {'f2': 'v1'}},
{'versions': {'draft': '123'}},
],
),
(
None,
{'f1': 'v1', 'f2': 2},
[
{'search_targets': {'f1': 'v1', 'f2': 2}},
{'search_targets': {'f1': 'v1', 'f2': 2}},
],
[
{'versions': {'draft': '123'}, 'search_targets': {'f1': 'v1'}},
{'search_targets': {'f1': 'v1'}},
{'versions': {'draft': '123'}, 'search_targets': {'f1': 'v2'}},
{'versions': {'draft': '123'}},
],
),
)
@ddt.unpack
def test_find_matching_course_indexes(self, branch, search_targets, matching, unmatching):
db_indexes = [{'org': 'what', 'course': 'this', 'run': 'needs'}]
for n, index in enumerate(matching + unmatching):
course_key = CourseLocator('org', 'course', 'run{}'.format(n))
self.bulk._begin_bulk_operation(course_key)
for attr in ['org', 'course', 'run']:
index[attr] = getattr(course_key, attr)
self.bulk.insert_course_index(course_key, index)
expected = matching + db_indexes
self.conn.find_matching_course_indexes.return_value = db_indexes
result = self.bulk.find_matching_course_indexes(branch, search_targets)
self.assertItemsEqual(result, expected)
for item in unmatching:
self.assertNotIn(item, result)
def test_no_bulk_find_structures_by_id(self):
ids = [Mock(name='id')]
self.conn.find_structures_by_id.return_value = [MagicMock(name='result')]
result = self.bulk.find_structures_by_id(ids)
self.assertConnCalls(call.find_structures_by_id(ids))
self.assertEqual(result, self.conn.find_structures_by_id.return_value)
self.assertCacheNotCleared()
@ddt.data(
([], [], []),
([1, 2, 3], [1, 2], [1, 2]),
([1, 2, 3], [1], [1, 2]),
([1, 2, 3], [], [1, 2]),
)
@ddt.unpack
def test_find_structures_by_id(self, search_ids, active_ids, db_ids):
db_structure = lambda _id: {'db': 'structure', '_id': _id}
active_structure = lambda _id: {'active': 'structure', '_id': _id}
db_structures = [db_structure(_id) for _id in db_ids if _id not in active_ids]
for n, _id in enumerate(active_ids):
course_key = CourseLocator('org', 'course', 'run{}'.format(n))
self.bulk._begin_bulk_operation(course_key)
self.bulk.update_structure(course_key, active_structure(_id))
self.conn.find_structures_by_id.return_value = db_structures
results = self.bulk.find_structures_by_id(search_ids)
self.conn.find_structures_by_id.assert_called_once_with(list(set(search_ids) - set(active_ids)))
for _id in active_ids:
if _id in search_ids:
self.assertIn(active_structure(_id), results)
else:
self.assertNotIn(active_structure(_id), results)
for _id in db_ids:
if _id in search_ids and _id not in active_ids:
self.assertIn(db_structure(_id), results)
else:
self.assertNotIn(db_structure(_id), results)
@ddt.data(
([], [], []),
([1, 2, 3], [1, 2], [1, 2]),
([1, 2, 3], [1], [1, 2]),
([1, 2, 3], [], [1, 2]),
)
@ddt.unpack
def test_get_definitions(self, search_ids, active_ids, db_ids):
db_definition = lambda _id: {'db': 'definition', '_id': _id}
active_definition = lambda _id: {'active': 'definition', '_id': _id}
db_definitions = [db_definition(_id) for _id in db_ids if _id not in active_ids]
self.bulk._begin_bulk_operation(self.course_key)
for n, _id in enumerate(active_ids):
self.bulk.update_definition(self.course_key, active_definition(_id))
self.conn.get_definitions.return_value = db_definitions
results = self.bulk.get_definitions(self.course_key, search_ids)
self.conn.get_definitions.assert_called_once_with(list(set(search_ids) - set(active_ids)))
for _id in active_ids:
if _id in search_ids:
self.assertIn(active_definition(_id), results)
else:
self.assertNotIn(active_definition(_id), results)
for _id in db_ids:
if _id in search_ids and _id not in active_ids:
self.assertIn(db_definition(_id), results)
else:
self.assertNotIn(db_definition(_id), results)
def test_no_bulk_find_structures_derived_from(self):
ids = [Mock(name='id')]
self.conn.find_structures_derived_from.return_value = [MagicMock(name='result')]
result = self.bulk.find_structures_derived_from(ids)
self.assertConnCalls(call.find_structures_derived_from(ids))
self.assertEqual(result, self.conn.find_structures_derived_from.return_value)
self.assertCacheNotCleared()
@ddt.data(
# Test values are:
# - previous_versions to search for
# - documents in the cache with $previous_version.$_id
# - documents in the db with $previous_version.$_id
([], [], []),
(['1', '2', '3'], ['1.a', '1.b', '2.c'], ['1.a', '2.c']),
(['1', '2', '3'], ['1.a'], ['1.a', '2.c']),
(['1', '2', '3'], [], ['1.a', '2.c']),
(['1', '2', '3'], ['4.d'], ['1.a', '2.c']),
)
@ddt.unpack
def test_find_structures_derived_from(self, search_ids, active_ids, db_ids):
def db_structure(_id):
previous, _, current = _id.partition('.')
return {'db': 'structure', 'previous_version': previous, '_id': current}
def active_structure(_id):
previous, _, current = _id.partition('.')
return {'active': 'structure', 'previous_version': previous, '_id': current}
db_structures = [db_structure(_id) for _id in db_ids]
active_structures = []
for n, _id in enumerate(active_ids):
course_key = CourseLocator('org', 'course', 'run{}'.format(n))
self.bulk._begin_bulk_operation(course_key)
structure = active_structure(_id)
self.bulk.update_structure(course_key, structure)
active_structures.append(structure)
self.conn.find_structures_derived_from.return_value = db_structures
results = self.bulk.find_structures_derived_from(search_ids)
self.conn.find_structures_derived_from.assert_called_once_with(search_ids)
for structure in active_structures:
if structure['previous_version'] in search_ids:
self.assertIn(structure, results)
else:
self.assertNotIn(structure, results)
for structure in db_structures:
if (
structure['previous_version'] in search_ids and # We're searching for this document
not any(active.endswith(structure['_id']) for active in active_ids) # This document doesn't match any active _ids
):
self.assertIn(structure, results)
else:
self.assertNotIn(structure, results)
def test_no_bulk_find_ancestor_structures(self):
original_version = Mock(name='original_version')
block_id = Mock(name='block_id')
self.conn.find_ancestor_structures.return_value = [MagicMock(name='result')]
result = self.bulk.find_ancestor_structures(original_version, block_id)
self.assertConnCalls(call.find_ancestor_structures(original_version, block_id))
self.assertEqual(result, self.conn.find_ancestor_structures.return_value)
self.assertCacheNotCleared()
@ddt.data(
# Test values are:
# - original_version
# - block_id
# - matching documents in the cache
# - non-matching documents in the cache
# - expected documents returned from the db
# - unexpected documents returned from the db
('ov', 'bi', [{'original_version': 'ov', 'blocks': {'bi': {'edit_info': {'update_version': 'foo'}}}}], [], [], []),
('ov', 'bi', [{'original_version': 'ov', 'blocks': {'bi': {'edit_info': {'update_version': 'foo'}}}, '_id': 'foo'}], [], [], [{'_id': 'foo'}]),
('ov', 'bi', [], [{'blocks': {'bi': {'edit_info': {'update_version': 'foo'}}}}], [], []),
('ov', 'bi', [], [{'original_version': 'ov'}], [], []),
('ov', 'bi', [], [], [{'original_version': 'ov', 'blocks': {'bi': {'edit_info': {'update_version': 'foo'}}}}], []),
(
'ov',
'bi',
[{'original_version': 'ov', 'blocks': {'bi': {'edit_info': {'update_version': 'foo'}}}}],
[],
[{'original_version': 'ov', 'blocks': {'bi': {'edit_info': {'update_version': 'bar'}}}}],
[]
),
)
@ddt.unpack
def test_find_ancestor_structures(self, original_version, block_id, active_match, active_unmatch, db_match, db_unmatch):
for structure in active_match + active_unmatch + db_match + db_unmatch:
structure.setdefault('_id', ObjectId())
for n, structure in enumerate(active_match + active_unmatch):
course_key = CourseLocator('org', 'course', 'run{}'.format(n))
self.bulk._begin_bulk_operation(course_key)
self.bulk.update_structure(course_key, structure)
self.conn.find_ancestor_structures.return_value = db_match + db_unmatch
results = self.bulk.find_ancestor_structures(original_version, block_id)
self.conn.find_ancestor_structures.assert_called_once_with(original_version, block_id)
self.assertItemsEqual(active_match + db_match, results)
@ddt.ddt
class TestBulkWriteMixinOpen(TestBulkWriteMixin):
"""
Tests of the bulk write mixin when bulk write operations are open
"""
def setUp(self):
super(TestBulkWriteMixinOpen, self).setUp()
self.bulk._begin_bulk_operation(self.course_key)
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_structure_without_write_from_db(self, version_guid):
# Reading a structure before it's been written (while in bulk operation mode)
# returns the structure from the database
result = self.bulk.get_structure(self.course_key, version_guid)
self.assertEquals(self.conn.get_structure.call_count, 1)
self.assertEqual(result, self.conn.get_structure.return_value)
self.assertCacheNotCleared()
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_structure_without_write_only_reads_once(self, version_guid):
# Reading the same structure multiple times shouldn't hit the database
# more than once
for _ in xrange(2):
result = self.bulk.get_structure(self.course_key, version_guid)
self.assertEquals(self.conn.get_structure.call_count, 1)
self.assertEqual(result, self.conn.get_structure.return_value)
self.assertCacheNotCleared()
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_structure_after_write_no_db(self, version_guid):
# Reading a structure that's already been written shouldn't hit the db at all
self.structure['_id'] = version_guid
self.bulk.update_structure(self.course_key, self.structure)
result = self.bulk.get_structure(self.course_key, version_guid)
self.assertEquals(self.conn.get_structure.call_count, 0)
self.assertEqual(result, self.structure)
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_structure_after_write_after_read(self, version_guid):
# Reading a structure that's been updated after being pulled from the db should
# still get the updated value
self.structure['_id'] = version_guid
self.bulk.get_structure(self.course_key, version_guid)
self.bulk.update_structure(self.course_key, self.structure)
result = self.bulk.get_structure(self.course_key, version_guid)
self.assertEquals(self.conn.get_structure.call_count, 1)
self.assertEqual(result, self.structure)
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_definition_without_write_from_db(self, version_guid):
# Reading a definition before it's been written (while in bulk operation mode)
# returns the definition from the database
result = self.bulk.get_definition(self.course_key, version_guid)
self.assertEquals(self.conn.get_definition.call_count, 1)
self.assertEqual(result, self.conn.get_definition.return_value)
self.assertCacheNotCleared()
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_definition_without_write_only_reads_once(self, version_guid):
# Reading the same definition multiple times shouldn't hit the database
# more than once
for _ in xrange(2):
result = self.bulk.get_definition(self.course_key, version_guid)
self.assertEquals(self.conn.get_definition.call_count, 1)
self.assertEqual(result, self.conn.get_definition.return_value)
self.assertCacheNotCleared()
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_definition_after_write_no_db(self, version_guid):
# Reading a definition that's already been written shouldn't hit the db at all
self.definition['_id'] = version_guid
self.bulk.update_definition(self.course_key, self.definition)
result = self.bulk.get_definition(self.course_key, version_guid)
self.assertEquals(self.conn.get_definition.call_count, 0)
self.assertEqual(result, self.definition)
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_definition_after_write_after_read(self, version_guid):
# Reading a definition that's been updated after being pulled from the db should
# still get the updated value
self.definition['_id'] = version_guid
self.bulk.get_definition(self.course_key, version_guid)
self.bulk.update_definition(self.course_key, self.definition)
result = self.bulk.get_definition(self.course_key, version_guid)
self.assertEquals(self.conn.get_definition.call_count, 1)
self.assertEqual(result, self.definition)
@ddt.data(True, False)
def test_read_index_without_write_from_db(self, ignore_case):
# Reading the index without writing to it should pull from the database
result = self.bulk.get_course_index(self.course_key, ignore_case=ignore_case)
self.assertEquals(self.conn.get_course_index.call_count, 1)
self.assertEquals(self.conn.get_course_index.return_value, result)
@ddt.data(True, False)
def test_read_index_without_write_only_reads_once(self, ignore_case):
# Reading the index multiple times should only result in one read from
# the database
for _ in xrange(2):
result = self.bulk.get_course_index(self.course_key, ignore_case=ignore_case)
self.assertEquals(self.conn.get_course_index.call_count, 1)
self.assertEquals(self.conn.get_course_index.return_value, result)
@ddt.data(True, False)
def test_read_index_after_write(self, ignore_case):
        # Reading the index after a write should still hit the database once to fetch the
# initial index, and should return the written index_entry
self.bulk.insert_course_index(self.course_key, self.index_entry)
result = self.bulk.get_course_index(self.course_key, ignore_case=ignore_case)
self.assertEquals(self.conn.get_course_index.call_count, 1)
self.assertEquals(self.index_entry, result)
def test_read_index_ignore_case(self):
# Reading using ignore case should find an already written entry with a different case
self.bulk.insert_course_index(self.course_key, self.index_entry)
result = self.bulk.get_course_index(
self.course_key.replace(
org=self.course_key.org.upper(),
course=self.course_key.course.title(),
run=self.course_key.run.upper()
),
ignore_case=True
)
self.assertEquals(self.conn.get_course_index.call_count, 1)
self.assertEquals(self.index_entry, result)
def test_version_structure_creates_new_version_before_read(self):
self.assertNotEquals(
self.bulk.version_structure(self.course_key, self.structure, 'user_id')['_id'],
self.structure['_id']
)
def test_version_structure_creates_new_version_after_read(self):
self.conn.get_structure.return_value = copy.deepcopy(self.structure)
self.bulk.get_structure(self.course_key, self.structure['_id'])
self.assertNotEquals(
self.bulk.version_structure(self.course_key, self.structure, 'user_id')['_id'],
self.structure['_id']
)
def test_copy_branch_versions(self):
# Directly updating an index so that the draft branch points to the published index
# version should work, and should only persist a single structure
self.maxDiff = None
published_structure = {'published': 'structure', '_id': ObjectId()}
self.bulk.update_structure(self.course_key, published_structure)
index = {'versions': {'published': published_structure['_id']}}
self.bulk.insert_course_index(self.course_key, index)
index_copy = copy.deepcopy(index)
index_copy['versions']['draft'] = index['versions']['published']
self.bulk.update_course_index(self.course_key, index_copy)
self.bulk._end_bulk_operation(self.course_key)
self.conn.insert_structure.assert_called_once_with(published_structure)
self.conn.update_course_index.assert_called_once_with(index_copy, from_index=self.conn.get_course_index.return_value)
self.conn.get_course_index.assert_called_once_with(self.course_key)
class TestBulkWriteMixinOpenAfterPrevTransaction(TestBulkWriteMixinOpen, TestBulkWriteMixinPreviousTransaction):
"""
    Test that operations with an open transaction aren't affected by a previously executed transaction
"""
pass
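# Illustrative addition (not part of the original tests): a minimal sketch of the
# bulk-write pattern exercised above, using only methods that appear in these
# tests (_begin_bulk_operation, update_structure, insert_course_index,
# _end_bulk_operation). The structure/index payloads are made-up examples.
def _example_bulk_write_pattern(bulk, course_key):
    bulk._begin_bulk_operation(course_key)
    structure = {'blocks': {}, '_id': ObjectId()}
    bulk.update_structure(course_key, structure)
    bulk.insert_course_index(
        course_key, {'versions': {course_key.branch: structure['_id']}})
    # Nothing is written to the db_connection until the bulk operation ends.
    bulk._end_bulk_operation(course_key)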
| agpl-3.0 | -314,310,144,678,326,900 | 47.412884 | 151 | 0.623934 | false |
sandeepdsouza93/TensorFlow-15712 | tensorflow/python/client/timeline.py | 19 | 23737 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Timeline visualization for TensorFlow using Chrome Trace Format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
# The timeline target is usually imported as part of BUILD target
# "platform_test", which includes also includes the "platform"
# dependency. This is why the logging import here is okay.
from tensorflow.python.platform import tf_logging as logging
class AllocationMaximum(collections.namedtuple(
'AllocationMaximum', ('timestamp', 'num_bytes', 'tensors'))):
"""Stores the maximum allocation for a given allocator within the timelne.
Parameters:
timestamp: `tensorflow::Env::NowMicros()` when this maximum was reached.
num_bytes: the total memory used at this time.
tensors: the set of tensors allocated at this time.
"""
pass
class StepStatsAnalysis(collections.namedtuple(
'StepStatsAnalysis', ('chrome_trace', 'allocator_maximums'))):
"""Stores the step stats analysis output.
Parameters:
chrome_trace: A dict containing the chrome trace analysis.
allocator_maximums: A dict mapping allocator names to AllocationMaximum.
"""
pass
class _ChromeTraceFormatter(object):
"""A helper class for generating traces in Chrome Trace Format."""
def __init__(self, show_memory=False):
"""Constructs a new Chrome Trace formatter."""
self._show_memory = show_memory
self._events = []
self._metadata = []
def _create_event(self, ph, category, name, pid, tid, timestamp):
"""Creates a new Chrome Trace event.
For details of the file format, see:
https://github.com/catapult-project/catapult/blob/master/tracing/README.md
Args:
ph: The type of event - usually a single character.
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
Returns:
A JSON compatible event object.
"""
event = {}
event['ph'] = ph
event['cat'] = category
event['name'] = name
event['pid'] = pid
event['tid'] = tid
event['ts'] = timestamp
return event
def emit_pid(self, name, pid):
"""Adds a process metadata event to the trace.
Args:
name: The process name as a string.
pid: Identifier of the process as an integer.
"""
event = {}
event['name'] = 'process_name'
event['ph'] = 'M'
event['pid'] = pid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_tid(self, name, pid, tid):
"""Adds a thread metadata event to the trace.
Args:
name: The thread name as a string.
pid: Identifier of the process as an integer.
tid: Identifier of the thread as an integer.
"""
event = {}
event['name'] = 'thread_name'
event['ph'] = 'M'
event['pid'] = pid
event['tid'] = tid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_region(self, timestamp, duration, pid, tid, category, name, args):
"""Adds a region event to the trace.
Args:
timestamp: The start timestamp of this region as a long integer.
duration: The duration of this region as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
category: The event category as a string.
name: The event name as a string.
args: A JSON-compatible dictionary of event arguments.
"""
event = self._create_event('X', category, name, pid, tid, timestamp)
event['dur'] = duration
event['args'] = args
self._events.append(event)
def emit_obj_create(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object creation event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('N', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
def emit_obj_delete(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object deletion event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('D', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
def emit_obj_snapshot(self, category, name, timestamp, pid, tid, object_id,
snapshot):
"""Adds an object snapshot event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
snapshot: A JSON-compatible representation of the object.
"""
event = self._create_event('O', category, name, pid, tid, timestamp)
event['id'] = object_id
event['args'] = {'snapshot': snapshot}
self._events.append(event)
def emit_flow_start(self, name, timestamp, pid, tid, flow_id):
"""Adds a flow start event to the trace.
When matched with a flow end event (with the same 'flow_id') this will
cause the trace viewer to draw an arrow between the start and end events.
Args:
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
flow_id: Identifier of the flow as an integer.
"""
event = self._create_event('s', 'DataFlow', name, pid, tid, timestamp)
event['id'] = flow_id
self._events.append(event)
def emit_flow_end(self, name, timestamp, pid, tid, flow_id):
"""Adds a flow end event to the trace.
When matched with a flow start event (with the same 'flow_id') this will
cause the trace viewer to draw an arrow between the start and end events.
Args:
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
flow_id: Identifier of the flow as an integer.
"""
event = self._create_event('t', 'DataFlow', name, pid, tid, timestamp)
event['id'] = flow_id
self._events.append(event)
def emit_counter(self, category, name, pid, timestamp, counter, value):
"""Emits a record for a single counter.
Args:
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
counter: Name of the counter as a string.
value: Value of the counter as an integer.
"""
event = self._create_event('C', category, name, pid, 0, timestamp)
event['args'] = {counter: value}
self._events.append(event)
def emit_counters(self, category, name, pid, timestamp, counters):
"""Emits a counter record for the dictionary 'counters'.
Args:
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
counters: Dictionary of counter values.
"""
event = self._create_event('C', category, name, pid, 0, timestamp)
event['args'] = counters.copy()
self._events.append(event)
def format_to_string(self, pretty=False):
"""Formats the chrome trace to a string.
Args:
pretty: (Optional.) If True, produce human-readable JSON output.
Returns:
A JSON-formatted string in Chrome Trace format.
"""
trace = {}
trace['traceEvents'] = self._metadata + self._events
if pretty:
return json.dumps(trace, indent=4, separators=(',', ': '))
else:
return json.dumps(trace, separators=(',', ':'))
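# Illustrative addition (not part of the original module): a minimal sketch of
# driving _ChromeTraceFormatter directly; the pid/tid/timestamp/name values are
# arbitrary examples.
def _example_chrome_trace():
    formatter = _ChromeTraceFormatter()
    formatter.emit_pid('/cpu:0 Compute', 0)
    # timestamp=1000, duration=50, pid=0, tid=0, category, name, args
    formatter.emit_region(1000, 50, 0, 0, 'Op', 'MatMul', {'name': 'mm'})
    return formatter.format_to_string(pretty=True)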
class _TensorTracker(object):
"""An internal class to track the lifetime of a Tensor."""
def __init__(self, name, object_id, timestamp, pid, allocator, num_bytes):
"""Creates an object to track tensor references.
This class is not thread safe and is intended only for internal use by
the 'Timeline' class in this file.
Args:
name: The name of the Tensor as a string.
object_id: Chrome Trace object identifier assigned for this Tensor.
timestamp: The creation timestamp of this event as a long integer.
      pid: Process identifier of the associated device, as an integer.
allocator: Name of the allocator used to create the Tensor.
num_bytes: Number of bytes allocated (long integer).
Returns:
A 'TensorTracker' object.
"""
self._name = name
self._pid = pid
self._object_id = object_id
self._create_time = timestamp
self._allocator = allocator
self._num_bytes = num_bytes
self._ref_times = []
self._unref_times = []
@property
def name(self):
"""Name of this tensor."""
return self._name
@property
def pid(self):
"""ID of the process which created this tensor (an integer)."""
return self._pid
@property
def create_time(self):
"""Timestamp when this tensor was created (long integer)."""
return self._create_time
@property
def object_id(self):
"""Returns the object identifier of this tensor (integer)."""
return self._object_id
@property
def num_bytes(self):
"""Size of this tensor in bytes (long integer)."""
return self._num_bytes
@property
def allocator(self):
"""Name of the allocator used to create this tensor (string)."""
return self._allocator
@property
def last_unref(self):
"""Last unreference timestamp of this tensor (long integer)."""
return max(self._unref_times)
def add_ref(self, timestamp):
"""Adds a reference to this tensor with the specified timestamp.
Args:
timestamp: Timestamp of object reference as an integer.
"""
self._ref_times.append(timestamp)
def add_unref(self, timestamp):
"""Adds an unref to this tensor with the specified timestamp.
Args:
timestamp: Timestamp of object unreference as an integer.
"""
self._unref_times.append(timestamp)
class Timeline(object):
"""A class for visualizing execution timelines of TensorFlow steps."""
def __init__(self, step_stats, graph=None):
"""Constructs a new Timeline.
A 'Timeline' is used for visualizing the execution of a TensorFlow
computation. It shows the timings and concurrency of execution at
the granularity of TensorFlow Ops.
This class is not thread safe.
Args:
step_stats: The 'StepStats' proto recording execution times.
graph: (Optional) The 'Graph' that was executed.
"""
self._step_stats = step_stats
self._graph = graph
self._chrome_trace = _ChromeTraceFormatter()
self._next_pid = 0
self._device_pids = {} # device name -> pid for compute activity.
self._tensor_pids = {} # device name -> pid for tensors.
self._tensors = {} # tensor_name -> TensorTracker
self._next_flow_id = 0
self._flow_starts = {} # tensor_name -> (timestamp, pid, tid)
self._alloc_times = {} # tensor_name -> ( time, allocator, size )
self._allocator_maximums = {} # allocator name => maximum bytes long
def _alloc_pid(self):
"""Allocate a process Id."""
pid = self._next_pid
self._next_pid += 1
return pid
def _alloc_flow_id(self):
"""Allocate a flow Id."""
flow_id = self._next_flow_id
self._next_flow_id += 1
return flow_id
def _parse_op_label(self, label):
"""Parses the fields in a node timeline label."""
nn, rest = label.split(' = ')
op, rest = rest.split('(')
if rest == ')':
inputs = []
else:
inputs = rest[:-1].split(', ')
return nn, op, inputs
def _assign_lanes(self):
"""Assigns non-overlapping lanes for the activities on each device."""
for device_stats in self._step_stats.dev_stats:
# TODO(pbar): Genuine thread IDs in NodeExecStats might be helpful.
lanes = [0]
for ns in device_stats.node_stats:
l = -1
for (i, lts) in enumerate(lanes):
if ns.all_start_micros > lts:
l = i
lanes[l] = ns.all_start_micros + ns.all_end_rel_micros
break
if l < 0:
l = len(lanes)
lanes.append(ns.all_start_micros + ns.all_end_rel_micros)
ns.thread_id = l
def _emit_op(self, nodestats, pid, is_gputrace):
"""Generates a Chrome Trace event to show Op execution.
Args:
nodestats: The 'NodeExecStats' proto recording op execution.
pid: The pid assigned for the device where this op ran.
is_gputrace: If True then this op came from the GPUTracer.
"""
node_name = nodestats.node_name
start = nodestats.all_start_micros
duration = nodestats.all_end_rel_micros
tid = nodestats.thread_id
if is_gputrace:
# Node names should always have the form 'name:op'.
fields = node_name.split(':') + ['unknown']
node_name, op = fields[:2]
inputs = []
else:
_, op, inputs = self._parse_op_label(nodestats.timeline_label)
args = {'name': node_name, 'op': op}
for i, iname in enumerate(inputs):
args['input%d' % i] = iname
self._chrome_trace.emit_region(start, duration, pid, tid, 'Op', op, args)
def _emit_tensor_snapshot(self, tensor, timestamp, pid, tid, value):
"""Generate Chrome Trace snapshot event for a computed Tensor.
Args:
tensor: A 'TensorTracker' object.
timestamp: The timestamp of this snapshot as a long integer.
pid: The pid assigned for showing the device where this op ran.
tid: The tid of the thread computing the tensor snapshot.
value: A JSON-compliant snapshot of the object.
"""
desc = str(value.tensor_description).replace('"', '')
snapshot = {'tensor_description': desc}
self._chrome_trace.emit_obj_snapshot('Tensor', tensor.name, timestamp, pid,
tid, tensor.object_id, snapshot)
def _produce_tensor(self, name, timestamp, tensors_pid, allocator, num_bytes):
object_id = len(self._tensors)
tensor = _TensorTracker(name, object_id, timestamp, tensors_pid, allocator,
num_bytes)
self._tensors[name] = tensor
return tensor
def _is_gputrace_device(self, device_name):
"""Returns true if this device is part of the GPUTracer logging."""
return '/stream:' in device_name or '/memcpy' in device_name
def _allocate_pids(self):
"""Allocate fake process ids for each device in the StepStats."""
self._allocators_pid = self._alloc_pid()
self._chrome_trace.emit_pid('Allocators', self._allocators_pid)
# Add processes in the Chrome trace to show compute and data activity.
for dev_stats in self._step_stats.dev_stats:
device_pid = self._alloc_pid()
self._device_pids[dev_stats.device] = device_pid
tensors_pid = self._alloc_pid()
self._tensor_pids[dev_stats.device] = tensors_pid
self._chrome_trace.emit_pid(dev_stats.device + ' Compute', device_pid)
self._chrome_trace.emit_pid(dev_stats.device + ' Tensors', tensors_pid)
def _analyze_tensors(self, show_memory):
"""Analyze tensor references to track dataflow."""
for dev_stats in self._step_stats.dev_stats:
device_pid = self._device_pids[dev_stats.device]
tensors_pid = self._tensor_pids[dev_stats.device]
for node_stats in dev_stats.node_stats:
tid = node_stats.thread_id
node_name = node_stats.node_name
start_time = node_stats.all_start_micros
end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
for index, output in enumerate(node_stats.output):
if index:
output_name = '%s:%d' % (node_name, index)
else:
output_name = node_name
allocation = output.tensor_description.allocation_description
num_bytes = allocation.requested_bytes
allocator_name = allocation.allocator_name
tensor = self._produce_tensor(output_name, start_time, tensors_pid,
allocator_name, num_bytes)
tensor.add_ref(start_time)
tensor.add_unref(end_time)
self._flow_starts[output_name] = (end_time, device_pid, tid)
if show_memory:
self._chrome_trace.emit_obj_create('Tensor', output_name,
start_time, tensors_pid, tid,
tensor.object_id)
self._emit_tensor_snapshot(tensor, end_time - 1, tensors_pid, tid,
output)
def _show_compute(self, show_dataflow):
"""Visualize the computation activity."""
for dev_stats in self._step_stats.dev_stats:
device_name = dev_stats.device
device_pid = self._device_pids[device_name]
is_gputrace = self._is_gputrace_device(device_name)
for node_stats in dev_stats.node_stats:
tid = node_stats.thread_id
start_time = node_stats.all_start_micros
end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
self._emit_op(node_stats, device_pid, is_gputrace)
if is_gputrace:
continue
_, _, inputs = self._parse_op_label(node_stats.timeline_label)
for input_name in inputs:
if input_name not in self._tensors:
# This can happen when partitioning has inserted a Send/Recv.
# We remove the numeric suffix so that the dataflow appears to
# come from the original node. Ideally, the StepStats would
# contain logging for the Send and Recv nodes.
index = input_name.rfind('/_')
if index > 0:
input_name = input_name[:index]
if input_name in self._tensors:
tensor = self._tensors[input_name]
tensor.add_ref(start_time)
tensor.add_unref(end_time - 1)
if show_dataflow:
# We use a different flow ID for every graph edge.
create_time, create_pid, create_tid = self._flow_starts[
input_name]
# Don't add flows when producer and consumer ops are on the same
# pid/tid since the horizontal arrows clutter the visualization.
if create_pid != device_pid or create_tid != tid:
flow_id = self._alloc_flow_id()
self._chrome_trace.emit_flow_start(input_name, create_time,
create_pid, create_tid,
flow_id)
self._chrome_trace.emit_flow_end(input_name, start_time,
device_pid, tid, flow_id)
else:
logging.vlog(1, 'Can\'t find tensor %s - removed by CSE?',
input_name)
def _show_memory_counters(self):
"""Produce a counter series for each memory allocator."""
# Iterate over all tensor trackers to build a list of allocations and
# frees for each allocator. Then sort the lists and emit a cumulative
# counter series for each allocator.
allocations = {}
for name in self._tensors:
tensor = self._tensors[name]
self._chrome_trace.emit_obj_delete('Tensor', name, tensor.last_unref,
tensor.pid, 0, tensor.object_id)
allocator = tensor.allocator
if allocator not in allocations:
allocations[allocator] = []
num_bytes = tensor.num_bytes
allocations[allocator].append((tensor.create_time, num_bytes, name))
allocations[allocator].append((tensor.last_unref, -num_bytes, name))
alloc_maxes = {}
# Generate a counter series showing total allocations for each allocator.
for allocator in allocations:
alloc_list = allocations[allocator]
alloc_list.sort()
total_bytes = 0
alloc_tensor_set = set()
alloc_maxes[allocator] = AllocationMaximum(
timestamp=0, num_bytes=0, tensors=set())
for time, num_bytes, name in alloc_list:
total_bytes += num_bytes
if num_bytes < 0:
alloc_tensor_set.discard(name)
else:
alloc_tensor_set.add(name)
if total_bytes > alloc_maxes[allocator].num_bytes:
alloc_maxes[allocator] = AllocationMaximum(
timestamp=time,
num_bytes=total_bytes,
tensors=copy.deepcopy(alloc_tensor_set))
self._chrome_trace.emit_counter('Memory', allocator,
self._allocators_pid, time, allocator,
total_bytes)
self._allocator_maximums = alloc_maxes
def analyze_step_stats(self, show_dataflow=True, show_memory=True):
self._allocate_pids()
self._assign_lanes()
self._analyze_tensors(show_memory)
self._show_compute(show_dataflow)
if show_memory:
self._show_memory_counters()
return StepStatsAnalysis(
chrome_trace=self._chrome_trace,
allocator_maximums=self._allocator_maximums)
def generate_chrome_trace_format(self, show_dataflow=True, show_memory=False):
"""Produces a trace in Chrome Trace Format.
Args:
show_dataflow: (Optional.) If True, add flow events to the trace
connecting producers and consumers of tensors.
show_memory: (Optional.) If True, add object snapshot events to the trace
showing the sizes and lifetimes of tensors.
Returns:
A JSON formatted string in Chrome Trace format.
"""
step_stats_analysis = self.analyze_step_stats(
show_dataflow=show_dataflow, show_memory=show_memory)
return step_stats_analysis.chrome_trace.format_to_string(pretty=True)
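# Illustrative addition (not part of the original module): the usual way to feed
# a Timeline from a traced Session.run() call. Variable names and the output
# path are examples only.
def _example_chrome_trace_from_run(sess, fetches, out_path='timeline.json'):
    import tensorflow as tf
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    sess.run(fetches, options=run_options, run_metadata=run_metadata)
    # run_metadata.step_stats carries the per-device NodeExecStats this class consumes.
    trace = Timeline(run_metadata.step_stats)
    with open(out_path, 'w') as trace_file:
        trace_file.write(trace.generate_chrome_trace_format(show_memory=True))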
| apache-2.0 | 2,041,597,068,175,007,500 | 36.797771 | 80 | 0.638792 | false |
lokirius/python-for-android | python3-alpha/python3-src/Lib/plat-freebsd6/IN.py | 172 | 12416 | # Generated by h2py from /usr/include/netinet/in.h
# Included from sys/cdefs.h
__GNUCLIKE_ASM = 3
__GNUCLIKE_ASM = 2
__GNUCLIKE___TYPEOF = 1
__GNUCLIKE___OFFSETOF = 1
__GNUCLIKE___SECTION = 1
__GNUCLIKE_ATTRIBUTE_MODE_DI = 1
__GNUCLIKE_CTOR_SECTION_HANDLING = 1
__GNUCLIKE_BUILTIN_CONSTANT_P = 1
__GNUCLIKE_BUILTIN_VARARGS = 1
__GNUCLIKE_BUILTIN_STDARG = 1
__GNUCLIKE_BUILTIN_VAALIST = 1
__GNUC_VA_LIST_COMPATIBILITY = 1
__GNUCLIKE_BUILTIN_NEXT_ARG = 1
__GNUCLIKE_BUILTIN_MEMCPY = 1
__CC_SUPPORTS_INLINE = 1
__CC_SUPPORTS___INLINE = 1
__CC_SUPPORTS___INLINE__ = 1
__CC_SUPPORTS___FUNC__ = 1
__CC_SUPPORTS_WARNING = 1
__CC_SUPPORTS_VARADIC_XXX = 1
__CC_SUPPORTS_DYNAMIC_ARRAY_INIT = 1
__CC_INT_IS_32BIT = 1
def __P(protos): return protos
def __STRING(x): return #x
def __XSTRING(x): return __STRING(x)
def __P(protos): return ()
def __STRING(x): return "x"
def __aligned(x): return __attribute__((__aligned__(x)))
def __section(x): return __attribute__((__section__(x)))
def __aligned(x): return __attribute__((__aligned__(x)))
def __section(x): return __attribute__((__section__(x)))
def __nonnull(x): return __attribute__((__nonnull__(x)))
def __predict_true(exp): return __builtin_expect((exp), 1)
def __predict_false(exp): return __builtin_expect((exp), 0)
def __predict_true(exp): return (exp)
def __predict_false(exp): return (exp)
def __format_arg(fmtarg): return __attribute__((__format_arg__ (fmtarg)))
def __FBSDID(s): return __IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
def __RCSID(s): return __IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
def __RCSID_SOURCE(s): return __IDSTRING(__CONCAT(__rcsid_source_,__LINE__),s)
def __SCCSID(s): return __IDSTRING(__CONCAT(__sccsid_,__LINE__),s)
def __COPYRIGHT(s): return __IDSTRING(__CONCAT(__copyright_,__LINE__),s)
_POSIX_C_SOURCE = 199009
_POSIX_C_SOURCE = 199209
__XSI_VISIBLE = 600
_POSIX_C_SOURCE = 200112
__XSI_VISIBLE = 500
_POSIX_C_SOURCE = 199506
_POSIX_C_SOURCE = 198808
__POSIX_VISIBLE = 200112
__ISO_C_VISIBLE = 1999
__POSIX_VISIBLE = 199506
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 199309
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 199209
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 199009
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 198808
__ISO_C_VISIBLE = 0
__POSIX_VISIBLE = 0
__XSI_VISIBLE = 0
__BSD_VISIBLE = 0
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 0
__XSI_VISIBLE = 0
__BSD_VISIBLE = 0
__ISO_C_VISIBLE = 1999
__POSIX_VISIBLE = 200112
__XSI_VISIBLE = 600
__BSD_VISIBLE = 1
__ISO_C_VISIBLE = 1999
# Included from sys/_types.h
# Included from machine/_types.h
# Included from machine/endian.h
_QUAD_HIGHWORD = 1
_QUAD_LOWWORD = 0
_LITTLE_ENDIAN = 1234
_BIG_ENDIAN = 4321
_PDP_ENDIAN = 3412
_BYTE_ORDER = _LITTLE_ENDIAN
LITTLE_ENDIAN = _LITTLE_ENDIAN
BIG_ENDIAN = _BIG_ENDIAN
PDP_ENDIAN = _PDP_ENDIAN
BYTE_ORDER = _BYTE_ORDER
def __word_swap_int_var(x): return \
def __word_swap_int_const(x): return \
def __word_swap_int(x): return __word_swap_int_var(x)
def __byte_swap_int_var(x): return \
def __byte_swap_int_const(x): return \
def __byte_swap_int(x): return __byte_swap_int_var(x)
def __byte_swap_long_var(x): return \
def __byte_swap_long_const(x): return \
def __byte_swap_long(x): return __byte_swap_long_var(x)
def __byte_swap_word_var(x): return \
def __byte_swap_word_const(x): return \
def __byte_swap_word(x): return __byte_swap_word_var(x)
def __htonl(x): return __bswap32(x)
def __htons(x): return __bswap16(x)
def __ntohl(x): return __bswap32(x)
def __ntohs(x): return __bswap16(x)
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_TCP = 6
IPPROTO_UDP = 17
def htonl(x): return __htonl(x)
def htons(x): return __htons(x)
def ntohl(x): return __ntohl(x)
def ntohs(x): return __ntohs(x)
IPPROTO_RAW = 255
INET_ADDRSTRLEN = 16
IPPROTO_HOPOPTS = 0
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_IPV4 = 4
IPPROTO_IPIP = IPPROTO_IPV4
IPPROTO_ST = 7
IPPROTO_EGP = 8
IPPROTO_PIGP = 9
IPPROTO_RCCMON = 10
IPPROTO_NVPII = 11
IPPROTO_PUP = 12
IPPROTO_ARGUS = 13
IPPROTO_EMCON = 14
IPPROTO_XNET = 15
IPPROTO_CHAOS = 16
IPPROTO_MUX = 18
IPPROTO_MEAS = 19
IPPROTO_HMP = 20
IPPROTO_PRM = 21
IPPROTO_IDP = 22
IPPROTO_TRUNK1 = 23
IPPROTO_TRUNK2 = 24
IPPROTO_LEAF1 = 25
IPPROTO_LEAF2 = 26
IPPROTO_RDP = 27
IPPROTO_IRTP = 28
IPPROTO_TP = 29
IPPROTO_BLT = 30
IPPROTO_NSP = 31
IPPROTO_INP = 32
IPPROTO_SEP = 33
IPPROTO_3PC = 34
IPPROTO_IDPR = 35
IPPROTO_XTP = 36
IPPROTO_DDP = 37
IPPROTO_CMTP = 38
IPPROTO_TPXX = 39
IPPROTO_IL = 40
IPPROTO_IPV6 = 41
IPPROTO_SDRP = 42
IPPROTO_ROUTING = 43
IPPROTO_FRAGMENT = 44
IPPROTO_IDRP = 45
IPPROTO_RSVP = 46
IPPROTO_GRE = 47
IPPROTO_MHRP = 48
IPPROTO_BHA = 49
IPPROTO_ESP = 50
IPPROTO_AH = 51
IPPROTO_INLSP = 52
IPPROTO_SWIPE = 53
IPPROTO_NHRP = 54
IPPROTO_MOBILE = 55
IPPROTO_TLSP = 56
IPPROTO_SKIP = 57
IPPROTO_ICMPV6 = 58
IPPROTO_NONE = 59
IPPROTO_DSTOPTS = 60
IPPROTO_AHIP = 61
IPPROTO_CFTP = 62
IPPROTO_HELLO = 63
IPPROTO_SATEXPAK = 64
IPPROTO_KRYPTOLAN = 65
IPPROTO_RVD = 66
IPPROTO_IPPC = 67
IPPROTO_ADFS = 68
IPPROTO_SATMON = 69
IPPROTO_VISA = 70
IPPROTO_IPCV = 71
IPPROTO_CPNX = 72
IPPROTO_CPHB = 73
IPPROTO_WSN = 74
IPPROTO_PVP = 75
IPPROTO_BRSATMON = 76
IPPROTO_ND = 77
IPPROTO_WBMON = 78
IPPROTO_WBEXPAK = 79
IPPROTO_EON = 80
IPPROTO_VMTP = 81
IPPROTO_SVMTP = 82
IPPROTO_VINES = 83
IPPROTO_TTP = 84
IPPROTO_IGP = 85
IPPROTO_DGP = 86
IPPROTO_TCF = 87
IPPROTO_IGRP = 88
IPPROTO_OSPFIGP = 89
IPPROTO_SRPC = 90
IPPROTO_LARP = 91
IPPROTO_MTP = 92
IPPROTO_AX25 = 93
IPPROTO_IPEIP = 94
IPPROTO_MICP = 95
IPPROTO_SCCSP = 96
IPPROTO_ETHERIP = 97
IPPROTO_ENCAP = 98
IPPROTO_APES = 99
IPPROTO_GMTP = 100
IPPROTO_IPCOMP = 108
IPPROTO_SCTP = 132
IPPROTO_PIM = 103
IPPROTO_CARP = 112
IPPROTO_PGM = 113
IPPROTO_PFSYNC = 240
IPPROTO_OLD_DIVERT = 254
IPPROTO_MAX = 256
IPPROTO_DONE = 257
IPPROTO_DIVERT = 258
IPPROTO_SPACER = 32767
IPPORT_RESERVED = 1024
IPPORT_HIFIRSTAUTO = 49152
IPPORT_HILASTAUTO = 65535
IPPORT_RESERVEDSTART = 600
IPPORT_MAX = 65535
def IN_CLASSA(i): return (((u_int32_t)(i) & 0x80000000) == 0)
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
def IN_CLASSB(i): return (((u_int32_t)(i) & 0xc0000000) == 0x80000000)
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
def IN_CLASSC(i): return (((u_int32_t)(i) & 0xe0000000) == 0xc0000000)
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
def IN_CLASSD(i): return (((u_int32_t)(i) & 0xf0000000) == 0xe0000000)
IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 0x0fffffff
def IN_MULTICAST(i): return IN_CLASSD(i)
def IN_EXPERIMENTAL(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
def IN_BADCLASS(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
INADDR_NONE = 0xffffffff
IN_LOOPBACKNET = 127
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_SENDSRCADDR = IP_RECVDSTADDR
IP_RETOPTS = 8
IP_MULTICAST_IF = 9
IP_MULTICAST_TTL = 10
IP_MULTICAST_LOOP = 11
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_MULTICAST_VIF = 14
IP_RSVP_ON = 15
IP_RSVP_OFF = 16
IP_RSVP_VIF_ON = 17
IP_RSVP_VIF_OFF = 18
IP_PORTRANGE = 19
IP_RECVIF = 20
IP_IPSEC_POLICY = 21
IP_FAITH = 22
IP_ONESBCAST = 23
IP_FW_TABLE_ADD = 40
IP_FW_TABLE_DEL = 41
IP_FW_TABLE_FLUSH = 42
IP_FW_TABLE_GETSIZE = 43
IP_FW_TABLE_LIST = 44
IP_FW_ADD = 50
IP_FW_DEL = 51
IP_FW_FLUSH = 52
IP_FW_ZERO = 53
IP_FW_GET = 54
IP_FW_RESETLOG = 55
IP_DUMMYNET_CONFIGURE = 60
IP_DUMMYNET_DEL = 61
IP_DUMMYNET_FLUSH = 62
IP_DUMMYNET_GET = 64
IP_RECVTTL = 65
IP_MINTTL = 66
IP_DONTFRAG = 67
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
IP_PORTRANGE_DEFAULT = 0
IP_PORTRANGE_HIGH = 1
IP_PORTRANGE_LOW = 2
IPPROTO_MAXID = (IPPROTO_AH + 1)
IPCTL_FORWARDING = 1
IPCTL_SENDREDIRECTS = 2
IPCTL_DEFTTL = 3
IPCTL_DEFMTU = 4
IPCTL_RTEXPIRE = 5
IPCTL_RTMINEXPIRE = 6
IPCTL_RTMAXCACHE = 7
IPCTL_SOURCEROUTE = 8
IPCTL_DIRECTEDBROADCAST = 9
IPCTL_INTRQMAXLEN = 10
IPCTL_INTRQDROPS = 11
IPCTL_STATS = 12
IPCTL_ACCEPTSOURCEROUTE = 13
IPCTL_FASTFORWARDING = 14
IPCTL_KEEPFAITH = 15
IPCTL_GIF_TTL = 16
IPCTL_MAXID = 17
def in_nullhost(x): return ((x).s_addr == INADDR_ANY)
# Included from netinet6/in6.h
__KAME_VERSION = "FreeBSD"
IPV6PORT_RESERVED = 1024
IPV6PORT_ANONMIN = 49152
IPV6PORT_ANONMAX = 65535
IPV6PORT_RESERVEDMIN = 600
IPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)
INET6_ADDRSTRLEN = 46
IPV6_ADDR_INT32_ONE = 1
IPV6_ADDR_INT32_TWO = 2
IPV6_ADDR_INT32_MNL = 0xff010000
IPV6_ADDR_INT32_MLL = 0xff020000
IPV6_ADDR_INT32_SMP = 0x0000ffff
IPV6_ADDR_INT16_ULL = 0xfe80
IPV6_ADDR_INT16_USL = 0xfec0
IPV6_ADDR_INT16_MLL = 0xff02
IPV6_ADDR_INT32_ONE = 0x01000000
IPV6_ADDR_INT32_TWO = 0x02000000
IPV6_ADDR_INT32_MNL = 0x000001ff
IPV6_ADDR_INT32_MLL = 0x000002ff
IPV6_ADDR_INT32_SMP = 0xffff0000
IPV6_ADDR_INT16_ULL = 0x80fe
IPV6_ADDR_INT16_USL = 0xc0fe
IPV6_ADDR_INT16_MLL = 0x02ff
def IN6_IS_ADDR_UNSPECIFIED(a): return \
def IN6_IS_ADDR_LOOPBACK(a): return \
def IN6_IS_ADDR_V4COMPAT(a): return \
def IN6_IS_ADDR_V4MAPPED(a): return \
IPV6_ADDR_SCOPE_NODELOCAL = 0x01
IPV6_ADDR_SCOPE_INTFACELOCAL = 0x01
IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
IPV6_ADDR_SCOPE_SITELOCAL = 0x05
IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
IPV6_ADDR_SCOPE_GLOBAL = 0x0e
__IPV6_ADDR_SCOPE_NODELOCAL = 0x01
__IPV6_ADDR_SCOPE_INTFACELOCAL = 0x01
__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
__IPV6_ADDR_SCOPE_SITELOCAL = 0x05
__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
__IPV6_ADDR_SCOPE_GLOBAL = 0x0e
def IN6_IS_ADDR_LINKLOCAL(a): return \
def IN6_IS_ADDR_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_NODELOCAL(a): return \
def IN6_IS_ADDR_MC_INTFACELOCAL(a): return \
def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
def IN6_IS_ADDR_MC_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
def IN6_IS_ADDR_MC_GLOBAL(a): return \
def IN6_IS_ADDR_MC_NODELOCAL(a): return \
def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
def IN6_IS_ADDR_MC_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
def IN6_IS_ADDR_MC_GLOBAL(a): return \
def IN6_IS_SCOPE_LINKLOCAL(a): return \
def IFA6_IS_DEPRECATED(a): return \
def IFA6_IS_INVALID(a): return \
IPV6_OPTIONS = 1
IPV6_RECVOPTS = 5
IPV6_RECVRETOPTS = 6
IPV6_RECVDSTADDR = 7
IPV6_RETOPTS = 8
IPV6_SOCKOPT_RESERVED1 = 3
IPV6_UNICAST_HOPS = 4
IPV6_MULTICAST_IF = 9
IPV6_MULTICAST_HOPS = 10
IPV6_MULTICAST_LOOP = 11
IPV6_JOIN_GROUP = 12
IPV6_LEAVE_GROUP = 13
IPV6_PORTRANGE = 14
ICMP6_FILTER = 18
IPV6_2292PKTINFO = 19
IPV6_2292HOPLIMIT = 20
IPV6_2292NEXTHOP = 21
IPV6_2292HOPOPTS = 22
IPV6_2292DSTOPTS = 23
IPV6_2292RTHDR = 24
IPV6_2292PKTOPTIONS = 25
IPV6_CHECKSUM = 26
IPV6_V6ONLY = 27
IPV6_BINDV6ONLY = IPV6_V6ONLY
IPV6_IPSEC_POLICY = 28
IPV6_FAITH = 29
IPV6_FW_ADD = 30
IPV6_FW_DEL = 31
IPV6_FW_FLUSH = 32
IPV6_FW_ZERO = 33
IPV6_FW_GET = 34
IPV6_RTHDRDSTOPTS = 35
IPV6_RECVPKTINFO = 36
IPV6_RECVHOPLIMIT = 37
IPV6_RECVRTHDR = 38
IPV6_RECVHOPOPTS = 39
IPV6_RECVDSTOPTS = 40
IPV6_RECVRTHDRDSTOPTS = 41
IPV6_USE_MIN_MTU = 42
IPV6_RECVPATHMTU = 43
IPV6_PATHMTU = 44
IPV6_REACHCONF = 45
IPV6_PKTINFO = 46
IPV6_HOPLIMIT = 47
IPV6_NEXTHOP = 48
IPV6_HOPOPTS = 49
IPV6_DSTOPTS = 50
IPV6_RTHDR = 51
IPV6_PKTOPTIONS = 52
IPV6_RECVTCLASS = 57
IPV6_AUTOFLOWLABEL = 59
IPV6_TCLASS = 61
IPV6_DONTFRAG = 62
IPV6_PREFER_TEMPADDR = 63
IPV6_RTHDR_LOOSE = 0
IPV6_RTHDR_STRICT = 1
IPV6_RTHDR_TYPE_0 = 0
IPV6_DEFAULT_MULTICAST_HOPS = 1
IPV6_DEFAULT_MULTICAST_LOOP = 1
IPV6_PORTRANGE_DEFAULT = 0
IPV6_PORTRANGE_HIGH = 1
IPV6_PORTRANGE_LOW = 2
IPV6PROTO_MAXID = (IPPROTO_PIM + 1)
IPV6CTL_FORWARDING = 1
IPV6CTL_SENDREDIRECTS = 2
IPV6CTL_DEFHLIM = 3
IPV6CTL_DEFMTU = 4
IPV6CTL_FORWSRCRT = 5
IPV6CTL_STATS = 6
IPV6CTL_MRTSTATS = 7
IPV6CTL_MRTPROTO = 8
IPV6CTL_MAXFRAGPACKETS = 9
IPV6CTL_SOURCECHECK = 10
IPV6CTL_SOURCECHECK_LOGINT = 11
IPV6CTL_ACCEPT_RTADV = 12
IPV6CTL_KEEPFAITH = 13
IPV6CTL_LOG_INTERVAL = 14
IPV6CTL_HDRNESTLIMIT = 15
IPV6CTL_DAD_COUNT = 16
IPV6CTL_AUTO_FLOWLABEL = 17
IPV6CTL_DEFMCASTHLIM = 18
IPV6CTL_GIF_HLIM = 19
IPV6CTL_KAME_VERSION = 20
IPV6CTL_USE_DEPRECATED = 21
IPV6CTL_RR_PRUNE = 22
IPV6CTL_MAPPED_ADDR = 23
IPV6CTL_V6ONLY = 24
IPV6CTL_RTEXPIRE = 25
IPV6CTL_RTMINEXPIRE = 26
IPV6CTL_RTMAXCACHE = 27
IPV6CTL_USETEMPADDR = 32
IPV6CTL_TEMPPLTIME = 33
IPV6CTL_TEMPVLTIME = 34
IPV6CTL_AUTO_LINKLOCAL = 35
IPV6CTL_RIP6STATS = 36
IPV6CTL_PREFER_TEMPADDR = 37
IPV6CTL_ADDRCTLPOLICY = 38
IPV6CTL_USE_DEFAULTZONE = 39
IPV6CTL_MAXFRAGS = 41
IPV6CTL_IFQ = 42
IPV6CTL_ISATAPRTR = 43
IPV6CTL_MCAST_PMTU = 44
IPV6CTL_STEALTH = 45
IPV6CTL_MAXID = 46
| apache-2.0 | -3,358,910,974,087,255,600 | 21.533575 | 78 | 0.712146 | false |
nikolas/lettuce | tests/integration/lib/Django-1.3/django/db/backends/sqlite3/creation.py | 230 | 3239 | import os
import sys
from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
# SQLite doesn't actually support most of these types, but it "does the right
# thing" given more verbose field definitions, so leave them as is so that
# schema inspection is more useful.
data_types = {
'AutoField': 'integer',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'decimal',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'real',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer unsigned',
'PositiveSmallIntegerField': 'smallint unsigned',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
}
def sql_for_pending_references(self, model, style, pending_references):
"SQLite3 doesn't support constraints"
return []
def sql_remove_table_constraints(self, model, references_to_delete, style):
"SQLite3 doesn't support constraints"
return []
def _get_test_db_name(self):
test_database_name = self.connection.settings_dict['TEST_NAME']
if test_database_name and test_database_name != ':memory:':
return test_database_name
return ':memory:'
def _create_test_db(self, verbosity, autoclobber):
test_database_name = self._get_test_db_name()
if test_database_name != ':memory:':
# Erase the old test database
if verbosity >= 1:
print "Destroying old test database '%s'..." % self.connection.alias
if os.access(test_database_name, os.F_OK):
if not autoclobber:
confirm = raw_input("Type 'yes' if you would like to try deleting the test database '%s', or 'no' to cancel: " % test_database_name)
if autoclobber or confirm == 'yes':
try:
os.remove(test_database_name)
except Exception, e:
sys.stderr.write("Got an error deleting the old test database: %s\n" % e)
sys.exit(2)
else:
print "Tests cancelled."
sys.exit(1)
return test_database_name
def _destroy_test_db(self, test_database_name, verbosity):
if test_database_name and test_database_name != ":memory:":
# Remove the SQLite database file
os.remove(test_database_name)
| gpl-3.0 | -6,223,007,115,119,767,000 | 45.271429 | 152 | 0.532263 | false |
dex4er/django | tests/servers/tests.py | 49 | 5773 | # -*- encoding: utf-8 -*-
"""
Tests for django.core.servers.
"""
from __future__ import unicode_literals
import os
import socket
from django.core.exceptions import ImproperlyConfigured
from django.test import LiveServerTestCase
from django.test.utils import override_settings
from django.utils.http import urlencode
from django.utils.six.moves.urllib.error import HTTPError
from django.utils.six.moves.urllib.request import urlopen
from django.utils._os import upath
from .models import Person
TEST_ROOT = os.path.dirname(upath(__file__))
TEST_SETTINGS = {
'MEDIA_URL': '/media/',
'MEDIA_ROOT': os.path.join(TEST_ROOT, 'media'),
'STATIC_URL': '/static/',
'STATIC_ROOT': os.path.join(TEST_ROOT, 'static'),
}
class LiveServerBase(LiveServerTestCase):
available_apps = [
'servers',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
]
fixtures = ['testdata.json']
urls = 'servers.urls'
@classmethod
def setUpClass(cls):
# Override settings
cls.settings_override = override_settings(**TEST_SETTINGS)
cls.settings_override.enable()
super(LiveServerBase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
# Restore original settings
cls.settings_override.disable()
super(LiveServerBase, cls).tearDownClass()
def urlopen(self, url):
return urlopen(self.live_server_url + url)
class LiveServerAddress(LiveServerBase):
"""
Ensure that the address set in the environment variable is valid.
Refs #2879.
"""
@classmethod
def setUpClass(cls):
# Backup original environment variable
address_predefined = 'DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ
old_address = os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS')
# Just the host is not accepted
cls.raises_exception('localhost', ImproperlyConfigured)
# The host must be valid
cls.raises_exception('blahblahblah:8081', socket.error)
# The list of ports must be in a valid format
cls.raises_exception('localhost:8081,', ImproperlyConfigured)
cls.raises_exception('localhost:8081,blah', ImproperlyConfigured)
cls.raises_exception('localhost:8081-', ImproperlyConfigured)
cls.raises_exception('localhost:8081-blah', ImproperlyConfigured)
cls.raises_exception('localhost:8081-8082-8083', ImproperlyConfigured)
# If contrib.staticfiles isn't configured properly, the exception
# should bubble up to the main thread.
old_STATIC_URL = TEST_SETTINGS['STATIC_URL']
TEST_SETTINGS['STATIC_URL'] = None
cls.raises_exception('localhost:8081', ImproperlyConfigured)
TEST_SETTINGS['STATIC_URL'] = old_STATIC_URL
# Restore original environment variable
if address_predefined:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = old_address
else:
del os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS']
@classmethod
def tearDownClass(cls):
# skip it, as setUpClass doesn't call its parent either
pass
@classmethod
def raises_exception(cls, address, exception):
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = address
try:
super(LiveServerAddress, cls).setUpClass()
raise Exception("The line above should have raised an exception")
except exception:
pass
finally:
super(LiveServerAddress, cls).tearDownClass()
def test_test_test(self):
# Intentionally empty method so that the test is picked up by the
        # test runner and the overridden setUpClass() method is executed.
pass
class LiveServerViews(LiveServerBase):
def test_404(self):
"""
Ensure that the LiveServerTestCase serves 404s.
Refs #2879.
"""
try:
self.urlopen('/')
except HTTPError as err:
self.assertEqual(err.code, 404, 'Expected 404 response')
else:
self.fail('Expected 404 response')
def test_view(self):
"""
Ensure that the LiveServerTestCase serves views.
Refs #2879.
"""
f = self.urlopen('/example_view/')
self.assertEqual(f.read(), b'example view')
def test_static_files(self):
"""
Ensure that the LiveServerTestCase serves static files.
Refs #2879.
"""
f = self.urlopen('/static/example_static_file.txt')
self.assertEqual(f.read().rstrip(b'\r\n'), b'example static file')
def test_media_files(self):
"""
Ensure that the LiveServerTestCase serves media files.
Refs #2879.
"""
f = self.urlopen('/media/example_media_file.txt')
self.assertEqual(f.read().rstrip(b'\r\n'), b'example media file')
def test_environ(self):
f = self.urlopen('/environ_view/?%s' % urlencode({'q': 'тест'}))
self.assertIn(b"QUERY_STRING: 'q=%D1%82%D0%B5%D1%81%D1%82'", f.read())
class LiveServerDatabase(LiveServerBase):
def test_fixtures_loaded(self):
"""
Ensure that fixtures are properly loaded and visible to the
live server thread.
Refs #2879.
"""
f = self.urlopen('/model_view/')
self.assertEqual(f.read().splitlines(), [b'jane', b'robert'])
def test_database_writes(self):
"""
Ensure that data written to the database by a view can be read.
Refs #2879.
"""
self.urlopen('/create_model_instance/')
self.assertQuerysetEqual(
Person.objects.all().order_by('pk'),
['jane', 'robert', 'emily'],
lambda b: b.name
)
| bsd-3-clause | 4,208,929,201,095,253,500 | 31.05 | 78 | 0.633385 | false |
engdan77/edoAutoHomeMobile | twisted/internet/test/test_default.py | 24 | 3539 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.default}.
"""
from __future__ import division, absolute_import
import select, sys
from twisted.trial.unittest import SynchronousTestCase
from twisted.python.runtime import Platform
from twisted.python.reflect import requireModule
from twisted.internet import default
from twisted.internet.default import _getInstallFunction, install
from twisted.internet.test.test_main import NoReactor
from twisted.internet.interfaces import IReactorCore
unix = Platform('posix', 'other')
linux = Platform('posix', 'linux2')
windows = Platform('nt', 'win32')
osx = Platform('posix', 'darwin')
class PollReactorTests(SynchronousTestCase):
"""
Tests for the cases of L{twisted.internet.default._getInstallFunction}
in which it picks the poll(2) or epoll(7)-based reactors.
"""
def assertIsPoll(self, install):
"""
Assert the given function will install the poll() reactor, or select()
if poll() is unavailable.
"""
if hasattr(select, "poll"):
self.assertEqual(
install.__module__, 'twisted.internet.pollreactor')
else:
self.assertEqual(
install.__module__, 'twisted.internet.selectreactor')
def test_unix(self):
"""
L{_getInstallFunction} chooses the poll reactor on arbitrary Unix
platforms, falling back to select(2) if it is unavailable.
"""
install = _getInstallFunction(unix)
self.assertIsPoll(install)
def test_linux(self):
"""
L{_getInstallFunction} chooses the epoll reactor on Linux, or poll if
epoll is unavailable.
"""
install = _getInstallFunction(linux)
if requireModule('twisted.internet.epollreactor') is None:
self.assertIsPoll(install)
else:
self.assertEqual(
install.__module__, 'twisted.internet.epollreactor')
class SelectReactorTests(SynchronousTestCase):
"""
Tests for the cases of L{twisted.internet.default._getInstallFunction}
in which it picks the select(2)-based reactor.
"""
def test_osx(self):
"""
L{_getInstallFunction} chooses the select reactor on OS X.
"""
install = _getInstallFunction(osx)
self.assertEqual(
install.__module__, 'twisted.internet.selectreactor')
def test_windows(self):
"""
L{_getInstallFunction} chooses the select reactor on Windows.
"""
install = _getInstallFunction(windows)
self.assertEqual(
install.__module__, 'twisted.internet.selectreactor')
class InstallationTests(SynchronousTestCase):
"""
Tests for actual installation of the reactor.
"""
def test_install(self):
"""
L{install} installs a reactor.
"""
with NoReactor():
install()
self.assertIn("twisted.internet.reactor", sys.modules)
def test_reactor(self):
"""
Importing L{twisted.internet.reactor} installs the default reactor if
none is installed.
"""
installed = []
def installer():
installed.append(True)
return install()
self.patch(default, "install", installer)
with NoReactor():
from twisted.internet import reactor
self.assertTrue(IReactorCore.providedBy(reactor))
self.assertEqual(installed, [True])
| mit | 7,378,696,779,155,456,000 | 28.739496 | 78 | 0.636055 | false |
lshain-android-source/external-chromium_org | chrome/common/extensions/docs/server2/github_file_system_test.py | 23 | 1750 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import unittest
from appengine_blobstore import AppEngineBlobstore
from appengine_url_fetcher import AppEngineUrlFetcher
from appengine_wrappers import files
from fake_fetchers import ConfigureFakeFetchers
from github_file_system import GithubFileSystem
from object_store_creator import ObjectStoreCreator
import url_constants
class GithubFileSystemTest(unittest.TestCase):
def setUp(self):
ConfigureFakeFetchers()
self._base_path = os.path.join(sys.path[0],
'test_data',
'github_file_system')
self._file_system = GithubFileSystem.Create(ObjectStoreCreator.ForTest())
def _ReadLocalFile(self, filename):
with open(os.path.join(self._base_path, filename), 'r') as f:
return f.read()
def testList(self):
self.assertEqual(json.loads(self._ReadLocalFile('expected_list.json')),
self._file_system.Read(['/']).Get())
def testRead(self):
self.assertEqual(self._ReadLocalFile('expected_read.txt'),
self._file_system.ReadSingle('/analytics/launch.js'))
def testStat(self):
self.assertEqual(0, self._file_system.Stat('zipball').version)
def testKeyGeneration(self):
self.assertEqual(0, len(files.GetBlobKeys()))
self._file_system.ReadSingle('/analytics/launch.js')
self.assertEqual(1, len(files.GetBlobKeys()))
self._file_system.ReadSingle('/analytics/main.css')
self.assertEqual(1, len(files.GetBlobKeys()))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -8,866,620,653,089,136,000 | 34 | 77 | 0.696 | false |
lancezlin/ml_template_py | lib/python2.7/site-packages/pip/_vendor/pyparsing.py | 417 | 224171 | # module pyparsing.py
#
# Copyright (c) 2003-2016 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements
(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
L{Literal} expressions)::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "2.1.10"
__versionTime__ = "07 Oct 2016 01:31 UTC"
__author__ = "Paul McGuire <[email protected]>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
import traceback
import types
from datetime import datetime
try:
from _thread import RLock
except ImportError:
from threading import RLock
try:
from collections import OrderedDict as _OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict as _OrderedDict
except ImportError:
_OrderedDict = None
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
'CloseMatch', 'tokenMap', 'pyparsing_common',
]
system_version = tuple(sys.version_info)[:3]
PY_3 = system_version[0] == 3
if PY_3:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
_ustr = str
# build list of single arg builtins, that can be used as parse actions
singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
else:
_MAX_INT = sys.maxint
range = xrange
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
           str(obj). If that fails with a UnicodeEncodeError, it encodes unicode(obj) with the
           default encoding, escaping any characters that cannot be represented.
"""
if isinstance(obj,unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# Else encode it
ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
xmlcharref = Regex('&#\d+;')
xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
return xmlcharref.transformString(ret)
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
import __builtin__
for fname in "sum len sorted reversed list tuple set any all min max".split():
try:
singleArgBuiltins.append(getattr(__builtin__,fname))
except AttributeError:
continue
_generatorType = type((y for y in range(1)))
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
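# Example of the escaping above (value worked out from the mapping; ampersand is
# replaced first so the generated entities are left intact):
#   _xml_escape('a < b & "c"')  ->  'a &lt; b &amp; &quot;c&quot;'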
class _Constants(object):
pass
alphas = string.ascii_uppercase + string.ascii_lowercase
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
printables = "".join(c for c in string.printable if c not in string.whitespace)
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, pstr, loc=0, msg=None, elem=None ):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
self.args = (pstr, loc, msg)
@classmethod
def _from_exception(cls, pe):
"""
internal factory method to simplify creating one type of ParseException
from another - avoids having __init__ signature conflicts among subclasses
"""
return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
def __getattr__( self, aname ):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if( aname == "lineno" ):
return lineno( self.loc, self.pstr )
elif( aname in ("col", "column") ):
return col( self.loc, self.pstr )
elif( aname == "line" ):
return line( self.loc, self.pstr )
else:
raise AttributeError(aname)
def __str__( self ):
return "%s (at char %d), (line:%d, col:%d)" % \
( self.msg, self.loc, self.lineno, self.column )
def __repr__( self ):
return _ustr(self)
def markInputline( self, markerString = ">!<" ):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join((line_str[:line_column],
markerString, line_str[line_column:]))
return line_str.strip()
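    # Illustrative sketch of markInputline() (hypothetical usage, not part of the class):
    #   try:
    #       Word(nums).parseString("abc 123")
    #   except ParseException as pe:
    #       print(pe.markInputline())   # -> '>!<abc 123'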
def __dir__(self):
return "lineno col line".split() + dir(type(self))
class ParseException(ParseBaseException):
"""
    Exception thrown when a parse expression doesn't match the input string;
supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
Example::
try:
Word(nums).setName("integer").parseString("ABC")
except ParseException as pe:
print(pe)
print("column: {}".format(pe.col))
prints::
Expected integer (at char 0), (line:1, col:1)
column: 1
"""
pass
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
class ParseSyntaxException(ParseFatalException):
"""just like L{ParseFatalException}, but thrown internally when an
L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop
immediately because an unbacktrackable syntax error has been found"""
pass
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
"""exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
def __init__( self, parseElementList ):
self.parseElementTrace = parseElementList
def __str__( self ):
return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup[0])
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
"""
Structured parse results, to provide multiple means of access to the parsed data:
- as a list (C{len(results)})
- by list index (C{results[0], results[1]}, etc.)
- by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
Example::
integer = Word(nums)
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
# date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
# parseString returns a ParseResults object
result = date_str.parseString("1999/12/31")
def test(s, fn=repr):
print("%s -> %s" % (s, fn(eval(s))))
test("list(result)")
test("result[0]")
test("result['month']")
test("result.day")
test("'month' in result")
test("'minutes' in result")
test("result.dump()", str)
prints::
list(result) -> ['1999', '/', '12', '/', '31']
result[0] -> '1999'
result['month'] -> '12'
result.day -> '31'
'month' in result -> True
'minutes' in result -> False
result.dump() -> ['1999', '/', '12', '/', '31']
- day: 31
- month: 12
- year: 1999
"""
def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
self.__asList = asList
self.__modal = modal
if toklist is None:
toklist = []
if isinstance(toklist, list):
self.__toklist = toklist[:]
elif isinstance(toklist, _generatorType):
self.__toklist = list(toklist)
else:
self.__toklist = [toklist]
self.__tokdict = dict()
if name is not None and name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name,int):
name = _ustr(name) # will always return a str, but use _ustr for consistency
self.__name = name
if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
if isinstance(toklist,basestring):
toklist = [ toklist ]
if asList:
if isinstance(toklist,ParseResults):
self[name] = _ParseResultsWithOffset(toklist.copy(),0)
else:
self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError,TypeError,IndexError):
self[name] = toklist
def __getitem__( self, i ):
if isinstance( i, (int,slice) ):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[i] ])
def __setitem__( self, k, v, isinstance=isinstance ):
if isinstance(v,_ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
sub = v[0]
elif isinstance(k,(int,slice)):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
sub = v
if isinstance(sub,ParseResults):
sub.__parent = wkref(self)
def __delitem__( self, i ):
if isinstance(i,(int,slice)):
mylen = len( self.__toklist )
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i+1)
# get removed indices
removed = list(range(*i.indices(mylen)))
removed.reverse()
# fixup indices in token dictionary
for name,occurrences in self.__tokdict.items():
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
else:
del self.__tokdict[i]
def __contains__( self, k ):
return k in self.__tokdict
def __len__( self ): return len( self.__toklist )
def __bool__(self): return ( not not self.__toklist )
__nonzero__ = __bool__
def __iter__( self ): return iter( self.__toklist )
def __reversed__( self ): return iter( self.__toklist[::-1] )
def _iterkeys( self ):
if hasattr(self.__tokdict, "iterkeys"):
return self.__tokdict.iterkeys()
else:
return iter(self.__tokdict)
def _itervalues( self ):
return (self[k] for k in self._iterkeys())
def _iteritems( self ):
return ((k, self[k]) for k in self._iterkeys())
if PY_3:
keys = _iterkeys
"""Returns an iterator of all named result keys (Python 3.x only)."""
values = _itervalues
"""Returns an iterator of all named result values (Python 3.x only)."""
items = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 3.x only)."""
else:
iterkeys = _iterkeys
"""Returns an iterator of all named result keys (Python 2.x only)."""
itervalues = _itervalues
"""Returns an iterator of all named result values (Python 2.x only)."""
iteritems = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 2.x only)."""
def keys( self ):
"""Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iterkeys())
def values( self ):
"""Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.itervalues())
def items( self ):
"""Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iteritems())
def haskeys( self ):
"""Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self.__tokdict)
def pop( self, *args, **kwargs):
"""
Removes and returns item at specified index (default=C{last}).
Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
argument or an integer argument, it will use C{list} semantics
and pop tokens from the list of parsed tokens. If passed a
non-integer argument (most likely a string), it will use C{dict}
semantics and pop the corresponding value from any defined
results names. A second default return value argument is
supported, just as in C{dict.pop()}.
Example::
def remove_first(tokens):
tokens.pop(0)
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
label = Word(alphas)
patt = label("LABEL") + OneOrMore(Word(nums))
print(patt.parseString("AAB 123 321").dump())
# Use pop() in a parse action to remove named result (note that corresponding value is not
# removed from list form of results)
def remove_LABEL(tokens):
tokens.pop("LABEL")
return tokens
patt.addParseAction(remove_LABEL)
print(patt.parseString("AAB 123 321").dump())
prints::
['AAB', '123', '321']
- LABEL: AAB
['AAB', '123', '321']
"""
if not args:
args = [-1]
for k,v in kwargs.items():
if k == 'default':
args = (args[0], v)
else:
raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
if (isinstance(args[0], int) or
len(args) == 1 or
args[0] in self):
index = args[0]
ret = self[index]
del self[index]
return ret
else:
defaultvalue = args[1]
return defaultvalue
def get(self, key, defaultValue=None):
"""
Returns named result matching the given key, or if there is no
such name, then returns the given C{defaultValue} or C{None} if no
C{defaultValue} is specified.
Similar to C{dict.get()}.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString("1999/12/31")
print(result.get("year")) # -> '1999'
print(result.get("hour", "not specified")) # -> 'not specified'
print(result.get("hour")) # -> None
"""
if key in self:
return self[key]
else:
return defaultValue
def insert( self, index, insStr ):
"""
Inserts new element at location index in the list of parsed tokens.
Similar to C{list.insert()}.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to insert the parse location in the front of the parsed results
def insert_locn(locn, tokens):
tokens.insert(0, locn)
print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
"""
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
for name,occurrences in self.__tokdict.items():
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
def append( self, item ):
"""
Add single element to end of ParseResults list of elements.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to compute the sum of the parsed integers, and add it to the end
def append_sum(tokens):
tokens.append(sum(map(int, tokens)))
print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
"""
self.__toklist.append(item)
def extend( self, itemseq ):
"""
Add sequence of elements to end of ParseResults list of elements.
Example::
patt = OneOrMore(Word(alphas))
# use a parse action to append the reverse of the matched strings, to make a palindrome
def make_palindrome(tokens):
tokens.extend(reversed([t[::-1] for t in tokens]))
return ''.join(tokens)
print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
"""
if isinstance(itemseq, ParseResults):
self += itemseq
else:
self.__toklist.extend(itemseq)
def clear( self ):
"""
Clear all elements and results names.
"""
del self.__toklist[:]
self.__tokdict.clear()
def __getattr__( self, name ):
try:
return self[name]
except KeyError:
return ""
if name in self.__tokdict:
if name not in self.__accumNames:
return self.__tokdict[name][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[name] ])
else:
return ""
def __add__( self, other ):
ret = self.copy()
ret += other
return ret
def __iadd__( self, other ):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = lambda a: offset if a<0 else a+offset
otheritems = other.__tokdict.items()
otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
for (k,vlist) in otheritems for v in vlist]
for k,v in otherdictitems:
self[k] = v
if isinstance(v[0],ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update( other.__accumNames )
return self
def __radd__(self, other):
if isinstance(other,int) and other == 0:
# useful for merging many ParseResults using sum() builtin
return self.copy()
else:
# this may raise a TypeError - so be it
return other + self
def __repr__( self ):
return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
def __str__( self ):
return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
def _asStringList( self, sep='' ):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance( item, ParseResults ):
out += item._asStringList()
else:
out.append( _ustr(item) )
return out
def asList( self ):
"""
Returns the parse results as a nested list of matching tokens, all converted to strings.
Example::
patt = OneOrMore(Word(alphas))
result = patt.parseString("sldkj lsdkj sldkj")
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
# Use asList() to create an actual list
result_list = result.asList()
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
"""
return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
def asDict( self ):
"""
Returns the named parse results as a nested dictionary.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
result_dict = result.asDict()
print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
# even though a ParseResults supports dict-like access, sometime you just need to have a dict
import json
print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
"""
if PY_3:
item_fn = self.items
else:
item_fn = self.iteritems
def toItem(obj):
if isinstance(obj, ParseResults):
if obj.haskeys():
return obj.asDict()
else:
return [toItem(v) for v in obj]
else:
return obj
return dict((k,toItem(v)) for k,v in item_fn())
def copy( self ):
"""
Returns a new copy of a C{ParseResults} object.
"""
ret = ParseResults( self.__toklist )
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update( self.__accumNames )
ret.__name = self.__name
return ret
def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
"""
(Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
"""
nl = "\n"
out = []
namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
for v in vlist)
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [ nl, indent, "<", selfTag, ">" ]
for i,res in enumerate(self.__toklist):
if isinstance(res,ParseResults):
if i in namedItems:
out += [ res.asXML(namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
out += [ res.asXML(None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = _xml_escape(_ustr(res))
out += [ nl, nextLevelIndent, "<", resTag, ">",
xmlBodyText,
"</", resTag, ">" ]
out += [ nl, indent, "</", selfTag, ">" ]
return "".join(out)
def __lookup(self,sub):
for k,vlist in self.__tokdict.items():
for v,loc in vlist:
if sub is v:
return k
return None
def getName(self):
"""
Returns the results name for this token expression. Useful when several
different expressions might match at a particular location.
Example::
integer = Word(nums)
ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
house_number_expr = Suppress('#') + Word(nums, alphanums)
user_data = (Group(house_number_expr)("house_number")
| Group(ssn_expr)("ssn")
| Group(integer)("age"))
user_info = OneOrMore(user_data)
result = user_info.parseString("22 111-22-3333 #221B")
for item in result:
print(item.getName(), ':', item[0])
prints::
age : 22
ssn : 111-22-3333
house_number : 221B
"""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (len(self) == 1 and
len(self.__tokdict) == 1 and
next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
return next(iter(self.__tokdict.keys()))
else:
return None
def dump(self, indent='', depth=0, full=True):
"""
Diagnostic method for listing out the contents of a C{ParseResults}.
Accepts an optional C{indent} argument so that this string can be embedded
in a nested display of other data.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(result.dump())
prints::
['12', '/', '31', '/', '1999']
- day: 1999
- month: 31
- year: 12
"""
out = []
NL = '\n'
out.append( indent+_ustr(self.asList()) )
if full:
if self.haskeys():
items = sorted((str(k), v) for k,v in self.items())
for k,v in items:
if out:
out.append(NL)
out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
if isinstance(v,ParseResults):
if v:
out.append( v.dump(indent,depth+1) )
else:
out.append(_ustr(v))
else:
out.append(repr(v))
elif any(isinstance(vv,ParseResults) for vv in self):
v = self
for i,vv in enumerate(v):
if isinstance(vv,ParseResults):
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) ))
else:
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv)))
return "".join(out)
def pprint(self, *args, **kwargs):
"""
Pretty-printer for parsed results as a list, using the C{pprint} module.
Accepts additional positional or keyword args as defined for the
C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
Example::
ident = Word(alphas, alphanums)
num = Word(nums)
func = Forward()
term = ident | num | Group('(' + func + ')')
func <<= ident + Group(Optional(delimitedList(term)))
result = func.parseString("fna a,b,(fnb c,d,200),100")
result.pprint(width=40)
prints::
['fna',
['a',
'b',
['(', 'fnb', ['c', 'd', '200'], ')'],
'100']]
"""
pprint.pprint(self.asList(), *args, **kwargs)
# add support for pickle protocol
def __getstate__(self):
return ( self.__toklist,
( self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name ) )
def __setstate__(self,state):
self.__toklist = state[0]
(self.__tokdict,
par,
inAccumNames,
self.__name) = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
def __getnewargs__(self):
return self.__toklist, self.__name, self.__asList, self.__modal
def __dir__(self):
return (dir(type(self)) + list(self.keys()))
collections.MutableMapping.register(ParseResults)
def col (loc,strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
s = strg
return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
def lineno(loc,strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return strg.count("\n",0,loc) + 1
def line( loc, strg ):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
"""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR >= 0:
return strg[lastCR+1:nextCR]
else:
return strg[lastCR+1:]
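# A quick sketch of how col, lineno and line relate for a two-line input
# (values follow from the definitions above):
#   data = "abc\ndef"
#   lineno(5, data)   # -> 2      (loc 5 is the 'e' on the second line)
#   col(5, data)      # -> 2      (second column of that line)
#   line(5, data)     # -> 'def'  (text of the line containing loc 5)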
def _defaultStartDebugAction( instring, loc, expr ):
print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
print ("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
pass
# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
#~ 'decorator to trim function calls to match the arity of the target'
#~ def _trim_arity(func, maxargs=3):
#~ if func in singleArgBuiltins:
#~ return lambda s,l,t: func(t)
#~ limit = 0
#~ foundArity = False
#~ def wrapper(*args):
#~ nonlocal limit,foundArity
#~ while 1:
#~ try:
#~ ret = func(*args[limit:])
#~ foundArity = True
#~ return ret
#~ except TypeError:
#~ if limit == maxargs or foundArity:
#~ raise
#~ limit += 1
#~ continue
#~ return wrapper
# this version is Python 2.x-3.x cross-compatible
'decorator to trim function calls to match the arity of the target'
def _trim_arity(func, maxargs=2):
if func in singleArgBuiltins:
return lambda s,l,t: func(t)
limit = [0]
foundArity = [False]
# traceback return data structure changed in Py3.5 - normalize back to plain tuples
if system_version[:2] >= (3,5):
def extract_stack(limit=0):
# special handling for Python 3.5.0 - extra deep call stack by 1
offset = -3 if system_version == (3,5,0) else -2
frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
return [(frame_summary.filename, frame_summary.lineno)]
def extract_tb(tb, limit=0):
frames = traceback.extract_tb(tb, limit=limit)
frame_summary = frames[-1]
return [(frame_summary.filename, frame_summary.lineno)]
else:
extract_stack = traceback.extract_stack
extract_tb = traceback.extract_tb
# synthesize what would be returned by traceback.extract_stack at the call to
# user's parse action 'func', so that we don't incur call penalty at parse time
LINE_DIFF = 6
# IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
# THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
this_line = extract_stack(limit=2)[-1]
pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
def wrapper(*args):
while 1:
try:
ret = func(*args[limit[0]:])
foundArity[0] = True
return ret
except TypeError:
# re-raise TypeErrors if they did not come from our arity testing
if foundArity[0]:
raise
else:
try:
tb = sys.exc_info()[-1]
if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
raise
finally:
del tb
if limit[0] <= maxargs:
limit[0] += 1
continue
raise
# copy func name to wrapper for sensible debug output
func_name = "<parse action>"
try:
func_name = getattr(func, '__name__',
getattr(func, '__class__').__name__)
except Exception:
func_name = str(func)
wrapper.__name__ = func_name
return wrapper
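# Sketch of what _trim_arity enables (the helper names below are hypothetical):
# parse actions may be written with 0, 1, 2 or 3 arguments, and the wrapper
# retries with fewer leading arguments until the call succeeds.
#   def takes_toks(toks): return toks
#   def takes_all(s, loc, toks): return toks
#   wrapped_a = _trim_arity(takes_toks)   # invoked as wrapped_a(s, loc, toks)
#   wrapped_b = _trim_arity(takes_all)    # invoked as wrapped_b(s, loc, toks)
#   # each underlying function receives only the trailing arguments it declares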
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
@staticmethod
def setDefaultWhitespaceChars( chars ):
r"""
Overrides the default whitespace chars
Example::
# default whitespace chars are space, <TAB> and newline
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
# change to just treat newline as significant
ParserElement.setDefaultWhitespaceChars(" \t")
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
@staticmethod
def inlineLiteralsUsing(cls):
"""
Set class to be used for inclusion of string literals into a parser.
Example::
# default literal class used is Literal
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# change to Suppress
ParserElement.inlineLiteralsUsing(Suppress)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '12', '31']
"""
ParserElement._literalStringClass = cls
def __init__( self, savelist=False ):
self.parseAction = list()
self.failAction = None
#~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = ( None, None, None ) #custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy( self ):
"""
Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element.
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
prints::
[5120, 100, 655360, 268435456]
Equivalent form of C{expr.copy()} is just C{expr()}::
integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
"""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""
Define name for this expression, makes debugging and exception messages clearer.
Example::
Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
"""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""
Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original C{ParserElement} object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
C{expr("name")} in place of C{expr.setResultsName("name")} -
see L{I{__call__}<__call__>}.
Example::
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
"""
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches=True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self,breakFlag = True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set C{breakFlag} to True to enable, False to
disable.
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
return _parseMethod( instring, loc, doActions, callPreParse )
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse,"_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
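    # Illustrative sketch for setBreak (hypothetical names, for demonstration only):
    #   integer = Word(nums).setName("integer")
    #   integer.setBreak()                       # drops into pdb just before 'integer' is matched
    #   (integer + ',' + integer).parseString("1, 2")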
def setParseAction( self, *fns, **kwargs ):
"""
Define action to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Optional keyword arguments:
- callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
Example::
integer = Word(nums)
date_str = integer + '/' + integer + '/' + integer
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# use parse action to convert to ints at parse time
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
date_str = integer + '/' + integer + '/' + integer
# note that integer fields are now ints, not strings
date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31]
"""
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = kwargs.get("callDuringTry", False)
return self
def addParseAction( self, *fns, **kwargs ):
"""
Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
See examples in L{I{copy}<copy>}.
"""
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def addCondition(self, *fns, **kwargs):
"""Add a boolean predicate function to expression's list of parse actions. See
L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction},
functions passed to C{addCondition} need to return boolean success/fail of the condition.
Optional keyword arguments:
- message = define a custom message to be used in the raised exception
- fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
year_int = integer.copy()
year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
date_str = year_int + '/' + integer + '/' + integer
result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
"""
msg = kwargs.get("message", "failed user-defined condition")
exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
for fn in fns:
def pa(s,l,t):
if not bool(_trim_arity(fn)(s,l,t)):
raise exc_type(s,l,msg)
self.parseAction.append(pa)
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
           Fail action fn is a callable function that takes the arguments
C{fn(s,loc,expr,err)} where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw C{L{ParseFatalException}}
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
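    # Illustrative sketch for setFailAction (hypothetical names): escalate a failed
    # match into an immediate, fatal parse error with a custom message.
    #   def must_be_number(s, loc, expr, err):
    #       raise ParseFatalException(s, loc, "expected a number here")
    #   integer = Word(nums).setFailAction(must_be_number)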
def _skipIgnorables( self, instring, loc ):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc,dummy = e._parse( instring, loc )
exprsFound = True
except ParseException:
pass
return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
return loc, []
def postParse( self, instring, loc, tokenlist ):
return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
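        # Core match logic: optionally run preParse (skip ignorables/whitespace), call
        # parseImpl to do the actual matching, run postParse, wrap the tokens in a
        # ParseResults, then apply any parse actions; debug and fail actions are invoked
        # around these steps when enabled.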
debugging = ( self.debug ) #and doActions )
if debugging or self.failAction:
#~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
if (self.debugActions[0] ):
self.debugActions[0]( instring, loc, self )
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
try:
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
except ParseBaseException as err:
#~ print ("Exception raised:", err)
if self.debugActions[2]:
self.debugActions[2]( instring, tokensStart, self, err )
if self.failAction:
self.failAction( instring, tokensStart, self, err )
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or loc >= len(instring):
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
else:
loc,tokens = self.parseImpl( instring, preloc, doActions )
tokens = self.postParse( instring, loc, tokens )
retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
except ParseBaseException as err:
#~ print "Exception raised in user parse action:", err
if (self.debugActions[2] ):
self.debugActions[2]( instring, tokensStart, self, err )
raise
else:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
if debugging:
#~ print ("Matched",self,"->",retTokens.asList())
if (self.debugActions[1] ):
self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
return loc, retTokens
def tryParse( self, instring, loc ):
try:
return self._parse( instring, loc, doActions=False )[0]
except ParseFatalException:
raise ParseException( instring, loc, self.errmsg, self)
def canParseNext(self, instring, loc):
try:
self.tryParse(instring, loc)
except (ParseException, IndexError):
return False
else:
return True
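    # The cache helpers below bind get/set/clear as methods that close over a local
    # dict, rather than storing the dict as an attribute, keeping per-lookup overhead small.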
class _UnboundedCache(object):
def __init__(self):
cache = {}
self.not_in_cache = not_in_cache = object()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
def clear(self):
cache.clear()
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
if _OrderedDict is not None:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = _OrderedDict()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
if len(cache) > size:
cache.popitem(False)
def clear(self):
cache.clear()
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
else:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = {}
key_fifo = collections.deque([], size)
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
if len(cache) > size:
cache.pop(key_fifo.popleft(), None)
key_fifo.append(key)
def clear(self):
cache.clear()
key_fifo.clear()
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
# argument cache for optimizing repeated calls when backtracking through recursive expressions
    packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
packrat_cache_lock = RLock()
packrat_cache_stats = [0, 0]
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
HIT, MISS = 0, 1
lookup = (self, instring, loc, callPreParse, doActions)
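        # The cache key includes the expression, input string, location, and parse flags;
        # hits return a copy of the cached ParseResults (or re-raise a cached exception)
        # so callers cannot mutate the cached value.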
with ParserElement.packrat_cache_lock:
cache = ParserElement.packrat_cache
value = cache.get(lookup)
if value is cache.not_in_cache:
ParserElement.packrat_cache_stats[MISS] += 1
try:
value = self._parseNoCache(instring, loc, doActions, callPreParse)
except ParseBaseException as pe:
# cache a copy of the exception, without the traceback
cache.set(lookup, pe.__class__(*pe.args))
raise
else:
cache.set(lookup, (value[0], value[1].copy()))
return value
else:
ParserElement.packrat_cache_stats[HIT] += 1
if isinstance(value, Exception):
raise value
return (value[0], value[1].copy())
_parse = _parseNoCache
@staticmethod
def resetCache():
ParserElement.packrat_cache.clear()
ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
_packratEnabled = False
@staticmethod
def enablePackrat(cache_size_limit=128):
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
        instead of re-executing parsing/validating code. Memoizing is done for
both valid results and parsing exceptions.
Parameters:
- cache_size_limit - (default=C{128}) - if an integer value is provided
will limit the size of the packrat cache; if None is passed, then
the cache size will be unbounded; if 0 is passed, the cache will
be effectively disabled.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method C{ParserElement.enablePackrat()}. If
your program uses C{psyco} to "compile as you go", you must call
C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
Python will crash. For best results, call C{enablePackrat()} immediately
after importing pyparsing.
Example::
import pyparsing
pyparsing.ParserElement.enablePackrat()
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
if cache_size_limit is None:
ParserElement.packrat_cache = ParserElement._UnboundedCache()
else:
ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
ParserElement._parse = ParserElement._parseCache
def parseString( self, instring, parseAll=False ):
"""
Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
If you want the grammar to require that the entire input string be
successfully parsed, then set C{parseAll} to True (equivalent to ending
the grammar with C{L{StringEnd()}}).
Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the C{loc} argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling C{parseWithTabs} on your grammar before calling C{parseString}
(see L{I{parseWithTabs}<parseWithTabs>})
- define your parse action using the full C{(s,loc,toks)} signature, and
reference the input string using the parse action's C{s} argument
         - explicitly expand the tabs in your input string before calling
C{parseString}
Example::
Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
#~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse( instring, 0 )
if parseAll:
loc = self.preParse( instring, loc )
se = Empty() + StringEnd()
se._parse( instring, loc )
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
else:
return tokens
def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
"""
Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
C{maxMatches} argument, to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens,start,end in Word(alphas).scanString(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
"""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
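        # Scan loop: attempt a parse at each candidate position; on failure advance one
        # character past the preparse point, on success yield the tokens and continue
        # after the match (or just past its start, when overlapping matches are allowed).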
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn( instring, loc )
nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
except ParseException:
loc = preloc+1
else:
if nextLoc > loc:
matches += 1
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn( instring, loc )
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc+1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def transformString( self, instring ):
"""
Extension to C{L{scanString}}, to modify matching text with modified tokens that may
be returned from a parse action. To use C{transformString}, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking C{transformString()} on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. C{transformString()} returns the resulting transformed string.
Example::
wd = Word(alphas)
wd.setParseAction(lambda toks: toks[0].title())
print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
Prints::
Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
"""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
try:
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return "".join(map(_ustr,_flatten(out)))
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def searchString( self, instring, maxMatches=_MAX_INT ):
"""
Another extension to C{L{scanString}}, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
C{maxMatches} argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
prints::
['More', 'Iron', 'Lead', 'Gold', 'I']
"""
try:
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
"""
Generator method to split a string using the given expression as a separator.
May be called with optional C{maxsplit} argument, to limit the number of splits;
        and the optional C{includeSeparators} argument (default=C{False}), to indicate whether
        the matched separator text should be included in the split results.
Example::
punc = oneOf(list(".,;:/-!?"))
print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
prints::
['This', ' this', '', ' this sentence', ' is badly punctuated', '']
"""
splits = 0
last = 0
for t,s,e in self.scanString(instring, maxMatches=maxsplit):
yield instring[last:s]
if includeSeparators:
yield t[0]
last = e
yield instring[last:]
def __add__(self, other ):
"""
Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
converts them to L{Literal}s by default.
Example::
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
Prints::
Hello, World! -> ['Hello', ',', 'World', '!']
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""
Implementation of + operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""
Implementation of - operator, returns C{L{And}} with error stop
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, And._ErrorStop(), other ] )
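    # Illustrative sketch (assumption, not from the original docstrings): once the
    # expression before '-' has matched, a failure in the remainder is reported as a
    # fatal parse error rather than being retried via backtracking, e.g.:
    #   func_call = Word(alphas) + '(' - Word(nums) + ')'
    #   func_call.parseString("f(abc)")   # fails immediately with a syntax-style error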
def __rsub__(self, other ):
"""
Implementation of - operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self
def __mul__(self,other):
"""
Implementation of * operator, allows use of C{expr * 3} in place of
        C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
may also include C{None} as in:
- C{expr*(n,None)} or C{expr*(n,)} is equivalent
to C{expr*n + L{ZeroOrMore}(expr)}
(read as "at least n instances of C{expr}")
- C{expr*(None,n)} is equivalent to C{expr*(0,n)}
(read as "0 to n instances of C{expr}")
- C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
- C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
Note that C{expr*(None,n)} does not raise an exception if
more than n exprs exist in the input stream; that is,
C{expr*(None,n)} does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
C{expr*(None,n) + ~expr}
"""
if isinstance(other,int):
minElements, optElements = other,0
elif isinstance(other,tuple):
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0],int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self*other[0] + ZeroOrMore(self)
elif isinstance(other[0],int) and isinstance(other[1],int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if (optElements):
def makeOptionalList(n):
if n>1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self]*minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self]*minElements)
return ret
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""
Implementation of | operator - returns C{L{MatchFirst}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""
Implementation of | operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""
Implementation of ^ operator - returns C{L{Or}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
def __rxor__(self, other ):
"""
Implementation of ^ operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other ):
"""
Implementation of & operator - returns C{L{Each}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
def __rand__(self, other ):
"""
Implementation of & operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
def __invert__( self ):
"""
Implementation of ~ operator - returns C{L{NotAny}}
"""
return NotAny( self )
def __call__(self, name=None):
"""
Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
passed as C{True}.
If C{name} is omitted, same as calling C{L{copy}}.
Example::
# these are equivalent
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
"""
if name is not None:
return self.setResultsName(name)
else:
return self.copy()
def suppress( self ):
"""
Suppresses the output of this C{ParserElement}; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress( self )
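    # Illustrative sketch (assumption, not from the original docstrings):
    #   wd_list = Word(alphas) + ZeroOrMore(Suppress(',') + Word(alphas))
    #   wd_list.parseString("a, b, c")   # -> ['a', 'b', 'c'] (commas suppressed)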
def leaveWhitespace( self ):
"""
Disables the skipping of whitespace before matching the characters in the
C{ParserElement}'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars( self, chars ):
"""
Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs( self ):
"""
Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
Must be called before C{parseString} when the input grammar contains elements that
match C{<TAB>} characters.
"""
self.keepTabs = True
return self
def ignore( self, other ):
"""
Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
Example::
patt = OneOrMore(Word(alphas))
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
patt.ignore(cStyleComment)
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
"""
if isinstance(other, basestring):
other = Suppress(other)
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append(other)
else:
self.ignoreExprs.append( Suppress( other.copy() ) )
return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""
Enable display of debugging messages while doing pattern matching.
"""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""
Enable display of debugging messages while doing pattern matching.
Set C{flag} to True to enable, False to disable.
Example::
wd = Word(alphas).setName("alphaword")
integer = Word(nums).setName("numword")
term = wd | integer
# turn on debugging for wd
wd.setDebug()
OneOrMore(term).parseString("abc 123 xyz 890")
prints::
Match alphaword at loc 0(1,1)
Matched alphaword -> ['abc']
Match alphaword at loc 3(1,4)
Exception raised:Expected alphaword (at char 4), (line:1, col:5)
Match alphaword at loc 7(1,8)
Matched alphaword -> ['xyz']
Match alphaword at loc 11(1,12)
Exception raised:Expected alphaword (at char 12), (line:1, col:13)
Match alphaword at loc 15(1,16)
Exception raised:Expected alphaword (at char 15), (line:1, col:16)
The output shown is that produced by the default debug actions - custom debug actions can be
specified using L{setDebugActions}. Prior to attempting
to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
which makes debugging and exception messages easier to understand - for instance, the default
name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
"""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
def __str__( self ):
return self.name
def __repr__( self ):
return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion( self, parseElementList ):
pass
def validate( self, validateTrace=[] ):
"""
Check defined expressions for valid structure, check for infinite recursive definitions.
"""
self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
"""
Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
with open(file_or_filename, "r") as f:
file_contents = f.read()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def __eq__(self,other):
if isinstance(other, ParserElement):
return self is other or vars(self) == vars(other)
elif isinstance(other, basestring):
return self.matches(other)
else:
return super(ParserElement,self)==other
def __ne__(self,other):
return not (self == other)
def __hash__(self):
return hash(id(self))
def __req__(self,other):
return self == other
def __rne__(self,other):
return not (self == other)
def matches(self, testString, parseAll=True):
"""
Method for quick testing of a parser against a test string. Good for simple
        inline micro-tests of sub-expressions while building up a larger parser.
Parameters:
- testString - to test against this expression for a match
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
Example::
expr = Word(nums)
assert expr.matches("100")
"""
try:
self.parseString(_ustr(testString), parseAll=parseAll)
return True
except ParseBaseException:
return False
def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
"""
Execute the parse expression on a series of test strings, showing each
test, the parsed results or where the parse failed. Quick and easy way to
run a parse expression against a list of sample strings.
Parameters:
- tests - a list of separate test strings, or a multiline string of test strings
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
- comment - (default=C{'#'}) - expression for indicating embedded comments in the test
string; pass None to disable comment filtering
- fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
if False, only dump nested list
- printResults - (default=C{True}) prints test output to stdout
- failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
Returns: a (success, results) tuple, where success indicates that all tests succeeded
        (or failed if C{failureTests} is True), and the results contain a list of
        (test string, result) tuples, where each result is the parsed C{L{ParseResults}} or the exception raised
Example::
number_expr = pyparsing_common.number.copy()
result = number_expr.runTests('''
# unsigned integer
100
# negative integer
-100
# float with scientific notation
6.02e23
# integer with scientific notation
1e-12
''')
print("Success" if result[0] else "Failed!")
result = number_expr.runTests('''
# stray character
100Z
# missing leading digit before '.'
-.100
# too many '.'
3.14.159
''', failureTests=True)
print("Success" if result[0] else "Failed!")
prints::
# unsigned integer
100
[100]
# negative integer
-100
[-100]
# float with scientific notation
6.02e23
[6.02e+23]
# integer with scientific notation
1e-12
[1e-12]
Success
# stray character
100Z
^
FAIL: Expected end of text (at char 3), (line:1, col:4)
# missing leading digit before '.'
-.100
^
FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
# too many '.'
3.14.159
^
FAIL: Expected end of text (at char 4), (line:1, col:5)
Success
Each test string must be on a single line. If you want to test a string that spans multiple
lines, create a test like this::
            expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
(Note that this is a raw string literal, you must include the leading 'r'.)
"""
if isinstance(tests, basestring):
tests = list(map(str.strip, tests.rstrip().splitlines()))
if isinstance(comment, basestring):
comment = Literal(comment)
allResults = []
comments = []
success = True
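        # For each test: accumulate comment lines as a header for the next test, parse
        # the test string, and record success/failure (inverted when failureTests is
        # True); results are collected as (test string, result-or-exception) pairs.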
for t in tests:
if comment is not None and comment.matches(t, False) or comments and not t:
comments.append(t)
continue
if not t:
continue
out = ['\n'.join(comments), t]
comments = []
try:
t = t.replace(r'\n','\n')
result = self.parseString(t, parseAll=parseAll)
out.append(result.dump(full=fullDump))
success = success and not failureTests
except ParseBaseException as pe:
fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
if '\n' in t:
out.append(line(pe.loc, t))
out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
else:
out.append(' '*pe.loc + '^' + fatal)
out.append("FAIL: " + str(pe))
success = success and failureTests
result = pe
except Exception as exc:
out.append("FAIL-EXCEPTION: " + str(exc))
success = success and failureTests
result = exc
if printResults:
if fullDump:
out.append('')
print('\n'.join(out))
allResults.append((t, result))
return success, allResults
class Token(ParserElement):
"""
Abstract C{ParserElement} subclass, for defining atomic matching patterns.
"""
def __init__( self ):
super(Token,self).__init__( savelist=False )
class Empty(Token):
"""
An empty token, will always match.
"""
def __init__( self ):
super(Empty,self).__init__()
self.name = "Empty"
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""
A token that will never match.
"""
def __init__( self ):
super(NoMatch,self).__init__()
self.name = "NoMatch"
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
def parseImpl( self, instring, loc, doActions=True ):
raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
"""
Token to exactly match a specified string.
Example::
Literal('blah').parseString('blah') # -> ['blah']
Literal('blah').parseString('blahfooblah') # -> ['blah']
Literal('blah').parseString('bla') # -> Exception: Expected "blah"
For case-insensitive matching, use L{CaselessLiteral}.
For keyword matching (force word break before and after the matched string),
use L{Keyword} or L{CaselessKeyword}.
"""
def __init__( self, matchString ):
super(Literal,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Literal; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.__class__ = Empty
self.name = '"%s"' % _ustr(self.match)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
# Performance tuning: this routine gets called a *lot*
# if this is a single character match string and the first character matches,
# short-circuit as quickly as possible, and avoid calling startswith
#~ @profile
def parseImpl( self, instring, loc, doActions=True ):
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
_L = Literal
ParserElement._literalStringClass = Literal
class Keyword(Token):
"""
Token to exactly match a specified string as a keyword, that is, it must be
immediately followed by a non-keyword character. Compare with C{L{Literal}}:
- C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
- C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
Accepts two optional constructor arguments in addition to the keyword string:
- C{identChars} is a string of characters that would be valid identifier characters,
defaulting to all alphanumerics + "_" and "$"
- C{caseless} allows case-insensitive matching, default is C{False}.
Example::
Keyword("start").parseString("start") # -> ['start']
Keyword("start").parseString("starting") # -> Exception
For case-insensitive matching, use L{CaselessKeyword}.
"""
DEFAULT_KEYWORD_CHARS = alphanums+"_$"
def __init__( self, matchString, identChars=None, caseless=False ):
super(Keyword,self).__init__()
if identChars is None:
identChars = Keyword.DEFAULT_KEYWORD_CHARS
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Keyword; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.name = '"%s"' % self.match
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = matchString.upper()
identChars = identChars.upper()
self.identChars = set(identChars)
def parseImpl( self, instring, loc, doActions=True ):
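        # Match the keyword text itself, then require that the characters immediately
        # before and after the match (if any) are not identifier characters, so that
        # e.g. Keyword("if") does not match inside "ifdef".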
if self.caseless:
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
(loc == 0 or instring[loc-1].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
else:
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
(loc == 0 or instring[loc-1] not in self.identChars) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
def copy(self):
c = super(Keyword,self).copy()
c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
return c
@staticmethod
def setDefaultKeywordChars( chars ):
"""Overrides the default Keyword chars
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
class CaselessLiteral(Literal):
"""
Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
Example::
OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
(Contrast with example for L{CaselessKeyword}.)
"""
def __init__( self, matchString ):
super(CaselessLiteral,self).__init__( matchString.upper() )
# Preserve the defining literal.
self.returnString = matchString
self.name = "'%s'" % self.returnString
self.errmsg = "Expected " + self.name
def parseImpl( self, instring, loc, doActions=True ):
if instring[ loc:loc+self.matchLen ].upper() == self.match:
return loc+self.matchLen, self.returnString
raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
"""
Caseless version of L{Keyword}.
Example::
OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
(Contrast with example for L{CaselessLiteral}.)
"""
def __init__( self, matchString, identChars=None ):
super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
def parseImpl( self, instring, loc, doActions=True ):
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
class CloseMatch(Token):
"""
A variation on L{Literal} which matches "close" matches, that is,
strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
- C{match_string} - string to be matched
- C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
The results from a successful parse will contain the matched text from the input string and the following named results:
- C{mismatches} - a list of the positions within the match_string where mismatches were found
- C{original} - the original match_string used to compare against the input string
If C{mismatches} is an empty list, then the match was an exact match.
Example::
patt = CloseMatch("ATCATCGAATGGA")
patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
# exact match
patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
# close match allowing up to 2 mismatches
patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
"""
def __init__(self, match_string, maxMismatches=1):
super(CloseMatch,self).__init__()
self.name = match_string
self.match_string = match_string
self.maxMismatches = maxMismatches
self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
self.mayIndexError = False
self.mayReturnEmpty = False
def parseImpl( self, instring, loc, doActions=True ):
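        # Compare the input against match_string character by character, recording the
        # positions of mismatches; fail as soon as more than maxMismatches differences
        # are seen, otherwise return the matched slice with 'original' and 'mismatches'
        # named results.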
start = loc
instrlen = len(instring)
maxloc = start + len(self.match_string)
if maxloc <= instrlen:
match_string = self.match_string
match_stringloc = 0
mismatches = []
maxMismatches = self.maxMismatches
for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
src,mat = s_m
if src != mat:
mismatches.append(match_stringloc)
if len(mismatches) > maxMismatches:
break
else:
loc = match_stringloc + 1
results = ParseResults([instring[start:loc]])
results['original'] = self.match_string
results['mismatches'] = mismatches
return loc, results
raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
"""
Token for matching words composed of allowed character sets.
Defined with string containing all allowed initial characters,
an optional string containing allowed body characters (if omitted,
defaults to the initial character set), and an optional minimum,
maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction. An optional
C{excludeChars} parameter can list characters that might be found in
the input C{bodyChars} string; useful to define a word of all printables
except for one or two characters, for instance.
L{srange} is useful for defining custom character set strings for defining
C{Word} expressions, using range notation from regular expression character sets.
A common mistake is to use C{Word} to match a specific literal string, as in
C{Word("Address")}. Remember that C{Word} uses the string argument to define
I{sets} of matchable characters. This expression would match "Add", "AAA",
"dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
To match an exact literal string, use L{Literal} or L{Keyword}.
pyparsing includes helper strings for building Words:
- L{alphas}
- L{nums}
- L{alphanums}
- L{hexnums}
         - L{alphas8bit} (alphabetic characters in the Latin-1 range 128-255 - accented, tilded, umlauted, etc.)
         - L{punc8bit} (non-alphabetic characters in the Latin-1 range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
- L{printables} (any non-whitespace character)
Example::
# a word composed of digits
integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
# a word with a leading capital, and zero or more lowercase
capital_word = Word(alphas.upper(), alphas.lower())
# hostnames are alphanumeric, with leading alpha, and '-'
hostname = Word(alphas, alphanums+'-')
# roman numeral (not a strict parser, accepts invalid mix of characters)
roman = Word("IVXLCDM")
# any string of non-whitespace characters, except for ','
csv_value = Word(printables, excludeChars=",")
"""
def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
super(Word,self).__init__()
if excludeChars:
initChars = ''.join(c for c in initChars if c not in excludeChars)
if bodyChars:
bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
self.initCharsOrig = initChars
self.initChars = set(initChars)
if bodyChars :
self.bodyCharsOrig = bodyChars
self.bodyChars = set(bodyChars)
else:
self.bodyCharsOrig = initChars
self.bodyChars = set(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asKeyword = asKeyword
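        # Performance optimization: when no length constraints are given and the
        # character sets contain no spaces, precompile an equivalent regular expression
        # so parseImpl can match the whole word in a single re.match call.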
if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
if self.bodyCharsOrig == self.initCharsOrig:
self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
elif len(self.initCharsOrig) == 1:
self.reString = "%s[%s]*" % \
(re.escape(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
else:
self.reString = "[%s][%s]*" % \
(_escapeRegexRangeChars(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
if self.asKeyword:
self.reString = r"\b"+self.reString+r"\b"
try:
self.re = re.compile( self.reString )
except Exception:
self.re = None
def parseImpl( self, instring, loc, doActions=True ):
if self.re:
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
return loc, result.group()
if not(instring[ loc ] in self.initChars):
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min( maxloc, instrlen )
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
if self.asKeyword:
if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
throwException = True
if throwException:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(Word,self).__str__()
except Exception:
pass
if self.strRepr is None:
def charsAsStr(s):
if len(s)>4:
return s[:4]+"..."
else:
return s
if ( self.initCharsOrig != self.bodyCharsOrig ):
self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
else:
self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
return self.strRepr
class Regex(Token):
"""
Token for matching strings that match a given regular expression.
Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
named parse results.
Example::
realnum = Regex(r"[+-]?\d+\.\d*")
date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
# ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
"""
compiledREtype = type(re.compile("[A-Z]"))
def __init__( self, pattern, flags=0):
"""The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
super(Regex,self).__init__()
if isinstance(pattern, basestring):
if not pattern:
warnings.warn("null string passed to Regex; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
SyntaxWarning, stacklevel=2)
raise
elif isinstance(pattern, Regex.compiledREtype):
self.re = pattern
self.pattern = \
self.reString = str(pattern)
self.flags = flags
else:
raise ValueError("Regex may only be constructed with a string or a compiled RE object")
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
d = result.groupdict()
ret = ParseResults(result.group())
if d:
for k in d:
ret[k] = d[k]
return loc,ret
def __str__( self ):
try:
return super(Regex,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "Re:(%s)" % repr(self.pattern)
return self.strRepr
class QuotedString(Token):
r"""
Token for matching strings that are delimited by quoting characters.
Defined with the following parameters:
- quoteChar - string of one or more characters defining the quote delimiting string
- escChar - character to escape quotes, typically backslash (default=C{None})
- escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
- multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
- unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
- endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
- convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
Example::
qs = QuotedString('"')
print(qs.searchString('lsjdf "This is the quote" sldjf'))
complex_qs = QuotedString('{{', endQuoteChar='}}')
print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
sql_qs = QuotedString('"', escQuote='""')
print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
prints::
[['This is the quote']]
[['This is the "quote"']]
[['This is the quote with "embedded" quotes']]
"""
def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
super(QuotedString,self).__init__()
        # remove whitespace from quote chars - won't work anyway
quoteChar = quoteChar.strip()
if not quoteChar:
warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
if endQuoteChar is None:
endQuoteChar = quoteChar
else:
endQuoteChar = endQuoteChar.strip()
if not endQuoteChar:
warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
self.convertWhitespaceEscapes = convertWhitespaceEscapes
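        # Build a regex for the quoted string: the opening quote, then a repeated group of
        # characters that are neither the end-quote's first character nor the escape char
        # (newlines excluded unless multiline), with alternatives for escaped quotes and
        # escaped characters, terminated by the end-quote string.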
if multiline:
self.flags = re.MULTILINE | re.DOTALL
self.pattern = r'%s(?:[^%s%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
else:
self.flags = 0
self.pattern = r'%s(?:[^%s\n\r%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
if len(self.endQuoteChar) > 1:
self.pattern += (
'|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
_escapeRegexRangeChars(self.endQuoteChar[i]))
for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
)
if escQuote:
self.pattern += (r'|(?:%s)' % re.escape(escQuote))
if escChar:
self.pattern += (r'|(?:%s.)' % re.escape(escChar))
self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
if isinstance(ret,basestring):
# replace escaped whitespace
if '\\' in ret and self.convertWhitespaceEscapes:
ws_map = {
r'\t' : '\t',
r'\n' : '\n',
r'\f' : '\f',
r'\r' : '\r',
}
for wslit,wschar in ws_map.items():
ret = ret.replace(wslit, wschar)
# replace escaped characters
if self.escChar:
                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
def __str__( self ):
try:
return super(QuotedString,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
return self.strRepr
class CharsNotIn(Token):
"""
Token for matching words composed of characters I{not} in a given set (will
include whitespace in matched characters if not listed in the provided exclusion set - see example).
Defined with string containing all disallowed characters, and an optional
minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction.
Example::
# define a comma-separated-value as anything that is not a ','
csv_value = CharsNotIn(',')
print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
prints::
['dkls', 'lsdkjf', 's12 34', '@!#', '213']
"""
def __init__( self, notChars, min=1, max=0, exact=0 ):
super(CharsNotIn,self).__init__()
self.skipWhitespace = False
self.notChars = notChars
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = ( self.minLen == 0 )
self.mayIndexError = False
def parseImpl( self, instring, loc, doActions=True ):
if instring[loc] in self.notChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
notchars = self.notChars
maxlen = min( start+self.maxLen, len(instring) )
while loc < maxlen and \
(instring[loc] not in notchars):
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(CharsNotIn, self).__str__()
except Exception:
pass
if self.strRepr is None:
if len(self.notChars) > 4:
self.strRepr = "!W:(%s...)" % self.notChars[:4]
else:
self.strRepr = "!W:(%s)" % self.notChars
return self.strRepr
class White(Token):
"""
Special matching class for matching whitespace. Normally, whitespace is ignored
by pyparsing grammars. This class is included when some whitespace structures
are significant. Define with a string containing the whitespace characters to be
matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
as defined for the C{L{Word}} class.
"""
whiteStrs = {
" " : "<SPC>",
"\t": "<TAB>",
"\n": "<LF>",
"\r": "<CR>",
"\f": "<FF>",
}
def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
super(White,self).__init__()
self.matchWhite = ws
self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
#~ self.leaveWhitespace()
self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
def parseImpl( self, instring, loc, doActions=True ):
if not(instring[ loc ] in self.matchWhite):
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min( maxloc, len(instring) )
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
class _PositionToken(Token):
def __init__( self ):
super(_PositionToken,self).__init__()
self.name=self.__class__.__name__
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(_PositionToken):
"""
Token to advance to a specific column of input text; useful for tabular report scraping.
"""
def __init__( self, colno ):
super(GoToColumn,self).__init__()
self.col = colno
def preParse( self, instring, loc ):
if col(loc,instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
thiscol = col( loc, instring )
if thiscol > self.col:
raise ParseException( instring, loc, "Text not in expected column", self )
newloc = loc + self.col - thiscol
ret = instring[ loc: newloc ]
return newloc, ret
class LineStart(_PositionToken):
"""
Matches if current position is at the beginning of a line within the parse string
Example::
test = '''\
AAA this line
AAA and this line
AAA but not this one
B AAA and definitely not this one
'''
for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
print(t)
Prints::
['AAA', ' this line']
['AAA', ' and this line']
"""
def __init__( self ):
super(LineStart,self).__init__()
self.errmsg = "Expected start of line"
def parseImpl( self, instring, loc, doActions=True ):
if col(loc, instring) == 1:
return loc, []
raise ParseException(instring, loc, self.errmsg, self)
class LineEnd(_PositionToken):
"""
Matches if current position is at the end of a line within the parse string
"""
def __init__( self ):
super(LineEnd,self).__init__()
self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
self.errmsg = "Expected end of line"
def parseImpl( self, instring, loc, doActions=True ):
if loc<len(instring):
if instring[loc] == "\n":
return loc+1, "\n"
else:
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class StringStart(_PositionToken):
"""
Matches if current position is at the beginning of the parse string
"""
def __init__( self ):
super(StringStart,self).__init__()
self.errmsg = "Expected start of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc != 0:
# see if entire string up to here is just whitespace and ignoreables
if loc != self.preParse( instring, 0 ):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class StringEnd(_PositionToken):
"""
Matches if current position is at the end of the parse string
"""
def __init__( self ):
super(StringEnd,self).__init__()
self.errmsg = "Expected end of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc < len(instring):
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
elif loc > len(instring):
return loc, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class WordStart(_PositionToken):
"""
Matches if the current position is at the beginning of a Word, and
is not preceded by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\b} behavior of regular expressions,
use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
the string being parsed, or at the beginning of a line.
"""
def __init__(self, wordChars = printables):
super(WordStart,self).__init__()
self.wordChars = set(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True ):
if loc != 0:
if (instring[loc-1] in self.wordChars or
instring[loc] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class WordEnd(_PositionToken):
"""
Matches if the current position is at the end of a Word, and
is not followed by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\b} behavior of regular expressions,
use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
the string being parsed, or at the end of a line.
"""
def __init__(self, wordChars = printables):
super(WordEnd,self).__init__()
self.wordChars = set(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True ):
instrlen = len(instring)
if instrlen>0 and loc<instrlen:
if (instring[loc] in self.wordChars or
instring[loc-1] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
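# A rough WordStart/WordEnd sketch (illustrative): bracketing a literal with
# WordStart(alphanums) and WordEnd(alphanums) approximates the regex \b word
# boundary, so "cat" inside "concatenate" or "bobcat" is not matched.
#
#   cat = WordStart(alphanums) + Literal("cat") + WordEnd(alphanums)
#   print(cat.searchString("cat concatenate bobcat"))   # -> [['cat']]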
class ParseExpression(ParserElement):
"""
Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
"""
def __init__( self, exprs, savelist = False ):
super(ParseExpression,self).__init__(savelist)
if isinstance( exprs, _generatorType ):
exprs = list(exprs)
if isinstance( exprs, basestring ):
self.exprs = [ ParserElement._literalStringClass( exprs ) ]
elif isinstance( exprs, collections.Iterable ):
exprs = list(exprs)
# if sequence of strings provided, wrap with Literal
if all(isinstance(expr, basestring) for expr in exprs):
exprs = map(ParserElement._literalStringClass, exprs)
self.exprs = list(exprs)
else:
try:
self.exprs = list( exprs )
except TypeError:
self.exprs = [ exprs ]
self.callPreparse = False
def __getitem__( self, i ):
return self.exprs[i]
def append( self, other ):
self.exprs.append( other )
self.strRepr = None
return self
def leaveWhitespace( self ):
"""Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [ e.copy() for e in self.exprs ]
for e in self.exprs:
e.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
else:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
return self
def __str__( self ):
try:
return super(ParseExpression,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
return self.strRepr
def streamline( self ):
super(ParseExpression,self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for Or's and MatchFirst's)
if ( len(self.exprs) == 2 ):
other = self.exprs[0]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = other.exprs[:] + [ self.exprs[1] ]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = self.exprs[:-1] + other.exprs[:]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
self.errmsg = "Expected " + _ustr(self)
return self
def setResultsName( self, name, listAllMatches=False ):
ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
return ret
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
for e in self.exprs:
e.validate(tmp)
self.checkRecursion( [] )
def copy(self):
ret = super(ParseExpression,self).copy()
ret.exprs = [e.copy() for e in self.exprs]
return ret
class And(ParseExpression):
"""
Requires all given C{ParseExpression}s to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the C{'+'} operator.
May also be constructed using the C{'-'} operator, which will suppress backtracking.
Example::
integer = Word(nums)
name_expr = OneOrMore(Word(alphas))
expr = And([integer("id"),name_expr("name"),integer("age")])
# more easily written as:
expr = integer("id") + name_expr("name") + integer("age")
"""
class _ErrorStop(Empty):
def __init__(self, *args, **kwargs):
super(And._ErrorStop,self).__init__(*args, **kwargs)
self.name = '-'
self.leaveWhitespace()
def __init__( self, exprs, savelist = True ):
super(And,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.setWhitespaceChars( self.exprs[0].whiteChars )
self.skipWhitespace = self.exprs[0].skipWhitespace
self.callPreparse = True
def parseImpl( self, instring, loc, doActions=True ):
# pass False as last arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
errorStop = False
for e in self.exprs[1:]:
if isinstance(e, And._ErrorStop):
errorStop = True
continue
if errorStop:
try:
loc, exprtokens = e._parse( instring, loc, doActions )
except ParseSyntaxException:
raise
except ParseBaseException as pe:
pe.__traceback__ = None
raise ParseSyntaxException._from_exception(pe)
except IndexError:
raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
else:
loc, exprtokens = e._parse( instring, loc, doActions )
if exprtokens or exprtokens.haskeys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #And( [ self, other ] )
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
if not e.mayReturnEmpty:
break
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
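# A small sketch of And's '-' operator (illustrative): once parsing has advanced
# past the '-', a failure raises ParseSyntaxException instead of backtracking,
# which usually yields a clearer error location inside alternations.
#
#   assignment = Word(alphas) + '=' - Word(nums)
#   assignment.parseString("x = 1")     # -> ['x', '=', '1']
#   assignment.parseString("x = oops")  # raises ParseSyntaxException at 'oops'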
class Or(ParseExpression):
"""
Requires that at least one C{ParseExpression} is found.
If two expressions match, the expression that matches the longest string will be used.
May be constructed using the C{'^'} operator.
Example::
# construct Or using '^' operator
number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789"))
prints::
[['123'], ['3.1416'], ['789']]
"""
def __init__( self, exprs, savelist = False ):
super(Or,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
matches = []
for e in self.exprs:
try:
loc2 = e.tryParse( instring, loc )
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
else:
# save match among all matches, to retry longest to shortest
matches.append((loc2, e))
if matches:
matches.sort(key=lambda x: -x[0])
for _,e in matches:
try:
return e._parse( instring, loc, doActions )
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ixor__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #Or( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
"""
Requires that at least one C{ParseExpression} is found.
If two expressions match, the first one listed is the one that will match.
May be constructed using the C{'|'} operator.
Example::
# construct MatchFirst using '|' operator
# watch the order of expressions to match
number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
# put more selective expression first
number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
"""
def __init__( self, exprs, savelist = False ):
super(MatchFirst,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
ret = e._parse( instring, loc, doActions )
return ret
except ParseException as err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ior__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #MatchFirst( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
"""
Requires all given C{ParseExpression}s to be found, but in any order.
Expressions may be separated by whitespace.
May be constructed using the C{'&'} operator.
Example::
color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
integer = Word(nums)
shape_attr = "shape:" + shape_type("shape")
posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
color_attr = "color:" + color("color")
size_attr = "size:" + integer("size")
# use Each (using operator '&') to accept attributes in any order
# (shape and posn are required, color and size are optional)
shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
shape_spec.runTests('''
shape: SQUARE color: BLACK posn: 100, 120
shape: CIRCLE size: 50 color: BLUE posn: 50,80
color:GREEN size:20 shape:TRIANGLE posn:20,40
'''
)
prints::
shape: SQUARE color: BLACK posn: 100, 120
['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
- color: BLACK
- posn: ['100', ',', '120']
- x: 100
- y: 120
- shape: SQUARE
shape: CIRCLE size: 50 color: BLUE posn: 50,80
['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
- color: BLUE
- posn: ['50', ',', '80']
- x: 50
- y: 80
- shape: CIRCLE
- size: 50
color: GREEN size: 20 shape: TRIANGLE posn: 20,40
['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
- color: GREEN
- posn: ['20', ',', '40']
- x: 20
- y: 40
- shape: TRIANGLE
- size: 20
"""
def __init__( self, exprs, savelist = True ):
super(Each,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = True
self.initExprGroups = True
def parseImpl( self, instring, loc, doActions=True ):
if self.initExprGroups:
self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
self.optionals = opt1 + opt2
self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse( instring, tmpLoc )
except ParseException:
failed.append(e)
else:
matchOrder.append(self.opt1map.get(id(e),e))
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join(_ustr(e) for e in tmpReqd)
raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
# add any unmatched Optionals, in case they have default values defined
matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
resultlist = []
for e in matchOrder:
loc,results = e._parse(instring,loc,doActions)
resultlist.append(results)
finalResults = sum(resultlist, ParseResults([]))
return loc, finalResults
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
"""
Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
"""
def __init__( self, expr, savelist=False ):
super(ParseElementEnhance,self).__init__(savelist)
if isinstance( expr, basestring ):
if issubclass(ParserElement._literalStringClass, Token):
expr = ParserElement._literalStringClass(expr)
else:
expr = ParserElement._literalStringClass(Literal(expr))
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars( expr.whiteChars )
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr is not None:
return self.expr._parse( instring, loc, doActions, callPreParse=False )
else:
raise ParseException("",loc,self.errmsg,self)
def leaveWhitespace( self ):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
else:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
return self
def streamline( self ):
super(ParseElementEnhance,self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion( self, parseElementList ):
if self in parseElementList:
raise RecursiveGrammarException( parseElementList+[self] )
subRecCheckList = parseElementList[:] + [ self ]
if self.expr is not None:
self.expr.checkRecursion( subRecCheckList )
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion( [] )
def __str__( self ):
try:
return super(ParseElementEnhance,self).__str__()
except Exception:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""
Lookahead matching of the given parse expression. C{FollowedBy}
does I{not} advance the parsing position within the input string, it only
verifies that the specified parse expression matches at the current
position. C{FollowedBy} always returns a null token list.
Example::
# use FollowedBy to match a label only if it is followed by a ':'
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
prints::
[['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
"""
def __init__( self, expr ):
super(FollowedBy,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
self.expr.tryParse( instring, loc )
return loc, []
class NotAny(ParseElementEnhance):
"""
Lookahead to disallow matching with the given parse expression. C{NotAny}
does I{not} advance the parsing position within the input string, it only
verifies that the specified parse expression does I{not} match at the current
position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
always returns a null token list. May be constructed using the '~' operator.
    Example::
        # take care not to mistake keywords for identifiers
        keyword = Keyword("if") | Keyword("else")
        identifier = ~keyword + Word(alphas)
    """
def __init__( self, expr ):
super(NotAny,self).__init__(expr)
#~ self.leaveWhitespace()
self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr.canParseNext(instring, loc):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
class _MultipleMatch(ParseElementEnhance):
def __init__( self, expr, stopOn=None):
super(_MultipleMatch, self).__init__(expr)
self.saveAsList = True
ender = stopOn
if isinstance(ender, basestring):
ender = ParserElement._literalStringClass(ender)
self.not_ender = ~ender if ender is not None else None
def parseImpl( self, instring, loc, doActions=True ):
self_expr_parse = self.expr._parse
self_skip_ignorables = self._skipIgnorables
check_ender = self.not_ender is not None
if check_ender:
try_not_ender = self.not_ender.tryParse
# must be at least one (but first see if we are the stopOn sentinel;
# if so, fail)
if check_ender:
try_not_ender(instring, loc)
loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
try:
hasIgnoreExprs = (not not self.ignoreExprs)
while 1:
if check_ender:
try_not_ender(instring, loc)
if hasIgnoreExprs:
preloc = self_skip_ignorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self_expr_parse( instring, preloc, doActions )
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
class OneOrMore(_MultipleMatch):
"""
Repetition of one or more of the given expression.
Parameters:
- expr - expression that must match one or more times
- stopOn - (default=C{None}) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: BLACK"
OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
# use stopOn attribute for OneOrMore to avoid reading label string as part of the data
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
# could also be written as
(attr_expr * (1,)).parseString(text).pprint()
"""
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
class ZeroOrMore(_MultipleMatch):
"""
Optional repetition of zero or more of the given expression.
Parameters:
- expr - expression that must match zero or more times
- stopOn - (default=C{None}) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example: similar to L{OneOrMore}
"""
def __init__( self, expr, stopOn=None):
super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
except (ParseException,IndexError):
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
"""
Optional matching of the given expression.
Parameters:
- expr - expression that must match zero or more times
- default (optional) - value to be returned if the optional expression is not found.
Example::
# US postal code can be a 5-digit zip, plus optional 4-digit qualifier
zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
zip.runTests('''
# traditional ZIP code
12345
# ZIP+4 form
12101-0001
# invalid ZIP
98765-
''')
prints::
# traditional ZIP code
12345
['12345']
# ZIP+4 form
12101-0001
['12101-0001']
# invalid ZIP
98765-
^
FAIL: Expected end of text (at char 5), (line:1, col:6)
"""
def __init__( self, expr, default=_optionalNotMatched ):
super(Optional,self).__init__( expr, savelist=False )
self.saveAsList = self.expr.saveAsList
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
except (ParseException,IndexError):
if self.defaultValue is not _optionalNotMatched:
if self.expr.resultsName:
tokens = ParseResults([ self.defaultValue ])
tokens[self.expr.resultsName] = self.defaultValue
else:
tokens = [ self.defaultValue ]
else:
tokens = []
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
class SkipTo(ParseElementEnhance):
"""
Token for skipping over all undefined text until the matched expression is found.
Parameters:
- expr - target expression marking the end of the data to be skipped
- include - (default=C{False}) if True, the target expression is also parsed
(the skipped text and target expression are returned as a 2-element list).
- ignore - (default=C{None}) used to define grammars (typically quoted strings and
comments) that might contain false matches to the target expression
- failOn - (default=C{None}) define expressions that are not allowed to be
      included in the skipped text; if found before the target expression is found,
the SkipTo is not a match
Example::
report = '''
Outstanding Issues Report - 1 Jan 2000
# | Severity | Description | Days Open
-----+----------+-------------------------------------------+-----------
101 | Critical | Intermittent system crash | 6
94 | Cosmetic | Spelling error on Login ('log|n') | 14
79 | Minor | System slow when running too many reports | 47
'''
integer = Word(nums)
SEP = Suppress('|')
# use SkipTo to simply match everything up until the next SEP
# - ignore quoted strings, so that a '|' character inside a quoted string does not match
# - parse action will call token.strip() for each matched token, i.e., the description body
string_data = SkipTo(SEP, ignore=quotedString)
string_data.setParseAction(tokenMap(str.strip))
ticket_expr = (integer("issue_num") + SEP
+ string_data("sev") + SEP
+ string_data("desc") + SEP
+ integer("days_open"))
for tkt in ticket_expr.searchString(report):
print tkt.dump()
prints::
['101', 'Critical', 'Intermittent system crash', '6']
- days_open: 6
- desc: Intermittent system crash
- issue_num: 101
- sev: Critical
['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
- days_open: 14
- desc: Spelling error on Login ('log|n')
- issue_num: 94
- sev: Cosmetic
['79', 'Minor', 'System slow when running too many reports', '47']
- days_open: 47
- desc: System slow when running too many reports
- issue_num: 79
- sev: Minor
"""
def __init__( self, other, include=False, ignore=None, failOn=None ):
super( SkipTo, self ).__init__( other )
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.asList = False
if isinstance(failOn, basestring):
self.failOn = ParserElement._literalStringClass(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
startloc = loc
instrlen = len(instring)
expr = self.expr
expr_parse = self.expr._parse
self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
tmploc = loc
while tmploc <= instrlen:
if self_failOn_canParseNext is not None:
# break if failOn expression matches
if self_failOn_canParseNext(instring, tmploc):
break
if self_ignoreExpr_tryParse is not None:
# advance past ignore expressions
while 1:
try:
tmploc = self_ignoreExpr_tryParse(instring, tmploc)
except ParseBaseException:
break
try:
expr_parse(instring, tmploc, doActions=False, callPreParse=False)
except (ParseException, IndexError):
# no match, advance loc in string
tmploc += 1
else:
# matched skipto expr, done
break
else:
# ran off the end of the input string without matching skipto expr, fail
raise ParseException(instring, loc, self.errmsg, self)
# build up return values
loc = tmploc
skiptext = instring[startloc:loc]
skipresult = ParseResults(skiptext)
if self.includeMatch:
loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
skipresult += mat
return loc, skipresult
class Forward(ParseElementEnhance):
"""
Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
Note: take care when assigning to C{Forward} not to overlook precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the C{Forward}::
fwdExpr << (a | b | c)
Converting to use the '<<=' operator instead will avoid this problem.
See L{ParseResults.pprint} for an example of a recursive parser created using
C{Forward}.
"""
def __init__( self, other=None ):
super(Forward,self).__init__( other, savelist=False )
def __lshift__( self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass(other)
self.expr = other
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars( self.expr.whiteChars )
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return self
def __ilshift__(self, other):
return self << other
def leaveWhitespace( self ):
self.skipWhitespace = False
return self
def streamline( self ):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate( self, validateTrace=[] ):
if self not in validateTrace:
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__( self ):
if hasattr(self,"name"):
return self.name
return self.__class__.__name__ + ": ..."
# stubbed out for now - creates awful memory and perf issues
self._revertClass = self.__class__
self.__class__ = _ForwardNoRecurse
try:
if self.expr is not None:
retString = _ustr(self.expr)
else:
retString = "None"
finally:
self.__class__ = self._revertClass
return self.__class__.__name__ + ": " + retString
def copy(self):
if self.expr is not None:
return super(Forward,self).copy()
else:
ret = Forward()
ret <<= self
return ret
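# A minimal recursive-grammar sketch using Forward (illustrative; Group and
# Suppress are defined later in this module): parse nested parenthesized lists,
# assigning with '<<=' so operator precedence cannot silently drop alternatives.
#
#   LPAR, RPAR = map(Suppress, "()")
#   item = Forward()
#   group = Group(LPAR + ZeroOrMore(item) + RPAR)
#   item <<= Word(alphas) | group
#   print(group.parseString("(a (b c) d)"))   # -> [['a', ['b', 'c'], 'd']]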
class _ForwardNoRecurse(Forward):
def __str__( self ):
return "..."
class TokenConverter(ParseElementEnhance):
"""
    Abstract subclass of C{ParseElementEnhance}, for converting parsed results.
"""
def __init__( self, expr, savelist=False ):
super(TokenConverter,self).__init__( expr )#, savelist )
self.saveAsList = False
class Combine(TokenConverter):
"""
Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the input string;
this can be disabled by specifying C{'adjacent=False'} in the constructor.
Example::
real = Word(nums) + '.' + Word(nums)
print(real.parseString('3.1416')) # -> ['3', '.', '1416']
# will also erroneously match the following
print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
real = Combine(Word(nums) + '.' + Word(nums))
print(real.parseString('3.1416')) # -> ['3.1416']
# no match when there are internal spaces
print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
"""
def __init__( self, expr, joinString="", adjacent=True ):
super(Combine,self).__init__( expr )
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore( self, other ):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super( Combine, self).ignore( other )
return self
def postParse( self, instring, loc, tokenlist ):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
if self.resultsName and retToks.haskeys():
return [ retToks ]
else:
return retToks
class Group(TokenConverter):
"""
Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
Example::
ident = Word(alphas)
num = Word(nums)
term = ident | num
func = ident + Optional(delimitedList(term))
print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100']
func = ident + Group(Optional(delimitedList(term)))
print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']]
"""
def __init__( self, expr ):
super(Group,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
return [ tokenlist ]
class Dict(TokenConverter):
"""
Converter to return a repetitive expression as a list, but also as a dictionary.
Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as an item key.
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
# print attributes as plain groups
print(OneOrMore(attr_expr).parseString(text).dump())
# instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
print(result.dump())
# access named fields as dict entries, or output as dict
print(result['shape'])
print(result.asDict())
prints::
['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
{'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
See more examples at L{ParseResults} of accessing fields by results name.
"""
def __init__( self, expr ):
super(Dict,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
for i,tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey,int):
ikey = _ustr(tok[0]).strip()
if len(tok)==1:
tokenlist[ikey] = _ParseResultsWithOffset("",i)
elif len(tok)==2 and not isinstance(tok[1],ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
else:
dictvalue = tok.copy() #ParseResults(i)
del dictvalue[0]
                if len(dictvalue) != 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
if self.resultsName:
return [ tokenlist ]
else:
return tokenlist
class Suppress(TokenConverter):
"""
Converter for ignoring the results of a parsed expression.
Example::
source = "a, b, c,d"
wd = Word(alphas)
wd_list1 = wd + ZeroOrMore(',' + wd)
print(wd_list1.parseString(source))
# often, delimiters that are useful during parsing are just in the
# way afterward - use Suppress to keep them out of the parsed output
wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
print(wd_list2.parseString(source))
prints::
['a', ',', 'b', ',', 'c', ',', 'd']
['a', 'b', 'c', 'd']
(See also L{delimitedList}.)
"""
def postParse( self, instring, loc, tokenlist ):
return []
def suppress( self ):
return self
class OnlyOnce(object):
"""
Wrapper for parse actions, to ensure they are only called once.
"""
def __init__(self, methodCall):
self.callable = _trim_arity(methodCall)
self.called = False
def __call__(self,s,l,t):
if not self.called:
results = self.callable(s,l,t)
self.called = True
return results
raise ParseException(s,l,"")
def reset(self):
self.called = False
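# A brief OnlyOnce sketch (illustrative): wrapping a parse action in OnlyOnce
# makes every match after the first one fail until reset() is called.
#
#   def note(s, l, t):
#       print("first match at", l)
#   begin = Literal("BEGIN").setParseAction(OnlyOnce(note))
#   begin.searchString("BEGIN data BEGIN more")   # only the first BEGIN matches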
def traceParseAction(f):
"""
Decorator for debugging parse actions.
When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
Example::
wd = Word(alphas)
@traceParseAction
def remove_duplicate_chars(tokens):
            return ''.join(sorted(set(''.join(tokens))))
wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
prints::
>>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
<<leaving remove_duplicate_chars (ret: 'dfjkls')
['dfjkls']
"""
f = _trim_arity(f)
def z(*paArgs):
thisFunc = f.__name__
s,l,t = paArgs[-3:]
if len(paArgs)>3:
thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
raise
sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
"""
Helper to define a delimited list of expressions - the delimiter defaults to ','.
By default, the list elements and delimiters can have intervening whitespace, and
comments, but this can be overridden by passing C{combine=True} in the constructor.
If C{combine} is set to C{True}, the matching tokens are returned as a single token
string, with the delimiters included; otherwise, the matching tokens are returned
as a list of tokens, with the delimiters suppressed.
Example::
delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
"""
dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
if combine:
return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
else:
return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
def countedArray( expr, intExpr=None ):
"""
Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
Example::
countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
# in this parser, the leading integer value is given in binary,
# '10' indicating that 2 values are in the array
binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
"""
arrayExpr = Forward()
def countFieldParseAction(s,l,t):
n = t[0]
arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
return []
if intExpr is None:
intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
else:
intExpr = intExpr.copy()
intExpr.setName("arrayLen")
intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
def _flatten(L):
ret = []
for i in L:
if isinstance(i,list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
def matchPreviousLiteral(expr):
"""
Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches a
    previous literal, it will also match the leading C{"1:1"} in C{"1:10"}.
If this is not desired, use C{matchPreviousExpr}.
Do I{not} use with packrat parsing enabled.
"""
rep = Forward()
def copyTokenToRepeater(s,l,t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And(Literal(tt) for tt in tflat)
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName('(prev) ' + _ustr(expr))
return rep
def matchPreviousExpr(expr):
"""
Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches by
    expressions, it will I{not} match the leading C{"1:1"} in C{"1:10"};
the expressions are evaluated first, and then compared, so
C{"1"} is compared with C{"10"}.
Do I{not} use with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep <<= e2
def copyTokenToRepeater(s,l,t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s,l,t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException("",0,"")
rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName('(prev) ' + _ustr(expr))
return rep
def _escapeRegexRangeChars(s):
#~ escape these chars: ^-]
for c in r"\^-]":
s = s.replace(c,_bslash+c)
s = s.replace("\n",r"\n")
s = s.replace("\t",r"\t")
return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
"""
Helper to quickly define a set of alternative Literals, and makes sure to do
longest-first testing when there is a conflict, regardless of the input order,
but returns a C{L{MatchFirst}} for best performance.
Parameters:
- strs - a string of space-delimited literals, or a collection of string literals
- caseless - (default=C{False}) - treat all literals as caseless
- useRegex - (default=C{True}) - as an optimization, will generate a Regex
object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
if creating a C{Regex} raises an exception)
Example::
comp_oper = oneOf("< = > <= >= !=")
var = Word(alphas)
number = Word(nums)
term = var | number
comparison_expr = term + comp_oper + term
print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
prints::
[['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
"""
if caseless:
isequal = ( lambda a,b: a.upper() == b.upper() )
masks = ( lambda a,b: b.upper().startswith(a.upper()) )
parseElementClass = CaselessLiteral
else:
isequal = ( lambda a,b: a == b )
masks = ( lambda a,b: b.startswith(a) )
parseElementClass = Literal
symbols = []
if isinstance(strs,basestring):
symbols = strs.split()
elif isinstance(strs, collections.Iterable):
symbols = list(strs)
else:
warnings.warn("Invalid argument to oneOf, expected string or iterable",
SyntaxWarning, stacklevel=2)
if not symbols:
return NoMatch()
i = 0
while i < len(symbols)-1:
cur = symbols[i]
for j,other in enumerate(symbols[i+1:]):
if ( isequal(other, cur) ):
del symbols[i+j+1]
break
elif ( masks(cur, other) ):
del symbols[i+j+1]
symbols.insert(i,other)
cur = other
break
else:
i += 1
if not caseless and useRegex:
#~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
try:
if len(symbols)==len("".join(symbols)):
return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
else:
return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
except Exception:
warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning, stacklevel=2)
# last resort, just use MatchFirst
return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
def dictOf( key, value ):
"""
Helper to easily and clearly define a dictionary by specifying the respective patterns
for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
in the proper order. The key pattern can include delimiting markers or punctuation,
as long as they are suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the C{Dict} results can include named token
fields.
Example::
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
print(OneOrMore(attr_expr).parseString(text).dump())
attr_label = label
attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
# similar to Dict, but simpler call format
result = dictOf(attr_label, attr_value).parseString(text)
print(result.dump())
print(result['shape'])
print(result.shape) # object attribute access works too
print(result.asDict())
prints::
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
SQUARE
{'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
"""
return Dict( ZeroOrMore( Group ( key + value ) ) )
def originalTextFor(expr, asString=True):
"""
Helper to return the original, untokenized text for a given expression. Useful to
restore the parsed fields of an HTML start tag into the raw tag text itself, or to
revert separate tokens with intervening whitespace back to the original matching
    input text. By default, returns a string containing the original parsed text.
If the optional C{asString} argument is passed as C{False}, then the return value is a
C{L{ParseResults}} containing any results names that were originally matched, and a
single token containing the original matched text from the input string. So if
the expression passed to C{L{originalTextFor}} contains expressions with defined
results names, you must set C{asString} to C{False} if you want to preserve those
results name values.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b","i"):
opener,closer = makeHTMLTags(tag)
patt = originalTextFor(opener + SkipTo(closer) + closer)
print(patt.searchString(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>']
"""
locMarker = Empty().setParseAction(lambda s,loc,t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s,l,t: s[t._original_start:t._original_end]
else:
def extractText(s,l,t):
t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
matchExpr.setParseAction(extractText)
matchExpr.ignoreExprs = expr.ignoreExprs
return matchExpr
def ungroup(expr):
"""
Helper to undo pyparsing's default grouping of And expressions, even
if all but one are non-empty.
"""
return TokenConverter(expr).setParseAction(lambda t:t[0])
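# A quick ungroup sketch (illustrative): strip the extra nesting level that a
# Group adds around a single result.
#
#   grouped = Group(Word(nums))
#   print(grouped.parseString("42"))           # -> [['42']]
#   print(ungroup(grouped).parseString("42"))  # -> ['42']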
def locatedExpr(expr):
"""
Helper to decorate a returned token with its starting and ending locations in the input string.
This helper adds the following results names:
- locn_start = location where matched expression begins
- locn_end = location where matched expression ends
- value = the actual parsed results
Be careful if the input text contains C{<TAB>} characters, you may want to call
C{L{ParserElement.parseWithTabs}}
Example::
wd = Word(alphas)
for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
print(match)
prints::
[[0, 'ljsdf', 5]]
[[8, 'lksdjjf', 15]]
[[18, 'lkkjj', 23]]
"""
locator = Empty().setParseAction(lambda s,l,t: l)
return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1) | Regex(r"\w", re.UNICODE)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
def srange(s):
r"""
Helper to easily define string ranges for use in Word construction. Borrows
syntax from regexp '[]' string range definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string is the expanded
character set joined into a single string.
The values enclosed in the []'s may be:
- a single character
- an escaped character with a leading backslash (such as C{\-} or C{\]})
- an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character)
(C{\0x##} is also supported for backwards compatibility)
- an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
- a range of any of the above, separated by a dash (C{'a-z'}, etc.)
- any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
"""
_expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
try:
return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
except Exception:
return ""
def matchOnlyAtCol(n):
"""
Helper method for defining parse actions that require matching at a specific
column in the input text.
"""
def verifyCol(strg,locn,toks):
if col(locn,strg) != n:
raise ParseException(strg,locn,"matched token not at column %d" % n)
return verifyCol
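# A small matchOnlyAtCol sketch (illustrative): accept a token only when it
# starts in a fixed column, here column 1, so indented lines are skipped.
#
#   heading = Word(alphas).setParseAction(matchOnlyAtCol(1))
#   heading.searchString("Intro\n   indented\nSummary")   # -> [['Intro'], ['Summary']]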
def replaceWith(replStr):
"""
Helper method for common parse actions that simply return a literal value. Especially
useful when used with C{L{transformString<ParserElement.transformString>}()}.
Example::
num = Word(nums).setParseAction(lambda toks: int(toks[0]))
na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
term = na | num
OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
"""
return lambda s,l,t: [replStr]
def removeQuotes(s,l,t):
"""
Helper parse action for removing quotation marks from parsed quoted strings.
Example::
# by default, quotation marks are included in parsed results
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
# use removeQuotes to strip quotation marks from parsed results
quotedString.setParseAction(removeQuotes)
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
"""
return t[0][1:-1]
def tokenMap(func, *args):
"""
    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
args are passed, they are forwarded to the given function as additional arguments after
the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
parsed data to an integer using base 16.
    Example (compare the last example to the one in L{ParserElement.transformString})::
hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
hex_ints.runTests('''
00 11 22 aa FF 0a 0d 1a
''')
upperword = Word(alphas).setParseAction(tokenMap(str.upper))
OneOrMore(upperword).runTests('''
my kingdom for a horse
''')
wd = Word(alphas).setParseAction(tokenMap(str.title))
OneOrMore(wd).setParseAction(' '.join).runTests('''
now is the winter of our discontent made glorious summer by this sun of york
''')
prints::
00 11 22 aa FF 0a 0d 1a
[0, 17, 34, 170, 255, 10, 13, 26]
my kingdom for a horse
['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
now is the winter of our discontent made glorious summer by this sun of york
['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
"""
def pa(s,l,t):
return [func(tokn, *args) for tokn in t]
try:
func_name = getattr(func, '__name__',
getattr(func, '__class__').__name__)
except Exception:
func_name = str(func)
pa.__name__ = func_name
return pa
upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
def _makeTags(tagStr, xml):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr,basestring):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas,alphanums+"_-:")
if (xml):
tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
else:
printablesLessRAbrack = "".join(c for c in printables if c not in ">")
tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
Optional( Suppress("=") + tagAttrValue ) ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
closeTag = Combine(_L("</") + tagStr + ">")
openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
openTag.tag = resname
closeTag.tag = resname
return openTag, closeTag
def makeHTMLTags(tagStr):
"""
Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
Example::
text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
# makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
a,a_end = makeHTMLTags("A")
link_expr = a + SkipTo(a_end)("link_text") + a_end
for link in link_expr.searchString(text):
# attributes in the <A> tag (like "href" shown here) are also accessible as named results
print(link.link_text, '->', link.href)
prints::
pyparsing -> http://pyparsing.wikispaces.com
"""
return _makeTags( tagStr, False )
def makeXMLTags(tagStr):
"""
Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
tags only in the given upper/lower case.
Example: similar to L{makeHTMLTags}
"""
return _makeTags( tagStr, True )
def withAttribute(*args,**attrDict):
"""
Helper to create a validating parse action to be used with start tags created
with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
with a required attribute value, to avoid false matches on common tags such as
C{<TD>} or C{<DIV>}.
Call C{withAttribute} with a series of attribute names and values. Specify the list
    of filter attribute names and values as:
- keyword arguments, as in C{(align="right")}, or
- as an explicit dict with C{**} operator, when an attribute name is also a Python
reserved word, as in C{**{"class":"Customer", "align":"right"}}
- a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
For attribute names with a namespace prefix, you must use the second form. Attribute
names are matched insensitive to upper/lower case.
If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
To verify that the attribute exists, but without specifying a value, pass
C{withAttribute.ANY_VALUE} as the value.
Example::
html = '''
<div>
Some text
<div type="grid">1 4 0 1 0</div>
<div type="graph">1,3 2,3 1,1</div>
<div>this has no type</div>
</div>
'''
div,div_end = makeHTMLTags("div")
# only match div tag having a type attribute with value "grid"
div_grid = div().setParseAction(withAttribute(type="grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
# construct a match with any div tag having a type attribute, regardless of the value
div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k,v) for k,v in attrs]
def pa(s,l,tokens):
for attrName,attrValue in attrs:
if attrName not in tokens:
raise ParseException(s,l,"no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
(attrName, tokens[attrName], attrValue))
return pa
withAttribute.ANY_VALUE = object()
def withClass(classname, namespace=''):
"""
Simplified version of C{L{withAttribute}} when matching on a div class - made
difficult because C{class} is a reserved word in Python.
Example::
html = '''
<div>
Some text
<div class="grid">1 4 0 1 0</div>
<div class="graph">1,3 2,3 1,1</div>
<div>this <div> has no class</div>
</div>
'''
div,div_end = makeHTMLTags("div")
div_grid = div().setParseAction(withClass("grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
classattr = "%s:class" % namespace if namespace else "class"
return withAttribute(**{classattr : classname})
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
"""
Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary or
binary, left- or right-associative. Parse actions can also be attached
to operator expressions. The generated parser will also recognize the use
of parentheses to override operator precedences (see example below).
Note: if you define a deep operator list, you may see performance issues
when using infixNotation. See L{ParserElement.enablePackrat} for a
mechanism to potentially improve your parser performance.
Parameters:
    - baseExpr - expression representing the most basic element for the nested expression grammar
- opList - list of tuples, one for each operator precedence level in the
expression grammar; each tuple is of the form
(opExpr, numTerms, rightLeftAssoc, parseAction), where:
- opExpr is the pyparsing expression for the operator;
may also be a string, which will be converted to a Literal;
if numTerms is 3, opExpr is a tuple of two expressions, for the
two operators separating the 3 terms
- numTerms is the number of terms for this operator (must
be 1, 2, or 3)
- rightLeftAssoc is the indicator whether the operator is
right or left associative, using the pyparsing-defined
constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the
parse action tuple member may be omitted)
- lpar - expression for matching left-parentheses (default=C{Suppress('(')})
- rpar - expression for matching right-parentheses (default=C{Suppress(')')})
Example::
# simple example of four-function arithmetic with ints and variable names
integer = pyparsing_common.signed_integer
varname = pyparsing_common.identifier
arith_expr = infixNotation(integer | varname,
[
('-', 1, opAssoc.RIGHT),
(oneOf('* /'), 2, opAssoc.LEFT),
(oneOf('+ -'), 2, opAssoc.LEFT),
])
arith_expr.runTests('''
5+3*6
(5+3)*6
-2--11
''', fullDump=False)
prints::
5+3*6
[[5, '+', [3, '*', 6]]]
(5+3)*6
[[[5, '+', 3], '*', 6]]
-2--11
[[['-', 2], '-', ['-', 11]]]
"""
ret = Forward()
lastExpr = baseExpr | ( lpar + ret + rpar )
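    # build one level of the precedence hierarchy per operator definition; each level
    # either matches an expression using this tier's operators or falls through to the
    # higher-precedence expression accumulated so far in lastExpr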
for i,operDef in enumerate(opList):
opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
if arity == 3:
if opExpr is None or len(opExpr) != 2:
raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
opExpr1, opExpr2 = opExpr
thisExpr = Forward().setName(termName)
if rightLeftAssoc == opAssoc.LEFT:
if arity == 1:
matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
else:
matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
elif rightLeftAssoc == opAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Optional):
opExpr = Optional(opExpr)
matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
else:
matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
else:
raise ValueError("operator must indicate right or left associativity")
if pa:
matchExpr.setParseAction( pa )
thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
lastExpr = thisExpr
ret <<= lastExpr
return ret
operatorPrecedence = infixNotation
"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""
dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
"""
Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
- closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
- content - expression for items within the nested lists (default=C{None})
- ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the C{ignoreExpr} argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
The default is L{quotedString}, but if no expressions are to be ignored,
then pass C{None} for this argument.
Example::
data_type = oneOf("void int short long char float double")
decl_data_type = Combine(data_type + Optional(Word('*')))
ident = Word(alphas+'_', alphanums+'_')
number = pyparsing_common.number
arg = Group(decl_data_type + ident)
LPAR,RPAR = map(Suppress, "()")
code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
c_function = (decl_data_type("type")
+ ident("name")
+ LPAR + Optional(delimitedList(arg), [])("args") + RPAR
+ code_body("body"))
c_function.ignore(cStyleComment)
source_code = '''
int is_odd(int x) {
return (x%2);
}
int dec_to_hex(char hchar) {
if (hchar >= '0' && hchar <= '9') {
return (ord(hchar)-ord('0'));
} else {
return (10+ord(hchar)-ord('A'));
}
}
'''
for func in c_function.searchString(source_code):
print("%(name)s (%(type)s) args: %(args)s" % func)
prints::
is_odd (int) args: [['int', 'x']]
dec_to_hex (int) args: [['char', 'hchar']]
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
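        # build a default content expression: runs of characters that are not whitespace,
        # not the nesting delimiters, and not the start of an ignored expression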
if isinstance(opener,basestring) and isinstance(closer,basestring):
if len(opener) == 1 and len(closer)==1:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
).setParseAction(lambda t:t[0].strip()))
else:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
raise ValueError("opening and closing arguments must be strings if no content expression is given")
ret = Forward()
if ignoreExpr is not None:
ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
else:
ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
ret.setName('nested %s%s expression' % (opener,closer))
return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
"""
Helper method for defining space-delimited indentation blocks, such as
those used to define block statements in Python source code.
Parameters:
- blockStatementExpr - expression defining syntax of statement that
is repeated within the indented block
- indentStack - list created by caller to manage indentation stack
(multiple statementWithIndentedBlock expressions within a single grammar
should share a common indentStack)
- indent - boolean indicating whether block must be indented beyond the
          current level; set to False for a block of left-most statements
(default=C{True})
A valid block must contain at least one C{blockStatement}.
Example::
data = '''
def A(z):
A1
B = 100
G = A2
A2
A3
B
def BB(a,b,c):
BB1
def BBA():
bba1
bba2
bba3
C
D
def spam(x,y):
def eggs(z):
pass
'''
indentStack = [1]
stmt = Forward()
identifier = Word(alphas, alphanums)
funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
func_body = indentedBlock(stmt, indentStack)
funcDef = Group( funcDecl + func_body )
rvalue = Forward()
funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
rvalue << (funcCall | identifier | Word(nums))
assignment = Group(identifier + "=" + rvalue)
stmt << ( funcDef | assignment | identifier )
module_body = OneOrMore(stmt)
parseTree = module_body.parseString(data)
parseTree.pprint()
prints::
[['def',
'A',
['(', 'z', ')'],
':',
[['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
'B',
['def',
'BB',
['(', 'a', 'b', 'c', ')'],
':',
[['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
'C',
'D',
['def',
'spam',
['(', 'x', 'y', ')'],
':',
[[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
"""
def checkPeerIndent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if curCol != indentStack[-1]:
if curCol > indentStack[-1]:
raise ParseFatalException(s,l,"illegal nesting")
raise ParseException(s,l,"not a peer entry")
def checkSubIndent(s,l,t):
curCol = col(l,s)
if curCol > indentStack[-1]:
indentStack.append( curCol )
else:
raise ParseException(s,l,"not a subentry")
def checkUnindent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
raise ParseException(s,l,"not an unindent")
indentStack.pop()
NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
PEER = Empty().setParseAction(checkPeerIndent).setName('')
UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
if indent:
smExpr = Group( Optional(NL) +
#~ FollowedBy(blockStatementExpr) +
INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
else:
smExpr = Group( Optional(NL) +
(OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
blockStatementExpr.ignore(_bslash + LineEnd())
return smExpr.setName('indented block')
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
def replaceHTMLEntity(t):
"""Helper parser action to replace common HTML entities with their special characters"""
return _htmlEntityMap.get(t.entity)
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
"Comment of the form C{/* ... */}"
htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form C{<!-- ... -->}"
restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form C{// ... (to end of line)}"
cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"
javaStyleComment = cppStyleComment
"Same as C{L{cppStyleComment}}"
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
"Comment of the form C{# ... (to end of line)}"
_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
Optional( Word(" \t") +
~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""
# some other useful expressions - using lower-case class name since we are really using this as a namespace
class pyparsing_common:
"""
Here are some common low-level expressions that may be useful in jump-starting parser development:
- numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
- common L{programming identifiers<identifier>}
- network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
- ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
- L{UUID<uuid>}
- L{comma-separated list<comma_separated_list>}
Parse actions:
- C{L{convertToInteger}}
- C{L{convertToFloat}}
- C{L{convertToDate}}
- C{L{convertToDatetime}}
- C{L{stripHTMLTags}}
- C{L{upcaseTokens}}
- C{L{downcaseTokens}}
Example::
pyparsing_common.number.runTests('''
# any int or real number, returned as the appropriate type
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.fnumber.runTests('''
# any int or real number, returned as float
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.hex_integer.runTests('''
# hex numbers
100
FF
''')
pyparsing_common.fraction.runTests('''
# fractions
1/2
-3/4
''')
pyparsing_common.mixed_integer.runTests('''
# mixed fractions
1
1/2
-3/4
1-3/4
''')
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests('''
# uuid
12345678-1234-5678-1234-567812345678
''')
prints::
# any int or real number, returned as the appropriate type
100
[100]
-100
[-100]
+100
[100]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# any int or real number, returned as float
100
[100.0]
-100
[-100.0]
+100
[100.0]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# hex numbers
100
[256]
FF
[255]
# fractions
1/2
[0.5]
-3/4
[-0.75]
# mixed fractions
1
[1]
1/2
[0.5]
-3/4
[-0.75]
1-3/4
[1.75]
# uuid
12345678-1234-5678-1234-567812345678
[UUID('12345678-1234-5678-1234-567812345678')]
"""
convertToInteger = tokenMap(int)
"""
Parse action for converting parsed integers to Python int
"""
convertToFloat = tokenMap(float)
"""
Parse action for converting parsed numbers to Python float
"""
integer = Word(nums).setName("integer").setParseAction(convertToInteger)
"""expression that parses an unsigned integer, returns an int"""
hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
"""expression that parses a hexadecimal integer, returns an int"""
signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
"""expression that parses an integer with optional leading sign, returns an int"""
fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
"""fractional expression of an integer divided by an integer, returns a float"""
fraction.addParseAction(lambda t: t[0]/t[-1])
mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
"""mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
mixed_integer.addParseAction(sum)
real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
"""expression that parses a floating point number and returns a float"""
sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
"""expression that parses a floating point number with optional scientific notation and returns a float"""
# streamlining this expression makes the docs nicer-looking
number = (sci_real | real | signed_integer).streamline()
"""any numeric expression, returns the corresponding Python type"""
fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
"""any int or real number, returned as float"""
identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
"""typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
"IPv4 address (C{0.0.0.0 - 255.255.255.255})"
_ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
_full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
_short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
_short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
_mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
"IPv6 address (long, short, or mixed form)"
mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
"MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
@staticmethod
def convertToDate(fmt="%Y-%m-%d"):
"""
Helper to create a parse action for converting parsed date string to Python datetime.date
Params -
- fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})
Example::
date_expr = pyparsing_common.iso8601_date.copy()
date_expr.setParseAction(pyparsing_common.convertToDate())
print(date_expr.parseString("1999-12-31"))
prints::
[datetime.date(1999, 12, 31)]
"""
def cvt_fn(s,l,t):
try:
return datetime.strptime(t[0], fmt).date()
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
@staticmethod
def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
"""
Helper to create a parse action for converting parsed datetime string to Python datetime.datetime
Params -
- fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})
Example::
dt_expr = pyparsing_common.iso8601_datetime.copy()
dt_expr.setParseAction(pyparsing_common.convertToDatetime())
print(dt_expr.parseString("1999-12-31T23:59:59.999"))
prints::
[datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
"""
def cvt_fn(s,l,t):
try:
return datetime.strptime(t[0], fmt)
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
"ISO8601 date (C{yyyy-mm-dd})"
iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
"ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"
uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
"UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"
_html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
@staticmethod
def stripHTMLTags(s, l, tokens):
"""
Parse action to remove HTML tags from web page HTML source
Example::
# strip HTML links from normal text
text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
td,td_end = makeHTMLTags("TD")
table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
"""
return pyparsing_common._html_stripper.transformString(tokens[0])
_commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',')
+ Optional( White(" \t") ) ) ).streamline().setName("commaItem")
comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
"""Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
"""Parse action to convert tokens to upper case."""
downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
"""Parse action to convert tokens to lower case."""
if __name__ == "__main__":
selectToken = CaselessLiteral("select")
fromToken = CaselessLiteral("from")
ident = Word(alphas, alphanums + "_$")
columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
columnNameList = Group(delimitedList(columnName)).setName("columns")
columnSpec = ('*' | columnNameList)
tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
tableNameList = Group(delimitedList(tableName)).setName("tables")
simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
# demo runTests method, including embedded comments in test string
simpleSQL.runTests("""
# '*' as column list and dotted table name
select * from SYS.XYZZY
# caseless match on "SELECT", and casts back to "select"
SELECT * from XYZZY, ABC
# list of column names, and mixed case SELECT keyword
Select AA,BB,CC from Sys.dual
# multiple tables
Select A, B, C from Sys.dual, Table2
# invalid SELECT keyword - should fail
Xelect A, B, C from Sys.dual
# incomplete command - should fail
Select
# invalid column name - should fail
Select ^^^ frox Sys.dual
""")
pyparsing_common.number.runTests("""
100
-100
+100
3.14159
6.02e23
1e-12
""")
# any int or real number, returned as float
pyparsing_common.fnumber.runTests("""
100
-100
+100
3.14159
6.02e23
1e-12
""")
pyparsing_common.hex_integer.runTests("""
100
FF
""")
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests("""
12345678-1234-5678-1234-567812345678
""")
| mit | 5,633,321,703,883,262,000 | 38.355864 | 195 | 0.574191 | false |
xR86/Algo | FII-year3sem1-SI/hw1/exe3/key_manager.py | 2 | 2576 | #!/usr/bin/python2.7
from Crypto.Cipher import AES
from Crypto import Random
'''
key = 'Sixteen byte key'
plain_text = 'Attack at dawn'
iv = Random.new().read(AES.block_size)
#if ECB, use PCKS#7
# append bytes to reach mod 16 boundary.
# All padding bytes have the same value: the number of bytes that you are adding:
length = 16 - (len(plain_text) % 16)
plain_text += chr(length)*length
cipher = AES.new(key, AES.MODE_ECB) #AES.new(key, AES.MODE_CFB, iv)
cipher_text = cipher.encrypt(plain_text)
print cipher_text.encode('hex')
decrypt = AES.new(key, AES.MODE_ECB)
dec_plain_text = decrypt.decrypt(cipher_text)
# remove from the back of the plaintext as many bytes as indicated by padding:
dec_plain_text = dec_plain_text[:-ord(dec_plain_text[-1])]
print dec_plain_text
'''
k1 = "ecb-fff byte key" #ECB
k2 = "cfb-fff byte key" #CFB, with iv known by A,B
k3 = "*known key this-" #encrypts k1, k2, is known by all 3 nodes from the start
def encrypt(k, mode):
global k3
key = k3
plain_text = k
#if ECB, use PCKS#7
# append bytes to reach mod 16 boundary.
# All padding bytes have the same value: the number of bytes that you are adding:
length = 16 - (len(plain_text) % 16)
plain_text += chr(length)*length
if mode == 'CFB':
iv = Random.new().read(AES.block_size)
cipher = AES.new(key, AES.MODE_CFB, iv)
else: #if no mode set, presume ECB
cipher = AES.new(key, AES.MODE_ECB)
cipher_text = cipher.encrypt(plain_text)
print cipher_text.encode('hex')
'''
decrypt = AES.new(key, AES.MODE_ECB)
dec_plain_text = decrypt.decrypt(cipher_text)
# remove from the back of the plaintext as many bytes as indicated by padding:
dec_plain_text = dec_plain_text[:-ord(dec_plain_text[-1])]
print dec_plain_text
'''
return cipher_text
import socket
###Acts as a server
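# Simple key-distribution loop: two clients connect in turn, each sends the cipher
# mode it wants ('ECB' or 'CFB') and receives the corresponding session key (k1 or k2)
# encrypted under the shared key k3.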
sock = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
port = 1595 # Reserve a port for your service.
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port)) # Bind to the port
sock.listen(5) # Now wait for client connection.
counter = 0
while (counter < 2):
c, addr = sock.accept() # Establish connection with client.
tmp_name = '[' + chr(ord('A') + counter) + ']'
print tmp_name, 'Got connection from', addr
mode = c.recv(1024)
print tmp_name, ' recv request for mode: ', mode
print tmp_name, ' Sending crypto key'
if mode == 'ECB':
c.send(encrypt(k1, ''))
else:
c.send(encrypt(k2, ''))
c.close() # Close the connection
counter += 1
| gpl-2.0 | -3,831,906,174,181,155,000 | 25.556701 | 82 | 0.675466 | false |
wemanuel/smry | server-auth/ls/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/gcloud/sdktools/components/list.py | 11 | 1866 | # Copyright 2013 Google Inc. All Rights Reserved.
"""The command to list installed/available gcloud components."""
import textwrap
from googlecloudsdk.calliope import base
class List(base.Command):
"""List the status of all Cloud SDK components.
List all packages and individual components in the Cloud SDK and provide
information such as whether the component is installed on the local
workstation, whether a newer version is available, the size of the component,
and the ID used to refer to the component in commands.
"""
detailed_help = {
'DESCRIPTION': textwrap.dedent("""\
This command lists all the tools in the Cloud SDK (both individual
components and preconfigured packages of components). For each
component, the command lists the following information:
* Status on your local workstation: not installed, installed (and
up to date), and update available (installed, but not up to date)
* Name of the component (a description)
* ID of the component (used to refer to the component in other
[{parent_command}] commands)
* Size of the component
In addition, if the `--show-versions` flag is specified, the command
lists the currently installed version (if any) and the latest
available version of each individual component.
"""),
'EXAMPLES': textwrap.dedent("""\
$ gcloud components list
$ gcloud components list --show-versions
"""),
}
@staticmethod
def Args(parser):
parser.add_argument(
'--show-versions', required=False, action='store_true',
help='Show installed and available versions of all components.')
def Run(self, args):
"""Runs the list command."""
self.group.update_manager.List(show_versions=args.show_versions)
| apache-2.0 | 2,947,782,261,586,296,000 | 36.32 | 79 | 0.677921 | false |
ricardaw/pismdev | util/PISMNC.py | 5 | 4793 | #!/usr/bin/env python
try:
import netCDF4 as netCDF
except:
import netCDF3 as netCDF
class PISMDataset(netCDF.Dataset):
def create_time(self, use_bounds = False, length = None, units = None):
self.createDimension('time', size = length)
t_var = self.createVariable('time', 'f8', ('time',))
t_var.axis = "T"
t_var.long_name = "time"
if not units:
t_var.units = "seconds since 1-1-1" # just a default
else:
t_var.units = units
if use_bounds:
self.createDimension('n_bounds', 2)
self.createVariable("time_bounds", 'f8', ('time', 'n_bounds'))
t_var.bounds = "time_bounds"
def create_dimensions(self, x, y, time_dependent = False, use_time_bounds = False):
"""
Create PISM-compatible dimensions in a NetCDF file.
"""
if time_dependent and not 'time' in self.variables.keys():
self.create_time(use_time_bounds)
self.createDimension('x', x.size)
self.createDimension('y', y.size)
x_var = self.createVariable('x', 'f8', ('x',))
x_var[:] = x
y_var = self.createVariable('y', 'f8', ('y',))
y_var[:] = y
x_var.axis = "X"
x_var.long_name = "X-coordinate in Cartesian system"
x_var.units = "m"
x_var.standard_name = "projection_x_coordinate"
y_var.axis = "Y"
y_var.long_name = "Y-coordinate in Cartesian system"
y_var.units = "m"
y_var.standard_name = "projection_y_coordinate"
self.sync()
def append_time(self, value, bounds = None):
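        # append a record to the unlimited 'time' dimension, optionally writing its bounds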
if 'time' in self.dimensions.keys():
time = self.variables['time']
N = time.size
time[N] = value
if bounds:
self.variables['time_bounds'][N, :] = bounds
def set_attrs(self, var_name, attrs):
"""attrs should be a list of (name, value) tuples."""
if not attrs:
return
for (name, value) in attrs.iteritems():
setattr(self.variables[var_name], name, value)
def define_2d_field(self, var_name, time_dependent = False, dims = None, nc_type = 'f8', attrs = None):
"""
time_dependent: boolean
dims: an optional list of dimension names. use this to override the
default order ('time', 'y', 'x')
attrs: a dictionary of attributes
"""
if not dims:
if time_dependent:
dims = ('time', 'y', 'x')
else:
dims = ('y', 'x')
try:
var = self.variables[var_name]
except:
var = self.createVariable(var_name, nc_type, dims)
self.set_attrs(var_name, attrs)
return var
def define_timeseries(self, var_name, attrs = None):
try:
var = self.createVariable(var_name, 'f8', ('time',))
except:
var = self.variables[var_name]
self.set_attrs(var_name, attrs)
return var
def write(self, var_name, data, time_dependent = False):
"""
Write time-series or a 2D field to a file.
"""
if data.ndim == 1:
return self.write_timeseries(var_name, data)
elif data.ndim == 2:
return self.write_2d_field(var_name, data, time_dependent)
else:
return None
def write_2d_field(self, var_name, data, time_dependent = False):
"""
Write a 2D numpy array to a file in a format PISM can read.
"""
var = self.define_2d_field(var_name, time_dependent, None)
if time_dependent:
last_record = self.variables['time'].size - 1
var[last_record,:,:] = data
else:
var[:] = data
return var
def write_timeseries(self, var_name, data):
"""Write a 1D (time-series) array to a file."""
var = self.define_timeseries(var_name, None)
var[:] = data
return var
if __name__ == "__main__":
# produce a NetCDF file for testing
from numpy import linspace, meshgrid
nc = PISMDataset("foo.nc", 'w')
x = linspace(-100, 100, 101)
y = linspace(-100, 100, 201)
xx, yy = meshgrid(x, y)
nc.create_dimensions(x, y, time_dependent = True, use_time_bounds = True)
nc.define_2d_field("xx", time_dependent = True,
attrs = {"long_name" : "xx",
"comment" : "test variable",
"valid_range" : (-200.0, 200.0)})
for t in [0, 1, 2, 3]:
nc.append_time(t, (t-1, t))
nc.write("xx", xx + t, time_dependent = True)
nc.write("yy", yy + 2*t, time_dependent = True)
nc.close()
| gpl-2.0 | -2,277,154,563,205,809,700 | 28.22561 | 107 | 0.528896 | false |
gauribhoite/personfinder | env/google_appengine/lib/django-1.5/django/contrib/markup/templatetags/markup.py | 104 | 3499 | """
Set of "markup" template filters for Django. These filters transform plain text
markup syntaxes to HTML; currently there is support for:
* Textile, which requires the PyTextile library available at
http://loopcore.com/python-textile/
* Markdown, which requires the Python-markdown library from
http://www.freewisdom.org/projects/python-markdown
* reStructuredText, which requires docutils from http://docutils.sf.net/
"""
from django import template
from django.conf import settings
from django.utils.encoding import force_bytes, force_text
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter(is_safe=True)
def textile(value):
try:
import textile
except ImportError:
if settings.DEBUG:
raise template.TemplateSyntaxError("Error in 'textile' filter: The Python textile library isn't installed.")
return force_text(value)
else:
return mark_safe(force_text(textile.textile(force_bytes(value), encoding='utf-8', output='utf-8')))
@register.filter(is_safe=True)
def markdown(value, arg=''):
"""
Runs Markdown over a given value, optionally using various
extensions python-markdown supports.
Syntax::
{{ value|markdown:"extension1_name,extension2_name..." }}
To enable safe mode, which strips raw HTML and only returns HTML
generated by actual Markdown syntax, pass "safe" as the first
extension in the list.
If the version of Markdown in use does not support extensions,
they will be silently ignored.
"""
import warnings
warnings.warn('The markdown filter has been deprecated',
category=DeprecationWarning)
try:
import markdown
except ImportError:
if settings.DEBUG:
raise template.TemplateSyntaxError("Error in 'markdown' filter: The Python markdown library isn't installed.")
return force_text(value)
else:
markdown_vers = getattr(markdown, "version_info", 0)
if markdown_vers < (2, 1):
if settings.DEBUG:
raise template.TemplateSyntaxError(
"Error in 'markdown' filter: Django does not support versions of the Python markdown library < 2.1.")
return force_text(value)
else:
extensions = [e for e in arg.split(",") if e]
if extensions and extensions[0] == "safe":
extensions = extensions[1:]
return mark_safe(markdown.markdown(
force_text(value), extensions, safe_mode=True, enable_attributes=False))
else:
return mark_safe(markdown.markdown(
force_text(value), extensions, safe_mode=False))
@register.filter(is_safe=True)
def restructuredtext(value):
import warnings
warnings.warn('The restructuredtext filter has been deprecated',
category=DeprecationWarning)
try:
from docutils.core import publish_parts
except ImportError:
if settings.DEBUG:
raise template.TemplateSyntaxError("Error in 'restructuredtext' filter: The Python docutils library isn't installed.")
return force_text(value)
else:
docutils_settings = getattr(settings, "RESTRUCTUREDTEXT_FILTER_SETTINGS", {})
parts = publish_parts(source=force_bytes(value), writer_name="html4css1", settings_overrides=docutils_settings)
return mark_safe(force_text(parts["fragment"]))
| apache-2.0 | 2,443,722,879,962,522,000 | 37.877778 | 130 | 0.671049 | false |
ElementsProject/elements | test/functional/test_framework/address.py | 1 | 5297 | #!/usr/bin/env python3
# Copyright (c) 2016-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Encode and decode Bitcoin addresses.
- base58 P2PKH and P2SH addresses.
- bech32 segwit v0 P2WPKH and P2WSH addresses."""
import enum
import unittest
from .script import hash256, hash160, sha256, CScript, OP_0
from .segwit_addr import encode_segwit_address
from .util import assert_equal, hex_str_to_bytes
ADDRESS_BCRT1_UNSPENDABLE = 'ert1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq458dk'
ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR = 'addr(ert1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq458dk)#446fqfj4'
# Coins sent to this address can be spent with a witness stack of just OP_TRUE
ADDRESS_BCRT1_P2WSH_OP_TRUE = 'ert1qft5p2uhsdcdc3l2ua4ap5qqfg4pjaqlp250x7us7a8qqhrxrxfsqp24xws'
class AddressType(enum.Enum):
bech32 = 'bech32'
p2sh_segwit = 'p2sh-segwit'
legacy = 'legacy' # P2PKH
chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def byte_to_base58(b, version):
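    # Base58Check encoding: prepend the version byte, append the first 4 bytes of the
    # double-SHA256 of the payload as a checksum, then convert to base58, keeping
    # leading zero bytes as leading '1' characters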
result = ''
str = b.hex()
str = chr(version).encode('latin-1').hex() + str
checksum = hash256(hex_str_to_bytes(str)).hex()
str += checksum[:8]
value = int('0x' + str, 0)
while value > 0:
result = chars[value % 58] + result
value //= 58
while (str[:2] == '00'):
result = chars[0] + result
str = str[2:]
return result
def base58_to_byte(s):
"""Converts a base58-encoded string to its data and version.
Throws if the base58 checksum is invalid."""
if not s:
return b''
n = 0
for c in s:
n *= 58
assert c in chars
digit = chars.index(c)
n += digit
h = '%x' % n
if len(h) % 2:
h = '0' + h
res = n.to_bytes((n.bit_length() + 7) // 8, 'big')
pad = 0
for c in s:
if c == chars[0]:
pad += 1
else:
break
res = b'\x00' * pad + res
# Assert if the checksum is invalid
assert_equal(hash256(res[:-4])[:4], res[-4:])
return res[1:-4], int(res[0])
def keyhash_to_p2pkh(hash, main=False):
assert len(hash) == 20
version = 235
return byte_to_base58(hash, version)
def scripthash_to_p2sh(hash, main=False, prefix=75):
assert len(hash) == 20
version = prefix
return byte_to_base58(hash, version)
def key_to_p2pkh(key, main=False):
key = check_key(key)
return keyhash_to_p2pkh(hash160(key), main)
def script_to_p2sh(script, main=False, prefix=75):
script = check_script(script)
return scripthash_to_p2sh(hash160(script), main, prefix)
def key_to_p2sh_p2wpkh(key, main=False):
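    # P2SH-wrapped P2WPKH: the redeem script is the v0 witness program
    # (OP_0 <20-byte key hash>), which is then hashed into an ordinary P2SH address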
key = check_key(key)
p2shscript = CScript([OP_0, hash160(key)])
return script_to_p2sh(p2shscript, main)
def program_to_witness(version, program, main=False):
if (type(program) is str):
program = hex_str_to_bytes(program)
assert 0 <= version <= 16
assert 2 <= len(program) <= 40
assert version > 0 or len(program) in [20, 32]
return encode_segwit_address("ert", version, program)
def script_to_p2wsh(script, main=False):
script = check_script(script)
return program_to_witness(0, sha256(script), main)
def key_to_p2wpkh(key, main=False):
key = check_key(key)
return program_to_witness(0, hash160(key), main)
def script_to_p2sh_p2wsh(script, main=False):
script = check_script(script)
p2shscript = CScript([OP_0, sha256(script)])
return script_to_p2sh(p2shscript, main)
def check_key(key):
if (type(key) is str):
key = hex_str_to_bytes(key) # Assuming this is hex string
if (type(key) is bytes and (len(key) == 33 or len(key) == 65)):
return key
assert False
def check_script(script):
if (type(script) is str):
script = hex_str_to_bytes(script) # Assuming this is hex string
if (type(script) is bytes or type(script) is CScript):
return script
assert False
class TestFrameworkScript(unittest.TestCase):
def test_base58encodedecode(self):
def check_base58(data, version):
self.assertEqual(base58_to_byte(byte_to_base58(data, version)), (data, version))
check_base58(bytes.fromhex('1f8ea1702a7bd4941bca0941b852c4bbfedb2e05'), 111)
check_base58(bytes.fromhex('3a0b05f4d7f66c3ba7009f453530296c845cc9cf'), 111)
check_base58(bytes.fromhex('41c1eaf111802559bad61b60d62b1f897c63928a'), 111)
check_base58(bytes.fromhex('0041c1eaf111802559bad61b60d62b1f897c63928a'), 111)
check_base58(bytes.fromhex('000041c1eaf111802559bad61b60d62b1f897c63928a'), 111)
check_base58(bytes.fromhex('00000041c1eaf111802559bad61b60d62b1f897c63928a'), 111)
check_base58(bytes.fromhex('1f8ea1702a7bd4941bca0941b852c4bbfedb2e05'), 0)
check_base58(bytes.fromhex('3a0b05f4d7f66c3ba7009f453530296c845cc9cf'), 0)
check_base58(bytes.fromhex('41c1eaf111802559bad61b60d62b1f897c63928a'), 0)
check_base58(bytes.fromhex('0041c1eaf111802559bad61b60d62b1f897c63928a'), 0)
check_base58(bytes.fromhex('000041c1eaf111802559bad61b60d62b1f897c63928a'), 0)
check_base58(bytes.fromhex('00000041c1eaf111802559bad61b60d62b1f897c63928a'), 0)
| mit | 4,043,977,744,047,502,300 | 33.620915 | 119 | 0.680951 | false |
mikey1234/script.module.urlresolver | lib/urlresolver/plugins/crunchyroll.py | 3 | 3019 | '''
Crunchyroll urlresolver plugin
Copyright (C) 2013 voinage
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
import re
import urllib2
from urlresolver import common
import os
class crunchyrollResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "crunchyroll"
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
#http://www.crunchyroll.co.uk/07-ghost/episode-2-nostalgic-memories-accompany-pain-573286
#http://www.crunchyroll.com/07-ghost/episode-2-nostalgic-memories-accompany-pain-573286
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
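        # query Crunchyroll's android_rpc endpoint with headers mimicking the official
        # Android app so the response contains a direct video_url that can be played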
try:
html=self.net.http_GET('http://www.crunchyroll.com/android_rpc/?req=RpcApiAndroid_GetVideoWithAcl&media_id=%s'%media_id,{'Host':'www.crunchyroll.com',
'X-Device-Uniqueidentifier':'ffffffff-931d-1f73-ffff-ffffaf02fc5f',
'X-Device-Manufacturer':'HTC',
'X-Device-Model':'HTC Desire',
'X-Application-Name':'com.crunchyroll.crunchyroid',
'X-Device-Product':'htc_bravo',
'X-Device-Is-GoogleTV':'0'}).content
mp4=re.compile(r'"video_url":"(.+?)","h"').findall(html.replace('\\',''))[0]
return mp4
except Exception, e:
            common.addon.log_error('**** Crunchyroll Error occurred: %s' % e)
            common.addon.show_small_popup(title='[B][COLOR white]CRUNCHYROLL[/COLOR][/B]', msg='[COLOR red]%s[/COLOR]' % e, delay=5000)
return self.unresolvable(code=0, msg=e)
def get_url(self, host, media_id):
return 'http://www.crunchyroll.com/android_rpc/?req=RpcApiAndroid_GetVideoWithAcl&media_id=%s' % media_id
def get_host_and_id(self, url):
r = re.match(r'http://www.(crunchyroll).+?/.+?/.+?([^a-zA-Z-+]{6})', url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return (re.match(r'http://www.(crunchyroll).+?/.+?/.+?([^a-zA-Z-+]{6})', url) or 'crunchyroll' in host)
| gpl-2.0 | -2,115,550,390,118,779,100 | 42.753623 | 162 | 0.659159 | false |
iRGBit/QGIS | python/plugins/processing/tests/TestData.py | 38 | 2242 | # -*- coding: utf-8 -*-
"""
***************************************************************************
TestData.py
---------------------
Date : March 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'March 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os.path
from processing.tools import dataobjects
dataFolder = os.path.join(os.path.dirname(__file__), 'data')
def table():
return os.path.join(dataFolder, 'table.dbf')
def points():
return os.path.join(dataFolder, 'points.shp')
def points2():
return os.path.join(dataFolder, 'points2.shp')
def raster():
return os.path.join(dataFolder, 'raster.tif')
def lines():
return os.path.join(dataFolder, 'lines.shp')
def polygons():
return os.path.join(dataFolder, 'polygons.shp')
def polygons2():
return os.path.join(dataFolder, 'polygons2.shp')
def polygonsGeoJson():
return os.path.join(dataFolder, 'polygons.geojson')
def union():
return os.path.join(dataFolder, 'union.shp')
def loadTestData():
dataobjects.load(points(), 'points')
dataobjects.load(points2(), 'points2')
dataobjects.load(polygons(), 'polygons')
dataobjects.load(polygons2(), 'polygons2')
dataobjects.load(polygonsGeoJson(), 'polygonsGeoJson')
dataobjects.load(lines(), 'lines')
dataobjects.load(raster(), 'raster')
dataobjects.load(table(), 'table')
dataobjects.load(union(), 'union')
| gpl-2.0 | -1,468,384,029,864,413,700 | 27.379747 | 75 | 0.528992 | false |
tersmitten/ansible | lib/ansible/modules/cloud/ovirt/ovirt_external_provider.py | 52 | 14722 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_external_provider
short_description: Module to manage external providers in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage external providers in oVirt/RHV"
options:
name:
description:
- "Name of the external provider to manage."
state:
description:
- "Should the external be present or absent"
- "When you are using absent for I(os_volume), you need to make
               sure that the storage domain is not attached to the data center!"
choices: ['present', 'absent']
default: present
description:
description:
- "Description of the external provider."
type:
description:
- "Type of the external provider."
choices: ['os_image', 'network', 'os_volume', 'foreman']
url:
description:
- "URL where external provider is hosted."
- "Applicable for those types: I(os_image), I(os_volume), I(network) and I(foreman)."
username:
description:
- "Username to be used for login to external provider."
- "Applicable for all types."
password:
description:
- "Password of the user specified in C(username) parameter."
- "Applicable for all types."
tenant_name:
description:
- "Name of the tenant."
- "Applicable for those types: I(os_image), I(os_volume) and I(network)."
aliases: ['tenant']
authentication_url:
description:
- "Keystone authentication URL of the openstack provider."
- "Applicable for those types: I(os_image), I(os_volume) and I(network)."
aliases: ['auth_url']
data_center:
description:
- "Name of the data center where provider should be attached."
- "Applicable for those type: I(os_volume)."
read_only:
description:
- "Specify if the network should be read only."
- "Applicable if C(type) is I(network)."
type: bool
network_type:
description:
- "Type of the external network provider either external (for example OVN) or neutron."
- "Applicable if C(type) is I(network)."
choices: ['external', 'neutron']
default: ['external']
authentication_keys:
description:
- "List of authentication keys. Each key is represented by dict
like {'uuid': 'our-uuid', 'value': 'YourSecretValue=='}"
- "When you will not pass these keys and there are already some
of them defined in the system they will be removed."
- "Applicable for I(os_volume)."
default: []
version_added: "2.6"
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add image external provider:
- ovirt_external_provider:
name: image_provider
type: os_image
url: http://1.2.3.4:9292
username: admin
password: 123456
tenant: admin
auth_url: http://1.2.3.4:35357/v2.0
# Add volume external provider:
- ovirt_external_provider:
name: image_provider
type: os_volume
url: http://1.2.3.4:9292
username: admin
password: 123456
tenant: admin
auth_url: http://1.2.3.4:5000/v2.0
authentication_keys:
-
uuid: "1234567-a1234-12a3-a234-123abc45678"
value: "ABCD00000000111111222333445w=="
# Add foreman provider:
- ovirt_external_provider:
name: foreman_provider
type: foreman
url: https://foreman.example.com
username: admin
password: 123456
# Add external network provider for OVN:
- ovirt_external_provider:
name: ovn_provider
type: network
network_type: external
url: http://1.2.3.4:9696
# Remove image external provider:
- ovirt_external_provider:
state: absent
name: image_provider
type: os_image
'''
RETURN = '''
id:
description: ID of the external provider which is managed
returned: On success if external provider is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
external_host_provider:
description: "Dictionary of all the external_host_provider attributes. External provider attributes can be found on your oVirt/RHV instance
                  at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/external_host_provider."
returned: "On success and if parameter 'type: foreman' is used."
type: dict
openstack_image_provider:
description: "Dictionary of all the openstack_image_provider attributes. External provider attributes can be found on your oVirt/RHV instance
                  at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_image_provider."
returned: "On success and if parameter 'type: os_image' is used."
type: dict
openstack_volume_provider:
description: "Dictionary of all the openstack_volume_provider attributes. External provider attributes can be found on your oVirt/RHV instance
                  at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_volume_provider."
returned: "On success and if parameter 'type: os_volume' is used."
type: dict
openstack_network_provider:
description: "Dictionary of all the openstack_network_provider attributes. External provider attributes can be found on your oVirt/RHV instance
                  at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_network_provider."
returned: "On success and if parameter 'type: network' is used."
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_params,
check_sdk,
create_connection,
equal,
ovirt_full_argument_spec,
)
OS_VOLUME = 'os_volume'
OS_IMAGE = 'os_image'
NETWORK = 'network'
FOREMAN = 'foreman'
class ExternalProviderModule(BaseModule):
non_provider_params = ['type', 'authentication_keys', 'data_center']
def provider_type(self, provider_type):
self._provider_type = provider_type
def provider_module_params(self):
provider_params = [
(key, value) for key, value in self._module.params.items() if key
not in self.non_provider_params
]
provider_params.append(('data_center', self.get_data_center()))
return provider_params
def get_data_center(self):
dc_name = self._module.params.get("data_center", None)
if dc_name:
system_service = self._connection.system_service()
data_centers_service = system_service.data_centers_service()
return data_centers_service.list(
search='name=%s' % dc_name,
)[0]
return dc_name
def build_entity(self):
provider_type = self._provider_type(
requires_authentication=self._module.params.get('username') is not None,
)
if self._module.params.pop('type') == NETWORK:
setattr(
provider_type,
'type',
otypes.OpenStackNetworkProviderType(self._module.params.pop('network_type'))
)
for key, value in self.provider_module_params():
if hasattr(provider_type, key):
setattr(provider_type, key, value)
return provider_type
def update_check(self, entity):
return (
equal(self._module.params.get('description'), entity.description) and
equal(self._module.params.get('url'), entity.url) and
equal(self._module.params.get('authentication_url'), entity.authentication_url) and
equal(self._module.params.get('tenant_name'), getattr(entity, 'tenant_name', None)) and
equal(self._module.params.get('username'), entity.username)
)
def update_volume_provider_auth_keys(
self, provider, providers_service, keys
):
"""
Update auth keys for volume provider, if not exist add them or remove
if they are not specified and there are already defined in the external
volume provider.
Args:
provider (dict): Volume provider details.
providers_service (openstack_volume_providers_service): Provider
service.
keys (list): Keys to be updated/added to volume provider, each key
is represented as dict with keys: uuid, value.
"""
provider_service = providers_service.provider_service(provider['id'])
auth_keys_service = provider_service.authentication_keys_service()
provider_keys = auth_keys_service.list()
# removing keys which are not defined
for key in [
k.id for k in provider_keys if k.uuid not in [
defined_key['uuid'] for defined_key in keys
]
]:
self.changed = True
if not self._module.check_mode:
auth_keys_service.key_service(key).remove()
if not (provider_keys or keys):
# Nothing need to do when both are empty.
return
for key in keys:
key_id_for_update = None
for existing_key in provider_keys:
if key['uuid'] == existing_key.uuid:
key_id_for_update = existing_key.id
auth_key_usage_type = (
otypes.OpenstackVolumeAuthenticationKeyUsageType("ceph")
)
auth_key = otypes.OpenstackVolumeAuthenticationKey(
usage_type=auth_key_usage_type,
uuid=key['uuid'],
value=key['value'],
)
if not key_id_for_update:
self.changed = True
if not self._module.check_mode:
auth_keys_service.add(auth_key)
else:
# We cannot really distinguish here if it was really updated cause
# we cannot take key value to check if it was changed or not. So
# for sure we update here always.
self.changed = True
if not self._module.check_mode:
auth_key_service = (
auth_keys_service.key_service(key_id_for_update)
)
auth_key_service.update(auth_key)
def _external_provider_service(provider_type, system_service):
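    # map the module's 'type' parameter to the matching oVirt SDK type class and its
    # top-level providers service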
if provider_type == OS_IMAGE:
return otypes.OpenStackImageProvider, system_service.openstack_image_providers_service()
elif provider_type == NETWORK:
return otypes.OpenStackNetworkProvider, system_service.openstack_network_providers_service()
elif provider_type == OS_VOLUME:
return otypes.OpenStackVolumeProvider, system_service.openstack_volume_providers_service()
elif provider_type == FOREMAN:
return otypes.ExternalHostProvider, system_service.external_host_providers_service()
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
name=dict(default=None),
description=dict(default=None),
type=dict(
default=None,
required=True,
choices=[
OS_IMAGE, NETWORK, OS_VOLUME, FOREMAN,
],
aliases=['provider'],
),
url=dict(default=None),
username=dict(default=None),
password=dict(default=None, no_log=True),
tenant_name=dict(default=None, aliases=['tenant']),
authentication_url=dict(default=None, aliases=['auth_url']),
data_center=dict(default=None),
read_only=dict(default=None, type='bool'),
network_type=dict(
default='external',
choices=['external', 'neutron'],
),
authentication_keys=dict(
default=[], aliases=['auth_keys'], type='list', no_log=True,
),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
check_params(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
provider_type_param = module.params.get('type')
provider_type, external_providers_service = _external_provider_service(
provider_type=provider_type_param,
system_service=connection.system_service(),
)
external_providers_module = ExternalProviderModule(
connection=connection,
module=module,
service=external_providers_service,
)
external_providers_module.provider_type(provider_type)
state = module.params.pop('state')
if state == 'absent':
ret = external_providers_module.remove()
elif state == 'present':
ret = external_providers_module.create()
openstack_volume_provider_id = ret.get('id')
if (
provider_type_param == OS_VOLUME and
openstack_volume_provider_id
):
external_providers_module.update_volume_provider_auth_keys(
ret, external_providers_service,
module.params.get('authentication_keys'),
)
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
| gpl-3.0 | -4,661,822,859,126,856,000 | 35.261084 | 147 | 0.617172 | false |
megaprojectske/megaprojects.co.ke | megaprojects/articles/mixins.py | 1 | 1028 | # See: http://hunterford.me/django-custom-model-manager-chaining/
import models
class ArticleManagerMixin(object):
def published(self, status=True):
if status:
return self.filter(status=models.Article.STATUS_PUBLISHED)
else:
return self.filter(status=models.Article.STATUS_DRAFT)
def articles(self):
return self.filter(kind=models.Article.KIND_ARTICLE)
def blog(self):
return self.filter(kind=models.Article.KIND_BLOG)
def featured(self):
return self.filter(kind=models.Article.KIND_FEATURE)
class ImageManagerMixin(object):
def published(self, status=True):
return self.filter(article__status=models.Article.STATUS_PUBLISHED).filter(status=status)
def articles(self):
return self.filter(article__kind=models.Article.KIND_ARTICLE)
def blog(self):
return self.filter(article__kind=models.Article.KIND_BLOG)
def featured(self):
return self.filter(article__kind=models.Article.KIND_FEATURE)
| apache-2.0 | -4,660,127,067,919,884,000 | 28.371429 | 97 | 0.697471 | false |
tryton-ar/padron_afip_ar | padron_afip.py | 1 | 7151 | #! -*- coding: utf8 -*-
"Herramienta para procesar y consultar el Padrón Unico de Contribuyentes AFIP"
# Documentación e información adicional:
# http://www.sistemasagiles.com.ar/trac/wiki/PadronContribuyentesAFIP
# Based on pyafipws padron.py by Mariano Reingart
from trytond.model import ModelView, ModelSQL, fields
from trytond.pool import Pool
from trytond.wizard import Wizard, StateView, Button, StateTransition
import urllib2
import os
#import shelve
#import sqlite3
import zipfile
from email.utils import formatdate
# Data types (RG1361 field type codes)
N = 'Numerico' # 2 (numeric)
A = 'Alfanumerico' # 3 (alphanumeric)
I = 'Importe' # 4 (amount)
C = A # 1 (alphabetic character)
B = A # 9 (blank)
# Format and location of the full tax-condition file according to RG 1817
FORMATO = [
("cuit", 11, N, ""),
("denominacion", 30, A, ""),
("imp_ganancias", 2, A, "'NI', 'AC','EX', 'NC'"),
("imp_iva", 2, A, "'NI' , 'AC','EX','NA','XN','AN'"),
("monotributo", 2, A, "'NI', 'Codigo categoria tributaria'"),
("integrante_soc", 1, A, "'N' , 'S'"),
("empleador", 1, A, "'N', 'S'"),
("actividad_monotributo", 2, A, ""),
]
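# Illustrative layout of one fixed-width record as described by FORMATO above
# (column arithmetic only; any real record comes from the AFIP download):
#
#     columns  1-11   cuit                   (numeric)
#     columns 12-41   denominacion           (alphanumeric, 30 chars)
#     columns 42-43   imp_ganancias          ('NI', 'AC', 'EX', 'NC')
#     columns 44-45   imp_iva                ('NI', 'AC', 'EX', 'NA', 'XN', 'AN')
#     columns 46-47   monotributo            ('NI' or category code)
#     column  48      integrante_soc         ('N'/'S')
#     column  49      empleador              ('N'/'S')
#     columns 50-51   actividad_monotributo
#
# _leer() below slices each line at exactly these offsets.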
__all__ = ['PadronAfip', 'PadronAfipStart', 'PadronAfipImport']
class PadronAfip(ModelSQL, ModelView):
"padron.afip"
__name__ = "padron.afip"
denominacion = fields.Char('denominacion')
imp_ganancias = fields.Char('imp_ganancias')
imp_iva = fields.Char('imp_iva')
monotributo = fields.Char('monotributo')
integrante_soc = fields.Char('integrante_soc')
empleador = fields.Char('empleador')
actividad_monotributo = fields.Char('actividad_monotributo')
cuit = fields.Char('CUIT')
class PadronAfipStart(ModelView):
"padron afip start"
__name__ = "padron.afip.import.start"
class PadronAfipImport(Wizard):
"padron afip wizard import"
__name__ = "padron.afip.import"
start = StateView('padron.afip.import.start', 'padron_afip_ar.padron_afip_import_form', [
Button('Cancel','end', 'tryton-cancel'),
Button('Import', 'download_import', 'tryton-ok', default=True)
])
download_import = StateTransition()
def transition_download_import(self):
url = "http://www.afip.gob.ar/genericos/cInscripcion/archivos/apellidoNombreDenominacion.zip"
self._descargar(url)
self._procesar()
return 'end'
def _descargar(self, url, filename="padron.txt", proxy=None):
#import sys
#from utils import leer, escribir, N, A, I, get_install_dir
"Descarga el archivo de AFIP, devuelve 200 o 304 si no fue modificado"
proxies = {}
if proxy:
proxies['http'] = proxy
proxies['https'] = proxy
print "Abriendo URL %s ..." % url
req = urllib2.Request(url)
if os.path.exists(filename):
http_date = formatdate(timeval=os.path.getmtime(filename),
localtime=False, usegmt=True)
req.add_header('If-Modified-Since', http_date)
try:
web = urllib2.urlopen(req)
except urllib2.HTTPError, e:
if e.code == 304:
print "No modificado desde", http_date
return 304
else:
raise
        # read request info:
meta = web.info()
lenght = float(meta['Content-Length'])
tmp = open(filename + ".zip", "wb")
print "Guardando"
size = 0
p0 = None
while True:
p = int(size / lenght * 100)
if p0 is None or p>p0:
print "Descargando ... %0d %%" % p
p0 = p
data = web.read(1024*100)
size = size + len(data)
if not data:
print "Descarga Terminada!"
break
tmp.write(data)
print "Abriendo ZIP..."
tmp.close()
web.close()
uf = open(filename + ".zip", "rb")
zf = zipfile.ZipFile(uf)
for fn in zf.namelist():
print "descomprimiendo", fn
tf = open(filename, "wb")
tf.write(zf.read(fn))
tf.close()
return 200
def _procesar(self, filename="padron.txt"):
"Analiza y crea la base de datos interna sqlite para consultas"
PadronAfip = Pool().get('padron.afip')
f = open(filename, "r")
keys = [k for k, l, t, d in FORMATO]
        # conversion to a csv spreadsheet (not used)
if False and not os.path.exists("padron.csv"):
csvfile = open('padron.csv', 'wb')
import csv
wr = csv.writer(csvfile, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i, l in enumerate(f):
if i % 100000 == 0:
print "Progreso: %d registros" % i
r = self._leer(l, FORMATO)
row = [r[k] for k in keys]
wr.writerow(row)
csvfile.close()
f.seek(0)
for i, l in enumerate(f):
if i % 10000 == 0: print i
registro = self._leer(l, FORMATO)
registro['cuit'] = str(registro['cuit'])
PadronAfip.create([registro])
def _leer(self, linea, formato, expandir_fechas=False):
"Analiza una linea de texto dado un formato, devuelve un diccionario"
dic = {}
comienzo = 1
for fmt in formato:
clave, longitud, tipo = fmt[0:3]
dec = (len(fmt)>3 and isinstance(fmt[3], int)) and fmt[3] or 2
valor = linea[comienzo-1:comienzo-1+longitud].strip()
try:
if chr(8) in valor or chr(127) in valor or chr(255) in valor:
                    valor = None # null
elif tipo == N:
if valor:
valor = long(valor)
else:
valor = 0
elif tipo == I:
if valor:
try:
if '.' in valor:
valor = float(valor)
else:
valor = valor.strip(" ")
valor = float(("%%s.%%0%sd" % dec) % (long(valor[:-dec] or '0'), int(valor[-dec:] or '0')))
except ValueError:
raise ValueError("Campo invalido: %s = '%s'" % (clave, valor))
else:
valor = 0.00
elif expandir_fechas and clave.lower().startswith("fec") and longitud <= 8:
if valor:
valor = "%s-%s-%s" % (valor[0:4], valor[4:6], valor[6:8])
else:
valor = None
else:
valor = valor.decode("ascii","ignore")
dic[clave] = valor
comienzo += longitud
except Exception, e:
raise ValueError("Error al leer campo %s pos %s val '%s': %s" % (
clave, comienzo, valor, str(e)))
return dic
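# Illustrative sketch (the CUIT and name are made up): given FORMATO above,
# _leer() turns one 51-character record into a dict roughly like
#
#     {'cuit': 20123456789L, 'denominacion': u'EMPRESA EJEMPLO S.A.',
#      'imp_ganancias': u'AC', 'imp_iva': u'AC', 'monotributo': u'NI',
#      'integrante_soc': u'N', 'empleador': u'S', 'actividad_monotributo': u'00'}
#
# numeric (N) fields come back as long, everything else as unicode text.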
| gpl-2.0 | -3,779,821,436,722,222,600 | 36.208333 | 123 | 0.517077 | false |
ojengwa/oh-mainline | vendor/packages/docutils/test/test_readers/test_pep/test_rfc2822.py | 19 | 6073 | #! /usr/bin/env python
# $Id: test_rfc2822.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Tests for RFC-2822 headers in PEPs (readers/pep.py).
"""
from __init__ import DocutilsTestSupport
def suite():
s = DocutilsTestSupport.PEPParserTestSuite()
s.generateTests(totest)
return s
totest = {}
totest['rfc2822'] = [
["""\
Author: Me
Version: 1
Date: 2002-04-23
""",
"""\
<document source="test data">
<field_list classes="rfc2822">
<field>
<field_name>
Author
<field_body>
<paragraph>
Me
<field>
<field_name>
Version
<field_body>
<paragraph>
1
<field>
<field_name>
Date
<field_body>
<paragraph>
2002-04-23
"""],
["""\
Author: Me
Version: 1
Date: 2002-04-23
.. Leading blank lines don't affect RFC-2822 header parsing.
""",
"""\
<document source="test data">
<field_list classes="rfc2822">
<field>
<field_name>
Author
<field_body>
<paragraph>
Me
<field>
<field_name>
Version
<field_body>
<paragraph>
1
<field>
<field_name>
Date
<field_body>
<paragraph>
2002-04-23
<comment xml:space="preserve">
Leading blank lines don't affect RFC-2822 header parsing.
"""],
["""\
.. A comment should prevent RFC-2822 header parsing.
Author: Me
Version: 1
Date: 2002-04-23
""",
"""\
<document source="test data">
<comment xml:space="preserve">
A comment should prevent RFC-2822 header parsing.
<paragraph>
Author: Me
Version: 1
Date: 2002-04-23
"""],
["""\
Author: Me
Version: 1
Date: 2002-04-23
""",
"""\
<document source="test data">
<field_list classes="rfc2822">
<field>
<field_name>
Author
<field_body>
<paragraph>
Me
<paragraph>
Version: 1
Date: 2002-04-23
"""],
["""\
field:
empty item above, no blank line
""",
"""\
<document source="test data">
<field_list classes="rfc2822">
<field>
<field_name>
field
<field_body>
<system_message level="2" line="2" source="test data" type="WARNING">
<paragraph>
RFC2822-style field list ends without a blank line; unexpected unindent.
<paragraph>
empty item above, no blank line
"""],
["""\
Author:
Me
Version:
1
Date:
2002-04-23
""",
"""\
<document source="test data">
<field_list classes="rfc2822">
<field>
<field_name>
Author
<field_body>
<paragraph>
Me
<field>
<field_name>
Version
<field_body>
<paragraph>
1
<field>
<field_name>
Date
<field_body>
<paragraph>
2002-04-23
"""],
["""\
Authors: Me,
Myself,
and I
Version: 1
or so
Date: 2002-04-23
(Tuesday)
""",
"""\
<document source="test data">
<field_list classes="rfc2822">
<field>
<field_name>
Authors
<field_body>
<paragraph>
Me,
Myself,
and I
<field>
<field_name>
Version
<field_body>
<paragraph>
1
or so
<field>
<field_name>
Date
<field_body>
<paragraph>
2002-04-23
(Tuesday)
"""],
["""\
Authors: Me,
Myself,
and I
Version: 1
or so
Date: 2002-04-23
(Tuesday)
""",
"""\
<document source="test data">
<field_list classes="rfc2822">
<field>
<field_name>
Authors
<field_body>
<paragraph>
Me,
Myself,
and I
<field>
<field_name>
Version
<field_body>
<paragraph>
1
or so
<field>
<field_name>
Date
<field_body>
<paragraph>
2002-04-23
(Tuesday)
"""],
["""\
Authors: - Me
- Myself
- I
Version:
""",
"""\
<document source="test data">
<field_list classes="rfc2822">
<field>
<field_name>
Authors
<field_body>
<bullet_list bullet="-">
<list_item>
<paragraph>
Me
<list_item>
<paragraph>
Myself
<list_item>
<paragraph>
I
<field>
<field_name>
Version
<field_body>
"""],
["""\
Authors: Me
Myself and I
Version:
""",
"""\
<document source="test data">
<field_list classes="rfc2822">
<field>
<field_name>
Authors
<field_body>
<paragraph>
Me
<block_quote>
<paragraph>
Myself and I
<system_message level="2" line="4" source="test data" type="WARNING">
<paragraph>
Block quote ends without a blank line; unexpected unindent.
<paragraph>
Version:
"""],
]
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
| agpl-3.0 | -8,685,670,308,198,104,000 | 20.013841 | 84 | 0.428618 | false |
mitsuhiko/sentry | src/sentry/models/auditlogentry.py | 1 | 9023 | """
sentry.models.auditlogentry
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from django.db import models
from django.utils import timezone
from sentry.db.models import (
Model, BoundedPositiveIntegerField, FlexibleForeignKey, GzippedDictField,
sane_repr
)
class AuditLogEntryEvent(object):
MEMBER_INVITE = 1
MEMBER_ADD = 2
MEMBER_ACCEPT = 3
MEMBER_EDIT = 4
MEMBER_REMOVE = 5
MEMBER_JOIN_TEAM = 6
MEMBER_LEAVE_TEAM = 7
ORG_ADD = 10
ORG_EDIT = 11
ORG_REMOVE = 12
TEAM_ADD = 20
TEAM_EDIT = 21
TEAM_REMOVE = 22
PROJECT_ADD = 30
PROJECT_EDIT = 31
PROJECT_REMOVE = 32
PROJECT_SET_PUBLIC = 33
PROJECT_SET_PRIVATE = 34
TAGKEY_REMOVE = 40
PROJECTKEY_ADD = 50
PROJECTKEY_EDIT = 51
PROJECTKEY_REMOVE = 52
PROJECTKEY_ENABLE = 53
    PROJECTKEY_DISABLE = 54
SSO_ENABLE = 60
SSO_DISABLE = 61
SSO_EDIT = 62
SSO_IDENTITY_LINK = 63
APIKEY_ADD = 70
APIKEY_EDIT = 71
APIKEY_REMOVE = 72
class AuditLogEntry(Model):
__core__ = False
organization = FlexibleForeignKey('sentry.Organization')
actor_label = models.CharField(max_length=64, null=True, blank=True)
# if the entry was created via a user
actor = FlexibleForeignKey('sentry.User', related_name='audit_actors',
null=True, blank=True)
# if the entry was created via an api key
actor_key = FlexibleForeignKey('sentry.ApiKey', null=True, blank=True)
target_object = BoundedPositiveIntegerField(null=True)
target_user = FlexibleForeignKey('sentry.User', null=True, blank=True,
related_name='audit_targets')
# TODO(dcramer): we want to compile this mapping into JSX for the UI
event = BoundedPositiveIntegerField(choices=(
# We emulate github a bit with event naming
(AuditLogEntryEvent.MEMBER_INVITE, 'member.invite'),
(AuditLogEntryEvent.MEMBER_ADD, 'member.add'),
(AuditLogEntryEvent.MEMBER_ACCEPT, 'member.accept-invite'),
(AuditLogEntryEvent.MEMBER_REMOVE, 'member.remove'),
(AuditLogEntryEvent.MEMBER_EDIT, 'member.edit'),
(AuditLogEntryEvent.MEMBER_JOIN_TEAM, 'member.join-team'),
(AuditLogEntryEvent.MEMBER_LEAVE_TEAM, 'member.leave-team'),
(AuditLogEntryEvent.TEAM_ADD, 'team.create'),
(AuditLogEntryEvent.TEAM_EDIT, 'team.edit'),
(AuditLogEntryEvent.TEAM_REMOVE, 'team.remove'),
(AuditLogEntryEvent.PROJECT_ADD, 'project.create'),
(AuditLogEntryEvent.PROJECT_EDIT, 'project.edit'),
(AuditLogEntryEvent.PROJECT_REMOVE, 'project.remove'),
(AuditLogEntryEvent.PROJECT_SET_PUBLIC, 'project.set-public'),
(AuditLogEntryEvent.PROJECT_SET_PRIVATE, 'project.set-private'),
(AuditLogEntryEvent.ORG_ADD, 'org.create'),
(AuditLogEntryEvent.ORG_EDIT, 'org.edit'),
(AuditLogEntryEvent.ORG_REMOVE, 'org.remove'),
(AuditLogEntryEvent.TAGKEY_REMOVE, 'tagkey.remove'),
(AuditLogEntryEvent.PROJECTKEY_ADD, 'projectkey.create'),
(AuditLogEntryEvent.PROJECTKEY_EDIT, 'projectkey.edit'),
(AuditLogEntryEvent.PROJECTKEY_REMOVE, 'projectkey.remove'),
(AuditLogEntryEvent.PROJECTKEY_ENABLE, 'projectkey.enable'),
(AuditLogEntryEvent.PROJECTKEY_DISABLE, 'projectkey.disable'),
(AuditLogEntryEvent.SSO_ENABLE, 'sso.enable'),
(AuditLogEntryEvent.SSO_DISABLE, 'sso.disable'),
(AuditLogEntryEvent.SSO_EDIT, 'sso.edit'),
(AuditLogEntryEvent.SSO_IDENTITY_LINK, 'sso-identity.link'),
(AuditLogEntryEvent.APIKEY_ADD, 'api-key.create'),
(AuditLogEntryEvent.APIKEY_EDIT, 'api-key.edit'),
(AuditLogEntryEvent.APIKEY_REMOVE, 'api-key.remove'),
))
ip_address = models.GenericIPAddressField(null=True, unpack_ipv4=True)
data = GzippedDictField()
datetime = models.DateTimeField(default=timezone.now)
class Meta:
app_label = 'sentry'
db_table = 'sentry_auditlogentry'
__repr__ = sane_repr('organization_id', 'type')
def save(self, *args, **kwargs):
if not self.actor_label:
assert self.actor or self.actor_key
if self.actor:
self.actor_label = self.actor.username
else:
self.actor_label = self.actor_key.key
super(AuditLogEntry, self).save(*args, **kwargs)
def get_actor_name(self):
if self.actor:
return self.actor.get_display_name()
elif self.actor_key:
return self.actor_key.key + ' (api key)'
return self.actor_label
def get_note(self):
if self.event == AuditLogEntryEvent.MEMBER_INVITE:
return 'invited member %s' % (self.data['email'],)
elif self.event == AuditLogEntryEvent.MEMBER_ADD:
if self.target_user == self.actor:
return 'joined the organization'
return 'added member %s' % (self.target_user.get_display_name(),)
elif self.event == AuditLogEntryEvent.MEMBER_ACCEPT:
return 'accepted the membership invite'
elif self.event == AuditLogEntryEvent.MEMBER_REMOVE:
if self.target_user == self.actor:
return 'left the organization'
return 'removed member %s' % (self.data.get('email') or self.target_user.get_display_name(),)
elif self.event == AuditLogEntryEvent.MEMBER_EDIT:
return 'edited member %s' % (self.data.get('email') or self.target_user.get_display_name(),)
elif self.event == AuditLogEntryEvent.MEMBER_JOIN_TEAM:
if self.target_user == self.actor:
return 'joined team %s' % (self.data['team_slug'],)
return 'added %s to team %s' % (
self.data.get('email') or self.target_user.get_display_name(),
self.data['team_slug'],
)
elif self.event == AuditLogEntryEvent.MEMBER_LEAVE_TEAM:
if self.target_user == self.actor:
return 'left team %s' % (self.data['team_slug'],)
return 'removed %s from team %s' % (
self.data.get('email') or self.target_user.get_display_name(),
self.data['team_slug'],
)
elif self.event == AuditLogEntryEvent.ORG_ADD:
return 'created the organization'
elif self.event == AuditLogEntryEvent.ORG_EDIT:
return 'edited the organization'
elif self.event == AuditLogEntryEvent.TEAM_ADD:
return 'created team %s' % (self.data['slug'],)
elif self.event == AuditLogEntryEvent.TEAM_EDIT:
return 'edited team %s' % (self.data['slug'],)
elif self.event == AuditLogEntryEvent.TEAM_REMOVE:
return 'removed team %s' % (self.data['slug'],)
elif self.event == AuditLogEntryEvent.PROJECT_ADD:
return 'created project %s' % (self.data['slug'],)
elif self.event == AuditLogEntryEvent.PROJECT_EDIT:
return 'edited project %s' % (self.data['slug'],)
elif self.event == AuditLogEntryEvent.PROJECT_REMOVE:
return 'removed project %s' % (self.data['slug'],)
elif self.event == AuditLogEntryEvent.TAGKEY_REMOVE:
return 'removed tags matching %s = *' % (self.data['key'],)
elif self.event == AuditLogEntryEvent.PROJECTKEY_ADD:
return 'added project key %s' % (self.data['public_key'],)
elif self.event == AuditLogEntryEvent.PROJECTKEY_EDIT:
return 'edited project key %s' % (self.data['public_key'],)
elif self.event == AuditLogEntryEvent.PROJECTKEY_REMOVE:
return 'removed project key %s' % (self.data['public_key'],)
elif self.event == AuditLogEntryEvent.PROJECTKEY_ENABLE:
return 'enabled project key %s' % (self.data['public_key'],)
elif self.event == AuditLogEntryEvent.PROJECTKEY_DISABLE:
return 'disabled project key %s' % (self.data['public_key'],)
elif self.event == AuditLogEntryEvent.SSO_ENABLE:
return 'enabled sso (%s)' % (self.data['provider'],)
elif self.event == AuditLogEntryEvent.SSO_DISABLE:
return 'disabled sso (%s)' % (self.data['provider'],)
elif self.event == AuditLogEntryEvent.SSO_EDIT:
return 'edited sso settings'
elif self.event == AuditLogEntryEvent.SSO_IDENTITY_LINK:
return 'linked their account to a new identity'
elif self.event == AuditLogEntryEvent.APIKEY_ADD:
return 'added api key %s' % (self.data['label'],)
elif self.event == AuditLogEntryEvent.APIKEY_EDIT:
return 'edited api key %s' % (self.data['label'],)
elif self.event == AuditLogEntryEvent.APIKEY_REMOVE:
return 'removed api key %s' % (self.data['label'],)
return ''
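# Illustrative sketch (field values are hypothetical): an audit entry is
# typically written at the call site roughly like
#
#     AuditLogEntry.objects.create(
#         organization=org,
#         actor=request.user,
#         ip_address=request.META['REMOTE_ADDR'],
#         target_object=team.id,
#         event=AuditLogEntryEvent.TEAM_ADD,
#         data={'slug': team.slug},
#     )
#
# after which get_note() renders it as "created team <slug>" for the audit log UI.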
| bsd-3-clause | -5,821,903,562,131,085,000 | 40.013636 | 105 | 0.62651 | false |
mday299/ardupilot | Tools/autotest/pysim/iris_ros.py | 81 | 3622 | #!/usr/bin/env python
"""
Python interface to euroc ROS multirotor simulator
See https://pixhawk.org/dev/ros/sitl
"""
import time
import mav_msgs.msg as mav_msgs
import px4.msg as px4
import rosgraph_msgs.msg as rosgraph_msgs
import rospy
import sensor_msgs.msg as sensor_msgs
from aircraft import Aircraft
from rotmat import Vector3, Matrix3
def quat_to_dcm(q1, q2, q3, q4):
"""Convert quaternion to DCM."""
q3q3 = q3 * q3
q3q4 = q3 * q4
q2q2 = q2 * q2
q2q3 = q2 * q3
q2q4 = q2 * q4
q1q2 = q1 * q2
q1q3 = q1 * q3
q1q4 = q1 * q4
q4q4 = q4 * q4
m = Matrix3()
m.a.x = 1.0-2.0*(q3q3 + q4q4)
m.a.y = 2.0*(q2q3 - q1q4)
m.a.z = 2.0*(q2q4 + q1q3)
m.b.x = 2.0*(q2q3 + q1q4)
m.b.y = 1.0-2.0*(q2q2 + q4q4)
m.b.z = 2.0*(q3q4 - q1q2)
m.c.x = 2.0*(q2q4 - q1q3)
m.c.y = 2.0*(q3q4 + q1q2)
m.c.z = 1.0-2.0*(q2q2 + q3q3)
return m
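# Quick sanity check (illustrative, not part of the original module): with the
# expressions above, the identity quaternion (q1=1, q2=q3=q4=0) yields the
# identity matrix, and a 90-degree yaw about Z, q = (sqrt(2)/2, 0, 0, sqrt(2)/2),
# yields a.x=0, a.y=-1, b.x=1, b.y=0, c.z=1, i.e. a pure Z rotation.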
class IrisRos(Aircraft):
"""A IRIS MultiCopter from ROS."""
def __init__(self):
Aircraft.__init__(self)
self.max_rpm = 1200
self.have_new_time = False
self.have_new_imu = False
self.have_new_pos = False
topics = {
"/clock" : (self.clock_cb, rosgraph_msgs.Clock),
"/iris/imu" : (self.imu_cb, sensor_msgs.Imu),
"/iris/vehicle_local_position" : (self.pos_cb, px4.vehicle_local_position),
}
rospy.init_node('ArduPilot', anonymous=True)
for topic in topics.keys():
(callback, msgtype) = topics[topic]
rospy.Subscriber(topic, msgtype, callback)
self.motor_pub = rospy.Publisher('/iris/command/motor_speed',
mav_msgs.CommandMotorSpeed,
queue_size=1)
self.last_time = 0
# spin() simply keeps python from exiting until this node is stopped
# rospy.spin()
def clock_cb(self, msg):
self.time_now = self.time_base + msg.clock.secs + msg.clock.nsecs*1.0e-9
self.have_new_time = True
def imu_cb(self, msg):
self.gyro = Vector3(msg.angular_velocity.x,
-msg.angular_velocity.y,
-msg.angular_velocity.z)
self.accel_body = Vector3(msg.linear_acceleration.x,
-msg.linear_acceleration.y,
-msg.linear_acceleration.z)
self.dcm = quat_to_dcm(msg.orientation.w,
msg.orientation.x,
-msg.orientation.y,
-msg.orientation.z)
self.have_new_imu = True
def pos_cb(self, msg):
self.velocity = Vector3(msg.vx, msg.vy, msg.vz)
self.position = Vector3(msg.x, msg.y, msg.z)
self.have_new_pos = True
def update(self, actuators):
while self.last_time == self.time_now or not self.have_new_time or not self.have_new_imu or not self.have_new_pos:
time.sleep(0.001)
self.have_new_time = False
self.have_new_pos = False
self.have_new_imu = False
# create motor speed message
msg = mav_msgs.CommandMotorSpeed()
msg.header.stamp = rospy.get_rostime()
motor_speed = []
for i in range(len(actuators)):
motor_speed.append(actuators[i]*self.max_rpm)
msg.motor_speed = motor_speed
self.last_time = self.time_now
self.motor_pub.publish(msg)
# update lat/lon/altitude
self.update_position()
| gpl-3.0 | 6,900,844,794,029,682,000 | 30.77193 | 122 | 0.5381 | false |
RedstonerServer/redstoner-utils | iptracker.py | 1 | 4991 | import mysqlhack
import org.bukkit as bukkit
import json
from java.util import UUID as UUID
from helpers import *
from org.bukkit import *
from traceback import format_exc as trace
from iptracker_secrets import *
iptrack_permission = "utils.iptrack"
iptrack_version = "1.1.0"
@hook.event("player.PlayerJoinEvent", "low")
def on_player_join(event):
t = threading.Thread(target=on_player_join_thread, args=(event, ))
t.daemon = True
t.start()
def on_player_join_thread(event):
player = event.getPlayer()
ip = player.getAddress().getHostString()
uuid = uid(player)
conn = zxJDBC.connect(mysql_database, mysql_user, mysql_pass, "com.mysql.jdbc.Driver")
curs = conn.cursor()
curs.execute("SELECT ips FROM uuid2ips WHERE uuid = ?", (uuid, ))
results = curs.fetchall()
if len(results) == 0:
ips = []
else:
ips = json.loads(results[0][0])
curs.execute("SELECT uuids FROM ip2uuids WHERE ip = ?", (ip, ))
results = curs.fetchall()
if len(results) == 0:
uuids = []
else:
uuids = json.loads(results[0][0])
new_ip_entry = (len(ips) == 0)
new_uuid_entry = (len(uuids) == 0)
if ip not in ips:
ips.append(ip)
if new_ip_entry:
curs.execute("INSERT INTO uuid2ips VALUES (?,?)", (uuid, json.dumps(ips), ))
else:
curs.execute("UPDATE uuid2ips SET ips = ? WHERE uuid = ?", (json.dumps(ips), uuid, ))
if uuid not in uuids:
uuids.append(uuid)
if new_uuid_entry:
curs.execute("INSERT INTO ip2uuids VALUES (?,?)", (ip, json.dumps(uuids), ))
else:
curs.execute("UPDATE ip2uuids SET uuids = ? WHERE ip = ?", (json.dumps(uuids), ip, ))
conn.commit()
curs.close()
conn.close()
@hook.command("getinfo")
def on_getinfo_command(sender, args):
t = threading.Thread(target=on_getinfo_command_thread, args=(sender, args))
t.daemon = True
t.start()
def on_getinfo_command_thread(sender, args):
if(sender.hasPermission(iptrack_permission)):
if not checkargs(sender, args, 1, 1):
return False
else:
if is_ip(args[0]):
conn = zxJDBC.connect(mysql_database, mysql_user, mysql_pass, "com.mysql.jdbc.Driver")
curs = conn.cursor()
curs.execute("SELECT uuids FROM ip2uuids WHERE ip = ?", (args[0], ))
results = curs.fetchall()
curs.close()
conn.close()
if len(results) == 0:
msg(sender, "IP " + args[0] + " is not registered in the database, maybe you got a number wrong?")
else:
uuids = json.loads(results[0][0])
msg(sender, "IP " + args[0] + " was seen with " + str(len(uuids)) + " different Accounts:")
for i in range(0, len(uuids)):
p=Bukkit.getOfflinePlayer(UUID.fromString(uuids[i]))
if is_player(sender):
send_JSON_message(sender.getName(), '["",{"text":"' + p.getName() + ' - (uuid: ' + uuids[i] + '","color":"gold","clickEvent":{"action":"run_command","value":"/getinfo ' + p.getName() + '"},"hoverEvent":{"action":"show_text","value":{"text":"","extra":[{"text":"To search for ' + p.getName() + ' in the database, simply click the name!","color":"gold"}]}}}]')
else:
msg(sender,p.getName() + " - (uuid: " + uuids[i] + ")")
else:
target = Bukkit.getOfflinePlayer(args[0])
uuid = target.getUniqueId()
conn = zxJDBC.connect(mysql_database, mysql_user, mysql_pass, "com.mysql.jdbc.Driver")
curs = conn.cursor()
curs.execute("SELECT ips FROM uuid2ips WHERE uuid = ?", (uuid.toString(), ))
results = curs.fetchall()
curs.close()
conn.close()
if len(results) == 0:
msg(sender, "Player " + args[0] + " is not registered in the database, maybe you misspelled the name?")
else:
ips = json.loads(results[0][0])
msg(sender, "Player " + args[0] + " was seen with " + str(len(ips)) + " different IPs:")
for i in range(0, len(ips)):
if is_player(sender):
send_JSON_message(sender.getName(), '["",{"text":"' + ips[i] + '","color":"gold","clickEvent":{"action":"run_command","value":"/getinfo ' + ips[i] + '"},"hoverEvent":{"action":"show_text","value":{"text":"","extra":[{"text":"To search for the IP ' + ips[i] + ' in the database, simply click the IP!","color":"gold"}]}}}]')
else:
msg(sender,ips[i])
else:
noperm(sender)
return True
| mit | -1,135,395,695,882,114,200 | 44.212963 | 386 | 0.527149 | false |
HeraclesHX/scikit-learn | sklearn/datasets/california_housing.py | 198 | 3877 | """California housing dataset.
The original database is available from StatLib
http://lib.stat.cmu.edu/
The data contains 20,640 observations on 9 variables.
This dataset contains the average house value as target variable
and the following input variables (features): average income,
housing average age, average rooms, average bedrooms, population,
average occupation, latitude, and longitude in that order.
References
----------
Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions,
Statistics and Probability Letters, 33 (1997) 291-297.
"""
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from io import BytesIO
from os.path import join, exists
from os import makedirs
from zipfile import ZipFile
try:
# Python 2
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.request import urlopen
import numpy as np
from .base import get_data_home, Bunch
from ..externals import joblib
DATA_URL = "http://lib.stat.cmu.edu/modules.php?op=modload&name=Downloads&"\
"file=index&req=getit&lid=83"
TARGET_FILENAME = "cal_housing.pkz"
# Grab the module-level docstring to use as a description of the
# dataset
MODULE_DOCS = __doc__
def fetch_california_housing(data_home=None, download_if_missing=True):
"""Loader for the California housing dataset from StatLib.
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : ndarray, shape [20640, 8]
Each row corresponding to the 8 feature values in order.
dataset.target : numpy array of shape (20640,)
Each value corresponds to the average house value in units of 100,000.
dataset.feature_names : array of length 8
Array of ordered feature names used in the dataset.
dataset.DESCR : string
Description of the California housing dataset.
Notes
------
This dataset consists of 20,640 samples and 9 features.
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
if not exists(join(data_home, TARGET_FILENAME)):
print('downloading Cal. housing from %s to %s' % (DATA_URL, data_home))
fhandle = urlopen(DATA_URL)
buf = BytesIO(fhandle.read())
zip_file = ZipFile(buf)
try:
cadata_fd = zip_file.open('cadata.txt', 'r')
cadata = BytesIO(cadata_fd.read())
# skip the first 27 lines (documentation)
cal_housing = np.loadtxt(cadata, skiprows=27)
joblib.dump(cal_housing, join(data_home, TARGET_FILENAME),
compress=6)
finally:
zip_file.close()
else:
cal_housing = joblib.load(join(data_home, TARGET_FILENAME))
feature_names = ["MedInc", "HouseAge", "AveRooms", "AveBedrms",
"Population", "AveOccup", "Latitude", "Longitude"]
target, data = cal_housing[:, 0], cal_housing[:, 1:]
# avg rooms = total rooms / households
data[:, 2] /= data[:, 5]
# avg bed rooms = total bed rooms / households
data[:, 3] /= data[:, 5]
# avg occupancy = population / housholds
data[:, 5] = data[:, 4] / data[:, 5]
# target in units of 100,000
target = target / 100000.0
return Bunch(data=data,
target=target,
feature_names=feature_names,
DESCR=MODULE_DOCS)
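# Illustrative usage sketch (not part of this module; the import path assumes
# the packaged scikit-learn location of this file):
#
#     from sklearn.datasets import fetch_california_housing
#     housing = fetch_california_housing()
#     print(housing.data.shape)       # (20640, 8)
#     print(housing.feature_names)    # ['MedInc', 'HouseAge', 'AveRooms', ...]
#     print(housing.target[:3])       # median house values in units of 100,000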
| bsd-3-clause | 8,812,969,799,177,565,000 | 29.769841 | 79 | 0.655662 | false |
elopio/snapcraft | snapcraft/cli/version.py | 1 | 1079 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import click
import snapcraft
SNAPCRAFT_VERSION_TEMPLATE = 'snapcraft, version %(version)s'
@click.group()
def versioncli():
"""Version commands"""
pass
@versioncli.command('version')
def version():
"""Obtain snapcraft's version number.
Examples:
snapcraft version
snapcraft --version
"""
click.echo(SNAPCRAFT_VERSION_TEMPLATE % {'version': snapcraft.__version__})
| gpl-3.0 | -6,026,869,107,444,008,000 | 27.394737 | 79 | 0.719184 | false |
titom1986/CouchPotatoServer | libs/tmdb3/util.py | 32 | 16758 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------
# Name: util.py Assorted utilities used in tmdb_api
# Python Library
# Author: Raymond Wagner
#-----------------------
from copy import copy
from locales import get_locale
from tmdb_auth import get_session
class NameRepr(object):
"""Mixin for __repr__ methods using 'name' attribute."""
def __repr__(self):
return u"<{0.__class__.__name__} '{0.name}'>"\
.format(self).encode('utf-8')
class SearchRepr(object):
"""
Mixin for __repr__ methods for classes with '_name' and
'_request' attributes.
"""
def __repr__(self):
name = self._name if self._name else self._request._kwargs['query']
return u"<Search Results: {0}>".format(name).encode('utf-8')
class Poller(object):
"""
Wrapper for an optional callable to populate an Element derived
class with raw data, or data from a Request.
"""
def __init__(self, func, lookup, inst=None):
self.func = func
self.lookup = lookup
self.inst = inst
if func:
# with function, this allows polling data from the API
self.__doc__ = func.__doc__
self.__name__ = func.__name__
self.__module__ = func.__module__
else:
# without function, this is just a dummy poller used for applying
# raw data to a new Element class with the lookup table
self.__name__ = '_populate'
def __get__(self, inst, owner):
# normal decorator stuff
# return self for a class
# return instantiated copy of self for an object
if inst is None:
return self
func = None
if self.func:
func = self.func.__get__(inst, owner)
return self.__class__(func, self.lookup, inst)
def __call__(self):
# retrieve data from callable function, and apply
if not callable(self.func):
raise RuntimeError('Poller object called without a source function')
req = self.func()
        if (('language' in req._kwargs) or ('country' in req._kwargs)) \
                and self.inst._locale.fallthrough:
# request specifies a locale filter, and fallthrough is enabled
# run a first pass with specified filter
if not self.apply(req.readJSON(), False):
return
# if first pass results in missed data, run a second pass to
# fill in the gaps
self.apply(req.new(language=None, country=None).readJSON())
# re-apply the filtered first pass data over top the second
# unfiltered set. this is to work around the issue that the
# properties have no way of knowing when they should or
# should not overwrite existing data. the cache engine will
# take care of the duplicate query
self.apply(req.readJSON())
def apply(self, data, set_nones=True):
# apply data directly, bypassing callable function
unfilled = False
for k, v in self.lookup.items():
if (k in data) and \
((data[k] is not None) if callable(self.func) else True):
# argument received data, populate it
setattr(self.inst, v, data[k])
elif v in self.inst._data:
# argument did not receive data, but Element already contains
# some value, so skip this
continue
elif set_nones:
# argument did not receive data, so fill it with None
# to indicate such and prevent a repeat scan
setattr(self.inst, v, None)
else:
# argument does not need data, so ignore it allowing it to
# trigger a later poll. this is intended for use when
# initializing a class with raw data, or when performing a
# first pass through when performing locale fall through
unfilled = True
return unfilled
class Data(object):
"""
Basic response definition class
This maps to a single key in a JSON dictionary received from the API
"""
def __init__(self, field, initarg=None, handler=None, poller=None,
raw=True, default=u'', lang=None, passthrough={}):
"""
This defines how the dictionary value is to be processed by the
poller
field -- defines the dictionary key that filters what data
this uses
initarg -- (optional) specifies that this field must be
supplied when creating a new instance of the Element
class this definition is mapped to. Takes an integer
for the order it should be used in the input
arguments
handler -- (optional) callable used to process the received
value before being stored in the Element object.
poller -- (optional) callable to be used if data is requested
and this value has not yet been defined. the
callable should return a dictionary of data from a
JSON query. many definitions may share a single
poller, which will be and the data used to populate
all referenced definitions based off their defined
field
raw -- (optional) if the specified handler is an Element
class, the data will be passed into it using the
'raw' keyword attribute. setting this to false
will force the data to instead be passed in as the
first argument
"""
self.field = field
self.initarg = initarg
self.poller = poller
self.raw = raw
self.default = default
self.sethandler(handler)
self.passthrough = passthrough
def __get__(self, inst, owner):
if inst is None:
return self
if self.field not in inst._data:
if self.poller is None:
return None
self.poller.__get__(inst, owner)()
return inst._data[self.field]
def __set__(self, inst, value):
if (value is not None) and (value != ''):
value = self.handler(value)
else:
value = self.default
if isinstance(value, Element):
value._locale = inst._locale
value._session = inst._session
for source, dest in self.passthrough:
setattr(value, dest, getattr(inst, source))
inst._data[self.field] = value
def sethandler(self, handler):
# ensure handler is always callable, even for passthrough data
if handler is None:
self.handler = lambda x: x
elif isinstance(handler, ElementType) and self.raw:
self.handler = lambda x: handler(raw=x)
else:
self.handler = lambda x: handler(x)
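# Illustrative sketch (the class, field and poller names below are hypothetical,
# not part of this module): inside an Element subclass a Datapoint binds a JSON
# key to an attribute, e.g.
#
#     class Movie(Element):
#         title   = Datapoint('title', initarg=1)
#         runtime = Datapoint('runtime', poller=_populate_details, handler=int)
#
# where _populate_details is a method returning a Request; the ElementType
# metaclass below wraps it in a Poller and fills every Datapoint that names it,
# so reading movie.runtime triggers a fetch only if the value is still missing.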
class Datapoint(Data):
pass
class Datalist(Data):
"""
Response definition class for list data
This maps to a key in a JSON dictionary storing a list of data
"""
def __init__(self, field, handler=None, poller=None, sort=None, raw=True, passthrough={}):
"""
This defines how the dictionary value is to be processed by the
poller
field -- defines the dictionary key that filters what data
this uses
handler -- (optional) callable used to process the received
value before being stored in the Element object.
poller -- (optional) callable to be used if data is requested
and this value has not yet been defined. the
callable should return a dictionary of data from a
JSON query. many definitions may share a single
poller, which will be and the data used to populate
all referenced definitions based off their defined
field
sort -- (optional) name of attribute in resultant data to be
used to sort the list after processing. this
effectively requires a handler be defined to process
the data into something that has attributes
raw -- (optional) if the specified handler is an Element
class, the data will be passed into it using the
'raw' keyword attribute. setting this to false will
force the data to instead be passed in as the first
argument
"""
super(Datalist, self).__init__(field, None, handler, poller, raw, passthrough=passthrough)
self.sort = sort
def __set__(self, inst, value):
data = []
if value:
for val in value:
val = self.handler(val)
if isinstance(val, Element):
val._locale = inst._locale
val._session = inst._session
for source, dest in self.passthrough.items():
setattr(val, dest, getattr(inst, source))
data.append(val)
if self.sort:
if self.sort is True:
data.sort()
else:
data.sort(key=lambda x: getattr(x, self.sort))
inst._data[self.field] = data
class Datadict(Data):
"""
Response definition class for dictionary data
This maps to a key in a JSON dictionary storing a dictionary of data
"""
def __init__(self, field, handler=None, poller=None, raw=True,
key=None, attr=None, passthrough={}):
"""
This defines how the dictionary value is to be processed by the
poller
field -- defines the dictionary key that filters what data
this uses
handler -- (optional) callable used to process the received
value before being stored in the Element object.
poller -- (optional) callable to be used if data is requested
and this value has not yet been defined. the
callable should return a dictionary of data from a
JSON query. many definitions may share a single
poller, which will be and the data used to populate
all referenced definitions based off their defined
field
key -- (optional) name of key in resultant data to be used
as the key in the stored dictionary. if this is not
the field name from the source data is used instead
attr -- (optional) name of attribute in resultant data to be
used as the key in the stored dictionary. if this is
not the field name from the source data is used
instead
raw -- (optional) if the specified handler is an Element
class, the data will be passed into it using the
'raw' keyword attribute. setting this to false will
force the data to instead be passed in as the first
argument
"""
if key and attr:
raise TypeError("`key` and `attr` cannot both be defined")
super(Datadict, self).__init__(field, None, handler, poller, raw, passthrough=passthrough)
if key:
self.getkey = lambda x: x[key]
elif attr:
self.getkey = lambda x: getattr(x, attr)
else:
raise TypeError("Datadict requires `key` or `attr` be defined " +
"for populating the dictionary")
def __set__(self, inst, value):
data = {}
if value:
for val in value:
val = self.handler(val)
if isinstance(val, Element):
val._locale = inst._locale
val._session = inst._session
for source, dest in self.passthrough.items():
setattr(val, dest, getattr(inst, source))
data[self.getkey(val)] = val
inst._data[self.field] = data
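# Illustrative sketch (hypothetical names): Datalist and Datadict behave the
# same way for list- and dict-valued JSON keys, e.g.
#
#     class Movie(Element):
#         genres   = Datalist('genres', handler=Genre, sort='name')
#         trailers = Datadict('youtube_trailers', handler=Trailer, attr='name')
#
# each raw item is passed through the handler (as handler(raw=item) unless
# raw=False) and, for Datadict, keyed by the given dict key or object attribute.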
class ElementType( type ):
"""
MetaClass used to pre-process Element-derived classes and set up the
Data definitions
"""
def __new__(mcs, name, bases, attrs):
# any Data or Poller object defined in parent classes must be cloned
# and processed in this class to function properly
# scan through available bases for all such definitions and insert
# a copy into this class's attributes
# run in reverse order so higher priority values overwrite lower ones
data = {}
pollers = {'_populate':None}
for base in reversed(bases):
if isinstance(base, mcs):
for k, attr in base.__dict__.items():
if isinstance(attr, Data):
# extract copies of each defined Data element from
# parent classes
attr = copy(attr)
attr.poller = attr.poller.func
data[k] = attr
elif isinstance(attr, Poller):
# extract copies of each defined Poller function
# from parent classes
pollers[k] = attr.func
for k, attr in attrs.items():
if isinstance(attr, Data):
data[k] = attr
if '_populate' in attrs:
pollers['_populate'] = attrs['_populate']
# process all defined Data attribues, testing for use as an initial
# argument, and building a list of what Pollers are used to populate
# which Data points
pollermap = dict([(k, []) for k in pollers])
initargs = []
for k, v in data.items():
v.name = k
if v.initarg:
initargs.append(v)
if v.poller:
pn = v.poller.__name__
if pn not in pollermap:
pollermap[pn] = []
if pn not in pollers:
pollers[pn] = v.poller
pollermap[pn].append(v)
else:
pollermap['_populate'].append(v)
# wrap each used poller function with a Poller class, and push into
# the new class attributes
for k, v in pollermap.items():
if len(v) == 0:
continue
lookup = dict([(attr.field, attr.name) for attr in v])
poller = Poller(pollers[k], lookup)
attrs[k] = poller
# backfill wrapped Poller into each mapped Data object, and ensure
# the data elements are defined for this new class
for attr in v:
attr.poller = poller
attrs[attr.name] = attr
# build sorted list of arguments used for intialization
attrs['_InitArgs'] = tuple(
[a.name for a in sorted(initargs, key=lambda x: x.initarg)])
return type.__new__(mcs, name, bases, attrs)
def __call__(cls, *args, **kwargs):
obj = cls.__new__(cls)
if ('locale' in kwargs) and (kwargs['locale'] is not None):
obj._locale = kwargs['locale']
else:
obj._locale = get_locale()
if 'session' in kwargs:
obj._session = kwargs['session']
else:
obj._session = get_session()
obj._data = {}
if 'raw' in kwargs:
# if 'raw' keyword is supplied, create populate object manually
if len(args) != 0:
raise TypeError(
'__init__() takes exactly 2 arguments (1 given)')
obj._populate.apply(kwargs['raw'], False)
else:
# if not, the number of input arguments must exactly match that
# defined by the Data definitions
if len(args) != len(cls._InitArgs):
raise TypeError(
'__init__() takes exactly {0} arguments ({1} given)'\
.format(len(cls._InitArgs)+1, len(args)+1))
for a, v in zip(cls._InitArgs, args):
setattr(obj, a, v)
obj.__init__()
return obj
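# Illustrative sketch (hypothetical subclass): with the metaclass machinery
# above, an Element subclass can be built either from its declared init
# arguments or from an already-fetched JSON dictionary:
#
#     movie = Movie(603)           # positional args must match Movie._InitArgs
#     movie = Movie(raw=json_doc)  # applies the dict directly, no polling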
class Element( object ):
__metaclass__ = ElementType
_lang = 'en'
| gpl-3.0 | -5,477,990,303,396,749,000 | 40.583127 | 98 | 0.541055 | false |
tobegit3hub/deep_cnn | java_predict_client/src/main/proto/tensorflow/contrib/slim/python/slim/learning_test.py | 9 | 31311 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.learning."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from numpy import testing as np_testing
import tensorflow as tf
slim = tf.contrib.slim
class ClipGradientNormsTest(tf.test.TestCase):
def clip_values(self, arr):
norm = np.sqrt(np.sum(arr**2))
if norm > self._max_norm:
return self._max_norm * arr / np.sqrt(np.sum(arr**2))
return arr
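  # Worked example (sketch): for the test vector [1., 2., 3.] used below, the L2
  # norm is sqrt(1 + 4 + 9) = sqrt(14) ~= 3.742 > max_norm = 1.0, so the expected
  # clipped gradient is [1, 2, 3] / 3.742 ~= [0.267, 0.535, 0.802].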
def setUp(self):
np.random.seed(0)
self._max_norm = 1.0
self._grad_vec = np.array([1., 2., 3.])
self._clipped_grad_vec = self.clip_values(self._grad_vec)
self._zero_vec = np.zeros(self._grad_vec.size)
def testOrdinaryGradIsClippedCorrectly(self):
gradient = tf.constant(self._grad_vec, dtype=tf.float32)
variable = tf.Variable(self._zero_vec, dtype=tf.float32)
gradients_to_variables = (gradient, variable)
[gradients_to_variables] = slim.learning.clip_gradient_norms(
[gradients_to_variables], self._max_norm)
# Ensure the variable passed through.
self.assertEqual(gradients_to_variables[1], variable)
with self.test_session() as sess:
actual_gradient = sess.run(gradients_to_variables[0])
np_testing.assert_almost_equal(actual_gradient, self._clipped_grad_vec)
def testNoneGradPassesThroughCorrectly(self):
gradient = None
variable = tf.Variable(self._zero_vec, dtype=tf.float32)
gradients_to_variables = (gradient, variable)
[gradients_to_variables] = slim.learning.clip_gradient_norms(
[gradients_to_variables], self._max_norm)
self.assertEqual(gradients_to_variables[0], None)
self.assertEqual(gradients_to_variables[1], variable)
def testIndexedSlicesGradIsClippedCorrectly(self):
sparse_grad_indices = np.array([0, 1, 4])
sparse_grad_dense_shape = [self._grad_vec.size]
values = tf.constant(self._grad_vec, dtype=tf.float32)
indices = tf.constant(sparse_grad_indices, dtype=tf.int32)
dense_shape = tf.constant(sparse_grad_dense_shape, dtype=tf.int32)
gradient = tf.IndexedSlices(values, indices, dense_shape)
variable = tf.Variable(self._zero_vec, dtype=tf.float32)
gradients_to_variables = (gradient, variable)
gradients_to_variables = slim.learning.clip_gradient_norms(
[gradients_to_variables], self._max_norm)[0]
# Ensure the built IndexedSlice has the right form.
self.assertEqual(gradients_to_variables[1], variable)
self.assertEqual(gradients_to_variables[0].indices, indices)
self.assertEqual(gradients_to_variables[0].dense_shape, dense_shape)
with tf.Session() as sess:
actual_gradient = sess.run(gradients_to_variables[0].values)
np_testing.assert_almost_equal(actual_gradient, self._clipped_grad_vec)
class MultiplyGradientsTest(tf.test.TestCase):
def setUp(self):
np.random.seed(0)
self._multiplier = 3.7
self._grad_vec = np.array([1., 2., 3.])
self._multiplied_grad_vec = np.multiply(self._grad_vec, self._multiplier)
def testNonListGradsRaisesError(self):
gradient = tf.constant(self._grad_vec, dtype=tf.float32)
variable = tf.Variable(tf.zeros_like(gradient))
grad_to_var = (gradient, variable)
gradient_multipliers = {variable: self._multiplier}
with self.assertRaises(ValueError):
slim.learning.multiply_gradients(grad_to_var, gradient_multipliers)
def testEmptyMultiplesRaisesError(self):
gradient = tf.constant(self._grad_vec, dtype=tf.float32)
variable = tf.Variable(tf.zeros_like(gradient))
grad_to_var = (gradient, variable)
with self.assertRaises(ValueError):
slim.learning.multiply_gradients([grad_to_var], {})
def testNonDictMultiplierRaisesError(self):
gradient = tf.constant(self._grad_vec, dtype=tf.float32)
variable = tf.Variable(tf.zeros_like(gradient))
grad_to_var = (gradient, variable)
with self.assertRaises(ValueError):
slim.learning.multiply_gradients([grad_to_var], 3)
def testMultipleOfNoneGradRaisesError(self):
gradient = tf.constant(self._grad_vec, dtype=tf.float32)
variable = tf.Variable(tf.zeros_like(gradient))
grad_to_var = (None, variable)
gradient_multipliers = {variable: self._multiplier}
with self.assertRaises(ValueError):
slim.learning.multiply_gradients(grad_to_var, gradient_multipliers)
def testMultipleGradientsWithVariables(self):
gradient = tf.constant(self._grad_vec, dtype=tf.float32)
variable = tf.Variable(tf.zeros_like(gradient))
grad_to_var = (gradient, variable)
gradient_multipliers = {variable: self._multiplier}
[grad_to_var] = slim.learning.multiply_gradients(
[grad_to_var],
gradient_multipliers)
# Ensure the variable passed through.
self.assertEqual(grad_to_var[1], variable)
with self.test_session() as sess:
actual_gradient = sess.run(grad_to_var[0])
np_testing.assert_almost_equal(actual_gradient,
self._multiplied_grad_vec, 5)
def testIndexedSlicesGradIsMultiplied(self):
values = tf.constant(self._grad_vec, dtype=tf.float32)
indices = tf.constant([0, 1, 2], dtype=tf.int32)
dense_shape = tf.constant([self._grad_vec.size], dtype=tf.int32)
gradient = tf.IndexedSlices(values, indices, dense_shape)
variable = tf.Variable(tf.zeros((1, 3)))
grad_to_var = (gradient, variable)
gradient_multipliers = {variable: self._multiplier}
[grad_to_var] = slim.learning.multiply_gradients(
[grad_to_var],
gradient_multipliers)
# Ensure the built IndexedSlice has the right form.
self.assertEqual(grad_to_var[1], variable)
self.assertEqual(grad_to_var[0].indices, indices)
self.assertEqual(grad_to_var[0].dense_shape, dense_shape)
with self.test_session() as sess:
actual_gradient = sess.run(grad_to_var[0].values)
np_testing.assert_almost_equal(actual_gradient,
self._multiplied_grad_vec, 5)
def LogisticClassifier(inputs):
return slim.fully_connected(
inputs, 1, activation_fn=tf.sigmoid)
def BatchNormClassifier(inputs):
inputs = slim.batch_norm(inputs, decay=0.1)
return slim.fully_connected(inputs, 1, activation_fn=tf.sigmoid)
class TrainBNClassifierTest(tf.test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs')
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = BatchNormClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(
total_loss, optimizer)
loss = slim.learning.train(
train_op, logdir, number_of_steps=300, log_every_n_steps=10)
self.assertLess(loss, .1)
class CreateTrainOpTest(tf.test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.random.rand(16, 4).astype(np.float32)
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
def testUseUpdateOps(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
expected_mean = np.mean(self._inputs, axis=(0))
expected_var = np.var(self._inputs, axis=(0))
tf_predictions = BatchNormClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
moving_mean = tf.contrib.framework.get_variables_by_name('moving_mean')[0]
moving_variance = tf.contrib.framework.get_variables_by_name(
'moving_variance')[0]
with tf.Session() as sess:
# Initialize all variables
sess.run(tf.global_variables_initializer())
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
for _ in range(10):
sess.run([train_op])
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
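  # Arithmetic behind the assertion above (sketch): assign_moving_average updates
  # moving = decay * moving + (1 - decay) * batch. With decay = 0.1 and constant
  # batch statistics, after n steps moving = batch + (start - batch) * 0.1**n, so
  # after 10 steps the moving mean/variance are within ~1e-10 of the batch
  # values, well inside the assertAllClose tolerance.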
def testEmptyUpdateOps(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = BatchNormClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer,
update_ops=[])
moving_mean = tf.contrib.framework.get_variables_by_name('moving_mean')[0]
moving_variance = tf.contrib.framework.get_variables_by_name(
'moving_variance')[0]
with tf.Session() as sess:
# Initialize all variables
sess.run(tf.global_variables_initializer())
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
for _ in range(10):
sess.run([train_op])
mean = moving_mean.eval()
variance = moving_variance.eval()
# Since we skip update_ops the moving_vars are not updated.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
def testRecordTrainOpInCollection(self):
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Make sure the training op was recorded in the proper collection
self.assertTrue(train_op in tf.get_collection(tf.GraphKeys.TRAIN_OP))
class TrainTest(tf.test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testTrainWithNonDefaultGraph(self):
logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs')
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
loss = slim.learning.train(
train_op, logdir, number_of_steps=300, log_every_n_steps=10, graph=g)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithNoneAsLogdir(self):
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
loss = slim.learning.train(
train_op, None, number_of_steps=300, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithSessionConfig(self):
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
session_config = tf.ConfigProto(allow_soft_placement=True)
loss = slim.learning.train(
train_op,
None,
number_of_steps=300,
log_every_n_steps=10,
session_config=session_config)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithTrace(self):
logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs')
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
tf.summary.scalar('total_loss', total_loss)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
loss = slim.learning.train(
train_op,
logdir,
number_of_steps=300,
log_every_n_steps=10,
trace_every_n_steps=100)
self.assertIsNotNone(loss)
for trace_step in [1, 101, 201]:
trace_filename = 'tf_trace-%d.json' % trace_step
self.assertTrue(
os.path.isfile(os.path.join(logdir, trace_filename)))
def testTrainWithNoneAsLogdirWhenUsingSummariesRaisesError(self):
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
tf.summary.scalar('total_loss', total_loss)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
summary_op = tf.summary.merge_all()
with self.assertRaises(ValueError):
slim.learning.train(
train_op, None, number_of_steps=300, summary_op=summary_op)
def testTrainWithNoneAsLogdirWhenUsingTraceRaisesError(self):
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
with self.assertRaises(ValueError):
slim.learning.train(
train_op, None, number_of_steps=300, trace_every_n_steps=10)
def testTrainWithNoneAsLogdirWhenUsingSaverRaisesError(self):
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
saver = tf.train.Saver()
with self.assertRaises(ValueError):
slim.learning.train(
train_op, None, init_op=None, number_of_steps=300, saver=saver)
def testTrainWithNoneAsInitWhenUsingVarsRaisesError(self):
logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs')
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(
total_loss, optimizer)
with self.assertRaises(RuntimeError):
slim.learning.train(
train_op, logdir, init_op=None, number_of_steps=300)
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs')
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
loss = slim.learning.train(
train_op, logdir, number_of_steps=300, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithLocalVariable(self):
logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs')
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
local_multiplier = slim.local_variable(1.0)
tf_predictions = LogisticClassifier(tf_inputs) * local_multiplier
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(
total_loss, optimizer)
loss = slim.learning.train(
train_op, logdir, number_of_steps=300, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testResumeTrainAchievesRoughlyTheSameLoss(self):
logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs')
number_of_steps = [300, 301, 305]
for i in range(len(number_of_steps)):
with tf.Graph().as_default():
tf.set_random_seed(i)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(
total_loss, optimizer)
loss = slim.learning.train(
train_op, logdir, number_of_steps=number_of_steps[i],
log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(
learning_rate=learning_rate)
if gradient_multiplier != 1.0:
variables = tf.trainable_variables()
gradient_multipliers = {var: gradient_multiplier for var in variables}
else:
gradient_multipliers = None
return slim.learning.create_train_op(
total_loss, optimizer,
gradient_multipliers=gradient_multipliers)
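  # Editor's note (not part of the original file): `gradient_multipliers` is a
  # dict mapping each trainable variable to a scalar. Roughly, create_train_op
  # scales every variable's gradient by its multiplier before the optimizer
  # applies it, i.e. something along the lines of
  #
  #   grads_and_vars = optimizer.compute_gradients(total_loss)
  #   grads_and_vars = [(g * multipliers.get(v, 1.0), v)
  #                     for g, v in grads_and_vars]
  #
  # (a sketch, not the actual slim implementation). With plain gradient
  # descent this behaves like an effective per-variable learning rate of
  # learning_rate * gradient_multiplier, which is what the tests below exploit.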
def testTrainWithInitFromCheckpoint(self):
logdir1 = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs1')
logdir2 = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs2')
# First, train the model one step (make sure the error is high).
with tf.Graph().as_default():
tf.set_random_seed(0)
train_op = self.create_train_op()
loss = slim.learning.train(
train_op, logdir1, number_of_steps=1)
self.assertGreater(loss, .5)
# Next, train the model to convergence.
with tf.Graph().as_default():
tf.set_random_seed(1)
train_op = self.create_train_op()
loss = slim.learning.train(
train_op, logdir1, number_of_steps=300, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .02)
# Finally, advance the model a single step and validate that the loss is
# still low.
with tf.Graph().as_default():
tf.set_random_seed(2)
train_op = self.create_train_op()
model_variables = tf.all_variables()
model_path = os.path.join(logdir1, 'model.ckpt-300')
init_op = tf.global_variables_initializer()
op, init_feed_dict = slim.assign_from_checkpoint(
model_path, model_variables)
def InitAssignFn(sess):
sess.run(op, init_feed_dict)
loss = slim.learning.train(
train_op,
logdir2,
number_of_steps=1,
init_op=init_op,
init_fn=InitAssignFn)
self.assertIsNotNone(loss)
self.assertLess(loss, .02)
def testTrainWithInitFromFn(self):
logdir1 = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs1')
logdir2 = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs2')
# First, train the model one step (make sure the error is high).
with tf.Graph().as_default():
tf.set_random_seed(0)
train_op = self.create_train_op()
loss = slim.learning.train(
train_op, logdir1, number_of_steps=1)
self.assertGreater(loss, .5)
# Next, train the model to convergence.
with tf.Graph().as_default():
tf.set_random_seed(1)
train_op = self.create_train_op()
loss = slim.learning.train(
train_op, logdir1, number_of_steps=300, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
# Finally, advance the model a single step and validate that the loss is
# still low.
with tf.Graph().as_default():
tf.set_random_seed(2)
train_op = self.create_train_op()
model_variables = tf.all_variables()
model_path = os.path.join(logdir1, 'model.ckpt-300')
saver = tf.train.Saver(model_variables)
def RestoreFn(sess):
saver.restore(sess, model_path)
loss = slim.learning.train(
train_op,
logdir2,
number_of_steps=1,
init_fn=RestoreFn)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
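  # Editor's note (not part of the original file): the two warm-start tests
  # above differ only in how the checkpoint is restored before the extra step:
  # testTrainWithInitFromCheckpoint builds an assign op up front via
  # slim.assign_from_checkpoint(model_path, variables) and runs it inside
  # init_fn (next to an explicit init_op), while testTrainWithInitFromFn lets a
  # tf.train.Saver do the restore inside init_fn. Either way,
  # slim.learning.train calls init_fn(sess) once after the session is set up,
  # so training resumes from the converged weights and one more step keeps the
  # loss under the threshold.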
def ModelLoss(self):
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
return slim.losses.get_total_loss()
def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
logdir1 = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs1')
# First, train only the weights of the model.
with tf.Graph().as_default():
tf.set_random_seed(0)
total_loss = self.ModelLoss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
weights = slim.get_variables_by_name('weights')
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
variables_to_train=weights)
loss = slim.learning.train(
train_op, logdir1, number_of_steps=200, log_every_n_steps=10)
self.assertGreater(loss, .015)
self.assertLess(loss, .05)
# Next, train the biases of the model.
with tf.Graph().as_default():
tf.set_random_seed(1)
total_loss = self.ModelLoss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
biases = slim.get_variables_by_name('biases')
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
variables_to_train=biases)
loss = slim.learning.train(
train_op, logdir1, number_of_steps=300, log_every_n_steps=10)
self.assertGreater(loss, .015)
self.assertLess(loss, .05)
# Finally, train both weights and bias to get lower loss.
with tf.Graph().as_default():
tf.set_random_seed(2)
total_loss = self.ModelLoss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
loss = slim.learning.train(
train_op, logdir1, number_of_steps=400, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainingSubsetsOfVariablesOnlyUpdatesThoseVariables(self):
# First, train only the weights of the model.
with tf.Graph().as_default():
tf.set_random_seed(0)
total_loss = self.ModelLoss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
weights, biases = slim.get_variables()
train_op = slim.learning.create_train_op(total_loss, optimizer)
train_weights = slim.learning.create_train_op(
total_loss, optimizer, variables_to_train=[weights])
train_biases = slim.learning.create_train_op(
total_loss, optimizer, variables_to_train=[biases])
with tf.Session() as sess:
# Initialize the variables.
sess.run(tf.global_variables_initializer())
        # Get the initial weights and biases values.
weights_values, biases_values = sess.run([weights, biases])
self.assertGreater(np.linalg.norm(weights_values), 0)
self.assertAlmostEqual(np.linalg.norm(biases_values), 0)
# Update weights and biases.
loss = sess.run(train_op)
self.assertGreater(loss, .5)
new_weights, new_biases = sess.run([weights, biases])
# Check that the weights and biases have been updated.
self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
weights_values, biases_values = new_weights, new_biases
# Update only weights.
loss = sess.run(train_weights)
self.assertGreater(loss, .5)
new_weights, new_biases = sess.run([weights, biases])
# Check that the weights have been updated, but biases have not.
self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
self.assertAlmostEqual(np.linalg.norm(biases_values - new_biases), 0)
weights_values = new_weights
# Update only biases.
loss = sess.run(train_biases)
self.assertGreater(loss, .5)
new_weights, new_biases = sess.run([weights, biases])
# Check that the biases have been updated, but weights have not.
self.assertAlmostEqual(np.linalg.norm(weights_values - new_weights), 0)
self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
def testTrainWithAlteredGradients(self):
# Use the same learning rate but different gradient multipliers
# to train two models. Model with equivalently larger learning
# rate (i.e., learning_rate * gradient_multiplier) has smaller
# training loss.
logdir1 = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs1')
logdir2 = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs2')
multipliers = [1., 1000.]
number_of_steps = 10
losses = []
learning_rate = 0.001
# First, train the model with equivalently smaller learning rate.
with tf.Graph().as_default():
tf.set_random_seed(0)
train_op = self.create_train_op(
learning_rate=learning_rate,
gradient_multiplier=multipliers[0])
loss = slim.learning.train(
train_op, logdir1, number_of_steps=number_of_steps)
losses.append(loss)
self.assertGreater(loss, .5)
# Second, train the model with equivalently larger learning rate.
with tf.Graph().as_default():
tf.set_random_seed(0)
train_op = self.create_train_op(
learning_rate=learning_rate,
gradient_multiplier=multipliers[1])
loss = slim.learning.train(
train_op, logdir2, number_of_steps=number_of_steps)
losses.append(loss)
self.assertIsNotNone(loss)
self.assertLess(loss, .5)
# The loss of the model trained with larger learning rate should
# be smaller.
self.assertGreater(losses[0], losses[1])
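  # Editor's note (not part of the original test): a quick check of the
  # arithmetic behind this comparison. Both runs use learning_rate = 0.001, so
  # the effective step sizes are roughly
  #
  #   0.001 * 1.    = 0.001  (first model: barely moves in 10 steps)
  #   0.001 * 1000. = 1.0    (second model: comparable to the other tests)
  #
  # which is why losses[0] is expected to stay above .5 while losses[1] drops
  # below it.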
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | -2,200,278,555,366,416,000 | 35.966942 | 80 | 0.65964 | false |
edry/edx-platform | lms/djangoapps/courseware/tests/test_word_cloud.py | 134 | 8327 | # -*- coding: utf-8 -*-
"""Word cloud integration tests using mongo modulestore."""
import json
from operator import itemgetter
from nose.plugins.attrib import attr
from . import BaseTestXmodule
from xmodule.x_module import STUDENT_VIEW
@attr('shard_1')
class TestWordCloud(BaseTestXmodule):
"""Integration test for word cloud xmodule."""
CATEGORY = "word_cloud"
def _get_users_state(self):
"""Return current state for each user:
{username: json_state}
"""
# check word cloud response for every user
users_state = {}
for user in self.users:
response = self.clients[user.username].post(self.get_url('get_state'))
users_state[user.username] = json.loads(response.content)
return users_state
def _post_words(self, words):
"""Post `words` and return current state for each user:
{username: json_state}
"""
users_state = {}
for user in self.users:
response = self.clients[user.username].post(
self.get_url('submit'),
{'student_words[]': words},
HTTP_X_REQUESTED_WITH='XMLHttpRequest'
)
users_state[user.username] = json.loads(response.content)
return users_state
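    # Editor's note (not part of the original test): judging from the
    # assertions below, the JSON state returned by both AJAX handlers looks
    # roughly like
    #
    #   {
    #       "status": "success",            # or "fail" on a repeated submit
    #       "submitted": True,
    #       "student_words": {"word": 1},   # per-word counts
    #       "total_count": 4,
    #       "top_words": [{"text": "word", "percent": 25, "size": 1}],
    #       "display_student_percents": True,
    #   }
    #
    # The values shown are illustrative, taken from the expectations in
    # test_initial_state and test_post_words rather than from the word_cloud
    # module itself.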
def _check_response(self, response_contents, correct_jsons):
"""Utility function that compares correct and real responses."""
for username, content in response_contents.items():
# Used in debugger for comparing objects.
# self.maxDiff = None
            # We should compare top_words manually,
            # because they are unsorted.
keys_to_compare = set(content.keys()).difference(set(['top_words']))
self.assertDictEqual(
{k: content[k] for k in keys_to_compare},
{k: correct_jsons[username][k] for k in keys_to_compare})
# comparing top_words:
top_words_content = sorted(
content['top_words'],
key=itemgetter('text')
)
top_words_correct = sorted(
correct_jsons[username]['top_words'],
key=itemgetter('text')
)
self.assertListEqual(top_words_content, top_words_correct)
def test_initial_state(self):
"""Inital state of word cloud is correct. Those state that
is sended from server to frontend, when students load word
cloud page.
"""
users_state = self._get_users_state()
self.assertEqual(
''.join(set([
content['status']
for _, content in users_state.items()
])),
'success')
# correct initial data:
correct_initial_data = {
u'status': u'success',
u'student_words': {},
u'total_count': 0,
u'submitted': False,
u'top_words': {},
u'display_student_percents': False
}
for _, response_content in users_state.items():
self.assertEquals(response_content, correct_initial_data)
def test_post_words(self):
"""Students can submit data succesfully.
Word cloud data properly updates after students submit.
"""
input_words = [
"small",
"BIG",
" Spaced ",
" few words",
]
correct_words = [
u"small",
u"big",
u"spaced",
u"few words",
]
users_state = self._post_words(input_words)
self.assertEqual(
''.join(set([
content['status']
for _, content in users_state.items()
])),
'success')
correct_state = {}
for index, user in enumerate(self.users):
correct_state[user.username] = {
u'status': u'success',
u'submitted': True,
u'display_student_percents': True,
u'student_words': {word: 1 + index for word in correct_words},
u'total_count': len(input_words) * (1 + index),
u'top_words': [
{
u'text': word, u'percent': 100 / len(input_words),
u'size': (1 + index)
}
for word in correct_words
]
}
self._check_response(users_state, correct_state)
def test_collective_users_submits(self):
"""Test word cloud data flow per single and collective users submits.
Make sures that:
1. Inital state of word cloud is correct. Those state that
is sended from server to frontend, when students load word
cloud page.
2. Students can submit data succesfully.
3. Next submits produce "already voted" error. Next submits for user
are not allowed by user interface, but techically it possible, and
word_cloud should properly react.
4. State of word cloud after #3 is still as after #2.
"""
# 1.
users_state = self._get_users_state()
self.assertEqual(
''.join(set([
content['status']
for _, content in users_state.items()
])),
'success')
# 2.
        # Incremental state per user.
users_state_after_post = self._post_words(['word1', 'word2'])
self.assertEqual(
''.join(set([
content['status']
for _, content in users_state_after_post.items()
])),
'success')
# Final state after all posts.
users_state_before_fail = self._get_users_state()
# 3.
users_state_after_post = self._post_words(
['word1', 'word2', 'word3'])
self.assertEqual(
''.join(set([
content['status']
for _, content in users_state_after_post.items()
])),
'fail')
# 4.
current_users_state = self._get_users_state()
self._check_response(users_state_before_fail, current_users_state)
def test_unicode(self):
input_words = [u" this is unicode Юникод"]
correct_words = [u"this is unicode юникод"]
users_state = self._post_words(input_words)
self.assertEqual(
''.join(set([
content['status']
for _, content in users_state.items()
])),
'success')
for user in self.users:
self.assertListEqual(
users_state[user.username]['student_words'].keys(),
correct_words)
def test_handle_ajax_incorrect_dispatch(self):
responses = {
user.username: self.clients[user.username].post(
self.get_url('whatever'),
{},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
for user in self.users
}
status_codes = {response.status_code for response in responses.values()}
self.assertEqual(status_codes.pop(), 200)
for user in self.users:
self.assertDictEqual(
json.loads(responses[user.username].content),
{
'status': 'fail',
'error': 'Unknown Command!'
}
)
def test_word_cloud_constructor(self):
"""Make sure that all parameters extracted correclty from xml"""
fragment = self.runtime.render(self.item_descriptor, STUDENT_VIEW)
expected_context = {
'ajax_url': self.item_descriptor.xmodule_runtime.ajax_url,
'element_class': self.item_descriptor.location.category,
'element_id': self.item_descriptor.location.html_id(),
'num_inputs': 5, # default value
'submitted': False # default value
}
self.assertEqual(fragment.content, self.runtime.render_template('word_cloud.html', expected_context))
| agpl-3.0 | -5,679,732,946,933,814,000 | 31.73622 | 109 | 0.519904 | false |
collinss/Cinnamon | files/usr/share/cinnamon/cinnamon-settings/cinnamon-settings.py | 1 | 27007 | #!/usr/bin/python3
import sys
import os
import glob
import gettext
import time
import traceback
import locale
import urllib.request as urllib
from functools import cmp_to_key
import unicodedata
import config
from setproctitle import setproctitle
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('XApp', '1.0')
from gi.repository import Gio, Gtk, Pango, Gdk, XApp
sys.path.append(config.currentPath + "/modules")
sys.path.append(config.currentPath + "/bin")
import capi
import proxygsettings
import SettingsWidgets
# i18n
gettext.install("cinnamon", "/usr/share/locale", names="ngettext")
# Standard setting pages... this can be expanded to include applet dirs maybe?
mod_files = glob.glob(config.currentPath + "/modules/*.py")
mod_files.sort()
if len(mod_files) == 0:
print("No settings modules found!!")
sys.exit(1)
mod_files = [x.split('/')[-1].split('.')[0] for x in mod_files]
for mod_file in mod_files:
if mod_file[0:3] != "cs_":
raise Exception("Settings modules must have a prefix of 'cs_' !!")
modules = map(__import__, mod_files)
# i18n for menu item
menuName = _("System Settings")
menuComment = _("Control Center")
WIN_WIDTH = 800
WIN_HEIGHT = 600
WIN_H_PADDING = 20
MIN_LABEL_WIDTH = 16
MAX_LABEL_WIDTH = 25
MIN_PIX_WIDTH = 100
MAX_PIX_WIDTH = 160
MOUSE_BACK_BUTTON = 8
CATEGORIES = [
# Display name ID Show it? Always False to start Icon
{"label": _("Appearance"), "id": "appear", "show": False, "icon": "cs-cat-appearance"},
{"label": _("Preferences"), "id": "prefs", "show": False, "icon": "cs-cat-prefs"},
{"label": _("Hardware"), "id": "hardware", "show": False, "icon": "cs-cat-hardware"},
{"label": _("Administration"), "id": "admin", "show": False, "icon": "cs-cat-admin"}
]
CONTROL_CENTER_MODULES = [
# Label Module ID Icon Category Keywords for filter
[_("Network"), "network", "cs-network", "hardware", _("network, wireless, wifi, ethernet, broadband, internet")],
[_("Display"), "display", "cs-display", "hardware", _("display, screen, monitor, layout, resolution, dual, lcd")],
[_("Color"), "color", "cs-color", "hardware", _("color, profile, display, printer, output")],
[_("Graphics Tablet"), "wacom", "cs-tablet", "hardware", _("wacom, digitize, tablet, graphics, calibrate, stylus")]
]
STANDALONE_MODULES = [
# Label Executable Icon Category Keywords for filter
[_("Printers"), "system-config-printer", "cs-printer", "hardware", _("printers, laser, inkjet")],
[_("Firewall"), "gufw", "cs-firewall", "admin", _("firewall, block, filter, programs")],
[_("Firewall"), "firewall-config", "cs-firewall", "admin", _("firewall, block, filter, programs")],
[_("Languages"), "mintlocale", "cs-language", "prefs", _("language, install, foreign")],
[_("Input Method"), "mintlocale-im", "cs-input-method", "prefs", _("language, install, foreign, input, method, chinese, korean, japanese, typing")],
[_("Login Window"), "pkexec lightdm-settings", "cs-login", "admin", _("login, lightdm, mdm, gdm, manager, user, password, startup, switch")],
[_("Login Window"), "lightdm-gtk-greeter-settings-pkexec", "cs-login", "admin", _("login, lightdm, manager, settings, editor")],
[_("Driver Manager"), "pkexec driver-manager", "cs-drivers", "admin", _("video, driver, wifi, card, hardware, proprietary, nvidia, radeon, nouveau, fglrx")],
[_("Nvidia Settings"), "nvidia-settings", "cs-drivers", "admin", _("video, driver, proprietary, nvidia, settings")],
[_("Software Sources"), "pkexec mintsources", "cs-sources", "admin", _("ppa, repository, package, source, download")],
[_("Package Management"), "dnfdragora", "cs-sources", "admin", _("update, install, repository, package, source, download")],
[_("Package Management"), "yumex-dnf", "cs-sources", "admin", _("update, install, repository, package, source, download")],
[_("Users and Groups"), "cinnamon-settings-users", "cs-user-accounts", "admin", _("user, users, account, accounts, group, groups, password")],
[_("Bluetooth"), "blueberry", "cs-bluetooth", "hardware", _("bluetooth, dongle, transfer, mobile")],
[_("Manage Services and Units"), "systemd-manager-pkexec", "cs-sources", "admin", _("systemd, units, services, systemctl, init")]
]
def print_timing(func):
# decorate functions with @print_timing to output how long they take to run.
def wrapper(*arg):
t1 = time.time()
res = func(*arg)
t2 = time.time()
        print('%s took %0.3f ms' % (func.__name__, (t2-t1)*1000.0))
return res
return wrapper
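# Editor's note (not part of the original file): a minimal usage sketch for the
# decorator above; nothing in this file is decorated with it by default.
#
#   @print_timing
#   def displayCategories(self):
#       ...
#
# would print something like "displayCategories took 12.345 ms" every time the
# method runs.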
def touch(fname, times=None):
    with open(fname, 'a'):
os.utime(fname, times)
class MainWindow:
# Change pages
def side_view_nav(self, side_view, path, cat):
selected_items = side_view.get_selected_items()
if len(selected_items) > 0:
self.deselect(cat)
filtered_path = side_view.get_model().convert_path_to_child_path(selected_items[0])
if filtered_path is not None:
self.go_to_sidepage(cat, filtered_path, user_action=True)
def _on_sidepage_hide_stack(self):
self.stack_switcher.set_opacity(0)
def _on_sidepage_show_stack(self):
self.stack_switcher.set_opacity(1)
def go_to_sidepage(self, cat, path, user_action=True):
iterator = self.store[cat].get_iter(path)
sidePage = self.store[cat].get_value(iterator,2)
if not sidePage.is_standalone:
if not user_action:
self.window.set_title(sidePage.name)
self.window.set_icon_name(sidePage.icon)
sidePage.build()
if sidePage.stack:
current_page = sidePage.stack.get_visible_child_name()
self.stack_switcher.set_stack(sidePage.stack)
l = sidePage.stack.get_children()
if len(l) > 0:
sidePage.stack.set_visible_child(l[0])
if sidePage.stack.get_visible():
self.stack_switcher.set_opacity(1)
else:
self.stack_switcher.set_opacity(0)
if hasattr(sidePage, "connect_proxy"):
sidePage.connect_proxy("hide_stack", self._on_sidepage_hide_stack)
sidePage.connect_proxy("show_stack", self._on_sidepage_show_stack)
else:
self.stack_switcher.set_opacity(0)
else:
self.stack_switcher.set_opacity(0)
if user_action:
self.main_stack.set_visible_child_name("content_box_page")
self.header_stack.set_visible_child_name("content_box")
else:
self.main_stack.set_visible_child_full("content_box_page", Gtk.StackTransitionType.NONE)
self.header_stack.set_visible_child_full("content_box", Gtk.StackTransitionType.NONE)
self.current_sidepage = sidePage
width = 0
for widget in self.top_bar:
m, n = widget.get_preferred_width()
width += n
self.top_bar.set_size_request(width + 20, -1)
self.maybe_resize(sidePage)
else:
sidePage.build()
def maybe_resize(self, sidePage):
m, n = self.content_box.get_preferred_size()
# Resize vertically depending on the height requested by the module
use_height = WIN_HEIGHT
total_height = n.height + self.bar_heights + WIN_H_PADDING
if not sidePage.size:
# No height requested, resize vertically if the module is taller than the window
if total_height > WIN_HEIGHT:
use_height = total_height
elif sidePage.size > 0:
# Height hardcoded by the module
use_height = sidePage.size + self.bar_heights + WIN_H_PADDING
elif sidePage.size == -1:
# Module requested the window to fit it (i.e. shrink the window if necessary)
use_height = total_height
self.window.resize(WIN_WIDTH, use_height)
def deselect(self, cat):
for key in self.side_view:
if key is not cat:
self.side_view[key].unselect_all()
''' Create the UI '''
def __init__(self):
self.builder = Gtk.Builder()
self.builder.add_from_file(config.currentPath + "/cinnamon-settings.ui")
self.window = XApp.GtkWindow(window_position=Gtk.WindowPosition.CENTER,
default_width=800, default_height=600)
main_box = self.builder.get_object("main_box")
self.window.add(main_box)
self.top_bar = self.builder.get_object("top_bar")
self.side_view = {}
self.main_stack = self.builder.get_object("main_stack")
self.main_stack.set_transition_type(Gtk.StackTransitionType.CROSSFADE)
self.main_stack.set_transition_duration(150)
self.header_stack = self.builder.get_object("header_stack")
self.header_stack.set_transition_type(Gtk.StackTransitionType.CROSSFADE)
self.header_stack.set_transition_duration(150)
self.side_view_container = self.builder.get_object("category_box")
self.side_view_sw = self.builder.get_object("side_view_sw")
context = self.side_view_sw.get_style_context()
context.add_class("cs-category-view")
context.add_class("view")
self.side_view_sw.show_all()
self.content_box = self.builder.get_object("content_box")
self.content_box_sw = self.builder.get_object("content_box_sw")
self.content_box_sw.show_all()
self.button_back = self.builder.get_object("button_back")
self.button_back.set_tooltip_text(_("Back to all settings"))
button_image = self.builder.get_object("image1")
button_image.props.icon_size = Gtk.IconSize.MENU
self.stack_switcher = self.builder.get_object("stack_switcher")
m, n = self.button_back.get_preferred_width()
self.stack_switcher.set_margin_end(n)
self.search_entry = self.builder.get_object("search_box")
self.search_entry.set_placeholder_text(_("Search"))
self.search_entry.connect("changed", self.onSearchTextChanged)
self.search_entry.connect("icon-press", self.onClearSearchBox)
self.window.connect("destroy", self.quit)
self.builder.connect_signals(self)
self.unsortedSidePages = []
self.sidePages = []
self.settings = Gio.Settings.new("org.cinnamon")
self.current_cat_widget = None
self.current_sidepage = None
self.c_manager = capi.CManager()
self.content_box.c_manager = self.c_manager
self.bar_heights = 0
for module in modules:
try:
mod = module.Module(self.content_box)
if self.loadCheck(mod) and self.setParentRefs(mod):
self.unsortedSidePages.append((mod.sidePage, mod.name, mod.category))
except:
print("Failed to load module %s" % module)
traceback.print_exc()
for item in CONTROL_CENTER_MODULES:
ccmodule = SettingsWidgets.CCModule(item[0], item[1], item[2], item[3], item[4], self.content_box)
if ccmodule.process(self.c_manager):
self.unsortedSidePages.append((ccmodule.sidePage, ccmodule.name, ccmodule.category))
for item in STANDALONE_MODULES:
samodule = SettingsWidgets.SAModule(item[0], item[1], item[2], item[3], item[4], self.content_box)
if samodule.process():
self.unsortedSidePages.append((samodule.sidePage, samodule.name, samodule.category))
# sort the modules alphabetically according to the current locale
localeStrKey = cmp_to_key(locale.strcoll)
# Apply locale key to the field name of each side page.
sidePagesKey = lambda m : localeStrKey(m[0].name)
self.sidePages = sorted(self.unsortedSidePages, key=sidePagesKey)
# create the backing stores for the side nav-view.
sidePagesIters = {}
self.store = {}
self.storeFilter = {}
for sidepage in self.sidePages:
sp, sp_id, sp_cat = sidepage
if sp_cat not in self.store: # Label Icon sidePage Category
self.store[sidepage[2]] = Gtk.ListStore(str, str, object, str)
for category in CATEGORIES:
if category["id"] == sp_cat:
category["show"] = True
# Don't allow item names (and their translations) to be more than 30 chars long. It looks ugly and it creates huge gaps in the icon views
name = sp.name
if len(name) > 30:
name = "%s..." % name[:30]
sidePagesIters[sp_id] = (self.store[sp_cat].append([name, sp.icon, sp, sp_cat]), sp_cat)
self.min_label_length = 0
self.min_pix_length = 0
for key in self.store:
char, pix = self.get_label_min_width(self.store[key])
self.min_label_length = max(char, self.min_label_length)
self.min_pix_length = max(pix, self.min_pix_length)
self.storeFilter[key] = self.store[key].filter_new()
self.storeFilter[key].set_visible_func(self.filter_visible_function)
self.min_label_length += 2
self.min_pix_length += 4
self.min_label_length = max(self.min_label_length, MIN_LABEL_WIDTH)
self.min_pix_length = max(self.min_pix_length, MIN_PIX_WIDTH)
self.min_label_length = min(self.min_label_length, MAX_LABEL_WIDTH)
self.min_pix_length = min(self.min_pix_length, MAX_PIX_WIDTH)
self.displayCategories()
# set up larger components.
self.window.set_title(_("System Settings"))
self.button_back.connect('clicked', self.back_to_icon_view)
self.calculate_bar_heights()
# Select the first sidePage
if len(sys.argv) > 1 and sys.argv[1] in sidePagesIters:
# If we're launching a module directly, set the WM class so GWL
# can consider it as a standalone app and give it its own
# group.
wm_class = "cinnamon-settings %s" % sys.argv[1]
self.window.set_wmclass(wm_class, wm_class)
self.button_back.hide()
(iter, cat) = sidePagesIters[sys.argv[1]]
path = self.store[cat].get_path(iter)
if path:
self.go_to_sidepage(cat, path, user_action=False)
else:
self.search_entry.grab_focus()
else:
self.search_entry.grab_focus()
self.window.connect("key-press-event", self.on_keypress)
self.window.connect("button-press-event", self.on_buttonpress)
self.window.show()
def on_keypress(self, widget, event):
grab = False
device = Gtk.get_current_event_device()
if device.get_source() == Gdk.InputSource.KEYBOARD:
grab = Gdk.Display.get_default().device_is_grabbed(device)
if not grab and event.keyval == Gdk.KEY_BackSpace and (type(self.window.get_focus()) not in
(Gtk.TreeView, Gtk.Entry, Gtk.SpinButton, Gtk.TextView)):
self.back_to_icon_view(None)
return True
return False
def on_buttonpress(self, widget, event):
if event.button == MOUSE_BACK_BUTTON:
self.back_to_icon_view(None)
return True
return False
def calculate_bar_heights(self):
h = 0
m, n = self.top_bar.get_preferred_size()
h += n.height
self.bar_heights = h
def onSearchTextChanged(self, widget):
self.displayCategories()
def onClearSearchBox(self, widget, position, event):
if position == Gtk.EntryIconPosition.SECONDARY:
self.search_entry.set_text("")
def strip_accents(self, text):
try:
text = unicode(text, 'utf-8')
except NameError:
# unicode is default in Python 3
pass
text = unicodedata.normalize('NFD', text)
text = text.encode('ascii', 'ignore')
text = text.decode("utf-8")
return str(text)
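    # Editor's note (not part of the original file): strip_accents() exists so
    # the search box matches labels regardless of accents. NFD normalization
    # splits each accented character into a base character plus a combining
    # mark, and the ascii/'ignore' encode step then drops the combining marks,
    # so (hypothetical example) strip_accents(u"Écran") gives "Ecran" and a
    # query typed as "ecran" still matches.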
def filter_visible_function(self, model, iter, user_data = None):
sidePage = model.get_value(iter, 2)
text = self.strip_accents(self.search_entry.get_text().lower())
if self.strip_accents(sidePage.name.lower()).find(text) > -1 or \
self.strip_accents(sidePage.keywords.lower()).find(text) > -1:
return True
else:
return False
def displayCategories(self):
widgets = self.side_view_container.get_children()
for widget in widgets:
widget.destroy()
self.first_category_done = False # This is just to prevent an extra separator showing up before the first category
for category in CATEGORIES:
if category["show"] is True:
self.prepCategory(category)
self.side_view_container.show_all()
def get_label_min_width(self, model):
min_width_chars = 0
min_width_pixels = 0
icon_view = Gtk.IconView()
iter = model.get_iter_first()
while iter != None:
string = model.get_value(iter, 0)
split_by_word = string.split(" ")
for word in split_by_word:
layout = icon_view.create_pango_layout(word)
item_width, item_height = layout.get_pixel_size()
if item_width > min_width_pixels:
min_width_pixels = item_width
if len(word) > min_width_chars:
min_width_chars = len(word)
iter = model.iter_next(iter)
return min_width_chars, min_width_pixels
def pixbuf_data_func(self, column, cell, model, iter, data=None):
wrapper = model.get_value(iter, 1)
if wrapper:
cell.set_property('surface', wrapper.surface)
def prepCategory(self, category):
self.storeFilter[category["id"]].refilter()
if not self.anyVisibleInCategory(category):
return
if self.first_category_done:
widget = Gtk.Separator.new(Gtk.Orientation.HORIZONTAL)
self.side_view_container.pack_start(widget, False, False, 10)
box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 4)
img = Gtk.Image.new_from_icon_name(category["icon"], Gtk.IconSize.BUTTON)
box.pack_start(img, False, False, 4)
widget = Gtk.Label(yalign=0.5)
widget.set_use_markup(True)
widget.set_markup('<span size="12000">%s</span>' % category["label"])
box.pack_start(widget, False, False, 1)
self.side_view_container.pack_start(box, False, False, 0)
widget = Gtk.IconView.new_with_model(self.storeFilter[category["id"]])
area = widget.get_area()
widget.set_item_width(self.min_pix_length)
widget.set_item_padding(0)
widget.set_column_spacing(18)
widget.set_row_spacing(18)
widget.set_margin(20)
pixbuf_renderer = Gtk.CellRendererPixbuf()
text_renderer = Gtk.CellRendererText(ellipsize=Pango.EllipsizeMode.NONE, wrap_mode=Pango.WrapMode.WORD_CHAR, wrap_width=0, width_chars=self.min_label_length, alignment=Pango.Alignment.CENTER, xalign=0.5)
area.pack_start(pixbuf_renderer, True, True, False)
area.pack_start(text_renderer, True, True, False)
area.add_attribute(pixbuf_renderer, "icon-name", 1)
pixbuf_renderer.set_property("stock-size", Gtk.IconSize.DIALOG)
pixbuf_renderer.set_property("follow-state", True)
area.add_attribute(text_renderer, "text", 0)
self.side_view[category["id"]] = widget
self.side_view_container.pack_start(self.side_view[category["id"]], False, False, 0)
self.first_category_done = True
self.side_view[category["id"]].connect("item-activated", self.side_view_nav, category["id"])
self.side_view[category["id"]].connect("button-release-event", self.button_press, category["id"])
self.side_view[category["id"]].connect("keynav-failed", self.on_keynav_failed, category["id"])
self.side_view[category["id"]].connect("selection-changed", self.on_selection_changed, category["id"])
def bring_selection_into_view(self, iconview):
sel = iconview.get_selected_items()
if sel:
path = sel[0]
found, rect = iconview.get_cell_rect(path, None)
cw = self.side_view_container.get_window()
cw_x, cw_y = cw.get_position()
ivw = iconview.get_window()
iv_x, iv_y = ivw.get_position()
final_y = rect.y + (rect.height / 2) + cw_y + iv_y
adj = self.side_view_sw.get_vadjustment()
page = adj.get_page_size()
current_pos = adj.get_value()
if final_y > current_pos + page:
adj.set_value(iv_y + rect.y)
elif final_y < current_pos:
adj.set_value(iv_y + rect.y)
def on_selection_changed(self, widget, category):
sel = widget.get_selected_items()
if len(sel) > 0:
self.current_cat_widget = widget
self.bring_selection_into_view(widget)
for iv in self.side_view:
if self.side_view[iv] == self.current_cat_widget:
continue
self.side_view[iv].unselect_all()
def get_cur_cat_index(self, category):
i = 0
for cat in CATEGORIES:
if category == cat["id"]:
return i
i += 1
def get_cur_column(self, iconview):
s, path, cell = iconview.get_cursor()
if path:
col = iconview.get_item_column(path)
return col
def reposition_new_cat(self, sel, iconview):
iconview.set_cursor(sel, None, False)
iconview.select_path(sel)
iconview.grab_focus()
def on_keynav_failed(self, widget, direction, category):
num_cats = len(CATEGORIES)
current_idx = self.get_cur_cat_index(category)
new_cat = CATEGORIES[current_idx]
ret = False
dist = 1000
sel = None
if direction == Gtk.DirectionType.DOWN and current_idx < num_cats - 1:
new_cat = CATEGORIES[current_idx + 1]
col = self.get_cur_column(widget)
new_cat_view = self.side_view[new_cat["id"]]
model = new_cat_view.get_model()
iter = model.get_iter_first()
while iter is not None:
path = model.get_path(iter)
c = new_cat_view.get_item_column(path)
d = abs(c - col)
if d < dist:
sel = path
dist = d
iter = model.iter_next(iter)
self.reposition_new_cat(sel, new_cat_view)
ret = True
elif direction == Gtk.DirectionType.UP and current_idx > 0:
new_cat = CATEGORIES[current_idx - 1]
col = self.get_cur_column(widget)
new_cat_view = self.side_view[new_cat["id"]]
model = new_cat_view.get_model()
iter = model.get_iter_first()
while iter is not None:
path = model.get_path(iter)
c = new_cat_view.get_item_column(path)
d = abs(c - col)
if d <= dist:
sel = path
dist = d
iter = model.iter_next(iter)
self.reposition_new_cat(sel, new_cat_view)
ret = True
return ret
def button_press(self, widget, event, category):
if event.button == 1:
self.side_view_nav(widget, None, category)
def anyVisibleInCategory(self, category):
id = category["id"]
iter = self.storeFilter[id].get_iter_first()
visible = False
while iter is not None:
cat = self.storeFilter[id].get_value(iter, 3)
visible = cat == category["id"]
iter = self.storeFilter[id].iter_next(iter)
return visible
def setParentRefs (self, mod):
try:
mod._setParentRef(self.window)
except AttributeError:
pass
return True
def loadCheck (self, mod):
try:
return mod._loadCheck()
except:
return True
def back_to_icon_view(self, widget):
self.window.set_title(_("System Settings"))
self.window.set_icon_name("preferences-system")
self.window.resize(WIN_WIDTH, WIN_HEIGHT)
children = self.content_box.get_children()
for child in children:
child.hide()
if child.get_name() == "c_box":
c_widgets = child.get_children()
for c_widget in c_widgets:
c_widget.hide()
self.main_stack.set_visible_child_name("side_view_page")
self.header_stack.set_visible_child_name("side_view")
self.search_entry.grab_focus()
self.current_sidepage = None
def quit(self, *args):
self.window.destroy()
Gtk.main_quit()
if __name__ == "__main__":
setproctitle("cinnamon-settings")
import signal
ps = proxygsettings.get_proxy_settings()
if ps:
proxy = urllib.ProxyHandler(ps)
else:
proxy = urllib.ProxyHandler()
urllib.install_opener(urllib.build_opener(proxy))
window = MainWindow()
signal.signal(signal.SIGINT, window.quit)
Gtk.main()
| gpl-2.0 | 8,306,470,051,216,251,000 | 42.34992 | 211 | 0.565816 | false |
RPGOne/Skynet | scikit-learn-0.18.1/sklearn/linear_model/tests/test_ransac.py | 52 | 17482 | from scipy import sparse
import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_almost_equal
from sklearn.linear_model import LinearRegression, RANSACRegressor, Lasso
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
# When residual_threshold=0.0 there are no inliers and a
# ValueError with a message should be raised
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.0, random_state=0)
assert_raises_regexp(ValueError,
"No inliers.*residual_threshold.*0\.0",
ransac_estimator.fit, X, y)
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# XXX: Remove in 0.20
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
assert_warns(DeprecationWarning, ransac_estimator1.fit, X, yyy)
assert_warns(DeprecationWarning, ransac_estimator2.fit, X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
assert_warns(DeprecationWarning, ransac_estimator2.fit, X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_residual_loss():
loss_multi1 = lambda y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
loss_multi2 = lambda y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
loss_mono = lambda y_true, y_pred : np.abs(y_true - y_pred)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss=loss_multi1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss=loss_multi2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.loss = loss_mono
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss="squared_loss")
ransac_estimator3.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 10
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
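# Editor's note (not part of the original sklearn test): the hand-calculated
# values above follow the standard RANSAC trial-count formula
#
#     N = ceil(log(1 - p) / log(1 - w ** m))
#
# where p is stop_probability, w the inlier ratio and m min_samples. Worked
# example for the e = 30%, min_samples = 2 case: w = 0.7, w ** 2 = 0.49, and
# log(0.01) / log(0.51) ~= 4.605 / 0.673 ~= 6.8, which rounds up to the
# asserted 7 trials.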
def test_ransac_fit_sample_weight():
ransac_estimator = RANSACRegressor(random_state=0)
n_samples = y.shape[0]
weights = np.ones(n_samples)
ransac_estimator.fit(X, y, weights)
# sanity check
assert_equal(ransac_estimator.inlier_mask_.shape[0], n_samples)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
# check that mask is correct
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
X_ = random_state.randint(0, 200, [10, 1])
y_ = np.ndarray.flatten(0.2 * X_ + 2)
sample_weight = random_state.randint(0, 10, 10)
outlier_X = random_state.randint(0, 1000, [1, 1])
outlier_weight = random_state.randint(0, 10, 1)
outlier_y = random_state.randint(-1000, 0, 1)
X_flat = np.append(np.repeat(X_, sample_weight, axis=0),
np.repeat(outlier_X, outlier_weight, axis=0), axis=0)
y_flat = np.ndarray.flatten(np.append(np.repeat(y_, sample_weight, axis=0),
np.repeat(outlier_y, outlier_weight, axis=0),
axis=0))
ransac_estimator.fit(X_flat, y_flat)
ref_coef_ = ransac_estimator.estimator_.coef_
sample_weight = np.append(sample_weight, outlier_weight)
X_ = np.append(X_, outlier_X, axis=0)
y_ = np.append(y_, outlier_y)
ransac_estimator.fit(X_, y_, sample_weight)
assert_almost_equal(ransac_estimator.estimator_.coef_, ref_coef_)
    # check that an error is raised if base_estimator.fit doesn't support
    # sample_weight
base_estimator = Lasso()
ransac_estimator = RANSACRegressor(base_estimator)
assert_raises(ValueError, ransac_estimator.fit, X, y, weights)
| bsd-3-clause | -7,203,407,705,068,530,000 | 38.022321 | 80 | 0.601419 | false |
heeraj123/oh-mainline | vendor/packages/django-assets/django_assets/jinja2/extension.py | 16 | 2628 | from jinja2.ext import Extension
from jinja2 import nodes
from django_assets.conf import settings
from django_assets.merge import process
from django_assets.bundle import Bundle
from django_assets import registry
__all__ = ('assets',)
class AssetsExtension(Extension):
"""
As opposed to the Django tag, this tag is slightly more capable due
to the expressive powers inherited from Jinja. For example:
{% assets "src1.js", "src2.js", get_src3(),
filter=("jsmin", "gzip"), output=get_output() %}
{% endassets %}
"""
tags = set(['assets'])
def parse(self, parser):
lineno = parser.stream.next().lineno
files = []
output = nodes.Const(None)
filter = nodes.Const(None)
# parse the arguments
first = True
        while parser.stream.current.type != 'block_end':
if not first:
parser.stream.expect('comma')
first = False
# lookahead to see if this is an assignment (an option)
if parser.stream.current.test('name') and parser.stream.look().test('assign'):
name = parser.stream.next().value
parser.stream.skip()
value = parser.parse_expression()
if name == 'filter':
filter = value
elif name == 'output':
output = value
else:
parser.fail('Invalid keyword argument: %s' % name)
# otherwise assume a source file is given, which may
# be any expression, except note that strings are handled
# separately above
else:
files.append(parser.parse_expression())
# parse the contents of this tag, and return a block
body = parser.parse_statements(['name:endassets'], drop_needle=True)
return nodes.CallBlock(
self.call_method('_render_assets',
args=[filter, output, nodes.List(files)]),
[nodes.Name('ASSET_URL', 'store')], [], body).\
set_lineno(lineno)
def _render_assets(self, filter, output, files, caller=None):
# resolve bundle names
registry.autoload()
files = [registry.get(f) or f for f in files]
result = u""
urls = process(Bundle(*files, **{'output': output, 'filters': filter}))
for f in urls:
result += caller(f)
return result
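# Editor's note (not part of the original file): a hedged usage sketch. The
# extension is normally registered on a Jinja2 environment, roughly
#
#   from jinja2 import Environment
#   env = Environment(
#       extensions=['django_assets.jinja2.extension.AssetsExtension'])
#
# (the dotted path is an assumption based on this module's location). Inside an
# {% assets %} block each generated URL is exposed to the template body as
# ASSET_URL, which is why parse() stores the call-block variable under that
# name and _render_assets() invokes caller(f) once per URL.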
assets = AssetsExtension # nicer import name | agpl-3.0 | 6,206,610,526,919,973,000 | 33.540541 | 90 | 0.547565 | false |
meteorcloudy/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/cauchy_test.py | 33 | 16857 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Cauchy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.contrib.distributions.python.ops import cauchy as cauchy_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
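# Editor's note (not part of the original file): try_import() lets this test
# run without SciPy installed. In that case `stats` is None and the individual
# tests below bail out early with `if not stats: return` before comparing
# against scipy.stats.cauchy, so only the TensorFlow-side shape and sanity
# checks are exercised.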
class CauchyTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(123)
def assertAllFinite(self, tensor):
is_finite = np.isfinite(tensor.eval())
all_true = np.ones_like(is_finite, dtype=np.bool)
self.assertAllEqual(all_true, is_finite)
def _testParamShapes(self, sample_shape, expected):
with self.test_session():
param_shapes = cauchy_lib.Cauchy.param_shapes(sample_shape)
loc_shape, scale_shape = param_shapes["loc"], param_shapes["scale"]
self.assertAllEqual(expected, loc_shape.eval())
self.assertAllEqual(expected, scale_shape.eval())
loc = array_ops.zeros(loc_shape)
scale = array_ops.ones(scale_shape)
self.assertAllEqual(expected,
array_ops.shape(
cauchy_lib.Cauchy(loc, scale).sample()).eval())
def _testParamStaticShapes(self, sample_shape, expected):
param_shapes = cauchy_lib.Cauchy.param_static_shapes(sample_shape)
loc_shape, scale_shape = param_shapes["loc"], param_shapes["scale"]
self.assertEqual(expected, loc_shape)
self.assertEqual(expected, scale_shape)
def testParamShapes(self):
sample_shape = [10, 3, 4]
self._testParamShapes(sample_shape, sample_shape)
self._testParamShapes(constant_op.constant(sample_shape), sample_shape)
def testParamStaticShapes(self):
sample_shape = [10, 3, 4]
self._testParamStaticShapes(sample_shape, sample_shape)
self._testParamStaticShapes(
tensor_shape.TensorShape(sample_shape), sample_shape)
def testCauchyLogPDF(self):
with self.test_session():
batch_size = 6
loc = constant_op.constant([3.0] * batch_size)
scale = constant_op.constant([np.sqrt(10.0)] * batch_size)
x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
log_pdf = cauchy.log_prob(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
log_pdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.eval().shape)
pdf = cauchy.prob(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, pdf.shape)
self.assertAllEqual(cauchy.batch_shape, pdf.eval().shape)
if not stats:
return
expected_log_pdf = stats.cauchy(loc.eval(), scale.eval()).logpdf(x)
self.assertAllClose(expected_log_pdf, log_pdf.eval())
self.assertAllClose(np.exp(expected_log_pdf), pdf.eval())
def testCauchyLogPDFMultidimensional(self):
with self.test_session():
batch_size = 6
loc = constant_op.constant([[3.0, -3.0]] * batch_size)
scale = constant_op.constant(
[[np.sqrt(10.0), np.sqrt(15.0)]] * batch_size)
x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
log_pdf = cauchy.log_prob(x)
log_pdf_values = log_pdf.eval()
self.assertEqual(log_pdf.shape, (6, 2))
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
log_pdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.eval().shape)
pdf = cauchy.prob(x)
pdf_values = pdf.eval()
self.assertEqual(pdf.shape, (6, 2))
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf_values.shape)
self.assertAllEqual(cauchy.batch_shape, pdf.shape)
self.assertAllEqual(cauchy.batch_shape, pdf_values.shape)
if not stats:
return
expected_log_pdf = stats.cauchy(loc.eval(), scale.eval()).logpdf(x)
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testCauchyCDF(self):
with self.test_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
cdf = cauchy.cdf(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, cdf.shape)
self.assertAllEqual(cauchy.batch_shape, cdf.eval().shape)
if not stats:
return
expected_cdf = stats.cauchy(loc, scale).cdf(x)
self.assertAllClose(expected_cdf, cdf.eval(), atol=0)
def testCauchySurvivalFunction(self):
with self.test_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
sf = cauchy.survival_function(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, sf.shape)
self.assertAllEqual(cauchy.batch_shape, sf.eval().shape)
if not stats:
return
expected_sf = stats.cauchy(loc, scale).sf(x)
self.assertAllClose(expected_sf, sf.eval(), atol=0)
def testCauchyLogCDF(self):
with self.test_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-100.0, 10.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
cdf = cauchy.log_cdf(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, cdf.shape)
self.assertAllEqual(cauchy.batch_shape, cdf.eval().shape)
if not stats:
return
expected_cdf = stats.cauchy(loc, scale).logcdf(x)
self.assertAllClose(expected_cdf, cdf.eval(), atol=0, rtol=1e-5)
def testFiniteGradientAtDifficultPoints(self):
for dtype in [np.float32, np.float64]:
g = ops.Graph()
with g.as_default():
loc = variables.Variable(dtype(0.0))
scale = variables.Variable(dtype(1.0))
dist = cauchy_lib.Cauchy(loc=loc, scale=scale)
x = np.array([-100., -20., -5., 0., 5., 20., 100.]).astype(dtype)
for func in [
dist.cdf, dist.log_cdf, dist.survival_function,
dist.log_survival_function, dist.log_prob, dist.prob
]:
value = func(x)
grads = gradients_impl.gradients(value, [loc, scale])
with self.test_session(graph=g):
variables.global_variables_initializer().run()
self.assertAllFinite(value)
self.assertAllFinite(grads[0])
self.assertAllFinite(grads[1])
def testCauchyLogSurvivalFunction(self):
with self.test_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-10.0, 100.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
sf = cauchy.log_survival_function(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, sf.shape)
self.assertAllEqual(cauchy.batch_shape, sf.eval().shape)
if not stats:
return
expected_sf = stats.cauchy(loc, scale).logsf(x)
self.assertAllClose(expected_sf, sf.eval(), atol=0, rtol=1e-5)
def testCauchyEntropy(self):
with self.test_session():
loc = np.array([1.0, 1.0, 1.0])
scale = np.array([[1.0, 2.0, 3.0]])
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
entropy = cauchy.entropy()
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), entropy.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
entropy.eval().shape)
self.assertAllEqual(cauchy.batch_shape, entropy.shape)
self.assertAllEqual(cauchy.batch_shape, entropy.eval().shape)
if not stats:
return
expected_entropy = stats.cauchy(loc, scale[0]).entropy().reshape((1, 3))
self.assertAllClose(expected_entropy, entropy.eval())
def testCauchyMode(self):
with self.test_session():
# Mu will be broadcast to [7, 7, 7].
loc = [7.]
scale = [11., 12., 13.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.mode().shape)
self.assertAllEqual([7., 7, 7], cauchy.mode().eval())
def testCauchyMean(self):
with self.test_session():
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.mean().shape)
self.assertAllEqual([np.nan] * 3, cauchy.mean().eval())
def testCauchyNanMean(self):
with self.test_session():
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale, allow_nan_stats=False)
with self.assertRaises(ValueError):
cauchy.mean().eval()
def testCauchyQuantile(self):
with self.test_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
p = np.linspace(0.000001, 0.999999, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
x = cauchy.quantile(p)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), x.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), x.eval().shape)
self.assertAllEqual(cauchy.batch_shape, x.shape)
self.assertAllEqual(cauchy.batch_shape, x.eval().shape)
if not stats:
return
expected_x = stats.cauchy(loc, scale).ppf(p)
self.assertAllClose(expected_x, x.eval(), atol=0.)
def testCauchyVariance(self):
with self.test_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.variance().shape)
self.assertAllEqual([np.nan] * 3, cauchy.variance().eval())
def testCauchyNanVariance(self):
with self.test_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale, allow_nan_stats=False)
with self.assertRaises(ValueError):
cauchy.variance().eval()
def testCauchyStandardDeviation(self):
with self.test_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.stddev().shape)
self.assertAllEqual([np.nan] * 3, cauchy.stddev().eval())
def testCauchyNanStandardDeviation(self):
with self.test_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale, allow_nan_stats=False)
with self.assertRaises(ValueError):
cauchy.stddev().eval()
def testCauchySample(self):
with self.test_session():
loc = constant_op.constant(3.0)
scale = constant_op.constant(1.0)
loc_v = 3.0
n = constant_op.constant(100000)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
samples = cauchy.sample(n)
sample_values = samples.eval()
self.assertEqual(sample_values.shape, (100000,))
self.assertAllClose(np.median(sample_values), loc_v, atol=1e-1)
expected_shape = tensor_shape.TensorShape([n.eval()]).concatenate(
tensor_shape.TensorShape(cauchy.batch_shape_tensor().eval()))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
expected_shape = (
tensor_shape.TensorShape([n.eval()]).concatenate(cauchy.batch_shape))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
def testCauchySampleMultiDimensional(self):
with self.test_session():
batch_size = 2
loc = constant_op.constant([[3.0, -3.0]] * batch_size)
scale = constant_op.constant([[0.5, 1.0]] * batch_size)
loc_v = [3.0, -3.0]
n = constant_op.constant(100000)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
samples = cauchy.sample(n)
sample_values = samples.eval()
self.assertEqual(samples.shape, (100000, batch_size, 2))
self.assertAllClose(
np.median(sample_values[:, 0, 0]), loc_v[0], atol=1e-1)
self.assertAllClose(
np.median(sample_values[:, 0, 1]), loc_v[1], atol=1e-1)
expected_shape = tensor_shape.TensorShape([n.eval()]).concatenate(
tensor_shape.TensorShape(cauchy.batch_shape_tensor().eval()))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
expected_shape = (
tensor_shape.TensorShape([n.eval()]).concatenate(cauchy.batch_shape))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
  def testCauchyNegativeScaleFails(self):
with self.test_session():
cauchy = cauchy_lib.Cauchy(loc=[1.], scale=[-5.], validate_args=True)
with self.assertRaisesOpError("Condition x > 0 did not hold"):
cauchy.mode().eval()
def testCauchyShape(self):
with self.test_session():
loc = constant_op.constant([-3.0] * 5)
scale = constant_op.constant(11.0)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertEqual(cauchy.batch_shape_tensor().eval(), [5])
self.assertEqual(cauchy.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(cauchy.event_shape_tensor().eval(), [])
self.assertEqual(cauchy.event_shape, tensor_shape.TensorShape([]))
def testCauchyShapeWithPlaceholders(self):
loc = array_ops.placeholder(dtype=dtypes.float32)
scale = array_ops.placeholder(dtype=dtypes.float32)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
with self.test_session() as sess:
# get_batch_shape should return an "<unknown>" tensor.
self.assertEqual(cauchy.batch_shape, tensor_shape.TensorShape(None))
self.assertEqual(cauchy.event_shape, ())
self.assertAllEqual(cauchy.event_shape_tensor().eval(), [])
self.assertAllEqual(
sess.run(
cauchy.batch_shape_tensor(),
feed_dict={
loc: 5.0,
scale: [1.0, 2.0]
}), [2])
if __name__ == "__main__":
test.main()
| apache-2.0 | 401,326,081,878,705,400 | 37.486301 | 80 | 0.649938 | false |
kailIII/emaresa | aeroo/report_aeroo/barcode/EANBarCode.py | 13 | 5228 | # Copyright (c) 2009-2011 Alistek Ltd (http://www.alistek.com) All Rights Reserved.
# General contacts <[email protected]>
from tools import config, ustr
fontsize = 15
"""
This class generates an EAN bar code; it requires PIL (the Python Imaging
Library) to be installed.
If the code has no checksum (12 digits), it is added automatically.
Create a bar code sample:
from EANBarCode import EanBarCode
bar = EanBarCode()
bar.getImage("9782212110708",50,"gif")
"""
class EanBarCode:
""" Compute the EAN bar code """
def __init__(self):
A = {0 : "0001101", 1 : "0011001", 2 : "0010011", 3 : "0111101", 4 : "0100011",
5 : "0110001", 6 : "0101111", 7 : "0111011", 8 : "0110111", 9 : "0001011"}
B = {0 : "0100111", 1 : "0110011", 2 : "0011011", 3 : "0100001", 4 : "0011101",
5 : "0111001", 6 : "0000101", 7 : "0010001", 8 : "0001001", 9 : "0010111"}
C = {0 : "1110010", 1 : "1100110", 2 : "1101100", 3 : "1000010", 4 : "1011100",
5 : "1001110", 6 : "1010000", 7 : "1000100", 8 : "1001000", 9 : "1110100"}
self.groupC = C
self.family = {0 : (A,A,A,A,A,A), 1 : (A,A,B,A,B,B), 2 : (A,A,B,B,A,B), 3 : (A,A,B,B,B,A), 4 : (A,B,A,A,B,B),
5 : (A,B,B,A,A,B), 6 : (A,B,B,B,A,A), 7 : (A,B,A,B,A,B), 8 : (A,B,A,B,B,A), 9 : (A,B,B,A,B,A)}
def makeCode(self, code):
""" Create the binary code
return a string which contains "0" for white bar, "1" for black bar, "L" for long bar """
# Convert code string in integer list
self.EAN13 = []
for digit in code:
self.EAN13.append(int(digit))
# If the code has already a checksum
if len(self.EAN13) == 13:
# Verify checksum
self.verifyChecksum(self.EAN13)
# If the code has not yet checksum
elif len(self.EAN13) == 12:
# Add checksum value
self.EAN13.append(self.computeChecksum(self.EAN13))
# Get the left codage class
left = self.family[self.EAN13[0]]
# Add start separator
strCode = 'L0L'
# Compute the left part of bar code
for i in range(0,6):
strCode += left[i][self.EAN13[i+1]]
# Add middle separator
strCode += '0L0L0'
# Compute the right codage class
for i in range (7,13):
strCode += self.groupC[self.EAN13[i]]
# Add stop separator
strCode += 'L0L'
return strCode
def computeChecksum(self, arg):
""" Compute the checksum of bar code """
# UPCA/EAN13
weight=[1,3]*6
magic=10
sum = 0
for i in range(12): # checksum based on first 12 digits.
sum = sum + int(arg[i]) * weight[i]
z = ( magic - (sum % magic) ) % magic
if z < 0 or z >= magic:
return None
return z
def verifyChecksum(self, bits):
""" Verify the checksum """
computedChecksum = self.computeChecksum(bits[:12])
codeBarChecksum = bits[12]
if codeBarChecksum != computedChecksum:
raise Exception ("Bad checksum is %s and should be %s"%(codeBarChecksum, computedChecksum))
def getImage(self, value, height = 50, xw=1, rotate=None, extension = "PNG"):
""" Get an image with PIL library
value code barre value
height height in pixel of the bar code
extension image file extension"""
from PIL import Image, ImageFont, ImageDraw
import os
from string import lower, upper
# Get the bar code list
bits = self.makeCode(value)
        # Get the bar code with the checksum added
code = ""
for digit in self.EAN13:
code += "%d"%digit
# Create a new image
position = 8
im = Image.new("1",(len(bits)+position,height))
# Load font
ad = os.path.abspath(os.path.join(ustr(config['root_path']), u'addons'))
mod_path_list = map(lambda m: os.path.abspath(ustr(m.strip())), config['addons_path'].split(','))
mod_path_list.append(ad)
for mod_path in mod_path_list:
font_file = mod_path+os.path.sep+ \
"report_aeroo"+os.path.sep+"barcode"+os.path.sep+"FreeMonoBold.ttf"
if os.path.lexists(font_file):
font = ImageFont.truetype(font_file, fontsize)
# Create drawer
draw = ImageDraw.Draw(im)
# Erase image
draw.rectangle(((0,0),(im.size[0],im.size[1])),fill=256)
        # Draw the leading digit of the number
        draw.text((0, height-9), code[0], font=font, fill=0)
        # Draw the left part of the number
        draw.text((position+7, height-9), code[1:7], font=font, fill=0)
        # Draw the right part of the number
draw.text((len(bits)/2+6+position, height-9), code[7:], font=font, fill=0)
# Draw the bar codes
for bit in range(len(bits)):
# Draw normal bar
if bits[bit] == '1':
draw.rectangle(((bit+position,0),(bit+position,height-10)),fill=0)
# Draw long bar
elif bits[bit] == 'L':
draw.rectangle(((bit+position,0),(bit+position,height-3)),fill=0)
# Save the result image
return im
| agpl-3.0 | -404,024,201,272,994,370 | 32.088608 | 115 | 0.555853 | false |
jmighion/ansible | lib/ansible/utils/listify.py | 118 | 1462 | # (c) 2014 Michael DeHaan, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import Iterable
from ansible.module_utils.six import string_types
from ansible.template.safe_eval import safe_eval
__all__ = ['listify_lookup_plugin_terms']
def listify_lookup_plugin_terms(terms, templar, loader, fail_on_undefined=True, convert_bare=False):
if isinstance(terms, string_types):
terms = templar.template(terms.strip(), convert_bare=convert_bare, fail_on_undefined=fail_on_undefined)
else:
terms = templar.template(terms, fail_on_undefined=fail_on_undefined)
if isinstance(terms, string_types) or not isinstance(terms, Iterable):
terms = [terms]
return terms
| gpl-3.0 | 8,600,727,670,485,132,000 | 34.658537 | 111 | 0.742134 | false |
dou800/php-buildpack-legacy | builds/runtimes/python-2.7.6/lib/python2.7/lib2to3/fixes/fix_types.py | 304 | 1806 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for removing uses of the types module.
These work only for the known names in the types module. The names may be
written with or without the types. prefix; it is assumed the module is imported either as:
import types
from types import ... # either * or specific types
The import statements are not modified.
There should be another fixer that handles at least the following constants:
type([]) -> list
type(()) -> tuple
type('') -> str
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name
_TYPE_MAPPING = {
'BooleanType' : 'bool',
'BufferType' : 'memoryview',
'ClassType' : 'type',
'ComplexType' : 'complex',
'DictType': 'dict',
'DictionaryType' : 'dict',
'EllipsisType' : 'type(Ellipsis)',
#'FileType' : 'io.IOBase',
'FloatType': 'float',
'IntType': 'int',
'ListType': 'list',
'LongType': 'int',
'ObjectType' : 'object',
'NoneType': 'type(None)',
'NotImplementedType' : 'type(NotImplemented)',
'SliceType' : 'slice',
'StringType': 'bytes', # XXX ?
'StringTypes' : 'str', # XXX ?
'TupleType': 'tuple',
'TypeType' : 'type',
'UnicodeType': 'str',
'XRangeType' : 'range',
}
_pats = ["power< 'types' trailer< '.' name='%s' > >" % t for t in _TYPE_MAPPING]
class FixTypes(fixer_base.BaseFix):
BM_compatible = True
PATTERN = '|'.join(_pats)
def transform(self, node, results):
new_value = unicode(_TYPE_MAPPING.get(results["name"].value))
if new_value:
return Name(new_value, prefix=node.prefix)
return None
| mit | 4,309,489,426,129,548,000 | 28.129032 | 80 | 0.59247 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/models/virtual_machine_status_code_count_py3.py | 5 | 1325 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineStatusCodeCount(Model):
"""The status code and count of the virtual machine scale set instance view
status summary.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar code: The instance view status code.
:vartype code: str
:ivar count: The number of instances having a particular status code.
:vartype count: int
"""
_validation = {
'code': {'readonly': True},
'count': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'count': {'key': 'count', 'type': 'int'},
}
def __init__(self, **kwargs) -> None:
super(VirtualMachineStatusCodeCount, self).__init__(**kwargs)
self.code = None
self.count = None
| mit | 7,454,663,260,480,810,000 | 31.317073 | 79 | 0.579623 | false |
adamhajari/spyre | tests/test_app.py | 1 | 5046 | # from spyre import server
from spyre import server
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from numpy import pi
class TestApp(server.App):
colors = [
{"label": "Green", "value": 'g'},
{"label": "Red", "value": 'r'},
{"label": "Blue", "value": 'b'},
{"label": "Yellow", "value": 'y'},
]
on_demand_streaming_services = [
{"label": "Spotify", "value": 's'},
{"label": "Apple Music", "value": 'a'},
]
title = "Simple Sine Wave"
inputs = [
{
"input_type": 'text',
"label": 'Title',
"value": 'Simple Sine Wave',
"variable_name": 'title',
"action_id": "plot",
}, {
"input_type": 'radiobuttons',
"label": 'Function',
"options": [
{"label": "Sine", "value": "sin", "checked": True},
{"label": "Cosine", "value": "cos"}
],
"variable_name": 'func_type',
"action_id": "plot",
}, {
"input_type": 'checkboxgroup',
"label": 'Axis Labels',
"options": [
{"label": "x-axis", "value": 1, "checked": True},
{"label": "y-axis", "value": 2}
],
"variable_name": 'axis_label',
"action_id": "plot",
}, {
"input_type": 'dropdown',
"label": 'Line Color',
"options": colors,
"variable_name": 'color',
"value": "b",
"action_id": "plot",
}, {
"input_type": 'dropdown',
"label": 'On-Demand Streaming Service',
"options": on_demand_streaming_services,
"variable_name": 'on_demand_streaming_service',
"action_id": "plot",
}, {
"input_type": 'slider',
"label": 'frequency',
"variable_name": 'freq',
"value": 2,
"min": 1,
"max": 30,
"action_id": "plot",
}
]
controls = [
{
"control_type": "button",
"control_id": "button1",
"label": "plot",
}, {
"control_type": "button",
"control_id": "button2",
"label": "download",
}
]
outputs = [
{
"output_type": "html",
"output_id": "html1",
"control_id": "button1",
"on_page_load": True,
}, {
"output_type": "plot",
"output_id": "plot",
"control_id": "button1",
"on_page_load": True,
}, {
"output_type": "plot",
"output_id": "plot2",
"control_id": "button1",
"on_page_load": True,
}, {
"output_type": "table",
"output_id": "table_id",
"control_id": "button1",
"sortable": True,
"on_page_load": True,
}, {
"output_type": "download",
"output_id": "download_id",
"control_id": "button2",
}
]
def plot1(self, params):
fig = plt.figure() # make figure object
splt = fig.add_subplot(1, 1, 1)
f = float(params['freq'])
title = params['title']
axis_label = map(int, params['axis_label'])
color = params['color']
func_type = params['func_type']
x = np.arange(0, 6 * pi, pi / 50)
splt.set_title(title)
for axis in axis_label:
if axis == 1:
splt.set_xlabel('x axis')
if axis == 2:
splt.set_ylabel('y axis')
if func_type == 'cos':
y = np.cos(f * x)
else:
y = np.sin(f * x)
splt.plot(x, y, color=color) # sine wave
return fig
def plot2(self, params):
data = self.getData(params)
fig = plt.figure() # make figure object
splt = fig.add_subplot(1, 1, 1)
ind = np.arange(len(data['name']))
width = 0.85
splt.bar(ind, data['count'], width)
splt.set_xticks(ind + width / 2)
splt.set_xticklabels(["A", "B", "C"])
return fig
def html1(self, params):
return "hello world"
def html2(self, params):
func_type = params['func_type']
axis_label = params['axis_label']
color = params['color']
freq = params['freq']
html = (
"function type: {} <br>axis label: {}<br>color: {}<br>frequency: {}"
.format(func_type, axis_label, color, freq)
)
return html
def getJsonData(self, params):
count = [1, 4, 3]
name = ['<a href="http://adamhajari.com">A</a>', 'B', 'C']
return {'name': name, 'count': count}
def getData(self, params):
data = self.getJsonData(params)
df = pd.DataFrame(data)
return df
def noOutput(self, input_params):
return 0
# app = TestApp()
# app.launch()
| mit | -6,567,533,694,828,392,000 | 27.670455 | 80 | 0.442727 | false |
stargaser/astropy | astropy/table/info.py | 3 | 7420 | """
Table property for providing information about table.
"""
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
import os
from contextlib import contextmanager
from inspect import isclass
import numpy as np
from astropy.utils.data_info import DataInfo
__all__ = ['table_info', 'TableInfo', 'serialize_method_as']
def table_info(tbl, option='attributes', out=''):
"""
    Write summary information about the table to the ``out`` filehandle.
By default this prints to standard output via sys.stdout.
The ``option`` argument specifies what type of information
to include. This can be a string, a function, or a list of
strings or functions. Built-in options are:
- ``attributes``: basic column meta data like ``dtype`` or ``format``
- ``stats``: basic statistics: minimum, mean, and maximum
If a function is specified then that function will be called with the
column as its single argument. The function must return an OrderedDict
containing the information attributes.
If a list is provided then the information attributes will be
appended for each of the options, in order.
Examples
--------
>>> from astropy.table.table_helpers import simple_table
>>> t = simple_table(size=2, kinds='if')
>>> t['a'].unit = 'm'
>>> t.info()
<Table length=2>
name dtype unit
---- ------- ----
a int64 m
b float64
>>> t.info('stats')
<Table length=2>
name mean std min max
---- ---- --- --- ---
a 1.5 0.5 1 2
b 1.5 0.5 1.0 2.0
Parameters
----------
option : str, function, list of (str or function)
Info option, defaults to 'attributes'.
out : file-like object, None
Output destination, default is sys.stdout. If None then a
Table with information attributes is returned
Returns
-------
info : `~astropy.table.Table` if out==None else None
"""
from .table import Table
if out == '':
out = sys.stdout
descr_vals = [tbl.__class__.__name__]
if tbl.masked:
descr_vals.append('masked=True')
descr_vals.append('length={}'.format(len(tbl)))
outlines = ['<' + ' '.join(descr_vals) + '>']
cols = tbl.columns.values()
if tbl.colnames:
infos = []
for col in cols:
infos.append(col.info(option, out=None))
info = Table(infos, names=list(infos[0]))
else:
info = Table()
if out is None:
return info
# Since info is going to a filehandle for viewing then remove uninteresting
# columns.
if 'class' in info.colnames:
# Remove 'class' info column if all table columns are the same class
# and they are the default column class for that table.
uniq_types = set(type(col) for col in cols)
if len(uniq_types) == 1 and isinstance(cols[0], tbl.ColumnClass):
del info['class']
if 'n_bad' in info.colnames and np.all(info['n_bad'] == 0):
del info['n_bad']
# Standard attributes has 'length' but this is typically redundant
if 'length' in info.colnames and np.all(info['length'] == len(tbl)):
del info['length']
for name in info.colnames:
if info[name].dtype.kind in 'SU' and np.all(info[name] == ''):
del info[name]
if tbl.colnames:
outlines.extend(info.pformat(max_width=-1, max_lines=-1, show_unit=False))
else:
outlines.append('<No columns>')
out.writelines(outline + os.linesep for outline in outlines)
class TableInfo(DataInfo):
def __call__(self, option='attributes', out=''):
return table_info(self._parent, option, out)
__call__.__doc__ = table_info.__doc__
@contextmanager
def serialize_method_as(tbl, serialize_method):
"""Context manager to temporarily override individual
column info.serialize_method dict values. The serialize_method
attribute is an optional dict which might look like ``{'fits':
'jd1_jd2', 'ecsv': 'formatted_value', ..}``.
    ``serialize_method`` is a str or dict.  If str then it is the
    ``serialize_method`` that will be used for all formats.
If dict then the key values can be either:
- Column name. This has higher precedence than the second option of
matching class.
- Class (matches any column which is an instance of the class)
This context manager is expected to be used only within ``Table.write``.
It could have been a private method on Table but prefer not to add
clutter to that class.
Parameters
----------
tbl : Table object
Input table
serialize_method : dict, str
Dict with key values of column names or types, or str
Returns
-------
None (context manager)
"""
def get_override_sm(col):
"""
Determine if the ``serialize_method`` str or dict specifies an
override of column presets for ``col``. Returns the matching
serialize_method value or ``None``.
"""
# If a string then all columns match
if isinstance(serialize_method, str):
return serialize_method
# If column name then return that serialize_method
if col.info.name in serialize_method:
return serialize_method[col.info.name]
# Otherwise look for subclass matches
for key in serialize_method:
if isclass(key) and isinstance(col, key):
return serialize_method[key]
return None
# Setup for the context block. Set individual column.info.serialize_method
# values as appropriate and keep a backup copy. If ``serialize_method``
# is None or empty then don't do anything.
if serialize_method:
# Original serialize_method dict, keyed by column name. This only
# gets set if there is an override.
original_sms = {}
# Go through every column and if it has a serialize_method info
# attribute then potentially update it for the duration of the write.
for col in tbl.itercols():
if hasattr(col.info, 'serialize_method'):
override_sm = get_override_sm(col)
if override_sm:
# Make a reference copy of the column serialize_method
# dict which maps format (e.g. 'fits') to the
# appropriate method (e.g. 'data_mask').
original_sms[col.info.name] = col.info.serialize_method
# Set serialize method for *every* available format. This is
# brute force, but at this point the format ('fits', 'ecsv', etc)
# is not actually known (this gets determined by the write function
# in registry.py). Note this creates a new temporary dict object
# so that the restored version is the same original object.
col.info.serialize_method = {fmt: override_sm
for fmt in col.info.serialize_method}
# Finally yield for the context block
try:
yield
finally:
# Teardown (restore) for the context block. Be sure to do this even
# if an exception occurred.
if serialize_method:
for name, original_sm in original_sms.items():
tbl[name].info.serialize_method = original_sm
| bsd-3-clause | -3,628,156,899,267,853,300 | 33.835681 | 87 | 0.61496 | false |
brion/cerbero | cerbero/build/recipe.py | 1 | 15439 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import logging
import shutil
import tempfile
import time
from cerbero.build import build, source
from cerbero.build.filesprovider import FilesProvider
from cerbero.config import Platform
from cerbero.errors import FatalError
from cerbero.ide.vs.genlib import GenLib
from cerbero.tools.osxuniversalgenerator import OSXUniversalGenerator
from cerbero.utils import N_, _
from cerbero.utils import shell
from cerbero.utils import messages as m
class MetaRecipe(type):
    ''' This metaclass modifies the base classes of a Recipe, adding 2 new
    base classes based on the class attributes 'stype' and 'btype'.
    class NewRecipe(Recipe):
        btype = Class1                       ------>  class NewRecipe(Recipe, Class1, Class2)
stype = Class2
'''
def __new__(cls, name, bases, dct):
clsname = '%s.%s' % (dct['__module__'], name)
recipeclsname = '%s.%s' % (cls.__module__, 'Recipe')
        # only modify it for Recipe's subclasses
if clsname != recipeclsname and name == 'Recipe':
            # get the default build and source classes from Recipe
            # Recipe(DefaultSourceType, DefaultBaseType)
basedict = {'btype': bases[0].btype, 'stype': bases[0].stype}
            # if this class defines stype or btype, override the default one
            # Recipe(OverriddenSourceType, OverriddenBaseType)
for base in ['stype', 'btype']:
if base in dct:
basedict[base] = dct[base]
            # finally add these classes to the Recipe bases
            # Recipe(BaseClass, OverriddenSourceType, OverriddenBaseType)
bases = bases + tuple(basedict.values())
return type.__new__(cls, name, bases, dct)
class BuildSteps(object):
'''
Enumeration factory for build steps
'''
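    # Note: instantiating BuildSteps() returns the default ordered list of
    # steps below; CHECK, GEN_LIBFILES and MERGE are only added explicitly
    # (e.g. GEN_LIBFILES on Windows, MERGE for universal flat recipes).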
FETCH = (N_('Fetch'), 'fetch')
EXTRACT = (N_('Extract'), 'extract')
CONFIGURE = (N_('Configure'), 'configure')
COMPILE = (N_('Compile'), 'compile')
INSTALL = (N_('Install'), 'install')
POST_INSTALL = (N_('Post Install'), 'post_install')
# Not added by default
CHECK = (N_('Check'), 'check')
GEN_LIBFILES = (N_('Gen Library File'), 'gen_library_file')
MERGE = (N_('Merge universal binaries'), 'merge')
def __new__(klass):
return [BuildSteps.FETCH, BuildSteps.EXTRACT,
BuildSteps.CONFIGURE, BuildSteps.COMPILE, BuildSteps.INSTALL,
BuildSteps.POST_INSTALL]
class Recipe(FilesProvider):
'''
Base class for recipes.
A Recipe describes a module and the way it's built.
@cvar name: name of the module
@type name: str
@cvar licenses: recipe licenses
@type licenses: Licenses
@cvar version: version of the module
@type version: str
@cvar sources: url of the sources
@type sources: str
@cvar stype: type of sources
@type stype: L{cerbero.source.SourceType}
@cvar btype: build type
@type btype: L{cerbero.build.BuildType}
@cvar deps: module dependencies
@type deps: list
    @cvar platform_deps: platform conditional dependencies
@type platform_deps: dict
@cvar runtime_dep: runtime dep common to all recipes
@type runtime_dep: bool
'''
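    # Illustrative subclass (assumed names/values, not an actual cerbero recipe):
    #   class Recipe(recipe.Recipe):
    #       name = 'zlib'
    #       version = '1.2.8'
    #       stype = source.SourceType.GIT_TARBALL
    #       btype = build.BuildType.AUTOTOOLS
    #       deps = ['gettext']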
__metaclass__ = MetaRecipe
name = None
licenses = []
version = None
package_name = None
sources = None
stype = source.SourceType.GIT_TARBALL
btype = build.BuildType.AUTOTOOLS
deps = list()
platform_deps = {}
force = False
runtime_dep = False
_default_steps = BuildSteps()
def __init__(self, config):
self.config = config
if self.package_name is None:
self.package_name = "%s-%s" % (self.name, self.version)
if not hasattr(self, 'repo_dir'):
self.repo_dir = os.path.join(self.config.local_sources,
self.package_name)
self.repo_dir = os.path.abspath(self.repo_dir)
self.build_dir = os.path.join(self.config.sources, self.package_name)
self.build_dir = os.path.abspath(self.build_dir)
self.deps = self.deps or []
self.platform_deps = self.platform_deps or []
self._steps = self._default_steps[:]
if self.config.target_platform == Platform.WINDOWS:
self._steps.append(BuildSteps.GEN_LIBFILES)
FilesProvider.__init__(self, config)
try:
self.stype.__init__(self)
self.btype.__init__(self)
except TypeError:
# should only work with subclasses that really have Build and
# Source in bases
pass
def __str__(self):
return self.name
def prepare(self):
'''
        Can be overridden by subclasses to modify the recipe depending on
        the configuration, for example modifying steps for a given platform
'''
pass
def post_install(self):
'''
        Runs post-installation steps
'''
pass
def built_version(self):
'''
Gets the current built version of the recipe.
Sources can override it to provide extended info in the version
such as the commit hash for recipes using git and building against
master: eg (1.2.0~git+2345435)
'''
if hasattr(self.stype, 'built_version'):
return self.stype.built_version(self)
return self.version
def list_deps(self):
'''
List all dependencies including conditional dependencies
'''
deps = []
deps.extend(self.deps)
if self.config.target_platform in self.platform_deps:
deps.extend(self.platform_deps[self.config.target_platform])
if self.config.variants.gi and self.use_gobject_introspection():
if self.name != 'gobject-introspection':
deps.append('gobject-introspection')
return deps
def list_licenses_by_categories(self, categories):
licenses = {}
for c in categories:
if c in licenses:
raise Exception('multiple licenses for the same category %s '
'defined' % c)
if not c:
licenses[None] = self.licenses
continue
attr = 'licenses_' + c
platform_attr = 'platform_licenses_' + c
if hasattr(self, attr):
licenses[c] = getattr(self, attr)
elif hasattr(self, platform_attr):
l = getattr(self, platform_attr)
licenses[c] = l.get(self.platform, [])
else:
licenses[c] = self.licenses
return licenses
def gen_library_file(self, output_dir=None):
'''
        Generates library files (.lib) for the DLLs provided by this recipe
'''
genlib = GenLib()
for dllpath in self.libraries():
try:
implib = genlib.create(
os.path.join(self.config.prefix, dllpath),
self.config.target_arch,
os.path.join(self.config.prefix, 'lib'))
logging.debug('Created %s' % implib)
except:
m.warning("Could not create .lib, gendef might be missing")
def recipe_dir(self):
'''
Gets the directory path where this recipe is stored
@return: directory path
@rtype: str
'''
return os.path.dirname(self.__file__)
def relative_path(self, path):
'''
Gets a path relative to the recipe's directory
        @return: absolute path resolved against the recipe's directory
@rtype: str
'''
return os.path.abspath(os.path.join(self.recipe_dir(), path))
@property
def steps(self):
return self._steps
def _remove_steps(self, steps):
self._steps = [x for x in self._steps if x not in steps]
class MetaUniversalRecipe(type):
'''
Wraps all the build steps for the universal recipe to be called for each
one of the child recipes.
'''
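    # Each build step name becomes a method on the class, so e.g. calling
    # universal_recipe.compile() dispatches to _do_step('compile').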
def __init__(cls, name, bases, ns):
step_func = ns.get('_do_step')
for _, step in BuildSteps():
setattr(cls, step, lambda self, name=step: step_func(self, name))
class UniversalRecipe(object):
'''
Stores similar recipe objects that are going to be built together
Useful for the universal architecture, where the same recipe needs
to be built for different architectures before being merged. For the
other targets, it will likely be a unitary group
'''
__metaclass__ = MetaUniversalRecipe
def __init__(self, config):
self._config = config
self._recipes = {}
self._proxy_recipe = None
def __str__(self):
if self._recipes.values():
return str(self._recipes.values()[0])
return super(UniversalRecipe, self).__str__()
def add_recipe(self, recipe):
'''
Adds a new recipe to the group
'''
if self._proxy_recipe is None:
self._proxy_recipe = recipe
else:
if recipe.name != self._proxy_recipe.name:
raise FatalError(_("Recipes must have the same name"))
self._recipes[recipe.config.target_arch] = recipe
def is_empty(self):
return len(self._recipes) == 0
@property
def steps(self):
if self.is_empty():
return []
return self._proxy_recipe.steps[:]
def __getattr__(self, name):
if not self._proxy_recipe:
raise AttributeError(_("Attribute %s was not found in the "
"Universal recipe, which is empty. You might need to add a "
"recipe first."))
return getattr(self._proxy_recipe, name)
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
if name not in ['_config', '_recipes', '_proxy_recipe']:
for o in self._recipes.values():
setattr(o, name, value)
def _do_step(self, step):
if step in BuildSteps.FETCH:
# No, really, let's not download a million times...
stepfunc = getattr(self._recipes.values()[0], step)
stepfunc()
return
for arch, recipe in self._recipes.iteritems():
config = self._config.arch_config[arch]
config.do_setup_env()
stepfunc = getattr(recipe, step)
# Call the step function
stepfunc()
class UniversalFlatRecipe(UniversalRecipe):
'''
    Universal recipe for iOS and OS X creating flat libraries
in the target prefix instead of subdirs for each architecture
'''
def __init__(self, config):
UniversalRecipe.__init__(self, config)
@property
def steps(self):
if self.is_empty():
return []
return self._proxy_recipe.steps[:] + [BuildSteps.MERGE]
def merge(self):
arch_inputs = {}
for arch, recipe in self._recipes.iteritems():
            # change the prefix temporarily to the arch prefix where files are
# actually installed
recipe.config.prefix = os.path.join(self.config.prefix, arch)
arch_inputs[arch] = set(recipe.files_list())
recipe.config.prefix = self._config.prefix
# merge the common files
inputs = reduce(lambda x, y: x & y, arch_inputs.values())
output = self._config.prefix
generator = OSXUniversalGenerator(output)
generator.merge_files(list(inputs),
[os.path.join(self._config.prefix, arch) for arch in
self._recipes.keys()])
# merge the architecture specific files
for arch in self._recipes.keys():
ainputs = list(inputs ^ arch_inputs[arch])
output = self._config.prefix
generator = OSXUniversalGenerator(output)
generator.merge_files(ainputs,
[os.path.join(self._config.prefix, arch)])
def _do_step(self, step):
if step in BuildSteps.FETCH:
# No, really, let's not download a million times...
stepfunc = getattr(self._recipes.values()[0], step)
stepfunc()
return
        # For the universal build we need to configure both architectures
        # with the same final prefix, but we want to install each architecture
# on a different path (eg: /path/to/prefix/x86).
archs_prefix = self._recipes.keys()
for arch, recipe in self._recipes.iteritems():
config = self._config.arch_config[arch]
config.do_setup_env()
stepfunc = getattr(recipe, step)
# Create a stamp file to list installed files based on the
# modification time of this file
if step in [BuildSteps.INSTALL[1], BuildSteps.POST_INSTALL[1]]:
time.sleep(2) #wait 2 seconds to make sure new files get the
#proper time difference, this fixes an issue of
#the next recipe to be built listing the previous
#recipe files as their own
tmp = tempfile.NamedTemporaryFile()
# the modification time resolution depends on the filesystem,
# where FAT32 has a resolution of 2 seconds and ext4 1 second
t = time.time() - 2
os.utime(tmp.name, (t, t))
# Call the step function
stepfunc()
# Move installed files to the architecture prefix
if step in [BuildSteps.INSTALL[1], BuildSteps.POST_INSTALL[1]]:
installed_files = shell.find_newer_files(self._config.prefix,
tmp.name, True)
tmp.close()
for f in installed_files:
def not_in_prefix(src):
for p in archs_prefix + ['Libraries']:
if src.startswith(p):
return True
return False
# skip files that are installed in the arch prefix
if not_in_prefix(f):
continue
src = os.path.join(self._config.prefix, f)
dest = os.path.join(self._config.prefix,
recipe.config.target_arch, f)
if not os.path.exists(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
shutil.move(src, dest)
| lgpl-2.1 | 1,701,697,816,719,793,200 | 34.738426 | 79 | 0.587344 | false |
kornicameister/ansible-modules-extras | monitoring/pagerduty_alert.py | 121 | 7587 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: pagerduty_alert
short_description: Trigger, acknowledge or resolve PagerDuty incidents
description:
- This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events
version_added: "1.9"
author:
- "Amanpreet Singh (@aps-sids)"
requirements:
- PagerDuty API access
options:
name:
description:
- PagerDuty unique subdomain.
required: true
service_key:
description:
- The GUID of one of your "Generic API" services.
- This is the "service key" listed on a Generic API's service detail page.
required: true
state:
description:
- Type of event to be sent.
required: true
choices:
- 'triggered'
- 'acknowledged'
- 'resolved'
api_key:
description:
- The pagerduty API key (readonly access), generated on the pagerduty site.
required: true
desc:
description:
- For C(triggered) I(state) - Required. Short description of the problem that led to this trigger. This field (or a truncated version) will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI. The maximum length is 1024 characters.
- For C(acknowledged) or C(resolved) I(state) - Text that will appear in the incident's log associated with this event.
required: false
default: Created via Ansible
incident_key:
description:
- Identifies the incident to which this I(state) should be applied.
- For C(triggered) I(state) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup" problem reports.
- For C(acknowledged) or C(resolved) I(state) - This should be the incident_key you received back when the incident was first opened by a trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded.
required: false
client:
description:
- The name of the monitoring client that is triggering this event.
required: false
client_url:
description:
- The URL of the monitoring client that is triggering this event.
required: false
'''
EXAMPLES = '''
# Trigger an incident with just the basic options
- pagerduty_alert:
    name=companyabc
    service_key=xxx
    api_key=yourapikey
    state=triggered
    desc="problem that led to this trigger"
# Trigger an incident with more options
- pagerduty_alert:
service_key=xxx
api_key=yourapikey
state=triggered
desc="problem that led to this trigger"
incident_key=somekey
client="Sample Monitoring Service"
client_url=http://service.example.com
# Acknowledge an incident based on incident_key
- pagerduty_alert:
service_key=xxx
api_key=yourapikey
state=acknowledged
incident_key=somekey
desc="some text for incident's log"
# Resolve an incident based on incident_key
- pagerduty_alert:
service_key=xxx
api_key=yourapikey
state=resolved
incident_key=somekey
desc="some text for incident's log"
'''
def check(module, name, state, service_key, api_key, incident_key=None):
url = "https://%s.pagerduty.com/api/v1/incidents" % name
headers = {
"Content-type": "application/json",
"Authorization": "Token token=%s" % api_key
}
data = {
"service_key": service_key,
"incident_key": incident_key,
"sort_by": "incident_number:desc"
}
response, info = fetch_url(module, url, method='get',
headers=headers, data=json.dumps(data))
if info['status'] != 200:
module.fail_json(msg="failed to check current incident status."
"Reason: %s" % info['msg'])
json_out = json.loads(response.read())["incidents"][0]
if state != json_out["status"]:
return json_out, True
return json_out, False
def send_event(module, service_key, event_type, desc,
incident_key=None, client=None, client_url=None):
url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
headers = {
"Content-type": "application/json"
}
data = {
"service_key": service_key,
"event_type": event_type,
"incident_key": incident_key,
"description": desc,
"client": client,
"client_url": client_url
}
response, info = fetch_url(module, url, method='post',
headers=headers, data=json.dumps(data))
if info['status'] != 200:
module.fail_json(msg="failed to %s. Reason: %s" %
(event_type, info['msg']))
json_out = json.loads(response.read())
return json_out
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
service_key=dict(required=True),
api_key=dict(required=True),
state=dict(required=True,
choices=['triggered', 'acknowledged', 'resolved']),
client=dict(required=False, default=None),
client_url=dict(required=False, default=None),
desc=dict(required=False, default='Created via Ansible'),
incident_key=dict(required=False, default=None)
),
supports_check_mode=True
)
name = module.params['name']
service_key = module.params['service_key']
api_key = module.params['api_key']
state = module.params['state']
client = module.params['client']
client_url = module.params['client_url']
desc = module.params['desc']
incident_key = module.params['incident_key']
state_event_dict = {
'triggered': 'trigger',
'acknowledged': 'acknowledge',
'resolved': 'resolve'
}
event_type = state_event_dict[state]
if event_type != 'trigger' and incident_key is None:
module.fail_json(msg="incident_key is required for "
"acknowledge or resolve events")
out, changed = check(module, name, state,
service_key, api_key, incident_key)
if not module.check_mode and changed is True:
out = send_event(module, service_key, event_type, desc,
incident_key, client, client_url)
module.exit_json(result=out, changed=changed)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 | 375,890,904,282,253,500 | 34.619718 | 323 | 0.62963 | false |
8u1a/plaso | plaso/cli/helpers/interface.py | 3 | 1180 | # -*- coding: utf-8 -*-
"""The arguments helper interface."""
class ArgumentsHelper(object):
"""The CLI arguments helper class."""
NAME = u'baseline'
# Category further divides the registered helpers down after function,
# this can be something like: analysis, output, storage, etc.
CATEGORY = u''
DESCRIPTION = u''
@classmethod
def AddArguments(cls, argument_group):
"""Add command line arguments the helper supports to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group: the argparse group (instance of argparse._ArgumentGroup or
or argparse.ArgumentParser).
"""
@classmethod
def ParseOptions(cls, options, config_object):
"""Parses and validates options.
Args:
options: the parser option object (instance of argparse.Namespace).
config_object: an object that is configured by this helper.
Raises:
BadConfigObject: when the output module object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation.
"""
| apache-2.0 | -137,777,931,685,640,900 | 30.891892 | 80 | 0.700847 | false |
HyperBaton/ansible | lib/ansible/modules/network/fortios/fortios_switch_controller_sflow.py | 7 | 8728 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_switch_controller_sflow
short_description: Configure FortiSwitch sFlow in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify switch_controller feature and sflow category.
      Examples include all parameters and values which need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
switch_controller_sflow:
description:
- Configure FortiSwitch sFlow.
default: null
type: dict
suboptions:
collector_ip:
description:
- Collector IP.
type: str
collector_port:
description:
- SFlow collector port (0 - 65535).
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure FortiSwitch sFlow.
fortios_switch_controller_sflow:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
switch_controller_sflow:
collector_ip: "<your_own_value>"
collector_port: "4"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_switch_controller_sflow_data(json):
option_list = ['collector_ip', 'collector_port']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def switch_controller_sflow(data, fos):
vdom = data['vdom']
switch_controller_sflow_data = data['switch_controller_sflow']
filtered_data = underscore_to_hyphen(filter_switch_controller_sflow_data(switch_controller_sflow_data))
return fos.set('switch-controller',
'sflow',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_switch_controller(data, fos):
if data['switch_controller_sflow']:
resp = switch_controller_sflow(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"switch_controller_sflow": {
"required": False, "type": "dict", "default": None,
"options": {
"collector_ip": {"required": False, "type": "str"},
"collector_port": {"required": False, "type": "int"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_switch_controller(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_switch_controller(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | 9,207,733,811,954,386,000 | 28.586441 | 107 | 0.626948 | false |
cmccoy/startreerenaissance | deploy/tag-instances.py | 1 | 1583 | #!/usr/bin/env python
import argparse
import logging
import sys
import boto
def main():
p = argparse.ArgumentParser()
p.add_argument('cluster_name')
p.add_argument('--dry-run', action='store_true')
a = p.parse_args()
conn = boto.connect_ec2()
active = [instance for res in conn.get_all_instances()
for instance in res.instances
if instance.state in set(['pending', 'running', 'stopping', 'stopped'])]
logging.info('%d active instances', len(active))
master_nodes = []
slave_nodes = []
for instance in active:
group_names = [g.name for g in instance.groups]
if group_names == [a.cluster_name + '-master']:
master_nodes.append(instance)
elif group_names == [a.cluster_name + '-slaves']:
slave_nodes.append(instance)
logging.info('%d master, %d slave', len(master_nodes), len(slave_nodes))
if master_nodes:
conn.create_tags([i.id for i in master_nodes],
{'spark_node_type': 'master'})
if slave_nodes:
conn.create_tags([i.id for i in slave_nodes],
{'spark_node_type': 'slave'})
if slave_nodes or master_nodes:
ids = [i.id for l in (master_nodes, slave_nodes) for i in l]
conn.create_tags(ids, {'Owner': 'cmccoy',
'Purpose': 'b-cell-selection',
'spark_cluster_name': a.cluster_name})
logging.info("Tagged nodes.")
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()
| gpl-3.0 | -3,601,361,887,801,866,000 | 30.039216 | 86 | 0.571699 | false |
vmturbo/nova | nova/policies/certificates.py | 1 | 1520 | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
POLICY_ROOT = 'os_compute_api:os-certificates:%s'
certificates_policies = [
policy.RuleDefault(
name=POLICY_ROOT % 'discoverable',
check_str=base.RULE_ANY),
base.create_rule_default(
POLICY_ROOT % 'create',
base.RULE_ADMIN_OR_OWNER,
"Create a root certificate. This API is deprecated.",
[
{
'method': 'POST',
'path': '/os-certificates'
}
]),
base.create_rule_default(
POLICY_ROOT % 'show',
base.RULE_ADMIN_OR_OWNER,
"Show details for a root certificate. This API is deprecated.",
[
{
'method': 'GET',
'path': '/os-certificates/root'
}
])
]
def list_rules():
return certificates_policies
| apache-2.0 | -4,966,163,898,688,920,000 | 28.230769 | 78 | 0.616447 | false |
coxmediagroup/googleads-python-lib | examples/dfp/v201505/custom_field_service/get_all_line_item_custom_fields.py | 3 | 2324 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all custom fields that apply to line items.
To create custom fields, run create_custom_fields.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CustomFieldService.getCustomFieldsByStatement
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
custom_field_service = client.GetService(
'CustomFieldService', version='v201505')
# Create statement to select only custom fields that apply to line items.
values = [{
'key': 'entityType',
'value': {
'xsi_type': 'TextValue',
'value': 'LINE_ITEM'
}
}]
query = 'WHERE entityType = :entityType'
# Create a filter statement.
statement = dfp.FilterStatement(query, values)
# Get custom fields by statement.
while True:
response = custom_field_service.getCustomFieldsByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for custom_field in response['results']:
print ('Custom field with ID \'%s\' and name \'%s\' was found.'
% (custom_field['id'], custom_field['name']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| apache-2.0 | 4,080,584,490,278,728,000 | 31.277778 | 77 | 0.701377 | false |
cgarciabarrera/wazztrick | src/Examples/EchoClient.py | 16 | 3147 | '''
Copyright (c) <2012> Tarek Galal <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR
A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import os
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0,parentdir)
import time
from Yowsup.connectionmanager import YowsupConnectionManager
class WhatsappEchoClient:
def __init__(self, target, message, waitForReceipt=False):
self.jids = []
if '-' in target:
self.jids = ["%[email protected]" % target]
else:
self.jids = ["%[email protected]" % t for t in target.split(',')]
self.message = message
self.waitForReceipt = waitForReceipt
connectionManager = YowsupConnectionManager()
self.signalsInterface = connectionManager.getSignalsInterface()
self.methodsInterface = connectionManager.getMethodsInterface()
self.signalsInterface.registerListener("auth_success", self.onAuthSuccess)
self.signalsInterface.registerListener("auth_fail", self.onAuthFailed)
if waitForReceipt:
self.signalsInterface.registerListener("receipt_messageSent", self.onMessageSent)
self.gotReceipt = False
self.signalsInterface.registerListener("disconnected", self.onDisconnected)
self.done = False
def login(self, username, password):
self.username = username
self.methodsInterface.call("auth_login", (username, password))
while not self.done:
time.sleep(0.5)
def onAuthSuccess(self, username):
print("Authed %s" % username)
if self.waitForReceipt:
self.methodsInterface.call("ready")
if len(self.jids) > 1:
self.methodsInterface.call("message_broadcast", (self.jids, self.message))
else:
self.methodsInterface.call("message_send", (self.jids[0], self.message))
print("Sent message")
if self.waitForReceipt:
timeout = 5
t = 0;
while t < timeout and not self.gotReceipt:
time.sleep(0.5)
t+=1
if not self.gotReceipt:
print("print timedout!")
else:
print("Got sent receipt")
self.done = True
def onAuthFailed(self, username, err):
print("Auth Failed!")
def onDisconnected(self, reason):
print("Disconnected because %s" %reason)
def onMessageSent(self, jid, messageId):
self.gotReceipt = True
| mit | -3,374,776,751,430,103,600 | 31.78125 | 86 | 0.745154 | false |
PepperPD/edx-pepper-platform | common/lib/xmodule/xmodule/modulestore/parsers.py | 9 | 4058 | import re
# Prefix for the branch portion of a locator URL
BRANCH_PREFIX = "/branch/"
# Prefix for the block portion of a locator URL
BLOCK_PREFIX = "/block/"
# Prefix for the version portion of a locator URL, when it is preceded by a course ID
VERSION_PREFIX = "/version/"
# Prefix for version when it begins the URL (no course ID).
URL_VERSION_PREFIX = 'version/'
URL_RE = re.compile(r'^edx://(.+)$', re.IGNORECASE)
def parse_url(string):
"""
A url must begin with 'edx://' (case-insensitive match),
followed by either a version_guid or a course_id.
Examples:
'edx://version/0123FFFF'
'edx://mit.eecs.6002x'
'edx://mit.eecs.6002x;published'
'edx://mit.eecs.6002x;published/block/HW3'
'edx://mit.eecs.6002x;published/version/000eee12345/block/HW3'
This returns None if string cannot be parsed.
If it can be parsed as a version_guid with no preceding course_id, returns a dict
with key 'version_guid' and the value,
If it can be parsed as a course_id, returns a dict
with key 'id' and optional keys 'branch' and 'version_guid'.
"""
match = URL_RE.match(string)
if not match:
return None
path = match.group(1)
if path.startswith(URL_VERSION_PREFIX):
return parse_guid(path[len(URL_VERSION_PREFIX):])
return parse_course_id(path)
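# Illustrative examples (added by the editor, not part of the original module),
# based on the regular expressions defined above:
#   parse_url('edx://version/0123FFFF')
#       -> {'version_guid': '0123FFFF', 'block': None}
#   parse_url('edx://mit.eecs.6002x/branch/published')
#       -> {'id': 'mit.eecs.6002x', 'branch': 'published', 'version_guid': None, 'block': None}
#   parse_url('mit.eecs.6002x')   # missing the edx:// prefix
#       -> None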
BLOCK_RE = re.compile(r'^\w+$', re.IGNORECASE)
def parse_block_ref(string):
r"""
A block_ref is a string of word_chars.
<word_chars> matches one or more Unicode word characters; this includes most
characters that can be part of a word in any language, as well as numbers
and the underscore. (see definition of \w in python regular expressions,
at http://docs.python.org/dev/library/re.html)
If string is a block_ref, returns a dict with key 'block_ref' and the value,
otherwise returns None.
"""
if len(string) > 0 and BLOCK_RE.match(string):
return {'block': string}
return None
GUID_RE = re.compile(r'^(?P<version_guid>[A-F0-9]+)(' + BLOCK_PREFIX + '(?P<block>\w+))?$', re.IGNORECASE)
def parse_guid(string):
"""
A version_guid is a string of hex digits (0-F).
If string is a version_guid, returns a dict with key 'version_guid' and the value,
otherwise returns None.
"""
m = GUID_RE.match(string)
if m is not None:
return m.groupdict()
else:
return None
COURSE_ID_RE = re.compile(
r'^(?P<id>(\w+)(\.\w+\w*)*)(' +
BRANCH_PREFIX + '(?P<branch>\w+))?(' +
VERSION_PREFIX + '(?P<version_guid>[A-F0-9]+))?(' +
BLOCK_PREFIX + '(?P<block>\w+))?$', re.IGNORECASE
)
def parse_course_id(string):
r"""
A course_id has a main id component.
There may also be an optional branch (/branch/published or /branch/draft).
There may also be an optional version (/version/519665f6223ebd6980884f2b).
There may also be an optional block (/block/HW3 or /block/Quiz2).
Examples of valid course_ids:
'mit.eecs.6002x'
'mit.eecs.6002x/branch/published'
'mit.eecs.6002x/block/HW3'
'mit.eecs.6002x/branch/published/block/HW3'
'mit.eecs.6002x/branch/published/version/519665f6223ebd6980884f2b/block/HW3'
Syntax:
course_id = main_id [/branch/ branch] [/version/ version ] [/block/ block]
main_id = name [. name]*
branch = name
block = name
name = <word_chars>
<word_chars> matches one or more Unicode word characters; this includes most
characters that can be part of a word in any language, as well as numbers
and the underscore. (see definition of \w in python regular expressions,
at http://docs.python.org/dev/library/re.html)
If string is a course_id, returns a dict with keys 'id', 'branch', and 'block'.
Revision is optional: if missing returned_dict['branch'] is None.
Block is optional: if missing returned_dict['block'] is None.
Else returns None.
"""
match = COURSE_ID_RE.match(string)
if not match:
return None
return match.groupdict()
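# Illustrative example (added by the editor, not part of the original module):
#   parse_course_id('mit.eecs.6002x/branch/published/block/HW3')
#   -> {'id': 'mit.eecs.6002x', 'branch': 'published', 'version_guid': None, 'block': 'HW3'}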
| agpl-3.0 | 3,609,720,307,175,718,000 | 29.742424 | 106 | 0.653277 | false |
MER-GROUP/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/db/backends/spatialite/operations.py | 282 | 14391 | import re
from decimal import Decimal
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.contrib.gis.db.backends.util import SpatialOperation, SpatialFunction
from django.contrib.gis.db.backends.spatialite.adapter import SpatiaLiteAdapter
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.base import DatabaseOperations
from django.db.utils import DatabaseError
class SpatiaLiteOperator(SpatialOperation):
"For SpatiaLite operators (e.g. `&&`, `~`)."
def __init__(self, operator):
super(SpatiaLiteOperator, self).__init__(operator=operator)
class SpatiaLiteFunction(SpatialFunction):
"For SpatiaLite function calls."
def __init__(self, function, **kwargs):
super(SpatiaLiteFunction, self).__init__(function, **kwargs)
class SpatiaLiteFunctionParam(SpatiaLiteFunction):
"For SpatiaLite functions that take another parameter."
sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s)'
class SpatiaLiteDistance(SpatiaLiteFunction):
"For SpatiaLite distance operations."
dist_func = 'Distance'
sql_template = '%(function)s(%(geo_col)s, %(geometry)s) %(operator)s %%s'
def __init__(self, operator):
super(SpatiaLiteDistance, self).__init__(self.dist_func,
operator=operator)
class SpatiaLiteRelate(SpatiaLiteFunctionParam):
"For SpatiaLite Relate(<geom>, <pattern>) calls."
pattern_regex = re.compile(r'^[012TF\*]{9}$')
def __init__(self, pattern):
if not self.pattern_regex.match(pattern):
raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
super(SpatiaLiteRelate, self).__init__('Relate')
# Valid distance types and substitutions
dtypes = (Decimal, Distance, float, int, long)
def get_dist_ops(operator):
"Returns operations for regular distances; spherical distances are not currently supported."
return (SpatiaLiteDistance(operator),)
class SpatiaLiteOperations(DatabaseOperations, BaseSpatialOperations):
compiler_module = 'django.contrib.gis.db.models.sql.compiler'
name = 'spatialite'
spatialite = True
version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
valid_aggregates = dict([(k, None) for k in ('Extent', 'Union')])
Adapter = SpatiaLiteAdapter
Adaptor = Adapter # Backwards-compatibility alias.
area = 'Area'
centroid = 'Centroid'
contained = 'MbrWithin'
difference = 'Difference'
distance = 'Distance'
envelope = 'Envelope'
intersection = 'Intersection'
length = 'GLength' # OpenGis defines Length, but this conflicts with an SQLite reserved keyword
num_geom = 'NumGeometries'
num_points = 'NumPoints'
point_on_surface = 'PointOnSurface'
scale = 'ScaleCoords'
svg = 'AsSVG'
sym_difference = 'SymDifference'
transform = 'Transform'
translate = 'ShiftCoords'
union = 'GUnion' # OpenGis defines Union, but this conflicts with an SQLite reserved keyword
unionagg = 'GUnion'
from_text = 'GeomFromText'
from_wkb = 'GeomFromWKB'
select = 'AsText(%s)'
geometry_functions = {
'equals' : SpatiaLiteFunction('Equals'),
'disjoint' : SpatiaLiteFunction('Disjoint'),
'touches' : SpatiaLiteFunction('Touches'),
'crosses' : SpatiaLiteFunction('Crosses'),
'within' : SpatiaLiteFunction('Within'),
'overlaps' : SpatiaLiteFunction('Overlaps'),
'contains' : SpatiaLiteFunction('Contains'),
'intersects' : SpatiaLiteFunction('Intersects'),
'relate' : (SpatiaLiteRelate, basestring),
        # Returns true if B's bounding box completely contains A's bounding box.
'contained' : SpatiaLiteFunction('MbrWithin'),
# Returns true if A's bounding box completely contains B's bounding box.
'bbcontains' : SpatiaLiteFunction('MbrContains'),
# Returns true if A's bounding box overlaps B's bounding box.
'bboverlaps' : SpatiaLiteFunction('MbrOverlaps'),
# These are implemented here as synonyms for Equals
'same_as' : SpatiaLiteFunction('Equals'),
'exact' : SpatiaLiteFunction('Equals'),
}
distance_functions = {
'distance_gt' : (get_dist_ops('>'), dtypes),
'distance_gte' : (get_dist_ops('>='), dtypes),
'distance_lt' : (get_dist_ops('<'), dtypes),
'distance_lte' : (get_dist_ops('<='), dtypes),
}
geometry_functions.update(distance_functions)
def __init__(self, connection):
super(DatabaseOperations, self).__init__()
self.connection = connection
# Determine the version of the SpatiaLite library.
try:
vtup = self.spatialite_version_tuple()
version = vtup[1:]
if version < (2, 3, 0):
raise ImproperlyConfigured('GeoDjango only supports SpatiaLite versions '
'2.3.0 and above')
self.spatial_version = version
except ImproperlyConfigured:
raise
except Exception, msg:
raise ImproperlyConfigured('Cannot determine the SpatiaLite version for the "%s" '
'database (error was "%s"). Was the SpatiaLite initialization '
'SQL loaded on this database?' %
(self.connection.settings_dict['NAME'], msg))
# Creating the GIS terms dictionary.
gis_terms = ['isnull']
gis_terms += self.geometry_functions.keys()
self.gis_terms = dict([(term, None) for term in gis_terms])
def check_aggregate_support(self, aggregate):
"""
Checks if the given aggregate name is supported (that is, if it's
in `self.valid_aggregates`).
"""
agg_name = aggregate.__class__.__name__
return agg_name in self.valid_aggregates
def convert_geom(self, wkt, geo_field):
"""
Converts geometry WKT returned from a SpatiaLite aggregate.
"""
if wkt:
return Geometry(wkt, geo_field.srid)
else:
return None
def geo_db_type(self, f):
"""
        Returns None because geometry columns are added via the
`AddGeometryColumn` stored procedure on SpatiaLite.
"""
return None
def get_distance(self, f, value, lookup_type):
"""
Returns the distance parameters for the given geometry field,
lookup value, and lookup type. SpatiaLite only supports regular
cartesian-based queries (no spheroid/sphere calculations for point
geometries like PostGIS).
"""
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
raise ValueError('SpatiaLite does not support distance queries on '
'geometry fields with a geodetic coordinate system. '
                                 'Distance objects are not supported; use a numeric value of your '
'distance in degrees instead.')
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
dist_param = value
return [dist_param]
def get_geom_placeholder(self, f, value):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
Transform() and GeomFromText() function call(s).
"""
def transform_value(value, srid):
return not (value is None or value.srid == srid)
if hasattr(value, 'expression'):
if transform_value(value, f.srid):
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
else:
placeholder = '%s'
            # No geometry value used for F expression, substitute in
# the column name instead.
return placeholder % '%s.%s' % tuple(map(self.quote_name, value.cols[value.expression]))
else:
if transform_value(value, f.srid):
# Adding Transform() to the SQL placeholder.
return '%s(%s(%%s,%s), %s)' % (self.transform, self.from_text, value.srid, f.srid)
else:
return '%s(%%s,%s)' % (self.from_text, f.srid)
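    # Editor's note (illustrative, not part of the original source): for a geometry
    # field declared with srid=4326 and a geometry value whose srid is 900913, the
    # method above returns the placeholder "Transform(GeomFromText(%s,900913), 4326)";
    # a value already in srid 4326 yields "GeomFromText(%s,4326)".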
def _get_spatialite_func(self, func):
"""
Helper routine for calling SpatiaLite functions and returning
their result.
"""
cursor = self.connection._cursor()
try:
try:
cursor.execute('SELECT %s' % func)
row = cursor.fetchone()
except:
# Responsibility of caller to perform error handling.
raise
finally:
cursor.close()
return row[0]
def geos_version(self):
"Returns the version of GEOS used by SpatiaLite as a string."
return self._get_spatialite_func('geos_version()')
def proj4_version(self):
"Returns the version of the PROJ.4 library used by SpatiaLite."
return self._get_spatialite_func('proj4_version()')
def spatialite_version(self):
"Returns the SpatiaLite library version as a string."
return self._get_spatialite_func('spatialite_version()')
def spatialite_version_tuple(self):
"""
Returns the SpatiaLite version as a tuple (version string, major,
minor, subminor).
"""
# Getting the SpatiaLite version.
try:
version = self.spatialite_version()
except DatabaseError:
# The `spatialite_version` function first appeared in version 2.3.1
# of SpatiaLite, so doing a fallback test for 2.3.0 (which is
# used by popular Debian/Ubuntu packages).
version = None
try:
tmp = self._get_spatialite_func("X(GeomFromText('POINT(1 1)'))")
if tmp == 1.0: version = '2.3.0'
except DatabaseError:
pass
# If no version string defined, then just re-raise the original
# exception.
if version is None: raise
m = self.version_regex.match(version)
if m:
major = int(m.group('major'))
minor1 = int(m.group('minor1'))
minor2 = int(m.group('minor2'))
else:
raise Exception('Could not parse SpatiaLite version string: %s' % version)
return (version, major, minor1, minor2)
def spatial_aggregate_sql(self, agg):
"""
Returns the spatial aggregate SQL template and function for the
given Aggregate instance.
"""
agg_name = agg.__class__.__name__
if not self.check_aggregate_support(agg):
            raise NotImplementedError('%s spatial aggregate is not implemented for this backend.' % agg_name)
agg_name = agg_name.lower()
if agg_name == 'union': agg_name += 'agg'
sql_template = self.select % '%(function)s(%(field)s)'
sql_function = getattr(self, agg_name)
return sql_template, sql_function
def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
"""
Returns the SpatiaLite-specific SQL for the given lookup value
[a tuple of (alias, column, db_type)], lookup type, lookup
value, the model field, and the quoting function.
"""
alias, col, db_type = lvalue
# Getting the quoted field as `geo_col`.
geo_col = '%s.%s' % (qn(alias), qn(col))
if lookup_type in self.geometry_functions:
# See if a SpatiaLite geometry function matches the lookup type.
tmp = self.geometry_functions[lookup_type]
# Lookup types that are tuples take tuple arguments, e.g., 'relate' and
# distance lookups.
if isinstance(tmp, tuple):
# First element of tuple is the SpatiaLiteOperation instance, and the
# second element is either the type or a tuple of acceptable types
                # that may be passed in as further parameters for the lookup type.
op, arg_type = tmp
# Ensuring that a tuple _value_ was passed in from the user
if not isinstance(value, (tuple, list)):
raise ValueError('Tuple required for `%s` lookup type.' % lookup_type)
# Geometry is first element of lookup tuple.
geom = value[0]
# Number of valid tuple parameters depends on the lookup type.
if len(value) != 2:
raise ValueError('Incorrect number of parameters given for `%s` lookup type.' % lookup_type)
# Ensuring the argument type matches what we expect.
if not isinstance(value[1], arg_type):
raise ValueError('Argument type should be %s, got %s instead.' % (arg_type, type(value[1])))
# For lookup type `relate`, the op instance is not yet created (has
# to be instantiated here to check the pattern parameter).
if lookup_type == 'relate':
op = op(value[1])
elif lookup_type in self.distance_functions:
op = op[0]
else:
op = tmp
geom = value
# Calling the `as_sql` function on the operation instance.
return op.as_sql(geo_col, self.get_geom_placeholder(field, geom))
elif lookup_type == 'isnull':
# Handling 'isnull' lookup type
return "%s IS %sNULL" % (geo_col, (not value and 'NOT ' or ''))
raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.spatialite.models import GeometryColumns
return GeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.spatialite.models import SpatialRefSys
return SpatialRefSys
| apache-2.0 | 4,610,522,336,507,093,500 | 40.956268 | 112 | 0.603085 | false |
albertrdixon/CouchPotatoServer | libs/html5lib/treewalkers/genshistream.py | 1730 | 2278 | from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
from . import _base
from ..constants import voidElements, namespaces
class TreeWalker(_base.TreeWalker):
def __iter__(self):
# Buffer the events so we can pass in the following one
previous = None
for event in self.tree:
if previous is not None:
for token in self.tokens(previous, event):
yield token
previous = event
# Don't forget the final event!
if previous is not None:
for token in self.tokens(previous, None):
yield token
def tokens(self, event, next):
kind, data, pos = event
if kind == START:
tag, attribs = data
name = tag.localname
namespace = tag.namespace
converted_attribs = {}
for k, v in attribs:
if isinstance(k, QName):
converted_attribs[(k.namespace, k.localname)] = v
else:
converted_attribs[(None, k)] = v
if namespace == namespaces["html"] and name in voidElements:
for token in self.emptyTag(namespace, name, converted_attribs,
not next or next[0] != END
or next[1] != tag):
yield token
else:
yield self.startTag(namespace, name, converted_attribs)
elif kind == END:
name = data.localname
namespace = data.namespace
if name not in voidElements:
yield self.endTag(namespace, name)
elif kind == COMMENT:
yield self.comment(data)
elif kind == TEXT:
for token in self.text(data):
yield token
elif kind == DOCTYPE:
yield self.doctype(*data)
elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS,
START_CDATA, END_CDATA, PI):
pass
else:
yield self.unknown(kind)
| gpl-3.0 | 8,256,999,951,376,887,000 | 32.014493 | 78 | 0.532924 | false |
sestrella/ansible | lib/ansible/modules/network/fortimanager/fmgr_provisioning.py | 52 | 11458 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_provisioning
version_added: "2.7"
author: Andrew Welsh (@Ghilli3)
short_description: Provision devices via FortiManager
description:
- Add model devices on the FortiManager using jsonrpc API and have them pre-configured,
so when central management is configured, the configuration is pushed down to the
registering devices
options:
adom:
description:
        - The administrative domain (adom) the configuration belongs to
required: true
vdom:
description:
- The virtual domain (vdom) the configuration belongs to
host:
description:
- The FortiManager's Address.
required: true
username:
description:
- The username to log into the FortiManager
required: true
password:
description:
- The password associated with the username account.
required: false
policy_package:
description:
- The name of the policy package to be assigned to the device.
required: True
name:
description:
- The name of the device to be provisioned.
required: True
group:
description:
- The name of the device group the provisioned device can belong to.
required: False
serial:
description:
- The serial number of the device that will be provisioned.
required: True
platform:
description:
- The platform of the device, such as model number or VM.
required: True
description:
description:
- Description of the device to be provisioned.
required: False
os_version:
description:
- The Fortinet OS version to be used for the device, such as 5.0 or 6.0.
required: True
minor_release:
description:
        - The minor release number such as 6.X.1, where X is the minor release.
required: False
patch_release:
description:
        - The patch release number such as 6.0.X, where X is the patch release.
required: False
os_type:
description:
- The Fortinet OS type to be pushed to the device, such as 'FOS' for FortiOS.
required: True
'''
EXAMPLES = '''
- name: Create FGT1 Model Device
fmgr_provisioning:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
adom: "root"
vdom: "root"
policy_package: "default"
name: "FGT1"
group: "Ansible"
serial: "FGVM000000117994"
platform: "FortiGate-VM64"
description: "Provisioned by Ansible"
os_version: '6.0'
minor_release: 0
patch_release: 0
os_type: 'fos'
- name: Create FGT2 Model Device
fmgr_provisioning:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
adom: "root"
vdom: "root"
policy_package: "test_pp"
name: "FGT2"
group: "Ansible"
serial: "FGVM000000117992"
platform: "FortiGate-VM64"
description: "Provisioned by Ansible"
os_version: '5.0'
minor_release: 6
patch_release: 0
os_type: 'fos'
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: str
"""
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.network.fortimanager.fortimanager import AnsibleFortiManager
# check for pyFMG lib
try:
from pyFMG.fortimgr import FortiManager
HAS_PYFMGR = True
except ImportError:
HAS_PYFMGR = False
def dev_group_exists(fmg, dev_grp_name, adom):
datagram = {
'adom': adom,
'name': dev_grp_name,
}
url = '/dvmdb/adom/{adom}/group/{dev_grp_name}'.format(adom=adom, dev_grp_name=dev_grp_name)
response = fmg.get(url, datagram)
return response
def prov_template_exists(fmg, prov_template, adom, vdom):
datagram = {
'name': prov_template,
'adom': adom,
}
url = '/pm/devprof/adom/{adom}/devprof/{name}'.format(adom=adom, name=prov_template)
response = fmg.get(url, datagram)
return response
def create_model_device(fmg, name, serial, group, platform, os_version,
os_type, minor_release, patch_release=0, adom='root'):
datagram = {
'adom': adom,
'flags': ['create_task', 'nonblocking'],
'groups': [{'name': group, 'vdom': 'root'}],
'device': {
'mr': minor_release,
'name': name,
'sn': serial,
'mgmt_mode': 'fmg',
'device action': 'add_model',
'platform_str': platform,
'os_ver': os_version,
'os_type': os_type,
'patch': patch_release,
'desc': 'Provisioned by Ansible',
}
}
url = '/dvm/cmd/add/device'
response = fmg.execute(url, datagram)
return response
def update_flags(fmg, name):
datagram = {
'flags': ['is_model', 'linked_to_model']
}
url = 'dvmdb/device/{name}'.format(name=name)
response = fmg.update(url, datagram)
return response
def assign_provision_template(fmg, template, adom, target):
datagram = {
'name': template,
'type': 'devprof',
'description': 'Provisioned by Ansible',
'scope member': [{'name': target}]
}
url = "/pm/devprof/adom/{adom}".format(adom=adom)
response = fmg.update(url, datagram)
return response
def set_devprof_scope(self, provisioning_template, adom, provision_targets):
"""
    Set the scope (target devices) of a provisioning template (devprof) on the FortiManager.
"""
fields = dict()
targets = []
fields["name"] = provisioning_template
fields["type"] = "devprof"
fields["description"] = "CreatedByAnsible"
for target in provision_targets.strip().split(","):
# split the host on the space to get the mask out
new_target = {"name": target}
targets.append(new_target)
fields["scope member"] = targets
body = {"method": "set", "params": [{"url": "/pm/devprof/adom/{adom}".format(adom=adom),
"data": fields, "session": self.session}]}
response = self.make_request(body).json()
return response
def assign_dev_grp(fmg, grp_name, device_name, vdom, adom):
datagram = {
'name': device_name,
'vdom': vdom,
}
url = "/dvmdb/adom/{adom}/group/{grp_name}/object member".format(adom=adom, grp_name=grp_name)
response = fmg.set(url, datagram)
return response
def update_install_target(fmg, device, pp='default', vdom='root', adom='root'):
datagram = {
'scope member': [{'name': device, 'vdom': vdom}],
'type': 'pkg'
}
url = '/pm/pkg/adom/{adom}/{pkg_name}'.format(adom=adom, pkg_name=pp)
response = fmg.update(url, datagram)
return response
def install_pp(fmg, device, pp='default', vdom='root', adom='root'):
datagram = {
'adom': adom,
'flags': 'nonblocking',
'pkg': pp,
'scope': [{'name': device, 'vdom': vdom}],
}
url = 'securityconsole/install/package'
response = fmg.execute(url, datagram)
return response
def main():
argument_spec = dict(
adom=dict(required=False, type="str"),
vdom=dict(required=False, type="str"),
host=dict(required=True, type="str"),
password=dict(fallback=(env_fallback, ["ANSIBLE_NET_PASSWORD"]), no_log=True),
username=dict(fallback=(env_fallback, ["ANSIBLE_NET_USERNAME"]), no_log=True),
policy_package=dict(required=False, type="str"),
name=dict(required=False, type="str"),
group=dict(required=False, type="str"),
serial=dict(required=True, type="str"),
platform=dict(required=True, type="str"),
description=dict(required=False, type="str"),
os_version=dict(required=True, type="str"),
minor_release=dict(required=False, type="str"),
patch_release=dict(required=False, type="str"),
os_type=dict(required=False, type="str"),
)
module = AnsibleModule(argument_spec, supports_check_mode=True, )
# check if params are set
if module.params["host"] is None or module.params["username"] is None:
module.fail_json(msg="Host and username are required for connection")
# check if login failed
fmg = AnsibleFortiManager(module, module.params["host"], module.params["username"], module.params["password"])
response = fmg.login()
if "FortiManager instance connnected" not in str(response):
module.fail_json(msg="Connection to FortiManager Failed")
else:
if module.params["policy_package"] is None:
module.params["policy_package"] = 'default'
if module.params["adom"] is None:
module.params["adom"] = 'root'
if module.params["vdom"] is None:
module.params["vdom"] = 'root'
if module.params["platform"] is None:
module.params["platform"] = 'FortiGate-VM64'
if module.params["os_type"] is None:
module.params["os_type"] = 'fos'
results = create_model_device(fmg,
module.params["name"],
module.params["serial"],
module.params["group"],
module.params["platform"],
module.params["os_ver"],
module.params["os_type"],
module.params["minor_release"],
module.params["patch_release"],
module.params["adom"])
if results[0] != 0:
module.fail_json(msg="Create model failed", **results)
results = update_flags(fmg, module.params["name"])
if results[0] != 0:
module.fail_json(msg="Update device flags failed", **results)
# results = assign_dev_grp(fmg, 'Ansible', 'FGVM000000117992', 'root', 'root')
# if not results[0] == 0:
# module.fail_json(msg="Setting device group failed", **results)
results = update_install_target(fmg, module.params["name"], module.params["policy_package"])
if results[0] != 0:
module.fail_json(msg="Adding device target to package failed", **results)
results = install_pp(fmg, module.params["name"], module.params["policy_package"])
if results[0] != 0:
module.fail_json(msg="Installing policy package failed", **results)
fmg.logout()
# results is returned as a tuple
return module.exit_json(**results[1])
if __name__ == "__main__":
main()
| gpl-3.0 | -5,808,844,923,420,064,000 | 30.391781 | 114 | 0.609705 | false |
TheBraveWarrior/pyload | module/plugins/hoster/LinkifierCom.py | 5 | 2486 | # -*- coding: utf-8 -*-
import hashlib
import pycurl
from ..internal.MultiHoster import MultiHoster
from ..internal.misc import json, seconds_to_midnight
class LinkifierCom(MultiHoster):
__name__ = "AlldebridCom"
__type__ = "hoster"
__version__ = "0.02"
__status__ = "testing"
__pattern__ = r'^unmatchable$'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", False),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
("revert_failed", "bool", "Revert to standard download if fails", True)]
__description__ = """Linkifier.com multi-hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
API_KEY = "d046c4309bb7cabd19f49118a2ab25e0"
API_URL = "https://api.linkifier.com/downloadapi.svc/"
def api_response(self, method, user, password, **kwargs):
post = {'login': user,
'md5Pass': hashlib.md5(password).hexdigest(),
'apiKey': self.API_KEY}
post.update(kwargs)
self.req.http.c.setopt(pycurl.HTTPHEADER, ["Content-Type: application/json; charset=utf-8"])
res = json.loads(self.load(self.API_URL + method,
post=json.dumps(post)))
self.req.http.c.setopt(pycurl.HTTPHEADER, ["Content-Type: text/html; charset=utf-8"])
return res
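    # Editor's note (descriptive comment, not part of the original plugin):
    # api_response() posts a JSON body of the form
    #   {"login": <user>, "md5Pass": md5(<password>), "apiKey": API_KEY, ...kwargs}
    # to API_URL + method, and the decoded reply is expected to expose fields such as
    # 'hasErrors', 'ErrorMSG', 'url', 'con_resume' and 'con_max', which
    # handle_premium() below relies on.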
def setup(self):
self.multiDL = True
def handle_premium(self, pyfile):
json_data = self.api_response("stream",
self.account.user,
self.account.info['login']['password'],
url=pyfile.url)
if json_data['hasErrors']:
error_msg = json_data['ErrorMSG'] or "Unknown error"
if error_msg in ("Customer reached daily limit for current hoster",
"Accounts are maxed out for current hoster"):
self.retry(wait=seconds_to_midnight())
self.fail(error_msg)
self.resume_download = json_data['con_resume']
self.chunk_limit = json_data.get('con_max', 1) or 1
self.download(json_data['url'], fixurl=False)
| gpl-3.0 | 8,166,874,974,870,715,000 | 39.096774 | 100 | 0.563154 | false |
max3903/SFLphone | daemon/libs/pjproject/tests/cdash/cfg_msvc.py | 107 | 2726 | #
# cfg_msvc.py - MSVC/Visual Studio target configurator
#
# Copyright (C) 2008-2009 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import builder
import os
import sys
# Each configurator must export this function
def create_builder(args):
usage = """\
Usage:
main.py cfg_msvc [-h|--help] [-t|--target TARGET] [cfg_site]
Arguments:
cfg_site: site configuration module. If not specified, "cfg_site"
is implied
-t,--target TARGET: Visual Studio build configuration to build. Default is
"Release|Win32". Sample values: "Debug|Win32"
-h, --help Show this help screen
"""
cfg_site = "cfg_site"
target = "Release|Win32"
in_option = ""
for arg in args:
if in_option=="-t":
target = arg
in_option = ""
elif arg=="--target" or arg=="-t":
in_option = "-t"
elif arg=="-h" or arg=="--help":
print usage
sys.exit(0)
elif arg[0]=="-":
print usage
sys.exit(1)
else:
cfg_site = arg
if os.access(cfg_site+".py", os.F_OK) == False:
print "Error: file '%s.py' doesn't exist." % (cfg_site)
sys.exit(1)
cfg_site = __import__(cfg_site)
test_cfg = builder.BaseConfig(cfg_site.BASE_DIR, \
cfg_site.URL, \
cfg_site.SITE_NAME, \
cfg_site.GROUP, \
cfg_site.OPTIONS)
config_site = "#define PJ_TODO(x)\n" + cfg_site.CONFIG_SITE
user_mak = cfg_site.USER_MAK
builders = [
builder.MSVCTestBuilder(test_cfg,
target=target,
build_config_name="default",
config_site=config_site,
exclude=cfg_site.EXCLUDE,
not_exclude=cfg_site.NOT_EXCLUDE)
]
return builders
| gpl-3.0 | -7,746,833,368,510,346,000 | 32.243902 | 79 | 0.558327 | false |
jjx02230808/project0223 | benchmarks/bench_lasso.py | 297 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause | -4,633,440,937,640,423,000 | 33.789474 | 84 | 0.566112 | false |
alvarogzp/telegram-bot | bot/bot.py | 1 | 11029 | import time
from bot import project_info
from bot.action.core.action import Action
from bot.action.core.update import Update
from bot.action.standard.about import VersionAction
from bot.action.standard.admin.config_status import ConfigStatus
from bot.action.util.textformat import FormattedText
from bot.api.api import Api
from bot.api.async import AsyncApi
from bot.api.telegram import TelegramBotApi
from bot.logger.admin_logger import AdminLogger
from bot.logger.worker_logger import WorkerStartStopLogger
from bot.multithreading.scheduler import SchedulerApi
from bot.storage import Config, Cache
from bot.storage import State
CONFIG_DIR = "config"
STATE_DIR = "state"
class Bot:
def __init__(self, project_name: str = ""):
"""
        :param project_name: Optional name to be displayed in the starting message sent to the admin chat.
"""
self.config = Config(CONFIG_DIR)
self.state = State(STATE_DIR)
self.cache = Cache()
debug = self.config.debug()
telegram_api = TelegramBotApi(self.config.auth_token, self.config.reuse_connections(), debug)
self.api = Api(telegram_api, self.state)
self.cache.bot_info = self.api.getMe()
self.logger = AdminLogger(self.api, self.config.admin_chat_id, debug, self.config.traceback_chat_id())
self.scheduler = self._create_scheduler()
self.starting(project_name)
if self.config.async():
self.scheduler.setup()
self.api.enable_async(AsyncApi(self.api, self.scheduler))
self.action = Action()
self.update_processor = UpdateProcessor(self.action, self.logger)
def _create_scheduler(self):
max_network_workers = int(self.config.max_network_workers)
worker_logger = WorkerStartStopLogger(self.logger.logger)
return SchedulerApi(
max_network_workers, self.logger.work_error, worker_logger.worker_start, worker_logger.worker_stop
)
def set_action(self, action: Action):
action.setup(self.api, self.config, self.state, self.cache, self.scheduler)
self.action = action
self.update_processor = UpdateProcessor(self.action, self.logger)
def run(self):
try:
self.main_loop()
except KeyboardInterrupt:
self.logger.info("KeyboardInterrupt")
except SystemExit as e:
self.logger.info("SystemExit: " + str(e))
raise e
except BaseException as e:
self.logger.error(e, "Fatal error")
raise e
finally:
self.shutdown()
def starting(self, project_name: str):
starting_info = []
if project_name:
version = VersionAction.get_version(project_name)
starting_info.append(
FormattedText()
.normal("Running: {name} {version}")
.start_format()
.bold(name=project_name, version=version)
.end_format()
)
framework_version = VersionAction.get_version(project_info.name)
starting_info.append(
FormattedText()
.normal("Framework: {name} {version}")
.start_format()
.bold(name=project_info.name, version=framework_version)
.end_format()
)
config_status = ConfigStatus(self.config, self.state).get_config_status()
starting_info.extend(config_status)
self.logger.info_formatted_text(
FormattedText().bold("Starting"),
*starting_info
)
def main_loop(self):
while True:
self.process_pending_updates()
self.process_normal_updates()
def process_pending_updates(self):
PendingUpdatesProcessor(self.api.get_pending_updates, self.logger, self.config, self.update_processor).run()
def process_normal_updates(self):
NormalUpdatesProcessor(self.api.get_updates, self.logger, self.config, self.update_processor).run()
def shutdown(self):
self.action.shutdown()
self.scheduler.shutdown()
self.logger.info("Finished")
class UpdateProcessor:
def __init__(self, action: Action, logger: AdminLogger):
self.action = action
self.logger = logger
def process_update(self, update: Update):
try:
self.action.process(update)
except Exception as e:
# As logger errors are probably API failures that the next updates may also get,
            # let them be propagated so that no more updates are processed before waiting some time
self.logger.error(e, "process_update")
class UpdatesProcessor:
def __init__(self, get_updates_func: callable, logger: AdminLogger, config: Config,
update_processor: UpdateProcessor):
self.get_updates_func = get_updates_func
self.logger = logger
self.config = config
self.update_processor = update_processor
self.last_error = None
self.number_of_updates_processed = 0
def run(self):
self.processing_starting()
try:
self.__processing_loop()
finally:
self.processing_ended()
self.processing_ended_successfully()
def __processing_loop(self):
while self.should_keep_processing_updates():
self.__get_and_process_handling_errors()
def __get_and_process_handling_errors(self):
try:
self.__get_and_process()
except Exception as e:
self.__handle_error(e)
# notify there has been an error
self.processing_error(e)
else:
# notify successful processing
self.processing_successful()
def __get_and_process(self):
for update in self.get_updates_func():
self.update_processor.process_update(update)
self.number_of_updates_processed += 1
def __handle_error(self, error: Exception):
sleep_seconds = self.config.sleep_seconds_on_get_updates_error
# we do not want to let non-fatal (eg. API) errors to escape from here
self.safe_log_error(error, "get_and_process", "Sleeping for {seconds} seconds.".format(seconds=sleep_seconds))
# there has been an error while getting updates, sleep a little to give a chance
# for the server or the network to recover (if that was the case), and to not to flood the server
time.sleep(int(sleep_seconds))
def safe_log_error(self, error: Exception, *info: str):
"""Log error failing silently on error"""
self.__do_safe(lambda: self.logger.error(error, *info))
def safe_log_info(self, *info: str):
"""Log info failing silently on error"""
self.__do_safe(lambda: self.logger.info(*info))
@staticmethod
def __do_safe(func: callable):
try:
return func()
except Exception:
pass
def should_keep_processing_updates(self):
raise NotImplementedError()
def processing_successful(self):
"""Updates were processed successfully"""
self.last_error = None
def processing_error(self, error: Exception):
"""There has been an error while processing the last updates"""
self.last_error = error
def processing_starting(self):
"""Updates are about to start being processed"""
pass
def processing_ended(self):
"""Processing has ended, we don't know if successfully or caused by an error"""
self.safe_log_info(
"Ending",
"Updates processed: {updates_processed_number}"
.format(updates_processed_number=self.number_of_updates_processed)
)
def processing_ended_successfully(self):
"""Processing has ended successfully"""
pass
class PendingUpdatesProcessor(UpdatesProcessor):
def __init__(self, get_updates_func: callable, logger: AdminLogger, config: Config,
update_processor: UpdateProcessor):
super().__init__(get_updates_func, logger, config, update_processor)
# set to some value other than None to let the processing run the first time
self.last_error = True
def should_keep_processing_updates(self):
# if there has been an error not all pending updates were processed
# so try again until it ends without error
was_error = self.last_error is not None
if was_error and self.last_error is not True:
self.safe_log_info("Restarting", "Recovered from error. Continue processing pending updates...")
return was_error
def processing_starting(self):
self.safe_log_info("Pending", "Processing pending updates...")
def processing_ended_successfully(self):
self.safe_log_info("Continuing", "Pending updates successfully processed.")
class NormalUpdatesProcessor(UpdatesProcessor):
def __init__(self, get_updates_func: callable, logger: AdminLogger, config: Config,
update_processor: UpdateProcessor):
super().__init__(get_updates_func, logger, config, update_processor)
self.last_successful_processing = time.time()
def processing_successful(self):
super().processing_successful()
self.last_successful_processing = time.time()
def should_keep_processing_updates(self):
if self.last_error is None:
# if last processing ended without error, keep going!
return True
error_seconds_in_normal_mode = time.time() - self.last_successful_processing
max_error_seconds_allowed_in_normal_mode = int(self.config.max_error_seconds_allowed_in_normal_mode)
if error_seconds_in_normal_mode > max_error_seconds_allowed_in_normal_mode:
# it has happened too much time since last successful processing
# although it does not mean no update have been processed, we are
# having problems and updates are being delayed, so going back to
# process pending updates mode
self.safe_log_info(
"Restarting",
"Exceeded {max_seconds} maximum seconds with errors (current value: {seconds} seconds)"
.format(max_seconds=max_error_seconds_allowed_in_normal_mode,
seconds=int(error_seconds_in_normal_mode)),
"Switching to pending updates mode."
)
return False
else:
self.safe_log_info(
"Restarted",
"Recovered from error (current error burst duration is {seconds} seconds of "
"{max_seconds} maximum seconds allowed)"
.format(seconds=int(error_seconds_in_normal_mode),
max_seconds=max_error_seconds_allowed_in_normal_mode),
"Continuing in normal updates mode."
)
return True
def processing_starting(self):
self.safe_log_info("Started", "Switched to normal updates mode.")
| agpl-3.0 | 3,241,992,567,601,851,000 | 38.530466 | 118 | 0.63324 | false |
jaggu303619/asylum-v2.0 | openerp/addons/point_of_sale/report/pos_payment_report_user.py | 62 | 3251 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class pos_payment_report_user(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(pos_payment_report_user, self).__init__(cr, uid, name, context=context)
self.total = 0.0
self.localcontext.update({
'time': time,
'pos_payment_user': self.__pos_payment_user__,
'pos_payment_user_total':self.__pos_payment_user__total__,
})
def __pos_payment_user__(self, form):
data={}
ids = form['user_id']
sql = "select pt.name,pp.default_code as code,pol.qty,pu.name as uom,pol.discount,pol.price_unit, " \
"(pol.price_unit * pol.qty * (1 - (pol.discount) / 100.0)) as total " \
"from pos_order as po,pos_order_line as pol,product_product as pp,product_template as pt,product_uom as pu " \
"where pt.id=pp.product_tmpl_id and pp.id=pol.product_id and po.id = pol.order_id and pu.id=pt.uom_id " \
"and po.state in ('paid','invoiced') and to_char(date_trunc('day',po.date_order),'YYYY-MM-DD')::date = current_date " \
"and po.user_id IN %s"
self.cr.execute (sql, (tuple(ids), ))
data=self.cr.dictfetchall()
return data
def __pos_payment_user__total__(self, form):
res=[]
ids = form['user_id']
self.cr.execute ("select sum(pol.price_unit * pol.qty * (1 - (pol.discount) / 100.0)) " \
"from pos_order as po,pos_order_line as pol,product_product as pp,product_template as pt " \
"where pt.id=pp.product_tmpl_id and pp.id=pol.product_id and po.id = pol.order_id " \
"and po.state='paid' and to_char(date_trunc('day',po.date_order),'YYYY-MM-DD')::date = current_date " \
"and po.user_id IN %s",(tuple(ids),))
res=self.cr.fetchone()
res = res and res[0] or 0.0
return res
report_sxw.report_sxw('report.pos.payment.report.user', 'pos.order', 'addons/point_of_sale/report/pos_payment_report_user.rml', parser=pos_payment_report_user,header='internal')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,060,957,367,438,055,400 | 49.796875 | 177 | 0.581052 | false |
Ruide/angr-dev | angr/angr/misc/autoimport.py | 4 | 2072 | import os
import importlib
import logging
l = logging.getLogger('angr.misc.autoimport')
def auto_import_packages(base_module, base_path, ignore_dirs=(), ignore_files=(), scan_modules=True):
for lib_module_name in os.listdir(base_path):
if lib_module_name in ignore_dirs:
continue
lib_path = os.path.join(base_path, lib_module_name)
if not os.path.isdir(lib_path):
l.debug("Not a dir: %s", lib_module_name)
continue
l.debug("Loading %s.%s", base_module, lib_module_name)
try:
package = importlib.import_module(".%s" % lib_module_name, base_module)
except ImportError:
l.warning("Unable to autoimport package %s.%s", base_module, lib_module_name, exc_info=True)
else:
if scan_modules:
for name, mod in auto_import_modules('%s.%s' % (base_module, lib_module_name), lib_path, ignore_files=ignore_files):
if name not in dir(package):
setattr(package, name, mod)
yield lib_module_name, package
def auto_import_modules(base_module, base_path, ignore_files=()):
for proc_file_name in os.listdir(base_path):
if not proc_file_name.endswith('.py'):
continue
if proc_file_name in ignore_files or proc_file_name == '__init__.py':
continue
proc_module_name = proc_file_name[:-3]
try:
proc_module = importlib.import_module(".%s" % proc_module_name, base_module)
except ImportError:
l.warning("Unable to autoimport module %s.%s", base_module, proc_module_name, exc_info=True)
continue
else:
yield proc_module_name, proc_module
def filter_module(mod, type_req=None, subclass_req=None):
for name in dir(mod):
val = getattr(mod, name)
if type_req is not None and not isinstance(val, type_req):
continue
if subclass_req is not None and not issubclass(val, subclass_req):
continue
yield name, val
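
# A minimal usage sketch (not part of the original module): combining
# auto_import_modules() with filter_module() to collect every class in a package
# directory that derives from a given base class. The base_module/base_path and
# required_base arguments are assumptions supplied by the caller.
def collect_subclasses(base_module, base_path, required_base):
    found = {}
    for mod_name, mod in auto_import_modules(base_module, base_path):
        # type_req=type keeps only classes; subclass_req narrows to the wanted base.
        for cls_name, cls in filter_module(mod, type_req=type, subclass_req=required_base):
            found[cls_name] = cls
    return found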
| bsd-2-clause | 5,928,540,721,124,063,000 | 38.09434 | 132 | 0.600386 | false |
Nu3001/external_chromium_org | chrome/browser/safe_browsing/safe_browsing_testserver.py | 74 | 1483 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wraps the upstream safebrowsing_test_server.py to run in Chrome tests."""
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '..', '..', '..', 'net',
'tools', 'testserver'))
import testserver_base
class ServerRunner(testserver_base.TestServerRunner):
"""TestServerRunner for safebrowsing_test_server.py."""
def create_server(self, server_data):
sys.path.append(os.path.join(BASE_DIR, '..', '..', '..', 'third_party',
'safe_browsing', 'testing'))
import safebrowsing_test_server
server = safebrowsing_test_server.SetupServer(
self.options.data_file, self.options.host, self.options.port,
opt_enforce_caching=False, opt_validate_database=True)
print 'Safebrowsing HTTP server started on port %d...' % server.server_port
server_data['port'] = server.server_port
return server
def add_options(self):
testserver_base.TestServerRunner.add_options(self)
self.option_parser.add_option('--data-file', dest='data_file',
help='File containing safebrowsing test '
'data and expectations')
if __name__ == '__main__':
sys.exit(ServerRunner().main())
| bsd-3-clause | 4,451,859,953,441,034,000 | 35.170732 | 79 | 0.640593 | false |
antivirtel/Flexget | flexget/plugins/input/rottentomatoes_list.py | 13 | 2974 | from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.cached_input import cached
try:
from flexget.plugins.api_rottentomatoes import lists
except ImportError:
raise plugin.DependencyError(issued_by='rottentomatoes_lookup', missing='api_rottentomatoes',
message='rottentomatoes_lookup requires the `api_rottentomatoes` plugin')
log = logging.getLogger('rottentomatoes_list')
class RottenTomatoesList(object):
"""
Emits an entry for each movie in a Rotten Tomatoes list.
Configuration:
dvds:
- top_rentals
- upcoming
movies:
- box_office
Possible lists are
* dvds: top_rentals, current_releases, new_releases, upcoming
* movies: box_office, in_theaters, opening, upcoming
"""
def __init__(self):
# We could pull these from the API through lists.json but that's extra web/API key usage
self.dvd_lists = ['top_rentals', 'current_releases', 'new_releases', 'upcoming']
self.movie_lists = ['box_office', 'in_theaters', 'opening', 'upcoming']
def validator(self):
from flexget import validator
root = validator.factory('dict')
root.accept('list', key='dvds').accept('choice').accept_choices(self.dvd_lists)
root.accept('list', key='movies').accept('choice').accept_choices(self.movie_lists)
root.accept('text', key='api_key')
return root
@cached('rottentomatoes_list', persist='2 hours')
def on_task_input(self, task, config):
entries = []
api_key = config.get('api_key', None)
for l_type, l_names in config.items():
if type(l_names) is not list:
continue
for l_name in l_names:
results = lists(list_type=l_type, list_name=l_name, api_key=api_key)
if results:
for movie in results['movies']:
if [entry for entry in entries if movie['title'] == entry.get('title')]:
continue
imdb_id = movie.get('alternate_ids', {}).get('imdb')
if imdb_id:
imdb_id = 'tt' + str(imdb_id)
entries.append(Entry(title=movie['title'], rt_id=movie['id'],
imdb_id=imdb_id,
rt_name=movie['title'],
url=movie['links']['alternate']))
else:
log.critical('Failed to fetch Rotten tomatoes %s list: %s. List doesn\'t exist?' %
(l_type, l_name))
return entries
@event('plugin.register')
def register_plugin():
plugin.register(RottenTomatoesList, 'rottentomatoes_list', api_ver=2)
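
# A minimal configuration sketch (not part of the original plugin): the shape of
# the dict that validator()/on_task_input() expect, written as plain Python. The
# api_key value is a placeholder assumption.
_example_config = {
    'api_key': 'YOUR_KEY_HERE',            # optional; non-list values are skipped by on_task_input
    'movies': ['in_theaters', 'opening'],  # any of the movie_lists choices above
    'dvds': ['new_releases'],              # any of the dvd_lists choices above
}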
| mit | -2,264,749,738,912,225,300 | 36.64557 | 110 | 0.569603 | false |
zhujzhuo/Sahara | sahara/service/edp/oozie/workflow_creator/hive_workflow.py | 14 | 1576 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.service.edp.oozie.workflow_creator import base_workflow
from sahara.utils import xmlutils as x
class HiveWorkflowCreator(base_workflow.OozieWorkflowCreator):
def __init__(self):
super(HiveWorkflowCreator, self).__init__('hive')
hive_elem = self.doc.getElementsByTagName('hive')[0]
hive_elem.setAttribute('xmlns', 'uri:oozie:hive-action:0.2')
def build_workflow_xml(self, script, job_xml, prepare={},
configuration=None, params={},
files=[], archives=[]):
for k in sorted(prepare):
self._add_to_prepare_element(k, prepare[k])
self._add_job_xml_element(job_xml)
self._add_configuration_elements(configuration)
x.add_text_element_to_tag(self.doc, self.tag_name,
'script', script)
x.add_equal_separated_dict(self.doc, self.tag_name, 'param', params)
self._add_files_and_archives(files, archives)
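
# A minimal usage sketch (not part of the original module): driving the creator
# above for a single Hive script. The script name, job.xml name and parameter
# values are illustrative assumptions.
def _example_hive_workflow():
    creator = HiveWorkflowCreator()
    creator.build_workflow_xml(script='script.q',
                               job_xml='hive-site.xml',
                               params={'INPUT': '/user/hive/input',
                                       'OUTPUT': '/user/hive/output'},
                               files=['udf.py'])
    # The assembled workflow accumulates in creator.doc; serializing it to XML is
    # handled by the base workflow creator.
    return creator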
| apache-2.0 | -161,730,020,974,715,460 | 36.52381 | 76 | 0.667513 | false |
edbrannin/library-searcher-python | render_results.py | 1 | 2109 | import click
from jinja2 import Environment, FileSystemLoader
from model import *
session = setup_sqlalchemy()()
env = Environment(loader=FileSystemLoader("."))
template = env.get_template("results.html.j2")
sql = """
select
r.author,
r.title,
h.branch_name,
h.collection_name,
h.call_class,
count(*) as "count"
from
resources r,
resource_holdings h,
resource_status s
where
r.id = h.item_id
and s.resource_id = h.item_id
and s.item_identifier = h.barcode
and position = 0
and branch_name in (
'Rochester Public Library Central',
'Irondequoit Public Library',
'Fairport Public Library'
)
and available = 1
group by
r.author,
r.title,
h.branch_name,
h.collection_name,
h.call_class
order by
author,
title,
branch_name,
collection_name asc,
call_class
"""
unlisted_sql = """
SELECT max(rr.search_query) search_query
, rr.author
, rr.title
FROM resources rr
WHERE NOT EXISTS
(SELECT 1
FROM resources r
, resource_holdings h
, resource_status s
WHERE r.id = h.item_id
AND s.resource_id = h.item_id
AND s.item_identifier = h.barcode
AND POSITION = 0
AND branch_name IN ( 'Rochester Public Library Central'
, 'Irondequoit Public Library' )
AND available = 1
AND r.author = rr.author
AND r.title = rr.title
GROUP BY r.author
, r.title
, h.branch_name
, h.collection_name
, h.call_class
ORDER BY author
, title
, branch_name
, collection_name ASC, call_class)
GROUP BY author
, title
ORDER BY search_query
, author
, title
"""
@click.command()
@click.argument("out_file", type=click.File('wb'))
def render_results(out_file):
rows = session.execute(sql).fetchall()
missing_rows = session.execute(unlisted_sql).fetchall()
text = template.render(rows=rows, missing_rows=missing_rows).encode('utf-8')
out_file.write(text)
if __name__ == '__main__':
render_results()
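
# Usage sketch (illustrative): the command takes the output path as its single
# argument, e.g.
#   python render_results.py results.html
# which runs both queries and writes the rendered results.html.j2 template to
# results.html.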
| mit | -2,486,035,145,565,878,300 | 20.96875 | 80 | 0.614035 | false |
esrf-emotion/emotion | emotion/task_utils.py | 1 | 2961 | import sys
import types
import gevent
import signal
import functools
class cleanup:
def __init__(self, *args, **keys):
self.cleanup_funcs = args
self.keys = keys
def __enter__(self):
pass
def __exit__(self, exc_type, value, traceback):
if self.cleanup_funcs:
for cleanup_func in self.cleanup_funcs:
try:
cleanup_func(**self.keys)
except:
sys.excepthook(*sys.exc_info())
continue
if exc_type is not None:
raise exc_type, value, traceback
class error_cleanup:
def __init__(self, *args, **keys):
self.error_funcs = args
self.keys = keys
def __enter__(self):
pass
def __exit__(self, exc_type, value, traceback):
if exc_type is not None:
if self.error_funcs:
for error_func in self.error_funcs:
try:
error_func(**self.keys)
except:
sys.excepthook(*sys.exc_info())
continue
# the previous try..except is resetting exception,
# so re-raise it from here
raise exc_type, value, traceback
class TaskException:
def __init__(self, exception, error_string, tb):
self.exception = exception
self.error_string = error_string
self.tb = tb
class wrap_errors(object):
def __init__(self, func):
"""Make a new function from `func', such that it catches all exceptions
and return it as a TaskException object
"""
self.func = func
def __call__(self, *args, **kwargs):
func = self.func
try:
return func(*args, **kwargs)
except:
return TaskException(*sys.exc_info())
def __str__(self):
return str(self.func)
def __repr__(self):
return repr(self.func)
def __getattr__(self, item):
return getattr(self.func, item)
def kill_with_kbint(g):
g.kill(KeyboardInterrupt)
def special_get(self, *args, **kwargs):
sigint_handler = gevent.signal(signal.SIGINT, functools.partial(kill_with_kbint, self))
try:
ret = self._get(*args, **kwargs)
finally:
sigint_handler.cancel()
if isinstance(ret, TaskException):
raise ret.exception, ret.error_string, ret.tb
else:
return ret
def task(func):
def start_task(*args, **kwargs):
wait = kwargs.pop("wait", True)
timeout = kwargs.pop("timeout", None)
t = gevent.spawn(wrap_errors(func), *args, **kwargs)
t._get = t.get
try:
setattr(t, "get", types.MethodType(special_get, t))
if wait:
return t.get(timeout=timeout)
else:
return t
except:
t.kill()
raise
return start_task
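
# A minimal usage sketch (not part of the original module): wrapping a motor
# move in a task with an error_cleanup guard. The motor object and its
# move()/stop()/position() methods are illustrative assumptions.
def _example_move(motor, target_position):
    def stop_motor():
        motor.stop()

    @task
    def do_move():
        with error_cleanup(stop_motor):
            motor.move(target_position)
            return motor.position()

    # wait=False returns the running task immediately; get() waits for the
    # result and re-raises any exception raised inside the task.
    pending = do_move(wait=False)
    return pending.get()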
| gpl-2.0 | -7,357,949,262,622,977,000 | 23.270492 | 91 | 0.530902 | false |
MaDKaTZe/phantomjs | src/qt/qtwebkit/Tools/gtk/gtkdoc.py | 113 | 17832 | # Copyright (C) 2011 Igalia S.L.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import errno
import logging
import os
import os.path
import subprocess
import sys
class GTKDoc(object):
"""Class that controls a gtkdoc run.
Each instance of this class represents one gtkdoc configuration
and set of documentation. The gtkdoc package is a series of tools
run consecutively which converts inline C/C++ documentation into
docbook files and then into HTML. This class is suitable for
generating documentation or simply verifying correctness.
Keyword arguments:
output_dir -- The path where gtkdoc output should be placed. Generation
                  may overwrite files in this directory. Required.
module_name -- The name of the documentation module. For libraries this
                   is typically the library name. Required if no library path
                   is given.
source_dirs -- A list of paths to the source code to be scanned. Required.
ignored_files -- A list of filenames to ignore in the source directory. It is
only necessary to provide the basenames of these files.
Typically it is important to provide an updated list of
ignored files to prevent warnings about undocumented symbols.
decorator -- If a decorator is used to unhide certain symbols in header
                 files, this parameter is required for successful scanning.
(default '')
deprecation_guard -- gtkdoc tries to ensure that symbols marked as deprecated
are encased in this C preprocessor define. This is required
to avoid gtkdoc warnings. (default '')
cflags -- This parameter specifies any preprocessor flags necessary for
building the scanner binary during gtkdoc-scanobj. Typically
this includes all absolute include paths necessary to resolve
all header dependencies. (default '')
ldflags -- This parameter specifies any linker flags necessary for
building the scanner binary during gtkdoc-scanobj. Typically
this includes "-lyourlibraryname". (default '')
    library_path -- This parameter specifies the path to the directory where your
                    library resides, used for building the scanner binary during
gtkdoc-scanobj. (default '')
doc_dir -- The path to other documentation files necessary to build
               the documentation. The files in this directory as well as
the files in the 'html' subdirectory will be copied
recursively into the output directory. (default '')
main_sgml_file -- The path or name (if a doc_dir is given) of the SGML file
                      that is considered the main page of your documentation.
(default: <module_name>-docs.sgml)
version -- The version number of the module. If this is provided,
a version.xml file containing the version will be created
in the output directory during documentation generation.
    interactive -- Whether errors or warnings should prompt the user
                   to continue. When this value is false, generation
will continue despite warnings. (default False)
virtual_root -- A temporary installation directory which is used as the root
where the actual installation prefix lives; this is mostly
useful for packagers, and should be set to what is given to
make install as DESTDIR.
"""
def __init__(self, args):
# Parameters specific to scanning.
self.module_name = ''
self.source_dirs = []
self.ignored_files = []
self.decorator = ''
self.deprecation_guard = ''
# Parameters specific to gtkdoc-scanobj.
self.cflags = ''
self.ldflags = ''
self.library_path = ''
# Parameters specific to generation.
self.output_dir = ''
self.doc_dir = ''
self.main_sgml_file = ''
# Parameters specific to gtkdoc-fixxref.
self.cross_reference_deps = []
self.interactive = False
self.logger = logging.getLogger('gtkdoc')
for key, value in iter(args.items()):
setattr(self, key, value)
def raise_error_if_not_specified(key):
if not getattr(self, key):
raise Exception('%s not specified.' % key)
raise_error_if_not_specified('output_dir')
raise_error_if_not_specified('source_dirs')
raise_error_if_not_specified('module_name')
# Make all paths absolute in case we were passed relative paths, since
# we change the current working directory when executing subcommands.
self.output_dir = os.path.abspath(self.output_dir)
self.source_dirs = [os.path.abspath(x) for x in self.source_dirs]
if self.library_path:
self.library_path = os.path.abspath(self.library_path)
if not self.main_sgml_file:
self.main_sgml_file = self.module_name + "-docs.sgml"
def generate(self, html=True):
self.saw_warnings = False
self._copy_doc_files_to_output_dir(html)
self._write_version_xml()
self._run_gtkdoc_scan()
self._run_gtkdoc_scangobj()
self._run_gtkdoc_mktmpl()
self._run_gtkdoc_mkdb()
if not html:
return
self._run_gtkdoc_mkhtml()
self._run_gtkdoc_fixxref()
def _delete_file_if_exists(self, path):
if not os.access(path, os.F_OK | os.R_OK):
return
self.logger.debug('deleting %s', path)
os.unlink(path)
def _create_directory_if_nonexistent(self, path):
try:
os.makedirs(path)
except OSError as error:
if error.errno != errno.EEXIST:
raise
def _raise_exception_if_file_inaccessible(self, path):
if not os.path.exists(path) or not os.access(path, os.R_OK):
raise Exception("Could not access file at: %s" % path)
def _output_has_warnings(self, outputs):
for output in outputs:
if output and output.find('warning'):
return True
return False
def _ask_yes_or_no_question(self, question):
if not self.interactive:
return True
question += ' [y/N] '
answer = None
while answer != 'y' and answer != 'n' and answer != '':
answer = raw_input(question).lower()
return answer == 'y'
def _run_command(self, args, env=None, cwd=None, print_output=True, ignore_warnings=False):
if print_output:
self.logger.info("Running %s", args[0])
self.logger.debug("Full command args: %s", str(args))
process = subprocess.Popen(args, env=env, cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = [b.decode("utf-8") for b in process.communicate()]
if print_output:
if stdout:
sys.stdout.write(stdout)
if stderr:
sys.stderr.write(stderr)
if process.returncode != 0:
raise Exception('%s produced a non-zero return code %i'
% (args[0], process.returncode))
if not ignore_warnings and ('warning' in stderr or 'warning' in stdout):
self.saw_warnings = True
if not self._ask_yes_or_no_question('%s produced warnings, '
'try to continue?' % args[0]):
raise Exception('%s step failed' % args[0])
return stdout.strip()
def _copy_doc_files_to_output_dir(self, html=True):
if not self.doc_dir:
self.logger.info('Not copying any files from doc directory,'
' because no doc directory given.')
return
def copy_file_replacing_existing(src, dest):
if os.path.isdir(src):
self.logger.debug('skipped directory %s', src)
return
if not os.access(src, os.F_OK | os.R_OK):
self.logger.debug('skipped unreadable %s', src)
return
self._delete_file_if_exists(dest)
self.logger.debug('created %s', dest)
os.link(src, dest)
def copy_all_files_in_directory(src, dest):
for path in os.listdir(src):
copy_file_replacing_existing(os.path.join(src, path),
os.path.join(dest, path))
self.logger.info('Copying template files to output directory...')
self._create_directory_if_nonexistent(self.output_dir)
copy_all_files_in_directory(self.doc_dir, self.output_dir)
if not html:
return
self.logger.info('Copying HTML files to output directory...')
html_src_dir = os.path.join(self.doc_dir, 'html')
html_dest_dir = os.path.join(self.output_dir, 'html')
self._create_directory_if_nonexistent(html_dest_dir)
if os.path.exists(html_src_dir):
copy_all_files_in_directory(html_src_dir, html_dest_dir)
def _write_version_xml(self):
if not self.version:
self.logger.info('No version specified, so not writing version.xml')
return
version_xml_path = os.path.join(self.output_dir, 'version.xml')
src_version_xml_path = os.path.join(self.doc_dir, 'version.xml')
# Don't overwrite version.xml if it was in the doc directory.
if os.path.exists(version_xml_path) and \
os.path.exists(src_version_xml_path):
return
output_file = open(version_xml_path, 'w')
output_file.write(self.version)
output_file.close()
def _ignored_files_basenames(self):
return ' '.join([os.path.basename(x) for x in self.ignored_files])
def _run_gtkdoc_scan(self):
args = ['gtkdoc-scan',
'--module=%s' % self.module_name,
'--rebuild-types']
# Each source directory should be have its own "--source-dir=" prefix.
args.extend(['--source-dir=%s' % path for path in self.source_dirs])
if self.decorator:
args.append('--ignore-decorators=%s' % self.decorator)
if self.deprecation_guard:
args.append('--deprecated-guards=%s' % self.deprecation_guard)
if self.output_dir:
args.append('--output-dir=%s' % self.output_dir)
# gtkdoc-scan wants the basenames of ignored headers, so strip the
# dirname. Different from "--source-dir", the headers should be
# specified as one long string.
ignored_files_basenames = self._ignored_files_basenames()
if ignored_files_basenames:
args.append('--ignore-headers=%s' % ignored_files_basenames)
self._run_command(args)
def _run_gtkdoc_scangobj(self):
env = os.environ
ldflags = self.ldflags
if self.library_path:
ldflags = ' "-L%s" ' % self.library_path + ldflags
current_ld_library_path = env.get('LD_LIBRARY_PATH')
if current_ld_library_path:
env['RUN'] = 'LD_LIBRARY_PATH="%s:%s" ' % (self.library_path, current_ld_library_path)
else:
env['RUN'] = 'LD_LIBRARY_PATH="%s" ' % self.library_path
if ldflags:
env['LDFLAGS'] = '%s %s' % (ldflags, env.get('LDFLAGS', ''))
if self.cflags:
env['CFLAGS'] = '%s %s' % (self.cflags, env.get('CFLAGS', ''))
if 'CFLAGS' in env:
self.logger.debug('CFLAGS=%s', env['CFLAGS'])
if 'LDFLAGS' in env:
self.logger.debug('LDFLAGS %s', env['LDFLAGS'])
if 'RUN' in env:
self.logger.debug('RUN=%s', env['RUN'])
self._run_command(['gtkdoc-scangobj', '--module=%s' % self.module_name],
env=env, cwd=self.output_dir)
def _run_gtkdoc_mktmpl(self):
args = ['gtkdoc-mktmpl', '--module=%s' % self.module_name]
self._run_command(args, cwd=self.output_dir)
def _run_gtkdoc_mkdb(self):
sgml_file = os.path.join(self.output_dir, self.main_sgml_file)
self._raise_exception_if_file_inaccessible(sgml_file)
args = ['gtkdoc-mkdb',
'--module=%s' % self.module_name,
'--main-sgml-file=%s' % sgml_file,
'--source-suffixes=h,c,cpp,cc',
'--output-format=xml',
'--sgml-mode']
ignored_files_basenames = self._ignored_files_basenames()
if ignored_files_basenames:
args.append('--ignore-files=%s' % ignored_files_basenames)
# Each directory should be have its own "--source-dir=" prefix.
args.extend(['--source-dir=%s' % path for path in self.source_dirs])
self._run_command(args, cwd=self.output_dir)
def _run_gtkdoc_mkhtml(self):
html_dest_dir = os.path.join(self.output_dir, 'html')
if not os.path.isdir(html_dest_dir):
raise Exception("%s is not a directory, could not generate HTML"
% html_dest_dir)
elif not os.access(html_dest_dir, os.X_OK | os.R_OK | os.W_OK):
raise Exception("Could not access %s to generate HTML"
% html_dest_dir)
# gtkdoc-mkhtml expects the SGML path to be absolute.
sgml_file = os.path.join(os.path.abspath(self.output_dir),
self.main_sgml_file)
self._raise_exception_if_file_inaccessible(sgml_file)
self._run_command(['gtkdoc-mkhtml', self.module_name, sgml_file],
cwd=html_dest_dir)
def _run_gtkdoc_fixxref(self):
args = ['gtkdoc-fixxref',
'--module-dir=html',
'--html-dir=html']
args.extend(['--extra-dir=%s' % extra_dir for extra_dir in self.cross_reference_deps])
self._run_command(args, cwd=self.output_dir, ignore_warnings=True)
def rebase_installed_docs(self):
if not os.path.isdir(self.output_dir):
raise Exception("Tried to rebase documentation before generating it.")
html_dir = os.path.join(self.virtual_root + self.prefix, 'share', 'gtk-doc', 'html', self.module_name)
if not os.path.isdir(html_dir):
return
args = ['gtkdoc-rebase',
'--relative',
'--html-dir=%s' % html_dir]
args.extend(['--other-dir=%s' % extra_dir for extra_dir in self.cross_reference_deps])
if self.virtual_root:
args.extend(['--dest-dir=%s' % self.virtual_root])
self._run_command(args, cwd=self.output_dir)
def api_missing_documentation(self):
unused_doc_file = os.path.join(self.output_dir, self.module_name + "-unused.txt")
if not os.path.exists(unused_doc_file) or not os.access(unused_doc_file, os.R_OK):
return []
return open(unused_doc_file).read().splitlines()
class PkgConfigGTKDoc(GTKDoc):
"""Class reads a library's pkgconfig file to guess gtkdoc parameters.
Some gtkdoc parameters can be guessed by reading a library's pkgconfig
file, including the cflags, ldflags and version parameters. If you
provide these parameters as well, they will be appended to the ones
guessed via the pkgconfig file.
Keyword arguments:
pkg_config_path -- Path to the pkgconfig file for the library. Required.
"""
def __init__(self, pkg_config_path, args):
super(PkgConfigGTKDoc, self).__init__(args)
pkg_config = os.environ.get('PKG_CONFIG', 'pkg-config')
if not os.path.exists(pkg_config_path):
raise Exception('Could not find pkg-config file at: %s'
% pkg_config_path)
self.cflags += " " + self._run_command([pkg_config,
pkg_config_path,
'--cflags'], print_output=False)
self.ldflags += " " + self._run_command([pkg_config,
pkg_config_path,
'--libs'], print_output=False)
self.version = self._run_command([pkg_config,
pkg_config_path,
'--modversion'], print_output=False)
self.prefix = self._run_command([pkg_config,
pkg_config_path,
'--variable=prefix'], print_output=False)
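
# A minimal usage sketch (not part of the original module): generating docs for
# a hypothetical library via its pkg-config file. Every path, the module name
# and the .pc file below are illustrative assumptions.
def _example_generate_docs():
    doc = PkgConfigGTKDoc(
        '/usr/lib/pkgconfig/mylib-1.0.pc',
        {
            'module_name': 'mylib',
            'source_dirs': ['/path/to/mylib/src'],
            'output_dir': '/path/to/build/docs/mylib',
            'doc_dir': '/path/to/mylib/docs',
            'ignored_files': ['mylib-private.h'],
            'library_path': '/path/to/build/lib',
        })
    doc.generate(html=True)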
| bsd-3-clause | 3,566,588,341,338,195,000 | 41.865385 | 110 | 0.57694 | false |
github-account-because-they-want-it/django-activity-stream | actstream/south_migrations/0006_auto__add_field_action_data.py | 8 | 5977 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from actstream.compat import user_model_label
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Action.data'
db.add_column('actstream_action', 'data', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Action.data'
db.delete_column('actstream_action', 'data')
models = {
'actstream.action': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'},
'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': "orm['contenttypes.ContentType']"}),
'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'actstream.follow': {
'Meta': {'unique_together': "(('user', 'content_type', 'object_id'),)", 'object_name': 'Follow'},
'actor_only': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_model_label})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': user_model_label.split('.')[-1]},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['actstream']
| bsd-3-clause | -4,569,155,226,692,120,000 | 71.890244 | 204 | 0.565836 | false |
edx/edx-platform | common/djangoapps/student/role_helpers.py | 5 | 1412 | """
Helpers for student roles
"""
from openedx.core.djangoapps.django_comment_common.models import (
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_GROUP_MODERATOR,
FORUM_ROLE_MODERATOR,
Role
)
from common.djangoapps.student.roles import (
CourseBetaTesterRole,
CourseInstructorRole,
CourseStaffRole,
GlobalStaff,
OrgInstructorRole,
OrgStaffRole
)
def has_staff_roles(user, course_key):
"""
    Return True if a user has any of the following roles:
Staff, Instructor, Beta Tester, Forum Community TA, Forum Group Moderator, Forum Moderator, Forum Administrator
"""
forum_roles = [FORUM_ROLE_COMMUNITY_TA, FORUM_ROLE_GROUP_MODERATOR,
FORUM_ROLE_MODERATOR, FORUM_ROLE_ADMINISTRATOR]
is_staff = CourseStaffRole(course_key).has_user(user)
is_instructor = CourseInstructorRole(course_key).has_user(user)
is_beta_tester = CourseBetaTesterRole(course_key).has_user(user)
is_org_staff = OrgStaffRole(course_key.org).has_user(user)
is_org_instructor = OrgInstructorRole(course_key.org).has_user(user)
is_global_staff = GlobalStaff().has_user(user)
has_forum_role = Role.user_has_role_for_course(user, course_key, forum_roles)
if any([is_staff, is_instructor, is_beta_tester, is_org_staff,
is_org_instructor, is_global_staff, has_forum_role]):
return True
return False
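
# A minimal usage sketch (not part of the original module): gating a course
# view on staff-level roles. PermissionDenied comes from Django; the user and
# course_key arguments are whatever the calling view already has.
def _example_require_staff(user, course_key):
    from django.core.exceptions import PermissionDenied
    if not has_staff_roles(user, course_key):
        raise PermissionDenied('Staff-level access required for this course.')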
| agpl-3.0 | -2,067,997,800,688,518,700 | 34.3 | 115 | 0.708924 | false |
ernestask/meson | mesonbuild/backend/xcodebackend.py | 2 | 40565 | # Copyright 2014-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import backends
from .. import build
from .. import dependencies
from .. import mesonlib
import uuid, os, sys
from ..mesonlib import MesonException
class XCodeBackend(backends.Backend):
def __init__(self, build):
super().__init__(build)
self.name = 'xcode'
self.project_uid = self.environment.coredata.guid.replace('-', '')[:24]
self.project_conflist = self.gen_id()
self.indent = ' '
self.indent_level = 0
self.xcodetypemap = {'c': 'sourcecode.c.c',
'a': 'archive.ar',
'cc': 'sourcecode.cpp.cpp',
'cxx': 'sourcecode.cpp.cpp',
'cpp': 'sourcecode.cpp.cpp',
'c++': 'sourcecode.cpp.cpp',
'm': 'sourcecode.c.objc',
'mm': 'sourcecode.cpp.objcpp',
'h': 'sourcecode.c.h',
'hpp': 'sourcecode.cpp.h',
'hxx': 'sourcecode.cpp.h',
'hh': 'sourcecode.cpp.hh',
'inc': 'sourcecode.c.h',
'dylib': 'compiled.mach-o.dylib',
'o': 'compiled.mach-o.objfile',
}
self.maingroup_id = self.gen_id()
self.all_id = self.gen_id()
self.all_buildconf_id = self.gen_id()
self.buildtypes = ['debug']
self.test_id = self.gen_id()
self.test_command_id = self.gen_id()
self.test_buildconf_id = self.gen_id()
def gen_id(self):
return str(uuid.uuid4()).upper().replace('-', '')[:24]
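    # Illustrative note (not part of the original backend): the expression above
    # yields a 24-character uppercase hex token, e.g. 'A3F09C4B12DE4F6780AB4C21',
    # matching the 96-bit object identifiers Xcode writes into project.pbxproj.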
def get_target_dir(self, target):
dirname = os.path.join(target.get_subdir(), self.environment.coredata.get_builtin_option('buildtype'))
os.makedirs(os.path.join(self.environment.get_build_dir(), dirname), exist_ok=True)
return dirname
def write_line(self, text):
self.ofile.write(self.indent * self.indent_level + text)
if not text.endswith('\n'):
self.ofile.write('\n')
def generate(self, interp):
self.interpreter = interp
test_data = self.serialize_tests()[0]
self.generate_filemap()
self.generate_buildmap()
self.generate_buildstylemap()
self.generate_build_phase_map()
self.generate_build_configuration_map()
self.generate_build_configurationlist_map()
self.generate_project_configurations_map()
self.generate_buildall_configurations_map()
self.generate_test_configurations_map()
self.generate_native_target_map()
self.generate_native_frameworks_map()
self.generate_source_phase_map()
self.generate_target_dependency_map()
self.generate_pbxdep_map()
self.generate_containerproxy_map()
self.proj_dir = os.path.join(self.environment.get_build_dir(), self.build.project_name + '.xcodeproj')
os.makedirs(self.proj_dir, exist_ok=True)
self.proj_file = os.path.join(self.proj_dir, 'project.pbxproj')
with open(self.proj_file, 'w') as self.ofile:
self.generate_prefix()
self.generate_pbx_aggregate_target()
self.generate_pbx_build_file()
self.generate_pbx_build_style()
self.generate_pbx_container_item_proxy()
self.generate_pbx_file_reference()
self.generate_pbx_frameworks_buildphase()
self.generate_pbx_group()
self.generate_pbx_native_target()
self.generate_pbx_project()
self.generate_pbx_shell_build_phase(test_data)
self.generate_pbx_sources_build_phase()
self.generate_pbx_target_dependency()
self.generate_xc_build_configuration()
self.generate_xc_configurationList()
self.generate_suffix()
def get_xcodetype(self, fname):
return self.xcodetypemap[fname.split('.')[-1]]
def generate_filemap(self):
self.filemap = {} # Key is source file relative to src root.
self.target_filemap = {}
for name, t in self.build.targets.items():
for s in t.sources:
if isinstance(s, mesonlib.File):
s = os.path.join(s.subdir, s.fname)
self.filemap[s] = self.gen_id()
for o in t.objects:
if isinstance(o, str):
o = os.path.join(t.subdir, o)
self.filemap[o] = self.gen_id()
self.target_filemap[name] = self.gen_id()
def generate_buildmap(self):
self.buildmap = {}
for t in self.build.targets.values():
for s in t.sources:
s = os.path.join(s.subdir, s.fname)
self.buildmap[s] = self.gen_id()
for o in t.objects:
o = os.path.join(t.subdir, o)
if isinstance(o, str):
self.buildmap[o] = self.gen_id()
def generate_buildstylemap(self):
self.buildstylemap = {'debug': self.gen_id()}
def generate_build_phase_map(self):
for tname, t in self.build.targets.items():
# generate id for our own target-name
t.buildphasemap = {}
t.buildphasemap[tname] = self.gen_id()
            # each target can have its own Frameworks/Sources/..., generate ids for those
t.buildphasemap['Frameworks'] = self.gen_id()
t.buildphasemap['Resources'] = self.gen_id()
t.buildphasemap['Sources'] = self.gen_id()
def generate_build_configuration_map(self):
self.buildconfmap = {}
for t in self.build.targets:
bconfs = {'debug': self.gen_id()}
self.buildconfmap[t] = bconfs
def generate_project_configurations_map(self):
self.project_configurations = {'debug': self.gen_id()}
def generate_buildall_configurations_map(self):
self.buildall_configurations = {'debug': self.gen_id()}
def generate_test_configurations_map(self):
self.test_configurations = {'debug': self.gen_id()}
def generate_build_configurationlist_map(self):
self.buildconflistmap = {}
for t in self.build.targets:
self.buildconflistmap[t] = self.gen_id()
def generate_native_target_map(self):
self.native_targets = {}
for t in self.build.targets:
self.native_targets[t] = self.gen_id()
def generate_native_frameworks_map(self):
self.native_frameworks = {}
self.native_frameworks_fileref = {}
for t in self.build.targets.values():
for dep in t.get_external_deps():
if isinstance(dep, dependencies.AppleFrameworks):
for f in dep.frameworks:
self.native_frameworks[f] = self.gen_id()
self.native_frameworks_fileref[f] = self.gen_id()
def generate_target_dependency_map(self):
self.target_dependency_map = {}
for tname, t in self.build.targets.items():
for target in t.link_targets:
self.target_dependency_map[(tname, target.get_basename())] = self.gen_id()
def generate_pbxdep_map(self):
self.pbx_dep_map = {}
for t in self.build.targets:
self.pbx_dep_map[t] = self.gen_id()
def generate_containerproxy_map(self):
self.containerproxy_map = {}
for t in self.build.targets:
self.containerproxy_map[t] = self.gen_id()
def generate_source_phase_map(self):
self.source_phase = {}
for t in self.build.targets:
self.source_phase[t] = self.gen_id()
def generate_pbx_aggregate_target(self):
self.ofile.write('\n/* Begin PBXAggregateTarget section */\n')
self.write_line('%s /* ALL_BUILD */ = {' % self.all_id)
self.indent_level += 1
self.write_line('isa = PBXAggregateTarget;')
self.write_line('buildConfigurationList = %s;' % self.all_buildconf_id)
self.write_line('buildPhases = (')
self.write_line(');')
self.write_line('dependencies = (')
self.indent_level += 1
for t in self.build.targets:
self.write_line('%s /* PBXTargetDependency */,' % self.pbx_dep_map[t])
self.indent_level -= 1
self.write_line(');')
self.write_line('name = ALL_BUILD;')
self.write_line('productName = ALL_BUILD;')
self.indent_level -= 1
self.write_line('};')
self.write_line('%s /* RUN_TESTS */ = {' % self.test_id)
self.indent_level += 1
self.write_line('isa = PBXAggregateTarget;')
self.write_line('buildConfigurationList = %s;' % self.test_buildconf_id)
self.write_line('buildPhases = (')
self.indent_level += 1
self.write_line('%s /* test run command */,' % self.test_command_id)
self.indent_level -= 1
self.write_line(');')
self.write_line('dependencies = (')
self.write_line(');')
self.write_line('name = RUN_TESTS;')
self.write_line('productName = RUN_TESTS;')
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End PBXAggregateTarget section */\n')
def generate_pbx_build_file(self):
self.ofile.write('\n/* Begin PBXBuildFile section */\n')
templ = '%s /* %s */ = { isa = PBXBuildFile; fileRef = %s /* %s */; settings = { COMPILER_FLAGS = "%s"; }; };\n'
otempl = '%s /* %s */ = { isa = PBXBuildFile; fileRef = %s /* %s */;};\n'
for t in self.build.targets.values():
for dep in t.get_external_deps():
if isinstance(dep, dependencies.AppleFrameworks):
for f in dep.frameworks:
self.ofile.write('%s /* %s.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = %s /* %s.framework */; };\n' % (self.native_frameworks[f], f, self.native_frameworks_fileref[f], f))
for s in t.sources:
if isinstance(s, mesonlib.File):
s = s.fname
if isinstance(s, str):
s = os.path.join(t.subdir, s)
idval = self.buildmap[s]
fullpath = os.path.join(self.environment.get_source_dir(), s)
fileref = self.filemap[s]
fullpath2 = fullpath
compiler_args = ''
self.ofile.write(templ % (idval, fullpath, fileref, fullpath2, compiler_args))
for o in t.objects:
o = os.path.join(t.subdir, o)
idval = self.buildmap[o]
fileref = self.filemap[o]
fullpath = os.path.join(self.environment.get_source_dir(), o)
fullpath2 = fullpath
self.ofile.write(otempl % (idval, fullpath, fileref, fullpath2))
self.ofile.write('/* End PBXBuildFile section */\n')
def generate_pbx_build_style(self):
self.ofile.write('\n/* Begin PBXBuildStyle section */\n')
for name, idval in self.buildstylemap.items():
self.write_line('%s /* %s */ = {\n' % (idval, name))
self.indent_level += 1
self.write_line('isa = PBXBuildStyle;\n')
self.write_line('buildSettings = {\n')
self.indent_level += 1
self.write_line('COPY_PHASE_STRIP = NO;\n')
self.indent_level -= 1
self.write_line('};\n')
self.write_line('name = "%s";\n' % name)
self.indent_level -= 1
self.write_line('};\n')
self.ofile.write('/* End PBXBuildStyle section */\n')
def generate_pbx_container_item_proxy(self):
self.ofile.write('\n/* Begin PBXContainerItemProxy section */\n')
for t in self.build.targets:
self.write_line('%s /* PBXContainerItemProxy */ = {' % self.containerproxy_map[t])
self.indent_level += 1
self.write_line('isa = PBXContainerItemProxy;')
self.write_line('containerPortal = %s /* Project object */;' % self.project_uid)
self.write_line('proxyType = 1;')
self.write_line('remoteGlobalIDString = %s;' % self.native_targets[t])
self.write_line('remoteInfo = "%s";' % t)
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End PBXContainerItemProxy section */\n')
def generate_pbx_file_reference(self):
self.ofile.write('\n/* Begin PBXFileReference section */\n')
for t in self.build.targets.values():
for dep in t.get_external_deps():
if isinstance(dep, dependencies.AppleFrameworks):
for f in dep.frameworks:
self.ofile.write('%s /* %s.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = %s.framework; path = System/Library/Frameworks/%s.framework; sourceTree = SDKROOT; };\n' % (self.native_frameworks_fileref[f], f, f, f))
src_templ = '%s /* %s */ = { isa = PBXFileReference; explicitFileType = "%s"; fileEncoding = 4; name = "%s"; path = "%s"; sourceTree = SOURCE_ROOT; };\n'
for fname, idval in self.filemap.items():
fullpath = os.path.join(self.environment.get_source_dir(), fname)
xcodetype = self.get_xcodetype(fname)
name = os.path.split(fname)[-1]
path = fname
self.ofile.write(src_templ % (idval, fullpath, xcodetype, name, path))
target_templ = '%s /* %s */ = { isa = PBXFileReference; explicitFileType = "%s"; path = %s; refType = %d; sourceTree = BUILT_PRODUCTS_DIR; };\n'
for tname, idval in self.target_filemap.items():
t = self.build.targets[tname]
fname = t.get_filename()
reftype = 0
if isinstance(t, build.Executable):
typestr = 'compiled.mach-o.executable'
path = fname
elif isinstance(t, build.SharedLibrary):
typestr = self.get_xcodetype('dummy.dylib')
path = fname
else:
typestr = self.get_xcodetype(fname)
path = '"%s"' % t.get_filename()
self.ofile.write(target_templ % (idval, tname, typestr, path, reftype))
self.ofile.write('/* End PBXFileReference section */\n')
def generate_pbx_frameworks_buildphase(self):
for tname, t in self.build.targets.items():
self.ofile.write('\n/* Begin PBXFrameworksBuildPhase section */\n')
self.indent_level += 1
self.write_line('%s /* %s */ = {\n' % (t.buildphasemap['Frameworks'], 'Frameworks'))
self.indent_level += 1
self.write_line('isa = PBXFrameworksBuildPhase;\n')
self.write_line('buildActionMask = %s;\n' % (2147483647))
self.write_line('files = (\n')
self.indent_level += 1
for dep in t.get_external_deps():
if isinstance(dep, dependencies.AppleFrameworks):
for f in dep.frameworks:
self.write_line('%s /* %s.framework in Frameworks */,\n' % (self.native_frameworks[f], f))
self.indent_level -= 1
self.write_line(');\n')
self.write_line('runOnlyForDeploymentPostprocessing = 0;\n')
self.indent_level -= 1
self.write_line('};\n')
self.ofile.write('/* End PBXFrameworksBuildPhase section */\n')
def generate_pbx_group(self):
groupmap = {}
target_src_map = {}
for t in self.build.targets:
groupmap[t] = self.gen_id()
target_src_map[t] = self.gen_id()
self.ofile.write('\n/* Begin PBXGroup section */\n')
sources_id = self.gen_id()
resources_id = self.gen_id()
products_id = self.gen_id()
frameworks_id = self.gen_id()
self.write_line('%s = {' % self.maingroup_id)
self.indent_level += 1
self.write_line('isa = PBXGroup;')
self.write_line('children = (')
self.indent_level += 1
self.write_line('%s /* Sources */,' % sources_id)
self.write_line('%s /* Resources */,' % resources_id)
self.write_line('%s /* Products */,' % products_id)
self.write_line('%s /* Frameworks */,' % frameworks_id)
self.indent_level -= 1
self.write_line(');')
self.write_line('sourceTree = "<group>";')
self.indent_level -= 1
self.write_line('};')
# Sources
self.write_line('%s /* Sources */ = {' % sources_id)
self.indent_level += 1
self.write_line('isa = PBXGroup;')
self.write_line('children = (')
self.indent_level += 1
for t in self.build.targets:
self.write_line('%s /* %s */,' % (groupmap[t], t))
self.indent_level -= 1
self.write_line(');')
self.write_line('name = Sources;')
        self.write_line('sourceTree = "<group>";')
self.indent_level -= 1
self.write_line('};')
self.write_line('%s /* Resources */ = {' % resources_id)
self.indent_level += 1
self.write_line('isa = PBXGroup;')
self.write_line('children = (')
self.write_line(');')
self.write_line('name = Resources;')
self.write_line('sourceTree = "<group>";')
self.indent_level -= 1
self.write_line('};')
self.write_line('%s /* Frameworks */ = {' % frameworks_id)
self.indent_level += 1
self.write_line('isa = PBXGroup;')
self.write_line('children = (')
# write frameworks
self.indent_level += 1
for t in self.build.targets.values():
for dep in t.get_external_deps():
if isinstance(dep, dependencies.AppleFrameworks):
for f in dep.frameworks:
self.write_line('%s /* %s.framework */,\n' % (self.native_frameworks_fileref[f], f))
self.indent_level -= 1
self.write_line(');')
self.write_line('name = Frameworks;')
self.write_line('sourceTree = "<group>";')
self.indent_level -= 1
self.write_line('};')
# Targets
for t in self.build.targets:
self.write_line('%s /* %s */ = {' % (groupmap[t], t))
self.indent_level += 1
self.write_line('isa = PBXGroup;')
self.write_line('children = (')
self.indent_level += 1
self.write_line('%s /* Source files */,' % target_src_map[t])
self.indent_level -= 1
self.write_line(');')
self.write_line('name = "%s";' % t)
self.write_line('sourceTree = "<group>";')
self.indent_level -= 1
self.write_line('};')
self.write_line('%s /* Source files */ = {' % target_src_map[t])
self.indent_level += 1
self.write_line('isa = PBXGroup;')
self.write_line('children = (')
self.indent_level += 1
for s in self.build.targets[t].sources:
s = os.path.join(s.subdir, s.fname)
if isinstance(s, str):
self.write_line('%s /* %s */,' % (self.filemap[s], s))
for o in self.build.targets[t].objects:
o = os.path.join(self.build.targets[t].subdir, o)
self.write_line('%s /* %s */,' % (self.filemap[o], o))
self.indent_level -= 1
self.write_line(');')
self.write_line('name = "Source files";')
self.write_line('sourceTree = "<group>";')
self.indent_level -= 1
self.write_line('};')
# And finally products
self.write_line('%s /* Products */ = {' % products_id)
self.indent_level += 1
self.write_line('isa = PBXGroup;')
self.write_line('children = (')
self.indent_level += 1
for t in self.build.targets:
self.write_line('%s /* %s */,' % (self.target_filemap[t], t))
self.indent_level -= 1
self.write_line(');')
self.write_line('name = Products;')
self.write_line('sourceTree = "<group>";')
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End PBXGroup section */\n')
def generate_pbx_native_target(self):
self.ofile.write('\n/* Begin PBXNativeTarget section */\n')
for tname, idval in self.native_targets.items():
t = self.build.targets[tname]
self.write_line('%s /* %s */ = {' % (idval, tname))
self.indent_level += 1
self.write_line('isa = PBXNativeTarget;')
self.write_line('buildConfigurationList = %s /* Build configuration list for PBXNativeTarget "%s" */;'
% (self.buildconflistmap[tname], tname))
self.write_line('buildPhases = (')
self.indent_level += 1
for bpname, bpval in t.buildphasemap.items():
self.write_line('%s /* %s yyy */,' % (bpval, bpname))
self.indent_level -= 1
self.write_line(');')
self.write_line('buildRules = (')
self.write_line(');')
self.write_line('dependencies = (')
self.indent_level += 1
for lt in self.build.targets[tname].link_targets:
# NOT DOCUMENTED, may need to make different links
# to same target have different targetdependency item.
idval = self.pbx_dep_map[lt.get_id()]
self.write_line('%s /* PBXTargetDependency */,' % idval)
self.indent_level -= 1
self.write_line(");")
self.write_line('name = "%s";' % tname)
self.write_line('productName = "%s";' % tname)
self.write_line('productReference = %s /* %s */;' % (self.target_filemap[tname], tname))
if isinstance(t, build.Executable):
typestr = 'com.apple.product-type.tool'
elif isinstance(t, build.StaticLibrary):
typestr = 'com.apple.product-type.library.static'
elif isinstance(t, build.SharedLibrary):
typestr = 'com.apple.product-type.library.dynamic'
else:
raise MesonException('Unknown target type for %s' % tname)
self.write_line('productType = "%s";' % typestr)
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End PBXNativeTarget section */\n')
def generate_pbx_project(self):
self.ofile.write('\n/* Begin PBXProject section */\n')
self.write_line('%s /* Project object */ = {' % self.project_uid)
self.indent_level += 1
self.write_line('isa = PBXProject;')
self.write_line('attributes = {')
self.indent_level += 1
self.write_line('BuildIndependentTargetsInParallel = YES;')
self.indent_level -= 1
self.write_line('};')
conftempl = 'buildConfigurationList = %s /* build configuration list for PBXProject "%s"*/;'
self.write_line(conftempl % (self.project_conflist, self.build.project_name))
self.write_line('buildSettings = {')
self.write_line('};')
self.write_line('buildStyles = (')
self.indent_level += 1
for name, idval in self.buildstylemap.items():
self.write_line('%s /* %s */,' % (idval, name))
self.indent_level -= 1
self.write_line(');')
self.write_line('compatibilityVersion = "Xcode 3.2";')
self.write_line('hasScannedForEncodings = 0;')
self.write_line('mainGroup = %s;' % self.maingroup_id)
self.write_line('projectDirPath = "%s";' % self.build_to_src)
self.write_line('projectRoot = "";')
self.write_line('targets = (')
self.indent_level += 1
self.write_line('%s /* ALL_BUILD */,' % self.all_id)
self.write_line('%s /* RUN_TESTS */,' % self.test_id)
for t in self.build.targets:
self.write_line('%s /* %s */,' % (self.native_targets[t], t))
self.indent_level -= 1
self.write_line(');')
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End PBXProject section */\n')
def generate_pbx_shell_build_phase(self, test_data):
self.ofile.write('\n/* Begin PBXShellScriptBuildPhase section */\n')
self.write_line('%s = {' % self.test_command_id)
self.indent_level += 1
self.write_line('isa = PBXShellScriptBuildPhase;')
self.write_line('buildActionMask = 2147483647;')
self.write_line('files = (')
self.write_line(');')
self.write_line('inputPaths = (')
self.write_line(');')
self.write_line('outputPaths = (')
self.write_line(');')
self.write_line('runOnlyForDeploymentPostprocessing = 0;')
self.write_line('shellPath = /bin/sh;')
script_root = self.environment.get_script_dir()
test_script = os.path.join(script_root, 'meson_test.py')
cmd = mesonlib.python_command + [test_script, test_data, '--wd', self.environment.get_build_dir()]
cmdstr = ' '.join(["'%s'" % i for i in cmd])
self.write_line('shellScript = "%s";' % cmdstr)
self.write_line('showEnvVarsInLog = 0;')
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End PBXShellScriptBuildPhase section */\n')
def generate_pbx_sources_build_phase(self):
self.ofile.write('\n/* Begin PBXSourcesBuildPhase section */\n')
for name, phase_id in self.source_phase.items():
t = self.build.targets[name]
self.write_line('%s /* Sources */ = {' % (t.buildphasemap[name]))
self.indent_level += 1
self.write_line('isa = PBXSourcesBuildPhase;')
self.write_line('buildActionMask = 2147483647;')
self.write_line('files = (')
self.indent_level += 1
for s in self.build.targets[name].sources:
s = os.path.join(s.subdir, s.fname)
if not self.environment.is_header(s):
self.write_line('%s /* %s */,' % (self.buildmap[s], os.path.join(self.environment.get_source_dir(), s)))
self.indent_level -= 1
self.write_line(');')
self.write_line('runOnlyForDeploymentPostprocessing = 0;')
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End PBXSourcesBuildPhase section */\n')
def generate_pbx_target_dependency(self):
self.ofile.write('\n/* Begin PBXTargetDependency section */\n')
for t in self.build.targets:
idval = self.pbx_dep_map[t] # VERIFY: is this correct?
self.write_line('%s /* PBXTargetDependency */ = {' % idval)
self.indent_level += 1
self.write_line('isa = PBXTargetDependency;')
self.write_line('target = %s /* %s */;' % (self.native_targets[t], t))
self.write_line('targetProxy = %s /* PBXContainerItemProxy */;' % self.containerproxy_map[t])
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End PBXTargetDependency section */\n')
def generate_xc_build_configuration(self):
self.ofile.write('\n/* Begin XCBuildConfiguration section */\n')
# First the setup for the toplevel project.
for buildtype in self.buildtypes:
self.write_line('%s /* %s */ = {' % (self.project_configurations[buildtype], buildtype))
self.indent_level += 1
self.write_line('isa = XCBuildConfiguration;')
self.write_line('buildSettings = {')
self.indent_level += 1
self.write_line('ARCHS = "$(ARCHS_STANDARD_32_64_BIT)";')
self.write_line('ONLY_ACTIVE_ARCH = YES;')
self.write_line('SDKROOT = "macosx";')
self.write_line('SYMROOT = "%s/build";' % self.environment.get_build_dir())
self.indent_level -= 1
self.write_line('};')
self.write_line('name = "%s";' % buildtype)
self.indent_level -= 1
self.write_line('};')
# Then the all target.
for buildtype in self.buildtypes:
self.write_line('%s /* %s */ = {' % (self.buildall_configurations[buildtype], buildtype))
self.indent_level += 1
self.write_line('isa = XCBuildConfiguration;')
self.write_line('buildSettings = {')
self.indent_level += 1
self.write_line('COMBINE_HIDPI_IMAGES = YES;')
self.write_line('GCC_GENERATE_DEBUGGING_SYMBOLS = NO;')
self.write_line('GCC_INLINES_ARE_PRIVATE_EXTERN = NO;')
self.write_line('GCC_OPTIMIZATION_LEVEL = 0;')
self.write_line('GCC_PREPROCESSOR_DEFINITIONS = ("");')
self.write_line('GCC_SYMBOLS_PRIVATE_EXTERN = NO;')
self.write_line('INSTALL_PATH = "";')
self.write_line('OTHER_CFLAGS = " ";')
self.write_line('OTHER_LDFLAGS = " ";')
self.write_line('OTHER_REZFLAGS = "";')
self.write_line('PRODUCT_NAME = ALL_BUILD;')
self.write_line('SECTORDER_FLAGS = "";')
self.write_line('SYMROOT = "%s";' % self.environment.get_build_dir())
self.write_line('USE_HEADERMAP = NO;')
self.write_line('WARNING_CFLAGS = ("-Wmost", "-Wno-four-char-constants", "-Wno-unknown-pragmas", );')
self.indent_level -= 1
self.write_line('};')
self.write_line('name = "%s";' % buildtype)
self.indent_level -= 1
self.write_line('};')
# Then the test target.
for buildtype in self.buildtypes:
self.write_line('%s /* %s */ = {' % (self.test_configurations[buildtype], buildtype))
self.indent_level += 1
self.write_line('isa = XCBuildConfiguration;')
self.write_line('buildSettings = {')
self.indent_level += 1
self.write_line('COMBINE_HIDPI_IMAGES = YES;')
self.write_line('GCC_GENERATE_DEBUGGING_SYMBOLS = NO;')
self.write_line('GCC_INLINES_ARE_PRIVATE_EXTERN = NO;')
self.write_line('GCC_OPTIMIZATION_LEVEL = 0;')
self.write_line('GCC_PREPROCESSOR_DEFINITIONS = ("");')
self.write_line('GCC_SYMBOLS_PRIVATE_EXTERN = NO;')
self.write_line('INSTALL_PATH = "";')
self.write_line('OTHER_CFLAGS = " ";')
self.write_line('OTHER_LDFLAGS = " ";')
self.write_line('OTHER_REZFLAGS = "";')
self.write_line('PRODUCT_NAME = RUN_TESTS;')
self.write_line('SECTORDER_FLAGS = "";')
self.write_line('SYMROOT = "%s";' % self.environment.get_build_dir())
self.write_line('USE_HEADERMAP = NO;')
self.write_line('WARNING_CFLAGS = ("-Wmost", "-Wno-four-char-constants", "-Wno-unknown-pragmas", );')
self.indent_level -= 1
self.write_line('};')
self.write_line('name = "%s";' % buildtype)
self.indent_level -= 1
self.write_line('};')
# Now finally targets.
langnamemap = {'c': 'C', 'cpp': 'CPLUSPLUS', 'objc': 'OBJC', 'objcpp': 'OBJCPLUSPLUS'}
for target_name, target in self.build.targets.items():
for buildtype in self.buildtypes:
dep_libs = []
links_dylib = False
headerdirs = []
for d in target.include_dirs:
for sd in d.incdirs:
cd = os.path.join(d.curdir, sd)
headerdirs.append(os.path.join(self.environment.get_source_dir(), cd))
headerdirs.append(os.path.join(self.environment.get_build_dir(), cd))
for l in target.link_targets:
abs_path = os.path.join(self.environment.get_build_dir(),
l.subdir, buildtype, l.get_filename())
dep_libs.append("'%s'" % abs_path)
if isinstance(l, build.SharedLibrary):
links_dylib = True
if links_dylib:
dep_libs = ['-Wl,-search_paths_first', '-Wl,-headerpad_max_install_names'] + dep_libs
dylib_version = None
if isinstance(target, build.SharedLibrary):
ldargs = ['-dynamiclib', '-Wl,-headerpad_max_install_names'] + dep_libs
install_path = os.path.join(self.environment.get_build_dir(), target.subdir, buildtype)
dylib_version = target.version
else:
ldargs = dep_libs
install_path = ''
if dylib_version is not None:
product_name = target.get_basename() + '.' + dylib_version
else:
product_name = target.get_basename()
ldargs += target.link_args
ldstr = ' '.join(ldargs)
valid = self.buildconfmap[target_name][buildtype]
langargs = {}
for lang in self.environment.coredata.compilers:
if lang not in langnamemap:
continue
gargs = self.build.global_args.get(lang, [])
targs = target.get_extra_args(lang)
args = gargs + targs
if len(args) > 0:
langargs[langnamemap[lang]] = args
symroot = os.path.join(self.environment.get_build_dir(), target.subdir)
self.write_line('%s /* %s */ = {' % (valid, buildtype))
self.indent_level += 1
self.write_line('isa = XCBuildConfiguration;')
self.write_line('buildSettings = {')
self.indent_level += 1
self.write_line('COMBINE_HIDPI_IMAGES = YES;')
if dylib_version is not None:
self.write_line('DYLIB_CURRENT_VERSION = "%s";' % dylib_version)
self.write_line('EXECUTABLE_PREFIX = "%s";' % target.prefix)
if target.suffix == '':
suffix = ''
else:
suffix = '.' + target.suffix
self.write_line('EXECUTABLE_SUFFIX = "%s";' % suffix)
self.write_line('GCC_GENERATE_DEBUGGING_SYMBOLS = YES;')
self.write_line('GCC_INLINES_ARE_PRIVATE_EXTERN = NO;')
self.write_line('GCC_OPTIMIZATION_LEVEL = 0;')
self.write_line('GCC_PREPROCESSOR_DEFINITIONS = ("");')
self.write_line('GCC_SYMBOLS_PRIVATE_EXTERN = NO;')
if len(headerdirs) > 0:
quotedh = ','.join(['"\\"%s\\""' % i for i in headerdirs])
self.write_line('HEADER_SEARCH_PATHS=(%s);' % quotedh)
self.write_line('INSTALL_PATH = "%s";' % install_path)
self.write_line('LIBRARY_SEARCH_PATHS = "";')
if isinstance(target, build.SharedLibrary):
self.write_line('LIBRARY_STYLE = DYNAMIC;')
for langname, args in langargs.items():
argstr = ' '.join(args)
self.write_line('OTHER_%sFLAGS = "%s";' % (langname, argstr))
self.write_line('OTHER_LDFLAGS = "%s";' % ldstr)
self.write_line('OTHER_REZFLAGS = "";')
self.write_line('PRODUCT_NAME = %s;' % product_name)
self.write_line('SECTORDER_FLAGS = "";')
self.write_line('SYMROOT = "%s";' % symroot)
self.write_line('USE_HEADERMAP = NO;')
self.write_line('WARNING_CFLAGS = ("-Wmost", "-Wno-four-char-constants", "-Wno-unknown-pragmas", );')
self.indent_level -= 1
self.write_line('};')
self.write_line('name = "%s";' % buildtype)
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End XCBuildConfiguration section */\n')
def generate_xc_configurationList(self):
self.ofile.write('\n/* Begin XCConfigurationList section */\n')
self.write_line('%s /* Build configuration list for PBXProject "%s" */ = {' % (self.project_conflist, self.build.project_name))
self.indent_level += 1
self.write_line('isa = XCConfigurationList;')
self.write_line('buildConfigurations = (')
self.indent_level += 1
for buildtype in self.buildtypes:
self.write_line('%s /* %s */,' % (self.project_configurations[buildtype], buildtype))
self.indent_level -= 1
self.write_line(');')
self.write_line('defaultConfigurationIsVisible = 0;')
self.write_line('defaultConfigurationName = debug;')
self.indent_level -= 1
self.write_line('};')
# Now the all target
self.write_line('%s /* Build configuration list for PBXAggregateTarget "ALL_BUILD" */ = {' % self.all_buildconf_id)
self.indent_level += 1
self.write_line('isa = XCConfigurationList;')
self.write_line('buildConfigurations = (')
self.indent_level += 1
for buildtype in self.buildtypes:
self.write_line('%s /* %s */,' % (self.buildall_configurations[buildtype], buildtype))
self.indent_level -= 1
self.write_line(');')
self.write_line('defaultConfigurationIsVisible = 0;')
self.write_line('defaultConfigurationName = debug;')
self.indent_level -= 1
self.write_line('};')
# Test target
self.write_line('%s /* Build configuration list for PBXAggregateTarget "ALL_BUILD" */ = {' % self.test_buildconf_id)
self.indent_level += 1
self.write_line('isa = XCConfigurationList;')
self.write_line('buildConfigurations = (')
self.indent_level += 1
for buildtype in self.buildtypes:
self.write_line('%s /* %s */,' % (self.test_configurations[buildtype], buildtype))
self.indent_level -= 1
self.write_line(');')
self.write_line('defaultConfigurationIsVisible = 0;')
self.write_line('defaultConfigurationName = debug;')
self.indent_level -= 1
self.write_line('};')
for target_name in self.build.targets:
listid = self.buildconflistmap[target_name]
self.write_line('%s /* Build configuration list for PBXNativeTarget "%s" */ = {' % (listid, target_name))
self.indent_level += 1
self.write_line('isa = XCConfigurationList;')
self.write_line('buildConfigurations = (')
self.indent_level += 1
typestr = 'debug'
idval = self.buildconfmap[target_name][typestr]
self.write_line('%s /* %s */,' % (idval, typestr))
self.indent_level -= 1
self.write_line(');')
self.write_line('defaultConfigurationIsVisible = 0;')
self.write_line('defaultConfigurationName = "%s";' % typestr)
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End XCConfigurationList section */\n')
def generate_prefix(self):
self.ofile.write('// !$*UTF8*$!\n{\n')
self.indent_level += 1
self.write_line('archiveVersion = 1;\n')
self.write_line('classes = {\n')
self.write_line('};\n')
self.write_line('objectVersion = 46;\n')
self.write_line('objects = {\n')
self.indent_level += 1
def generate_suffix(self):
self.indent_level -= 1
self.write_line('};\n')
self.write_line('rootObject = ' + self.project_uid + ';')
self.indent_level -= 1
self.write_line('}\n')
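# --- Hedged illustration (editor addition, not part of the original backend) ---
# The writer methods above emit pbxproj fragments; an XCBuildConfiguration entry
# produced by generate_xc_build_configuration() comes out roughly like the sketch
# below (the hex object id, target name and build dir are placeholders):
#
#   1DEB928908733DD80010E9CD /* debug */ = {
#       isa = XCBuildConfiguration;
#       buildSettings = {
#           PRODUCT_NAME = mytarget;
#           SYMROOT = "/path/to/builddir/mytarget";
#       };
#       name = "debug";
#   };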
| apache-2.0 | 1,277,606,074,454,850,800 | 46.5 | 273 | 0.546062 | false |
berfinsari/metricbeat | membeat/vendor/github.com/elastic/beats/packetbeat/tests/system/gen/memcache/mc.py | 15 | 1393 |
from contextlib import (contextmanager)
import argparse
import sys
import pylibmc
def parse_args(args=None):
p = argparse.ArgumentParser()
p.add_argument('--protocol', '-p', default='text',
help="choose protocol type. One of text or bin")
p.add_argument('--remote', '-r', default='127.0.0.1:11211',
help="remote server address")
return p.parse_args(sys.argv[1:] if args is None else args)
def connect_tcp(opts=None):
opts = opts or parse_args()
opts.transport = 'tcp'
return connect(opts)
def connect_udp(opts=None):
opts = opts or parse_args()
opts.transport = 'udp'
return connect(opts)
def connect(opts):
if opts.transport == 'udp':
addr = 'udp:' + opts.remote
else:
addr = opts.remote
return pylibmc.Client([addr],
binary=opts.protocol == 'bin')
def make_connect_cmd(con):
def go(opts=None):
mc = con(opts)
try:
yield mc
finally:
mc.disconnect_all()
return contextmanager(go)
def make_run(con):
def go(fn, opts=None):
with con() as mc:
fn(mc)
return go
connection = make_connect_cmd(connect)
tcp_connection = make_connect_cmd(connect_tcp)
udp_connection = make_connect_cmd(connect_udp)
run_tcp = make_run(tcp_connection)
run_udp = make_run(udp_connection)
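# --- Hedged usage sketch (editor addition, not part of the original helper) ---
# Generator scripts in this directory are expected to pass a callable that
# receives a connected pylibmc client; something along these lines, with the
# memcached address coming from the --remote default above:
#
#   def set_and_get(mc):
#       mc.set("key", "value")
#       assert mc.get("key") == "value"
#
#   run_tcp(set_and_get)          # text protocol over TCP
#   with udp_connection() as mc:  # or manage the client directly
#       mc.set("other", "1")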
| gpl-3.0 | 2,342,726,615,677,904,000 | 21.836066 | 67 | 0.607322 | false |
tinkerinestudio/Tinkerine-Suite | TinkerineSuite/pypy/lib-python/2.7/lib2to3/pgen2/driver.py | 98 | 4694 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Modifications:
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Parser driver.
This provides a high-level interface to parse a file into a syntax tree.
"""
__author__ = "Guido van Rossum <[email protected]>"
__all__ = ["Driver", "load_grammar"]
# Python imports
import codecs
import os
import logging
import StringIO
import sys
# Pgen imports
from . import grammar, parse, token, tokenize, pgen
class Driver(object):
def __init__(self, grammar, convert=None, logger=None):
self.grammar = grammar
if logger is None:
logger = logging.getLogger()
self.logger = logger
self.convert = convert
def parse_tokens(self, tokens, debug=False):
"""Parse a series of tokens and return the syntax tree."""
# XXX Move the prefix computation into a wrapper around tokenize.
p = parse.Parser(self.grammar, self.convert)
p.setup()
lineno = 1
column = 0
type = value = start = end = line_text = None
prefix = u""
for quintuple in tokens:
type, value, start, end, line_text = quintuple
if start != (lineno, column):
assert (lineno, column) <= start, ((lineno, column), start)
s_lineno, s_column = start
if lineno < s_lineno:
prefix += "\n" * (s_lineno - lineno)
lineno = s_lineno
column = 0
if column < s_column:
prefix += line_text[column:s_column]
column = s_column
if type in (tokenize.COMMENT, tokenize.NL):
prefix += value
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
continue
if type == token.OP:
type = grammar.opmap[value]
if debug:
self.logger.debug("%s %r (prefix=%r)",
token.tok_name[type], value, prefix)
if p.addtoken(type, value, (prefix, start)):
if debug:
self.logger.debug("Stop.")
break
prefix = ""
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
else:
# We never broke out -- EOF is too soon (how can this happen???)
raise parse.ParseError("incomplete input",
type, value, (prefix, start))
return p.rootnode
def parse_stream_raw(self, stream, debug=False):
"""Parse a stream and return the syntax tree."""
tokens = tokenize.generate_tokens(stream.readline)
return self.parse_tokens(tokens, debug)
def parse_stream(self, stream, debug=False):
"""Parse a stream and return the syntax tree."""
return self.parse_stream_raw(stream, debug)
def parse_file(self, filename, encoding=None, debug=False):
"""Parse a file and return the syntax tree."""
stream = codecs.open(filename, "r", encoding)
try:
return self.parse_stream(stream, debug)
finally:
stream.close()
def parse_string(self, text, debug=False):
"""Parse a string and return the syntax tree."""
tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline)
return self.parse_tokens(tokens, debug)
def load_grammar(gt="Grammar.txt", gp=None,
save=True, force=False, logger=None):
"""Load the grammar (maybe from a pickle)."""
if logger is None:
logger = logging.getLogger()
if gp is None:
head, tail = os.path.splitext(gt)
if tail == ".txt":
tail = ""
gp = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
if force or not _newer(gp, gt):
logger.info("Generating grammar tables from %s", gt)
g = pgen.generate_grammar(gt)
if save:
logger.info("Writing grammar tables to %s", gp)
try:
g.dump(gp)
except IOError, e:
logger.info("Writing failed:"+str(e))
else:
g = grammar.Grammar()
g.load(gp)
return g
def _newer(a, b):
"""Inquire whether file a was written since file b."""
if not os.path.exists(a):
return False
if not os.path.exists(b):
return True
return os.path.getmtime(a) >= os.path.getmtime(b)
| agpl-3.0 | 8,154,849,403,621,065,000 | 32.528571 | 76 | 0.549638 | false |
dragonfi/snowfall | pyglet-1.1.4/tests/font/ADD_FONT.py | 12 | 1603 | #!/usr/bin/env python
'''Test that a font distributed with the application can be displayed.
Four lines of text should be displayed, each in a different variant
(bold/italic/regular) of Action Man at 24pt. The Action Man fonts are
included in the test directory (tests/font) as action_man*.ttf.
Press ESC to end the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import os
import unittest
from pyglet import font
import base_text
base_path = os.path.dirname(__file__)
class TEST_ADD_FONT(base_text.TextTestBase):
font_name = 'Action Man'
def render(self):
font.add_file(os.path.join(base_path, 'action_man.ttf'))
font.add_file(os.path.join(base_path, 'action_man_bold.ttf'))
font.add_file(os.path.join(base_path, 'action_man_italic.ttf'))
font.add_file(os.path.join(base_path, 'action_man_bold_italic.ttf'))
fnt = font.load('Action Man', self.font_size)
fnt_b = font.load('Action Man', self.font_size, bold=True)
fnt_i = font.load('Action Man', self.font_size, italic=True)
fnt_bi = font.load('Action Man', self.font_size, bold=True, italic=True)
h = fnt.ascent - fnt.descent
self.labels = [
font.Text(fnt, 'Action Man', 10, 10 + 3 * h),
font.Text(fnt_i, 'Action Man Italic', 10, 10 + 2 * h),
font.Text(fnt_b, 'Action Man Bold', 10, 10 + h),
font.Text(fnt_bi, 'Action Man Bold Italic', 10, 10)
]
def draw(self):
for label in self.labels:
label.draw()
if __name__ == '__main__':
unittest.main()
| mit | -7,036,868,276,115,667,000 | 29.826923 | 80 | 0.623206 | false |
stscieisenhamer/ginga | ginga/tkw/TkHelp.py | 1 | 1380 | #
# TkHelp.py -- help module for Ginga Tk backend
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
class Timer(object):
"""Abstraction of a GUI-toolkit implemented timer."""
def __init__(self, ival_sec, expire_cb, data=None, tkcanvas=None):
"""Create a timer set to expire after `ival_sec` and which will
call the callable `expire_cb` when it expires.
"""
self.ival_sec = ival_sec
self.cb = expire_cb
self.data = data
self.tkcanvas = tkcanvas
self._timer = None
def start(self, ival_sec=None):
"""Start the timer. If `ival_sec` is not None, it should
specify the time to expiration in seconds.
"""
if ival_sec is None:
ival_sec = self.ival_sec
self.cancel()
# Tk timer set in milliseconds
time_ms = int(ival_sec * 1000.0)
self._timer = self.tkcanvas.after(time_ms, self._redirect_cb)
def _redirect_cb(self):
self._timer = None
self.cb(self)
def cancel(self):
"""Cancel this timer. If the timer is not running, there
is no error.
"""
try:
if self._timer is not None:
self.tkcanvas.after_cancel(self._timer)
self._timer = None
except Exception:
pass
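# --- Hedged usage sketch (editor addition, not part of the original module) ---
# With a Tk canvas in hand (assumed to be named `canvas` below), the timer is
# used roughly as follows:
#
#   def on_expire(timer):
#       print("timer fired", timer.data)
#
#   t = Timer(0.5, on_expire, data="hello", tkcanvas=canvas)
#   t.start()    # schedules via canvas.after(500, ...)
#   t.cancel()   # safe to call even if the timer is not running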
| bsd-3-clause | -5,126,402,470,867,204,000 | 29 | 71 | 0.575362 | false |
12019/cyberflex-shell | tests/utilstest.py | 2 | 1220 | """Unit test for utils.py"""
import utils
import unittest
class APDUCase1Tests(unittest.TestCase):
def setUp(self):
self.a4 = utils.C_APDU("\x00\xa4\x00\x00")
def tearDown(self):
del self.a4
def testCreation(self):
self.assertEqual(0, self.a4.CLA)
self.assertEqual(0xa4, self.a4.INS)
self.assertEqual(0, self.a4.P1)
self.assertEqual(0, self.a4.P2)
def testRender(self):
self.assertEqual("\x00\xa4\x00\x00", self.a4.render())
def testCopy(self):
b0 = utils.C_APDU(self.a4, INS=0xb0)
self.assertEqual("\x00\xb0\x00\x00", b0.render())
def testAssign(self):
self.a4.p2 = 5
self.assertEqual(5, self.a4.P2)
self.assertEqual("\x00\xa4\x00\x05", self.a4.render())
def testCreateSequence(self):
a4_2 = utils.C_APDU(0, 0xa4, 0, 0)
self.assertEqual(self.a4.render(), a4_2.render())
class APDUChainTests(unittest.TestCase):
def testChain(self):
a = utils.R_APDU("abcd\x61\x04")
b = utils.R_APDU("efgh\x90\x00")
c = a.append(b)
self.assertEqual("abcdefgh\x90\x00", c.render())
| gpl-2.0 | -3,415,213,007,315,696,600 | 24.957447 | 62 | 0.57459 | false |
thaumos/ansible | test/units/modules/network/slxos/test_slxos_interface.py | 38 | 4938 | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from units.compat.mock import patch
from units.modules.utils import set_module_args
from ansible.modules.network.slxos import slxos_interface
from .slxos_module import TestSlxosModule, load_fixture
class TestSlxosInterfaceModule(TestSlxosModule):
module = slxos_interface
def setUp(self):
super(TestSlxosInterfaceModule, self).setUp()
self._patch_get_config = patch(
'ansible.modules.network.slxos.slxos_interface.get_config'
)
self._patch_load_config = patch(
'ansible.modules.network.slxos.slxos_interface.load_config'
)
self._patch_exec_command = patch(
'ansible.modules.network.slxos.slxos_interface.exec_command'
)
self._get_config = self._patch_get_config.start()
self._load_config = self._patch_load_config.start()
self._exec_command = self._patch_exec_command.start()
def tearDown(self):
super(TestSlxosInterfaceModule, self).tearDown()
self._patch_get_config.stop()
self._patch_load_config.stop()
self._patch_exec_command.stop()
def load_fixtures(self, commands=None):
config_file = 'slxos_config_config.cfg'
self._get_config.return_value = load_fixture(config_file)
self._load_config.return_value = None
def test_slxos_interface_description(self, *args, **kwargs):
set_module_args(dict(
name='Ethernet 0/2',
description='show version'
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'interface Ethernet 0/2',
'description show version'
],
'changed': True
}
)
def test_slxos_interface_speed(self, *args, **kwargs):
set_module_args(dict(
name='Ethernet 0/2',
speed=1000
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'interface Ethernet 0/2',
'speed 1000'
],
'changed': True
}
)
def test_slxos_interface_mtu(self, *args, **kwargs):
set_module_args(dict(
name='Ethernet 0/2',
mtu=1548
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'interface Ethernet 0/2',
'mtu 1548'
],
'changed': True
}
)
def test_slxos_interface_mtu_out_of_range(self, *args, **kwargs):
set_module_args(dict(
name='Ethernet 0/2',
mtu=15000
))
result = self.execute_module(failed=True)
self.assertEqual(
result,
{
'msg': 'mtu must be between 1548 and 9216',
'failed': True
}
)
def test_slxos_interface_enabled(self, *args, **kwargs):
set_module_args(dict(
name='Ethernet 0/1',
enabled=True
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'interface Ethernet 0/1',
'no shutdown'
],
'changed': True
}
)
def test_slxos_interface_invalid_argument(self, *args, **kwargs):
set_module_args(dict(
name='Ethernet 0/1',
shawshank='Redemption'
))
result = self.execute_module(failed=True)
self.assertEqual(result['failed'], True)
self.assertTrue(re.match(
r'Unsupported parameters for \((basic.py|basic.pyc)\) module: '
'shawshank Supported parameters include: aggregate, '
'delay, description, enabled, mtu, name, neighbors, '
'rx_rate, speed, state, tx_rate',
result['msg']
))
| gpl-3.0 | -7,293,324,537,941,211,000 | 30.858065 | 75 | 0.557716 | false |
Bismarrck/tensorflow | tensorflow/lite/experimental/examples/lstm/unidirectional_sequence_lstm_test.py | 9 | 8637 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.lite.experimental.examples.lstm.tflite_lstm import TFLiteLSTMCell
from tensorflow.lite.python.op_hint import convert_op_hints_to_stubs
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.tools import optimize_for_inference_lib
# Number of steps to train model.
TRAIN_STEPS = 1
CONFIG = tf.ConfigProto(device_count={"GPU": 0})
class UnidirectionalSequenceLstmTest(test_util.TensorFlowTestCase):
def setUp(self):
tf.reset_default_graph()
# Import MNIST dataset
self.mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Define constants
# Unrolled through 28 time steps
self.time_steps = 28
# Rows of 28 pixels
self.n_input = 28
# Learning rate for Adam optimizer
self.learning_rate = 0.001
# MNIST is meant to be classified in 10 classes (0-9).
self.n_classes = 10
# Batch size
self.batch_size = 16
# Lstm Units.
self.num_units = 16
def buildLstmLayer(self):
return tf.nn.rnn_cell.MultiRNNCell([
TFLiteLSTMCell(
self.num_units, use_peepholes=True, forget_bias=0, name="rnn1"),
TFLiteLSTMCell(self.num_units, num_proj=8, forget_bias=0, name="rnn2"),
TFLiteLSTMCell(
self.num_units // 2,
use_peepholes=True,
num_proj=8,
forget_bias=0,
name="rnn3"),
TFLiteLSTMCell(self.num_units, forget_bias=0, name="rnn4")
])
def buildModel(self, lstm_layer, is_dynamic_rnn, is_train):
# Weights and biases for output softmax layer.
out_weights = tf.Variable(
tf.random_normal([self.num_units, self.n_classes]))
out_bias = tf.Variable(tf.random_normal([self.n_classes]))
# input image placeholder
x = tf.placeholder(
"float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")
# For dynamic_rnn, train with dynamic_rnn and inference with static_rnn.
# x is shaped [batch_size,time_steps,num_inputs]
if is_dynamic_rnn:
if is_train:
lstm_input = x
outputs, _ = tf.nn.dynamic_rnn(lstm_layer, lstm_input, dtype="float32")
outputs = tf.unstack(outputs, axis=1)
else:
lstm_input = tf.unstack(x, self.time_steps, 1)
outputs, _ = tf.nn.static_rnn(lstm_layer, lstm_input, dtype="float32")
else:
lstm_input = tf.unstack(x, self.time_steps, 1)
outputs, _ = tf.nn.static_rnn(lstm_layer, lstm_input, dtype="float32")
# Compute logits by multiplying outputs[-1] of shape [batch_size,num_units]
# by the softmax layer's out_weight of shape [num_units,n_classes]
# plus out_bias
prediction = tf.matmul(outputs[-1], out_weights) + out_bias
output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
return x, prediction, output_class
def trainModel(self, x, prediction, output_class, sess):
# input label placeholder
y = tf.placeholder("float", [None, self.n_classes])
# Loss function
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
# Optimization
opt = tf.train.AdamOptimizer(
learning_rate=self.learning_rate).minimize(loss)
# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)
for _ in range(TRAIN_STEPS):
batch_x, batch_y = self.mnist.train.next_batch(
batch_size=self.batch_size, shuffle=False)
batch_x = batch_x.reshape((self.batch_size, self.time_steps,
self.n_input))
sess.run(opt, feed_dict={x: batch_x, y: batch_y})
def saveAndRestoreModel(self, lstm_layer, sess, saver, is_dynamic_rnn):
model_dir = tempfile.mkdtemp()
saver.save(sess, model_dir)
# Reset the graph.
tf.reset_default_graph()
x, prediction, output_class = self.buildModel(
lstm_layer, is_dynamic_rnn, is_train=False)
new_sess = tf.Session(config=CONFIG)
saver = tf.train.Saver()
saver.restore(new_sess, model_dir)
return x, prediction, output_class, new_sess
def getInferenceResult(self, x, output_class, sess):
b1, _ = self.mnist.train.next_batch(batch_size=1)
sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
expected_output = sess.run(output_class, feed_dict={x: sample_input})
frozen_graph = tf.graph_util.convert_variables_to_constants(
sess, sess.graph_def, [output_class.op.name])
return sample_input, expected_output, frozen_graph
def tfliteInvoke(self, graph, test_inputs, outputs):
tf.reset_default_graph()
# Turn the input into placeholder of shape 1
tflite_input = tf.placeholder(
"float", [1, self.time_steps, self.n_input], name="INPUT_IMAGE_LITE")
tf.import_graph_def(graph, name="", input_map={"INPUT_IMAGE": tflite_input})
with tf.Session() as sess:
curr = sess.graph_def
curr = convert_op_hints_to_stubs(graph_def=curr)
curr = optimize_for_inference_lib.optimize_for_inference(
curr, ["INPUT_IMAGE_LITE"], ["OUTPUT_CLASS"],
[tf.float32.as_datatype_enum])
tflite = tf.lite.toco_convert(
curr, [tflite_input], [outputs], allow_custom_ops=False)
interpreter = tf.lite.Interpreter(model_content=tflite)
try:
interpreter.allocate_tensors()
except ValueError:
assert False
input_index = (interpreter.get_input_details()[0]["index"])
interpreter.set_tensor(input_index, test_inputs)
interpreter.invoke()
output_index = (interpreter.get_output_details()[0]["index"])
result = interpreter.get_tensor(output_index)
# Reset all variables so it will not pollute other inferences.
interpreter.reset_all_variables()
return result
def testStaticRnnMultiRnnCell(self):
sess = tf.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildLstmLayer(), is_dynamic_rnn=False, is_train=True)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildLstmLayer(), sess, saver, is_dynamic_rnn=False)
test_inputs, expected_output, frozen_graph = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(frozen_graph, test_inputs, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
def testDynamicRnnMultiRnnCell(self):
sess = tf.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildLstmLayer(), is_dynamic_rnn=True, is_train=True)
self.trainModel(x, prediction, output_class, sess)
# Since we don't yet support OpHints for dynamic, we will load the model
# back in as a static model. This requires the variables to have the same
# names as if they were trained as a static. Thus, we get rid of while/rnn
# names.
variables_to_save = {}
for i in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
op_name = i.name
if op_name.startswith("while/rnn/"):
op_name = op_name.split("while/rnn/")[1]
if op_name.endswith(":0"):
op_name = op_name.split(":0")[0]
variables_to_save[op_name] = i
saver = tf.train.Saver(variables_to_save)
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildLstmLayer(), sess, saver, is_dynamic_rnn=True)
test_inputs, expected_output, frozen_graph = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(frozen_graph, test_inputs, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
if __name__ == "__main__":
test.main()
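# --- Hedged shape sketch (editor addition, not part of the original test) ---
# The readout in buildModel() is a single dense layer on the last LSTM output;
# with the defaults above the shapes work out as (during training, batch_size=16):
#   outputs[-1]  : [batch_size, num_units]  = [16, 16]
#   out_weights  : [num_units, n_classes]   = [16, 10]
#   prediction   = outputs[-1] @ out_weights + out_bias   -> [16, 10]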
| apache-2.0 | 3,389,040,249,005,102,600 | 37.048458 | 81 | 0.672224 | false |
partofthething/home-assistant | homeassistant/components/nissan_leaf/__init__.py | 1 | 17730 | """Support for the Nissan Leaf Carwings/Nissan Connect API."""
import asyncio
from datetime import datetime, timedelta
import logging
import sys
from pycarwings2 import CarwingsError, Session
import voluptuous as vol
from homeassistant.const import CONF_PASSWORD, CONF_REGION, CONF_USERNAME, HTTP_OK
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
_LOGGER = logging.getLogger(__name__)
DOMAIN = "nissan_leaf"
DATA_LEAF = "nissan_leaf_data"
DATA_BATTERY = "battery"
DATA_CHARGING = "charging"
DATA_PLUGGED_IN = "plugged_in"
DATA_CLIMATE = "climate"
DATA_RANGE_AC = "range_ac_on"
DATA_RANGE_AC_OFF = "range_ac_off"
CONF_INTERVAL = "update_interval"
CONF_CHARGING_INTERVAL = "update_interval_charging"
CONF_CLIMATE_INTERVAL = "update_interval_climate"
CONF_VALID_REGIONS = ["NNA", "NE", "NCI", "NMA", "NML"]
CONF_FORCE_MILES = "force_miles"
INITIAL_UPDATE = timedelta(seconds=15)
MIN_UPDATE_INTERVAL = timedelta(minutes=2)
DEFAULT_INTERVAL = timedelta(hours=1)
DEFAULT_CHARGING_INTERVAL = timedelta(minutes=15)
DEFAULT_CLIMATE_INTERVAL = timedelta(minutes=5)
RESTRICTED_BATTERY = 2
RESTRICTED_INTERVAL = timedelta(hours=12)
MAX_RESPONSE_ATTEMPTS = 10
PYCARWINGS2_SLEEP = 30
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_REGION): vol.In(CONF_VALID_REGIONS),
vol.Optional(CONF_INTERVAL, default=DEFAULT_INTERVAL): (
vol.All(cv.time_period, vol.Clamp(min=MIN_UPDATE_INTERVAL))
),
vol.Optional(
CONF_CHARGING_INTERVAL, default=DEFAULT_CHARGING_INTERVAL
): (
vol.All(cv.time_period, vol.Clamp(min=MIN_UPDATE_INTERVAL))
),
vol.Optional(
CONF_CLIMATE_INTERVAL, default=DEFAULT_CLIMATE_INTERVAL
): (
vol.All(cv.time_period, vol.Clamp(min=MIN_UPDATE_INTERVAL))
),
vol.Optional(CONF_FORCE_MILES, default=False): cv.boolean,
}
)
],
)
},
extra=vol.ALLOW_EXTRA,
)
PLATFORMS = ["sensor", "switch", "binary_sensor"]
SIGNAL_UPDATE_LEAF = "nissan_leaf_update"
SERVICE_UPDATE_LEAF = "update"
SERVICE_START_CHARGE_LEAF = "start_charge"
ATTR_VIN = "vin"
UPDATE_LEAF_SCHEMA = vol.Schema({vol.Required(ATTR_VIN): cv.string})
START_CHARGE_LEAF_SCHEMA = vol.Schema({vol.Required(ATTR_VIN): cv.string})
def setup(hass, config):
"""Set up the Nissan Leaf integration."""
async def async_handle_update(service):
"""Handle service to update leaf data from Nissan servers."""
# It would be better if this was changed to use nickname, or
# an entity name rather than a vin.
vin = service.data[ATTR_VIN]
if vin in hass.data[DATA_LEAF]:
data_store = hass.data[DATA_LEAF][vin]
await data_store.async_update_data(utcnow())
else:
_LOGGER.debug("Vin %s not recognised for update", vin)
async def async_handle_start_charge(service):
"""Handle service to start charging."""
# It would be better if this was changed to use nickname, or
# an entity name rather than a vin.
vin = service.data[ATTR_VIN]
if vin in hass.data[DATA_LEAF]:
data_store = hass.data[DATA_LEAF][vin]
# Send the command to request charging is started to Nissan
# servers. If that completes OK then trigger a fresh update to
# pull the charging status from the car after waiting a minute
# for the charging request to reach the car.
result = await hass.async_add_executor_job(data_store.leaf.start_charging)
if result:
_LOGGER.debug("Start charging sent, request updated data in 1 minute")
check_charge_at = utcnow() + timedelta(minutes=1)
data_store.next_update = check_charge_at
async_track_point_in_utc_time(
hass, data_store.async_update_data, check_charge_at
)
else:
_LOGGER.debug("Vin %s not recognised for update", vin)
def setup_leaf(car_config):
"""Set up a car."""
_LOGGER.debug("Logging into You+Nissan...")
username = car_config[CONF_USERNAME]
password = car_config[CONF_PASSWORD]
region = car_config[CONF_REGION]
leaf = None
try:
# This might need to be made async (somehow) causes
# homeassistant to be slow to start
sess = Session(username, password, region)
leaf = sess.get_leaf()
except KeyError:
_LOGGER.error(
"Unable to fetch car details..."
" do you actually have a Leaf connected to your account?"
)
return False
except CarwingsError:
_LOGGER.error(
"An unknown error occurred while connecting to Nissan: %s",
sys.exc_info()[0],
)
return False
_LOGGER.warning(
"WARNING: This may poll your Leaf too often, and drain the 12V"
" battery. If you drain your cars 12V battery it WILL NOT START"
" as the drive train battery won't connect."
" Don't set the intervals too low"
)
data_store = LeafDataStore(hass, leaf, car_config)
hass.data[DATA_LEAF][leaf.vin] = data_store
for platform in PLATFORMS:
load_platform(hass, platform, DOMAIN, {}, car_config)
async_track_point_in_utc_time(
hass, data_store.async_update_data, utcnow() + INITIAL_UPDATE
)
hass.data[DATA_LEAF] = {}
for car in config[DOMAIN]:
setup_leaf(car)
hass.services.register(
DOMAIN, SERVICE_UPDATE_LEAF, async_handle_update, schema=UPDATE_LEAF_SCHEMA
)
hass.services.register(
DOMAIN,
SERVICE_START_CHARGE_LEAF,
async_handle_start_charge,
schema=START_CHARGE_LEAF_SCHEMA,
)
return True
class LeafDataStore:
"""Nissan Leaf Data Store."""
def __init__(self, hass, leaf, car_config):
"""Initialise the data store."""
self.hass = hass
self.leaf = leaf
self.car_config = car_config
self.force_miles = car_config[CONF_FORCE_MILES]
self.data = {}
self.data[DATA_CLIMATE] = False
self.data[DATA_BATTERY] = 0
self.data[DATA_CHARGING] = False
self.data[DATA_RANGE_AC] = 0
self.data[DATA_RANGE_AC_OFF] = 0
self.data[DATA_PLUGGED_IN] = False
self.next_update = None
self.last_check = None
self.request_in_progress = False
# Timestamp of last successful response from battery or climate.
self.last_battery_response = None
self.last_climate_response = None
self._remove_listener = None
async def async_update_data(self, now):
"""Update data from nissan leaf."""
# Prevent against a previously scheduled update and an ad-hoc update
# started from an update from both being triggered.
if self._remove_listener:
self._remove_listener()
self._remove_listener = None
# Clear next update whilst this update is underway
self.next_update = None
await self.async_refresh_data(now)
self.next_update = self.get_next_interval()
_LOGGER.debug("Next update=%s", self.next_update)
self._remove_listener = async_track_point_in_utc_time(
self.hass, self.async_update_data, self.next_update
)
def get_next_interval(self):
"""Calculate when the next update should occur."""
base_interval = self.car_config[CONF_INTERVAL]
climate_interval = self.car_config[CONF_CLIMATE_INTERVAL]
charging_interval = self.car_config[CONF_CHARGING_INTERVAL]
# The 12V battery is used when communicating with Nissan servers.
# The 12V battery is charged from the traction battery when not
# connected and when the traction battery has enough charge. To
# avoid draining the 12V battery we shall restrict the update
# frequency if low battery detected.
if (
self.last_battery_response is not None
and self.data[DATA_CHARGING] is False
and self.data[DATA_BATTERY] <= RESTRICTED_BATTERY
):
_LOGGER.debug(
"Low battery so restricting refresh frequency (%s)", self.leaf.nickname
)
interval = RESTRICTED_INTERVAL
else:
intervals = [base_interval]
if self.data[DATA_CHARGING]:
intervals.append(charging_interval)
if self.data[DATA_CLIMATE]:
intervals.append(climate_interval)
interval = min(intervals)
return utcnow() + interval
async def async_refresh_data(self, now):
"""Refresh the leaf data and update the datastore."""
if self.request_in_progress:
_LOGGER.debug("Refresh currently in progress for %s", self.leaf.nickname)
return
_LOGGER.debug("Updating Nissan Leaf Data")
self.last_check = datetime.today()
self.request_in_progress = True
server_response = await self.async_get_battery()
if server_response is not None:
_LOGGER.debug("Server Response: %s", server_response.__dict__)
if server_response.answer["status"] == HTTP_OK:
self.data[DATA_BATTERY] = server_response.battery_percent
# pycarwings2 library doesn't always provide cruising ranges
# so we have to check if they exist before we can use them.
# Root cause: the nissan servers don't always send the data.
if hasattr(server_response, "cruising_range_ac_on_km"):
self.data[DATA_RANGE_AC] = server_response.cruising_range_ac_on_km
else:
self.data[DATA_RANGE_AC] = None
if hasattr(server_response, "cruising_range_ac_off_km"):
self.data[
DATA_RANGE_AC_OFF
] = server_response.cruising_range_ac_off_km
else:
self.data[DATA_RANGE_AC_OFF] = None
self.data[DATA_PLUGGED_IN] = server_response.is_connected
self.data[DATA_CHARGING] = server_response.is_charging
async_dispatcher_send(self.hass, SIGNAL_UPDATE_LEAF)
self.last_battery_response = utcnow()
# Climate response only updated if battery data updated first.
if server_response is not None:
try:
climate_response = await self.async_get_climate()
if climate_response is not None:
_LOGGER.debug(
"Got climate data for Leaf: %s", climate_response.__dict__
)
self.data[DATA_CLIMATE] = climate_response.is_hvac_running
self.last_climate_response = utcnow()
except CarwingsError:
_LOGGER.error("Error fetching climate info")
self.request_in_progress = False
async_dispatcher_send(self.hass, SIGNAL_UPDATE_LEAF)
@staticmethod
def _extract_start_date(battery_info):
"""Extract the server date from the battery response."""
try:
return battery_info.answer["BatteryStatusRecords"]["OperationDateAndTime"]
except KeyError:
return None
async def async_get_battery(self):
"""Request battery update from Nissan servers."""
try:
# Request battery update from the car
_LOGGER.debug("Requesting battery update, %s", self.leaf.vin)
request = await self.hass.async_add_executor_job(self.leaf.request_update)
if not request:
_LOGGER.error("Battery update request failed")
return None
for attempt in range(MAX_RESPONSE_ATTEMPTS):
_LOGGER.debug(
"Waiting %s seconds for battery update (%s) (%s)",
PYCARWINGS2_SLEEP,
self.leaf.vin,
attempt,
)
await asyncio.sleep(PYCARWINGS2_SLEEP)
# We don't use the response from get_status_from_update
# apart from knowing that the car has responded saying it
# has given the latest battery status to Nissan.
check_result_info = await self.hass.async_add_executor_job(
self.leaf.get_status_from_update, request
)
if check_result_info is not None:
# Get the latest battery status from Nissan servers.
# This has the SOC in it.
server_info = await self.hass.async_add_executor_job(
self.leaf.get_latest_battery_status
)
return server_info
_LOGGER.debug(
"%s attempts exceeded return latest data from server",
MAX_RESPONSE_ATTEMPTS,
)
# Get the latest data from the Nissan servers; even though
# it may be out of date, it's better than nothing.
server_info = await self.hass.async_add_executor_job(
self.leaf.get_latest_battery_status
)
return server_info
except CarwingsError:
_LOGGER.error("An error occurred getting battery status")
return None
except KeyError:
_LOGGER.error("An error occurred parsing response from server")
return None
async def async_get_climate(self):
"""Request climate data from Nissan servers."""
try:
return await self.hass.async_add_executor_job(
self.leaf.get_latest_hvac_status
)
except CarwingsError:
_LOGGER.error(
"An error occurred communicating with the car %s", self.leaf.vin
)
return None
async def async_set_climate(self, toggle):
"""Set climate control mode via Nissan servers."""
climate_result = None
if toggle:
_LOGGER.debug("Requesting climate turn on for %s", self.leaf.vin)
set_function = self.leaf.start_climate_control
result_function = self.leaf.get_start_climate_control_result
else:
_LOGGER.debug("Requesting climate turn off for %s", self.leaf.vin)
set_function = self.leaf.stop_climate_control
result_function = self.leaf.get_stop_climate_control_result
request = await self.hass.async_add_executor_job(set_function)
for attempt in range(MAX_RESPONSE_ATTEMPTS):
if attempt > 0:
_LOGGER.debug(
"Climate data not in yet (%s) (%s). Waiting (%s) seconds",
self.leaf.vin,
attempt,
PYCARWINGS2_SLEEP,
)
await asyncio.sleep(PYCARWINGS2_SLEEP)
climate_result = await self.hass.async_add_executor_job(
result_function, request
)
if climate_result is not None:
break
if climate_result is not None:
_LOGGER.debug("Climate result: %s", climate_result.__dict__)
async_dispatcher_send(self.hass, SIGNAL_UPDATE_LEAF)
return climate_result.is_hvac_running == toggle
_LOGGER.debug("Climate result not returned by Nissan servers")
return False
class LeafEntity(Entity):
"""Base class for Nissan Leaf entity."""
def __init__(self, car):
"""Store LeafDataStore upon init."""
self.car = car
def log_registration(self):
"""Log registration."""
_LOGGER.debug(
"Registered %s integration for VIN %s",
self.__class__.__name__,
self.car.leaf.vin,
)
@property
def device_state_attributes(self):
"""Return default attributes for Nissan leaf entities."""
return {
"next_update": self.car.next_update,
"last_attempt": self.car.last_check,
"updated_on": self.car.last_battery_response,
"update_in_progress": self.car.request_in_progress,
"vin": self.car.leaf.vin,
}
async def async_added_to_hass(self):
"""Register callbacks."""
self.log_registration()
self.async_on_remove(
async_dispatcher_connect(
self.car.hass, SIGNAL_UPDATE_LEAF, self._update_callback
)
)
@callback
def _update_callback(self):
"""Update the state."""
self.async_schedule_update_ha_state(True)
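# --- Hedged illustration (editor addition, not part of the original integration) ---
# get_next_interval() picks the smallest applicable polling interval; with the
# defaults defined above this works out roughly to:
#   idle and unplugged                 -> update_interval           (1 hour)
#   charging                           -> update_interval_charging  (15 minutes)
#   climate control running            -> update_interval_climate   (5 minutes)
#   battery <= 2% and not charging     -> RESTRICTED_INTERVAL       (12 hours)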
| mit | -5,440,146,955,563,262,000 | 36.326316 | 87 | 0.58291 | false |
zhanghenry/stocks | django/conf/locale/sk/formats.py | 115 | 1173 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j. F Y G:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%y-%m-%d', # '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
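# --- Hedged illustration (editor addition, not part of the original file) ---
# With these settings, datetime(2006, 10, 25, 14, 30) renders roughly as:
#   DATE_FORMAT ('j. F Y')               -> "25. október 2006" (month name supplied by the locale)
#   SHORT_DATETIME_FORMAT ('d.m.Y G:i')  -> "25.10.2006 14:30"
# and "25.10.2006 14:30" is accepted back through DATETIME_INPUT_FORMATS.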
| bsd-3-clause | 872,452,780,063,872,600 | 35.65625 | 77 | 0.590793 | false |
blckshrk/Weboob | weboob/tools/value.py | 1 | 7675 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import re
from .ordereddict import OrderedDict
from .misc import to_unicode
__all__ = ['ValuesDict', 'Value', 'ValueBackendPassword', 'ValueInt', 'ValueFloat', 'ValueBool']
class ValuesDict(OrderedDict):
"""
Ordered dictionarry which can take values in constructor.
>>> ValuesDict(Value('a', label='Test'), ValueInt('b', label='Test2'))
"""
def __init__(self, *values):
OrderedDict.__init__(self)
for v in values:
self[v.id] = v
class Value(object):
"""
Value.
:param label: human readable description of a value
:type label: str
:param required: if ``True``, the backend can't load if the key isn't found in its configuration
:type required: bool
:param default: an optional default value, used when the key is not in config. If there is no default value and the key
is not found in configuration, the **required** parameter is implicitly set
:param masked: if ``True``, the value is masked. It is useful for applications to know if this key is a password
:type masked: bool
:param regexp: if specified, on load the specified value is checked against this regexp, and an error is raised if it doesn't match
:type regexp: str
:param choices: if this parameter is set, the value must be in the list
:param tiny: the value of choices can be entered by a user (as they are small)
:type choices: (list,dict)
"""
def __init__(self, *args, **kwargs):
if len(args) > 0:
self.id = args[0]
else:
self.id = ''
self.label = kwargs.get('label', kwargs.get('description', None))
self.description = kwargs.get('description', kwargs.get('label', None))
self.default = kwargs.get('default', None)
self.regexp = kwargs.get('regexp', None)
self.choices = kwargs.get('choices', None)
if isinstance(self.choices, (list, tuple)):
self.choices = dict(((v, v) for v in self.choices))
self.tiny = kwargs.get('tiny', None)
self.masked = kwargs.get('masked', False)
self.required = kwargs.get('required', self.default is None)
self._value = kwargs.get('value', None)
def check_valid(self, v):
"""
Check if the given value is valid.
:raises: ValueError
"""
if self.default is not None and v == self.default:
return
if v == '' and self.default != '':
raise ValueError('Value can\'t be empty')
if self.regexp is not None and not re.match(self.regexp, unicode(v)):
raise ValueError('Value "%s" does not match regexp "%s"' % (v, self.regexp))
if self.choices is not None and not v in self.choices.iterkeys():
raise ValueError('Value "%s" is not in list: %s' % (
v, ', '.join(unicode(s) for s in self.choices.iterkeys())))
def load(self, domain, v, callbacks):
"""
Load value.
:param domain: what is the domain of this value
:type domain: str
:param v: value to load
:param callbacks: list of weboob callbacks
:type callbacks: dict
"""
return self.set(v)
def set(self, v):
"""
Set a value.
"""
self.check_valid(v)
if isinstance(v, str):
v = to_unicode(v)
self._value = v
def dump(self):
"""
Dump value to be stored.
"""
return self.get()
def get(self):
"""
Get the value.
"""
return self._value
class ValueBackendPassword(Value):
_domain = None
_callbacks = {}
_stored = True
def __init__(self, *args, **kwargs):
kwargs['masked'] = kwargs.pop('masked', True)
self.noprompt = kwargs.pop('noprompt', False)
Value.__init__(self, *args, **kwargs)
def load(self, domain, password, callbacks):
self.check_valid(password)
self._domain = domain
self._value = to_unicode(password)
self._callbacks = callbacks
def check_valid(self, passwd):
if passwd == '':
# always allow empty passwords
return True
return Value.check_valid(self, passwd)
def set(self, passwd):
self.check_valid(passwd)
if passwd is None:
# no change
return
self._value = ''
if passwd == '':
return
if self._domain is None:
self._value = to_unicode(passwd)
return
try:
raise ImportError('Keyrings are disabled (see #706)')
import keyring
keyring.set_password(self._domain, self.id, passwd)
except Exception:
self._value = to_unicode(passwd)
else:
self._value = ''
def dump(self):
if self._stored:
return self._value
else:
return ''
def get(self):
if self._value != '' or self._domain is None:
return self._value
try:
raise ImportError('Keyrings are disabled (see #706)')
import keyring
except ImportError:
passwd = None
else:
passwd = keyring.get_password(self._domain, self.id)
if passwd is not None:
# Password has been read in the keyring.
return to_unicode(passwd)
# Prompt user to enter password by hand.
if not self.noprompt and 'login' in self._callbacks:
self._value = to_unicode(self._callbacks['login'](self._domain, self))
if self._value is None:
self._value = ''
else:
self._stored = False
return self._value
class ValueInt(Value):
def __init__(self, *args, **kwargs):
kwargs['regexp'] = '^\d+$'
Value.__init__(self, *args, **kwargs)
def get(self):
return int(self._value)
class ValueFloat(Value):
def __init__(self, *args, **kwargs):
kwargs['regexp'] = '^[\d\.]+$'
Value.__init__(self, *args, **kwargs)
def check_valid(self, v):
try:
float(v)
except ValueError:
raise ValueError('Value "%s" is not a float value' % v)
def get(self):
return float(self._value)
class ValueBool(Value):
def __init__(self, *args, **kwargs):
kwargs['choices'] = {'y': 'True', 'n': 'False'}
Value.__init__(self, *args, **kwargs)
def check_valid(self, v):
if not isinstance(v, bool) and \
not unicode(v).lower() in ('y', 'yes', '1', 'true', 'on',
'n', 'no', '0', 'false', 'off'):
raise ValueError('Value "%s" is not a boolean (y/n)' % v)
def get(self):
return (isinstance(self._value, bool) and self._value) or \
unicode(self._value).lower() in ('y', 'yes', '1', 'true', 'on')
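# --- Hedged usage sketch (editor addition, not part of the original module) ---
# A module's configuration is typically declared as an ordered set of values;
# something along these lines (the option names are illustrative):
#
#   CONFIG = ValuesDict(
#       Value('login', label='Account login'),
#       ValueBackendPassword('password', label='Password'),
#       ValueBool('verify_ssl', label='Verify SSL certificate', default=True),
#   )
#   CONFIG['verify_ssl'].set('y')
#   CONFIG['verify_ssl'].get()   # -> True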
| agpl-3.0 | 1,109,137,606,076,457,300 | 31.112971 | 135 | 0.571205 | false |
seanwestfall/django | tests/contenttypes_tests/test_models.py | 249 | 12059 | from __future__ import unicode_literals
import warnings
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.views import shortcut
from django.contrib.sites.shortcuts import get_current_site
from django.db.utils import IntegrityError, OperationalError, ProgrammingError
from django.http import Http404, HttpRequest
from django.test import TestCase, mock, override_settings
from django.utils import six
from .models import (
ConcreteModel, FooWithBrokenAbsoluteUrl, FooWithoutUrl, FooWithUrl,
ProxyModel,
)
class ContentTypesTests(TestCase):
def setUp(self):
ContentType.objects.clear_cache()
def tearDown(self):
ContentType.objects.clear_cache()
def test_lookup_cache(self):
"""
Make sure that the content type cache (see ContentTypeManager)
works correctly. Lookups for a particular content type -- by model, ID
or natural key -- should hit the database only on the first lookup.
"""
# At this point, a lookup for a ContentType should hit the DB
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
# A second hit, though, won't hit the DB, nor will a lookup by ID
# or natural key
with self.assertNumQueries(0):
ct = ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(0):
ContentType.objects.get_for_id(ct.id)
with self.assertNumQueries(0):
ContentType.objects.get_by_natural_key('contenttypes',
'contenttype')
# Once we clear the cache, another lookup will again hit the DB
ContentType.objects.clear_cache()
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
# The same should happen with a lookup by natural key
ContentType.objects.clear_cache()
with self.assertNumQueries(1):
ContentType.objects.get_by_natural_key('contenttypes',
'contenttype')
# And a second hit shouldn't hit the DB
with self.assertNumQueries(0):
ContentType.objects.get_by_natural_key('contenttypes',
'contenttype')
def test_get_for_models_empty_cache(self):
# Empty cache.
with self.assertNumQueries(1):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
})
def test_get_for_models_partial_cache(self):
# Partial cache
ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(1):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
})
def test_get_for_models_full_cache(self):
# Full cache
ContentType.objects.get_for_model(ContentType)
ContentType.objects.get_for_model(FooWithUrl)
with self.assertNumQueries(0):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
})
def test_get_for_concrete_model(self):
"""
Make sure the `for_concrete_model` kwarg correctly works
with concrete, proxy and deferred models
"""
concrete_model_ct = ContentType.objects.get_for_model(ConcreteModel)
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(ProxyModel))
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(ConcreteModel,
for_concrete_model=False))
proxy_model_ct = ContentType.objects.get_for_model(ProxyModel,
for_concrete_model=False)
self.assertNotEqual(concrete_model_ct, proxy_model_ct)
# Make sure deferred model are correctly handled
ConcreteModel.objects.create(name="Concrete")
DeferredConcreteModel = ConcreteModel.objects.only('pk').get().__class__
DeferredProxyModel = ProxyModel.objects.only('pk').get().__class__
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(DeferredConcreteModel))
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(DeferredConcreteModel,
for_concrete_model=False))
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(DeferredProxyModel))
self.assertEqual(proxy_model_ct,
ContentType.objects.get_for_model(DeferredProxyModel,
for_concrete_model=False))
def test_get_for_concrete_models(self):
"""
Make sure the `for_concrete_models` kwarg correctly works
with concrete, proxy and deferred models.
"""
concrete_model_ct = ContentType.objects.get_for_model(ConcreteModel)
cts = ContentType.objects.get_for_models(ConcreteModel, ProxyModel)
self.assertEqual(cts, {
ConcreteModel: concrete_model_ct,
ProxyModel: concrete_model_ct,
})
proxy_model_ct = ContentType.objects.get_for_model(ProxyModel,
for_concrete_model=False)
cts = ContentType.objects.get_for_models(ConcreteModel, ProxyModel,
for_concrete_models=False)
self.assertEqual(cts, {
ConcreteModel: concrete_model_ct,
ProxyModel: proxy_model_ct,
})
# Make sure deferred model are correctly handled
ConcreteModel.objects.create(name="Concrete")
DeferredConcreteModel = ConcreteModel.objects.only('pk').get().__class__
DeferredProxyModel = ProxyModel.objects.only('pk').get().__class__
cts = ContentType.objects.get_for_models(DeferredConcreteModel,
DeferredProxyModel)
self.assertEqual(cts, {
DeferredConcreteModel: concrete_model_ct,
DeferredProxyModel: concrete_model_ct,
})
cts = ContentType.objects.get_for_models(DeferredConcreteModel,
DeferredProxyModel,
for_concrete_models=False)
self.assertEqual(cts, {
DeferredConcreteModel: concrete_model_ct,
DeferredProxyModel: proxy_model_ct,
})
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_shortcut_view(self):
"""
Check that the shortcut view (used for the admin "view on site"
functionality) returns a complete URL regardless of whether the sites
framework is installed
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithUrl)
obj = FooWithUrl.objects.create(name="john")
with self.modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'}):
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://%s/users/john/" % get_current_site(request).domain,
response._headers.get("location")[1])
with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}):
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://Example.com/users/john/",
response._headers.get("location")[1])
def test_shortcut_view_without_get_absolute_url(self):
"""
Check that the shortcut view (used for the admin "view on site"
functionality) returns 404 when get_absolute_url is not defined.
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithoutUrl)
obj = FooWithoutUrl.objects.create(name="john")
self.assertRaises(Http404, shortcut, request, user_ct.id, obj.id)
def test_shortcut_view_with_broken_get_absolute_url(self):
"""
Check that the shortcut view does not catch an AttributeError raised
by the model's get_absolute_url method.
Refs #8997.
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithBrokenAbsoluteUrl)
obj = FooWithBrokenAbsoluteUrl.objects.create(name="john")
self.assertRaises(AttributeError, shortcut, request, user_ct.id, obj.id)
def test_missing_model(self):
"""
Ensures that displaying content types in admin (or anywhere) doesn't
break on leftover content type records in the DB for which no model
is defined anymore.
"""
ct = ContentType.objects.create(
app_label='contenttypes',
model='OldModel',
)
self.assertEqual(six.text_type(ct), 'OldModel')
self.assertIsNone(ct.model_class())
# Make sure stale ContentTypes can be fetched like any other object.
# Before Django 1.6 this caused a NoneType error in the caching mechanism.
# Instead, just return the ContentType object and let the app detect stale states.
ct_fetched = ContentType.objects.get_for_id(ct.pk)
self.assertIsNone(ct_fetched.model_class())
def test_name_deprecation(self):
"""
ContentType.name has been removed. Test that a warning is emitted when
        creating a ContentType with a `name`, and that the creation does not fail.
"""
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
ContentType.objects.create(
name='Name',
app_label='contenttypes',
model='OldModel',
)
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns[0].message),
"ContentType.name field doesn't exist any longer. Please remove it from your code."
)
self.assertTrue(ContentType.objects.filter(model='OldModel').exists())
@mock.patch('django.contrib.contenttypes.models.ContentTypeManager.get_or_create')
@mock.patch('django.contrib.contenttypes.models.ContentTypeManager.get')
def test_message_if_get_for_model_fails(self, mocked_get, mocked_get_or_create):
"""
Check that `RuntimeError` with nice error message is raised if
`get_for_model` fails because of database errors.
"""
def _test_message(mocked_method):
for ExceptionClass in (IntegrityError, OperationalError, ProgrammingError):
mocked_method.side_effect = ExceptionClass
with self.assertRaisesMessage(
RuntimeError,
"Error creating new content types. Please make sure contenttypes "
"is migrated before trying to migrate apps individually."
):
ContentType.objects.get_for_model(ContentType)
_test_message(mocked_get)
mocked_get.side_effect = ContentType.DoesNotExist
_test_message(mocked_get_or_create)
| bsd-3-clause | 6,557,368,015,281,206,000 | 40.582759 | 95 | 0.615557 | false |
saukrIppl/seahub | seahub/base/accounts.py | 1 | 22247 | # encoding: utf-8
from django import forms
from django.core.mail import send_mail
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.contrib.sites.models import RequestSite
from django.contrib.sites.models import Site
from seahub.auth import login
from registration import signals
import seaserv
from seaserv import ccnet_threaded_rpc, unset_repo_passwd, is_passwd_set, \
seafile_api
from seahub.profile.models import Profile, DetailedProfile
from seahub.utils import is_valid_username, is_user_password_strong, \
clear_token, get_system_admins
from seahub.utils.mail import send_html_email_with_dj_template, MAIL_PRIORITY
try:
from seahub.settings import CLOUD_MODE
except ImportError:
CLOUD_MODE = False
try:
from seahub.settings import MULTI_TENANCY
except ImportError:
MULTI_TENANCY = False
from constance import config
UNUSABLE_PASSWORD = '!' # This will never be a valid hash
class UserManager(object):
def create_user(self, email, password=None, is_staff=False, is_active=False):
"""
Creates and saves a User with given username and password.
"""
# Lowercasing email address to avoid confusion.
email = email.lower()
user = User(email=email)
user.is_staff = is_staff
user.is_active = is_active
user.set_password(password)
user.save()
return self.get(email=email)
def update_role(self, email, role):
"""
        If the user has a role, update it; otherwise create one for the user.
"""
ccnet_threaded_rpc.update_role_emailuser(email, role)
return self.get(email=email)
def create_superuser(self, email, password):
u = self.create_user(email, password, is_staff=True, is_active=True)
return u
def get_superusers(self):
"""Return a list of admins.
"""
emailusers = ccnet_threaded_rpc.get_superusers()
user_list = []
for e in emailusers:
user = User(e.email)
user.id = e.id
user.is_staff = e.is_staff
user.is_active = e.is_active
user.ctime = e.ctime
user_list.append(user)
return user_list
def get(self, email=None, id=None):
if not email and not id:
            raise User.DoesNotExist, 'User matching query does not exist.'
if email:
emailuser = ccnet_threaded_rpc.get_emailuser(email)
if id:
emailuser = ccnet_threaded_rpc.get_emailuser_by_id(id)
if not emailuser:
            raise User.DoesNotExist, 'User matching query does not exist.'
user = User(emailuser.email)
user.id = emailuser.id
user.enc_password = emailuser.password
user.is_staff = emailuser.is_staff
user.is_active = emailuser.is_active
user.ctime = emailuser.ctime
user.org = emailuser.org
user.source = emailuser.source
user.role = emailuser.role
return user
class UserPermissions(object):
def __init__(self, user):
self.user = user
def can_add_repo(self):
return True
def can_add_group(self):
return True
def can_generate_shared_link(self):
return True
def can_use_global_address_book(self):
return True
def can_view_org(self):
if MULTI_TENANCY:
return True if self.user.org is not None else False
return False if CLOUD_MODE else True
class User(object):
is_staff = False
is_active = False
is_superuser = False
groups = []
org = None
objects = UserManager()
class DoesNotExist(Exception):
pass
def __init__(self, email):
self.username = email
self.email = email
self.permissions = UserPermissions(self)
def __unicode__(self):
return self.username
def is_anonymous(self):
"""
Always returns False. This is a way of comparing User objects to
anonymous users.
"""
return False
def is_authenticated(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return True
def save(self):
emailuser = ccnet_threaded_rpc.get_emailuser(self.username)
if emailuser:
if not hasattr(self, 'password'):
self.set_unusable_password()
if emailuser.source == "DB":
source = "DB"
else:
source = "LDAP"
result_code = ccnet_threaded_rpc.update_emailuser(source,
emailuser.id,
self.password,
int(self.is_staff),
int(self.is_active))
else:
result_code = ccnet_threaded_rpc.add_emailuser(self.username,
self.password,
int(self.is_staff),
int(self.is_active))
# -1 stands for failed; 0 stands for success
return result_code
def delete(self):
"""
        When deleting a user, we should also delete group relationships.
"""
if self.source == "DB":
source = "DB"
else:
source = "LDAP"
owned_repos = []
orgs = ccnet_threaded_rpc.get_orgs_by_user(self.username)
if orgs:
for org in orgs:
owned_repos += seafile_api.get_org_owned_repo_list(org.org_id,
self.username)
else:
owned_repos += seafile_api.get_owned_repo_list(self.username)
for r in owned_repos:
seafile_api.remove_repo(r.id)
clear_token(self.username)
ccnet_threaded_rpc.remove_emailuser(source, self.username)
Profile.objects.delete_profile_by_user(self.username)
def get_and_delete_messages(self):
messages = []
return messages
def set_password(self, raw_password):
if raw_password is None:
self.set_unusable_password()
else:
self.password = '%s' % raw_password
def check_password(self, raw_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
encryption formats behind the scenes.
"""
# Backwards-compatibility check. Older passwords won't include the
# algorithm or salt.
# if '$' not in self.password:
# is_correct = (self.password == \
# get_hexdigest('sha1', '', raw_password))
# return is_correct
return (ccnet_threaded_rpc.validate_emailuser(self.username, raw_password) == 0)
def set_unusable_password(self):
# Sets a value that will never be a valid hash
self.password = UNUSABLE_PASSWORD
def email_user(self, subject, message, from_email=None):
"Sends an e-mail to this User."
send_mail(subject, message, from_email, [self.email])
def freeze_user(self, notify_admins=False):
self.is_active = False
self.save()
if notify_admins:
admins = get_system_admins()
for u in admins:
# save current language
cur_language = translation.get_language()
# get and active user language
user_language = Profile.objects.get_user_language(u.email)
translation.activate(user_language)
send_html_email_with_dj_template(
u.email, dj_template='sysadmin/user_freeze_email.html',
subject=_('Account %(account)s froze on %(site)s.') % {
"account": self.email,
"site": settings.SITE_NAME,
},
context={'user': self.email},
priority=MAIL_PRIORITY.now
)
# restore current language
translation.activate(cur_language)
def remove_repo_passwds(self):
"""
Remove all repo decryption passwords stored on server.
"""
from seahub.utils import get_user_repos
owned_repos, shared_repos, groups_repos, public_repos = get_user_repos(self.email)
def has_repo(repos, repo):
for r in repos:
if repo.id == r.id:
return True
return False
passwd_setted_repos = []
for r in owned_repos + shared_repos + groups_repos + public_repos:
if not has_repo(passwd_setted_repos, r) and r.encrypted and \
is_passwd_set(r.id, self.email):
passwd_setted_repos.append(r)
for r in passwd_setted_repos:
unset_repo_passwd(r.id, self.email)
def remove_org_repo_passwds(self, org_id):
"""
Remove all org repo decryption passwords stored on server.
"""
from seahub.utils import get_user_repos
owned_repos, shared_repos, groups_repos, public_repos = get_user_repos(self.email, org_id=org_id)
def has_repo(repos, repo):
for r in repos:
if repo.id == r.id:
return True
return False
passwd_setted_repos = []
for r in owned_repos + shared_repos + groups_repos + public_repos:
if not has_repo(passwd_setted_repos, r) and r.encrypted and \
is_passwd_set(r.id, self.email):
passwd_setted_repos.append(r)
for r in passwd_setted_repos:
unset_repo_passwd(r.id, self.email)
class AuthBackend(object):
def get_user_with_import(self, username):
emailuser = seaserv.get_emailuser_with_import(username)
if not emailuser:
            raise User.DoesNotExist, 'User matching query does not exist.'
user = User(emailuser.email)
user.id = emailuser.id
user.enc_password = emailuser.password
user.is_staff = emailuser.is_staff
user.is_active = emailuser.is_active
user.ctime = emailuser.ctime
user.org = emailuser.org
user.source = emailuser.source
user.role = emailuser.role
return user
def get_user(self, username):
try:
user = self.get_user_with_import(username)
except User.DoesNotExist:
user = None
return user
def authenticate(self, username=None, password=None):
user = self.get_user(username)
if not user:
return None
if user.check_password(password):
return user
########## Register related
class RegistrationBackend(object):
"""
A registration backend which follows a simple workflow:
1. User signs up, inactive account is created.
2. Email is sent to user with activation link.
3. User clicks activation link, account is now active.
Using this backend requires that
* ``registration`` be listed in the ``INSTALLED_APPS`` setting
(since this backend makes use of models defined in this
application).
* The setting ``ACCOUNT_ACTIVATION_DAYS`` be supplied, specifying
(as an integer) the number of days from registration during
which a user may activate their account (after that period
expires, activation will be disallowed).
* The creation of the templates
``registration/activation_email_subject.txt`` and
``registration/activation_email.txt``, which will be used for
      the activation email. See the notes for this backend's
``register`` method for details regarding these templates.
Additionally, registration can be temporarily closed by adding the
setting ``REGISTRATION_OPEN`` and setting it to
``False``. Omitting this setting, or setting it to ``True``, will
be interpreted as meaning that registration is currently open and
permitted.
Internally, this is accomplished via storing an activation key in
an instance of ``registration.models.RegistrationProfile``. See
that model and its custom manager for full documentation of its
fields and supported operations.
"""
def register(self, request, **kwargs):
"""
Given a username, email address and password, register a new
user account, which will initially be inactive.
Along with the new ``User`` object, a new
``registration.models.RegistrationProfile`` will be created,
tied to that ``User``, containing the activation key which
will be used for this account.
An email will be sent to the supplied email address; this
email should contain an activation link. The email will be
rendered using two templates. See the documentation for
``RegistrationProfile.send_activation_email()`` for
information about these templates and the contexts provided to
them.
After the ``User`` and ``RegistrationProfile`` are created and
the activation email is sent, the signal
``registration.signals.user_registered`` will be sent, with
the new ``User`` as the keyword argument ``user`` and the
class of this backend as the sender.
"""
email, password = kwargs['email'], kwargs['password1']
username = email
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
from registration.models import RegistrationProfile
if bool(config.ACTIVATE_AFTER_REGISTRATION) is True:
            # since the user will be activated after registration,
            # we skip email sending and just create an activated user
new_user = RegistrationProfile.objects.create_active_user(username, email,
password, site,
send_email=False)
# login the user
new_user.backend=settings.AUTHENTICATION_BACKENDS[0]
login(request, new_user)
else:
# create inactive user, user can be activated by admin, or through activated email
new_user = RegistrationProfile.objects.create_inactive_user(username, email,
password, site,
send_email=config.REGISTRATION_SEND_MAIL)
# userid = kwargs['userid']
# if userid:
# ccnet_threaded_rpc.add_binding(new_user.username, userid)
if settings.REQUIRE_DETAIL_ON_REGISTRATION:
name = kwargs.get('name', '')
department = kwargs.get('department', '')
telephone = kwargs.get('telephone', '')
note = kwargs.get('note', '')
Profile.objects.add_or_update(new_user.username, name, note)
DetailedProfile.objects.add_detailed_profile(new_user.username,
department,
telephone)
signals.user_registered.send(sender=self.__class__,
user=new_user,
request=request)
return new_user
def activate(self, request, activation_key):
"""
        Given an activation key, look up and activate the user
account corresponding to that key (if possible).
After successful activation, the signal
``registration.signals.user_activated`` will be sent, with the
newly activated ``User`` as the keyword argument ``user`` and
the class of this backend as the sender.
"""
from registration.models import RegistrationProfile
activated = RegistrationProfile.objects.activate_user(activation_key)
if activated:
signals.user_activated.send(sender=self.__class__,
user=activated,
request=request)
# login the user
activated.backend=settings.AUTHENTICATION_BACKENDS[0]
login(request, activated)
return activated
def registration_allowed(self, request):
"""
Indicate whether account registration is currently permitted,
based on the value of the setting ``REGISTRATION_OPEN``. This
is determined as follows:
* If ``REGISTRATION_OPEN`` is not specified in settings, or is
set to ``True``, registration is permitted.
* If ``REGISTRATION_OPEN`` is both specified and set to
``False``, registration is not permitted.
"""
return getattr(settings, 'REGISTRATION_OPEN', True)
def get_form_class(self, request):
"""
Return the default form class used for user registration.
"""
return RegistrationForm
def post_registration_redirect(self, request, user):
"""
Return the name of the URL to redirect to after successful
user registration.
"""
return ('registration_complete', (), {})
def post_activation_redirect(self, request, user):
"""
Return the name of the URL to redirect to after successful
account activation.
"""
return ('libraries', (), {})
class RegistrationForm(forms.Form):
"""
Form for registering a new user account.
Validates that the requested email is not already in use, and
requires the password to be entered twice to catch typos.
"""
attrs_dict = { 'class': 'input' }
email = forms.CharField(widget=forms.TextInput(attrs=dict(attrs_dict,
maxlength=75)),
label=_("Email address"))
userid = forms.RegexField(regex=r'^\w+$',
max_length=40,
required=False,
widget=forms.TextInput(),
label=_("Username"),
error_messages={ 'invalid': _("This value must be of length 40") })
password1 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
label=_("Password"))
password2 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
label=_("Password (again)"))
def clean_email(self):
email = self.cleaned_data['email']
if not is_valid_username(email):
raise forms.ValidationError(_("Enter a valid email address."))
emailuser = ccnet_threaded_rpc.get_emailuser(email)
if not emailuser:
return self.cleaned_data['email']
else:
            raise forms.ValidationError(_("A user with this email already exists."))
def clean_userid(self):
if self.cleaned_data['userid'] and len(self.cleaned_data['userid']) != 40:
raise forms.ValidationError(_("Invalid user id."))
return self.cleaned_data['userid']
def clean_password1(self):
if 'password1' in self.cleaned_data:
pwd = self.cleaned_data['password1']
if bool(config.USER_STRONG_PASSWORD_REQUIRED) is True:
if bool(is_user_password_strong(pwd)) is True:
return pwd
else:
raise forms.ValidationError(
_(("%(pwd_len)s characters or more, include "
"%(num_types)s types or more of these: "
"letters(case sensitive), numbers, and symbols")) %
{'pwd_len': config.USER_PASSWORD_MIN_LENGTH,
'num_types': config.USER_PASSWORD_STRENGTH_LEVEL})
else:
return pwd
def clean_password2(self):
"""
        Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
if self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise forms.ValidationError(_("The two password fields didn't match."))
return self.cleaned_data
class DetailedRegistrationForm(RegistrationForm):
attrs_dict = { 'class': 'input' }
try:
from seahub.settings import REGISTRATION_DETAILS_MAP
except:
REGISTRATION_DETAILS_MAP = None
if REGISTRATION_DETAILS_MAP:
name_required = REGISTRATION_DETAILS_MAP.get('name', False)
dept_required = REGISTRATION_DETAILS_MAP.get('department', False)
tele_required = REGISTRATION_DETAILS_MAP.get('telephone', False)
note_required = REGISTRATION_DETAILS_MAP.get('note', False)
else:
# Backward compatible
name_required = dept_required = tele_required = note_required = True
name = forms.CharField(widget=forms.TextInput(
attrs=dict(attrs_dict, maxlength=64)), label=_("name"),
required=name_required)
department = forms.CharField(widget=forms.TextInput(
attrs=dict(attrs_dict, maxlength=512)), label=_("department"),
required=dept_required)
telephone = forms.CharField(widget=forms.TextInput(
attrs=dict(attrs_dict, maxlength=100)), label=_("telephone"),
required=tele_required)
note = forms.CharField(widget=forms.TextInput(
attrs=dict(attrs_dict, maxlength=100)), label=_("note"),
required=note_required)
| apache-2.0 | -2,725,696,775,162,005,000 | 35.650741 | 113 | 0.578505 | false |
jredrejo/web2pyreactpoc | languages/zh.py | 152 | 10080 | # coding: utf8
{
'!langcode!': 'zh-tw',
'!langname!': '中文',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"更新" 是選擇性的條件式, 格式就像 "欄位1=\'值\'". 但是 JOIN 的資料不可以使用 update 或是 delete"',
'%s %%{row} deleted': '已刪除 %s 筆',
'%s %%{row} updated': '已更新 %s 筆',
'%s selected': '%s 已選擇',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(something like "it-it")': '(格式類似 "zh-tw")',
'A new version of web2py is available': '新版的 web2py 已發行',
'A new version of web2py is available: %s': '新版的 web2py 已發行: %s',
'about': '關於',
'About': '關於',
'About application': '關於本應用程式',
'Access Control': 'Access Control',
'Admin is disabled because insecure channel': '管理功能(Admin)在不安全連線環境下自動關閉',
'Admin is disabled because unsecure channel': '管理功能(Admin)在不安全連線環境下自動關閉',
'Administrative Interface': 'Administrative Interface',
'Administrative interface': '點此處進入管理介面',
'Administrator Password:': '管理員密碼:',
'Ajax Recipes': 'Ajax Recipes',
'appadmin is disabled because insecure channel': '因為來自非安全通道,管理介面關閉',
'Are you sure you want to delete file "%s"?': '確定要刪除檔案"%s"?',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Are you sure you want to uninstall application "%s"': '確定要移除應用程式 "%s"',
'Are you sure you want to uninstall application "%s"?': '確定要移除應用程式 "%s"',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': '注意: 登入管理帳號需要安全連線(HTTPS)或是在本機連線(localhost).',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': '注意: 因為在測試模式不保證多執行緒安全性,也就是說不可以同時執行多個測試案例',
'ATTENTION: you cannot edit the running application!': '注意:不可編輯正在執行的應用程式!',
'Authentication': '驗證',
'Available Databases and Tables': '可提供的資料庫和資料表',
'Buy this book': 'Buy this book',
'cache': '快取記憶體',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': '不可空白',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': '無法編譯:應用程式中含有錯誤,請除錯後再試一次.',
'Change Password': '變更密碼',
'change password': '變更密碼',
'Check to delete': '打勾代表刪除',
'Check to delete:': '點選以示刪除:',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': '客戶端網址(IP)',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': '控件',
'Controllers': '控件',
'Copyright': '版權所有',
'Create new application': '創建應用程式',
'Current request': '目前網路資料要求(request)',
'Current response': '目前網路資料回應(response)',
'Current session': '目前網路連線資訊(session)',
'customize me!': '請調整我!',
'data uploaded': '資料已上傳',
'Database': '資料庫',
'Database %s select': '已選擇 %s 資料庫',
'Date and Time': '日期和時間',
'db': 'db',
'DB Model': '資料庫模組',
'Delete': '刪除',
'Delete:': '刪除:',
'Demo': 'Demo',
'Deploy on Google App Engine': '配置到 Google App Engine',
'Deployment Recipes': 'Deployment Recipes',
'Description': '描述',
'DESIGN': '設計',
'design': '設計',
'Design for': '設計為了',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': '完成!',
'Download': 'Download',
'E-mail': '電子郵件',
'EDIT': '編輯',
'Edit': '編輯',
'Edit application': '編輯應用程式',
'Edit current record': '編輯當前紀錄',
'edit profile': '編輯設定檔',
'Edit Profile': '編輯設定檔',
'Edit This App': '編輯本應用程式',
'Editing file': '編輯檔案',
'Editing file "%s"': '編輯檔案"%s"',
'Email and SMS': 'Email and SMS',
'Error logs for "%(app)s"': '"%(app)s"的錯誤紀錄',
'Errors': 'Errors',
'export as csv file': '以逗號分隔檔(csv)格式匯出',
'FAQ': 'FAQ',
'First name': '名',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Functions with no doctests will result in [passed] tests.': '沒有 doctests 的函式會顯示 [passed].',
'Group ID': '群組編號',
'Groups': 'Groups',
'Hello World': '嗨! 世界',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': '匯入/匯出',
'Index': '索引',
'insert new': '插入新資料',
'insert new %s': '插入新資料 %s',
'Installed applications': '已安裝應用程式',
'Internal State': '內部狀態',
'Introduction': 'Introduction',
'Invalid action': '不合法的動作(action)',
'Invalid email': '不合法的電子郵件',
'Invalid Query': '不合法的查詢',
'invalid request': '不合法的網路要求(request)',
'Key': 'Key',
'Language files (static strings) updated': '語言檔已更新',
'Languages': '各國語言',
'Last name': '姓',
'Last saved on:': '最後儲存時間:',
'Layout': '網頁配置',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'License for': '軟體版權為',
'Live Chat': 'Live Chat',
'login': '登入',
'Login': '登入',
'Login to the Administrative Interface': '登入到管理員介面',
'logout': '登出',
'Logout': '登出',
'Lost Password': '密碼遺忘',
'Main Menu': '主選單',
'Manage Cache': 'Manage Cache',
'Menu Model': '選單模組(menu)',
'Models': '資料模組',
'Modules': '程式模組',
'My Sites': 'My Sites',
'Name': '名字',
'New Record': '新紀錄',
'new record inserted': '已插入新紀錄',
'next 100 rows': '往後 100 筆',
'NO': '否',
'No databases in this application': '這應用程式不含資料庫',
'Online examples': '點此處進入線上範例',
'or import from csv file': '或是從逗號分隔檔(CSV)匯入',
'Origin': '原文',
'Original/Translation': '原文/翻譯',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Password': '密碼',
"Password fields don't match": '密碼欄不匹配',
'Peeking at file': '選擇檔案',
'Plugins': 'Plugins',
'Powered by': '基於以下技術構建:',
'Preface': 'Preface',
'previous 100 rows': '往前 100 筆',
'Python': 'Python',
'Query:': '查詢:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': '紀錄',
'record does not exist': '紀錄不存在',
'Record ID': '紀錄編號',
'Record id': '紀錄編號',
'Register': '註冊',
'register': '註冊',
'Registration key': '註冊金鑰',
'Remember me (for 30 days)': '記住我(30 天)',
'Reset Password key': '重設密碼',
'Resolve Conflict file': '解決衝突檔案',
'Role': '角色',
'Rows in Table': '在資料表裏的資料',
'Rows selected': '筆資料被選擇',
'Saved file hash:': '檔案雜湊值已紀錄:',
'Semantic': 'Semantic',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': '狀態',
'Static files': '靜態檔案',
'Statistics': 'Statistics',
'Stylesheet': '網頁風格檔',
'submit': 'submit',
'Submit': '傳送',
'Support': 'Support',
'Sure you want to delete this object?': '確定要刪除此物件?',
'Table': '資料表',
'Table name': '資料表名稱',
'Testing application': '測試中的應用程式',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"查詢"是一個像 "db.表1.欄位1==\'值\'" 的條件式. 以"db.表1.欄位1==db.表2.欄位2"方式則相當於執行 JOIN SQL.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': 'The Views',
'There are no controllers': '沒有控件(controllers)',
'There are no models': '沒有資料庫模組(models)',
'There are no modules': '沒有程式模組(modules)',
'There are no static files': '沒有靜態檔案',
'There are no translators, only default language is supported': '沒有翻譯檔,只支援原始語言',
'There are no views': '沒有視圖',
'This App': 'This App',
'This is the %(filename)s template': '這是%(filename)s檔案的樣板(template)',
'Ticket': '問題單',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': '時間標記',
'Twitter': 'Twitter',
'Unable to check for upgrades': '無法做升級檢查',
'Unable to download': '無法下載',
'Unable to download app': '無法下載應用程式',
'unable to parse csv file': '無法解析逗號分隔檔(csv)',
'Update:': '更新:',
'Upload existing application': '更新存在的應用程式',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': '使用下列方式來組合更複雜的條件式, (...)&(...) 代表同時存在的條件, (...)|(...) 代表擇一的條件, ~(...)則代表反向條件.',
'User %(id)s Logged-in': '使用者 %(id)s 已登入',
'User %(id)s Registered': '使用者 %(id)s 已註冊',
'User ID': '使用者編號',
'Verify Password': '驗證密碼',
'Videos': 'Videos',
'View': '視圖',
'Views': '視圖',
'Welcome %s': '歡迎 %s',
'Welcome to web2py': '歡迎使用 web2py',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'YES': '是',
'You are successfully running web2py': 'You are successfully running web2py',
'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs',
'You visited the url %s': 'You visited the url %s',
}
| gpl-3.0 | -6,285,118,582,899,288,000 | 34.480519 | 219 | 0.657516 | false |
wshallum/ansible | lib/ansible/compat/tests/unittest.py | 375 | 1147 | # (c) 2014, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Compat module for Python2.7's unittest module
'''
import sys
# Python 2.6
if sys.version_info < (2, 7):
try:
# Need unittest2 on python2.6
from unittest2 import *
except ImportError:
print('You need unittest2 installed on python2.6.x to run tests')
else:
from unittest import *
| gpl-3.0 | -4,917,376,785,741,402,000 | 30.861111 | 73 | 0.724499 | false |
mozilla/treeherder | treeherder/etl/taskcluster_pulse/parse_route.py | 2 | 1512 | # Code imported from https://github.com/taskcluster/taskcluster/blob/32629c562f8d6f5a6b608a3141a8ee2e0984619f/services/treeherder/src/util/route_parser.js
# A Taskcluster routing key will be in the form:
# treeherder.<version>.<user/project>|<project>.<revision>.<pushLogId/pullRequestId>
# [0] Routing key prefix used for listening to only treeherder relevant messages
# [1] Routing key version
# [2] In the form of user/project for github repos and just project for hg.mozilla.org
# [3] Top level revision for the push
# [4] Pull Request ID (github) or Push Log ID (hg.mozilla.org) of the push
# Note: pushes on a branch on Github would not have a PR ID
# Function extracted from
# https://github.com/taskcluster/taskcluster/blob/32629c562f8d6f5a6b608a3141a8ee2e0984619f/services/treeherder/src/util/route_parser.js
def parseRoute(route):
id = None
owner = None
parsedProject = None
parsedRoute = route.split('.')
project = parsedRoute[2]
if len(project.split('/')) == 2:
[owner, parsedProject] = project.split('/')
else:
parsedProject = project
if len(parsedRoute) == 5:
id = parsedRoute[4]
pushInfo = {
"destination": parsedRoute[0],
"id": int(id) if id else 0,
"project": parsedProject,
"revision": parsedRoute[3],
}
if owner and parsedProject:
pushInfo["owner"] = owner
pushInfo["origin"] = 'github.com'
else:
pushInfo["origin"] = 'hg.mozilla.org'
return pushInfo
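# Illustrative example (hypothetical route string, shown for clarity only):
#   parseRoute('tc-treeherder.v2.myorg/myrepo.abcdef0123456789.12345')
# returns
#   {'destination': 'tc-treeherder', 'id': 12345, 'project': 'myrepo',
#    'revision': 'abcdef0123456789', 'owner': 'myorg', 'origin': 'github.com'}
# A project without a "user/" prefix is treated as an hg.mozilla.org push and
# gets origin 'hg.mozilla.org' with no 'owner' key.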
| mpl-2.0 | -8,495,469,707,879,713,000 | 35.878049 | 154 | 0.684524 | false |
jdreaver/vispy | vispy/visuals/shaders/compiler.py | 20 | 7684 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import re
from ... import gloo
class Compiler(object):
"""
Compiler is used to convert Function and Variable instances into
ready-to-use GLSL code. This class handles name mangling to ensure that
there are no name collisions amongst global objects. The final name of
each object may be retrieved using ``Compiler.__getitem__(obj)``.
Accepts multiple root Functions as keyword arguments. ``compile()`` then
returns a dict of GLSL strings with the same keys.
Example::
# initialize with two main functions
compiler = Compiler(vert=v_func, frag=f_func)
# compile and extract shaders
code = compiler.compile()
v_code = code['vert']
f_code = code['frag']
# look up name of some object
name = compiler[obj]
"""
def __init__(self, namespace=None, **shaders):
# cache of compilation results for each function and variable
if namespace is None:
namespace = {}
self._object_names = namespace # {object: name}
self.shaders = shaders
def __getitem__(self, item):
"""
Return the name of the specified object, if it has been assigned one.
"""
return self._object_names[item]
def compile(self, pretty=True):
""" Compile all code and return a dict {name: code} where the keys
are determined by the keyword arguments passed to __init__().
Parameters
----------
pretty : bool
If True, use a slower method to mangle object names. This produces
GLSL that is more readable.
If False, then the output is mostly unreadable GLSL, but is about
10x faster to compile.
"""
# Authoritative mapping of {obj: name}
self._object_names = {}
#
# 1. collect list of dependencies for each shader
#
# maps {shader_name: [deps]}
self._shader_deps = {}
for shader_name, shader in self.shaders.items():
this_shader_deps = []
self._shader_deps[shader_name] = this_shader_deps
dep_set = set()
for dep in shader.dependencies(sort=True):
# visit each object no more than once per shader
if dep.name is None or dep in dep_set:
continue
this_shader_deps.append(dep)
dep_set.add(dep)
#
# 2. Assign names to all objects.
#
if pretty:
self._rename_objects_pretty()
else:
self._rename_objects_fast()
#
# 3. Now we have a complete namespace; concatenate all definitions
# together in topological order.
#
compiled = {}
obj_names = self._object_names
for shader_name, shader in self.shaders.items():
code = []
for dep in self._shader_deps[shader_name]:
dep_code = dep.definition(obj_names)
if dep_code is not None:
# strip out version pragma if present;
regex = r'#version (\d+)'
m = re.search(regex, dep_code)
if m is not None:
# check requested version
if m.group(1) != '120':
raise RuntimeError("Currently only GLSL #version "
"120 is supported.")
dep_code = re.sub(regex, '', dep_code)
code.append(dep_code)
compiled[shader_name] = '\n'.join(code)
self.code = compiled
return compiled
def _rename_objects_fast(self):
""" Rename all objects quickly to guaranteed-unique names using the
id() of each object.
This produces mostly unreadable GLSL, but is about 10x faster to
compile.
"""
for shader_name, deps in self._shader_deps.items():
for dep in deps:
name = dep.name
if name != 'main':
ext = '_%x' % id(dep)
name = name[:32-len(ext)] + ext
self._object_names[dep] = name
def _rename_objects_pretty(self):
""" Rename all objects like "name_1" to avoid conflicts. Objects are
only renamed if necessary.
This method produces more readable GLSL, but is rather slow.
"""
#
# 1. For each object, add its static names to the global namespace
# and make a list of the shaders used by the object.
#
# {name: obj} mapping for finding unique names
# initialize with reserved keywords.
self._global_ns = dict([(kwd, None) for kwd in gloo.util.KEYWORDS])
# functions are local per-shader
self._shader_ns = dict([(shader, {}) for shader in self.shaders])
# for each object, keep a list of shaders the object appears in
obj_shaders = {}
for shader_name, deps in self._shader_deps.items():
for dep in deps:
# Add static names to namespace
for name in dep.static_names():
self._global_ns[name] = None
obj_shaders.setdefault(dep, []).append(shader_name)
#
# 2. Assign new object names
#
name_index = {}
for obj, shaders in obj_shaders.items():
name = obj.name
if self._name_available(obj, name, shaders):
# hooray, we get to keep this name
self._assign_name(obj, name, shaders)
else:
# boo, find a new name
while True:
index = name_index.get(name, 0) + 1
name_index[name] = index
ext = '_%d' % index
new_name = name[:32-len(ext)] + ext
if self._name_available(obj, new_name, shaders):
self._assign_name(obj, new_name, shaders)
break
def _is_global(self, obj):
""" Return True if *obj* should be declared in the global namespace.
Some objects need to be declared only in per-shader namespaces:
functions, static variables, and const variables may all be given
different definitions in each shader.
"""
# todo: right now we assume all Variables are global, and all
# Functions are local. Is this actually correct? Are there any
# global functions? Are there any local variables?
from .variable import Variable
return isinstance(obj, Variable)
def _name_available(self, obj, name, shaders):
""" Return True if *name* is available for *obj* in *shaders*.
"""
if name in self._global_ns:
return False
shaders = self.shaders if self._is_global(obj) else shaders
for shader in shaders:
if name in self._shader_ns[shader]:
return False
return True
def _assign_name(self, obj, name, shaders):
""" Assign *name* to *obj* in *shaders*.
"""
if self._is_global(obj):
assert name not in self._global_ns
self._global_ns[name] = obj
else:
for shader in shaders:
ns = self._shader_ns[shader]
assert name not in ns
ns[name] = obj
self._object_names[obj] = name
| bsd-3-clause | -2,992,703,531,778,196,000 | 34.247706 | 78 | 0.544768 | false |
RDXT/django-guardian | guardian/south_migrations/0002_auto__add_field_groupobjectpermission_object_pk__add_field_userobjectp.py | 85 | 5650 | # encoding: utf-8
from south.db import db
from south.v2 import SchemaMigration
from guardian.compat import user_model_label
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'GroupObjectPermission.object_pk'
db.add_column('guardian_groupobjectpermission', 'object_pk', self.gf('django.db.models.fields.TextField')(default=''), keep_default=False)
# Adding field 'UserObjectPermission.object_pk'
db.add_column('guardian_userobjectpermission', 'object_pk', self.gf('django.db.models.fields.TextField')(default=''), keep_default=False)
def backwards(self, orm):
# Deleting field 'GroupObjectPermission.object_pk'
db.delete_column('guardian_groupobjectpermission', 'object_pk')
# Deleting field 'UserObjectPermission.object_pk'
db.delete_column('guardian_userobjectpermission', 'object_pk')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': user_model_label.split('.')[-1]},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'guardian.groupobjectpermission': {
'Meta': {'unique_together': "(['group', 'permission', 'content_type', 'object_id'],)", 'object_name': 'GroupObjectPermission'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'object_pk': ('django.db.models.fields.TextField', [], {'default': "''"}),
'permission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Permission']"})
},
'guardian.userobjectpermission': {
'Meta': {'unique_together': "(['user', 'permission', 'content_type', 'object_id'],)", 'object_name': 'UserObjectPermission'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'object_pk': ('django.db.models.fields.TextField', [], {'default': "''"}),
'permission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Permission']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_model_label})
}
}
complete_apps = ['guardian']
| bsd-2-clause | 4,004,785,558,221,671,000 | 65.470588 | 182 | 0.578938 | false |
ed-/solum | solum/tests/deployer/handlers/test_noop.py | 1 | 1963 | # Copyright 2014 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from solum.deployer.handlers import noop as noop_handler
from solum.openstack.common.gettextutils import _
from solum.tests import base
from solum.tests import fakes
from solum.tests import utils
class HandlerTest(base.BaseTestCase):
def setUp(self):
super(HandlerTest, self).setUp()
self.ctx = utils.dummy_context()
@mock.patch('solum.deployer.handlers.noop.LOG')
def test_echo(self, fake_LOG):
noop_handler.Handler().echo({}, 'foo')
fake_LOG.debug.assert_called_once_with(_('%s') % 'foo')
@mock.patch('solum.deployer.handlers.noop.LOG')
def test_deploy(self, fake_LOG):
args = [77, 'created_image_id']
noop_handler.Handler().deploy(self.ctx, *args)
message = 'Deploy %s %s' % tuple(args)
fake_LOG.debug.assert_called_once_with(_("%s") % message)
@mock.patch('solum.objects.registry')
@mock.patch('solum.deployer.handlers.noop.LOG')
def test_destroy(self, fake_LOG, fake_registry):
fake_assembly = fakes.FakeAssembly()
fake_registry.Assembly.get_by_id.return_value = fake_assembly
args = [fake_assembly.id]
noop_handler.Handler().destroy(self.ctx, *args)
fake_assembly.destroy.assert_called_once_with(self.ctx)
message = 'Destroy %s' % tuple(args)
fake_LOG.debug.assert_called_once_with(_("%s") % message)
| apache-2.0 | -4,476,960,257,940,762,600 | 37.490196 | 75 | 0.691798 | false |
jessie935513/omaha | plugins/update/generate_plugin_idls.py | 67 | 3325 | #!/usr/bin/python2.4
#
# Copyright 2007-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""
Generates IDL file for the OneClick ActiveX control from the passed-in IDL
template. The input template is a complete IDL file in all but one respect;
It has one replaceable entry for the CLSID for GoopdateOneClickControl.
We generate a GUID using UUIDGEN.EXE, and write out an IDL with a new CLSID.
"""
import sys
import os
import getopt
import commands
def _GetStatusOutput(cmd):
"""Return (status, output) of executing cmd in a shell."""
if os.name == "nt":
pipe = os.popen(cmd + " 2>&1", 'r')
text = pipe.read()
sts = pipe.close()
if sts is None: sts = 0
if text[-1:] == '\n': text = text[:-1]
return sts, text
else:
return commands.getstatusoutput(cmd)
def _GenerateIDLText(idl_template):
(status, guid) = _GetStatusOutput("uuidgen.exe")
if status != 0:
raise SystemExit("Failed to get GUID: %s" % guid)
return idl_template % guid
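# Illustrative sketch only (the snippet is hypothetical, not the real
# template): the template is expected to contain exactly one "%s" placeholder
# for the control's CLSID, e.g.
#   [ uuid(%s) ]
#   coclass GoopdateOneClickControl { ... };
# _GenerateIDLText() fills that placeholder with a GUID freshly generated by
# uuidgen.exe.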
def _GenerateIDLFile(idl_template_filename, idl_output_filename):
f_in = open(idl_template_filename, 'r')
idl_template = f_in.read()
f_in.close()
idl_output = _GenerateIDLText(idl_template)
f_out = open(idl_output_filename, 'w')
f_out.write("""
// ** AUTOGENERATED FILE. DO NOT HAND-EDIT **
""")
f_out.write(idl_output)
f_out.close()
def _Usage():
"""Prints out script usage information."""
print """
generate_plugin_idls.py: Write out the given IDL file.
Usage:
  generate_plugin_idls.py [--help
| --idl_template_file filename
--idl_output_file filename]
Options:
--help Show this information.
--idl_output_file filename Path/name of output IDL filename.
--idl_template_file filename Path/name of input IDL template.
"""
def _Main():
"""Generates IDL file."""
# use getopt to parse the option and argument list; this may raise, but
# don't catch it
_ARGUMENT_LIST = ["help", "idl_template_file=", "idl_output_file="]
(opts, args) = getopt.getopt(sys.argv[1:], "", _ARGUMENT_LIST)
if not opts or ("--help", "") in opts:
_Usage()
sys.exit()
idl_template_filename = ""
idl_output_filename = ""
for (o, v) in opts:
if o == "--idl_template_file":
idl_template_filename = v
if o == "--idl_output_file":
idl_output_filename = v
# make sure we have work to do
if not idl_template_filename:
raise SystemExit("no idl_template_filename specified")
if not idl_output_filename:
raise SystemExit("no idl_output_filename specified")
_GenerateIDLFile(idl_template_filename, idl_output_filename)
sys.exit()
if __name__ == "__main__":
_Main()
| apache-2.0 | 7,466,912,736,280,997,000 | 27.663793 | 76 | 0.643308 | false |
imsparsh/python-for-android | python3-alpha/extra_modules/bs4/tests/test_tree.py | 46 | 59853 | # -*- coding: utf-8 -*-
"""Tests for Beautiful Soup's tree traversal methods.
The tree traversal methods are the main advantage of using Beautiful
Soup over just using a parser.
Different parsers will build different Beautiful Soup trees given the
same markup, but all Beautiful Soup trees can be traversed with the
methods tested here.
"""
import copy
import pickle
import re
import warnings
from bs4 import BeautifulSoup
from bs4.builder import (
builder_registry,
HTMLParserTreeBuilder,
)
from bs4.element import (
CData,
Doctype,
NavigableString,
SoupStrainer,
Tag,
)
from bs4.testing import (
SoupTest,
skipIf,
)
XML_BUILDER_PRESENT = (builder_registry.lookup("xml") is not None)
LXML_PRESENT = (builder_registry.lookup("lxml") is not None)
class TreeTest(SoupTest):
def assertSelects(self, tags, should_match):
"""Make sure that the given tags have the correct text.
This is used in tests that define a bunch of tags, each
containing a single string, and then select certain strings by
some mechanism.
"""
self.assertEqual([tag.string for tag in tags], should_match)
def assertSelectsIDs(self, tags, should_match):
"""Make sure that the given tags have the correct IDs.
This is used in tests that define a bunch of tags, each
containing a single string, and then select certain strings by
some mechanism.
"""
self.assertEqual([tag['id'] for tag in tags], should_match)
class TestFind(TreeTest):
"""Basic tests of the find() method.
find() just calls find_all() with limit=1, so it's not tested all
    that thoroughly here.
"""
def test_find_tag(self):
soup = self.soup("<a>1</a><b>2</b><a>3</a><b>4</b>")
self.assertEqual(soup.find("b").string, "2")
def test_unicode_text_find(self):
soup = self.soup('<h1>Räksmörgås</h1>')
self.assertEqual(soup.find(text='Räksmörgås'), 'Räksmörgås')
class TestFindAll(TreeTest):
"""Basic tests of the find_all() method."""
def test_find_all_text_nodes(self):
"""You can search the tree for text nodes."""
soup = self.soup("<html>Foo<b>bar</b>\xbb</html>")
# Exact match.
self.assertEqual(soup.find_all(text="bar"), ["bar"])
# Match any of a number of strings.
self.assertEqual(
soup.find_all(text=["Foo", "bar"]), ["Foo", "bar"])
# Match a regular expression.
self.assertEqual(soup.find_all(text=re.compile('.*')),
["Foo", "bar", '\xbb'])
# Match anything.
self.assertEqual(soup.find_all(text=True),
["Foo", "bar", '\xbb'])
def test_find_all_limit(self):
"""You can limit the number of items returned by find_all."""
soup = self.soup("<a>1</a><a>2</a><a>3</a><a>4</a><a>5</a>")
self.assertSelects(soup.find_all('a', limit=3), ["1", "2", "3"])
self.assertSelects(soup.find_all('a', limit=1), ["1"])
self.assertSelects(
soup.find_all('a', limit=10), ["1", "2", "3", "4", "5"])
# A limit of 0 means no limit.
self.assertSelects(
soup.find_all('a', limit=0), ["1", "2", "3", "4", "5"])
def test_calling_a_tag_is_calling_findall(self):
soup = self.soup("<a>1</a><b>2<a id='foo'>3</a></b>")
self.assertSelects(soup('a', limit=1), ["1"])
self.assertSelects(soup.b(id="foo"), ["3"])
class TestFindAllBasicNamespaces(TreeTest):
def test_find_by_namespaced_name(self):
soup = self.soup('<mathml:msqrt>4</mathml:msqrt><a svg:fill="red">')
self.assertEqual("4", soup.find("mathml:msqrt").string)
self.assertEqual("a", soup.find(attrs= { "svg:fill" : "red" }).name)
class TestFindAllByName(TreeTest):
"""Test ways of finding tags by tag name."""
def setUp(self):
super(TreeTest, self).setUp()
self.tree = self.soup("""<a>First tag.</a>
<b>Second tag.</b>
<c>Third <a>Nested tag.</a> tag.</c>""")
def test_find_all_by_tag_name(self):
# Find all the <a> tags.
self.assertSelects(
self.tree.find_all('a'), ['First tag.', 'Nested tag.'])
def test_find_all_on_non_root_element(self):
# You can call find_all on any node, not just the root.
self.assertSelects(self.tree.c.find_all('a'), ['Nested tag.'])
def test_calling_element_invokes_find_all(self):
self.assertSelects(self.tree('a'), ['First tag.', 'Nested tag.'])
def test_find_all_by_tag_strainer(self):
self.assertSelects(
self.tree.find_all(SoupStrainer('a')),
['First tag.', 'Nested tag.'])
def test_find_all_by_tag_names(self):
self.assertSelects(
self.tree.find_all(['a', 'b']),
['First tag.', 'Second tag.', 'Nested tag.'])
def test_find_all_by_tag_dict(self):
self.assertSelects(
self.tree.find_all({'a' : True, 'b' : True}),
['First tag.', 'Second tag.', 'Nested tag.'])
def test_find_all_by_tag_re(self):
self.assertSelects(
self.tree.find_all(re.compile('^[ab]$')),
['First tag.', 'Second tag.', 'Nested tag.'])
def test_find_all_with_tags_matching_method(self):
# You can define an oracle method that determines whether
# a tag matches the search.
def id_matches_name(tag):
return tag.name == tag.get('id')
tree = self.soup("""<a id="a">Match 1.</a>
<a id="1">Does not match.</a>
<b id="b">Match 2.</a>""")
self.assertSelects(
tree.find_all(id_matches_name), ["Match 1.", "Match 2."])
class TestFindAllByAttribute(TreeTest):
def test_find_all_by_attribute_name(self):
# You can pass in keyword arguments to find_all to search by
# attribute.
tree = self.soup("""
<a id="first">Matching a.</a>
<a id="second">
Non-matching <b id="first">Matching b.</b>a.
</a>""")
self.assertSelects(tree.find_all(id='first'),
["Matching a.", "Matching b."])
def test_find_all_by_attribute_dict(self):
# You can pass in a dictionary as the argument 'attrs'. This
# lets you search for attributes like 'name' (a fixed argument
# to find_all) and 'class' (a reserved word in Python.)
tree = self.soup("""
<a name="name1" class="class1">Name match.</a>
<a name="name2" class="class2">Class match.</a>
<a name="name3" class="class3">Non-match.</a>
<name1>A tag called 'name1'.</name1>
""")
# This doesn't do what you want.
self.assertSelects(tree.find_all(name='name1'),
["A tag called 'name1'."])
# This does what you want.
self.assertSelects(tree.find_all(attrs={'name' : 'name1'}),
["Name match."])
# Passing class='class2' would cause a syntax error.
self.assertSelects(tree.find_all(attrs={'class' : 'class2'}),
["Class match."])
def test_find_all_by_class(self):
# Passing in a string to 'attrs' will search the CSS class.
tree = self.soup("""
<a class="1">Class 1.</a>
<a class="2">Class 2.</a>
<b class="1">Class 1.</b>
<c class="3 4">Class 3 and 4.</c>
""")
self.assertSelects(tree.find_all('a', '1'), ['Class 1.'])
self.assertSelects(tree.find_all(attrs='1'), ['Class 1.', 'Class 1.'])
self.assertSelects(tree.find_all('c', '3'), ['Class 3 and 4.'])
self.assertSelects(tree.find_all('c', '4'), ['Class 3 and 4.'])
def test_find_by_class_when_multiple_classes_present(self):
tree = self.soup("<gar class='foo bar'>Found it</gar>")
attrs = { 'class' : re.compile("o") }
f = tree.find_all("gar", attrs=attrs)
self.assertSelects(f, ["Found it"])
f = tree.find_all("gar", re.compile("a"))
self.assertSelects(f, ["Found it"])
# Since the class is not the string "foo bar", but the two
# strings "foo" and "bar", this will not find anything.
attrs = { 'class' : re.compile("o b") }
f = tree.find_all("gar", attrs=attrs)
self.assertSelects(f, [])
def test_find_all_with_non_dictionary_for_attrs_finds_by_class(self):
soup = self.soup("<a class='bar'>Found it</a>")
self.assertSelects(soup.find_all("a", re.compile("ba")), ["Found it"])
def big_attribute_value(value):
return len(value) > 3
self.assertSelects(soup.find_all("a", big_attribute_value), [])
def small_attribute_value(value):
return len(value) <= 3
self.assertSelects(
soup.find_all("a", small_attribute_value), ["Found it"])
def test_find_all_with_string_for_attrs_finds_multiple_classes(self):
soup = self.soup('<a class="foo bar"></a><a class="foo"></a>')
a, a2 = soup.find_all("a")
self.assertEqual([a, a2], soup.find_all("a", "foo"))
self.assertEqual([a], soup.find_all("a", "bar"))
# If you specify the attribute as a string that contains a
# space, only that specific value will be found.
self.assertEqual([a], soup.find_all("a", "foo bar"))
self.assertEqual([], soup.find_all("a", "bar foo"))
def test_find_all_by_attribute_soupstrainer(self):
tree = self.soup("""
<a id="first">Match.</a>
<a id="second">Non-match.</a>""")
strainer = SoupStrainer(attrs={'id' : 'first'})
self.assertSelects(tree.find_all(strainer), ['Match.'])
def test_find_all_with_missing_atribute(self):
# You can pass in None as the value of an attribute to find_all.
# This will match tags that do not have that attribute set.
tree = self.soup("""<a id="1">ID present.</a>
<a>No ID present.</a>
<a id="">ID is empty.</a>""")
self.assertSelects(tree.find_all('a', id=None), ["No ID present."])
def test_find_all_with_defined_attribute(self):
# You can pass in None as the value of an attribute to find_all.
# This will match tags that have that attribute set to any value.
tree = self.soup("""<a id="1">ID present.</a>
<a>No ID present.</a>
<a id="">ID is empty.</a>""")
self.assertSelects(
tree.find_all(id=True), ["ID present.", "ID is empty."])
def test_find_all_with_numeric_attribute(self):
# If you search for a number, it's treated as a string.
tree = self.soup("""<a id=1>Unquoted attribute.</a>
<a id="1">Quoted attribute.</a>""")
expected = ["Unquoted attribute.", "Quoted attribute."]
self.assertSelects(tree.find_all(id=1), expected)
self.assertSelects(tree.find_all(id="1"), expected)
def test_find_all_with_list_attribute_values(self):
# You can pass a list of attribute values instead of just one,
# and you'll get tags that match any of the values.
tree = self.soup("""<a id="1">1</a>
<a id="2">2</a>
<a id="3">3</a>
<a>No ID.</a>""")
self.assertSelects(tree.find_all(id=["1", "3", "4"]),
["1", "3"])
def test_find_all_with_regular_expression_attribute_value(self):
# You can pass a regular expression as an attribute value, and
# you'll get tags whose values for that attribute match the
# regular expression.
tree = self.soup("""<a id="a">One a.</a>
<a id="aa">Two as.</a>
<a id="ab">Mixed as and bs.</a>
<a id="b">One b.</a>
<a>No ID.</a>""")
self.assertSelects(tree.find_all(id=re.compile("^a+$")),
["One a.", "Two as."])
def test_find_by_name_and_containing_string(self):
soup = self.soup("<b>foo</b><b>bar</b><a>foo</a>")
a = soup.a
self.assertEqual([a], soup.find_all("a", text="foo"))
self.assertEqual([], soup.find_all("a", text="bar"))
self.assertEqual([], soup.find_all("a", text="bar"))
def test_find_by_name_and_containing_string_when_string_is_buried(self):
soup = self.soup("<a>foo</a><a><b><c>foo</c></b></a>")
self.assertEqual(soup.find_all("a"), soup.find_all("a", text="foo"))
def test_find_by_attribute_and_containing_string(self):
soup = self.soup('<b id="1">foo</b><a id="2">foo</a>')
a = soup.a
self.assertEqual([a], soup.find_all(id=2, text="foo"))
self.assertEqual([], soup.find_all(id=1, text="bar"))
class TestIndex(TreeTest):
"""Test Tag.index"""
def test_index(self):
tree = self.soup("""<wrap>
<a>Identical</a>
<b>Not identical</b>
<a>Identical</a>
<c><d>Identical with child</d></c>
<b>Also not identical</b>
<c><d>Identical with child</d></c>
</wrap>""")
wrap = tree.wrap
for i, element in enumerate(wrap.contents):
self.assertEqual(i, wrap.index(element))
self.assertRaises(ValueError, tree.index, 1)
class TestParentOperations(TreeTest):
"""Test navigation and searching through an element's parents."""
def setUp(self):
super(TestParentOperations, self).setUp()
self.tree = self.soup('''<ul id="empty"></ul>
<ul id="top">
<ul id="middle">
<ul id="bottom">
<b>Start here</b>
</ul>
</ul>''')
self.start = self.tree.b
def test_parent(self):
self.assertEqual(self.start.parent['id'], 'bottom')
self.assertEqual(self.start.parent.parent['id'], 'middle')
self.assertEqual(self.start.parent.parent.parent['id'], 'top')
def test_parent_of_top_tag_is_soup_object(self):
top_tag = self.tree.contents[0]
self.assertEqual(top_tag.parent, self.tree)
def test_soup_object_has_no_parent(self):
self.assertEqual(None, self.tree.parent)
def test_find_parents(self):
self.assertSelectsIDs(
self.start.find_parents('ul'), ['bottom', 'middle', 'top'])
self.assertSelectsIDs(
self.start.find_parents('ul', id="middle"), ['middle'])
def test_find_parent(self):
self.assertEqual(self.start.find_parent('ul')['id'], 'bottom')
def test_parent_of_text_element(self):
text = self.tree.find(text="Start here")
self.assertEqual(text.parent.name, 'b')
def test_text_element_find_parent(self):
text = self.tree.find(text="Start here")
self.assertEqual(text.find_parent('ul')['id'], 'bottom')
def test_parent_generator(self):
parents = [parent['id'] for parent in self.start.parents
if parent is not None and 'id' in parent.attrs]
self.assertEqual(parents, ['bottom', 'middle', 'top'])
class ProximityTest(TreeTest):
def setUp(self):
super(TreeTest, self).setUp()
self.tree = self.soup(
'<html id="start"><head></head><body><b id="1">One</b><b id="2">Two</b><b id="3">Three</b></body></html>')
class TestNextOperations(ProximityTest):
def setUp(self):
super(TestNextOperations, self).setUp()
self.start = self.tree.b
def test_next(self):
self.assertEqual(self.start.next_element, "One")
self.assertEqual(self.start.next_element.next_element['id'], "2")
def test_next_of_last_item_is_none(self):
last = self.tree.find(text="Three")
self.assertEqual(last.next_element, None)
def test_next_of_root_is_none(self):
# The document root is outside the next/previous chain.
self.assertEqual(self.tree.next_element, None)
def test_find_all_next(self):
self.assertSelects(self.start.find_all_next('b'), ["Two", "Three"])
self.start.find_all_next(id=3)
self.assertSelects(self.start.find_all_next(id=3), ["Three"])
def test_find_next(self):
self.assertEqual(self.start.find_next('b')['id'], '2')
self.assertEqual(self.start.find_next(text="Three"), "Three")
def test_find_next_for_text_element(self):
text = self.tree.find(text="One")
self.assertEqual(text.find_next("b").string, "Two")
self.assertSelects(text.find_all_next("b"), ["Two", "Three"])
def test_next_generator(self):
start = self.tree.find(text="Two")
successors = [node for node in start.next_elements]
# There are two successors: the final <b> tag and its text contents.
tag, contents = successors
self.assertEqual(tag['id'], '3')
self.assertEqual(contents, "Three")
class TestPreviousOperations(ProximityTest):
def setUp(self):
super(TestPreviousOperations, self).setUp()
self.end = self.tree.find(text="Three")
def test_previous(self):
self.assertEqual(self.end.previous_element['id'], "3")
self.assertEqual(self.end.previous_element.previous_element, "Two")
def test_previous_of_first_item_is_none(self):
first = self.tree.find('html')
self.assertEqual(first.previous_element, None)
def test_previous_of_root_is_none(self):
# The document root is outside the next/previous chain.
# XXX This is broken!
#self.assertEqual(self.tree.previous_element, None)
pass
def test_find_all_previous(self):
# The <b> tag containing the "Three" node is the predecessor
# of the "Three" node itself, which is why "Three" shows up
# here.
self.assertSelects(
self.end.find_all_previous('b'), ["Three", "Two", "One"])
self.assertSelects(self.end.find_all_previous(id=1), ["One"])
def test_find_previous(self):
self.assertEqual(self.end.find_previous('b')['id'], '3')
self.assertEqual(self.end.find_previous(text="One"), "One")
def test_find_previous_for_text_element(self):
text = self.tree.find(text="Three")
self.assertEqual(text.find_previous("b").string, "Three")
self.assertSelects(
text.find_all_previous("b"), ["Three", "Two", "One"])
def test_previous_generator(self):
start = self.tree.find(text="One")
predecessors = [node for node in start.previous_elements]
# There are four predecessors: the <b> tag containing "One"
# the <body> tag, the <head> tag, and the <html> tag.
b, body, head, html = predecessors
self.assertEqual(b['id'], '1')
self.assertEqual(body.name, "body")
self.assertEqual(head.name, "head")
self.assertEqual(html.name, "html")
class SiblingTest(TreeTest):
def setUp(self):
super(SiblingTest, self).setUp()
markup = '''<html>
<span id="1">
<span id="1.1"></span>
</span>
<span id="2">
<span id="2.1"></span>
</span>
<span id="3">
<span id="3.1"></span>
</span>
<span id="4"></span>
</html>'''
# All that whitespace looks good but makes the tests more
# difficult. Get rid of it.
markup = re.compile("\n\s*").sub("", markup)
self.tree = self.soup(markup)
class TestNextSibling(SiblingTest):
def setUp(self):
super(TestNextSibling, self).setUp()
self.start = self.tree.find(id="1")
def test_next_sibling_of_root_is_none(self):
self.assertEqual(self.tree.next_sibling, None)
def test_next_sibling(self):
self.assertEqual(self.start.next_sibling['id'], '2')
self.assertEqual(self.start.next_sibling.next_sibling['id'], '3')
# Note the difference between next_sibling and next_element.
self.assertEqual(self.start.next_element['id'], '1.1')
def test_next_sibling_may_not_exist(self):
self.assertEqual(self.tree.html.next_sibling, None)
nested_span = self.tree.find(id="1.1")
self.assertEqual(nested_span.next_sibling, None)
last_span = self.tree.find(id="4")
self.assertEqual(last_span.next_sibling, None)
def test_find_next_sibling(self):
self.assertEqual(self.start.find_next_sibling('span')['id'], '2')
def test_next_siblings(self):
self.assertSelectsIDs(self.start.find_next_siblings("span"),
['2', '3', '4'])
self.assertSelectsIDs(self.start.find_next_siblings(id='3'), ['3'])
def test_next_sibling_for_text_element(self):
soup = self.soup("Foo<b>bar</b>baz")
start = soup.find(text="Foo")
self.assertEqual(start.next_sibling.name, 'b')
self.assertEqual(start.next_sibling.next_sibling, 'baz')
self.assertSelects(start.find_next_siblings('b'), ['bar'])
self.assertEqual(start.find_next_sibling(text="baz"), "baz")
self.assertEqual(start.find_next_sibling(text="nonesuch"), None)
class TestPreviousSibling(SiblingTest):
def setUp(self):
super(TestPreviousSibling, self).setUp()
self.end = self.tree.find(id="4")
def test_previous_sibling_of_root_is_none(self):
self.assertEqual(self.tree.previous_sibling, None)
def test_previous_sibling(self):
self.assertEqual(self.end.previous_sibling['id'], '3')
self.assertEqual(self.end.previous_sibling.previous_sibling['id'], '2')
# Note the difference between previous_sibling and previous_element.
self.assertEqual(self.end.previous_element['id'], '3.1')
def test_previous_sibling_may_not_exist(self):
self.assertEqual(self.tree.html.previous_sibling, None)
nested_span = self.tree.find(id="1.1")
self.assertEqual(nested_span.previous_sibling, None)
first_span = self.tree.find(id="1")
self.assertEqual(first_span.previous_sibling, None)
def test_find_previous_sibling(self):
self.assertEqual(self.end.find_previous_sibling('span')['id'], '3')
def test_previous_siblings(self):
self.assertSelectsIDs(self.end.find_previous_siblings("span"),
['3', '2', '1'])
self.assertSelectsIDs(self.end.find_previous_siblings(id='1'), ['1'])
def test_previous_sibling_for_text_element(self):
soup = self.soup("Foo<b>bar</b>baz")
start = soup.find(text="baz")
self.assertEqual(start.previous_sibling.name, 'b')
self.assertEqual(start.previous_sibling.previous_sibling, 'Foo')
self.assertSelects(start.find_previous_siblings('b'), ['bar'])
self.assertEqual(start.find_previous_sibling(text="Foo"), "Foo")
self.assertEqual(start.find_previous_sibling(text="nonesuch"), None)
class TestTagCreation(SoupTest):
"""Test the ability to create new tags."""
def test_new_tag(self):
soup = self.soup("")
new_tag = soup.new_tag("foo", bar="baz")
self.assertTrue(isinstance(new_tag, Tag))
self.assertEqual("foo", new_tag.name)
self.assertEqual(dict(bar="baz"), new_tag.attrs)
self.assertEqual(None, new_tag.parent)
def test_tag_inherits_self_closing_rules_from_builder(self):
if XML_BUILDER_PRESENT:
xml_soup = BeautifulSoup("", "xml")
xml_br = xml_soup.new_tag("br")
xml_p = xml_soup.new_tag("p")
# Both the <br> and <p> tag are empty-element, just because
# they have no contents.
self.assertEqual(b"<br/>", xml_br.encode())
self.assertEqual(b"<p/>", xml_p.encode())
html_soup = BeautifulSoup("", "html")
html_br = html_soup.new_tag("br")
html_p = html_soup.new_tag("p")
        # The HTML builder uses HTML's rules about which tags are
# empty-element tags, and the new tags reflect these rules.
self.assertEqual(b"<br/>", html_br.encode())
self.assertEqual(b"<p></p>", html_p.encode())
def test_new_string_creates_navigablestring(self):
soup = self.soup("")
s = soup.new_string("foo")
self.assertEqual("foo", s)
self.assertTrue(isinstance(s, NavigableString))
class TestTreeModification(SoupTest):
def test_attribute_modification(self):
soup = self.soup('<a id="1"></a>')
soup.a['id'] = 2
self.assertEqual(soup.decode(), self.document_for('<a id="2"></a>'))
del(soup.a['id'])
self.assertEqual(soup.decode(), self.document_for('<a></a>'))
soup.a['id2'] = 'foo'
self.assertEqual(soup.decode(), self.document_for('<a id2="foo"></a>'))
def test_new_tag_creation(self):
builder = builder_registry.lookup('html')()
soup = self.soup("<body></body>", builder=builder)
a = Tag(soup, builder, 'a')
ol = Tag(soup, builder, 'ol')
a['href'] = 'http://foo.com/'
soup.body.insert(0, a)
soup.body.insert(1, ol)
self.assertEqual(
soup.body.encode(),
b'<body><a href="http://foo.com/"></a><ol></ol></body>')
def test_append_to_contents_moves_tag(self):
doc = """<p id="1">Don't leave me <b>here</b>.</p>
<p id="2">Don\'t leave!</p>"""
soup = self.soup(doc)
second_para = soup.find(id='2')
bold = soup.b
# Move the <b> tag to the end of the second paragraph.
soup.find(id='2').append(soup.b)
# The <b> tag is now a child of the second paragraph.
self.assertEqual(bold.parent, second_para)
self.assertEqual(
soup.decode(), self.document_for(
'<p id="1">Don\'t leave me .</p>\n'
'<p id="2">Don\'t leave!<b>here</b></p>'))
def test_replace_with_returns_thing_that_was_replaced(self):
text = "<a></a><b><c></c></b>"
soup = self.soup(text)
a = soup.a
new_a = a.replace_with(soup.c)
self.assertEqual(a, new_a)
def test_replace_with_children_returns_thing_that_was_replaced(self):
text = "<a><b></b><c></c></a>"
soup = self.soup(text)
a = soup.a
new_a = a.replace_with_children()
self.assertEqual(a, new_a)
def test_replace_tag_with_itself(self):
text = "<a><b></b><c>Foo<d></d></c></a><a><e></e></a>"
soup = self.soup(text)
c = soup.c
soup.c.replace_with(c)
self.assertEqual(soup.decode(), self.document_for(text))
def test_replace_tag_with_its_parent_raises_exception(self):
text = "<a><b></b></a>"
soup = self.soup(text)
self.assertRaises(ValueError, soup.b.replace_with, soup.a)
def test_insert_tag_into_itself_raises_exception(self):
text = "<a><b></b></a>"
soup = self.soup(text)
self.assertRaises(ValueError, soup.a.insert, 0, soup.a)
def test_replace_with_maintains_next_element_throughout(self):
soup = self.soup('<p><a>one</a><b>three</b></p>')
a = soup.a
b = a.contents[0]
# Make it so the <a> tag has two text children.
a.insert(1, "two")
# Now replace each one with the empty string.
left, right = a.contents
left.replaceWith('')
right.replaceWith('')
# The <b> tag is still connected to the tree.
self.assertEqual("three", soup.b.string)
def test_replace_final_node(self):
soup = self.soup("<b>Argh!</b>")
soup.find(text="Argh!").replace_with("Hooray!")
new_text = soup.find(text="Hooray!")
b = soup.b
self.assertEqual(new_text.previous_element, b)
self.assertEqual(new_text.parent, b)
self.assertEqual(new_text.previous_element.next_element, new_text)
self.assertEqual(new_text.next_element, None)
def test_consecutive_text_nodes(self):
# A builder should never create two consecutive text nodes,
# but if you insert one next to another, Beautiful Soup will
# handle it correctly.
soup = self.soup("<a><b>Argh!</b><c></c></a>")
soup.b.insert(1, "Hooray!")
self.assertEqual(
soup.decode(), self.document_for(
"<a><b>Argh!Hooray!</b><c></c></a>"))
new_text = soup.find(text="Hooray!")
self.assertEqual(new_text.previous_element, "Argh!")
self.assertEqual(new_text.previous_element.next_element, new_text)
self.assertEqual(new_text.previous_sibling, "Argh!")
self.assertEqual(new_text.previous_sibling.next_sibling, new_text)
self.assertEqual(new_text.next_sibling, None)
self.assertEqual(new_text.next_element, soup.c)
def test_insert_string(self):
soup = self.soup("<a></a>")
soup.a.insert(0, "bar")
soup.a.insert(0, "foo")
        # The strings were added to the tag.
self.assertEqual(["foo", "bar"], soup.a.contents)
# And they were converted to NavigableStrings.
self.assertEqual(soup.a.contents[0].next_element, "bar")
def test_insert_tag(self):
builder = self.default_builder
soup = self.soup(
"<a><b>Find</b><c>lady!</c><d></d></a>", builder=builder)
magic_tag = Tag(soup, builder, 'magictag')
magic_tag.insert(0, "the")
soup.a.insert(1, magic_tag)
self.assertEqual(
soup.decode(), self.document_for(
"<a><b>Find</b><magictag>the</magictag><c>lady!</c><d></d></a>"))
# Make sure all the relationships are hooked up correctly.
b_tag = soup.b
self.assertEqual(b_tag.next_sibling, magic_tag)
self.assertEqual(magic_tag.previous_sibling, b_tag)
find = b_tag.find(text="Find")
self.assertEqual(find.next_element, magic_tag)
self.assertEqual(magic_tag.previous_element, find)
c_tag = soup.c
self.assertEqual(magic_tag.next_sibling, c_tag)
self.assertEqual(c_tag.previous_sibling, magic_tag)
the = magic_tag.find(text="the")
self.assertEqual(the.parent, magic_tag)
self.assertEqual(the.next_element, c_tag)
self.assertEqual(c_tag.previous_element, the)
def test_insert_works_on_empty_element_tag(self):
# This is a little strange, since most HTML parsers don't allow
# markup like this to come through. But in general, we don't
# know what the parser would or wouldn't have allowed, so
# I'm letting this succeed for now.
soup = self.soup("<br/>")
soup.br.insert(1, "Contents")
self.assertEqual(str(soup.br), "<br>Contents</br>")
def test_insert_before(self):
soup = self.soup("<a>foo</a><b>bar</b>")
soup.b.insert_before("BAZ")
soup.a.insert_before("QUUX")
self.assertEqual(
soup.decode(), self.document_for("QUUX<a>foo</a>BAZ<b>bar</b>"))
soup.a.insert_before(soup.b)
self.assertEqual(
soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ"))
def test_insert_after(self):
soup = self.soup("<a>foo</a><b>bar</b>")
soup.b.insert_after("BAZ")
soup.a.insert_after("QUUX")
self.assertEqual(
soup.decode(), self.document_for("<a>foo</a>QUUX<b>bar</b>BAZ"))
soup.b.insert_after(soup.a)
self.assertEqual(
soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ"))
def test_insert_after_raises_valueerror_if_after_has_no_meaning(self):
soup = self.soup("")
tag = soup.new_tag("a")
string = soup.new_string("")
self.assertRaises(ValueError, string.insert_after, tag)
self.assertRaises(ValueError, soup.insert_after, tag)
self.assertRaises(ValueError, tag.insert_after, tag)
def test_insert_before_raises_valueerror_if_before_has_no_meaning(self):
soup = self.soup("")
tag = soup.new_tag("a")
string = soup.new_string("")
self.assertRaises(ValueError, string.insert_before, tag)
self.assertRaises(ValueError, soup.insert_before, tag)
self.assertRaises(ValueError, tag.insert_before, tag)
def test_replace_with(self):
soup = self.soup(
"<p>There's <b>no</b> business like <b>show</b> business</p>")
no, show = soup.find_all('b')
show.replace_with(no)
self.assertEqual(
soup.decode(),
self.document_for(
"<p>There's business like <b>no</b> business</p>"))
self.assertEqual(show.parent, None)
self.assertEqual(no.parent, soup.p)
self.assertEqual(no.next_element, "no")
self.assertEqual(no.next_sibling, " business")
def test_nested_tag_replace_with(self):
soup = self.soup(
"""<a>We<b>reserve<c>the</c><d>right</d></b></a><e>to<f>refuse</f><g>service</g></e>""")
# Replace the entire <b> tag and its contents ("reserve the
# right") with the <f> tag ("refuse").
remove_tag = soup.b
move_tag = soup.f
remove_tag.replace_with(move_tag)
self.assertEqual(
soup.decode(), self.document_for(
"<a>We<f>refuse</f></a><e>to<g>service</g></e>"))
# The <b> tag is now an orphan.
self.assertEqual(remove_tag.parent, None)
self.assertEqual(remove_tag.find(text="right").next_element, None)
self.assertEqual(remove_tag.previous_element, None)
self.assertEqual(remove_tag.next_sibling, None)
self.assertEqual(remove_tag.previous_sibling, None)
# The <f> tag is now connected to the <a> tag.
self.assertEqual(move_tag.parent, soup.a)
self.assertEqual(move_tag.previous_element, "We")
self.assertEqual(move_tag.next_element.next_element, soup.e)
self.assertEqual(move_tag.next_sibling, None)
# The gap where the <f> tag used to be has been mended, and
# the word "to" is now connected to the <g> tag.
to_text = soup.find(text="to")
g_tag = soup.g
self.assertEqual(to_text.next_element, g_tag)
self.assertEqual(to_text.next_sibling, g_tag)
self.assertEqual(g_tag.previous_element, to_text)
self.assertEqual(g_tag.previous_sibling, to_text)
def test_replace_with_children(self):
tree = self.soup("""
<p>Unneeded <em>formatting</em> is unneeded</p>
""")
tree.em.replace_with_children()
self.assertEqual(tree.em, None)
self.assertEqual(tree.p.text, "Unneeded formatting is unneeded")
def test_extract(self):
soup = self.soup(
'<html><body>Some content. <div id="nav">Nav crap</div> More content.</body></html>')
self.assertEqual(len(soup.body.contents), 3)
extracted = soup.find(id="nav").extract()
self.assertEqual(
soup.decode(), "<html><body>Some content. More content.</body></html>")
self.assertEqual(extracted.decode(), '<div id="nav">Nav crap</div>')
# The extracted tag is now an orphan.
self.assertEqual(len(soup.body.contents), 2)
self.assertEqual(extracted.parent, None)
self.assertEqual(extracted.previous_element, None)
self.assertEqual(extracted.next_element.next_element, None)
# The gap where the extracted tag used to be has been mended.
content_1 = soup.find(text="Some content. ")
content_2 = soup.find(text=" More content.")
self.assertEqual(content_1.next_element, content_2)
self.assertEqual(content_1.next_sibling, content_2)
self.assertEqual(content_2.previous_element, content_1)
self.assertEqual(content_2.previous_sibling, content_1)
def test_extract_distinguishes_between_identical_strings(self):
soup = self.soup("<a>foo</a><b>bar</b>")
foo_1 = soup.a.string
bar_1 = soup.b.string
foo_2 = soup.new_string("foo")
bar_2 = soup.new_string("bar")
soup.a.append(foo_2)
soup.b.append(bar_2)
# Now there are two identical strings in the <a> tag, and two
# in the <b> tag. Let's remove the first "foo" and the second
# "bar".
foo_1.extract()
bar_2.extract()
self.assertEqual(foo_2, soup.a.string)
self.assertEqual(bar_2, soup.b.string)
def test_clear(self):
"""Tag.clear()"""
soup = self.soup("<p><a>String <em>Italicized</em></a> and another</p>")
# clear using extract()
a = soup.a
soup.p.clear()
self.assertEqual(len(soup.p.contents), 0)
self.assertTrue(hasattr(a, "contents"))
# clear using decompose()
em = a.em
a.clear(decompose=True)
self.assertFalse(hasattr(em, "contents"))
def test_string_set(self):
"""Tag.string = 'string'"""
soup = self.soup("<a></a> <b><c></c></b>")
soup.a.string = "foo"
self.assertEqual(soup.a.contents, ["foo"])
soup.b.string = "bar"
self.assertEqual(soup.b.contents, ["bar"])
class TestElementObjects(SoupTest):
"""Test various features of element objects."""
def test_len(self):
"""The length of an element is its number of children."""
soup = self.soup("<top>1<b>2</b>3</top>")
# The BeautifulSoup object itself contains one element: the
# <top> tag.
self.assertEqual(len(soup.contents), 1)
self.assertEqual(len(soup), 1)
# The <top> tag contains three elements: the text node "1", the
# <b> tag, and the text node "3".
self.assertEqual(len(soup.top), 3)
self.assertEqual(len(soup.top.contents), 3)
def test_member_access_invokes_find(self):
"""Accessing a Python member .foo invokes find('foo')"""
soup = self.soup('<b><i></i></b>')
self.assertEqual(soup.b, soup.find('b'))
self.assertEqual(soup.b.i, soup.find('b').find('i'))
self.assertEqual(soup.a, None)
def test_deprecated_member_access(self):
soup = self.soup('<b><i></i></b>')
with warnings.catch_warnings(record=True) as w:
tag = soup.bTag
self.assertEqual(soup.b, tag)
self.assertEqual(
'.bTag is deprecated, use .find("b") instead.',
str(w[0].message))
def test_has_attr(self):
"""has_attr() checks for the presence of an attribute.
        Please note: has_attr() is different from
        __in__. has_attr() checks the tag's attributes and __in__
        checks the tag's children.
"""
soup = self.soup("<foo attr='bar'>")
self.assertTrue(soup.foo.has_attr('attr'))
self.assertFalse(soup.foo.has_attr('attr2'))
def test_attributes_come_out_in_alphabetical_order(self):
markup = '<b a="1" z="5" m="3" f="2" y="4"></b>'
self.assertSoupEquals(markup, '<b a="1" f="2" m="3" y="4" z="5"></b>')
def test_string(self):
# A tag that contains only a text node makes that node
# available as .string.
soup = self.soup("<b>foo</b>")
self.assertEqual(soup.b.string, 'foo')
def test_empty_tag_has_no_string(self):
        # A tag with no children has no .string.
soup = self.soup("<b></b>")
self.assertEqual(soup.b.string, None)
def test_tag_with_multiple_children_has_no_string(self):
        # A tag with multiple children has no .string.
soup = self.soup("<a>foo<b></b><b></b></b>")
self.assertEqual(soup.b.string, None)
soup = self.soup("<a>foo<b></b>bar</b>")
self.assertEqual(soup.b.string, None)
# Even if all the children are strings, due to trickery,
# it won't work--but this would be a good optimization.
soup = self.soup("<a>foo</b>")
soup.a.insert(1, "bar")
self.assertEqual(soup.a.string, None)
def test_tag_with_recursive_string_has_string(self):
# A tag with a single child which has a .string inherits that
# .string.
soup = self.soup("<a><b>foo</b></a>")
self.assertEqual(soup.a.string, "foo")
self.assertEqual(soup.string, "foo")
def test_lack_of_string(self):
"""Only a tag containing a single text node has a .string."""
soup = self.soup("<b>f<i>e</i>o</b>")
self.assertFalse(soup.b.string)
soup = self.soup("<b></b>")
self.assertFalse(soup.b.string)
def test_all_text(self):
"""Tag.text and Tag.get_text(sep=u"") -> all child text, concatenated"""
soup = self.soup("<a>a<b>r</b> <r> t </r></a>")
self.assertEqual(soup.a.text, "ar t ")
self.assertEqual(soup.a.get_text(strip=True), "art")
self.assertEqual(soup.a.get_text(","), "a,r, , t ")
self.assertEqual(soup.a.get_text(",", strip=True), "a,r,t")
class TestCDAtaListAttributes(SoupTest):
"""Testing cdata-list attributes like 'class'.
"""
def test_single_value_becomes_list(self):
soup = self.soup("<a class='foo'>")
self.assertEqual(["foo"],soup.a['class'])
def test_multiple_values_becomes_list(self):
soup = self.soup("<a class='foo bar'>")
self.assertEqual(["foo", "bar"], soup.a['class'])
def test_multiple_values_separated_by_weird_whitespace(self):
soup = self.soup("<a class='foo\tbar\nbaz'>")
self.assertEqual(["foo", "bar", "baz"],soup.a['class'])
def test_attributes_joined_into_string_on_output(self):
soup = self.soup("<a class='foo\tbar'>")
self.assertEqual(b'<a class="foo bar"></a>', soup.a.encode())
def test_accept_charset(self):
soup = self.soup('<form accept-charset="ISO-8859-1 UTF-8">')
self.assertEqual(['ISO-8859-1', 'UTF-8'], soup.form['accept-charset'])
def test_cdata_attribute_applying_only_to_one_tag(self):
data = '<a accept-charset="ISO-8859-1 UTF-8"></a>'
soup = self.soup(data)
# We saw in another test that accept-charset is a cdata-list
# attribute for the <form> tag. But it's not a cdata-list
# attribute for any other tag.
self.assertEqual('ISO-8859-1 UTF-8', soup.a['accept-charset'])
class TestPersistence(SoupTest):
"Testing features like pickle and deepcopy."
def setUp(self):
super(TestPersistence, self).setUp()
self.page = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"
"http://www.w3.org/TR/REC-html40/transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Beautiful Soup: We called him Tortoise because he taught us.</title>
<link rev="made" href="mailto:[email protected]">
<meta name="Description" content="Beautiful Soup: an HTML parser optimized for screen-scraping.">
<meta name="generator" content="Markov Approximation 1.4 (module: leonardr)">
<meta name="author" content="Leonard Richardson">
</head>
<body>
<a href="foo">foo</a>
<a href="foo"><b>bar</b></a>
</body>
</html>"""
self.tree = self.soup(self.page)
def test_pickle_and_unpickle_identity(self):
# Pickling a tree, then unpickling it, yields a tree identical
# to the original.
dumped = pickle.dumps(self.tree, 2)
loaded = pickle.loads(dumped)
self.assertEqual(loaded.__class__, BeautifulSoup)
self.assertEqual(loaded.decode(), self.tree.decode())
def test_deepcopy_identity(self):
# Making a deepcopy of a tree yields an identical tree.
copied = copy.deepcopy(self.tree)
self.assertEqual(copied.decode(), self.tree.decode())
def test_unicode_pickle(self):
# A tree containing Unicode characters can be pickled.
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
dumped = pickle.dumps(soup, pickle.HIGHEST_PROTOCOL)
loaded = pickle.loads(dumped)
self.assertEqual(loaded.decode(), soup.decode())
class TestSubstitutions(SoupTest):
def test_default_formatter_is_minimal(self):
markup = "<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
soup = self.soup(markup)
decoded = soup.decode(formatter="minimal")
        # The < is converted back into &lt; but the e-with-acute is left alone.
        self.assertEqual(
            decoded,
            self.document_for(
                "<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"))
def test_formatter_html(self):
markup = "<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
soup = self.soup(markup)
decoded = soup.decode(formatter="html")
self.assertEqual(
decoded,
            self.document_for("<b>&lt;&lt;Sacr&eacute; bleu!&gt;&gt;</b>"))
def test_formatter_minimal(self):
markup = "<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
soup = self.soup(markup)
decoded = soup.decode(formatter="minimal")
        # The < is converted back into &lt; but the e-with-acute is left alone.
        self.assertEqual(
            decoded,
            self.document_for(
                "<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"))
def test_formatter_null(self):
markup = "<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
soup = self.soup(markup)
decoded = soup.decode(formatter=None)
# Neither the angle brackets nor the e-with-acute are converted.
# This is not valid HTML, but it's what the user wanted.
self.assertEqual(decoded,
self.document_for("<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"))
def test_formatter_custom(self):
        markup = "<b>&lt;foo&gt;</b><b>bar</b>"
soup = self.soup(markup)
decoded = soup.decode(formatter = lambda x: x.upper())
# Instead of normal entity conversion code, the custom
# callable is called on every string.
self.assertEqual(
decoded,
self.document_for("<b><FOO></b><b>BAR</b>"))
def test_prettify_accepts_formatter(self):
soup = BeautifulSoup("<html><body>foo</body></html>")
pretty = soup.prettify(formatter = lambda x: x.upper())
self.assertTrue("FOO" in pretty)
def test_prettify_outputs_unicode_by_default(self):
soup = self.soup("<a></a>")
self.assertEqual(str, type(soup.prettify()))
def test_prettify_can_encode_data(self):
soup = self.soup("<a></a>")
self.assertEqual(bytes, type(soup.prettify("utf-8")))
def test_html_entity_substitution_off_by_default(self):
markup = "<b>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</b>"
soup = self.soup(markup)
encoded = soup.b.encode("utf-8")
self.assertEqual(encoded, markup.encode('utf-8'))
def test_encoding_substitution(self):
# Here's the <meta> tag saying that a document is
# encoded in Shift-JIS.
meta_tag = ('<meta content="text/html; charset=x-sjis" '
'http-equiv="Content-type"/>')
soup = self.soup(meta_tag)
# Parse the document, and the charset is replaced with a
# generic value.
self.assertEqual(soup.meta['content'],
'text/html; charset=%SOUP-ENCODING%')
# Encode the document into some encoding, and the encoding is
# substituted into the meta tag.
utf_8 = soup.encode("utf-8")
self.assertTrue(b"charset=utf-8" in utf_8)
euc_jp = soup.encode("euc_jp")
self.assertTrue(b"charset=euc_jp" in euc_jp)
shift_jis = soup.encode("shift-jis")
self.assertTrue(b"charset=shift-jis" in shift_jis)
utf_16_u = soup.encode("utf-16").decode("utf-16")
self.assertTrue("charset=utf-16" in utf_16_u)
def test_encoding_substitution_doesnt_happen_if_tag_is_strained(self):
markup = ('<head><meta content="text/html; charset=x-sjis" '
'http-equiv="Content-type"/></head><pre>foo</pre>')
# Beautiful Soup used to try to rewrite the meta tag even if the
# meta tag got filtered out by the strainer. This test makes
# sure that doesn't happen.
strainer = SoupStrainer('pre')
soup = self.soup(markup, parse_only=strainer)
self.assertEqual(soup.contents[0].name, 'pre')
class TestEncoding(SoupTest):
"""Test the ability to encode objects into strings."""
def test_unicode_string_can_be_encoded(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(soup.b.string.encode("utf-8"),
"\N{SNOWMAN}".encode("utf-8"))
def test_tag_containing_unicode_string_can_be_encoded(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(
soup.b.encode("utf-8"), html.encode("utf-8"))
def test_encoding_substitutes_unrecognized_characters_by_default(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
        self.assertEqual(soup.b.encode("ascii"), b"<b>&#9731;</b>")
def test_encoding_can_be_made_strict(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertRaises(
UnicodeEncodeError, soup.encode, "ascii", errors="strict")
class TestNavigableStringSubclasses(SoupTest):
def test_cdata(self):
# None of the current builders turn CDATA sections into CData
# objects, but you can create them manually.
soup = self.soup("")
cdata = CData("foo")
soup.insert(1, cdata)
self.assertEqual(str(soup), "<![CDATA[foo]]>")
self.assertEqual(soup.find(text="foo"), "foo")
self.assertEqual(soup.contents[0], "foo")
def test_doctype_ends_in_newline(self):
# Unlike other NavigableString subclasses, a DOCTYPE always ends
# in a newline.
doctype = Doctype("foo")
soup = self.soup("")
soup.insert(1, doctype)
self.assertEqual(soup.encode(), b"<!DOCTYPE foo>\n")
class TestSoupSelector(TreeTest):
HTML = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>The title</title>
<link rel="stylesheet" href="blah.css" type="text/css" id="l1">
</head>
<body>
<div id="main">
<div id="inner">
<h1 id="header1">An H1</h1>
<p>Some text</p>
<p class="onep" id="p1">Some more text</p>
<h2 id="header2">An H2</h2>
<p class="class1 class2 class3" id="pmulti">Another</p>
<a href="http://bob.example.org/" rel="friend met" id="bob">Bob</a>
<h2 id="header3">Another H2</h2>
<a id="me" href="http://simonwillison.net/" rel="me">me</a>
<span class="s1">
<a href="#" id="s1a1">span1a1</a>
<a href="#" id="s1a2">span1a2 <span id="s1a2s1">test</span></a>
<span class="span2">
<a href="#" id="s2a1">span2a1</a>
</span>
<span class="span3"></span>
</span>
</div>
<p lang="en" id="lang-en">English</p>
<p lang="en-gb" id="lang-en-gb">English UK</p>
<p lang="en-us" id="lang-en-us">English US</p>
<p lang="fr" id="lang-fr">French</p>
</div>
<div id="footer">
</div>
"""
def setUp(self):
self.soup = BeautifulSoup(self.HTML)
def assertSelects(self, selector, expected_ids):
el_ids = [el['id'] for el in self.soup.select(selector)]
el_ids.sort()
expected_ids.sort()
self.assertEqual(expected_ids, el_ids,
"Selector %s, expected [%s], got [%s]" % (
selector, ', '.join(expected_ids), ', '.join(el_ids)
)
)
assertSelect = assertSelects
def assertSelectMultiple(self, *tests):
for selector, expected_ids in tests:
self.assertSelect(selector, expected_ids)
def test_one_tag_one(self):
els = self.soup.select('title')
self.assertEqual(len(els), 1)
self.assertEqual(els[0].name, 'title')
self.assertEqual(els[0].contents, ['The title'])
def test_one_tag_many(self):
els = self.soup.select('div')
self.assertEqual(len(els), 3)
for div in els:
self.assertEqual(div.name, 'div')
def test_tag_in_tag_one(self):
els = self.soup.select('div div')
self.assertSelects('div div', ['inner'])
def test_tag_in_tag_many(self):
for selector in ('html div', 'html body div', 'body div'):
self.assertSelects(selector, ['main', 'inner', 'footer'])
def test_tag_no_match(self):
self.assertEqual(len(self.soup.select('del')), 0)
def test_invalid_tag(self):
self.assertEqual(len(self.soup.select('tag%t')), 0)
def test_header_tags(self):
self.assertSelectMultiple(
('h1', ['header1']),
('h2', ['header2', 'header3']),
)
def test_class_one(self):
for selector in ('.onep', 'p.onep', 'html p.onep'):
els = self.soup.select(selector)
self.assertEqual(len(els), 1)
self.assertEqual(els[0].name, 'p')
self.assertEqual(els[0]['class'], ['onep'])
def test_class_mismatched_tag(self):
els = self.soup.select('div.onep')
self.assertEqual(len(els), 0)
def test_one_id(self):
for selector in ('div#inner', '#inner', 'div div#inner'):
self.assertSelects(selector, ['inner'])
def test_bad_id(self):
els = self.soup.select('#doesnotexist')
self.assertEqual(len(els), 0)
def test_items_in_id(self):
els = self.soup.select('div#inner p')
self.assertEqual(len(els), 3)
for el in els:
self.assertEqual(el.name, 'p')
self.assertEqual(els[1]['class'], ['onep'])
self.assertFalse('class' in els[0])
def test_a_bunch_of_emptys(self):
for selector in ('div#main del', 'div#main div.oops', 'div div#main'):
self.assertEqual(len(self.soup.select(selector)), 0)
def test_multi_class_support(self):
for selector in ('.class1', 'p.class1', '.class2', 'p.class2',
'.class3', 'p.class3', 'html p.class2', 'div#inner .class2'):
self.assertSelects(selector, ['pmulti'])
def test_multi_class_selection(self):
for selector in ('.class1.class3', '.class3.class2',
'.class1.class2.class3'):
self.assertSelects(selector, ['pmulti'])
def test_child_selector(self):
self.assertSelects('.s1 > a', ['s1a1', 's1a2'])
self.assertSelects('.s1 > a span', ['s1a2s1'])
def test_attribute_equals(self):
self.assertSelectMultiple(
('p[class="onep"]', ['p1']),
('p[id="p1"]', ['p1']),
('[class="onep"]', ['p1']),
('[id="p1"]', ['p1']),
('link[rel="stylesheet"]', ['l1']),
('link[type="text/css"]', ['l1']),
('link[href="blah.css"]', ['l1']),
('link[href="no-blah.css"]', []),
('[rel="stylesheet"]', ['l1']),
('[type="text/css"]', ['l1']),
('[href="blah.css"]', ['l1']),
('[href="no-blah.css"]', []),
('p[href="no-blah.css"]', []),
('[href="no-blah.css"]', []),
)
def test_attribute_tilde(self):
self.assertSelectMultiple(
('p[class~="class1"]', ['pmulti']),
('p[class~="class2"]', ['pmulti']),
('p[class~="class3"]', ['pmulti']),
('[class~="class1"]', ['pmulti']),
('[class~="class2"]', ['pmulti']),
('[class~="class3"]', ['pmulti']),
('a[rel~="friend"]', ['bob']),
('a[rel~="met"]', ['bob']),
('[rel~="friend"]', ['bob']),
('[rel~="met"]', ['bob']),
)
def test_attribute_startswith(self):
self.assertSelectMultiple(
('[rel^="style"]', ['l1']),
('link[rel^="style"]', ['l1']),
('notlink[rel^="notstyle"]', []),
('[rel^="notstyle"]', []),
('link[rel^="notstyle"]', []),
('link[href^="bla"]', ['l1']),
('a[href^="http://"]', ['bob', 'me']),
('[href^="http://"]', ['bob', 'me']),
('[id^="p"]', ['pmulti', 'p1']),
('[id^="m"]', ['me', 'main']),
('div[id^="m"]', ['main']),
('a[id^="m"]', ['me']),
)
def test_attribute_endswith(self):
self.assertSelectMultiple(
('[href$=".css"]', ['l1']),
('link[href$=".css"]', ['l1']),
('link[id$="1"]', ['l1']),
('[id$="1"]', ['l1', 'p1', 'header1', 's1a1', 's2a1', 's1a2s1']),
('div[id$="1"]', []),
('[id$="noending"]', []),
)
def test_attribute_contains(self):
self.assertSelectMultiple(
# From test_attribute_startswith
('[rel*="style"]', ['l1']),
('link[rel*="style"]', ['l1']),
('notlink[rel*="notstyle"]', []),
('[rel*="notstyle"]', []),
('link[rel*="notstyle"]', []),
('link[href*="bla"]', ['l1']),
('a[href*="http://"]', ['bob', 'me']),
('[href*="http://"]', ['bob', 'me']),
('[id*="p"]', ['pmulti', 'p1']),
('div[id*="m"]', ['main']),
('a[id*="m"]', ['me']),
# From test_attribute_endswith
('[href*=".css"]', ['l1']),
('link[href*=".css"]', ['l1']),
('link[id*="1"]', ['l1']),
('[id*="1"]', ['l1', 'p1', 'header1', 's1a1', 's1a2', 's2a1', 's1a2s1']),
('div[id*="1"]', []),
('[id*="noending"]', []),
# New for this test
('[href*="."]', ['bob', 'me', 'l1']),
('a[href*="."]', ['bob', 'me']),
('link[href*="."]', ['l1']),
('div[id*="n"]', ['main', 'inner']),
('div[id*="nn"]', ['inner']),
)
def test_attribute_exact_or_hypen(self):
self.assertSelectMultiple(
('p[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
('[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
('p[lang|="fr"]', ['lang-fr']),
('p[lang|="gb"]', []),
)
def test_attribute_exists(self):
self.assertSelectMultiple(
('[rel]', ['l1', 'bob', 'me']),
('link[rel]', ['l1']),
('a[rel]', ['bob', 'me']),
('[lang]', ['lang-en', 'lang-en-gb', 'lang-en-us', 'lang-fr']),
('p[class]', ['p1', 'pmulti']),
('[blah]', []),
('p[blah]', []),
)
def test_select_on_element(self):
# Other tests operate on the tree; this operates on an element
# within the tree.
inner = self.soup.find("div", id="main")
selected = inner.select("div")
# The <div id="inner"> tag was selected. The <div id="footer">
# tag was not.
self.assertSelectsIDs(selected, ['inner'])
| apache-2.0 | -2,534,676,821,109,609,000 | 37.361538 | 118 | 0.564384 | false |
darjeeling/django | tests/utils_tests/test_encoding.py | 53 | 6599 | import datetime
import unittest
from unittest import mock
from urllib.parse import quote_plus
from django.test import SimpleTestCase
from django.utils.encoding import (
DjangoUnicodeDecodeError, escape_uri_path, filepath_to_uri, force_bytes,
force_text, get_system_encoding, iri_to_uri, smart_bytes, smart_text,
uri_to_iri,
)
from django.utils.functional import SimpleLazyObject
from django.utils.translation import gettext_lazy
class TestEncodingUtils(SimpleTestCase):
def test_force_text_exception(self):
"""
Broken __str__ actually raises an error.
"""
class MyString:
def __str__(self):
return b'\xc3\xb6\xc3\xa4\xc3\xbc'
# str(s) raises a TypeError if the result is not a text type.
with self.assertRaises(TypeError):
force_text(MyString())
def test_force_text_lazy(self):
s = SimpleLazyObject(lambda: 'x')
self.assertTrue(type(force_text(s)), str)
def test_force_text_DjangoUnicodeDecodeError(self):
msg = (
"'utf-8' codec can't decode byte 0xff in position 0: invalid "
"start byte. You passed in b'\\xff' (<class 'bytes'>)"
)
with self.assertRaisesMessage(DjangoUnicodeDecodeError, msg):
force_text(b'\xff')
def test_force_bytes_exception(self):
"""
force_bytes knows how to convert to bytes an exception
containing non-ASCII characters in its args.
"""
error_msg = "This is an exception, voilà"
exc = ValueError(error_msg)
self.assertEqual(force_bytes(exc), error_msg.encode())
self.assertEqual(force_bytes(exc, encoding='ascii', errors='ignore'), b'This is an exception, voil')
def test_force_bytes_strings_only(self):
today = datetime.date.today()
self.assertEqual(force_bytes(today, strings_only=True), today)
def test_force_bytes_encoding(self):
error_msg = 'This is an exception, voilà'.encode()
result = force_bytes(error_msg, encoding='ascii', errors='ignore')
self.assertEqual(result, b'This is an exception, voil')
def test_force_bytes_memory_view(self):
self.assertEqual(force_bytes(memoryview(b'abc')), b'abc')
def test_smart_bytes(self):
class Test:
def __str__(self):
return 'ŠĐĆŽćžšđ'
lazy_func = gettext_lazy('x')
self.assertIs(smart_bytes(lazy_func), lazy_func)
self.assertEqual(smart_bytes(Test()), b'\xc5\xa0\xc4\x90\xc4\x86\xc5\xbd\xc4\x87\xc5\xbe\xc5\xa1\xc4\x91')
self.assertEqual(smart_bytes(1), b'1')
self.assertEqual(smart_bytes('foo'), b'foo')
def test_smart_text(self):
class Test:
def __str__(self):
return 'ŠĐĆŽćžšđ'
lazy_func = gettext_lazy('x')
self.assertIs(smart_text(lazy_func), lazy_func)
self.assertEqual(smart_text(Test()), '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
self.assertEqual(smart_text(1), '1')
self.assertEqual(smart_text('foo'), 'foo')
def test_get_default_encoding(self):
with mock.patch('locale.getdefaultlocale', side_effect=Exception):
self.assertEqual(get_system_encoding(), 'ascii')
class TestRFC3987IEncodingUtils(unittest.TestCase):
def test_filepath_to_uri(self):
self.assertEqual(filepath_to_uri(None), None)
self.assertEqual(filepath_to_uri('upload\\чубака.mp4'), 'upload/%D1%87%D1%83%D0%B1%D0%B0%D0%BA%D0%B0.mp4')
def test_iri_to_uri(self):
cases = [
# Valid UTF-8 sequences are encoded.
('red%09rosé#red', 'red%09ros%C3%A9#red'),
('/blog/for/Jürgen Münster/', '/blog/for/J%C3%BCrgen%20M%C3%BCnster/'),
('locations/%s' % quote_plus('Paris & Orléans'), 'locations/Paris+%26+Orl%C3%A9ans'),
# Reserved chars remain unescaped.
('%&', '%&'),
('red&♥ros%#red', 'red&%E2%99%A5ros%#red'),
(gettext_lazy('red&♥ros%#red'), 'red&%E2%99%A5ros%#red'),
]
for iri, uri in cases:
self.assertEqual(iri_to_uri(iri), uri)
# Test idempotency.
self.assertEqual(iri_to_uri(iri_to_uri(iri)), uri)
def test_uri_to_iri(self):
cases = [
(None, None),
# Valid UTF-8 sequences are decoded.
('/%e2%89%Ab%E2%99%a5%E2%89%aB/', '/≫♥≫/'),
('/%E2%99%A5%E2%99%A5/?utf8=%E2%9C%93', '/♥♥/?utf8=✓'),
('/%41%5a%6B/', '/AZk/'),
# Reserved and non-URL valid ASCII chars are not decoded.
('/%25%20%02%41%7b/', '/%25%20%02A%7b/'),
# Broken UTF-8 sequences remain escaped.
('/%AAd%AAj%AAa%AAn%AAg%AAo%AA/', '/%AAd%AAj%AAa%AAn%AAg%AAo%AA/'),
('/%E2%99%A5%E2%E2%99%A5/', '/♥%E2♥/'),
('/%E2%99%A5%E2%99%E2%99%A5/', '/♥%E2%99♥/'),
('/%E2%E2%99%A5%E2%99%A5%99/', '/%E2♥♥%99/'),
('/%E2%99%A5%E2%99%A5/?utf8=%9C%93%E2%9C%93%9C%93', '/♥♥/?utf8=%9C%93✓%9C%93'),
]
for uri, iri in cases:
self.assertEqual(uri_to_iri(uri), iri)
# Test idempotency.
self.assertEqual(uri_to_iri(uri_to_iri(uri)), iri)
def test_complementarity(self):
cases = [
('/blog/for/J%C3%BCrgen%20M%C3%BCnster/', '/blog/for/J\xfcrgen%20M\xfcnster/'),
('%&', '%&'),
('red&%E2%99%A5ros%#red', 'red&♥ros%#red'),
('/%E2%99%A5%E2%99%A5/', '/♥♥/'),
('/%E2%99%A5%E2%99%A5/?utf8=%E2%9C%93', '/♥♥/?utf8=✓'),
('/%25%20%02%7b/', '/%25%20%02%7b/'),
('/%AAd%AAj%AAa%AAn%AAg%AAo%AA/', '/%AAd%AAj%AAa%AAn%AAg%AAo%AA/'),
('/%E2%99%A5%E2%E2%99%A5/', '/♥%E2♥/'),
('/%E2%99%A5%E2%99%E2%99%A5/', '/♥%E2%99♥/'),
('/%E2%E2%99%A5%E2%99%A5%99/', '/%E2♥♥%99/'),
('/%E2%99%A5%E2%99%A5/?utf8=%9C%93%E2%9C%93%9C%93', '/♥♥/?utf8=%9C%93✓%9C%93'),
]
for uri, iri in cases:
self.assertEqual(iri_to_uri(uri_to_iri(uri)), uri)
self.assertEqual(uri_to_iri(iri_to_uri(iri)), iri)
def test_escape_uri_path(self):
self.assertEqual(
escape_uri_path('/;some/=awful/?path/:with/@lots/&of/+awful/chars'),
'/%3Bsome/%3Dawful/%3Fpath/:with/@lots/&of/+awful/chars'
)
self.assertEqual(escape_uri_path('/foo#bar'), '/foo%23bar')
self.assertEqual(escape_uri_path('/foo?bar'), '/foo%3Fbar')
| bsd-3-clause | 5,188,043,730,174,004,000 | 38.920245 | 114 | 0.564315 | false |
colejohnson66/distorm | disOps/x86header.py | 29 | 5884 | #
# x86header.py
#
# Copyright (C) 2009 Gil Dabah, http://ragestorm.net/disops/
#
class OperandType:
""" Types of possible operands in an opcode.
Refer to the diStorm's documentation or diStorm's instructions.h
for more explanation about every one of them. """
(NONE,
IMM8,
IMM16,
IMM_FULL,
IMM32,
SEIMM8,
IMM16_1, # NEW
IMM8_1, # NEW
IMM8_2, # NEW
REG8,
REG16,
REG_FULL,
REG32,
REG32_64,
FREG32_64_RM,
RM8,
RM16,
RM_FULL,
RM32_64,
RM16_32,
FPUM16,
FPUM32,
FPUM64,
FPUM80,
R32_M8,
R32_M16,
R32_64_M8,
R32_64_M16,
RFULL_M16,
CREG,
DREG,
SREG,
SEG,
ACC8,
ACC16,
ACC_FULL,
ACC_FULL_NOT64,
MEM16_FULL,
PTR16_FULL,
MEM16_3264,
RELCB,
RELC_FULL,
MEM,
MEM_OPT, # NEW
MEM32,
MEM32_64, # NEW
MEM64,
MEM128,
MEM64_128,
MOFFS8,
MOFFS_FULL,
CONST1,
REGCL,
IB_RB,
IB_R_FULL,
REGI_ESI,
REGI_EDI,
REGI_EBXAL,
REGI_EAX,
REGDX,
REGECX,
FPU_SI,
FPU_SSI,
FPU_SIS,
MM,
MM_RM,
MM32,
MM64,
XMM,
XMM_RM,
XMM16,
XMM32,
XMM64,
XMM128,
REGXMM0,
# Below new for AVX:
RM32,
REG32_64_M8,
REG32_64_M16,
WREG32_64,
WRM32_64,
WXMM32_64,
VXMM,
XMM_IMM,
YXMM,
YXMM_IMM,
YMM,
YMM256,
VYMM,
VYXMM,
YXMM64_256,
YXMM128_256,
LXMM64_128,
LMEM128_256) = range(93)
class OpcodeLength:
""" The length of the opcode in bytes.
Where a suffix of '3' means we have to read the REG field of the ModR/M byte (REG size is 3 bits).
Suffix of 'd' means it's a Divided instruction (see documentation),
tells the disassembler to read the REG field or the whole next byte.
OL_33 and OL_4 are used in raw opcode bytes, they include the mandatory prefix,
therefore when they are defined in the instruction tables, the mandatory prefix table is added,
and they become OL_23 and OL_3 correspondingly. There is no effective opcode which is more than 3 bytes. """
(OL_1, # 0
OL_13, # 1
OL_1d, # 2 - Can be prefixed (only by WAIT/9b)
OL_2, # 3 - Can be prefixed
OL_23, # 4 - Can be prefixed
OL_2d, # 5
OL_3, # 6 - Can be prefixed
OL_33, # 7 - Internal only
OL_4 # 8 - Internal only
) = range(9)
""" Next-Opcode-Length dictionary is used in order to recursively build the instructions' tables dynamically.
It is used in such a way that it indicates how many more nested tables
we have to build and link starting from a given OL. """
NextOL = {OL_13: OL_1, OL_1d: OL_1, OL_2: OL_1, OL_23: OL_13,
OL_2d: OL_1d, OL_3: OL_2, OL_33: OL_23, OL_4: OL_3}
class InstFlag:
""" Instruction Flag contains all bit mask constants for describing an instruction.
You can bitwise-or the flags. See diStorm's documentation for more explanation.
The GEN_BLOCK is a special flag, it is used in the tables generator only;
See GenBlock class inside x86db.py. """
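	# Editor's illustrative note (an addition, not part of the original source):
	# flags are combined with bitwise OR, so an instruction that requires a ModR/M
	# byte and honors the operand-size prefix could, hypothetically, be described as
	#   flags = InstFlag.MODRM_REQUIRED | InstFlag.PRE_OP_SIZE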
FLAGS_EX_START_INDEX = 32
INST_FLAGS_NONE = 0
(MODRM_REQUIRED, # 0
NOT_DIVIDED, # 1
_16BITS, # 2
_32BITS, # 3
PRE_LOCK, # 4
PRE_REPNZ, # 5
PRE_REP, # 6
PRE_CS, # 7
PRE_SS, # 8
PRE_DS, # 9
PRE_ES, # 10
PRE_FS, # 11
PRE_GS, # 12
PRE_OP_SIZE, # 13
PRE_ADDR_SIZE, # 14
NATIVE, # 15
USE_EXMNEMONIC, # 16
USE_OP3, # 17
USE_OP4, # 18
MNEMONIC_MODRM_BASED, # 19
MODRR_REQUIRED, # 20
_3DNOW_FETCH, # 21
PSEUDO_OPCODE, # 22
INVALID_64BITS, # 23
_64BITS, # 24
PRE_REX, # 25
USE_EXMNEMONIC2, # 26
_64BITS_FETCH, # 27
FORCE_REG0, # 28
PRE_VEX, # 29
MODRM_INCLUDED, # 30
DST_WR, # 31
VEX_L, # 32 From here on: flagsEx.
VEX_W, # 33
MNEMONIC_VEXW_BASED, # 34
MNEMONIC_VEXL_BASED, # 35
FORCE_VEXL, # 36
MODRR_BASED, # 37
VEX_V_UNUSED, # 38
GEN_BLOCK, # 39 From here on: internal to disOps.
EXPORTED # 40
) = [1 << i for i in xrange(41)]
# Nodes are extended if they have any of the following flags:
EXTENDED = (PRE_VEX | USE_EXMNEMONIC | USE_EXMNEMONIC2 | USE_OP3 | USE_OP4)
	SEGMENTS = (PRE_CS | PRE_SS | PRE_DS | PRE_ES | PRE_FS | PRE_GS)
class ISetClass:
""" Instruction-Set-Class indicates to which set the instruction belongs.
These types are taken from the documentation of Intel/AMD. """
(INTEGER,
FPU,
P6,
MMX,
SSE,
SSE2,
SSE3,
SSSE3,
SSE4_1,
SSE4_2,
SSE4_A,
_3DNOW,
_3DNOWEXT,
VMX,
SVM,
AVX,
FMA,
CLMUL,
AES) = range(1, 20)
class FlowControl:
""" The flow control instruction will be flagged in the lo nibble of the 'meta' field in _InstInfo of diStorm.
They are used to distinguish between flow control instructions (such as: ret, call, jmp, jz, etc) to normal ones. """
(CALL,
RET,
SYS,
UNC_BRANCH,
CND_BRANCH,
INT,
CMOV) = range(1, 8)
class NodeType:
""" A node can really be an object holder for an instruction-info object or
another table (list) with a different size.
GROUP - 8 entries in the table
FULL - 256 entries in the table.
Divided - 72 entries in the table (ranges: 0x0-0x7, 0xc0-0xff).
Prefixed - 12 entries in the table (none, 0x66, 0xf2, 0xf3). """
(NONE, # 0
INFO, # 1
INFOEX, # 2
LIST_GROUP, # 3
LIST_FULL, # 4
LIST_DIVIDED, # 5
LIST_PREFIXED # 6
) = range(0, 7)
class CPUFlags:
""" Specifies all the flags that the x86/x64 CPU supports, in a special compact order. """
(CF, # 0
IF, # 1
PF, # 2
DF, # 3
AF, # 4
OF, # 5
ZF, # 6
SF # 7
) = [1 << i for i in xrange(8)]
| gpl-3.0 | -7,681,699,246,151,668,000 | 21.821862 | 118 | 0.573929 | false |
chirilo/mozillians | vendor-local/src/mimeparse/setup.py | 43 | 1807 | # -*- coding: utf-8 -*-
#old way
from distutils.core import setup
#new way
#from setuptools import setup, find_packages
setup(name='mimeparse',
version='0.1.3',
      description='A module that provides basic functions for parsing mime-type names and matching them against a list of media-ranges.',
long_description="""
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of
the HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
- parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q' quality parameter.
- quality(): Determines the quality ('q') of a mime-type when compared against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must be pre-parsed.
- best_match(): Choose the mime-type with the highest quality ('q') from a list of candidates.
""",
classifiers=[
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='mime-type',
author='Joe Gregorio',
author_email='[email protected]',
maintainer='Joe Gregorio',
maintainer_email='[email protected]',
url='http://code.google.com/p/mimeparse/',
license='MIT',
py_modules=['mimeparse'],
zip_safe=True,
)
| bsd-3-clause | 2,163,356,957,297,469,400 | 40.022727 | 132 | 0.658726 | false |
pimier15/PyGUI | Kivy/Kivy/Bk_Interractive/Kivy-Interractive application/sample/Chapter_04_code/python3/08 - Behaviours - enhancing widget functionality/comicwidgets.py | 22 | 1999 | # File name: comicwidgets.py
import kivy
kivy.require('1.9.0')
from kivy.uix.scatter import Scatter
from kivy.graphics import Line
class DraggableWidget(Scatter):
def __init__(self, **kwargs):
self.selected = None
self.touched = False
super(DraggableWidget, self).__init__(**kwargs)
def on_touch_down(self, touch):
if self.collide_point(touch.x, touch.y):
self.touched = True
self.select()
super(DraggableWidget, self).on_touch_down(touch)
return True
return super(DraggableWidget, self).on_touch_down(touch)
def select(self):
if not self.selected:
self.ix = self.center_x
self.iy = self.center_y
with self.canvas:
self.selected = Line(rectangle=(0,0,self.width,self.height), dash_offset=2)
def on_pos(self, instance, value):
if self.selected and self.touched:
go = self.parent.general_options
go.translation = (self.center_x- self.ix, self.center_y - self.iy)
self.ix = self.center_x
self.iy = self.center_y
def on_rotation(self, instance, value):
if self.selected and self.touched:
go = self.parent.general_options
go.rotation = value
def on_scale(self, instance, value):
if self.selected and self.touched:
go = self.parent.general_options
go.scale = value
def translate(self, x, y):
self.center_x = self.ix = self.ix + x
self.center_y = self.iy = self.iy + y
def on_touch_up(self, touch):
self.touched = False
if self.selected:
if not self.parent.general_options.group_mode:
self.unselect()
return super(DraggableWidget, self).on_touch_up(touch)
def unselect(self):
if self.selected:
self.canvas.remove(self.selected)
self.selected = None
class StickMan(DraggableWidget):
pass
| mit | -5,905,939,070,889,005,000 | 31.241935 | 91 | 0.595298 | false |
jamiefolsom/edx-platform | common/lib/xmodule/xmodule/imageannotation_module.py | 107 | 7163 | """
Module for Image annotations using annotator.
"""
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xblock.core import Scope, String
from xmodule.annotator_mixin import get_instructions, html_to_text
from xmodule.annotator_token import retrieve_token
from xblock.fragment import Fragment
import textwrap
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class AnnotatableFields(object):
""" Fields for `ImageModule` and `ImageDescriptor`. """
data = String(
help=_("XML data for the annotation"),
scope=Scope.content,
default=textwrap.dedent("""\
<annotatable>
<instructions>
<p>
Add the instructions to the assignment here.
</p>
</instructions>
<p>
Lorem ipsum dolor sit amet, at amet animal petentium nec. Id augue nemore postulant mea. Ex eam dicant noluisse expetenda, alia admodum abhorreant qui et. An ceteros expetenda mea, tale natum ipsum quo no, ut pro paulo alienum noluisse.
</p>
<json>
navigatorSizeRatio: 0.25,
wrapHorizontal: false,
showNavigator: true,
navigatorPosition: "BOTTOM_LEFT",
showNavigationControl: true,
tileSources: [{"profile": "http://library.stanford.edu/iiif/image-api/1.1/compliance.html#level2", "scale_factors": [1, 2, 4, 8, 16, 32, 64], "tile_height": 1024, "height": 3466, "width": 113793, "tile_width": 1024, "qualities": ["native", "bitonal", "grey", "color"], "formats": ["jpg", "png", "gif"], "@context": "http://library.stanford.edu/iiif/image-api/1.1/context.json", "@id": "http://54.187.32.48/loris/suzhou_orig.jp2"}],
</json>
</annotatable>
"""))
display_name = String(
display_name=_("Display Name"),
help=_("Display name for this module"),
scope=Scope.settings,
default=_('Image Annotation'),
)
instructor_tags = String(
display_name=_("Tags for Assignments"),
help=_("Add tags that automatically highlight in a certain color using the comma-separated form, i.e. imagery:red,parallelism:blue"),
scope=Scope.settings,
default='professor:green,teachingAssistant:blue',
)
annotation_storage_url = String(
help=_("Location of Annotation backend"),
scope=Scope.settings,
default="http://your_annotation_storage.com",
display_name=_("Url for Annotation Storage")
)
annotation_token_secret = String(
help=_("Secret string for annotation storage"),
scope=Scope.settings,
default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
display_name=_("Secret Token String for Annotation")
)
default_tab = String(
display_name=_("Default Annotations Tab"),
help=_("Select which tab will be the default in the annotations table: myNotes, Instructor, or Public."),
scope=Scope.settings,
default="myNotes",
)
# currently only supports one instructor, will build functionality for multiple later
instructor_email = String(
display_name=_("Email for 'Instructor' Annotations"),
help=_("Email of the user that will be attached to all annotations that will be found in 'Instructor' tab."),
scope=Scope.settings,
default="",
)
annotation_mode = String(
display_name=_("Mode for Annotation Tool"),
help=_("Type in number corresponding to following modes: 'instructor' or 'everyone'"),
scope=Scope.settings,
default="everyone",
)
class ImageAnnotationModule(AnnotatableFields, XModule):
'''Image Annotation Module'''
js = {
'coffee': [
resource_string(__name__, 'js/src/javascript_loader.coffee'),
resource_string(__name__, 'js/src/html/display.coffee'),
resource_string(__name__, 'js/src/annotatable/display.coffee'),
],
'js': [
resource_string(__name__, 'js/src/collapsible.js'),
]
}
css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]}
icon_class = 'imageannotation'
def __init__(self, *args, **kwargs):
super(ImageAnnotationModule, self).__init__(*args, **kwargs)
xmltree = etree.fromstring(self.data)
self.instructions = self._extract_instructions(xmltree)
self.openseadragonjson = html_to_text(etree.tostring(xmltree.find('json'), encoding='unicode'))
self.user_email = ""
self.is_course_staff = False
if self.runtime.get_user_role() in ['instructor', 'staff']:
self.is_course_staff = True
if self.runtime.get_real_user is not None:
try:
self.user_email = self.runtime.get_real_user(self.runtime.anonymous_student_id).email
except Exception: # pylint: disable=broad-except
self.user_email = _("No email address found.")
def _extract_instructions(self, xmltree):
""" Removes <instructions> from the xmltree and returns them as a string, otherwise None. """
return get_instructions(xmltree)
def student_view(self, context):
""" Renders parameters to template. """
context = {
'display_name': self.display_name_with_default,
'instructions_html': self.instructions,
'token': retrieve_token(self.user_email, self.annotation_token_secret),
'tag': self.instructor_tags,
'openseadragonjson': self.openseadragonjson,
'annotation_storage': self.annotation_storage_url,
'default_tab': self.default_tab,
'instructor_email': self.instructor_email,
'annotation_mode': self.annotation_mode,
'is_course_staff': self.is_course_staff,
}
fragment = Fragment(self.system.render_template('imageannotation.html', context))
# TinyMCE already exists in Studio so we should not load the files again
        # get_real_user always returns "None" in Studio since its runtime contains no anonymous ids
if self.runtime.get_real_user is not None:
fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/tinymce.full.min.js")
fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/jquery.tinymce.min.js")
return fragment
class ImageAnnotationDescriptor(AnnotatableFields, RawDescriptor): # pylint: disable=abstract-method
''' Image annotation descriptor '''
module_class = ImageAnnotationModule
mako_template = "widgets/raw-edit.html"
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(ImageAnnotationDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([
ImageAnnotationDescriptor.annotation_storage_url,
ImageAnnotationDescriptor.annotation_token_secret,
])
return non_editable_fields
| agpl-3.0 | 502,868,566,008,465,660 | 43.216049 | 450 | 0.636605 | false |
andreshp/Algorithms | MachineLearning/Clustering/SingleLinkedClustering/SLClusteringDistance.py | 1 | 12524 | #!/usr/bin/python
######################################################################
# Autor: Andrés Herrera Poyatos
# Universidad de Granada, March, 2015
# Single-Linkage Clustering Algorithm
#######################################################################
# This program reads the values associated with the vertices from a file.
# The distance between 2 vertices is calculated with those values.
# For example, if the values are a string of 0s and 1s with a fixed size,
# the hamming distance is the sum of the bits where both strings differ.
# (This is the distance considered in the code but it is easy to change).
#
# Given a positive integer k, the program executes the Single-Linkage
# Clustering Algorithm to find the k clusters that maximize:
# min d(x, y)
# x, y are in a different cluster
import sys
import time
from gmpy2 import popcount
#------------- MINHEAP IMPLEMENTATION --------------#
# Swap two components of an array
def swap(array, i, j):
copy = array[i]
array[i] = array[j]
array[j] = copy
array[i].index = i
array[j].index = j
# MinHeap Class.
# A heap is a representation of a complete binary tree as an array.
# The array has the Breadth-First Order of the nodes. Consequently,
# the following equalities are true:
# leftChild(index) = 2*index+1
# rightChild(index) = 2*index+2
# parent(index) = (index-1) // 2
#
# A MinHeap is a heap where there is a total order relation and verifies the following property:
# "heap[i] >= heap[parent(i)] for all i in range(0, size())"
# analogously:
#    "Each child is greater than or equal to its parent."
#
# Consequently, heap[0] is the minimum of the elements of the heap.
# A MinHeap supports the following operations:
# - Get the minimum in O(1) (return heap[0])
# - Insert an element in O(log n)
# - Delete an element in O(log n)
class MinHeap(object):
# Init method
def __init__(self):
self.heap = []
# Check if the Heap is empty
def empty(self):
return (not self.heap)
# Return the min of the Heap.
# Precondition: The Heap must be not empty.
def min(self):
return self.heap[0] # A MinHeap keeps the min in the first position.
# Size of the Heap
def size(self):
return len(self.heap)
# Insert Method
def insert(self, element):
element.index = len(self.heap)
self.heap.append(element)
self._repairUp(len(self.heap)-1)
# Insert the elements of an array
def insertArray(self, array):
for number in array:
self.insert(number)
# Delete an element from the Heap
# Precondition: The Heap must be not empty.
def delete(self, index):
swap(self.heap, index, len(self.heap)-1)
self.heap.pop()
        self._repairHeap(index)  # the element swapped into 'index' may need to move up or down
# Delete min from the Heap.
# Precondition: The Heap must be not empty.
def deleteMin(self):
swap(self.heap, 0, len(self.heap)-1)
self.heap.pop()
self._repairDown(0)
# Change the value of an element and repair the MinHeap Structure.
def changeElement(self, index, value):
self.heap[index] = value
        self._repairHeap(index)
# Execute HeapSort to the elements of the heap.
def heapSort(self):
sorted_array = []
while(not self.empty()):
sorted_array.append(self.min())
self.deleteMin()
return sorted_array
# Print Heap by levels
def printHeap(self):
elements_level = 1
print("Heap:")
for i in range(0, len(self.heap)):
if i == elements_level:
elements_level += elements_level+1; print()
print(self.heap[i], " ", end="")
print(); print()
# Check that it is a MinHeap.
# The invariant is checked.
def _checkHeap(self):
is_heap = True; fail = -1
for i in range(1, len(self.heap)):
if self.heap[i] < self.heap[(i-1) // 2]:
is_heap = False; fail = i; break
return is_heap, fail
# Repair the Min Heap invariant:
# Each parent key is less or equal than their children keys.
def _repairHeap(self, index):
self._repairUp(index)
self._repairDown(index)
# Go up in the Heap repairing its invariant
def _repairUp(self, index):
parent = (index-1) // 2
while index > 0:
if self.heap[index] < self.heap[parent]:
swap(self.heap, index, parent)
else: break
index = parent
parent = (index-1) // 2
# Go down in the Heap repairing its invariant
def _repairDown(self, index):
child = 2 * index + 1
while child < len(self.heap):
if child + 1 < len(self.heap) and self.heap[child] > self.heap[child+1]:
child += 1
if self.heap[index] > self.heap[child]:
swap(self.heap, child, index)
else: break
index = child
child = 2 * index +1
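# Illustrative usage sketch (added for clarity, not part of the original script).
# The heap stores arbitrary objects that expose an "index" attribute and the
# rich-comparison operators, exactly like the Vertex class defined below.
# The helper is never called by the script; the _Item class is made up here.
def _minheap_usage_sketch():
    class _Item(object):
        def __init__(self, distance):
            self.distance = distance
            self.index = -1     # maintained by swap()/insert()
        def __lt__(self, other):
            return self.distance < other.distance
        def __le__(self, other):
            return self.distance <= other.distance
        def __gt__(self, other):
            return self.distance > other.distance
        def __ge__(self, other):
            return self.distance >= other.distance
    demo_heap = MinHeap()
    demo_heap.insertArray([_Item(d) for d in (5, 1, 4, 2, 3)])
    # heapSort() repeatedly extracts the minimum: [1, 2, 3, 4, 5]
    return [item.distance for item in demo_heap.heapSort()]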
#------------- VERTEX IMPLEMENTATION --------------#
# Vertex Class.
# It keeps the vertex value and the cluster
# associated with the vertex.
class Vertex(object):
    # Constructor
def __init__(self, key, value):
self.key = key
self.value = value
self.cluster = Cluster(self)
self.edge = -1 # Used in the clustering algorithm
self.distance = float("inf") # Used in the clustering algorithm
# Overloading comparisons operators
def __lt__(self, other):
return (self.distance < other.distance)
def __le__(self, other):
return(self.distance <= other.distance)
def __gt__(self, other):
return(self.distance > other.distance)
def __ge__(self, other):
return(self.distance >= other.distance)
# Hash function
def __hash__(self):
return self.key.__hash__()
# Distance between two vertices.
# In this case, it is the Hamming Distance.
def hammingDistance(self, vertex):
return popcount(self.value ^ vertex.value)
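# Illustrative example (added for clarity, not part of the original script).
# The Hamming distance of 0b1100 and 0b1010 is popcount(0b1100 ^ 0b1010) =
# popcount(0b0110) = 2. The helper is never invoked by the script; it only
# documents the expected behaviour of Vertex.hammingDistance.
def _hamming_distance_example():
    a = Vertex(1, 0b1100)
    b = Vertex(2, 0b1010)
    assert a.hammingDistance(b) == 2
    assert a.hammingDistance(a) == 0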
#------------- CLUSTER IMPLEMENTATION --------------#
# Union - Find Data Structure
# Each vertex has an associated cluster. We can:
#  - Get the associated cluster of a vertex in O(1)
# - Join two clusters of size r and s in O(min{r,s})
class Cluster(object):
# Number of active clusters (class attribute)
n_clusters = 0
# Initializes a cluster
def __init__(self, vertex):
self.index = Cluster.n_clusters
Cluster.n_clusters += 1
self.members = [vertex]
# Adds a vertex to the cluster
def add(self, vertex):
self.members.append(vertex)
vertex.cluster = self
# Size of the cluster
def size(self):
return len(self.members)
# Get the cluster of a given vertex. It is a class method
def getSet(vertex):
return vertex.cluster
# Returns True if both nodes are in the same cluster.
# Returns False otherwise.
def sameCluster(node1, node2):
return node1.cluster is node2.cluster
# Class method to join nodes' cluster in just one
def join(node1, node2):
node1.cluster._join(node2.cluster)
# Both clusters are joined in one of them
def _join(self, other):
if self.size() < other.size():
self.__join(other)
else:
other.__join(self)
# Private method to accomplish the join
def __join(self, other):
for vertex in self.members:
other.add(vertex)
self.members = []
Cluster.n_clusters -= 1
# Hash function
def __hash__(self):
return self.index.__hash__()
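# Illustrative sketch (added for clarity, not part of the original script) of
# how the union-find structure above is used: every Vertex starts in its own
# singleton cluster and join() merges the smaller cluster into the larger one.
# The helper is never invoked by the script itself.
def _cluster_usage_sketch():
    u = Vertex(1, 0b0011)
    v = Vertex(2, 0b0101)
    w = Vertex(3, 0b0110)
    assert not Cluster.sameCluster(u, v)
    Cluster.join(u, v)                  # merge the two singleton clusters
    assert Cluster.sameCluster(u, v)
    Cluster.join(v, w)                  # w joins the cluster holding u and v
    assert Cluster.sameCluster(u, w)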
#------------- SINGLE-LINKAGE CLUSTERING --------------#
# Single-Linkage Clustering Algorithm
# Parameters:
#    - vertices : List of Vertex objects (1-indexed; vertices[0] is unused)
# - k : Number of clusters
def SLClustering(vertices, k):
    # For each vertex, find the closest vertex among the pairs not considered yet.
for i in range(1, len(vertices)-1):
for j in range(i+1, len(vertices)):
new_distance = vertices[i].hammingDistance(vertices[j])
if new_distance < vertices[i].distance:
vertices[i].distance = new_distance
vertices[i].edge = j
for i in range(2, len(vertices)):
for j in range(1, i):
if vertices[j].edge != i:
new_distance = vertices[i].hammingDistance(vertices[j])
if new_distance < vertices[i].distance:
vertices[i].distance = new_distance
vertices[i].edge = j
# Build a min heap with all the vertices:
heap = MinHeap()
for i in range(1, len(vertices)):
heap.insert(vertices[i])
# Add the max_edges times the edge between separated clusters
# that has the minimum cost and join the respective clusters.
max_edges = len(vertices) - k - 1
added_edges = 0
while added_edges < max_edges:
# Next vertex of the heap
next_vertex = heap.min(); heap.deleteMin()
# If it has a valid edge (an edge between two different clusters)
# join those clusters and count it.
if not Cluster.sameCluster(next_vertex, vertices[next_vertex.edge]):
Cluster.join(next_vertex, vertices[next_vertex.edge])
added_edges += 1
# Put the vertex again in the heap with the edge with minimum cost
# from those which go to a different cluster.
next_vertex.distance = float("inf")
next_vertex.edge = -1
for j in range(1, len(vertices)):
if not Cluster.sameCluster(next_vertex, vertices[j]):
new_distance = next_vertex.hammingDistance(vertices[j])
if new_distance < next_vertex.distance:
next_vertex.distance = new_distance
next_vertex.edge = j
if next_vertex.distance < float("inf"):
heap.insert(next_vertex)
if added_edges % 10 == 0:
print("Completed: ", (added_edges / max_edges) * 100.0)
# Find the maximum spacing distance between k clusters
max_spacing = float("inf")
while Cluster.sameCluster(heap.min(), vertices[heap.min().edge]):
heap.deleteMin()
max_spacing = heap.min().distance
return max_spacing
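# Illustrative usage (added for clarity, not part of the original script):
# four hand-built vertices asked for k = 2 clusters. The 1-indexed list with a
# None placeholder mirrors what readVertices() below produces. Vertices 1-2 and
# 3-4 end up in separate clusters, so the maximum spacing is their Hamming
# distance, 5. The helper is never invoked by the script itself.
def _slclustering_example():
    values = [0b000000, 0b000001, 0b111110, 0b111111]
    example_vertices = [None] + [Vertex(i + 1, v) for i, v in enumerate(values)]
    assert SLClustering(example_vertices, 2) == 5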
# Read the vertices from a file.
# It initializes the vertices, the clusters (one per vertex)
# and returns a list with the vertices.
def readVertices(distances_file):
data = open(distances_file, "r")
# Build the vertices
first_line = data.readline()
num_vertices = int(first_line.split()[0])
num_bits = int(first_line.split()[1])
vertices = [None] * (num_vertices+1)
# Each line corresponds to a vertex
# It contains the value of the vertex, a string with num_bits bits of 0s and 1s,
# such as: 1 1 1 0 0 0 0 0 1 1 0 1 0 0 1 1 1 1 0 0 1 1 1 1.
# We represent it as an integer in base 2.
i = 1
for line in data:
vertices[i] = Vertex(i, int(line.replace(" ", ""), 2))
i += 1
return vertices
######################## MAIN ##########################
# See if arguments are correct
if len(sys.argv) < 3 or len(sys.argv) > 4:
print("Sintax: SLClusteringDistance.py <options> distances.txt k \n The option -n don't print the clusters.")
sys.exit()
print_clusters = True
if len(sys.argv) > 3:
if sys.argv[1] == "-n":
print_clusters = False
# Read the distances between the vertices and the value of k
try:
distances_file = sys.argv[1 if len(sys.argv) == 3 else 2]
vertices = readVertices(distances_file)
k = int(sys.argv[2 if len(sys.argv) == 3 else 3])
except IOError:
print("Error: The file", distances_file, "can\'t be read.")
sys.exit()
# Execute clustering algorithm and compute the time wasted
start_time = time.time()
try:
maximum_spacing_distance = SLClustering(vertices, k)
except RuntimeError as element:
print("Error:", element.args[0] , "is not a vertex.")
sys.exit()
print("--- %f seconds ---" % (time.time() - start_time))
# Print the result
print("Maximum Spacing of a", k, "-Clustering:", maximum_spacing_distance)
# If chosen, print the clusters
if print_clusters:
print("Clusters:")
clusters = set()
for j in range(1,len(vertices)):
clusters.add(vertices[j].cluster)
i = 1
for cluster in clusters:
print("Cluster", i, ":")
for vertex in cluster.members:
print(vertex.key, end=" ")
print()
i += 1
| gpl-2.0 | 4,601,940,508,247,455,000 | 32.663978 | 113 | 0.598738 | false |
IZSVenezie/VetEpiGIS-Stat | plugin/local_dialog.py | 1 | 5920 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'local_dialog_base.ui'
#
# Created: Sat Jan 7 15:11:01 2017
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(588, 551)
self.gridLayout_3 = QtGui.QGridLayout(Dialog)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.splitter = QtGui.QSplitter(Dialog)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.label = QtGui.QLabel(self.splitter)
self.label.setObjectName(_fromUtf8("label"))
self.comboBox = QtGui.QComboBox(self.splitter)
self.comboBox.setMinimumSize(QtCore.QSize(251, 0))
self.comboBox.setObjectName(_fromUtf8("comboBox"))
self.gridLayout_3.addWidget(self.splitter, 0, 0, 1, 6)
self.label_5 = QtGui.QLabel(Dialog)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout_3.addWidget(self.label_5, 1, 0, 1, 1)
self.comboBox_5 = QtGui.QComboBox(Dialog)
self.comboBox_5.setObjectName(_fromUtf8("comboBox_5"))
self.gridLayout_3.addWidget(self.comboBox_5, 1, 1, 1, 1)
self.lineEdit = QtGui.QLineEdit(Dialog)
self.lineEdit.setInputMethodHints(QtCore.Qt.ImhNone)
self.lineEdit.setInputMask(_fromUtf8(""))
self.lineEdit.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.gridLayout_3.addWidget(self.lineEdit, 1, 3, 1, 1)
self.comboBox_6 = QtGui.QComboBox(Dialog)
self.comboBox_6.setObjectName(_fromUtf8("comboBox_6"))
self.gridLayout_3.addWidget(self.comboBox_6, 1, 4, 1, 1)
spacerItem = QtGui.QSpacerItem(21, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_3.addItem(spacerItem, 1, 5, 1, 1)
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.comboBox_2 = QtGui.QComboBox(Dialog)
self.comboBox_2.setObjectName(_fromUtf8("comboBox_2"))
self.gridLayout.addWidget(self.comboBox_2, 0, 1, 1, 1)
self.label_2 = QtGui.QLabel(Dialog)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
self.gridLayout_3.addLayout(self.gridLayout, 2, 0, 1, 2)
spacerItem1 = QtGui.QSpacerItem(39, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_3.addItem(spacerItem1, 2, 2, 1, 1)
self.label_4 = QtGui.QLabel(Dialog)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout_3.addWidget(self.label_4, 2, 3, 1, 1)
self.comboBox_4 = QtGui.QComboBox(Dialog)
self.comboBox_4.setObjectName(_fromUtf8("comboBox_4"))
self.gridLayout_3.addWidget(self.comboBox_4, 2, 4, 1, 1)
self.gridLayout_2 = QtGui.QGridLayout()
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.label_3 = QtGui.QLabel(Dialog)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout_2.addWidget(self.label_3, 0, 0, 1, 1)
self.comboBox_3 = QtGui.QComboBox(Dialog)
self.comboBox_3.setObjectName(_fromUtf8("comboBox_3"))
self.gridLayout_2.addWidget(self.comboBox_3, 0, 1, 1, 1)
self.gridLayout_3.addLayout(self.gridLayout_2, 3, 0, 1, 2)
spacerItem2 = QtGui.QSpacerItem(233, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_3.addItem(spacerItem2, 3, 3, 1, 2)
self.toolButton = QtGui.QToolButton(Dialog)
self.toolButton.setIconSize(QtCore.QSize(30, 30))
self.toolButton.setObjectName(_fromUtf8("toolButton"))
self.gridLayout_3.addWidget(self.toolButton, 3, 5, 1, 1)
self.tableView = QtGui.QTableView(Dialog)
self.tableView.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.tableView.setObjectName(_fromUtf8("tableView"))
self.gridLayout_3.addWidget(self.tableView, 4, 0, 1, 6)
self.buttonBox = QtGui.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close|QtGui.QDialogButtonBox.Save)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.gridLayout_3.addWidget(self.buttonBox, 5, 3, 1, 3)
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Dialog", None))
self.label.setText(_translate("Dialog", "Data field:", None))
self.label_5.setText(_translate("Dialog", "Neighbouring method:", None))
self.label_2.setText(_translate("Dialog", "Weighting scheme:", None))
self.label_4.setText(_translate("Dialog", "Variance assumption:", None))
self.label_3.setText(_translate("Dialog", "Alternative hypothesis:", None))
self.toolButton.setText(_translate("Dialog", "...", None))
| gpl-3.0 | -3,711,803,308,895,401,000 | 51.389381 | 104 | 0.681588 | false |
luomiao/docker-volume-vsphere | esx_service/utils/auth_api.py | 2 | 39067 | # Copyright 2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
"""APIs for tenant management.
Note that externally used functions are named _function() and internally are function(),
which contradicts python module function naming. It will be fixed later (issue #1153) """
import auth
import auth_data_const
import convert
import auth_data
import vmdk_utils
import error_code
import log_config
import logging
from error_code import ErrorCode
from error_code import ErrorInfo
from error_code import generate_error_info
from error_code import error_code_to_message
import re
# regex for valid tenant name
VALID_TENANT_NAME_REGEXP = "[a-zA-Z0-9_][a-zA-Z0-9_.-]*"
VALID_TENANT_NAMES = 'rename of vmgroups other than _DEFAULT'
global valid_tenant_name_reg
valid_tenant_name_reg = re.compile("^" + VALID_TENANT_NAME_REGEXP + "$")
def get_auth_mgr_object():
""" Get a auth_mgr object which needed to connect to auth DB. """
# auth.get_auth_mgr will not throw an Exception
# it will return err_msg when it fails
err_msg, auth_mgr = auth.get_auth_mgr()
if err_msg:
error_info = error_code.generate_error_info(ErrorCode.INTERNAL_ERROR, err_msg)
return error_info, None
return None, auth_mgr
def only_when_configured(ret_obj=False):
"""
Decorator to check if the DB was already inited.
Serves functions which return ErrInfo (when ret_obj=False), and the ones
returning (ErrInfo, None) when ret_obj=True.
Makes sure the decorated function is called only when DB is connected,
otherwise a proper ErrInfo is returned.
"""
def real_decorator(func):
'The actual logic for decorator.'
def not_inited():
'Returns err code for not initialized'
return generate_error_info(ErrorCode.INIT_NEEDED)
def internal_error():
'Returns error code for internal errors'
return generate_error_info(ErrorCode.INTERNAL_ERROR,
"@only_when_configured: %s" % func.__name__)
def check_config(*args, **kwargs):
'call func() if DB is configured and issue an error if not.'
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
if ret_obj:
return internal_error(), None
else:
return internal_error()
if auth_mgr.allow_all_access():
if ret_obj:
return not_inited(), None
else:
return not_inited()
# No error, we can go ahead and call the function
return func(*args, **kwargs)
# this function will actually do the checks for connection
return check_config
# @only_when_configured just handles the ret_obj param and returns real_decorator to be called
return real_decorator
def get_tenant_from_db(name):
"""
Get a tenant object with given name
Return value:
-- error_code: return None on success or error info on failure
-- tenant: return tenant object on success or None on failure
"""
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info, None
logging.debug("auth_api.get_tenant_from_db name=%s", name)
error_msg, tenant = auth_mgr.get_tenant(name)
if error_msg:
error_info = generate_error_info(error_msg)
return error_info, None
return None, tenant
def get_tenant_name(tenant_uuid):
"""
Get tenant name with given tenant_uuid
Return value:
-- error_info: return None on success or error info on failure
-- tenant_name: return tenant name on success or None on failure
"""
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info, None
error_msg, tenant_name = auth_mgr.get_tenant_name(tenant_uuid)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info, tenant_name
def check_tenant_exist(name):
""" Check tenant with @param name exist or not
Return value:
-- Return None if tenant with given name does not exist
-- Return error_info on failure or the tenant with given name exists
"""
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info
error_msg, exist_tenant = auth_mgr.get_tenant(name)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
if exist_tenant:
error_info = generate_error_info(ErrorCode.TENANT_ALREADY_EXIST, name)
return error_info
def create_tenant_in_db(name, description, vms, privileges):
"""
Create a tenant object in DB
Return value:
-- error_info: return None on success or error info on failure
-- tenant: return tenant object on success or None on failure
"""
error_info = check_tenant_exist(name)
if error_info:
return error_info, None
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info, None
error_msg, tenant = auth_mgr.create_tenant(name=name,
description=description,
vms=vms,
privileges=privileges)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info, tenant
def get_tenant_list_from_db(name=None):
"""
List all tenants or tenant with the name specified
Params:
-- name: if "name" is specified, return a list of one tenant with name specified
if "name" is not specified, return a list of all tenants
Return value:
-- error_info: return None on success or error info on failure
-- tenant_list: return a list of tenant objects on success or None on failure
"""
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info, None
if not name:
error_msg, tenant_list = auth_mgr.list_tenants()
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
else:
error_msg, tenant = auth_mgr.get_tenant(name)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
if error_msg or not tenant:
tenant_list = []
else:
tenant_list = [tenant]
return error_info, tenant_list
def generate_tuple_from_vm_list(vm_list):
""" Generate a list of (vm_uuid, vm_name) pair """
if not vm_list:
return None, [], []
vms = []
error_msg = ""
not_found_vms = []
for vm_name in vm_list:
vm_uuid = vmdk_utils.get_vm_uuid_by_name(vm_name)
if not vm_uuid:
err = "Cannot find vm_uuid for vm {0} ".format(vm_name)
if err:
error_msg = error_msg + err
not_found_vms.append(vm_name)
vms.append((vm_uuid, vm_name))
if error_msg:
return error_msg, vms, not_found_vms
return None, vms, not_found_vms
def default_privileges():
""" Return a privilege object with default value """
privileges = [{'datastore': '',
'allow_create': 0,
'max_volume_size': 0,
'usage_quota': 0}]
return privileges
def set_privileges(allow_create, privileges, value):
""" set or unset allow_create privileges based on input param """
logging.debug("set_privileges: allow_create=%s, privileges=%s, value=%d", allow_create,
privileges, value)
privileges[auth_data_const.COL_ALLOW_CREATE] = value
return privileges
def validate_string_to_bool(allow_create):
"""
    Validate case-insensitive "true"/"false" strings.
    Return the boolean value of the argument if it is valid,
    else return the original value. Also return whether the argument is valid or not.
"""
is_valid = True
# If already bool, return
if type(allow_create) is bool:
return allow_create, is_valid
allow_create = str(allow_create).lower()
if allow_create == "true":
return True, is_valid
elif allow_create == "false":
return False, is_valid
else:
is_valid = False
return allow_create, is_valid
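# Illustrative example (added for clarity, not part of the original module):
# the helper accepts booleans and case-insensitive "true"/"false" strings and
# flags anything else as invalid. It is never called by this module.
def _validate_string_to_bool_example():
    assert validate_string_to_bool(True) == (True, True)
    assert validate_string_to_bool("FALSE") == (False, True)
    assert validate_string_to_bool("yes") == ("yes", False)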
def generate_privileges(datastore_url, allow_create, volume_maxsize_in_MB, volume_totalsize_in_MB):
""" Generate privileges based on input params """
logging.debug("generate_privileges: datastore_url=%s allow_create=%s"
"volume_maxsize_in_MB=%s volume_totalsize_in_MB=%s",
datastore_url, allow_create, volume_maxsize_in_MB, volume_totalsize_in_MB)
privileges = default_privileges()[0]
privileges[auth_data_const.COL_DATASTORE_URL] = datastore_url
if allow_create is True:
privileges = set_privileges(allow_create, privileges, 1)
if volume_maxsize_in_MB:
privileges[auth_data_const.COL_MAX_VOLUME_SIZE] = volume_maxsize_in_MB
if volume_totalsize_in_MB:
privileges[auth_data_const.COL_USAGE_QUOTA] = volume_totalsize_in_MB
logging.debug("generate_privileges: privileges=%s", privileges)
return privileges
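# Illustrative example (added for clarity, not part of the original module):
# the datastore URL below is a made-up value; the assertions only describe the
# shape of the privileges dict built above. The helper is never called here.
def _generate_privileges_example():
    privileges = generate_privileges(datastore_url="ds:///vmfs/volumes/example/",
                                     allow_create=True,
                                     volume_maxsize_in_MB=512,
                                     volume_totalsize_in_MB=1024)
    assert privileges[auth_data_const.COL_ALLOW_CREATE] == 1
    assert privileges[auth_data_const.COL_MAX_VOLUME_SIZE] == 512
    assert privileges[auth_data_const.COL_USAGE_QUOTA] == 1024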
def modify_privileges(privileges, allow_create, volume_maxsize_in_MB, volume_totalsize_in_MB):
""" Modify privileges based on input params """
logging.debug("modify_privileges: allow_create=%s, volume_maxsize_in_MB=%s, volume_totalsize_in_MB=%s",
allow_create, volume_maxsize_in_MB, volume_totalsize_in_MB)
# If None, don't change the privilege
# If not None, change accordingly
if allow_create is not None:
# allow_create has been validated. It is either True or False
if allow_create is True:
privileges = set_privileges(allow_create, privileges, 1)
else:
privileges = set_privileges(allow_create, privileges, 0)
if volume_maxsize_in_MB:
privileges[auth_data_const.COL_MAX_VOLUME_SIZE] = volume_maxsize_in_MB
if volume_totalsize_in_MB:
privileges[auth_data_const.COL_USAGE_QUOTA] = volume_totalsize_in_MB
return privileges
def generate_privileges_dict(privileges):
# privileges is a list which is read from auth DB
# it has the following format
# (tenant_uuid, datastore_url, allow_create, max_volume_size, usage_quota)
privileges_dict = {}
privileges_dict[auth_data_const.COL_DATASTORE_URL] = privileges.datastore_url
privileges_dict[auth_data_const.COL_ALLOW_CREATE] = privileges.allow_create
privileges_dict[auth_data_const.COL_MAX_VOLUME_SIZE] = privileges.max_volume_size
privileges_dict[auth_data_const.COL_USAGE_QUOTA] = privileges.usage_quota
return privileges_dict
def get_default_datastore_url(name):
"""
Get default_datastore url for given tenant
Return value:
--- error_info: return None on success or error info on failure
--- default_datastore: return name of default_datastore on success or None on failure
"""
logging.debug("auth_api.get_default_datastore_url: for tenant with name=%s", name)
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info, None
if auth_mgr.allow_all_access():
if name == auth_data_const.DEFAULT_TENANT:
return None, auth_data_const.VM_DS_URL
else:
return generate_error_info(ErrorCode.INIT_NEEDED), None
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info, None
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info, None
# if default_datastore is not set for this tenant, default_datastore will be None
error_msg, default_datastore_url = tenant.get_default_datastore(auth_mgr.conn)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
logging.debug("returning url %s", default_datastore_url)
return error_info, default_datastore_url
def is_tenant_name_valid(name):
""" Check given tenant name is valid or not """
if valid_tenant_name_reg.match(name):
return True
else:
return False
def is_vm_duplicate(vm_list):
"""
Check if vm names in vm_list contain duplicates
"""
if len(vm_list) != len(set(vm_list)):
error_info = error_code.generate_error_info(ErrorCode.VM_DUPLICATE, vm_list)
logging.error(error_info.msg)
return error_info
return None
def check_default_datastore(datastore_name):
"""
Check datastore with given name is a valid value for default_datastore
Returns None for success and err message for errors
"""
    # The valid default_datastore names are:
# named datastore existing on the host
# hard coded datastore name "_VM_DS"
# "_ALL_DS" is not a valid value to set as "default_datastore"
if datastore_name == auth_data_const.VM_DS:
return None
if datastore_name == auth_data_const.ALL_DS:
return generate_error_info(ErrorCode.DS_DEFAULT_CANNOT_USE_ALL_DS)
if not vmdk_utils.validate_datastore(datastore_name):
error_info = generate_error_info(ErrorCode.DS_NOT_EXIST, datastore_name)
return error_info
return None
def set_default_ds(tenant, default_datastore, check_existing):
"""
Set "default_datastore" for given tenant and create a full access privilege
to "default_datastore" if entry does not exist
    Need to check whether the default_datastore to be set is the same as the existing
    default_datastore when @Param check_existing is set to True
"""
# @Param tenant is a DockerVolumeTenant object
logging.debug("set_default_ds: tenant_name=%s default_datastore=%s check_existing=%d",
tenant.name, default_datastore, check_existing)
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info
datastore_url = vmdk_utils.get_datastore_url(default_datastore)
# datastore_url will be set to "None" by "vmdk_utils.get_datastore_url" is "default_datastore"
# is not a valid datastore
if datastore_url is None:
error_info = generate_error_info(ErrorCode.DS_DEFAULT_NAME_INVALID, default_datastore)
return error_info
existing_default_ds_url = None
if check_existing:
error_msg, existing_default_ds_url = tenant.get_default_datastore(auth_mgr.conn)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
# the "default_datastore" to be set is the same as existing "default_datastore" for this tenant
if datastore_url == existing_default_ds_url:
return None
error_msg = tenant.set_default_datastore(auth_mgr.conn, datastore_url)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
existing_default_ds = vmdk_utils.get_datastore_name(existing_default_ds_url) if existing_default_ds_url is not None else None
logging.info("Existing default_datastore %s is being changed to %s for tenant %s", existing_default_ds,
default_datastore, tenant)
# create full access privilege to default_datastore
error_info = _tenant_access_add(name=tenant.name,
datastore=default_datastore,
allow_create=True)
# privilege to default_datastore already exist, no need to create
if error_info and error_info.code == ErrorCode.PRIVILEGE_ALREADY_EXIST:
logging.info(error_info.msg + " not overwriting the existing access privilege")
error_info = None
return error_info
@only_when_configured(ret_obj=True)
def _tenant_create(name, default_datastore, description="", vm_list=None, privileges=None):
""" API to create a tenant . Returns (ErrInfo, Tenant) """
logging.debug("_tenant_create: name=%s description=%s vm_list=%s privileges=%s default_ds=%s",
name, description, vm_list, privileges, default_datastore)
if not is_tenant_name_valid(name):
error_info = generate_error_info(ErrorCode.TENANT_NAME_INVALID, name, VALID_TENANT_NAME_REGEXP)
return error_info, None
# if param "description" is not set by caller, the default value is empty string
if not description:
description = ""
# VM list can be empty during tenant create. Validate only if it exists
vms = None
if vm_list:
error_info = is_vm_duplicate(vm_list)
if error_info:
return error_info, None
error_msg, vms, not_found_vms = generate_tuple_from_vm_list(vm_list)
if error_msg:
not_found_vm_list = ",".join(not_found_vms)
error_info = generate_error_info(ErrorCode.VM_NOT_FOUND, not_found_vm_list)
return error_info, None
error_info = vm_in_any_tenant(vms)
if error_info:
return error_info, None
error_info = vmdk_utils.check_volumes_mounted(vms)
if error_info:
error_info.msg = "Cannot add VM to vmgroup " + error_info.msg
logging.error(error_info.msg)
return error_info, None
logging.debug("_tenant_create: vms=%s", vms)
error_info = check_default_datastore(default_datastore)
if error_info:
return error_info, None
error_info, tenant = create_tenant_in_db(
name=name,
description=description,
vms=vms,
privileges=privileges)
if error_info:
return error_info, None
error_info = set_default_ds(tenant=tenant,
default_datastore=default_datastore,
check_existing=False)
if error_info:
return error_info, None
return None, tenant
@only_when_configured()
def _tenant_update(name, new_name=None, description=None, default_datastore=None):
""" API to update a tenant """
logging.debug("_tenant_update: name=%s, new_name=%s, descrption=%s, default_datastore=%s",
name, new_name, description, default_datastore)
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info
if default_datastore:
error_info = check_default_datastore(default_datastore)
if error_info:
return error_info
error_info = set_default_ds(tenant=tenant,
default_datastore=default_datastore,
check_existing=True)
if error_info:
return error_info
if new_name:
if name == auth_data_const.DEFAULT_TENANT:
error_info = generate_error_info(ErrorCode.TENANT_NAME_INVALID, name, VALID_TENANT_NAMES)
return error_info
# check whether tenant with new_name already exist or not
error_info = check_tenant_exist(new_name)
if error_info:
return error_info
        if not is_tenant_name_valid(new_name):
            error_info = generate_error_info(ErrorCode.TENANT_NAME_INVALID, new_name, VALID_TENANT_NAME_REGEXP)
return error_info
error_msg = tenant.set_name(auth_mgr.conn, name, new_name)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
if description:
error_msg = tenant.set_description(auth_mgr.conn, description)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
return None
@only_when_configured()
def _tenant_rm(name, remove_volumes=False):
""" API to remove a tenant """
logging.debug("_tenant_rm: name=%s remove_volumes=%s", name, remove_volumes)
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info
if tenant.vms:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EMPTY, name)
logging.error(error_info.msg)
return error_info
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info
error_msg = auth_mgr.remove_tenant(tenant.id, remove_volumes)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
def _tenant_ls(name=None):
""" API to list all tenants """
logging.debug("_tenant_ls: name=%s", name)
error_info, tenant_list = get_tenant_list_from_db(name)
return error_info, tenant_list
def vm_already_in_tenant(name, vms):
"""
Check whether any vm in @param "vms" already exists in tenant @param "name"
"""
error_info, existing_vms = _tenant_vm_ls(name)
if error_info:
return error_info
for vm_id, vm_name in vms:
if vm_id in existing_vms:
error_info = generate_error_info(ErrorCode.VM_ALREADY_IN_TENANT,
vm_name, name)
logging.error(error_info.msg)
return error_info
return None
def vm_not_exist(name, vms):
"""
Check whether any vm in @param "vms" does not exist in tenant @param "name"
"""
error_info, existing_vms = _tenant_vm_ls(name)
if error_info:
return error_info
existing_vm_uuids = [vm_id for (vm_id, _) in existing_vms]
for vm_id, vm_name in vms:
if not vm_id in existing_vm_uuids:
error_info = error_code.generate_error_info(ErrorCode.VM_NOT_IN_TENANT, vm_name, name)
logging.error(error_info.msg)
return error_info
return None
def vm_in_any_tenant(vms):
"""
Check if any vm in @param "vms" is a part of another tenant
"""
error_info, tenant_list = get_tenant_list_from_db()
if error_info:
return error_info
for tenant in tenant_list:
for vm_id, vm_name in vms:
if vm_id in dict(tenant.vms):
error_info = error_code.generate_error_info(ErrorCode.VM_IN_ANOTHER_TENANT,
vm_name, tenant.name)
logging.error(error_info.msg)
return error_info
return None
def named_tenant(func):
"""
Decorator to check whether the function is called by a named tenant.
Return error 'feature is not supported' if called by _DEFAULT tenant
"""
def not_supported():
return generate_error_info(ErrorCode.FEATURE_NOT_SUPPORTED, auth_data_const.DEFAULT_TENANT)
def check_name(name, vm_list):
if name == auth_data_const.DEFAULT_TENANT:
return not_supported()
return func(name, vm_list)
return check_name
@only_when_configured()
@named_tenant
def _tenant_vm_add(name, vm_list):
""" API to add vms for a tenant """
logging.debug("_tenant_vm_add: name=%s vm_list=%s", name, vm_list)
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info
if not vm_list:
error_info = generate_error_info(ErrorCode.VM_LIST_EMPTY)
return error_info
error_info = is_vm_duplicate(vm_list)
if error_info:
return error_info
error_msg, vms, not_found_vms = generate_tuple_from_vm_list(vm_list)
if error_msg:
not_found_vm_list = ",".join(not_found_vms)
error_info = generate_error_info(ErrorCode.VM_NOT_FOUND, not_found_vm_list)
return error_info
error_info = vm_already_in_tenant(name, vms)
if error_info:
return error_info
error_info = vm_in_any_tenant(vms)
if error_info:
return error_info
error_info = vmdk_utils.check_volumes_mounted(vms)
if error_info:
error_info.msg = "Cannot add VM to vmgroup " + error_info.msg
logging.error(error_info.msg)
return error_info
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info
logging.debug("_tenant_vm_add: vms=%s", vms)
error_msg = tenant.add_vms(auth_mgr.conn, vms)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
@only_when_configured()
@named_tenant
def _tenant_vm_rm(name, vm_list):
""" API to remove vms for a tenant """
logging.debug("_tenant_vm_rm: name=%s vm_list=%s", name, vm_list)
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info
if not vm_list:
error_info = generate_error_info(ErrorCode.VM_LIST_EMPTY)
return error_info
error_info = is_vm_duplicate(vm_list)
if error_info:
return error_info
error_msg, vms, not_found_vms = generate_tuple_from_vm_list(vm_list)
if error_msg:
not_found_vm_list = ",".join(not_found_vms)
error_info = generate_error_info(ErrorCode.VM_NOT_FOUND, not_found_vm_list)
return error_info
# check if vms to be removed have any volumes mounted.
error_info = vmdk_utils.check_volumes_mounted(vms)
if error_info:
error_info.msg = "Cannot complete vmgroup vm rm. " + error_info.msg
logging.error(error_info.msg)
return error_info
logging.debug("_tenant_vm_rm: vms=%s", vms)
error_info = vm_not_exist(name, vms)
if error_info:
return error_info
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info
error_msg = tenant.remove_vms(auth_mgr.conn, vms)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
def _tenant_vm_ls(name):
""" API to get vms for a tenant """
logging.debug("_tenant_vm_ls: name=%s", name)
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info, None
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info, None
# tenant.vms is a list of vm_uuid of vms which belong to this tenant
return None, tenant.vms
@only_when_configured()
@named_tenant
def _tenant_vm_replace(name, vm_list):
""" API to replace vms for a tenant """
logging.debug("_tenant_vm_replace: name=%s vm_list=%s", name, vm_list)
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info
if not vm_list:
error_info = generate_error_info(ErrorCode.REPLACE_VM_EMPTY)
return error_info
error_info = is_vm_duplicate(vm_list)
if error_info:
return error_info
error_msg, vms, not_found_vms = generate_tuple_from_vm_list(vm_list)
if error_msg:
not_found_vm_list = ",".join(not_found_vms)
error_info = generate_error_info(ErrorCode.VM_NOT_FOUND, not_found_vm_list)
return error_info
error_info = vm_already_in_tenant(name, vms)
if error_info:
return error_info
error_info = vm_in_any_tenant(vms)
if error_info:
return error_info
# check if vms that would be replaced out have any volumes mounted
error_info, existing_vms = _tenant_vm_ls(name)
if error_info:
return error_info
error_info = vmdk_utils.check_volumes_mounted(existing_vms)
if error_info:
error_info.msg = "Cannot complete vmgroup vm replace. " + error_info.msg
logging.error(error_info.msg)
return error_info
logging.debug("_tenant_vm_replace: vms=%s", vms)
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info
error_msg = tenant.replace_vms(auth_mgr.conn, vms)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
def check_datastore(datastore_name):
""" Check datastore with given name is a valid datastore or not """
if datastore_name == auth_data_const.VM_DS:
return None
if datastore_name == auth_data_const.ALL_DS:
return None
if not vmdk_utils.validate_datastore(datastore_name):
error_info = generate_error_info(ErrorCode.DS_NOT_EXIST, datastore_name)
return error_info
return None
def privilege_exist(privileges, datastore_url):
""" Check whether a entry with given datastore_name exists in privileges """
for p in privileges:
if datastore_url == p.datastore_url:
return True
return False
def check_privilege_parameters(privilege):
"""
Check whether the privilege parameters are invalid
Params:
-- privilege: privilege is a dictionary that contains privilege properties
Return value:
-- error_info: return None on success or error info on failure
"""
volume_maxsize = privilege[auth_data_const.COL_MAX_VOLUME_SIZE]
volume_totalsize = privilege[auth_data_const.COL_USAGE_QUOTA]
# If both volume max size and volume total size are set,
# volume max size should not exceed volume total size
if (volume_maxsize and volume_totalsize and (volume_maxsize > volume_totalsize)):
error_info = generate_error_info(ErrorCode.PRIVILEGE_INVALID_VOLUME_SIZE, volume_maxsize, volume_totalsize)
return error_info
return None
def check_usage_quota(datastore, volume_totalsize_in_MB):
"""
Check if the requested quota is valid in the given datastore
Return None if the usage_quota is valid
Return error_info if the usage_quota is invalid
"""
# usage_quota on "_VM_DS" and "_ALL_DS" should be "Unset"
if datastore == auth_data_const.VM_DS or datastore == auth_data_const.ALL_DS:
if volume_totalsize_in_MB is not None:
error_info = generate_error_info(ErrorCode.PRIVILEGE_SET_TOTAL_VOLUME_SIZE_LIMIT_NOT_ALLOWED,
datastore)
return error_info
@only_when_configured()
def _tenant_access_add(name, datastore, allow_create=None,
volume_maxsize_in_MB=None, volume_totalsize_in_MB=None):
""" API to add datastore access for a tenant """
logging.debug("_tenant_access_add: name=%s datastore=%s, allow_create=%s "
"volume_maxsize(MB)=%s volume_totalsize(MB)=%s", name, datastore, allow_create,
volume_maxsize_in_MB, volume_totalsize_in_MB)
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info
error_info = check_datastore(datastore)
if error_info:
return error_info
error_info = check_usage_quota(datastore, volume_totalsize_in_MB)
if error_info:
return error_info
datastore_url = vmdk_utils.get_datastore_url(datastore)
error_info, existing_privileges = _tenant_access_ls(name)
if error_info:
return error_info
if privilege_exist(existing_privileges, datastore_url):
error_info = generate_error_info(ErrorCode.PRIVILEGE_ALREADY_EXIST, name, datastore)
return error_info
# Possible value:
# None - no change required
# True/False (boolean or string) - change to corresponding True/False
if allow_create is not None:
# validate to boolean value if it is a string
allow_create_val, valid = validate_string_to_bool(allow_create)
if not valid:
err_code = ErrorCode.PRIVILEGE_INVALID_ALLOW_CREATE_VALUE
err_msg = error_code_to_message[err_code].format(allow_create)
logging.error(err_msg)
return ErrorInfo(err_code, err_msg)
allow_create = allow_create_val
privileges = generate_privileges(datastore_url=datastore_url,
allow_create=allow_create,
volume_maxsize_in_MB=volume_maxsize_in_MB,
volume_totalsize_in_MB=volume_totalsize_in_MB)
logging.debug("_tenant_access_add: privileges=%s", privileges)
error_info = check_privilege_parameters(privilege=privileges)
if error_info:
return error_info
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info
error_msg = tenant.set_datastore_access_privileges(auth_mgr.conn, [privileges])
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
return error_info
@only_when_configured()
def _tenant_access_set(name, datastore, allow_create=None, volume_maxsize_in_MB=None, volume_totalsize_in_MB=None):
""" API to modify datastore access for a tenant """
logging.debug("_tenant_access_set: name=%s datastore=%s, allow_create=%s "
"volume_maxsize(MB)=%s volume_totalsize(MB)=%s", name, datastore, allow_create,
volume_maxsize_in_MB, volume_totalsize_in_MB)
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info
error_info = check_datastore(datastore)
if error_info:
return error_info
error_info = check_usage_quota(datastore, volume_totalsize_in_MB)
if error_info:
return error_info
datastore_url = vmdk_utils.get_datastore_url(datastore)
error_info, existing_privileges = _tenant_access_ls(name)
if error_info:
return error_info
if not privilege_exist(existing_privileges, datastore_url):
error_info = generate_error_info(ErrorCode.PRIVILEGE_NOT_FOUND, name, datastore)
return error_info
logging.debug("_tenant_access_set: datastore_url=%s", datastore_url)
privileges = [d for d in tenant.privileges if d.datastore_url == datastore_url]
if not privileges:
err_code = ErrorCode.PRIVILEGE_NOT_FOUND
err_msg = error_code_to_message[err_code].format(name, datastore)
error_info = ErrorInfo(err_code, err_msg)
return error_info
if allow_create is not None:
allow_create_val, valid = validate_string_to_bool(allow_create)
if not valid:
err_code = ErrorCode.PRIVILEGE_INVALID_ALLOW_CREATE_VALUE
err_msg = error_code_to_message[err_code].format(allow_create)
logging.error(err_msg)
return ErrorInfo(err_code, err_msg)
allow_create = allow_create_val
privileges_dict = generate_privileges_dict(privileges[0])
logging.debug("_tenant_access_set: originial privileges_dict=%s", privileges_dict)
privileges_dict = modify_privileges(privileges=privileges_dict,
allow_create=allow_create,
volume_maxsize_in_MB=volume_maxsize_in_MB,
volume_totalsize_in_MB=volume_totalsize_in_MB)
logging.debug("_tenant_access_set: modified privileges_dict=%s", privileges_dict)
error_info = check_privilege_parameters(privilege=privileges_dict)
if error_info:
return error_info
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info
error_msg = tenant.set_datastore_access_privileges(auth_mgr.conn, [privileges_dict])
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
@only_when_configured()
def _tenant_access_rm(name, datastore):
""" API to remove datastore access for a tenant """
logging.debug("_tenant_access_rm: name=%s datastore=%s", name, datastore)
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info
error_info = check_datastore(datastore)
if error_info:
return error_info
datastore_url = vmdk_utils.get_datastore_url(datastore)
error_info, existing_privileges = _tenant_access_ls(name)
if error_info:
return error_info
if not privilege_exist(existing_privileges, datastore_url):
error_info = generate_error_info(ErrorCode.PRIVILEGE_NOT_FOUND, name, datastore)
return error_info
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info
    # get default_datastore for this tenant
    # if the default_datastore is equal to param "datastore", it means
    # we are trying to remove a row in the "privilege" table for the datastore which is
    # marked as the default_datastore of this tenant; in that case return an error
error_info, default_datastore_url = get_default_datastore_url(name)
if error_info:
return error_info
if default_datastore_url == datastore_url:
error_info = generate_error_info(ErrorCode.PRIVILEGE_REMOVE_NOT_ALLOWED)
return error_info
logging.debug("_tenant_access_rm: datastore_url=%s", datastore_url)
error_msg = tenant.remove_datastore_access_privileges(auth_mgr.conn, datastore_url)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
return None
@only_when_configured(ret_obj=True)
def _tenant_access_ls(name):
""" Handle tenant access ls command. Returns (ErrInfo, [list of privileges]) """
logging.debug("_tenant_access_ls: name=%s", name)
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info, None
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info, None
return None, tenant.privileges
| apache-2.0 | -7,017,315,525,861,771,000 | 34.450998 | 129 | 0.64543 | false |
SpatialMetabolomics/SM_distributed | tests/test_imzml_txt_converter_db.py | 2 | 1137 | import numpy as np
from unittest.mock import patch
from sm.engine.ms_txt_converter import MsTxtConverter
from sm.engine.util import SMConfig
from sm.engine.tests.util import sm_config, ds_config
@patch('sm.engine.ms_txt_converter.MsTxtConverter._parser_factory')
def test_convert(MockImzMLParser, sm_config):
mock_parser = MockImzMLParser.return_value
mock_parser.coordinates = [(1, 1), (1, 2)]
mock_parser.getspectrum.side_effect = [(np.array([100., 200.]), np.array([100., 10.])),
(np.array([100., 200.]), np.array([100., 10.]))]
SMConfig._config_dict = sm_config
converter = MsTxtConverter('imzml_path', 'txt_path', 'coord_path')
with patch('sm.engine.ms_txt_converter.open', create=True) as mock_open:
converter.convert()
mock_open_write_args = [args[0] for _, args, kw_args in mock_open.mock_calls if args]
assert '0|100.0 200.0|100.0 10.0\n' in mock_open_write_args
assert '1|100.0 200.0|100.0 10.0\n' in mock_open_write_args
assert '0,1,1\n' in mock_open_write_args
assert '1,1,2\n' in mock_open_write_args
| apache-2.0 | -4,899,548,670,970,884,000 | 39.607143 | 93 | 0.651715 | false |
EduPepperPD/pepper2013 | cms/djangoapps/contentstore/tests/test_import_export.py | 7 | 3135 | """
Unit tests for course import and export
"""
import os
import shutil
import tarfile
import tempfile
import copy
from uuid import uuid4
from pymongo import MongoClient
from .utils import CourseTestCase
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.conf import settings
from xmodule.contentstore.django import _CONTENTSTORE
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['OPTIONS']['db'] = 'test_xcontent_%s' % uuid4().hex
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class ImportTestCase(CourseTestCase):
"""
Unit tests for importing a course
"""
def setUp(self):
super(ImportTestCase, self).setUp()
self.url = reverse("import_course", kwargs={
'org': self.course.location.org,
'course': self.course.location.course,
'name': self.course.location.name,
})
self.content_dir = tempfile.mkdtemp()
def touch(name):
""" Equivalent to shell's 'touch'"""
with file(name, 'a'):
os.utime(name, None)
# Create tar test files -----------------------------------------------
# OK course:
good_dir = tempfile.mkdtemp(dir=self.content_dir)
os.makedirs(os.path.join(good_dir, "course"))
with open(os.path.join(good_dir, "course.xml"), "w+") as f:
f.write('<course url_name="2013_Spring" org="EDx" course="0.00x"/>')
with open(os.path.join(good_dir, "course", "2013_Spring.xml"), "w+") as f:
f.write('<course></course>')
self.good_tar = os.path.join(self.content_dir, "good.tar.gz")
with tarfile.open(self.good_tar, "w:gz") as gtar:
gtar.add(good_dir)
# Bad course (no 'course.xml' file):
bad_dir = tempfile.mkdtemp(dir=self.content_dir)
touch(os.path.join(bad_dir, "bad.xml"))
self.bad_tar = os.path.join(self.content_dir, "bad.tar.gz")
with tarfile.open(self.bad_tar, "w:gz") as btar:
btar.add(bad_dir)
def tearDown(self):
shutil.rmtree(self.content_dir)
MongoClient().drop_database(TEST_DATA_CONTENTSTORE['OPTIONS']['db'])
_CONTENTSTORE.clear()
def test_no_coursexml(self):
"""
Check that the response for a tar.gz import without a course.xml is
correct.
"""
with open(self.bad_tar) as btar:
resp = self.client.post(
self.url,
{
"name": self.bad_tar,
"course-data": [btar]
})
self.assertEquals(resp.status_code, 415)
def test_with_coursexml(self):
"""
Check that the response for a tar.gz import with a course.xml is
correct.
"""
with open(self.good_tar) as gtar:
resp = self.client.post(
self.url,
{
"name": self.good_tar,
"course-data": [gtar]
})
self.assertEquals(resp.status_code, 200)
| agpl-3.0 | 6,503,613,350,077,849,000 | 32.351064 | 82 | 0.571292 | false |
google-research/rigl | rigl/imagenet_resnet/utils.py | 1 | 4331 | # coding=utf-8
# Copyright 2021 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helped functions to concatenate subset of noisy images to batch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.compat.v2 import summary
IMG_SUMMARY_PREFIX = '_img_'
def format_tensors(*dicts):
"""Format metrics to be callable as tf.summary scalars on tpu's.
Args:
*dicts: A set of metric dictionaries, containing metric name + value tensor.
Returns:
A single formatted dictionary that holds all tensors.
Raises:
ValueError: if any tensor is not a scalar.
"""
merged_summaries = {}
for d in dicts:
for metric_name, value in d.items():
shape = value.shape.as_list()
if metric_name.startswith(IMG_SUMMARY_PREFIX):
# If image, shape it into 2d.
merged_summaries[metric_name] = tf.reshape(value,
(1, -1, value.shape[-1], 1))
elif not shape:
merged_summaries[metric_name] = tf.expand_dims(value, axis=0)
elif shape == [1]:
merged_summaries[metric_name] = value
else:
raise ValueError(
'Metric {} has value {} that is not reconciliable'.format(
metric_name, value))
return merged_summaries
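# Illustrative sketch (added for clarity, not part of the original module):
# scalar metrics are reshaped to rank-1 tensors so they can flow through the
# TPU host_call machinery. The metric names below are made up, and the helper
# is never called by this module.
def _format_tensors_example():
  merged = format_tensors({'loss': tf.constant(0.25)},
                          {'accuracy': tf.constant(0.9)})
  assert merged['loss'].shape.as_list() == [1]
  assert merged['accuracy'].shape.as_list() == [1]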
def host_call_fn(model_dir, **kwargs):
"""host_call function used for creating training summaries when using TPU.
Args:
model_dir: String indicating the output_dir to save summaries in.
**kwargs: Set of metric names and tensor values for all desired summaries.
Returns:
Summary op to be passed to the host_call arg of the estimator function.
"""
gs = kwargs.pop('global_step')[0]
with summary.create_file_writer(model_dir).as_default():
# Always record summaries.
with summary.record_if(True):
for name, tensor in kwargs.items():
if name.startswith(IMG_SUMMARY_PREFIX):
summary.image(name.replace(IMG_SUMMARY_PREFIX, ''), tensor,
max_images=1)
else:
summary.scalar(name, tensor[0], step=gs)
# Following function is under tf:1x, so we use it.
return tf.summary.all_v2_summary_ops()
def mask_summaries(masks, with_img=False):
metrics = {}
for mask in masks:
metrics['pruning/{}/sparsity'.format(
mask.op.name)] = tf.nn.zero_fraction(mask)
if with_img:
metrics[IMG_SUMMARY_PREFIX + 'mask/' + mask.op.name] = mask
return metrics
def initialize_parameters_from_ckpt(ckpt_path, model_dir, param_suffixes):
"""Load parameters from an existing checkpoint.
Args:
ckpt_path: str, loads the mask variables from this checkpoint.
model_dir: str, if checkpoint exists in this folder no-op.
param_suffixes: list or str, suffix of parameters to be load from
checkpoint.
"""
already_has_ckpt = model_dir and tf.train.latest_checkpoint(
model_dir) is not None
if already_has_ckpt:
tf.logging.info(
'Training already started on this model, not loading masks from'
'previously trained model')
return
reader = tf.train.NewCheckpointReader(ckpt_path)
param_names = reader.get_variable_to_shape_map().keys()
param_names = [x for x in param_names if x.endswith(param_suffixes)]
variable_map = {}
for var in tf.global_variables():
var_name = var.name.split(':')[0]
if var_name in param_names:
tf.logging.info('Loading parameter variable from checkpoint: %s',
var_name)
variable_map[var_name] = var
elif var_name.endswith(param_suffixes):
tf.logging.info(
'Cannot find parameter variable in checkpoint, skipping: %s',
var_name)
tf.train.init_from_checkpoint(ckpt_path, variable_map)
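# Illustrative sketch (hypothetical paths, not from the original project):
# restoring only pruning-mask variables from a previously trained checkpoint.
def _example_mask_restore():
  initialize_parameters_from_ckpt(
      ckpt_path='/tmp/dense_model/model.ckpt-100000',
      model_dir='/tmp/sparse_model',
      param_suffixes=('mask',))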
| apache-2.0 | -6,161,119,297,711,570,000 | 33.648 | 80 | 0.670977 | false |
dsajkl/reqiop | common/test/acceptance/pages/studio/container.py | 9 | 14433 | """
Container page in Studio
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import Promise, EmptyPromise
from . import BASE_URL
from utils import click_css, confirm_prompt
class ContainerPage(PageObject):
"""
Container page in Studio
"""
NAME_SELECTOR = '.page-header-title'
NAME_INPUT_SELECTOR = '.page-header .xblock-field-input'
NAME_FIELD_WRAPPER_SELECTOR = '.page-header .wrapper-xblock-field'
ADD_MISSING_GROUPS_SELECTOR = '.notification-action-button[data-notification-action="add-missing-groups"]'
def __init__(self, browser, locator):
super(ContainerPage, self).__init__(browser)
self.locator = locator
@property
def url(self):
"""URL to the container page for an xblock."""
return "{}/container/{}".format(BASE_URL, self.locator)
@property
def name(self):
titles = self.q(css=self.NAME_SELECTOR).text
if titles:
return titles[0]
else:
return None
def is_browser_on_page(self):
def _xblock_count(class_name, request_token):
return len(self.q(css='{body_selector} .xblock.{class_name}[data-request-token="{request_token}"]'.format(
body_selector=XBlockWrapper.BODY_SELECTOR, class_name=class_name, request_token=request_token
)).results)
def _is_finished_loading():
is_done = False
# Get the request token of the first xblock rendered on the page and assume it is correct.
data_request_elements = self.q(css='[data-request-token]')
if len(data_request_elements) > 0:
request_token = data_request_elements.first.attrs('data-request-token')[0]
# Then find the number of Studio xblock wrappers on the page with that request token.
num_wrappers = len(self.q(css='{} [data-request-token="{}"]'.format(XBlockWrapper.BODY_SELECTOR, request_token)).results)
# Wait until all components have been loaded and marked as either initialized or failed.
# See:
# - common/static/coffee/src/xblock/core.coffee which adds the class "xblock-initialized"
# at the end of initializeBlock.
# - common/static/js/views/xblock.js which adds the class "xblock-initialization-failed"
# if the xblock threw an error while initializing.
num_initialized_xblocks = _xblock_count('xblock-initialized', request_token)
num_failed_xblocks = _xblock_count('xblock-initialization-failed', request_token)
is_done = num_wrappers == (num_initialized_xblocks + num_failed_xblocks)
return (is_done, is_done)
# First make sure that an element with the view-container class is present on the page,
# and then wait for the loading spinner to go away and all the xblocks to be initialized.
return (
self.q(css='body.view-container').present and
self.q(css='div.ui-loading.is-hidden').present and
Promise(_is_finished_loading, 'Finished rendering the xblock wrappers.').fulfill()
)
def wait_for_component_menu(self):
"""
Waits until the menu bar of components is present on the page.
"""
EmptyPromise(
lambda: self.q(css='div.add-xblock-component').present,
'Wait for the menu of components to be present'
).fulfill()
@property
def xblocks(self):
"""
Return a list of xblocks loaded on the container page.
"""
return self._get_xblocks()
@property
def inactive_xblocks(self):
"""
Return a list of inactive xblocks loaded on the container page.
"""
return self._get_xblocks(".is-inactive ")
@property
def active_xblocks(self):
"""
Return a list of active xblocks loaded on the container page.
"""
return self._get_xblocks(".is-active ")
@property
def publish_title(self):
"""
Returns the title as displayed on the publishing sidebar component.
"""
return self.q(css='.pub-status').first.text[0]
@property
def release_title(self):
"""
Returns the title before the release date in the publishing sidebar component.
"""
return self.q(css='.wrapper-release .title').first.text[0]
@property
def release_date(self):
"""
Returns the release date of the unit (with ancestor inherited from), as displayed
in the publishing sidebar component.
"""
return self.q(css='.wrapper-release .copy').first.text[0]
@property
def last_saved_text(self):
"""
Returns the last saved message as displayed in the publishing sidebar component.
"""
return self.q(css='.wrapper-last-draft').first.text[0]
@property
def last_published_text(self):
"""
Returns the last published message as displayed in the sidebar.
"""
return self.q(css='.wrapper-last-publish').first.text[0]
@property
def currently_visible_to_students(self):
"""
Returns True if the unit is marked as currently visible to students
(meaning that a warning is being displayed).
"""
warnings = self.q(css='.container-message .warning')
if not warnings.is_present():
return False
warning_text = warnings.first.text[0]
return warning_text == "Caution: The last published version of this unit is live. By publishing changes you will change the student experience."
def shows_inherited_staff_lock(self, parent_type=None, parent_name=None):
"""
Returns True if the unit inherits staff lock from a section or subsection.
"""
return self.q(css='.bit-publishing .wrapper-visibility .copy .inherited-from').visible
@property
def publish_action(self):
"""
Returns the link for publishing a unit.
"""
return self.q(css='.action-publish').first
def discard_changes(self):
"""
Discards draft changes (which will then re-render the page).
"""
click_css(self, 'a.action-discard', 0, require_notification=False)
confirm_prompt(self)
self.wait_for_ajax()
@property
def is_staff_locked(self):
""" Returns True if staff lock is currently enabled, False otherwise """
return 'icon-check' in self.q(css='a.action-staff-lock>i').attrs('class')
def toggle_staff_lock(self, inherits_staff_lock=False):
"""
Toggles "hide from students" which enables or disables a staff-only lock.
Returns True if the lock is now enabled, else False.
"""
was_locked_initially = self.is_staff_locked
if not was_locked_initially:
self.q(css='a.action-staff-lock').first.click()
else:
click_css(self, 'a.action-staff-lock', 0, require_notification=False)
if not inherits_staff_lock:
confirm_prompt(self)
self.wait_for_ajax()
return not was_locked_initially
def view_published_version(self):
"""
Clicks "View Live Version", which will open the published version of the unit page in the LMS.
Switches the browser to the newly opened LMS window.
"""
self.q(css='.button-view').first.click()
self._switch_to_lms()
def preview(self):
"""
Clicks "Preview Changes", which will open the draft version of the unit page in the LMS.
Switches the browser to the newly opened LMS window.
"""
self.q(css='.button-preview').first.click()
self._switch_to_lms()
def _switch_to_lms(self):
"""
Assumes LMS has opened-- switches to that window.
"""
browser_window_handles = self.browser.window_handles
# Switch to browser window that shows HTML Unit in LMS
# The last handle represents the latest windows opened
self.browser.switch_to_window(browser_window_handles[-1])
def _get_xblocks(self, prefix=""):
return self.q(css=prefix + XBlockWrapper.BODY_SELECTOR).map(
lambda el: XBlockWrapper(self.browser, el.get_attribute('data-locator'))).results
def duplicate(self, source_index):
"""
Duplicate the item with index source_index (based on vertical placement in page).
"""
click_css(self, 'a.duplicate-button', source_index)
def delete(self, source_index):
"""
Delete the item with index source_index (based on vertical placement in page).
Only visible items are counted in the source_index.
The index of the first item is 0.
"""
# Click the delete button
click_css(self, 'a.delete-button', source_index, require_notification=False)
# Click the confirmation dialog button
confirm_prompt(self)
def edit(self):
"""
Clicks the "edit" button for the first component on the page.
"""
return _click_edit(self)
def add_missing_groups(self):
"""
Click the "add missing groups" link.
Note that this does an ajax call.
"""
self.q(css=self.ADD_MISSING_GROUPS_SELECTOR).first.click()
self.wait_for_ajax()
# Wait until all xblocks rendered.
self.wait_for_page()
def missing_groups_button_present(self):
"""
Returns True if the "add missing groups" button is present.
"""
return self.q(css=self.ADD_MISSING_GROUPS_SELECTOR).present
def get_xblock_information_message(self):
"""
Returns an information message for the container page.
"""
return self.q(css=".xblock-message.information").first.text[0]
def is_inline_editing_display_name(self):
"""
Return whether this container's display name is in its editable form.
"""
return "is-editing" in self.q(css=self.NAME_FIELD_WRAPPER_SELECTOR).first.attrs("class")[0]
class XBlockWrapper(PageObject):
"""
A PageObject representing a wrapper around an XBlock child shown on the Studio container page.
"""
url = None
BODY_SELECTOR = '.studio-xblock-wrapper'
NAME_SELECTOR = '.xblock-display-name'
COMPONENT_BUTTONS = {
'basic_tab': '.editor-tabs li.inner_tab_wrap:nth-child(1) > a',
'advanced_tab': '.editor-tabs li.inner_tab_wrap:nth-child(2) > a',
'save_settings': '.action-save',
}
def __init__(self, browser, locator):
super(XBlockWrapper, self).__init__(browser)
self.locator = locator
def is_browser_on_page(self):
return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present
def _bounded_selector(self, selector):
"""
Return `selector`, but limited to this particular `CourseOutlineChild` context
"""
return '{}[data-locator="{}"] {}'.format(
self.BODY_SELECTOR,
self.locator,
selector
)
@property
def student_content(self):
"""
Returns the text content of the xblock as displayed on the container page.
"""
return self.q(css=self._bounded_selector('.xblock-student_view'))[0].text
@property
def name(self):
titles = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).text
if titles:
return titles[0]
else:
return None
@property
def children(self):
"""
Will return any first-generation descendant xblocks of this xblock.
"""
descendants = self.q(css=self._bounded_selector(self.BODY_SELECTOR)).map(
lambda el: XBlockWrapper(self.browser, el.get_attribute('data-locator'))).results
# Now remove any non-direct descendants.
grandkids = []
for descendant in descendants:
grandkids.extend(descendant.children)
grand_locators = [grandkid.locator for grandkid in grandkids]
return [descendant for descendant in descendants if not descendant.locator in grand_locators]
@property
def preview_selector(self):
return self._bounded_selector('.xblock-student_view,.xblock-author_view')
def go_to_container(self):
"""
Open the container page linked to by this xblock, and return
an initialized :class:`.ContainerPage` for that xblock.
"""
return ContainerPage(self.browser, self.locator).visit()
def edit(self):
"""
Clicks the "edit" button for this xblock.
"""
return _click_edit(self, self._bounded_selector)
def open_advanced_tab(self):
"""
Click on Advanced Tab.
"""
self._click_button('advanced_tab')
def open_basic_tab(self):
"""
Click on Basic Tab.
"""
self._click_button('basic_tab')
def save_settings(self):
"""
Click on settings Save button.
"""
self._click_button('save_settings')
@property
def editor_selector(self):
return '.xblock-studio_view'
def _click_button(self, button_name):
"""
Click on a button as specified by `button_name`
Arguments:
button_name (str): button name
"""
self.q(css=self.COMPONENT_BUTTONS[button_name]).first.click()
self.wait_for_ajax()
def go_to_group_configuration_page(self):
"""
Go to the Group Configuration used by the component.
"""
self.q(css=self._bounded_selector('span.message-text a')).first.click()
@property
def group_configuration_link_name(self):
"""
Get Group Configuration name from link.
"""
return self.q(css=self._bounded_selector('span.message-text a')).first.text[0]
def _click_edit(page_object, bounded_selector=lambda x: x):
"""
Click on the first edit button found and wait for the Studio editor to be present.
"""
page_object.q(css=bounded_selector('.edit-button')).first.click()
EmptyPromise(
lambda: page_object.q(css='.xblock-studio_view').present,
'Wait for the Studio editor to be present'
).fulfill()
return page_object
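# Illustrative usage sketch (not part of the original module): how a bok-choy
# acceptance test would typically drive ContainerPage. `browser` and `locator`
# are assumed to come from the surrounding test fixture.
def _example_visit_and_edit(browser, locator):
    """Visit a unit's container page and open the first component's editor."""
    container = ContainerPage(browser, locator)
    container.visit()
    container.edit()  # Waits for the Studio editor to be present.
    return container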
| agpl-3.0 | -6,662,962,624,902,506,000 | 34.202439 | 152 | 0.613802 | false |
aroche/django | django/template/context.py | 105 | 9348 | import warnings
from contextlib import contextmanager
from copy import copy
from django.utils.deprecation import RemovedInDjango110Warning
# Hard-coded processor for easier use of CSRF protection.
_builtin_context_processors = ('django.template.context_processors.csrf',)
_current_app_undefined = object()
class ContextPopException(Exception):
"pop() has been called more times than push()"
pass
class ContextDict(dict):
def __init__(self, context, *args, **kwargs):
super(ContextDict, self).__init__(*args, **kwargs)
context.dicts.append(self)
self.context = context
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.context.pop()
class BaseContext(object):
def __init__(self, dict_=None):
self._reset_dicts(dict_)
def _reset_dicts(self, value=None):
builtins = {'True': True, 'False': False, 'None': None}
self.dicts = [builtins]
if value is not None:
self.dicts.append(value)
def __copy__(self):
duplicate = copy(super(BaseContext, self))
duplicate.dicts = self.dicts[:]
return duplicate
def __repr__(self):
return repr(self.dicts)
def __iter__(self):
for d in reversed(self.dicts):
yield d
def push(self, *args, **kwargs):
return ContextDict(self, *args, **kwargs)
def pop(self):
if len(self.dicts) == 1:
raise ContextPopException
return self.dicts.pop()
def __setitem__(self, key, value):
"Set a variable in the current context"
self.dicts[-1][key] = value
def __getitem__(self, key):
"Get a variable's value, starting at the current context and going upward"
for d in reversed(self.dicts):
if key in d:
return d[key]
raise KeyError(key)
def __delitem__(self, key):
"Delete a variable from the current context"
del self.dicts[-1][key]
def has_key(self, key):
for d in self.dicts:
if key in d:
return True
return False
def __contains__(self, key):
return self.has_key(key)
def get(self, key, otherwise=None):
for d in reversed(self.dicts):
if key in d:
return d[key]
return otherwise
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def new(self, values=None):
"""
Returns a new context with the same properties, but with only the
values given in 'values' stored.
"""
new_context = copy(self)
new_context._reset_dicts(values)
return new_context
def flatten(self):
"""
Returns self.dicts as one dictionary
"""
flat = {}
for d in self.dicts:
flat.update(d)
return flat
def __eq__(self, other):
"""
        Compares two contexts by comparing their 'dicts' attributes.
"""
if isinstance(other, BaseContext):
# because dictionaries can be put in different order
# we have to flatten them like in templates
return self.flatten() == other.flatten()
# if it's not comparable return false
return False
class Context(BaseContext):
"A stack container for variable context"
def __init__(self, dict_=None, autoescape=True,
current_app=_current_app_undefined,
use_l10n=None, use_tz=None):
if current_app is not _current_app_undefined:
warnings.warn(
"The current_app argument of Context is deprecated. Use "
"RequestContext and set the current_app attribute of its "
"request instead.", RemovedInDjango110Warning, stacklevel=2)
self.autoescape = autoescape
self._current_app = current_app
self.use_l10n = use_l10n
self.use_tz = use_tz
self.template_name = "unknown"
self.render_context = RenderContext()
# Set to the original template -- as opposed to extended or included
# templates -- during rendering, see bind_template.
self.template = None
super(Context, self).__init__(dict_)
@property
def current_app(self):
return None if self._current_app is _current_app_undefined else self._current_app
@property
def is_current_app_set(self):
return self._current_app is not _current_app_undefined
@contextmanager
def bind_template(self, template):
if self.template is not None:
raise RuntimeError("Context is already bound to a template")
self.template = template
try:
yield
finally:
self.template = None
def __copy__(self):
duplicate = super(Context, self).__copy__()
duplicate.render_context = copy(self.render_context)
return duplicate
def update(self, other_dict):
"Pushes other_dict to the stack of dictionaries in the Context"
if not hasattr(other_dict, '__getitem__'):
raise TypeError('other_dict must be a mapping (dictionary-like) object.')
return ContextDict(self, other_dict)
class RenderContext(BaseContext):
"""
A stack container for storing Template state.
RenderContext simplifies the implementation of template Nodes by providing a
safe place to store state between invocations of a node's `render` method.
The RenderContext also provides scoping rules that are more sensible for
'template local' variables. The render context stack is pushed before each
template is rendered, creating a fresh scope with nothing in it. Name
resolution fails if a variable is not found at the top of the RequestContext
stack. Thus, variables are local to a specific template and don't affect the
rendering of other templates as they would if they were stored in the normal
template context.
"""
def __iter__(self):
for d in self.dicts[-1]:
yield d
def has_key(self, key):
return key in self.dicts[-1]
def get(self, key, otherwise=None):
return self.dicts[-1].get(key, otherwise)
def __getitem__(self, key):
return self.dicts[-1][key]
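# Illustrative sketch (not part of the original module): a template Node can
# keep per-render state in the render context, as described in the
# RenderContext docstring above. `node` stands for any hashable Node instance
# and `context` for the Context currently being rendered.
def _example_render_counter(node, context):
    count = context.render_context.get(node, 0) + 1
    context.render_context[node] = count
    return str(count)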
class RequestContext(Context):
"""
This subclass of template.Context automatically populates itself using
the processors defined in the engine's configuration.
Additional processors can be specified as a list of callables
using the "processors" keyword argument.
"""
def __init__(self, request, dict_=None, processors=None,
current_app=_current_app_undefined,
use_l10n=None, use_tz=None):
# current_app isn't passed here to avoid triggering the deprecation
# warning in Context.__init__.
super(RequestContext, self).__init__(
dict_, use_l10n=use_l10n, use_tz=use_tz)
if current_app is not _current_app_undefined:
warnings.warn(
"The current_app argument of RequestContext is deprecated. "
"Set the current_app attribute of its request instead.",
RemovedInDjango110Warning, stacklevel=2)
self._current_app = current_app
self.request = request
self._processors = () if processors is None else tuple(processors)
self._processors_index = len(self.dicts)
# placeholder for context processors output
self.update({})
# empty dict for any new modifications
# (so that context processors don't overwrite them)
self.update({})
@contextmanager
def bind_template(self, template):
if self.template is not None:
raise RuntimeError("Context is already bound to a template")
self.template = template
# Set context processors according to the template engine's settings.
processors = (template.engine.template_context_processors +
self._processors)
updates = {}
for processor in processors:
updates.update(processor(self.request))
self.dicts[self._processors_index] = updates
try:
yield
finally:
self.template = None
# Unset context processors.
self.dicts[self._processors_index] = {}
def new(self, values=None):
new_context = super(RequestContext, self).new(values)
# This is for backwards-compatibility: RequestContexts created via
# Context.new don't include values from context processors.
if hasattr(new_context, '_processors_index'):
del new_context._processors_index
return new_context
def make_context(context, request=None):
"""
Create a suitable Context from a plain dict and optionally an HttpRequest.
"""
if request is None:
context = Context(context)
else:
# The following pattern is required to ensure values from
# context override those from template context processors.
original_context = context
context = RequestContext(request)
if original_context:
context.push(original_context)
return context
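# Illustrative usage sketch (not part of the original module): values passed
# explicitly to make_context are pushed above context-processor output, so
# they win on lookup.
def _example_make_context(request=None):
    context = make_context({'title': 'Hello'}, request)
    return context['title']  # 'Hello', even if a processor also sets 'title'.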
| bsd-3-clause | -717,145,767,156,255,500 | 31.915493 | 89 | 0.619384 | false |