| content (string) | avg_line_length (float64) | max_line_length (int64) | alphanum_fraction (float64) | licenses (sequence) | repository_name (string) | path (string) | size (int64) | lang (string) |
---|---|---|---|---|---|---|---|---|
#!/usr/bin/python3
"""
Summary
-------
This application simulates showers to be used in trigger rate calculations.
Arrays with one telescope (1MST) or four telescopes (4LST) can be used, for \
mono or stereo trigger configurations, respectively.
Simulations are managed by the shower_simulator module.
Each run is simulated in a job. Each job is submitted by using the submission \
command from the global config settings (see config_template.yml). \
The config entry extraCommands can be used to add extra commands to be run in each job,
before the actual simulation.
At the moment, the shower simulations are performed by CORSIKA, which requires \
the zstd package. Please make sure that the command to set your zstd path is \
properly set by the extraCommands in config.yml.
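An extraCommands entry in config.yml could look like the following sketch (the \
zstd path below is only a placeholder):
.. code-block:: yaml
    extraCommands:
        - 'export PATH=/opt/zstd/bin:$PATH'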
Command line arguments
----------------------
array (str, required)
Name of the array (1MST, 4LST ...).
site (str, required)
South or North.
primary (str, required)
Name of the primary particle (proton, helium ...).
nruns (int, optional)
Number of runs to be simulated (default=100).
nevents (int, optional)
Number of events simulated per run (default=100000).
zenith (float, optional)
Zenith angle in deg (default=20).
azimuth (float, optional)
Azimuth angle in deg (default=0).
output (str, optional)
Path of the directory to store the output simulations. By default, \
the standard output directory defined by config will be used.
test (activation mode, optional)
If activated, no job will be submitted. Instead, an example of the \
run script will be printed.
verbosity (str, optional)
Log level to print (default=INFO).
Example
-------
Producing a set of proton showers for trigger rate simulations of LST.
.. code-block:: console
python applications/sim_showers_for_trigger_rates.py -a 4LST -s North \
--primary proton --nruns 100 --nevents 10000 --output {some dir for large files}
"""
import logging
import argparse
import astropy.units as u
import simtools.io_handler as io
import simtools.config as cfg
import simtools.util.general as gen
from simtools.shower_simulator import ShowerSimulator
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=("Simulate showers to be used for trigger rate calculations")
)
parser.add_argument(
"-a",
"--array",
help="Name of the array (e.g. 1MST, 4LST ...)",
type=str,
required=True,
)
parser.add_argument(
"-s", "--site", help="Site name (North or South)", type=str, required=True
)
parser.add_argument(
"--primary",
help="Name of the primary particle (e.g. proton, helium ...)",
type=str,
required=True,
)
parser.add_argument(
"--nruns", help="Number of runs (default=100)", type=int, default=100
)
parser.add_argument(
"--nevents", help="Number of events/run (default=100)", type=int, default=100000
)
parser.add_argument(
"--zenith", help="Zenith angle in deg (default=20)", type=float, default=20
)
parser.add_argument(
"--azimuth", help="Azimuth angle in deg (default=0)", type=float, default=0
)
parser.add_argument(
"--output",
help="Path of the output directory where the simulations will be saved.",
type=str,
default=None,
)
parser.add_argument(
"--test", help="Test option will not submit any job.", action="store_true"
)
parser.add_argument(
"-v",
"--verbosity",
dest="logLevel",
action="store",
default="info",
help="Log level to print (default is INFO)",
)
args = parser.parse_args()
label = "trigger_rates"
logger = logging.getLogger()
logger.setLevel(gen.getLogLevelFromUser(args.logLevel))
# Output directory to save files related directly to this app
outputDir = io.getApplicationOutputDirectory(cfg.get("outputLocation"), label)
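    # Shower configuration passed to ShowerSimulator; erange, eslope, viewcone
    # and cscat follow the usual CORSIKA steering conventions (energy range,
    # spectral slope, Cherenkov viewcone and core scatter, respectively).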
showerConfigData = {
"corsikaDataDirectory": args.output,
"site": args.site,
"layoutName": args.array,
"runRange": [1, args.nruns + 1],
"nshow": args.nevents,
"primary": args.primary,
"erange": [10 * u.GeV, 300 * u.TeV],
"eslope": -2,
"zenith": args.zenith * u.deg,
"azimuth": args.azimuth * u.deg,
"viewcone": 10 * u.deg,
"cscat": [20, 1500 * u.m, 0],
}
showerSimulator = ShowerSimulator(label=label, showerConfigData=showerConfigData)
if not args.test:
showerSimulator.submit()
else:
logger.info("Test flag is on - it will not submit any job.")
logger.info("This is an example of the run script:")
showerSimulator.submit(runList=[1], submitCommand="more ")
# Exporting the list of output/log/input files into the application folder
outputFileList = outputDir.joinpath("outputFiles_{}.list".format(args.primary))
logFileList = outputDir.joinpath("logFiles_{}.list".format(args.primary))
def printListIntoFile(listOfFiles, fileName):
with open(fileName, "w") as f:
for line in listOfFiles:
f.write(line + "\n")
logger.info("List of output files exported to {}".format(outputFileList))
printListIntoFile(showerSimulator.getListOfOutputFiles(), outputFileList)
logger.info("List of log files exported to {}".format(logFileList))
printListIntoFile(showerSimulator.getListOfLogFiles(), logFileList)
| 34.779141 | 88 | 0.646499 | ["BSD-3-Clause"] | RaulRPrado/ctamclib | applications/sim_showers_for_trigger_rates.py | 5,669 | Python |
# Test various AP mode parameters
# Copyright (c) 2014, Qualcomm Atheros, Inc.
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
from remotehost import remote_compatible
import logging
logger = logging.getLogger()
import os
import struct
import subprocess
import time
import hwsim_utils
import hostapd
from tshark import run_tshark
from utils import *
@remote_compatible
def test_ap_fragmentation_rts_set_high(dev, apdev):
"""WPA2-PSK AP with fragmentation and RTS thresholds larger than frame length"""
ssid = "test-wpa2-psk"
passphrase = 'qwertyuiop'
params = hostapd.wpa2_params(ssid=ssid, passphrase=passphrase)
params['rts_threshold'] = "1000"
params['fragm_threshold'] = "2000"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid, psk=passphrase, scan_freq="2412")
hwsim_utils.test_connectivity(dev[0], hapd)
dev[0].request("DISCONNECT")
hapd.disable()
hapd.set('fragm_threshold', '-1')
hapd.set('rts_threshold', '-1')
hapd.enable()
@remote_compatible
def test_ap_fragmentation_open(dev, apdev):
"""Open AP with fragmentation threshold"""
ssid = "fragmentation"
params = {}
params['ssid'] = ssid
params['fragm_threshold'] = "1000"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
hwsim_utils.test_connectivity(dev[0], hapd)
dev[0].request("DISCONNECT")
hapd.disable()
hapd.set('fragm_threshold', '-1')
hapd.enable()
@remote_compatible
def test_ap_fragmentation_wpa2(dev, apdev):
"""WPA2-PSK AP with fragmentation threshold"""
ssid = "test-wpa2-psk"
passphrase = 'qwertyuiop'
params = hostapd.wpa2_params(ssid=ssid, passphrase=passphrase)
params['fragm_threshold'] = "1000"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid, psk=passphrase, scan_freq="2412")
hwsim_utils.test_connectivity(dev[0], hapd)
dev[0].request("DISCONNECT")
hapd.disable()
hapd.set('fragm_threshold', '-1')
hapd.enable()
def test_ap_vendor_elements(dev, apdev):
"""WPA2-PSK AP with vendor elements added"""
bssid = apdev[0]['bssid']
ssid = "test-wpa2-psk"
passphrase = 'qwertyuiop'
params = hostapd.wpa2_params(ssid=ssid, passphrase=passphrase)
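    # Element hexdump: dd = Vendor Specific element (ID 221), 04 = length,
    # 11:22:33 = OUI, final byte = vendor-specific payload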
params['vendor_elements'] = "dd0411223301"
params['assocresp_elements'] = "dd0411223302"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid, psk=passphrase, scan_freq="2412")
bss = dev[0].get_bss(bssid)
if "dd0411223301" not in bss['ie']:
raise Exception("Vendor element not shown in scan results")
hapd.set('vendor_elements', 'dd051122330203dd0400137400dd04001374ff')
if "OK" not in hapd.request("UPDATE_BEACON"):
raise Exception("UPDATE_BEACON failed")
dev[1].scan_for_bss(apdev[0]['bssid'], freq="2412")
bss = dev[1].get_bss(bssid)
if "dd0411223301" in bss['ie']:
raise Exception("Old vendor element still in scan results")
if "dd051122330203" not in bss['ie']:
raise Exception("New vendor element not shown in scan results")
def test_ap_element_parse(dev, apdev):
"""Information element parsing - extra coverage"""
bssid = apdev[0]['bssid']
ssid = "test-wpa2-psk"
params = {'ssid': ssid,
'vendor_elements': "380501020304059e009e009e009e009e009e00"}
hapd = hostapd.add_ap(apdev[0], params)
dev[0].scan_for_bss(apdev[0]['bssid'], freq="2412")
bss = dev[0].get_bss(bssid)
if "38050102030405" not in bss['ie']:
raise Exception("Timeout element not shown in scan results")
@remote_compatible
def test_ap_element_parse_oom(dev, apdev):
"""Information element parsing OOM"""
bssid = apdev[0]['bssid']
ssid = "test-wpa2-psk"
params = {'ssid': ssid,
'vendor_elements': "dd0d506f9a0a00000600411c440028"}
hapd = hostapd.add_ap(apdev[0], params)
dev[0].scan_for_bss(apdev[0]['bssid'], freq="2412")
with alloc_fail(dev[0], 1, "wpabuf_alloc;ieee802_11_vendor_ie_concat"):
bss = dev[0].get_bss(bssid)
logger.info(str(bss))
def test_ap_country(dev, apdev):
"""WPA2-PSK AP setting country code and using 5 GHz band"""
try:
hapd = None
bssid = apdev[0]['bssid']
ssid = "test-wpa2-psk"
passphrase = 'qwertyuiop'
params = hostapd.wpa2_params(ssid=ssid, passphrase=passphrase)
params['country_code'] = 'FI'
params['ieee80211d'] = '1'
params['hw_mode'] = 'a'
params['channel'] = '36'
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid, psk=passphrase, scan_freq="5180")
hwsim_utils.test_connectivity(dev[0], hapd)
finally:
if hapd:
hapd.request("DISABLE")
dev[0].disconnect_and_stop_scan()
hostapd.cmd_execute(apdev[0], ['iw', 'reg', 'set', '00'])
dev[0].wait_event(["CTRL-EVENT-REGDOM-CHANGE"], timeout=0.5)
dev[0].flush_scan_cache()
def test_ap_acl_accept(dev, apdev):
"""MAC ACL accept list"""
ssid = "acl"
params = {}
filename = hostapd.acl_file(dev, apdev, 'hostapd.macaddr')
hostapd.send_file(apdev[0], filename, filename)
params['ssid'] = ssid
params['accept_mac_file'] = filename
hapd = hostapd.add_ap(apdev[0], params)
dev[0].scan_for_bss(apdev[0]['bssid'], freq="2412")
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
dev[1].scan_for_bss(apdev[0]['bssid'], freq="2412")
dev[1].connect(ssid, key_mgmt="NONE", scan_freq="2412")
dev[0].request("REMOVE_NETWORK all")
dev[1].request("REMOVE_NETWORK all")
hapd.request("SET macaddr_acl 1")
dev[1].dump_monitor()
dev[1].connect(ssid, key_mgmt="NONE", scan_freq="2412", wait_connect=False)
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
ev = dev[1].wait_event(["CTRL-EVENT-CONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected association")
if filename.startswith('/tmp/'):
os.unlink(filename)
def test_ap_acl_deny(dev, apdev):
"""MAC ACL deny list"""
ssid = "acl"
params = {}
filename = hostapd.acl_file(dev, apdev, 'hostapd.macaddr')
hostapd.send_file(apdev[0], filename, filename)
params['ssid'] = ssid
params['deny_mac_file'] = filename
hapd = hostapd.add_ap(apdev[0], params)
dev[0].scan_for_bss(apdev[0]['bssid'], freq="2412", passive=True)
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412", wait_connect=False)
dev[1].scan_for_bss(apdev[0]['bssid'], freq="2412")
dev[1].connect(ssid, key_mgmt="NONE", scan_freq="2412")
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected association")
if filename.startswith('/tmp/'):
os.unlink(filename)
def test_ap_acl_mgmt(dev, apdev):
"""MAC ACL accept/deny management"""
ssid = "acl"
params = {}
filename = hostapd.acl_file(dev, apdev, 'hostapd.macaddr')
hostapd.send_file(apdev[0], filename, filename)
params['ssid'] = ssid
params['deny_mac_file'] = filename
hapd = hostapd.add_ap(apdev[0], params)
accept = hapd.request("ACCEPT_ACL SHOW").splitlines()
logger.info("accept: " + str(accept))
deny = hapd.request("DENY_ACL SHOW").splitlines()
logger.info("deny: " + str(deny))
if len(accept) != 0:
raise Exception("Unexpected number of accept entries")
if len(deny) != 3:
raise Exception("Unexpected number of deny entries")
if "01:01:01:01:01:01 VLAN_ID=0" not in deny:
raise Exception("Missing deny entry")
hapd.request("ACCEPT_ACL ADD_MAC 22:33:44:55:66:77")
hapd.request("DENY_ACL ADD_MAC 22:33:44:55:66:88 VLAN_ID=2")
accept = hapd.request("ACCEPT_ACL SHOW").splitlines()
logger.info("accept: " + str(accept))
deny = hapd.request("DENY_ACL SHOW").splitlines()
logger.info("deny: " + str(deny))
if len(accept) != 1:
raise Exception("Unexpected number of accept entries (2)")
if len(deny) != 4:
raise Exception("Unexpected number of deny entries (2)")
if "01:01:01:01:01:01 VLAN_ID=0" not in deny:
raise Exception("Missing deny entry (2)")
if "22:33:44:55:66:88 VLAN_ID=2" not in deny:
raise Exception("Missing deny entry (2)")
if "22:33:44:55:66:77 VLAN_ID=0" not in accept:
raise Exception("Missing accept entry (2)")
hapd.request("ACCEPT_ACL DEL_MAC 22:33:44:55:66:77")
hapd.request("DENY_ACL DEL_MAC 22:33:44:55:66:88")
accept = hapd.request("ACCEPT_ACL SHOW").splitlines()
logger.info("accept: " + str(accept))
deny = hapd.request("DENY_ACL SHOW").splitlines()
logger.info("deny: " + str(deny))
if len(accept) != 0:
raise Exception("Unexpected number of accept entries (3)")
if len(deny) != 3:
raise Exception("Unexpected number of deny entries (3)")
if "01:01:01:01:01:01 VLAN_ID=0" not in deny:
raise Exception("Missing deny entry (3)")
hapd.request("ACCEPT_ACL CLEAR")
hapd.request("DENY_ACL CLEAR")
accept = hapd.request("ACCEPT_ACL SHOW").splitlines()
logger.info("accept: " + str(accept))
deny = hapd.request("DENY_ACL SHOW").splitlines()
logger.info("deny: " + str(deny))
if len(accept) != 0:
raise Exception("Unexpected number of accept entries (4)")
if len(deny) != 0:
raise Exception("Unexpected number of deny entries (4)")
dev[0].scan_for_bss(apdev[0]['bssid'], freq="2412")
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
dev[0].dump_monitor()
hapd.request("DENY_ACL ADD_MAC " + dev[0].own_addr())
dev[0].wait_disconnected()
dev[0].request("DISCONNECT")
if filename.startswith('/tmp/'):
os.unlink(filename)
@remote_compatible
def test_ap_wds_sta(dev, apdev):
"""WPA2-PSK AP with STA using 4addr mode"""
ssid = "test-wpa2-psk"
passphrase = 'qwertyuiop'
params = hostapd.wpa2_params(ssid=ssid, passphrase=passphrase)
params['wds_sta'] = "1"
params['wds_bridge'] = "wds-br0"
hapd = hostapd.add_ap(apdev[0], params)
try:
dev[0].cmd_execute(['brctl', 'addbr', 'wds-br0'])
dev[0].cmd_execute(['brctl', 'setfd', 'wds-br0', '0'])
dev[0].cmd_execute(['ip', 'link', 'set', 'dev', 'wds-br0', 'up'])
dev[0].cmd_execute(['iw', dev[0].ifname, 'set', '4addr', 'on'])
dev[0].connect(ssid, psk=passphrase, scan_freq="2412")
ev = hapd.wait_event(["WDS-STA-INTERFACE-ADDED"], timeout=10)
if ev is None:
raise Exception("No WDS-STA-INTERFACE-ADDED event seen")
if "sta_addr=" + dev[0].own_addr() not in ev:
raise Exception("No sta_addr match in " + ev)
if "ifname=" + hapd.ifname + ".sta" not in ev:
raise Exception("No ifname match in " + ev)
sta = hapd.get_sta(dev[0].own_addr())
if "wds_sta_ifname" not in sta:
raise Exception("Missing wds_sta_ifname in STA data")
if "ifname=" + sta['wds_sta_ifname'] not in ev:
raise Exception("wds_sta_ifname %s not in event: %s" %
(sta['wds_sta_ifname'], ev))
hwsim_utils.test_connectivity_iface(dev[0], hapd, "wds-br0",
max_tries=15)
dev[0].request("REATTACH")
dev[0].wait_connected()
hwsim_utils.test_connectivity_iface(dev[0], hapd, "wds-br0",
max_tries=15)
dev[0].request("SET reassoc_same_bss_optim 1")
dev[0].request("REATTACH")
dev[0].wait_connected()
hwsim_utils.test_connectivity_iface(dev[0], hapd, "wds-br0",
max_tries=5, timeout=1)
finally:
dev[0].request("SET reassoc_same_bss_optim 0")
dev[0].cmd_execute(['iw', dev[0].ifname, 'set', '4addr', 'off'])
dev[0].cmd_execute(['ip', 'link', 'set', 'dev', 'wds-br0', 'down'])
dev[0].cmd_execute(['brctl', 'delbr', 'wds-br0'])
def test_ap_wds_sta_eap(dev, apdev):
"""WPA2-EAP AP with STA using 4addr mode"""
ssid = "test-wpa2-eap"
params = hostapd.wpa2_eap_params(ssid=ssid)
params['wds_sta'] = "1"
params['wds_bridge'] = "wds-br0"
hapd = hostapd.add_ap(apdev[0], params)
try:
dev[0].cmd_execute(['brctl', 'addbr', 'wds-br0'])
dev[0].cmd_execute(['brctl', 'setfd', 'wds-br0', '0'])
dev[0].cmd_execute(['ip', 'link', 'set', 'dev', 'wds-br0', 'up'])
dev[0].cmd_execute(['iw', dev[0].ifname, 'set', '4addr', 'on'])
dev[0].connect(ssid, key_mgmt="WPA-EAP", eap="GPSK",
identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
ev = hapd.wait_event(["WDS-STA-INTERFACE-ADDED"], timeout=10)
if ev is None:
raise Exception("No WDS-STA-INTERFACE-ADDED event seen")
if "sta_addr=" + dev[0].own_addr() not in ev:
raise Exception("No sta_addr match in " + ev)
if "ifname=" + hapd.ifname + ".sta" not in ev:
raise Exception("No ifname match in " + ev)
sta = hapd.get_sta(dev[0].own_addr())
if "wds_sta_ifname" not in sta:
raise Exception("Missing wds_sta_ifname in STA data")
if "ifname=" + sta['wds_sta_ifname'] not in ev:
raise Exception("wds_sta_ifname %s not in event: %s" %
(sta['wds_sta_ifname'], ev))
hwsim_utils.test_connectivity_iface(dev[0], hapd, "wds-br0",
max_tries=15)
finally:
dev[0].cmd_execute(['iw', dev[0].ifname, 'set', '4addr', 'off'])
dev[0].cmd_execute(['ip', 'link', 'set', 'dev', 'wds-br0', 'down'])
dev[0].cmd_execute(['brctl', 'delbr', 'wds-br0'])
def test_ap_wds_sta_open(dev, apdev):
"""Open AP with STA using 4addr mode"""
ssid = "test-wds-open"
params = {}
params['ssid'] = ssid
params['wds_sta'] = "1"
params['wds_bridge'] = "wds-br0"
hapd = hostapd.add_ap(apdev[0], params)
try:
dev[0].cmd_execute(['brctl', 'addbr', 'wds-br0'])
dev[0].cmd_execute(['brctl', 'setfd', 'wds-br0', '0'])
dev[0].cmd_execute(['ip', 'link', 'set', 'dev', 'wds-br0', 'up'])
dev[0].cmd_execute(['iw', dev[0].ifname, 'set', '4addr', 'on'])
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
hwsim_utils.test_connectivity_iface(dev[0], hapd, "wds-br0",
max_tries=15)
dev[0].request("REATTACH")
dev[0].wait_connected()
hwsim_utils.test_connectivity_iface(dev[0], hapd, "wds-br0",
max_tries=15)
dev[0].request("SET reassoc_same_bss_optim 1")
dev[0].request("REATTACH")
dev[0].wait_connected()
hwsim_utils.test_connectivity_iface(dev[0], hapd, "wds-br0",
max_tries=5, timeout=1)
finally:
dev[0].request("SET reassoc_same_bss_optim 0")
dev[0].cmd_execute(['iw', dev[0].ifname, 'set', '4addr', 'off'])
dev[0].cmd_execute(['ip', 'link', 'set', 'dev', 'wds-br0', 'down'])
dev[0].cmd_execute(['brctl', 'delbr', 'wds-br0'])
def test_ap_wds_sta_wep(dev, apdev):
"""WEP AP with STA using 4addr mode"""
check_wep_capa(dev[0])
ssid = "test-wds-wep"
params = {}
params['ssid'] = ssid
params["ieee80211n"] = "0"
params['wep_key0'] = '"hello"'
params['wds_sta'] = "1"
params['wds_bridge'] = "wds-br0"
hapd = hostapd.add_ap(apdev[0], params)
try:
dev[0].cmd_execute(['brctl', 'addbr', 'wds-br0'])
dev[0].cmd_execute(['brctl', 'setfd', 'wds-br0', '0'])
dev[0].cmd_execute(['ip', 'link', 'set', 'dev', 'wds-br0', 'up'])
dev[0].cmd_execute(['iw', dev[0].ifname, 'set', '4addr', 'on'])
dev[0].connect(ssid, key_mgmt="NONE", wep_key0='"hello"',
scan_freq="2412")
hwsim_utils.test_connectivity_iface(dev[0], hapd, "wds-br0",
max_tries=15)
dev[0].request("REATTACH")
dev[0].wait_connected()
hwsim_utils.test_connectivity_iface(dev[0], hapd, "wds-br0",
max_tries=15)
dev[0].request("SET reassoc_same_bss_optim 1")
dev[0].request("REATTACH")
dev[0].wait_connected()
hwsim_utils.test_connectivity_iface(dev[0], hapd, "wds-br0",
max_tries=5, timeout=1)
finally:
dev[0].request("SET reassoc_same_bss_optim 0")
dev[0].cmd_execute(['iw', dev[0].ifname, 'set', '4addr', 'off'])
dev[0].cmd_execute(['ip', 'link', 'set', 'dev', 'wds-br0', 'down'])
dev[0].cmd_execute(['brctl', 'delbr', 'wds-br0'])
@remote_compatible
def test_ap_inactivity_poll(dev, apdev):
"""AP using inactivity poll"""
ssid = "test-wpa2-psk"
passphrase = 'qwertyuiop'
params = hostapd.wpa2_params(ssid=ssid, passphrase=passphrase)
params['ap_max_inactivity'] = "1"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid, psk=passphrase, scan_freq="2412")
hapd.set("ext_mgmt_frame_handling", "1")
dev[0].request("DISCONNECT")
ev = hapd.wait_event(["MGMT-RX"], timeout=5)
if ev is None:
raise Exception("MGMT RX wait timed out for Deauth")
hapd.set("ext_mgmt_frame_handling", "0")
ev = hapd.wait_event(["AP-STA-DISCONNECTED"], timeout=30)
if ev is None:
raise Exception("STA disconnection on inactivity was not reported")
@remote_compatible
def test_ap_inactivity_disconnect(dev, apdev):
"""AP using inactivity disconnect"""
ssid = "test-wpa2-psk"
passphrase = 'qwertyuiop'
params = hostapd.wpa2_params(ssid=ssid, passphrase=passphrase)
params['ap_max_inactivity'] = "1"
params['skip_inactivity_poll'] = "1"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid, psk=passphrase, scan_freq="2412")
hapd.set("ext_mgmt_frame_handling", "1")
dev[0].request("DISCONNECT")
ev = hapd.wait_event(["MGMT-RX"], timeout=5)
if ev is None:
raise Exception("MGMT RX wait timed out for Deauth")
hapd.set("ext_mgmt_frame_handling", "0")
ev = hapd.wait_event(["AP-STA-DISCONNECTED"], timeout=30)
if ev is None:
raise Exception("STA disconnection on inactivity was not reported")
@remote_compatible
def test_ap_basic_rates(dev, apdev):
"""Open AP with lots of basic rates"""
ssid = "basic rates"
params = {}
params['ssid'] = ssid
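    # basic_rates values are in 100 kb/s units (10 = 1 Mbps, ..., 540 = 54 Mbps)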
params['basic_rates'] = "10 20 55 110 60 90 120 180 240 360 480 540"
hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
@remote_compatible
def test_ap_short_preamble(dev, apdev):
"""Open AP with short preamble"""
ssid = "short preamble"
params = {}
params['ssid'] = ssid
params['preamble'] = "1"
hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
def test_ap_spectrum_management_required(dev, apdev):
"""Open AP with spectrum management required"""
ssid = "spectrum mgmt"
params = {}
params['ssid'] = ssid
params["country_code"] = "JP"
params["hw_mode"] = "a"
params["channel"] = "36"
params["ieee80211d"] = "1"
params["local_pwr_constraint"] = "3"
params['spectrum_mgmt_required'] = "1"
try:
hapd = None
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="5180")
dev[0].wait_regdom(country_ie=True)
finally:
if hapd:
hapd.request("DISABLE")
dev[0].disconnect_and_stop_scan()
hostapd.cmd_execute(apdev[0], ['iw', 'reg', 'set', '00'])
dev[0].wait_event(["CTRL-EVENT-REGDOM-CHANGE"], timeout=0.5)
dev[0].flush_scan_cache()
@remote_compatible
def test_ap_max_listen_interval(dev, apdev):
"""Open AP with maximum listen interval limit"""
ssid = "listen"
params = {}
params['ssid'] = ssid
params['max_listen_interval'] = "1"
hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-ASSOC-REJECT"])
if ev is None:
raise Exception("Association rejection not reported")
if "status_code=51" not in ev:
raise Exception("Unexpected ASSOC-REJECT reason")
@remote_compatible
def test_ap_max_num_sta(dev, apdev):
"""Open AP with maximum STA count"""
ssid = "max"
params = {}
params['ssid'] = ssid
params['max_num_sta'] = "1"
hostapd.add_ap(apdev[0], params)
dev[1].connect(ssid, key_mgmt="NONE", scan_freq="2412")
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected association")
def test_ap_max_num_sta_no_probe_resp(dev, apdev, params):
"""Maximum STA count and limit on Probe Response frames"""
logdir = params['logdir']
dev[0].flush_scan_cache()
ssid = "max"
params = {}
params['ssid'] = ssid
params['beacon_int'] = "2000"
params['max_num_sta'] = "1"
params['no_probe_resp_if_max_sta'] = "1"
hostapd.add_ap(apdev[0], params)
dev[1].connect(ssid, key_mgmt="NONE", scan_freq="2412")
dev[0].scan(freq=2412, type="ONLY")
dev[0].scan(freq=2412, type="ONLY")
seen = dev[0].get_bss(apdev[0]['bssid']) != None
dev[1].scan(freq=2412, type="ONLY")
if seen:
out = run_tshark(os.path.join(logdir, "hwsim0.pcapng"),
"wlan.fc.type_subtype == 5", ["wlan.da"])
if out:
if dev[0].own_addr() not in out:
# Discovery happened through Beacon frame reception. That's not
# an error case.
seen = False
if dev[1].own_addr() not in out:
raise Exception("No Probe Response frames to dev[1] seen")
if seen:
raise Exception("AP found unexpectedly")
@remote_compatible
def test_ap_tx_queue_params(dev, apdev):
"""Open AP with TX queue params set"""
ssid = "tx"
params = {}
params['ssid'] = ssid
params['tx_queue_data2_aifs'] = "4"
params['tx_queue_data2_cwmin'] = "7"
params['tx_queue_data2_cwmax'] = "1023"
params['tx_queue_data2_burst'] = "4.2"
params['tx_queue_data1_aifs'] = "4"
params['tx_queue_data1_cwmin'] = "7"
params['tx_queue_data1_cwmax'] = "1023"
params['tx_queue_data1_burst'] = "2"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
hwsim_utils.test_connectivity(dev[0], hapd)
def test_ap_tx_queue_params_invalid(dev, apdev):
"""Invalid TX queue params set (cwmin/cwmax)"""
ssid = "tx"
params = {}
params['ssid'] = ssid
params['tx_queue_data2_aifs'] = "4"
params['tx_queue_data2_cwmin'] = "7"
params['tx_queue_data2_cwmax'] = "1023"
params['tx_queue_data2_burst'] = "4.2"
params['wmm_ac_bk_cwmin'] = "4"
params['wmm_ac_bk_cwmax'] = "10"
params['wmm_ac_bk_aifs'] = "7"
params['wmm_ac_bk_txop_limit'] = "0"
params['wmm_ac_bk_acm'] = "0"
hapd = hostapd.add_ap(apdev[0], params)
# Valid WMM change
hapd.set("wmm_ac_be_cwmin", "3")
# "Invalid TX queue cwMin/cwMax values. cwMin(7) greater than cwMax(3)"
if "FAIL" not in hapd.request('SET tx_queue_data2_cwmax 3'):
raise Exception("TX cwMax < cwMin accepted")
# "Invalid WMM AC cwMin/cwMax values. cwMin(4) greater than cwMax(3)"
if "FAIL" not in hapd.request('SET wmm_ac_bk_cwmax 3'):
raise Exception("AC cwMax < cwMin accepted")
def test_ap_beacon_rate_legacy(dev, apdev):
"""Open AP with Beacon frame TX rate 5.5 Mbps"""
hapd = hostapd.add_ap(apdev[0], {'ssid': 'beacon-rate'})
res = hapd.get_driver_status_field('capa.flags')
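    # This capa.flags bit indicates driver support for configuring the Beacon
    # frame TX rate using legacy rates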
if (int(res, 0) & 0x0000080000000000) == 0:
raise HwsimSkip("Setting Beacon frame TX rate not supported")
hapd.disable()
hapd.set('beacon_rate', '55')
hapd.enable()
dev[0].connect('beacon-rate', key_mgmt="NONE", scan_freq="2412")
time.sleep(0.5)
def test_ap_beacon_rate_legacy2(dev, apdev):
"""Open AP with Beacon frame TX rate 12 Mbps in VHT BSS"""
hapd = hostapd.add_ap(apdev[0], {'ssid': 'beacon-rate'})
res = hapd.get_driver_status_field('capa.flags')
if (int(res, 0) & 0x0000080000000000) == 0:
raise HwsimSkip("Setting Beacon frame TX rate not supported")
hapd.disable()
hapd.set('beacon_rate', '120')
hapd.set("country_code", "DE")
hapd.set("hw_mode", "a")
hapd.set("channel", "36")
hapd.set("ieee80211n", "1")
hapd.set("ieee80211ac", "1")
hapd.set("ht_capab", "[HT40+]")
hapd.set("vht_capab", "")
hapd.set("vht_oper_chwidth", "0")
hapd.set("vht_oper_centr_freq_seg0_idx", "0")
try:
hapd.enable()
dev[0].scan_for_bss(hapd.own_addr(), freq="5180")
dev[0].connect('beacon-rate', key_mgmt="NONE", scan_freq="5180")
time.sleep(0.5)
finally:
dev[0].request("DISCONNECT")
hapd.request("DISABLE")
subprocess.call(['iw', 'reg', 'set', '00'])
dev[0].flush_scan_cache()
def test_ap_beacon_rate_ht(dev, apdev):
"""Open AP with Beacon frame TX rate HT-MCS 0"""
hapd = hostapd.add_ap(apdev[0], {'ssid': 'beacon-rate'})
res = hapd.get_driver_status_field('capa.flags')
if (int(res, 0) & 0x0000100000000000) == 0:
raise HwsimSkip("Setting Beacon frame TX rate not supported")
hapd.disable()
hapd.set('beacon_rate', 'ht:0')
hapd.enable()
dev[0].connect('beacon-rate', key_mgmt="NONE", scan_freq="2412")
time.sleep(0.5)
def test_ap_beacon_rate_ht2(dev, apdev):
"""Open AP with Beacon frame TX rate HT-MCS 1 in VHT BSS"""
hapd = hostapd.add_ap(apdev[0], {'ssid': 'beacon-rate'})
res = hapd.get_driver_status_field('capa.flags')
if (int(res, 0) & 0x0000100000000000) == 0:
raise HwsimSkip("Setting Beacon frame TX rate not supported")
hapd.disable()
hapd.set('beacon_rate', 'ht:1')
hapd.set("country_code", "DE")
hapd.set("hw_mode", "a")
hapd.set("channel", "36")
hapd.set("ieee80211n", "1")
hapd.set("ieee80211ac", "1")
hapd.set("ht_capab", "[HT40+]")
hapd.set("vht_capab", "")
hapd.set("vht_oper_chwidth", "0")
hapd.set("vht_oper_centr_freq_seg0_idx", "0")
try:
hapd.enable()
dev[0].scan_for_bss(hapd.own_addr(), freq="5180")
dev[0].connect('beacon-rate', key_mgmt="NONE", scan_freq="5180")
time.sleep(0.5)
finally:
dev[0].request("DISCONNECT")
hapd.request("DISABLE")
subprocess.call(['iw', 'reg', 'set', '00'])
dev[0].flush_scan_cache()
def test_ap_beacon_rate_vht(dev, apdev):
"""Open AP with Beacon frame TX rate VHT-MCS 0"""
hapd = hostapd.add_ap(apdev[0], {'ssid': 'beacon-rate'})
res = hapd.get_driver_status_field('capa.flags')
if (int(res, 0) & 0x0000200000000000) == 0:
raise HwsimSkip("Setting Beacon frame TX rate not supported")
hapd.disable()
hapd.set('beacon_rate', 'vht:0')
hapd.set("country_code", "DE")
hapd.set("hw_mode", "a")
hapd.set("channel", "36")
hapd.set("ieee80211n", "1")
hapd.set("ieee80211ac", "1")
hapd.set("ht_capab", "[HT40+]")
hapd.set("vht_capab", "")
hapd.set("vht_oper_chwidth", "0")
hapd.set("vht_oper_centr_freq_seg0_idx", "0")
try:
hapd.enable()
dev[0].scan_for_bss(hapd.own_addr(), freq="5180")
dev[0].connect('beacon-rate', key_mgmt="NONE", scan_freq="5180")
time.sleep(0.5)
finally:
dev[0].request("DISCONNECT")
hapd.request("DISABLE")
subprocess.call(['iw', 'reg', 'set', '00'])
dev[0].flush_scan_cache()
def test_ap_wep_to_wpa(dev, apdev):
"""WEP to WPA2-PSK configuration change in hostapd"""
check_wep_capa(dev[0])
hapd = hostapd.add_ap(apdev[0],
{"ssid": "wep-to-wpa",
"wep_key0": '"hello"'})
dev[0].flush_scan_cache()
dev[0].connect("wep-to-wpa", key_mgmt="NONE", wep_key0='"hello"',
scan_freq="2412")
hwsim_utils.test_connectivity(dev[0], hapd)
dev[0].request("DISCONNECT")
dev[0].wait_disconnected()
hapd.disable()
hapd.set("wep_key0", "")
hapd.set("wpa_passphrase", "12345678")
hapd.set("wpa", "2")
hapd.set("wpa_key_mgmt", "WPA-PSK")
hapd.set("rsn_pairwise", "CCMP")
hapd.enable()
dev[0].connect("wep-to-wpa", psk="12345678", scan_freq="2412")
hwsim_utils.test_connectivity(dev[0], hapd)
def test_ap_missing_psk(dev, apdev):
"""WPA2-PSK AP and no PSK configured"""
ssid = "test-wpa2-psk"
params = hostapd.wpa2_params(ssid=ssid)
try:
# "WPA-PSK enabled, but PSK or passphrase is not configured."
hostapd.add_ap(apdev[0], params)
raise Exception("AP setup succeeded unexpectedly")
except Exception as e:
if "Failed to enable hostapd" in str(e):
pass
else:
raise
def test_ap_eapol_version(dev, apdev):
"""hostapd eapol_version configuration"""
passphrase = "asdfghjkl"
params = hostapd.wpa2_params(ssid="test1", passphrase=passphrase)
hapd = hostapd.add_ap(apdev[0], params)
params = hostapd.wpa2_params(ssid="test2", passphrase=passphrase)
params['eapol_version'] = '1'
hapd2 = hostapd.add_ap(apdev[1], params)
hapd.request("SET ext_eapol_frame_io 1")
dev[0].connect("test1", psk=passphrase, scan_freq="2412",
wait_connect=False)
ev1 = hapd.wait_event(["EAPOL-TX"], timeout=15)
if ev1 is None:
raise Exception("Timeout on EAPOL-TX from hostapd")
hapd.request("SET ext_eapol_frame_io 0")
hapd2.request("SET ext_eapol_frame_io 1")
dev[1].connect("test2", psk=passphrase, scan_freq="2412",
wait_connect=False)
ev2 = hapd2.wait_event(["EAPOL-TX"], timeout=15)
if ev2 is None:
raise Exception("Timeout on EAPOL-TX from hostapd")
hapd2.request("SET ext_eapol_frame_io 0")
dev[0].wait_connected()
dev[1].wait_connected()
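    # EAPOL-TX event format: "EAPOL-TX <addr> <frame hexdump>"; the first octet
    # of the frame is the EAPOL protocol version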
ver1 = ev1.split(' ')[2][0:2]
ver2 = ev2.split(' ')[2][0:2]
if ver1 != "02":
raise Exception("Unexpected default eapol_version: " + ver1)
if ver2 != "01":
raise Exception("eapol_version did not match configuration: " + ver2)
def test_ap_dtim_period(dev, apdev):
"""DTIM period configuration"""
ssid = "dtim-period"
params = {'ssid': ssid, 'dtim_period': "10"}
hapd = hostapd.add_ap(apdev[0], params)
bssid = hapd.own_addr()
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
for i in range(10):
dev[0].scan(freq="2412")
bss = dev[0].get_bss(bssid)
if 'beacon_ie' in bss:
break
time.sleep(0.2)
if 'beacon_ie' not in bss:
raise Exception("Did not find Beacon IEs")
ie = parse_ie(bss['beacon_ie'])
if 5 not in ie:
raise Exception("TIM element missing")
count, period = struct.unpack('BB', ie[5][0:2])
logger.info("DTIM count %d DTIM period %d" % (count, period))
if period != 10:
raise Exception("Unexpected DTIM period: %d" % period)
if count >= period:
raise Exception("Unexpected DTIM count: %d" % count)
def test_ap_no_probe_resp(dev, apdev):
"""AP with Probe Response frame sending from hostapd disabled"""
ssid = "no-probe-resp"
params = {'ssid': ssid, 'send_probe_response': "0"}
hapd = hostapd.add_ap(apdev[0], params)
bssid = hapd.own_addr()
dev[0].scan_for_bss(bssid, freq="2412", passive=True)
dev[0].scan_for_bss(bssid, freq="2412", force_scan=True)
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
bss = dev[0].get_bss(bssid)
if 'ie' in bss and 'beacon_ie' in bss and \
len(bss['ie']) != len(bss['beacon_ie']):
raise Exception("Probe Response frames seen")
def test_ap_long_preamble(dev, apdev):
"""AP with long preamble"""
ssid = "long-preamble"
params = {'ssid': ssid, 'preamble': "0",
'hw_mode': 'b', 'ieee80211n': '0',
'supported_rates': '10', 'basic_rates': '10'}
hapd = hostapd.add_ap(apdev[0], params)
bssid = hapd.own_addr()
dev[0].scan_for_bss(bssid, freq="2412")
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
hwsim_utils.test_connectivity(dev[0], hapd)
def test_ap_wmm_uapsd(dev, apdev):
"""AP with U-APSD advertisement"""
ssid = "uapsd"
params = {'ssid': ssid, 'uapsd_advertisement_enabled': "1"}
hapd = hostapd.add_ap(apdev[0], params)
bssid = hapd.own_addr()
dev[0].scan_for_bss(bssid, freq="2412")
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
hwsim_utils.test_connectivity(dev[0], hapd)
def test_ap_wowlan_triggers(dev, apdev):
"""AP with wowlan_triggers"""
ssid = "wowlan"
params = {'ssid': ssid, 'wowlan_triggers': "any"}
hapd = hostapd.add_ap(apdev[0], params)
bssid = hapd.own_addr()
dev[0].scan_for_bss(bssid, freq="2412")
dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
hwsim_utils.test_connectivity(dev[0], hapd)
| 39.516548 | 84 | 0.62119 | ["Unlicense"] | AreedAhmed/fragattacks | tests/hwsim/test_ap_params.py | 33,431 | Python |
import unittest
import sys
sys.path.insert(1, '..')
import easy_gui
class GUI(easy_gui.EasyGUI):
def __init__(self, **kwargs):
self.add_widget(type='button', text='Button1', command_func=lambda e: print('Button1 working!'))
self.test_lbl = self.add_widget(type='label', text='Here\'s an awesome label!')
self.add_widget('btn', 'Update Label', command_func=self.update_lbl)
def update_lbl(self, *args):
self.test_lbl.set(self.test_lbl.get() + 'X')
class TestEasyGUI(unittest.TestCase):
def test_gui_creation(self):
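        # Exercise window-manager related keyword arguments; each construction
        # below is expected to complete without raising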
GUI()
GUI(alpha=0.7)
GUI(fullscreen=True) # use Alt + F4 to close
GUI(toolwindow=True)
GUI(topmost=True)
GUI(overrideredirect=True)
GUI(disable_interaction=True)
self.assertTrue(True)
if __name__ == '__main__':
    unittest.main()  # buffer=True
| 25.823529 | 104 | 0.650342 | ["MIT"] | zachbateman/easy_gui | tests/test_wm_attributes.py | 878 | Python |
# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <[email protected]>
# please also see AUTHORS file
# :copyright: (c) 2013, Isis Lovecruft
# (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Unittests for the :mod:`bridgedb.persistent` module.
These tests ensure that :meth:`bridgedb.persistent.State.save`,
:func:`bridgedb.persistent.load`, and :meth:`bridgedb.persistent.State.load`
are all functioning as expected.
This module should not import :mod:`sure`.
"""
import os
from copy import deepcopy
from io import StringIO
from twisted.trial import unittest
from bridgedb import persistent
TEST_CONFIG_FILE = StringIO(unicode("""\
BRIDGE_FILES = ['bridge-descriptors', 'bridge-descriptors.new']
LOGFILE = 'bridgedb.log'"""))
class StateSaveAndLoadTests(unittest.TestCase):
"""Test save() and load() of :mod:`~bridgedb.persistent`."""
timeout = 15
def setUp(self):
configuration = {}
TEST_CONFIG_FILE.seek(0)
compiled = compile(TEST_CONFIG_FILE.read(), '<string>', 'exec')
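        # Python 2 exec statement: run the compiled config in the
        # 'configuration' dict so its assignments become config entries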
exec compiled in configuration
config = persistent.Conf(**configuration)
self.config = config
self.state = persistent.State(**config.__dict__)
self.state.config = config
self.state.statefile = os.path.abspath('bridgedb.state')
def loadedStateAssertions(self, loadedState):
# For some reason, twisted.trial.unittest.TestCase in Python2.6
# doesn't have an 'assertIsNotNone' attribute...
self.assertTrue(loadedState is not None)
self.assertIsInstance(loadedState, persistent.State)
self.assertNotIdentical(self.state, loadedState)
self.assertNotEqual(self.state, loadedState)
# For some reason, twisted.trial.unittest.TestCase in Python2.6
# doesn't have an 'assertItemsEqual' attribute...
        # list.sort() returns None, so compare sorted copies of the key lists
        self.assertEqual(sorted(self.state.__dict__.keys()),
                         sorted(loadedState.__dict__.keys()))
def savedStateAssertions(self, savedStatefile=None):
self.assertTrue(os.path.isfile(str(self.state.statefile)))
if savedStatefile:
self.assertTrue(os.path.isfile(str(savedStatefile)))
def test_save(self):
self.state.save()
self.savedStateAssertions()
def test_stateSaveTempfile(self):
savefile = self.mktemp()
self.state.statefile = savefile
self.state.save(savefile)
savedStatefile = str(self.state.statefile)
def test_stateLoadTempfile(self):
savefile = self.mktemp()
self.state.statefile = savefile
self.assertTrue(self.state.statefile.endswith(savefile))
self.state.save(savefile)
self.savedStateAssertions(savefile)
loadedState = self.state.load(savefile)
self.loadedStateAssertions(loadedState)
def test_stateSaveAndLoad(self):
self.state.save()
loadedState = self.state.load()
self.loadedStateAssertions(loadedState)
def test_load(self):
self.state.save()
loadedState = persistent.load()
self.loadedStateAssertions(loadedState)
| 34.081633 | 76 | 0.678743 | ["BSD-3-Clause-Clear"] | gsathya/bridgedb | lib/bridgedb/test/test_persistentSaveAndLoad.py | 3,340 | Python |
'''
Implements the targetcli target related UI.
This file is part of targetcli.
Copyright (c) 2011-2013 by Datera, Inc
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''
from ui_node import UINode, UIRTSLibNode
from rtslib import RTSLibError, RTSLibBrokenLink, utils
from rtslib import NodeACL, NetworkPortal, MappedLUN
from rtslib import Target, TPG, LUN
from configshell import ExecutionError
import os
try:
import ethtool
except ImportError:
ethtool = None
auth_params = ('userid', 'password', 'mutual_userid', 'mutual_password')
discovery_params = auth_params + ("enable",)
class UIFabricModule(UIRTSLibNode):
'''
A fabric module UI.
'''
def __init__(self, fabric_module, parent):
super(UIFabricModule, self).__init__(fabric_module.name,
fabric_module, parent,
late_params=True)
self.cfs_cwd = fabric_module.path
self.refresh()
if self.rtsnode.has_feature('discovery_auth'):
for param in discovery_params:
self.define_config_group_param('discovery_auth',
param, 'string')
self.refresh()
# Support late params
#
# By default the base class will call list_parameters and list_attributes
# in init. This stops us from being able to lazy-load fabric modules.
# We declare we support "late_params" to stop this, and then
# this code overrides the base class methods that involve enumerating
# this stuff, so we don't need to call list_parameters/attrs (which
# would cause the module to load) until the ui is actually asking for
# them from us.
# Currently fabricmodules don't have these anyways, this is all a CYA thing.
def list_config_groups(self):
groups = super(UIFabricModule, self).list_config_groups()
if len(self.rtsnode.list_parameters()):
groups.append('parameter')
if len(self.rtsnode.list_attributes()):
groups.append('attribute')
return groups
# Support late params (see above)
def list_group_params(self, group, writable=None):
if group not in ("parameter", "attribute"):
return super(UIFabricModule, self).list_group_params(group,
writable)
params_func = getattr(self.rtsnode, "list_%ss" % group)
params = params_func()
params_ro = params_func(writable=False)
ret_list = []
for param in params:
p_writable = param not in params_ro
if writable is not None and p_writable != writable:
continue
ret_list.append(param)
ret_list.sort()
return ret_list
# Support late params (see above)
def get_group_param(self, group, param):
if group not in ("parameter", "attribute"):
return super(UIFabricModule, self).get_group_param(group, param)
if param not in self.list_group_params(group):
raise ValueError("Not such parameter %s in configuration group %s"
% (param, group))
description = "The %s %s." % (param, group)
writable = param in self.list_group_params(group, writable=True)
return dict(name=param, group=group, type="string",
description=description, writable=writable)
def ui_getgroup_discovery_auth(self, auth_attr):
'''
This is the backend method for getting discovery_auth attributes.
@param auth_attr: The auth attribute to get the value of.
@type auth_attr: str
@return: The auth attribute's value
@rtype: str
'''
if auth_attr == 'enable':
return self.rtsnode.discovery_enable_auth
else:
return getattr(self.rtsnode, "discovery_" + auth_attr)
def ui_setgroup_discovery_auth(self, auth_attr, value):
'''
This is the backend method for setting discovery auth attributes.
@param auth_attr: The auth attribute to set the value of.
@type auth_attr: str
@param value: The auth's value
@type value: str
'''
self.assert_root()
if value is None:
value = ''
if auth_attr == 'enable':
self.rtsnode.discovery_enable_auth = value
else:
setattr(self.rtsnode, "discovery_" + auth_attr, value)
def refresh(self):
self._children = set([])
for target in self.rtsnode.targets:
self.shell.log.debug("Found target %s under fabric module %s."
% (target.wwn, target.fabric_module))
if target.has_feature('tpgts'):
UIMultiTPGTarget(target, self)
else:
UITarget(target, self)
def summary(self):
status = None
msg = []
fm = self.rtsnode
if fm.has_feature('discovery_auth') and fm.discovery_enable_auth:
if not (fm.discovery_password and fm.discovery_userid):
status = False
else:
status = True
if fm.discovery_authenticate_target:
msg.append("mutual disc auth")
else:
msg.append("1-way disc auth")
msg.append("Targets: %d" % len(self._children))
return (", ".join(msg), status)
def ui_command_create(self, wwn=None):
'''
Creates a new target. The I{wwn} format depends on the transport(s)
        supported by the fabric module. If the I{wwn} is omitted, then a
target will be created using either a randomly generated WWN of the
proper type, or the first unused WWN in the list of possible WWNs if
        one is available. If WWNs are constrained to a list (i.e. for hardware
        target addresses) and all WWNs are in use, the target creation will
        fail. Use the B{info} command to get more information about WWN type
and possible values.
SEE ALSO
========
B{info}
'''
self.assert_root()
target = Target(self.rtsnode, wwn, mode='create')
wwn = target.wwn
if target.has_feature('tpgts'):
ui_target = UIMultiTPGTarget(target, self)
self.shell.log.info("Created target %s." % wwn)
return ui_target.ui_command_create()
else:
ui_target = UITarget(target, self)
self.shell.log.info("Created target %s." % wwn)
return self.new_node(ui_target)
def ui_complete_create(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command create.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'wwn' and self.rtsnode.wwns is not None:
existing_wwns = [child.wwn for child in self.rtsnode.targets]
completions = [wwn for wwn in self.rtsnode.wwns
if wwn.startswith(text)
if wwn not in existing_wwns]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
def ui_command_delete(self, wwn):
'''
Recursively deletes the target with the specified I{wwn}, and all
objects hanging under it.
SEE ALSO
========
B{create}
'''
self.assert_root()
target = Target(self.rtsnode, wwn, mode='lookup')
target.delete()
self.shell.log.info("Deleted Target %s." % wwn)
self.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'wwn':
wwns = [child.name for child in self.children]
completions = [wwn for wwn in wwns if wwn.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
def ui_command_info(self):
'''
Displays information about the fabric module, notably the supported
transports(s) and accepted B{wwn} format(s), as long as supported
features.
'''
fabric = self.rtsnode
self.shell.log.info("Fabric module name: %s" % self.name)
self.shell.log.info("ConfigFS path: %s" % self.rtsnode.path)
self.shell.log.info("Allowed WWN types: %s" % ", ".join(fabric.wwn_types))
if fabric.wwns is not None:
self.shell.log.info("Allowed WWNs list: %s" % ', '.join(fabric.wwns))
self.shell.log.info("Fabric module features: %s" % ', '.join(fabric.features))
self.shell.log.info("Corresponding kernel module: %s"
% fabric.kernel_module)
def ui_command_version(self):
'''
Displays the target fabric module version.
'''
version = "Target fabric module %s: %s" \
% (self.rtsnode.name, self.rtsnode.version)
self.shell.con.display(version.strip())
class UIMultiTPGTarget(UIRTSLibNode):
'''
A generic target UI that has multiple TPGs.
'''
def __init__(self, target, parent):
super(UIMultiTPGTarget, self).__init__(target.wwn, target, parent)
self.cfs_cwd = target.path
self.refresh()
def refresh(self):
self._children = set([])
for tpg in self.rtsnode.tpgs:
UITPG(tpg, self)
def summary(self):
try:
self.rtsnode.fabric_module.to_normalized_wwn(self.rtsnode.wwn)
except:
return ("INVALID WWN", False)
return ("TPGs: %d" % len(self._children), None)
def ui_command_create(self, tag=None):
'''
Creates a new Target Portal Group within the target. The I{tag} must be
a strictly positive integer value. If omitted, the next available
Target Portal Group Tag (TPGT) will be used.
SEE ALSO
========
B{delete}
'''
self.assert_root()
tpg = TPG(self.rtsnode, tag, mode='create')
if self.shell.prefs['auto_enable_tpgt']:
tpg.enable = True
if tpg.has_feature("auth"):
tpg.set_attribute("authentication", 0)
self.shell.log.info("Created TPG %s." % tpg.tag)
ui_tpg = UITPG(tpg, self)
return self.new_node(ui_tpg)
def ui_command_delete(self, tag):
'''
Deletes the Target Portal Group with TPGT I{tag} from the target. The
I{tag} must be a positive integer matching an existing TPGT.
SEE ALSO
========
B{create}
'''
self.assert_root()
if tag.startswith("tpg"):
tag = tag[3:]
tpg = TPG(self.rtsnode, int(tag), mode='lookup')
tpg.delete()
self.shell.log.info("Deleted TPGT %s." % tag)
self.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'tag':
tags = [child.name[4:] for child in self.children]
completions = [tag for tag in tags if tag.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
class UITPG(UIRTSLibNode):
'''
A generic TPG UI.
'''
def __init__(self, tpg, parent):
name = "tpg%d" % tpg.tag
super(UITPG, self).__init__(name, tpg, parent)
self.cfs_cwd = tpg.path
self.refresh()
UILUNs(tpg, self)
if tpg.has_feature('acls'):
UINodeACLs(self.rtsnode, self)
if tpg.has_feature('nps'):
UIPortals(self.rtsnode, self)
if self.rtsnode.has_feature('auth') \
and os.path.exists(self.rtsnode.path + "/auth"):
for param in auth_params:
self.define_config_group_param('auth', param, 'string')
def summary(self):
tpg = self.rtsnode
status = None
msg = []
if tpg.has_feature('nexus'):
msg.append(str(self.rtsnode.nexus))
if not tpg.enable:
return ("disabled", False)
if tpg.has_feature("acls"):
if "generate_node_acls" in tpg.list_attributes() and \
int(tpg.get_attribute("generate_node_acls")):
msg.append("gen-acls")
else:
msg.append("no-gen-acls")
# 'auth' feature requires 'acls'
if tpg.has_feature("auth"):
if not int(tpg.get_attribute("authentication")):
msg.append("no-auth")
if int(tpg.get_attribute("generate_node_acls")):
# if auth=0, g_n_a=1 is recommended
status = True
else:
if not int(tpg.get_attribute("generate_node_acls")):
msg.append("auth per-acl")
else:
msg.append("tpg-auth")
status = True
if not (tpg.chap_password and tpg.chap_userid):
status = False
if tpg.authenticate_target:
msg.append("mutual auth")
else:
msg.append("1-way auth")
return (", ".join(msg), status)
def ui_getgroup_auth(self, auth_attr):
return getattr(self.rtsnode, "chap_" + auth_attr)
def ui_setgroup_auth(self, auth_attr, value):
self.assert_root()
if value is None:
value = ''
setattr(self.rtsnode, "chap_" + auth_attr, value)
def ui_command_enable(self):
'''
Enables the TPG.
SEE ALSO
========
B{disable status}
'''
self.assert_root()
if self.rtsnode.enable:
self.shell.log.info("The TPGT is already enabled.")
else:
try:
self.rtsnode.enable = True
self.shell.log.info("The TPGT has been enabled.")
except:
self.shell.log.error("The TPGT could not be enabled.")
def ui_command_disable(self):
'''
Disables the TPG.
SEE ALSO
========
B{enable status}
'''
self.assert_root()
if self.rtsnode.enable:
self.rtsnode.enable = False
self.shell.log.info("The TPGT has been disabled.")
else:
self.shell.log.info("The TPGT is already disabled.")
class UITarget(UITPG):
'''
A generic target UI merged with its only TPG.
'''
def __init__(self, target, parent):
super(UITarget, self).__init__(TPG(target, 1), parent)
self._name = target.wwn
self.target = target
if self.parent.name != "sbp":
self.rtsnode.enable = True
def summary(self):
try:
self.target.fabric_module.to_normalized_wwn(self.target.wwn)
except:
return ("INVALID WWN", False)
return super(UITarget, self).summary()
class UINodeACLs(UINode):
'''
A generic UI for node ACLs.
'''
def __init__(self, tpg, parent):
super(UINodeACLs, self).__init__("acls", parent)
self.tpg = tpg
self.cfs_cwd = "%s/acls" % tpg.path
self.refresh()
def refresh(self):
self._children = set([])
for name in self.all_names():
UINodeACL(name, self)
def summary(self):
return ("ACLs: %d" % len(self._children), None)
def ui_command_create(self, wwn, add_mapped_luns=None):
'''
Creates a Node ACL for the initiator node with the specified I{wwn}.
The node's I{wwn} must match the expected WWN Type of the target's
fabric module.
If I{add_mapped_luns} is omitted, the global parameter
B{auto_add_mapped_luns} will be used, else B{true} or B{false} are
accepted. If B{true}, then after creating the ACL, mapped LUNs will be
automatically created for all existing LUNs.
SEE ALSO
========
B{delete}
'''
self.assert_root()
add_mapped_luns = self.ui_eval_param(add_mapped_luns, 'bool',
self.shell.prefs['auto_add_mapped_luns'])
node_acl = NodeACL(self.tpg, wwn, mode="create")
ui_node_acl = UINodeACL(node_acl.node_wwn, self)
self.shell.log.info("Created Node ACL for %s" % node_acl.node_wwn)
if add_mapped_luns:
for lun in self.tpg.luns:
MappedLUN(node_acl, lun.lun, lun.lun, write_protect=False)
self.shell.log.info("Created mapped LUN %d." % lun.lun)
self.refresh()
return self.new_node(ui_node_acl)
def ui_command_delete(self, wwn):
'''
Deletes the Node ACL with the specified I{wwn}.
SEE ALSO
========
B{create}
'''
self.assert_root()
node_acl = NodeACL(self.tpg, wwn, mode='lookup')
node_acl.delete()
self.shell.log.info("Deleted Node ACL %s." % wwn)
self.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'wwn':
wwns = [acl.node_wwn for acl in self.tpg.node_acls]
completions = [wwn for wwn in wwns if wwn.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
def find_tagged(self, name):
for na in self.tpg.node_acls:
if na.node_wwn == name:
yield na
elif na.tag == name:
yield na
def all_names(self):
names = set([])
for na in self.tpg.node_acls:
if na.tag:
names.add(na.tag)
else:
names.add(na.node_wwn)
return names
def ui_command_tag(self, wwn_or_tag, new_tag):
'''
Tag a NodeACL.
Usage: tag <wwn_or_tag> <new_tag>
Tags help manage initiator WWNs. A tag can apply to one or
more WWNs. This can give a more meaningful name to a single
initiator's configuration, or allow multiple initiators with
identical settings to be configured en masse.
The WWNs described by <wwn_or_tag> will be given the new
tag. If new_tag already exists, its new members will adopt the
current tag's configuration.
Within a tag, the 'info' command shows the WWNs the tag applies to.
Use 'untag' to remove tags.
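        For example (the initiator WWNs below are purely illustrative):
          tag iqn.1994-05.com.example:host1 webfarm
          tag iqn.1994-05.com.example:host2 webfarm
        Both initiators then appear and are configured under the single
        'webfarm' node.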
NOTE: tags are only supported in kernel 3.8 and above.
'''
if wwn_or_tag == new_tag:
return
# Since all WWNs have a '.' in them, let's avoid confusion.
if '.' in new_tag:
self.shell.log.error("'.' not permitted in tag names.")
return
src = list(self.find_tagged(wwn_or_tag))
if not src:
self.shell.log.error("wwn_or_tag %s not found." % wwn_or_tag)
return
old_tag_members = list(self.find_tagged(new_tag))
# handle overlap
src_wwns = [na.node_wwn for na in src]
old_tag_members = [old for old in old_tag_members if old.node_wwn not in src_wwns]
for na in src:
na.tag = new_tag
# if joining a tag, take its config
if old_tag_members:
model = old_tag_members[0]
for mlun in na.mapped_luns:
mlun.delete()
for mlun in model.mapped_luns:
MappedLUN(na, mlun.mapped_lun, mlun.tpg_lun, mlun.write_protect)
if self.parent.rtsnode.has_feature("auth"):
for param in auth_params:
setattr(na, "chap_" + param, getattr(model, "chap_" + param))
for item in model.list_attributes(writable=True):
na.set_attribute(item, model.get_attribute(item))
for item in model.list_parameters(writable=True):
na.set_parameter(item, model.get_parameter(item))
self.refresh()
def ui_command_untag(self, wwn_or_tag):
'''
Untag a NodeACL.
Usage: untag <tag>
Remove the tag given to one or more initiator WWNs. They will
return to being displayed by WWN in the configuration tree, and
will maintain settings from when they were tagged.
'''
for na in list(self.find_tagged(wwn_or_tag)):
na.tag = None
self.refresh()
def ui_complete_tag(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command tag
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'wwn_or_tag':
completions = [n for n in self.all_names() if n.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
ui_complete_untag = ui_complete_tag
class UINodeACL(UIRTSLibNode):
'''
A generic UI for a node ACL.
Handles grouping multiple NodeACLs in UI via tags.
All gets are performed against first NodeACL.
All sets are performed on all NodeACLs.
This is to make management of multiple ACLs easier.
'''
def __init__(self, name, parent):
# Don't want to duplicate work in UIRTSLibNode, so call it but
# del self.rtsnode to make sure we always use self.rtsnodes.
self.rtsnodes = list(parent.find_tagged(name))
super(UINodeACL, self).__init__(name, self.rtsnodes[0], parent)
del self.rtsnode
if self.parent.parent.rtsnode.has_feature('auth'):
for parameter in auth_params:
self.define_config_group_param('auth', parameter, 'string')
self.refresh()
def ui_getgroup_auth(self, auth_attr):
'''
This is the backend method for getting auths attributes.
@param auth_attr: The auth attribute to get the value of.
@type auth_attr: str
@return: The auth attribute's value
@rtype: str
'''
# All should return same, so just return from the first one
return getattr(self.rtsnodes[0], "chap_" + auth_attr)
def ui_setgroup_auth(self, auth_attr, value):
'''
This is the backend method for setting auths attributes.
@param auth_attr: The auth attribute to set the value of.
@type auth_attr: str
@param value: The auth's value
@type value: str
'''
self.assert_root()
if value is None:
value = ''
for na in self.rtsnodes:
setattr(na, "chap_" + auth_attr, value)
def refresh(self):
self._children = set([])
for mlun in self.rtsnodes[0].mapped_luns:
UIMappedLUN(mlun, self)
def summary(self):
msg = []
if self.name != self.rtsnodes[0].node_wwn:
if len(self.rtsnodes) > 1:
msg.append("(group of %d) " % len(self.rtsnodes))
else:
msg.append("(%s) " % self.rtsnodes[0].node_wwn)
status = None
na = self.rtsnodes[0]
tpg = self.parent.parent.rtsnode
if tpg.has_feature("auth") and \
int(tpg.get_attribute("authentication")):
if int(tpg.get_attribute("generate_node_acls")):
msg.append("auth via tpg")
else:
status = True
if not (na.chap_password and na.chap_userid):
status = False
if na.authenticate_target:
msg.append("mutual auth")
else:
msg.append("1-way auth")
msg.append("Mapped LUNs: %d" % len(self._children))
return (", ".join(msg), status)
def ui_command_create(self, mapped_lun, tpg_lun_or_backstore, write_protect=None):
'''
Creates a mapping to one of the TPG LUNs for the initiator referenced
by the ACL. The provided I{tpg_lun_or_backstore} will appear to that
initiator as LUN I{mapped_lun}. If the I{write_protect} flag is set to
B{1}, the initiator will not have write access to the Mapped LUN.
A storage object may also be given for the I{tpg_lun_or_backstore} parameter,
in which case the TPG LUN will be created for that backstore before
mapping the LUN to the initiator. If a TPG LUN for the backstore already
exists, the Mapped LUN will map to that TPG LUN.
SEE ALSO
========
B{delete}
'''
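        # Illustrative calls (hypothetical names): "create 0 lun1" maps existing
        # TPG LUN 1 to this initiator as LUN 0, while
        # "create 1 /backstores/block/disk1" first creates a TPG LUN for that
        # storage object (if one does not already exist) and then maps it as LUN 1.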
self.assert_root()
try:
mapped_lun = int(mapped_lun)
except ValueError:
self.shell.log.error("mapped_lun must be an integer")
return
try:
if tpg_lun_or_backstore.startswith("lun"):
tpg_lun_or_backstore = tpg_lun_or_backstore[3:]
tpg_lun = int(tpg_lun_or_backstore)
except ValueError:
try:
so = self.get_node(tpg_lun_or_backstore).rtsnode
except ValueError:
self.shell.log.error("LUN or storage object not found")
return
ui_tpg = self.parent.parent
for lun in ui_tpg.rtsnode.luns:
if so == lun.storage_object:
tpg_lun = lun.lun
break
else:
lun_object = LUN(ui_tpg.rtsnode, storage_object=so)
self.shell.log.info("Created LUN %s." % lun_object.lun)
ui_lun = UILUN(lun_object, ui_tpg.get_node("luns"))
tpg_lun = ui_lun.rtsnode.lun
if tpg_lun in (ml.tpg_lun.lun for ml in self.rtsnodes[0].mapped_luns):
self.shell.log.warning(
"Warning: TPG LUN %d already mapped to this NodeACL" % tpg_lun)
for na in self.rtsnodes:
mlun = MappedLUN(na, mapped_lun, tpg_lun, write_protect)
ui_mlun = UIMappedLUN(mlun, self)
self.shell.log.info("Created Mapped LUN %s." % mlun.mapped_lun)
return self.new_node(ui_mlun)
def ui_complete_create(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command create.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'tpg_lun_or_backstore':
completions = []
for backstore in self.get_node('/backstores').children:
for storage_object in backstore.children:
completions.append(storage_object.path)
completions.extend(lun.name for lun in self.parent.parent.get_node("luns").children)
completions = [c for c in completions if c.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
def ui_command_delete(self, mapped_lun):
'''
Deletes the specified I{mapped_lun}.
SEE ALSO
========
B{create}
'''
self.assert_root()
for na in self.rtsnodes:
mlun = MappedLUN(na, mapped_lun)
mlun.delete()
self.shell.log.info("Deleted Mapped LUN %s." % mapped_lun)
self.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'mapped_lun':
mluns = [str(mlun.mapped_lun) for mlun in self.rtsnodes[0].mapped_luns]
completions = [mlun for mlun in mluns if mlun.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
# Override these four methods to handle multiple NodeACLs
def ui_getgroup_attribute(self, attribute):
return self.rtsnodes[0].get_attribute(attribute)
def ui_setgroup_attribute(self, attribute, value):
self.assert_root()
for na in self.rtsnodes:
na.set_attribute(attribute, value)
def ui_getgroup_parameter(self, parameter):
return self.rtsnodes[0].get_parameter(parameter)
def ui_setgroup_parameter(self, parameter, value):
self.assert_root()
for na in self.rtsnodes:
na.set_parameter(parameter, value)
def ui_command_info(self):
'''
        Since we don't have a self.rtsnode we can't use the base implementation
        of this method. We also do not want to print node_wwn; instead we list
        *all* wwns for this entry.
'''
info = self.rtsnodes[0].dump()
for item in ('attributes', 'parameters', "node_wwn"):
if item in info:
del info[item]
for name, value in sorted(info.iteritems()):
if not isinstance (value, (dict, list)):
self.shell.log.info("%s: %s" % (name, value))
self.shell.log.info("wwns:")
for na in self.parent.find_tagged(self.name):
self.shell.log.info(na.node_wwn)
class UIMappedLUN(UIRTSLibNode):
'''
A generic UI for MappedLUN objects.
'''
def __init__(self, mapped_lun, parent):
name = "mapped_lun%d" % mapped_lun.mapped_lun
super(UIMappedLUN, self).__init__(name, mapped_lun, parent)
self.cfs_cwd = mapped_lun.path
self.refresh()
def summary(self):
mapped_lun = self.rtsnode
is_healthy = True
try:
tpg_lun = mapped_lun.tpg_lun
except RTSLibBrokenLink:
description = "BROKEN LUN LINK"
is_healthy = False
else:
if mapped_lun.write_protect:
access_mode = 'ro'
else:
access_mode = 'rw'
description = "lun%d %s/%s (%s)" \
% (tpg_lun.lun, tpg_lun.storage_object.plugin,
tpg_lun.storage_object.name, access_mode)
return (description, is_healthy)
class UILUNs(UINode):
'''
A generic UI for TPG LUNs.
'''
def __init__(self, tpg, parent):
super(UILUNs, self).__init__("luns", parent)
self.cfs_cwd = "%s/lun" % tpg.path
self.tpg = tpg
self.refresh()
def refresh(self):
self._children = set([])
for lun in self.tpg.luns:
UILUN(lun, self)
def summary(self):
return ("LUNs: %d" % len(self._children), None)
def ui_command_create(self, storage_object, lun=None,
add_mapped_luns=None):
'''
Creates a new LUN in the Target Portal Group, attached to a storage
object. If the I{lun} parameter is omitted, the first available LUN in
the TPG will be used. If present, it must be a number greater than 0.
Alternatively, the syntax I{lunX} where I{X} is a positive number is
also accepted.
The I{storage_object} must be the path of an existing storage object,
i.e. B{/backstore/pscsi0/mydisk} to reference the B{mydisk} storage
object of the virtual HBA B{pscsi0}.
If I{add_mapped_luns} is omitted, the global parameter
B{auto_add_mapped_luns} will be used, else B{true} or B{false} are
accepted. If B{true}, then after creating the LUN, mapped LUNs will be
automatically created for all existing node ACLs, mapping the new LUN.
SEE ALSO
========
B{delete}
'''
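        # Illustrative calls (hypothetical storage object): "create
        # /backstores/fileio/disk1" attaches disk1 on the first free LUN, while
        # passing "3" (or "lun3") as the second argument forces LUN 3. With
        # auto_add_mapped_luns enabled, matching mapped LUNs are also created
        # under every existing node ACL.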
self.assert_root()
add_mapped_luns = \
self.ui_eval_param(add_mapped_luns, 'bool',
self.shell.prefs['auto_add_mapped_luns'])
try:
so = self.get_node(storage_object).rtsnode
except ValueError:
self.shell.log.error("Invalid storage object %s." % storage_object)
return
if so in (l.storage_object for l in self.parent.rtsnode.luns):
raise ExecutionError("lun for storage object %s already exists" \
% storage_object)
if lun and lun.lower().startswith('lun'):
lun = lun[3:]
lun_object = LUN(self.tpg, lun, so)
self.shell.log.info("Created LUN %s." % lun_object.lun)
ui_lun = UILUN(lun_object, self)
if add_mapped_luns:
for acl in self.tpg.node_acls:
if lun:
mapped_lun = lun
else:
mapped_lun = 0
existing_mluns = [mlun.mapped_lun for mlun in acl.mapped_luns]
if mapped_lun in existing_mluns:
mapped_lun = None
for possible_mlun in xrange(LUN.MAX_LUN):
if possible_mlun not in existing_mluns:
mapped_lun = possible_mlun
break
                if mapped_lun is None:
self.shell.log.warning(
"Cannot map new lun %s into ACL %s"
% (lun_object.lun, acl.node_wwn))
else:
mlun = MappedLUN(acl, mapped_lun, lun_object, write_protect=False)
self.shell.log.info("Created LUN %d->%d mapping in node ACL %s"
% (mlun.tpg_lun.lun, mlun.mapped_lun, acl.node_wwn))
self.parent.refresh()
return self.new_node(ui_lun)
def ui_complete_create(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command create.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'storage_object':
storage_objects = []
for backstore in self.get_node('/backstores').children:
for storage_object in backstore.children:
storage_objects.append(storage_object.path)
completions = [so for so in storage_objects if so.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
def ui_command_delete(self, lun):
'''
Deletes the supplied LUN from the Target Portal Group. The I{lun} must
be a positive number matching an existing LUN.
Alternatively, the syntax I{lunX} where I{X} is a positive number is
also accepted.
SEE ALSO
========
B{create}
'''
self.assert_root()
if lun.lower().startswith("lun"):
lun = lun[3:]
try:
lun_object = LUN(self.tpg, lun)
except:
raise RTSLibError("Invalid LUN")
lun_object.delete()
self.shell.log.info("Deleted LUN %s." % lun)
# Refresh the TPG as we need to also refresh acls MappedLUNs
self.parent.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'lun':
luns = [str(lun.lun) for lun in self.tpg.luns]
completions = [lun for lun in luns if lun.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
class UILUN(UIRTSLibNode):
'''
A generic UI for LUN objects.
'''
def __init__(self, lun, parent):
name = "lun%d" % lun.lun
super(UILUN, self).__init__(name, lun, parent)
self.cfs_cwd = lun.path
self.refresh()
def summary(self):
lun = self.rtsnode
is_healthy = True
try:
storage_object = lun.storage_object
except RTSLibBrokenLink:
description = "BROKEN STORAGE LINK"
is_healthy = False
else:
if storage_object.plugin == "ramdisk":
description = "%s/%s" % (storage_object.plugin, storage_object.name,)
else:
description = "%s/%s (%s)" % (storage_object.plugin,
storage_object.name,
storage_object.udev_path)
return (description, is_healthy)
class UIPortals(UINode):
'''
A generic UI for TPG network portals.
'''
def __init__(self, tpg, parent):
super(UIPortals, self).__init__("portals", parent)
self.tpg = tpg
self.cfs_cwd = "%s/np" % tpg.path
self.refresh()
def refresh(self):
self._children = set([])
for portal in self.tpg.network_portals:
UIPortal(portal, self)
def summary(self):
return ("Portals: %d" % len(self._children), None)
def _canonicalize_ip(self, ip_address):
"""
rtslib expects ipv4 addresses as a dotted-quad string, and IPv6
addresses surrounded by brackets.
"""
# Contains a '.'? Must be ipv4, right?
if "." in ip_address:
return ip_address
return "[" + ip_address + "]"
def ui_command_create(self, ip_address=None, ip_port=None):
'''
Creates a Network Portal with specified I{ip_address} and
I{ip_port}. If I{ip_port} is omitted, the default port for
the target fabric will be used. If I{ip_address} is omitted,
INADDR_ANY (0.0.0.0) will be used.
Choosing IN6ADDR_ANY (::0) will listen on all IPv6 interfaces
as well as IPv4, assuming IPV6_V6ONLY sockopt has not been
set.
Note: Portals on Link-local IPv6 addresses are currently not
supported.
SEE ALSO
========
B{delete}
'''
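        # Illustrative calls: "create 192.168.0.10 3260" (hypothetical address)
        # listens on a single interface, while a bare "create" falls back to
        # 0.0.0.0 on the default port 3260.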
self.assert_root()
# FIXME: Add a specfile parameter to determine default port
ip_port = self.ui_eval_param(ip_port, 'number', 3260)
ip_address = self.ui_eval_param(ip_address, 'string', "0.0.0.0")
if ip_port == 3260:
self.shell.log.info("Using default IP port %d" % ip_port)
if ip_address == "0.0.0.0":
self.shell.log.info("Binding to INADDR_ANY (0.0.0.0)")
portal = NetworkPortal(self.tpg, self._canonicalize_ip(ip_address),
ip_port, mode='create')
self.shell.log.info("Created network portal %s:%d."
% (ip_address, ip_port))
ui_portal = UIPortal(portal, self)
return self.new_node(ui_portal)
def ui_complete_create(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command create.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
def list_eth_ips():
if not ethtool:
return []
devcfgs = ethtool.get_interfaces_info(ethtool.get_devices())
addrs = set()
for d in devcfgs:
if d.ipv4_address:
addrs.add(d.ipv4_address)
addrs.add("0.0.0.0")
for ip6 in d.get_ipv6_addresses():
addrs.add(ip6.address)
addrs.add("::0") # only list ::0 if ipv6 present
return sorted(addrs)
if current_param == 'ip_address':
completions = [addr for addr in list_eth_ips()
if addr.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
def ui_command_delete(self, ip_address, ip_port):
'''
Deletes the Network Portal with specified I{ip_address} and I{ip_port}.
SEE ALSO
========
B{create}
'''
self.assert_root()
portal = NetworkPortal(self.tpg, self._canonicalize_ip(ip_address),
ip_port, mode='lookup')
portal.delete()
self.shell.log.info("Deleted network portal %s:%s"
% (ip_address, ip_port))
self.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
completions = []
        # TODO: Check if a dict comprehension is acceptable here with the
        # supported python versions.
portals = {}
all_ports = set([])
for portal in self.tpg.network_portals:
all_ports.add(str(portal.port))
portal_ip = portal.ip_address.strip('[]')
            if portal_ip not in portals:
portals[portal_ip] = []
portals[portal_ip].append(str(portal.port))
if current_param == 'ip_address':
completions = [addr for addr in portals if addr.startswith(text)]
if 'ip_port' in parameters:
port = parameters['ip_port']
completions = [addr for addr in completions
if port in portals[addr]]
elif current_param == 'ip_port':
if 'ip_address' in parameters:
addr = parameters['ip_address']
if addr in portals:
completions = [port for port in portals[addr]
if port.startswith(text)]
else:
completions = [port for port in all_ports
if port.startswith(text)]
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
class UIPortal(UIRTSLibNode):
'''
A generic UI for a network portal.
'''
def __init__(self, portal, parent):
name = "%s:%s" % (portal.ip_address, portal.port)
super(UIPortal, self).__init__(name, portal, parent)
self.cfs_cwd = portal.path
self.refresh()
def summary(self):
if self.rtsnode.iser:
return('iser', True)
return ('', True)
def ui_command_enable_iser(self, boolean):
'''
Enables or disables iSER for this NetworkPortal.
If iSER is not supported by the kernel, this command will do nothing.
'''
boolean = self.ui_eval_param(boolean, 'bool', False)
self.rtsnode.iser = boolean
        self.shell.log.info("iSER enabled: %s" % self.rtsnode.iser)
| 34.629684 | 96 | 0.575927 | ["Apache-2.0"] | JonnyJD/targetcli | targetcli/ui_target.py | 47,131 | Python |
"""
Django settings for the Sphinx documentation builder.
All configuration is imported from :mod:`backend.settings` except it sets :attr:`USE_I18N` to ``False`` to make sure
the documentation is not partially translated.
For more information on this file, see :doc:`topics/settings`.
For the full list of settings and their values, see :doc:`ref/settings`.
"""
# pylint: disable=wildcard-import
# pylint: disable=unused-wildcard-import
from .settings import *
#: A boolean that specifies whether Django’s translation system should be enabled
#: (see :setting:`django:USE_I18N` and :doc:`topics/i18n/index`)
USE_I18N = False
# Remove cacheops during documentation build because it changes related names
if "cacheops" in INSTALLED_APPS:
INSTALLED_APPS.remove("cacheops")
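# A documentation build would select this module instead of the regular settings,
# e.g. (hypothetical invocation):
#   DJANGO_SETTINGS_MODULE=backend.sphinx_settings sphinx-build docs docs/dist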
| 38.85 | 116 | 0.773488 | ["Apache-2.0"] | Integreat/integreat-cms | src/backend/sphinx_settings.py | 779 | Python |
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2013-2017 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function, division, absolute_import
from .base import VerticaPythonTestCase
class UnicodeTestCase(VerticaPythonTestCase):
def test_unicode_query(self):
value = u'\u16a0'
query = u"SELECT '{0}'".format(value)
with self._connect() as conn:
cur = conn.cursor()
cur.execute(query)
res = cur.fetchone()
self.assertResultEqual(value, res[0])
def test_unicode_list_parameter(self):
values = [u'\u00f1', 'foo', 3]
query = u"SELECT {0}".format(", ".join(["%s"] * len(values)))
with self._connect() as conn:
cur = conn.cursor()
cur.execute(query, tuple(values))
results = cur.fetchone()
for val, res in zip(values, results):
self.assertResultEqual(val, res)
def test_unicode_named_parameter_binding(self):
values = [u'\u16b1', 'foo', 3]
keys = [u'\u16a0', 'foo', 3]
query = u"SELECT {0}".format(", ".join([u":{0}".format(key) for key in keys]))
with self._connect() as conn:
cur = conn.cursor()
cur.execute(query, dict(zip(keys, values)))
results = cur.fetchone()
for val, res in zip(values, results):
self.assertResultEqual(val, res)
def test_string_query(self):
value = u'test'
query = u"SELECT '{0}'".format(value)
with self._connect() as conn:
cur = conn.cursor()
cur.execute(query)
res = cur.fetchone()
self.assertEqual(value, res[0])
def test_string_named_parameter_binding(self):
key = u'test'
value = u'value'
query = u"SELECT :{0}".format(key)
with self._connect() as conn:
cur = conn.cursor()
cur.execute(query, {key: value})
res = cur.fetchone()
self.assertResultEqual(value, res[0])
# unit test for issue #160
def test_null_named_parameter_binding(self):
key = u'test'
value = None
query = u"SELECT :{0}".format(key)
with self._connect() as conn:
cur = conn.cursor()
cur.execute(query, {key: value})
res = cur.fetchone()
self.assertResultEqual(value, res[0])
# unit test for issue #160
def test_null_list_parameter(self):
values = [u'\u00f1', 'foo', None]
query = u"SELECT {0}".format(", ".join(["%s"] * len(values)))
with self._connect() as conn:
cur = conn.cursor()
cur.execute(query, tuple(values))
results = cur.fetchone()
for val, res in zip(values, results):
self.assertResultEqual(val, res)
| 35.111111 | 86 | 0.641727 | ["Apache-2.0"] | masrouge/vertica-python | vertica_python/tests/test_unicode.py | 4,424 | Python |
import pygame # Tested with pygame v1.9.6
from UIControls import Button
from constants import *
import numpy as np
import random
import time
import os
from nodes import bfs_node
import sys
import threading
###############################################
# Globals
###############################################
initial_cell_row = 0
initial_cell_col = 0
initial_cell_dragging = False
terminal_cell_row = ROWS - 1
terminal_cell_col = COLS - 1
terminal_cell_dragging = False
grid = np.ndarray((COLS, ROWS), np.int8)
screen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
clear_button = Button((BUTTON_WIDTH * 0),
BUTTON_STRIP_TOP,
BUTTON_WIDTH,
BUTTON_STRIP_HEIGHT,
CLEAR_BUTTON_LABEL)
create_button = Button((BUTTON_WIDTH * 1),
BUTTON_STRIP_TOP,
BUTTON_WIDTH,
BUTTON_STRIP_HEIGHT,
CREATE_BUTTON_LABEL)
dfs_button = Button((BUTTON_WIDTH * 2),
BUTTON_STRIP_TOP,
BUTTON_WIDTH,
BUTTON_STRIP_HEIGHT,
DFS_BUTTON_LABEL)
bfs_button = Button((BUTTON_WIDTH * 3),
BUTTON_STRIP_TOP,
BUTTON_WIDTH,
BUTTON_STRIP_HEIGHT,
BFS_BUTTON_LABEL)
quit_button = Button((BUTTON_WIDTH * 4),
BUTTON_STRIP_TOP,
BUTTON_WIDTH,
BUTTON_STRIP_HEIGHT,
QUIT_BUTTON_LABEL)
processing = False
###############################################
# initialise()
###############################################
def initialise():
global processing
processing = True
# Set all cells to EMPTY by default
for col in range(COLS):
for row in range(ROWS):
grid[col, row] = EMPTY
# Set the Initial and Terminal cells
grid[initial_cell_col, initial_cell_row] = INITIAL
grid[terminal_cell_col, terminal_cell_row] = TERMINAL
# print(grid)
processing = False
###############################################
# create_ui()
###############################################
def create_ui():
screen.fill(BLACK)
clear_button.draw(screen)
create_button.draw(screen)
dfs_button.draw(screen)
bfs_button.draw(screen)
quit_button.draw(screen)
draw_grid()
###############################################
# draw_grid()
###############################################
def draw_grid():
for col in range(COLS):
for row in range(ROWS):
# Only set the Initial cell if we are NOT dragging
if (grid[col, row] == INITIAL and not initial_cell_dragging):
draw_cell(INITIAL_CELL_COLOR, col, row)
# Only set the Terminal cell if we are NOT dragging
elif (grid[col, row] == TERMINAL and not terminal_cell_dragging):
draw_cell(TERMINAL_CELL_COLOR, col, row)
elif (grid[col, row] == WALL):
draw_cell(WALL_CELL_COLOR, col, row)
elif (grid[col, row] == VISITED):
draw_cell(VISITED_CELL_COLOR, col, row)
elif (grid[col, row] == PATH):
draw_cell(PATH_CELL_COLOR, col, row)
else: # (grid[col, row] == EMPTY)
draw_cell(EMPTY_CELL_COLOR, col, row)
if (initial_cell_dragging):
(mouse_x, mouse_y) = pygame.mouse.get_pos()
cell_col = int(mouse_x / CELL_WIDTH)
cell_row = int(mouse_y / CELL_HEIGHT)
        # Check that the current mouse-pointer position for the dragging
        # motion is actually on the board
if (valid_cell(cell_col, cell_row)):
draw_cell(INITIAL_CELL_COLOR,
cell_col,
cell_row)
elif (terminal_cell_dragging):
(mouse_x, mouse_y) = pygame.mouse.get_pos()
cell_col = int(mouse_x / CELL_WIDTH)
cell_row = int(mouse_y / CELL_HEIGHT)
        # Check that the current mouse-pointer position for the dragging motion
        # is actually on the board
if (valid_cell(cell_col, cell_row)):
draw_cell(TERMINAL_CELL_COLOR, cell_col, cell_row)
###############################################
# game_loop()
###############################################
def game_loop():
game_exit = False
clock = pygame.time.Clock()
global initial_cell_row
global initial_cell_col
global initial_cell_dragging
global terminal_cell_row
global terminal_cell_col
global terminal_cell_dragging
while not game_exit:
for event in pygame.event.get():
if (event.type == pygame.QUIT) and (not processing):
game_exit = True
elif (event.type == pygame.MOUSEBUTTONDOWN) and (not processing):
(mouse_x, mouse_y) = pygame.mouse.get_pos()
cell_col = int(mouse_x / CELL_WIDTH)
cell_row = int(mouse_y / CELL_HEIGHT)
if (valid_cell(cell_col, cell_row)):
if (grid[cell_col, cell_row] == INITIAL):
# Set the flag for dragging the Initial cell
initial_cell_dragging = True
elif (grid[cell_col, cell_row] == TERMINAL):
# Set the flag for dragging the Terminal cell
terminal_cell_dragging = True
elif (not (initial_cell_dragging
or terminal_cell_dragging)):
# Otherwise, if we have clicked with mouse and
# we are not dragging anything, toggle
# the current cell between EMPTY and WALL
if (grid[cell_col, cell_row] == WALL):
grid[cell_col, cell_row] = EMPTY
elif (grid[cell_col, cell_row] == EMPTY):
grid[cell_col, cell_row] = WALL
elif (event.type == pygame.MOUSEBUTTONUP) and (not processing):
if clear_button.is_over(mouse_x, mouse_y):
thread = threading.Thread(target=initialise,
args=())
thread.start()
elif create_button.is_over(mouse_x, mouse_y):
thread = threading.Thread(target=create_maze,
args=())
thread.start()
elif dfs_button.is_over(mouse_x, mouse_y):
thread = threading.Thread(target=depth_first_search,
args=())
thread.start()
elif bfs_button.is_over(mouse_x, mouse_y):
thread = threading.Thread(target=breadth_first_search,
args=())
thread.start()
elif quit_button.is_over(mouse_x, mouse_y):
game_exit = True
elif initial_cell_dragging:
(mouse_x, mouse_y) = pygame.mouse.get_pos()
cell_col = int(mouse_x / CELL_WIDTH)
cell_row = int(mouse_y / CELL_HEIGHT)
# Make sure we have not dragged the
# Initial cell off the screen
if (valid_cell(cell_col, cell_row)):
# Also make sure we aren't trying to drag Initial
# cell on top of Terminal cell
if (not((cell_col == terminal_cell_col) and
(cell_row == terminal_cell_row))):
grid[initial_cell_col, initial_cell_row] = EMPTY
initial_cell_col = cell_col
initial_cell_row = cell_row
grid[initial_cell_col, initial_cell_row] = INITIAL
# Whatever happens, cancel the dragging flag
initial_cell_dragging = False
elif terminal_cell_dragging:
(mouse_x, mouse_y) = pygame.mouse.get_pos()
cell_col = int(mouse_x / CELL_WIDTH)
cell_row = int(mouse_y / CELL_HEIGHT)
# Make sure we have not dragged the
# Terminal cell off the screen
if (valid_cell(cell_col, cell_row)):
# Also make sure we aren't trying to drag Terminal
# cell on top of Initial cell
if (not((cell_col == initial_cell_col) and
(cell_row == initial_cell_row))):
grid[terminal_cell_col, terminal_cell_row] = EMPTY
terminal_cell_col = cell_col
terminal_cell_row = cell_row
grid[terminal_cell_col, terminal_cell_row] = TERMINAL
# Whatever happens, cancel the dragging flag
terminal_cell_dragging = False
draw_grid()
pygame.display.update()
clock.tick(CLOCK_TICK)
pygame.quit()
###############################################
# create_maze()
###############################################
def create_maze():
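    # Recursive-division sketch: divide() splits the current region with one
    # vertical and one horizontal wall and recurses into the four sub-regions;
    # make_holes() then opens a gap in three of the four wall segments so that
    # every chamber remains reachable.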
###############################################
# make_holes()
###############################################
def make_holes(col1, row1, col2, row2, vertical, horizontal):
# print(f"\tmake_holes({col1},
# {row1},
# {col2},
# {row2},
# {vertical},
# {horizontal})")
all_lists = []
list = []
for row in range(row1, horizontal):
if (has_horizontal_empty(vertical, row)):
list.append((vertical, row))
if (len(list) > 0):
all_lists.append(list)
list = []
for row in range(horizontal + 1, row2):
if (has_horizontal_empty(vertical, row)):
list.append((vertical, row))
if (len(list) > 0):
all_lists.append(list)
list = []
for col in range(col1, vertical):
if (has_vertical_empty(col, horizontal)):
list.append((col, horizontal))
if (len(list) > 0):
all_lists.append(list)
list = []
for col in range(vertical + 1, col2):
if (has_vertical_empty(col, horizontal)):
list.append((col, horizontal))
if (len(list) > 0):
all_lists.append(list)
if (len(all_lists) == 4):
item_index_to_remove = random.randint(0, 3)
del (all_lists[item_index_to_remove])
for sub_list in all_lists:
(hole_col, hole_row) = sub_list[
random.randint(0, len(sub_list) - 1)]
draw_cell(EMPTY_CELL_COLOR, hole_col, hole_row)
grid[hole_col, hole_row] = EMPTY
###############################################
# divide()
###############################################
def divide(col1, row1, col2, row2):
# print(f"divide({col1}, {row1}, {col2}, {row2})")
vertical = col2
if ((col2 - col1) > 2):
vertical = int(((col2 - col1) / 2) + col1)
for row in range(row1, row2):
draw_cell(WALL_CELL_COLOR, vertical, row)
grid[vertical, row] = WALL
horizontal = row2
if ((row2 - row1) > 2):
horizontal = int(((row2 - row1) / 2) + row1)
for col in range(col1, col2):
draw_cell(WALL_CELL_COLOR, col, horizontal)
grid[col, horizontal] = WALL
# top-left
new_col1 = col1
new_row1 = row1
new_col2 = vertical
new_row2 = horizontal
if (((new_col2 - new_col1) > 2) or ((new_row2 - new_row1) > 2)):
(new_vertical, new_horizontal) = divide(new_col1,
new_row1,
new_col2,
new_row2)
make_holes(new_col1,
new_row1,
new_col2,
new_row2,
new_vertical,
new_horizontal)
# top-right
new_col1 = vertical + 1
new_row1 = row1
new_col2 = col2
new_row2 = horizontal
if (((new_col2 - new_col1) > 2) or ((new_row2 - new_row1) > 2)):
(new_vertical, new_horizontal) = divide(new_col1,
new_row1,
new_col2,
new_row2)
make_holes(new_col1,
new_row1,
new_col2,
new_row2,
new_vertical,
new_horizontal)
# bottom-left
new_col1 = col1
new_row1 = horizontal + 1
new_col2 = vertical
new_row2 = row2
if (((new_col2 - new_col1) > 2) or ((new_row2 - new_row1) > 2)):
(new_vertical, new_horizontal) = divide(new_col1,
new_row1,
new_col2,
new_row2)
make_holes(new_col1,
new_row1,
new_col2,
new_row2,
new_vertical,
new_horizontal)
# bottom-right
new_col1 = vertical + 1
new_row1 = horizontal + 1
new_col2 = col2
new_row2 = row2
if (((new_col2 - new_col1) > 2) or ((new_row2 - new_row1) > 2)):
(new_vertical, new_horizontal) = divide(new_col1,
new_row1,
new_col2,
new_row2)
make_holes(new_col1,
new_row1,
new_col2,
new_row2,
new_vertical,
new_horizontal)
time.sleep(SMALL_SLEEP)
pygame.display.update()
return (vertical, horizontal)
global processing
processing = True
initialise()
draw_grid()
(new_vertical, new_horizontal) = divide(0, 0, COLS, ROWS)
make_holes(0, 0, COLS, ROWS, new_vertical, new_horizontal)
grid[initial_cell_col, initial_cell_row] = INITIAL
grid[terminal_cell_col, terminal_cell_row] = TERMINAL
processing = False
###############################################
# has_horizontal_neighbours()
###############################################
def has_horizontal_neighbours(col, row, cell_types):
left_col = col - 1
right_col = col + 1
if (left_col >= 0) and (right_col < COLS):
return ((grid[left_col, row] in cell_types) and
(grid[right_col, row] in cell_types))
return False
###############################################
# has_vertical_neighbours()
###############################################
def has_vertical_neighbours(col, row, cell_types):
above_row = row - 1
below_row = row + 1
if (above_row >= 0) and (below_row < ROWS):
return ((grid[col, above_row] in cell_types) and
(grid[col, below_row] in cell_types))
return False
###############################################
# has_horizontal_empty()
###############################################
def has_horizontal_empty(col, row):
return has_horizontal_neighbours(col, row, [EMPTY, INITIAL, TERMINAL])
###############################################
# has_vertical_empty()
###############################################
def has_vertical_empty(col, row):
return has_vertical_neighbours(col, row, [EMPTY, INITIAL, TERMINAL])
###############################################
# reset_maze()
###############################################
def reset_maze():
"""Resets any cells that are VISITED or PATH to EMPTY again, so that we
can commence a search on a potentially partially completed board"""
for col in range(COLS):
for row in range(ROWS):
grid[col, row] = EMPTY if (grid[col, row] in [VISITED, PATH]) else grid[col, row]
###############################################
# valid_cell()
###############################################
def valid_cell(col, row):
return ((col >= 0) and (row >= 0) and (col < COLS) and (row < ROWS))
###############################################
# depth_first_search()
###############################################
def depth_first_search():
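    # Recursion sketch: search() paints the current cell PATH, tries its four
    # neighbours via check(), and demotes the cell to VISITED once every branch
    # dead-ends, unwinding until the TERMINAL cell is reached.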
###############################################
# check()
###############################################
def check(col, row):
if (valid_cell(col, row)):
if (search(col, row)):
return True
return False
###############################################
# search()
###############################################
def search(col, row):
print(f"search({col}, {row})")
pygame.display.update()
# time.sleep(SMALL_SLEEP)
if (grid[col, row] == TERMINAL):
return True
if (grid[col, row] in [WALL, VISITED, PATH]):
return False
if (grid[col, row] != INITIAL):
grid[col, row] = PATH
draw_cell(PATH_CELL_COLOR, col, row)
if (check(col - 1, row)):
return True
if (check(col + 1, row)):
return True
if (check(col, row - 1)):
return True
if (check(col, row + 1)):
return True
grid[col, row] = VISITED
draw_cell(VISITED_CELL_COLOR, col, row)
return False
global processing
processing = True
reset_maze()
draw_grid()
if (check(initial_cell_col - 1, initial_cell_row)):
processing = False
return
if (check(initial_cell_col + 1, initial_cell_row)):
processing = False
return
if (check(initial_cell_col, initial_cell_row - 1)):
processing = False
return
if (check(initial_cell_col, initial_cell_row + 1)):
processing = False
return
processing = False
###############################################
# breadth_first_search()
###############################################
def breadth_first_search():
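    # Frontier sketch: each call to search() expands one ring of bfs_node
    # objects; when check() reaches the TERMINAL cell it follows the parent
    # pointers back to the start and repaints that chain as the final PATH.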
###############################################
# search()
###############################################
def search(nodes):
###############################################
# check()
###############################################
def check(next_col, next_row, sub_nodes):
if (valid_cell(next_col, next_row)):
if (grid[next_col, next_row] == TERMINAL):
backtrack_node = node
while (backtrack_node is not None):
if (backtrack_node.get_parent() is not None):
grid[backtrack_node.get_col(),
backtrack_node.get_row()] = PATH
draw_cell(PATH_CELL_COLOR,
backtrack_node.get_col(),
backtrack_node.get_row())
pygame.display.update()
backtrack_node = backtrack_node.get_parent()
return True
elif ((grid[next_col, next_row] != WALL) and
(grid[next_col, next_row] != VISITED) and
(grid[next_col, next_row] != INITIAL)):
grid[next_col, next_row] = VISITED
draw_cell(VISITED_CELL_COLOR, next_col, next_row)
pygame.display.update()
child_node = bfs_node(next_col, next_row, node)
sub_nodes.append(child_node)
return False
pygame.display.update()
time.sleep(SMALL_SLEEP)
sub_nodes = []
for node in nodes:
# print(f"\tNode at ({node.get_col()}, {node.get_row()})")
if (check(node.get_col() - 1, node.get_row(), sub_nodes)):
return
if (check(node.get_col() + 1, node.get_row(), sub_nodes)):
return
if (check(node.get_col(), node.get_row() + 1, sub_nodes)):
return
if (check(node.get_col(), node.get_row() - 1, sub_nodes)):
return
        if (len(sub_nodes) > 0):
return search(sub_nodes)
else:
return False
global processing
processing = True
reset_maze()
draw_grid()
nodes = []
nodes.append(bfs_node(initial_cell_col, initial_cell_row, None))
search(nodes)
processing = False
###############################################
# draw_cell()
###############################################
def draw_cell(color, col, row):
pygame.draw.rect(screen,
color,
(col * CELL_WIDTH,
row * CELL_HEIGHT,
CELL_WIDTH,
CELL_HEIGHT),
0)
###############################################
# main()
###############################################
def main():
    # Raise the recursion limit (the depth-first search recurses heavily)
sys.setrecursionlimit(10 ** 6)
pygame.init()
pygame.display.set_caption("Maze")
initialise()
create_ui()
game_loop()
###############################################
# Startup
###############################################
if __name__ == "__main__":
main()
| 33.792049 | 93 | 0.462081 | ["MIT"] | James-P-D/Maze | src/MazePy/MazePy/MazePy.py | 22,100 | Python |
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class BankCardOpenUrl(TLObject): # type: ignore
"""This object is a constructor of the base type :obj:`~pyrogram.raw.base.BankCardOpenUrl`.
Details:
- Layer: ``123``
- ID: ``0xf568028a``
Parameters:
url: ``str``
name: ``str``
"""
__slots__: List[str] = ["url", "name"]
ID = 0xf568028a
QUALNAME = "types.BankCardOpenUrl"
def __init__(self, *, url: str, name: str) -> None:
self.url = url # string
self.name = name # string
@staticmethod
def read(data: BytesIO, *args: Any) -> "BankCardOpenUrl":
# No flags
url = String.read(data)
name = String.read(data)
return BankCardOpenUrl(url=url, name=name)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
data.write(String(self.url))
data.write(String(self.name))
return data.getvalue()
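# Round-trip sketch (hypothetical values): write() prepends the 4-byte constructor
# ID, which read() expects to have been consumed already, so skip it when parsing back:
#   card = BankCardOpenUrl(url="https://bank.example/card", name="Example Bank")
#   parsed = BankCardOpenUrl.read(BytesIO(card.write()[4:]))
#   assert (parsed.url, parsed.name) == (card.url, card.name)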
| 29.88 | 103 | 0.621151 | ["Apache-2.0"] | appheap/social-media-analyzer | backend/pyrogram/raw/types/bank_card_open_url.py | 2,241 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'ListManagedClusterAdminCredentialsResult',
'AwaitableListManagedClusterAdminCredentialsResult',
'list_managed_cluster_admin_credentials',
]
@pulumi.output_type
class ListManagedClusterAdminCredentialsResult:
"""
The list of credential result response.
"""
def __init__(__self__, kubeconfigs=None):
if kubeconfigs and not isinstance(kubeconfigs, list):
raise TypeError("Expected argument 'kubeconfigs' to be a list")
pulumi.set(__self__, "kubeconfigs", kubeconfigs)
@property
@pulumi.getter
def kubeconfigs(self) -> Sequence['outputs.CredentialResultResponseResult']:
"""
Base64-encoded Kubernetes configuration file.
"""
return pulumi.get(self, "kubeconfigs")
class AwaitableListManagedClusterAdminCredentialsResult(ListManagedClusterAdminCredentialsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListManagedClusterAdminCredentialsResult(
kubeconfigs=self.kubeconfigs)
def list_managed_cluster_admin_credentials(resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListManagedClusterAdminCredentialsResult:
"""
The list of credential result response.
:param str resource_group_name: The name of the resource group.
:param str resource_name: The name of the managed cluster resource.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:containerservice/v20200301:listManagedClusterAdminCredentials', __args__, opts=opts, typ=ListManagedClusterAdminCredentialsResult).value
return AwaitableListManagedClusterAdminCredentialsResult(
kubeconfigs=__ret__.kubeconfigs)
| 37.074627 | 186 | 0.714171 | ["Apache-2.0"] | pulumi-bot/pulumi-azure-native | sdk/python/pulumi_azure_native/containerservice/v20200301/list_managed_cluster_admin_credentials.py | 2,484 | Python |
from typing import List, Tuple
from ....source_shared.base import Base
from ....utilities.byte_io_mdl import ByteIO
class StudioTrivert(Base):
def __init__(self):
self.vertex_index = 0
self.normal_index = 0
self.uv = []
def read(self, reader: ByteIO):
self.vertex_index = reader.read_uint16()
self.normal_index = reader.read_uint16()
self.uv = [reader.read_uint16(), reader.read_uint16()]
class StudioMesh(Base):
def __init__(self):
self.triangle_count = 0
self.triangle_offset = 0
self.skin_ref = 0
self.normal_count = 0
self.normal_offset = 0
self.triangles: List[StudioTrivert] = []
def read(self, reader: ByteIO):
(self.triangle_count, self.triangle_offset,
self.skin_ref,
self.normal_count, self.normal_offset) = reader.read_fmt('5i')
with reader.save_current_pos():
reader.seek(self.triangle_offset)
for _ in range(self.triangle_count * 3):
trivert = StudioTrivert()
trivert.read(reader)
self.triangles.append(trivert)
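# Usage sketch (the ByteIO reader is assumed to come from the surrounding loader):
#   mesh = StudioMesh()
#   mesh.read(reader)
#   first_face = mesh.triangles[:3]  # triverts are stored three per triangle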
| 30.263158 | 71 | 0.624348 | ["MIT"] | syborg64/SourceIO | goldsrc/mdl_v6/structs/mesh.py | 1,150 | Python |
"""
===================================
Demo of OPTICS clustering algorithm
===================================
.. currentmodule:: sklearn
Finds core samples of high density and expands clusters from them.
This example uses data that is generated so that the clusters have
different densities.
The :class:`~cluster.OPTICS` is first used with its Xi cluster detection
method, and then setting specific thresholds on the reachability, which
corresponds to :class:`~cluster.DBSCAN`. We can see that the different
clusters of OPTICS's Xi method can be recovered with different choices of
thresholds in DBSCAN.
"""
# Authors: Shane Grigsby <[email protected]>
# Adrin Jalali <[email protected]>
# License: BSD 3 clause
from sklearn.cluster import OPTICS, cluster_optics_dbscan
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
# Generate sample data
np.random.seed(0)
n_points_per_cluster = 250
C1 = [-5, -2] + .8 * np.random.randn(n_points_per_cluster, 2)
C2 = [4, -1] + .1 * np.random.randn(n_points_per_cluster, 2)
C3 = [1, -2] + .2 * np.random.randn(n_points_per_cluster, 2)
C4 = [-2, 3] + .3 * np.random.randn(n_points_per_cluster, 2)
C5 = [3, -2] + 1.6 * np.random.randn(n_points_per_cluster, 2)
C6 = [5, 6] + 2 * np.random.randn(n_points_per_cluster, 2)
X = np.vstack((C1, C2, C3, C4, C5, C6))
clust = OPTICS(min_samples=50, xi=.05, min_cluster_size=.05)
# Run the fit
clust.fit(X)
labels_050 = cluster_optics_dbscan(reachability=clust.reachability_,
core_distances=clust.core_distances_,
ordering=clust.ordering_, eps=0.5)
labels_200 = cluster_optics_dbscan(reachability=clust.reachability_,
core_distances=clust.core_distances_,
ordering=clust.ordering_, eps=2)
space = np.arange(len(X))
reachability = clust.reachability_[clust.ordering_]
labels = clust.labels_[clust.ordering_]
plt.figure(figsize=(10, 7))
G = gridspec.GridSpec(2, 3)
ax1 = plt.subplot(G[0, :])
ax2 = plt.subplot(G[1, 0])
ax3 = plt.subplot(G[1, 1])
ax4 = plt.subplot(G[1, 2])
# Reachability plot
colors = ['g.', 'r.', 'b.', 'y.', 'c.']
for klass, color in zip(range(5), colors):
Xk = space[labels == klass]
Rk = reachability[labels == klass]
ax1.plot(Xk, Rk, color, alpha=0.3)
ax1.plot(space[labels == -1], reachability[labels == -1], 'k.', alpha=0.3)
ax1.plot(space, np.full_like(space, 2., dtype=float), 'k-', alpha=0.5)
ax1.plot(space, np.full_like(space, 0.5, dtype=float), 'k-.', alpha=0.5)
ax1.set_ylabel('Reachability (epsilon distance)')
ax1.set_title('Reachability Plot')
# OPTICS
colors = ['g.', 'r.', 'b.', 'y.', 'c.']
for klass, color in zip(range(5), colors):
Xk = X[clust.labels_ == klass]
ax2.plot(Xk[:, 0], Xk[:, 1], color, alpha=0.3)
ax2.plot(X[clust.labels_ == -1, 0], X[clust.labels_ == -1, 1], 'k+', alpha=0.1)
ax2.set_title('Automatic Clustering\nOPTICS')
# DBSCAN at 0.5
colors = ['g', 'greenyellow', 'olive', 'r', 'b', 'c']
for klass, color in zip(range(6), colors):
Xk = X[labels_050 == klass]
ax3.plot(Xk[:, 0], Xk[:, 1], color, alpha=0.3, marker='.')
ax3.plot(X[labels_050 == -1, 0], X[labels_050 == -1, 1], 'k+', alpha=0.1)
ax3.set_title('Clustering at 0.5 epsilon cut\nDBSCAN')
# DBSCAN at 2.
colors = ['g.', 'm.', 'y.', 'c.']
for klass, color in zip(range(4), colors):
Xk = X[labels_200 == klass]
ax4.plot(Xk[:, 0], Xk[:, 1], color, alpha=0.3)
ax4.plot(X[labels_200 == -1, 0], X[labels_200 == -1, 1], 'k+', alpha=0.1)
ax4.set_title('Clustering at 2.0 epsilon cut\nDBSCAN')
plt.tight_layout()
plt.show()
| 35.495146 | 79 | 0.64442 | ["BSD-3-Clause"] | sourcery-ai-bot/scikit-learn | examples/cluster/plot_optics.py | 3,656 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Author : Heethesh Vhavle
Email : [email protected]
Version : 1.0.0
Date : Apr 08, 2019
'''
import random
import rospy
from delta_perception.msg import VehicleGroundTruth, VehicleGroundTruthArray
random_vehicles = ['car', 'truck', 'bus', 'motorbike', 'bicycle']
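# Quick sanity check while this node is running (assumes a live roscore):
#   rostopic echo /delta/ground_truth/vehicles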
def talker():
# Setup node
pub = rospy.Publisher('/delta/ground_truth/vehicles', VehicleGroundTruthArray, queue_size=10)
rospy.init_node('vehicle_ground_truth_publisher_test', anonymous=True)
# Ignore this if you use a camera/image callback to publish the data
r = rospy.Rate(0.25)
# Randomly publish some data
while not rospy.is_shutdown():
# Create the message array
msg = VehicleGroundTruthArray()
# Create few random vehicles
for i in range(random.randrange(5)):
# Populate single vehicle with random ground truth data
vehicle = VehicleGroundTruth()
# std_msgs/string - class name
vehicle.class_name = random.choice(random_vehicles)
# std_msgs/uint16 - 2D bbox corners (range: 0 - 65535)
vehicle.left = random.randint(0, 1000)
vehicle.top = random.randint(0, 1000)
vehicle.right = random.randint(0, 1000)
vehicle.bottom = random.randint(0, 1000)
# std_msgs/float64 - radial distance (m) of vehicle from ego vehicle
vehicle.distance = random.random() * 150
# std_msgs/bool - difficult flag, True, if vehicle distance > 50m
vehicle.difficult = vehicle.distance > 50
# Add the vehicle to the message array
msg.vehicles.append(vehicle)
# Header stamp and publish the message
print('Sending')
msg.header.stamp = rospy.Time.now()
pub.publish(msg)
# Ignore this if you use a camera/image callback to publish the data
r.sleep()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass
| 32.539683 | 97 | 0.638049 | ["BSD-3-Clause"] | deltaautonomy/delta_perception | test/vehicle_ground_truth_publisher_test.py | 2,050 | Python |
# Test some Unicode file name semantics
# We dont test many operations on files other than
# that their names can be used with Unicode characters.
import os, glob, time, shutil
import unicodedata
import unittest
from test.support import (run_unittest, rmtree, change_cwd,
TESTFN_ENCODING, TESTFN_UNICODE, TESTFN_UNENCODABLE, create_empty_file)
if not os.path.supports_unicode_filenames:
try:
TESTFN_UNICODE.encode(TESTFN_ENCODING)
except (UnicodeError, TypeError):
# Either the file system encoding is None, or the file name
# cannot be encoded in the file system encoding.
raise unittest.SkipTest("No Unicode filesystem semantics on this platform.")
def remove_if_exists(filename):
if os.path.exists(filename):
os.unlink(filename)
class TestUnicodeFiles(unittest.TestCase):
# The 'do_' functions are the actual tests. They generally assume the
# file already exists etc.
# Do all the tests we can given only a single filename. The file should
# exist.
def _do_single(self, filename):
self.assertTrue(os.path.exists(filename))
self.assertTrue(os.path.isfile(filename))
self.assertTrue(os.access(filename, os.R_OK))
self.assertTrue(os.path.exists(os.path.abspath(filename)))
self.assertTrue(os.path.isfile(os.path.abspath(filename)))
self.assertTrue(os.access(os.path.abspath(filename), os.R_OK))
os.chmod(filename, 0o777)
os.utime(filename, None)
os.utime(filename, (time.time(), time.time()))
# Copy/rename etc tests using the same filename
self._do_copyish(filename, filename)
# Filename should appear in glob output
self.assertTrue(
os.path.abspath(filename)==os.path.abspath(glob.glob(filename)[0]))
# basename should appear in listdir.
path, base = os.path.split(os.path.abspath(filename))
file_list = os.listdir(path)
# Normalize the unicode strings, as round-tripping the name via the OS
# may return a different (but equivalent) value.
base = unicodedata.normalize("NFD", base)
file_list = [unicodedata.normalize("NFD", f) for f in file_list]
self.assertIn(base, file_list)
# Tests that copy, move, etc one file to another.
def _do_copyish(self, filename1, filename2):
# Should be able to rename the file using either name.
self.assertTrue(os.path.isfile(filename1)) # must exist.
os.rename(filename1, filename2 + ".new")
self.assertFalse(os.path.isfile(filename2))
self.assertTrue(os.path.isfile(filename1 + '.new'))
os.rename(filename1 + ".new", filename2)
self.assertFalse(os.path.isfile(filename1 + '.new'))
self.assertTrue(os.path.isfile(filename2))
shutil.copy(filename1, filename2 + ".new")
os.unlink(filename1 + ".new") # remove using equiv name.
# And a couple of moves, one using each name.
shutil.move(filename1, filename2 + ".new")
self.assertFalse(os.path.exists(filename2))
self.assertTrue(os.path.exists(filename1 + '.new'))
shutil.move(filename1 + ".new", filename2)
self.assertFalse(os.path.exists(filename2 + '.new'))
self.assertTrue(os.path.exists(filename1))
# Note - due to the implementation of shutil.move,
# it tries a rename first. This only fails on Windows when on
# different file systems - and this test can't ensure that.
# So we test the shutil.copy2 function, which is the thing most
# likely to fail.
shutil.copy2(filename1, filename2 + ".new")
self.assertTrue(os.path.isfile(filename1 + '.new'))
os.unlink(filename1 + ".new")
self.assertFalse(os.path.exists(filename2 + '.new'))
def _do_directory(self, make_name, chdir_name):
if os.path.isdir(make_name):
rmtree(make_name)
os.mkdir(make_name)
try:
with change_cwd(chdir_name):
cwd_result = os.getcwd()
name_result = make_name
cwd_result = unicodedata.normalize("NFD", cwd_result)
name_result = unicodedata.normalize("NFD", name_result)
self.assertEqual(os.path.basename(cwd_result),
os.path.basename(name_result))
finally:
os.rmdir(make_name)
# The '_test' functions 'entry points with params' - ie, what the
# top-level 'test' functions would be if they could take params
def _test_single(self, filename):
remove_if_exists(filename)
create_empty_file(filename)
try:
self._do_single(filename)
finally:
os.unlink(filename)
self.assertTrue(not os.path.exists(filename))
# and again with os.open.
f = os.open(filename, os.O_CREAT)
os.close(f)
try:
self._do_single(filename)
finally:
os.unlink(filename)
# The 'test' functions are unittest entry points, and simply call our
# _test functions with each of the filename combinations we wish to test
def test_single_files(self):
self._test_single(TESTFN_UNICODE)
if TESTFN_UNENCODABLE is not None:
self._test_single(TESTFN_UNENCODABLE)
def test_directories(self):
# For all 'equivalent' combinations:
# Make dir with encoded, chdir with unicode, checkdir with encoded
# (or unicode/encoded/unicode, etc
ext = ".dir"
self._do_directory(TESTFN_UNICODE+ext, TESTFN_UNICODE+ext)
# Our directory name that can't use a non-unicode name.
if TESTFN_UNENCODABLE is not None:
self._do_directory(TESTFN_UNENCODABLE+ext,
TESTFN_UNENCODABLE+ext)
def test_main():
run_unittest(__name__)
if __name__ == "__main__":
test_main()
| 42.25 | 84 | 0.651564 | ["ISC"] | Et7f3/cosmopolitan | third_party/python/Lib/test/test_unicode_file.py | 5,915 | Python |
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CreateFilepoolPolicyResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str'
}
attribute_map = {
'id': 'id'
}
def __init__(self, id=None): # noqa: E501
"""CreateFilepoolPolicyResponse - a model defined in Swagger""" # noqa: E501
self._id = None
self.discriminator = None
self.id = id
@property
def id(self):
"""Gets the id of this CreateFilepoolPolicyResponse. # noqa: E501
The name of the new policy # noqa: E501
:return: The id of this CreateFilepoolPolicyResponse. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this CreateFilepoolPolicyResponse.
The name of the new policy # noqa: E501
:param id: The id of this CreateFilepoolPolicyResponse. # noqa: E501
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateFilepoolPolicyResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
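# Round-trip sketch (hypothetical policy name):
#   resp = CreateFilepoolPolicyResponse(id="archive_policy")
#   assert resp.to_dict() == {"id": "archive_policy"}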
| 26.844828 | 88 | 0.556519 | ["Unlicense"] | Isilon/isilon_sdk_python | isi_sdk_8_2_2/isi_sdk_8_2_2/models/create_filepool_policy_response.py | 3,114 | Python |
# This is your project's main settings file that can be committed to your
# repo. If you need to override a setting locally, use settings_local.py
from funfactory.settings_base import *
# Bundles is a dictionary of two dictionaries, css and js, which list css files
# and js files that can be bundled together by the minify app.
MINIFY_BUNDLES = {
'css': {
'example_css': (
'css/examples/main.css',
),
'example_mobile_css': (
'css/examples/mobile.css',
),
},
'js': {
'example_js': (
'js/examples/libs/jquery-1.4.4.min.js',
'js/examples/libs/jquery.cookie.js',
'js/examples/init.js',
),
}
}
INSTALLED_APPS = list(INSTALLED_APPS) + [
# Example code. Can (and should) be removed for actual projects.
#'examples',
'django.contrib.sessions',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.admin',
'django_nose',
'piston',
'south',
'slurpee',
'systems',
'oncall',
'migrate_dns',
'user_systems',
'dhcp',
'truth',
'api',
'api_v2',
'reports',
'mcsv',
'base',
'core',
'core.task',
'core.site',
'core.vlan',
'core.network',
'core.range',
'core.dhcp',
'core.group',
'core.registration',
'core.registration.static',
'core.hwadapter',
'core.search',
'core.service',
'core.keyvalue',
'mozdns',
'mozdns.address_record',
'mozdns.cname',
'mozdns.domain',
'mozdns.ip',
'mozdns.mx',
'mozdns.nameserver',
'mozdns.ptr',
'mozdns.soa',
'mozdns.sshfp',
'mozdns.srv',
'mozdns.txt',
'mozdns.view',
'mozdns.mozbind',
'mozdns.record',
'mozdns.create_zone',
'mozdns.delete_zone',
#'debug_toolbar',
'tastypie',
'tastytools',
'reversion',
'reversion_compare',
]
INSTALLED_APPS.remove('product_details')
# Because Jinja2 is the default template loader, add any non-Jinja templated
# apps here:
JINGO_EXCLUDE_APPS = [
#'debug_toolbar',
'build',
'admin',
'user_systems',
'tastytools',
]
DJANGO_TEMPLATE_APPS = [
'admin',
'build',
'user_systems',
]
# Tells the extract script what files to look for L10n in and what function
# handles the extraction. The Tower library expects this.
# # Use this if you have localizable HTML files:
# DOMAIN_METHODS['lhtml'] = [
# ('**/templates/**.lhtml',
# 'tower.management.commands.extract.extract_tower_template'),
# ]
# # Use this if you have localizable HTML files:
# DOMAIN_METHODS['javascript'] = [
# # Make sure that this won't pull in strings from external libraries you
# # may use.
# ('media/js/**.js', 'javascript'),
# ]
LOGGING = dict(loggers=dict(playdoh = {'level': logging.INFO}))
AUTH_PROFILE_MODULE = 'systems.UserProfile'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.RemoteUserBackend',
)
AUTH_PROFILE_MODULE = "systems.UserProfile"
PISTON_IGNORE_DUPE_MODELS = True
#TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
#########################################################
# MOZ DNS #
#########################################################
JINJA_CONFIG = {'autoescape': False}
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.template.TemplateDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
)
MIDDLEWARE_CLASSES = MIDDLEWARE_CLASSES + (
'middleware.disable_csrf.DisableCSRF',
'reversion.middleware.RevisionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.RemoteUserMiddleware',
#'debug_toolbar.middleware.DebugToolbarMiddleware',
)
INTERNAL_IPS = ('127.0.0.1','10.22.74.139','10.250.2.54')
def custom_show_toolbar(request):
return True # Always show toolbar, for example purposes only.
BUG_URL = 'https://bugzilla.mozilla.org/show_bug.cgi?id='
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TOOLBAR_CALLBACK': custom_show_toolbar,
'HIDE_DJANGO_SQL': False,
'TAG': 'div',
'ENABLE_STACKTRACES' : True,
}
#############################################################
# MOZ DNS #
#############################################################
from settings.dnsbuilds import *
MOZDNS_BASE_URL = "/mozdns"
CORE_BASE_URL = "/core"
ROOT_URLCONF = 'urls'
BUILD_PATH = '/home/juber/dnsbuilds/'
# HACK HACK This will need to be fixed
from settings.local import *
| 27.255435 | 79 | 0.622134 | ["BSD-3-Clause"] | Mozilla-GitHub-Standards/b6a5bb5c98b18d87c72c770f29c4270008fc6fc6b787d531a2afcd382dc4cbad | settings/base.py | 5,015 | Python |
import pandas as pd
import csv
import numpy as np
from tensorflow.keras import Sequential, Model
from tensorflow.keras import backend as K
from tensorflow.keras import initializers, optimizers, regularizers
from tensorflow.keras.layers import Dense, Concatenate, Input, Conv1D, MaxPooling1D, Flatten, Dropout, GlobalAveragePooling1D
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from numpy import linalg as LA
class nn_em:
def __init__(self):
print("model initialized")
def my_init(self, shape):
value = np.random.random(shape)
return K.variable(value)
def init_probabilities(self,n):
# initialize probability z_i (item's quality) randomly
p_z_i = np.random.randint(2, size=(n, 1)).astype(float)
return p_z_i, 1 - p_z_i
def define_nn(self,n_neurons, hidden,m,nb_hidden_layer,learning_rate):
classifier = Sequential()
if hidden == True:
# First Hidden Layer
layer0 = Dense(n_neurons, activation='sigmoid', kernel_initializer=initializers.random_normal(stddev=0.03, seed=98765), input_dim=m)
classifier.add(layer0)
nb = 1
while (nb < nb_hidden_layer):
layer_nb = Dense(n_neurons, activation='sigmoid', kernel_initializer=initializers.random_normal(stddev=0.03, seed=98765))
classifier.add(layer_nb)
nb += 1
# Output Layer
layer1 = Dense(1, activation='sigmoid', kernel_initializer=initializers.random_normal(stddev=0.03, seed=98765), \
kernel_regularizer=regularizers.l2(0.5))
classifier.add(layer1)
# Compiling the neural network
sgd = optimizers.SGD(lr=learning_rate, clipvalue=0.5)
classifier.compile(optimizer=sgd, loss='binary_crossentropy', metrics=['accuracy'])
return classifier
def define_multiclass_nn(self,n_neurons,m,class_num):
classifier = Sequential()
# Hidden Layer
layer0 = Dense(n_neurons, input_dim=m, activation='relu')
classifier.add(layer0)
# Output Layer
layer1 = Dense(class_num,activation='softmax')
classifier.add(layer1)
# Compiling the neural network
classifier.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics =['accuracy'])
return classifier
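    # Illustrative sketch (not in the original file): building and training the
    # multiclass network defined above; X (n x 10 feature matrix) and Y (n x 3
    # one-hot labels) are hypothetical placeholders.
    #   clf = nn_em().define_multiclass_nn(n_neurons=16, m=10, class_num=3)
    #   clf.fit(X, Y, epochs=5, verbose=0)
    #   probs = clf.predict(X)   # shape (n, 3); rows sum to ~1 because of the softmax output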
def lr_pzi(self,classifier, X_train, X_test, y_train, y_test, steps):
classifier.fit(X_train, y_train, epochs=steps, verbose=0)
theta_i = classifier.predict(X_test)
loss_and_metrics = classifier.evaluate(X_test, y_test)
print(theta_i[1:10])
eval_model = accuracy_score(y_test, np.where(theta_i > 0.5, 1, 0))
print("eval model",eval_model)
weights = classifier.get_weights()
return theta_i, eval_model,loss_and_metrics, weights[0]
def nn_pzi(self,classifier, social_features, y, steps, true_labels):
classifier.fit(social_features, y, epochs=steps, verbose=0)
theta_i = classifier.predict(social_features)
eval_model = accuracy_score(true_labels, np.where(theta_i > theta_i.mean(), 1, 0))
weights = classifier.get_weights()
return theta_i, eval_model, weights[0]
def nn_pzi_test_val(self, classifier, social_features, prob_e_step, steps):
classifier.fit(social_features, prob_e_step, epochs=steps, verbose=0)
theta_i = classifier.predict(social_features)
weights = classifier.get_weights()
return theta_i, weights[0],classifier
def train_m_step_early_stopp(self, classifier, social_features, prob_e_step,
steps, total_epochs, y_test, y_val, X_val, start_val):
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=10,
verbose=0, mode='auto', restore_best_weights=True)
classifier.fit(social_features, prob_e_step, validation_data=(X_val,y_val),
callbacks=[monitor],verbose=0,epochs=total_epochs, batch_size=4)
theta_i = classifier.predict(social_features)
weights = classifier.get_weights()[0]
return theta_i,classifier, weights
def train_m_step(self, classifier, social_features, prob_e_step,
steps, total_epochs, y_test, y_val,start_val):
theta_i = prob_e_step.copy()
weights = np.array([])
iter = 0
old_theta_i = np.zeros((social_features.shape[0], 1))
epsilon = 1e-3
y_test = np.argmax(y_test,axis=1)
y_val = np.argmax(y_val,axis=1)
while (LA.norm(theta_i - old_theta_i) > epsilon) and (iter < total_epochs):
# if (iter % 5 == 0) and (iter>0):
# min_norm = LA.norm(theta_i - old_theta_i)
old_theta_i = theta_i.copy()
theta_i, weights, classifier = self.nn_pzi_test_val(classifier, social_features, prob_e_step, steps)
end_val = start_val + y_val.shape[0]
# theta_i_test = theta_i[strat_val:(end_val)]
# theta_i_val = theta_i[(end_val):]
theta_i_val = theta_i[start_val:end_val]
theta_i_test = theta_i[end_val:]
theta_i_test = np.argmax(theta_i_test,axis=1)
theta_i_val = np.argmax(theta_i_val,axis=1)
eval_model_test = accuracy_score(y_test, theta_i_test)
eval_model_val = accuracy_score(y_val, theta_i_val)
if iter%10==0:
print ("epoch", iter," convergence influencer:", LA.norm(theta_i - old_theta_i),"val", eval_model_val,\
"test", eval_model_test)
iter +=1
print ("epoch", iter, " convergence influencer:", LA.norm(theta_i - old_theta_i), "val", eval_model_val, \
"test", eval_model_test)
return theta_i,classifier, weights
def train(self,classifier,social_features,true_labels, p_z_i_1, total_epochs, steps, size_train):
y = np.concatenate((true_labels[0:size_train], p_z_i_1[size_train:]))
for i in range(total_epochs):
#print("epoch", i)
theta_i, eval_model, weights = self.nn_pzi(classifier, social_features, y, steps,true_labels)
y = np.concatenate((true_labels[0:size_train], theta_i[size_train:]))
result = pd.DataFrame(data=np.concatenate([np.where(theta_i > theta_i.mean(), 1, 0), true_labels], axis=1),
columns=['classification', 'truth'])
#print("evaluation", eval_model)
return result, eval_model,weights, classifier.metrics_names, theta_i,classifier
def logistic_regression(self,input_file,output_file,true_labels,weights_file,total_epochs,learning_rate):
simple_example = pd.read_csv(input_file, sep=",")
social_features = simple_example[['follower_nbr', 'followee_nbr', 'tweets_nbr', 'avg_length_tweets']].values
#social_features = simple_example.values
labels = pd.read_csv(true_labels, sep=",")
true_labels = labels[['label']].values
X_train, X_test, y_train, y_test = train_test_split(social_features, true_labels, test_size = 0.2, random_state=45)
n = social_features.shape[0]
print("n=",n)
print ("true_labels", true_labels.shape[0])
m = social_features.shape[1]
        # initialize p_z_i (item-quality probabilities)
p_z_i_0, p_z_i_1 = self.init_probabilities(n)
n_neurons = 3
steps = 1
hidden = False
size_train = int(0.6 * n)
        classifier = self.define_nn(n_neurons, hidden, m, 1, learning_rate)  # nb_hidden_layer=1 assumed; the original call omitted this argument
for i in range(total_epochs):
theta_i, eval_model,loss_and_metrics, weights = self.lr_pzi(classifier, X_train, X_test, y_train, y_test, steps)
result = pd.DataFrame(data=np.concatenate([np.where(theta_i > 0.5, 1, 0), y_test], axis=1), columns=['classification', 'truth'])
np.savetxt(weights_file,weights,delimiter=',')
result.to_csv(output_file)
metrics = pd.DataFrame(np.array(eval_model).reshape(1, 1), columns=[['accuracy']])
with open(output_file, 'a') as f:
metrics.to_csv(f, header=True)
def nn(self, input_file,output_file,weights_file,total_epochs,learning_rate):
simple_example = pd.read_csv(input_file, sep=",")
social_features = simple_example[['follower_nbr', 'followee_nbr', 'tweets_nbr', 'avg_length_tweets']].values
true_labels = simple_example[['label']].values
n = social_features.shape[0]
m = social_features.shape[1]
        # initialize p_z_i (item-quality probabilities)
p_z_i_0, p_z_i_1 = self.init_probabilities(n)
n_neurons = 3
steps = 1
hidden = True
size_train = int(0.8 * n)
        classifier = self.define_nn(n_neurons, hidden, m, 1, learning_rate)  # nb_hidden_layer=1 assumed; the original call omitted this argument
        result, eval_model, weights, metrics_names, theta_i, classifier = self.train(
            classifier, social_features, true_labels, p_z_i_1, total_epochs, steps, size_train)
np.savetxt(weights_file,weights,delimiter=',')
result.to_csv(output_file)
metrics = pd.DataFrame(np.array(eval_model).reshape(1, 1), columns=[['accuracy']])
with open(output_file, 'a') as f:
metrics.to_csv(f, header=True)
def create_multiple_input_model(self,mlp_neurons,input_dim_a,input_dim_b,class_num):
inputA = Input(shape=input_dim_a)
inputB = Input(shape=input_dim_b)
a = Dense(mlp_neurons, activation="relu")(inputA)
a = Dense(class_num, activation="softmax")(a)
a = Model(inputs=inputA, outputs=a)
b = Conv1D(64, 3, activation="relu")(inputB)
b = Conv1D(64, 3, activation="relu")(b)
b = Dropout(0.5)(b)
b = MaxPooling1D()(b)
b = Flatten()(b)
b = Dense(100, activation='relu')(b)
b = Model(inputs=inputB, outputs=b)
combined = Concatenate()([a.output, b.output])
c = Dense(mlp_neurons, activation="relu")(combined)
c = Dense(class_num, activation="softmax")(c)
model = Model(inputs=[a.input, b.input], outputs=c)
model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy'])
return model
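    # Note on expected input shapes (illustrative, inferred from the layers above):
    # branch A is a plain MLP over `input_dim_a` features, while branch B feeds a
    # Conv1D stack and therefore expects a sequence, e.g. input_dim_b = (timesteps, channels).
    #   model = nn_em().create_multiple_input_model(mlp_neurons=32,
    #                                               input_dim_a=(10,),
    #                                               input_dim_b=(50, 1),
    #                                               class_num=3)
    #   model.fit([X_a, X_b], Y)   # X_a: (n, 10), X_b: (n, 50, 1), Y: one-hot (n, 3)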
def create_multiple_input_model_mlp(self,mlp_neurons,input_dim_a,input_dim_b,class_num):
inputA = Input(shape=input_dim_a)
inputB = Input(shape=input_dim_b)
a = Dense(mlp_neurons, activation="relu")(inputA)
a = Dense(class_num, activation="relu")(a)
a = Model(inputs=inputA, outputs=a)
b = Dense(int((input_dim_b[0] + class_num)/2), activation="relu")(inputB)
b = Dense(class_num, activation="relu")(b)
b = Model(inputs=inputB, outputs=b)
combined = Concatenate()([a.output, b.output])
c = Dense(int((class_num*3)/2), activation="relu")(combined)
c = Dense(class_num, activation="softmax")(c)
model = Model(inputs=[a.input, b.input], outputs=c)
model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy'])
        return model
| 47.587983 | 144 | 0.648449 | ["Apache-2.0"] | lmluzern/MultiOpenCrowd | src/feature_based/multiclass_opencrowd/nn_em.py | 11,088 | Python |
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 0, transform = "Difference", sigma = 0.0, exog_count = 0, ar_order = 0);
| 38.857143 | 167 | 0.735294 | ["BSD-3-Clause"] | jmabry/pyaf | tests/artificial/transf_Difference/trend_MovingMedian/cycle_0/ar_/test_artificial_128_Difference_MovingMedian_0__0.py | 272 | Python |
import sys
import unicodedata
from PIL import Image, ImageDraw, ImageFont
args = sys.argv
if len(args) < 2:
print("usage:", args[0], "message")
sys.exit()
text = args[1]
if len(text) < 1:
print("usage:", args[0], "message")
sys.exit()
def get_east_asian_width_count(text):
count = 0
for c in text:
if unicodedata.east_asian_width(c) in 'FWA':
count += 2
else:
count += 1
return count
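# Illustrative check (not in the original script): characters whose east_asian_width
# is 'F', 'W' or 'A' count as 2 columns, everything else as 1, so
# get_east_asian_width_count("abc") == 3 and get_east_asian_width_count("abあ") == 4.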
length = get_east_asian_width_count(text)
img = Image.new("RGB",(length*5,11),(0,0,0))
draw = ImageDraw.Draw(img)
fontpath = "./PixelMplus10-Regular.ttf"
#fontpath = "./PixelMplus10-Bold.ttf"
draw.font = ImageFont.truetype(fontpath, 10)
draw.text((0,0), text, (255, 255, 255))
img.save("./message.png")
| 20.8 | 48 | 0.666209 | ["Apache-2.0"] | kitazaki/LED_Badge | message.py | 728 | Python |
from corehq.sql_db.connections import get_db_alias_or_none, ICDS_UCR_CITUS_ENGINE_ID
def get_icds_ucr_citus_db_alias():
return get_db_alias_or_none(ICDS_UCR_CITUS_ENGINE_ID)
| 30 | 84 | 0.872222 | ["BSD-3-Clause"] | dungeonmaster51/commcare-hq | custom/icds_reports/utils/connections.py | 180 | Python |
class ElasticEase(EasingFunctionBase,ISealable,IEasingFunction):
"""
Represents an easing function that creates an animation that resembles a spring oscillating back and forth until it comes to rest.
ElasticEase()
"""
def CloneCore(self,*args):
"""
CloneCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a clone (deep copy) of the specified System.Windows.Freezable using base
(non-animated) property values.
sourceFreezable: The object to clone.
"""
pass
def CloneCurrentValueCore(self,*args):
"""
CloneCurrentValueCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a modifiable clone (deep copy) of the specified System.Windows.Freezable
using current property values.
sourceFreezable: The System.Windows.Freezable to be cloned.
"""
pass
def CreateInstance(self,*args):
"""
CreateInstance(self: Freezable) -> Freezable
Initializes a new instance of the System.Windows.Freezable class.
Returns: The new instance.
"""
pass
def CreateInstanceCore(self,*args):
"""
CreateInstanceCore(self: ElasticEase) -> Freezable
Creates a new instance of the System.Windows.Freezable derived class. When creating a derived
class,you must override this method.
Returns: The new instance.
"""
pass
def EaseInCore(self,*args):
"""
EaseInCore(self: ElasticEase,normalizedTime: float) -> float
Provides the logic portion of the easing function that you can override to produce the
System.Windows.Media.Animation.EasingMode.EaseIn mode of the custom easing function.
normalizedTime: Normalized time (progress) of the animation.
Returns: A double that represents the transformed progress.
"""
pass
def FreezeCore(self,*args):
"""
FreezeCore(self: Freezable,isChecking: bool) -> bool
Makes the System.Windows.Freezable object unmodifiable or tests whether it can be made
unmodifiable.
isChecking: true to return an indication of whether the object can be frozen (without actually freezing it);
false to actually freeze the object.
Returns: If isChecking is true,this method returns true if the System.Windows.Freezable can be made
unmodifiable,or false if it cannot be made unmodifiable. If isChecking is false,this method
returns true if the if the specified System.Windows.Freezable is now unmodifiable,or false if
it cannot be made unmodifiable.
"""
pass
def GetAsFrozenCore(self,*args):
"""
GetAsFrozenCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a frozen clone of the specified System.Windows.Freezable using base
(non-animated) property values.
sourceFreezable: The instance to copy.
"""
pass
def GetCurrentValueAsFrozenCore(self,*args):
"""
GetCurrentValueAsFrozenCore(self: Freezable,sourceFreezable: Freezable)
Makes the current instance a frozen clone of the specified System.Windows.Freezable. If the
object has animated dependency properties,their current animated values are copied.
sourceFreezable: The System.Windows.Freezable to copy and freeze.
"""
pass
def OnChanged(self,*args):
"""
OnChanged(self: Freezable)
Called when the current System.Windows.Freezable object is modified.
"""
pass
def OnFreezablePropertyChanged(self,*args):
"""
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject,property: DependencyProperty)
This member supports the Windows Presentation Foundation (WPF) infrastructure and is not
intended to be used directly from your code.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
property: The property that changed.
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject)
Ensures that appropriate context pointers are established for a
System.Windows.DependencyObjectType data member that has just been set.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
"""
pass
def OnPropertyChanged(self,*args):
"""
OnPropertyChanged(self: Freezable,e: DependencyPropertyChangedEventArgs)
Overrides the System.Windows.DependencyObject implementation of
System.Windows.DependencyObject.OnPropertyChanged(System.Windows.DependencyPropertyChangedEventAr
gs) to also invoke any System.Windows.Freezable.Changed handlers in response to a changing
dependency property of type System.Windows.Freezable.
e: Event data that contains information about which property changed,and its old and new values.
"""
pass
def ReadPreamble(self,*args):
"""
ReadPreamble(self: Freezable)
Ensures that the System.Windows.Freezable is being accessed from a valid thread. Inheritors of
System.Windows.Freezable must call this method at the beginning of any API that reads data
members that are not dependency properties.
"""
pass
def ShouldSerializeProperty(self,*args):
"""
ShouldSerializeProperty(self: DependencyObject,dp: DependencyProperty) -> bool
Returns a value that indicates whether serialization processes should serialize the value for
the provided dependency property.
dp: The identifier for the dependency property that should be serialized.
Returns: true if the dependency property that is supplied should be value-serialized; otherwise,false.
"""
pass
def WritePostscript(self,*args):
"""
WritePostscript(self: Freezable)
Raises the System.Windows.Freezable.Changed event for the System.Windows.Freezable and invokes
its System.Windows.Freezable.OnChanged method. Classes that derive from System.Windows.Freezable
should call this method at the end of any API that modifies class members that are not stored as
dependency properties.
"""
pass
def WritePreamble(self,*args):
"""
WritePreamble(self: Freezable)
Verifies that the System.Windows.Freezable is not frozen and that it is being accessed from a
valid threading context. System.Windows.Freezable inheritors should call this method at the
beginning of any API that writes to data members that are not dependency properties.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
Oscillations=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the number of times the target slides back and forth over the animation destination.
Get: Oscillations(self: ElasticEase) -> int
Set: Oscillations(self: ElasticEase)=value
"""
Springiness=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the stiffness of the spring. The smaller the Springiness value is,the stiffer the spring and the faster the elasticity decreases in intensity over each oscillation.
Get: Springiness(self: ElasticEase) -> float
Set: Springiness(self: ElasticEase)=value
"""
OscillationsProperty=None
SpringinessProperty=None
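# Illustrative usage sketch (not part of the generated stub; assumes an IronPython/WPF
# environment with PresentationCore/PresentationFramework already referenced):
#   from System.Windows.Media.Animation import ElasticEase, DoubleAnimation
#   from System.Windows import Duration
#   from System import TimeSpan
#   ease = ElasticEase()
#   ease.Oscillations = 3      # number of back-and-forth slides
#   ease.Springiness = 5.0     # smaller values mean a stiffer spring
#   anim = DoubleAnimation(0.0, 300.0, Duration(TimeSpan.FromSeconds(1)))
#   anim.EasingFunction = ease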
| 26.304498 | 215 | 0.71889 | ["MIT"] | BCSharp/ironpython-stubs | release/stubs.min/System/Windows/Media/Animation_parts/ElasticEase.py | 7,602 | Python |
import itertools
import logging
import re
import time
import urllib
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from decimal import Decimal
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Type, Union
from urllib.parse import urlencode
import pytz
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from django.db import connection
from django.db.models.query import QuerySet
from django.http import HttpRequest, HttpResponse, HttpResponseNotFound, HttpResponseRedirect
from django.shortcuts import render
from django.template import loader
from django.urls import reverse
from django.utils import translation
from django.utils.timesince import timesince
from django.utils.timezone import now as timezone_now
from django.utils.translation import ugettext as _
from jinja2 import Markup as mark_safe
from psycopg2.sql import SQL, Composable, Literal
from analytics.lib.counts import COUNT_STATS, CountStat
from analytics.lib.time_utils import time_range
from analytics.models import (
BaseCount,
InstallationCount,
RealmCount,
StreamCount,
UserCount,
installation_epoch,
)
from confirmation.models import Confirmation, _properties, confirmation_url
from confirmation.settings import STATUS_ACTIVE
from zerver.decorator import (
require_non_guest_user,
require_server_admin,
require_server_admin_api,
to_utc_datetime,
zulip_login_required,
)
from zerver.forms import check_subdomain_available
from zerver.lib.actions import (
do_change_plan_type,
do_change_realm_subdomain,
do_deactivate_realm,
do_scrub_realm,
do_send_realm_reactivation_email,
)
from zerver.lib.exceptions import JsonableError
from zerver.lib.i18n import get_and_set_request_language, get_language_translation_data
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_error, json_success
from zerver.lib.subdomains import get_subdomain_from_hostname
from zerver.lib.timestamp import convert_to_UTC, timestamp_to_datetime
from zerver.lib.validator import to_non_negative_int
from zerver.models import (
Client,
MultiuseInvite,
PreregistrationUser,
Realm,
UserActivity,
UserActivityInterval,
UserProfile,
get_realm,
)
from zerver.views.invite import get_invitee_emails_set
if settings.BILLING_ENABLED:
from corporate.lib.stripe import (
approve_sponsorship,
attach_discount_to_realm,
downgrade_at_the_end_of_billing_cycle,
downgrade_now_without_creating_additional_invoices,
get_current_plan_by_realm,
get_customer_by_realm,
get_discount_for_realm,
get_latest_seat_count,
make_end_of_cycle_updates_if_needed,
update_billing_method_of_current_plan,
update_sponsorship_status,
void_all_open_invoices,
)
if settings.ZILENCER_ENABLED:
from zilencer.models import RemoteInstallationCount, RemoteRealmCount, RemoteZulipServer
MAX_TIME_FOR_FULL_ANALYTICS_GENERATION = timedelta(days=1, minutes=30)
def is_analytics_ready(realm: Realm) -> bool:
return (timezone_now() - realm.date_created) > MAX_TIME_FOR_FULL_ANALYTICS_GENERATION
def render_stats(
request: HttpRequest,
data_url_suffix: str,
target_name: str,
for_installation: bool = False,
remote: bool = False,
analytics_ready: bool = True,
) -> HttpRequest:
page_params = dict(
data_url_suffix=data_url_suffix,
for_installation=for_installation,
remote=remote,
debug_mode=False,
)
request_language = get_and_set_request_language(
request,
request.user.default_language,
translation.get_language_from_path(request.path_info),
)
page_params["translation_data"] = get_language_translation_data(request_language)
return render(
request,
"analytics/stats.html",
context=dict(
target_name=target_name, page_params=page_params, analytics_ready=analytics_ready
),
)
@zulip_login_required
def stats(request: HttpRequest) -> HttpResponse:
realm = request.user.realm
if request.user.is_guest:
# TODO: Make @zulip_login_required pass the UserProfile so we
# can use @require_member_or_admin
raise JsonableError(_("Not allowed for guest users"))
return render_stats(
request, "", realm.name or realm.string_id, analytics_ready=is_analytics_ready(realm)
)
@require_server_admin
@has_request_variables
def stats_for_realm(request: HttpRequest, realm_str: str) -> HttpResponse:
try:
realm = get_realm(realm_str)
except Realm.DoesNotExist:
return HttpResponseNotFound(f"Realm {realm_str} does not exist")
return render_stats(
request,
f"/realm/{realm_str}",
realm.name or realm.string_id,
analytics_ready=is_analytics_ready(realm),
)
@require_server_admin
@has_request_variables
def stats_for_remote_realm(
request: HttpRequest, remote_server_id: int, remote_realm_id: int
) -> HttpResponse:
assert settings.ZILENCER_ENABLED
server = RemoteZulipServer.objects.get(id=remote_server_id)
return render_stats(
request,
f"/remote/{server.id}/realm/{remote_realm_id}",
f"Realm {remote_realm_id} on server {server.hostname}",
)
@require_server_admin_api
@has_request_variables
def get_chart_data_for_realm(
request: HttpRequest, user_profile: UserProfile, realm_str: str, **kwargs: Any
) -> HttpResponse:
try:
realm = get_realm(realm_str)
except Realm.DoesNotExist:
raise JsonableError(_("Invalid organization"))
return get_chart_data(request=request, user_profile=user_profile, realm=realm, **kwargs)
@require_server_admin_api
@has_request_variables
def get_chart_data_for_remote_realm(
request: HttpRequest,
user_profile: UserProfile,
remote_server_id: int,
remote_realm_id: int,
**kwargs: Any,
) -> HttpResponse:
assert settings.ZILENCER_ENABLED
server = RemoteZulipServer.objects.get(id=remote_server_id)
return get_chart_data(
request=request,
user_profile=user_profile,
server=server,
remote=True,
remote_realm_id=int(remote_realm_id),
**kwargs,
)
@require_server_admin
def stats_for_installation(request: HttpRequest) -> HttpResponse:
return render_stats(request, "/installation", "installation", True)
@require_server_admin
def stats_for_remote_installation(request: HttpRequest, remote_server_id: int) -> HttpResponse:
assert settings.ZILENCER_ENABLED
server = RemoteZulipServer.objects.get(id=remote_server_id)
return render_stats(
request,
f"/remote/{server.id}/installation",
f"remote installation {server.hostname}",
True,
True,
)
@require_server_admin_api
@has_request_variables
def get_chart_data_for_installation(
request: HttpRequest, user_profile: UserProfile, chart_name: str = REQ(), **kwargs: Any
) -> HttpResponse:
return get_chart_data(
request=request, user_profile=user_profile, for_installation=True, **kwargs
)
@require_server_admin_api
@has_request_variables
def get_chart_data_for_remote_installation(
request: HttpRequest,
user_profile: UserProfile,
remote_server_id: int,
chart_name: str = REQ(),
**kwargs: Any,
) -> HttpResponse:
assert settings.ZILENCER_ENABLED
server = RemoteZulipServer.objects.get(id=remote_server_id)
return get_chart_data(
request=request,
user_profile=user_profile,
for_installation=True,
remote=True,
server=server,
**kwargs,
)
@require_non_guest_user
@has_request_variables
def get_chart_data(
request: HttpRequest,
user_profile: UserProfile,
chart_name: str = REQ(),
min_length: Optional[int] = REQ(converter=to_non_negative_int, default=None),
start: Optional[datetime] = REQ(converter=to_utc_datetime, default=None),
end: Optional[datetime] = REQ(converter=to_utc_datetime, default=None),
realm: Optional[Realm] = None,
for_installation: bool = False,
remote: bool = False,
remote_realm_id: Optional[int] = None,
server: Optional["RemoteZulipServer"] = None,
) -> HttpResponse:
if for_installation:
if remote:
assert settings.ZILENCER_ENABLED
aggregate_table = RemoteInstallationCount
assert server is not None
else:
aggregate_table = InstallationCount
else:
if remote:
assert settings.ZILENCER_ENABLED
aggregate_table = RemoteRealmCount
assert server is not None
assert remote_realm_id is not None
else:
aggregate_table = RealmCount
if chart_name == "number_of_humans":
stats = [
COUNT_STATS["1day_actives::day"],
COUNT_STATS["realm_active_humans::day"],
COUNT_STATS["active_users_audit:is_bot:day"],
]
tables = [aggregate_table]
subgroup_to_label: Dict[CountStat, Dict[Optional[str], str]] = {
stats[0]: {None: "_1day"},
stats[1]: {None: "_15day"},
stats[2]: {"false": "all_time"},
}
labels_sort_function = None
include_empty_subgroups = True
elif chart_name == "messages_sent_over_time":
stats = [COUNT_STATS["messages_sent:is_bot:hour"]]
tables = [aggregate_table, UserCount]
subgroup_to_label = {stats[0]: {"false": "human", "true": "bot"}}
labels_sort_function = None
include_empty_subgroups = True
elif chart_name == "messages_sent_by_message_type":
stats = [COUNT_STATS["messages_sent:message_type:day"]]
tables = [aggregate_table, UserCount]
subgroup_to_label = {
stats[0]: {
"public_stream": _("Public streams"),
"private_stream": _("Private streams"),
"private_message": _("Private messages"),
"huddle_message": _("Group private messages"),
}
}
labels_sort_function = lambda data: sort_by_totals(data["everyone"])
include_empty_subgroups = True
elif chart_name == "messages_sent_by_client":
stats = [COUNT_STATS["messages_sent:client:day"]]
tables = [aggregate_table, UserCount]
# Note that the labels are further re-written by client_label_map
subgroup_to_label = {
stats[0]: {str(id): name for id, name in Client.objects.values_list("id", "name")}
}
labels_sort_function = sort_client_labels
include_empty_subgroups = False
elif chart_name == "messages_read_over_time":
stats = [COUNT_STATS["messages_read::hour"]]
tables = [aggregate_table, UserCount]
subgroup_to_label = {stats[0]: {None: "read"}}
labels_sort_function = None
include_empty_subgroups = True
else:
raise JsonableError(_("Unknown chart name: {}").format(chart_name))
# Most likely someone using our API endpoint. The /stats page does not
# pass a start or end in its requests.
if start is not None:
start = convert_to_UTC(start)
if end is not None:
end = convert_to_UTC(end)
if start is not None and end is not None and start > end:
raise JsonableError(
_("Start time is later than end time. Start: {start}, End: {end}").format(
start=start,
end=end,
)
)
if realm is None:
# Note that this value is invalid for Remote tables; be
# careful not to access it in those code paths.
realm = user_profile.realm
if remote:
# For remote servers, we don't have fillstate data, and thus
# should simply use the first and last data points for the
# table.
assert server is not None
if not aggregate_table.objects.filter(server=server).exists():
raise JsonableError(
_("No analytics data available. Please contact your server administrator.")
)
if start is None:
start = aggregate_table.objects.filter(server=server).first().end_time
if end is None:
end = aggregate_table.objects.filter(server=server).last().end_time
else:
# Otherwise, we can use tables on the current server to
# determine a nice range, and some additional validation.
if start is None:
if for_installation:
start = installation_epoch()
else:
start = realm.date_created
if end is None:
end = max(
stat.last_successful_fill() or datetime.min.replace(tzinfo=timezone.utc)
for stat in stats
)
if start > end and (timezone_now() - start > MAX_TIME_FOR_FULL_ANALYTICS_GENERATION):
logging.warning(
"User from realm %s attempted to access /stats, but the computed "
"start time: %s (creation of realm or installation) is later than the computed "
"end time: %s (last successful analytics update). Is the "
"analytics cron job running?",
realm.string_id,
start,
end,
)
raise JsonableError(
_("No analytics data available. Please contact your server administrator.")
)
assert len({stat.frequency for stat in stats}) == 1
end_times = time_range(start, end, stats[0].frequency, min_length)
data: Dict[str, Any] = {
"end_times": [int(end_time.timestamp()) for end_time in end_times],
"frequency": stats[0].frequency,
}
aggregation_level = {
InstallationCount: "everyone",
RealmCount: "everyone",
UserCount: "user",
}
if settings.ZILENCER_ENABLED:
aggregation_level[RemoteInstallationCount] = "everyone"
aggregation_level[RemoteRealmCount] = "everyone"
# -1 is a placeholder value, since there is no relevant filtering on InstallationCount
id_value = {
InstallationCount: -1,
RealmCount: realm.id,
UserCount: user_profile.id,
}
if settings.ZILENCER_ENABLED:
if server is not None:
id_value[RemoteInstallationCount] = server.id
# TODO: RemoteRealmCount logic doesn't correctly handle
# filtering by server_id as well.
if remote_realm_id is not None:
id_value[RemoteRealmCount] = remote_realm_id
for table in tables:
data[aggregation_level[table]] = {}
for stat in stats:
data[aggregation_level[table]].update(
get_time_series_by_subgroup(
stat,
table,
id_value[table],
end_times,
subgroup_to_label[stat],
include_empty_subgroups,
)
)
if labels_sort_function is not None:
data["display_order"] = labels_sort_function(data)
else:
data["display_order"] = None
return json_success(data=data)
def sort_by_totals(value_arrays: Dict[str, List[int]]) -> List[str]:
totals = [(sum(values), label) for label, values in value_arrays.items()]
totals.sort(reverse=True)
return [label for total, label in totals]
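# Illustrative example (not in the original module): labels are ordered by the sum of
# their series, largest first.
#   sort_by_totals({"bot": [0, 1], "human": [5, 3], "other": [2, 2]})
#   # -> ['human', 'other', 'bot']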
# For any given user, we want to show a fixed set of clients in the chart,
# regardless of the time aggregation or whether we're looking at realm or
# user data. This fixed set ideally includes the clients most important in
# understanding the realm's traffic and the user's traffic. This function
# tries to rank the clients so that taking the first N elements of the
# sorted list has a reasonable chance of doing so.
def sort_client_labels(data: Dict[str, Dict[str, List[int]]]) -> List[str]:
realm_order = sort_by_totals(data["everyone"])
user_order = sort_by_totals(data["user"])
label_sort_values: Dict[str, float] = {}
for i, label in enumerate(realm_order):
label_sort_values[label] = i
for i, label in enumerate(user_order):
label_sort_values[label] = min(i - 0.1, label_sort_values.get(label, i))
return [label for label, sort_value in sorted(label_sort_values.items(), key=lambda x: x[1])]
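# Illustrative example (hypothetical data): a client that dominates the user's own
# traffic is pulled ahead of the realm-wide ordering by the -0.1 tie-breaker above.
#   data = {"everyone": {"website": [10], "mobile": [4]},
#           "user": {"mobile": [3], "website": [1]}}
#   sort_client_labels(data)   # -> ['mobile', 'website']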
def table_filtered_to_id(table: Type[BaseCount], key_id: int) -> QuerySet:
if table == RealmCount:
return RealmCount.objects.filter(realm_id=key_id)
elif table == UserCount:
return UserCount.objects.filter(user_id=key_id)
elif table == StreamCount:
return StreamCount.objects.filter(stream_id=key_id)
elif table == InstallationCount:
return InstallationCount.objects.all()
elif settings.ZILENCER_ENABLED and table == RemoteInstallationCount:
return RemoteInstallationCount.objects.filter(server_id=key_id)
elif settings.ZILENCER_ENABLED and table == RemoteRealmCount:
return RemoteRealmCount.objects.filter(realm_id=key_id)
else:
raise AssertionError(f"Unknown table: {table}")
def client_label_map(name: str) -> str:
if name == "website":
return "Website"
if name.startswith("desktop app"):
return "Old desktop app"
if name == "ZulipElectron":
return "Desktop app"
if name == "ZulipAndroid":
return "Old Android app"
if name == "ZulipiOS":
return "Old iOS app"
if name == "ZulipMobile":
return "Mobile app"
if name in ["ZulipPython", "API: Python"]:
return "Python API"
if name.startswith("Zulip") and name.endswith("Webhook"):
return name[len("Zulip") : -len("Webhook")] + " webhook"
return name
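# Illustrative examples (not in the original module):
#   client_label_map("website")              # -> 'Website'
#   client_label_map("ZulipGitHubWebhook")   # -> 'GitHub webhook'
#   client_label_map("desktop app 0.5.1")    # -> 'Old desktop app'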
def rewrite_client_arrays(value_arrays: Dict[str, List[int]]) -> Dict[str, List[int]]:
mapped_arrays: Dict[str, List[int]] = {}
for label, array in value_arrays.items():
mapped_label = client_label_map(label)
if mapped_label in mapped_arrays:
for i in range(0, len(array)):
mapped_arrays[mapped_label][i] += value_arrays[label][i]
else:
mapped_arrays[mapped_label] = [value_arrays[label][i] for i in range(0, len(array))]
return mapped_arrays
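# Illustrative example (hypothetical input): labels that map to the same display name
# are summed element-wise.
#   rewrite_client_arrays({"desktop app Linux": [1, 2], "desktop app Windows": [3, 4]})
#   # -> {'Old desktop app': [4, 6]}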
def get_time_series_by_subgroup(
stat: CountStat,
table: Type[BaseCount],
key_id: int,
end_times: List[datetime],
subgroup_to_label: Dict[Optional[str], str],
include_empty_subgroups: bool,
) -> Dict[str, List[int]]:
queryset = (
table_filtered_to_id(table, key_id)
.filter(property=stat.property)
.values_list("subgroup", "end_time", "value")
)
value_dicts: Dict[Optional[str], Dict[datetime, int]] = defaultdict(lambda: defaultdict(int))
for subgroup, end_time, value in queryset:
value_dicts[subgroup][end_time] = value
value_arrays = {}
for subgroup, label in subgroup_to_label.items():
if (subgroup in value_dicts) or include_empty_subgroups:
value_arrays[label] = [value_dicts[subgroup][end_time] for end_time in end_times]
if stat == COUNT_STATS["messages_sent:client:day"]:
# HACK: We rewrite these arrays to collapse the Client objects
# with similar names into a single sum, and generally give
# them better names
return rewrite_client_arrays(value_arrays)
return value_arrays
eastern_tz = pytz.timezone("US/Eastern")
def make_table(
title: str, cols: Sequence[str], rows: Sequence[Any], has_row_class: bool = False
) -> str:
if not has_row_class:
def fix_row(row: Any) -> Dict[str, Any]:
return dict(cells=row, row_class=None)
rows = list(map(fix_row, rows))
data = dict(title=title, cols=cols, rows=rows)
content = loader.render_to_string(
"analytics/ad_hoc_query.html",
dict(data=data),
)
return content
def dictfetchall(cursor: connection.cursor) -> List[Dict[str, Any]]:
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [dict(zip((col[0] for col in desc), row)) for row in cursor.fetchall()]
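# Illustrative example (hypothetical cursor state): with cursor.description naming the
# columns ('string_id', 'age') and a fetched row ('zulip', 3), dictfetchall returns
# [{'string_id': 'zulip', 'age': 3}].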
def get_realm_day_counts() -> Dict[str, Dict[str, str]]:
query = SQL(
"""
select
r.string_id,
(now()::date - date_sent::date) age,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
join zerver_client c on c.id = m.sending_client_id
where
(not up.is_bot)
and
date_sent > now()::date - interval '8 day'
and
c.name not in ('zephyr_mirror', 'ZulipMonitoring')
group by
r.string_id,
age
order by
r.string_id,
age
"""
)
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
counts: Dict[str, Dict[int, int]] = defaultdict(dict)
for row in rows:
counts[row["string_id"]][row["age"]] = row["cnt"]
result = {}
for string_id in counts:
raw_cnts = [counts[string_id].get(age, 0) for age in range(8)]
min_cnt = min(raw_cnts[1:])
max_cnt = max(raw_cnts[1:])
def format_count(cnt: int, style: Optional[str] = None) -> str:
if style is not None:
good_bad = style
elif cnt == min_cnt:
good_bad = "bad"
elif cnt == max_cnt:
good_bad = "good"
else:
good_bad = "neutral"
return f'<td class="number {good_bad}">{cnt}</td>'
cnts = format_count(raw_cnts[0], "neutral") + "".join(map(format_count, raw_cnts[1:]))
result[string_id] = dict(cnts=cnts)
return result
def get_plan_name(plan_type: int) -> str:
return ["", "self hosted", "limited", "standard", "open source"][plan_type]
def realm_summary_table(realm_minutes: Dict[str, float]) -> str:
now = timezone_now()
query = SQL(
"""
SELECT
realm.string_id,
realm.date_created,
realm.plan_type,
coalesce(wau_table.value, 0) wau_count,
coalesce(dau_table.value, 0) dau_count,
coalesce(user_count_table.value, 0) user_profile_count,
coalesce(bot_count_table.value, 0) bot_count
FROM
zerver_realm as realm
LEFT OUTER JOIN (
SELECT
value _14day_active_humans,
realm_id
from
analytics_realmcount
WHERE
property = 'realm_active_humans::day'
AND end_time = %(realm_active_humans_end_time)s
) as _14day_active_humans_table ON realm.id = _14day_active_humans_table.realm_id
LEFT OUTER JOIN (
SELECT
value,
realm_id
from
analytics_realmcount
WHERE
property = '7day_actives::day'
AND end_time = %(seven_day_actives_end_time)s
) as wau_table ON realm.id = wau_table.realm_id
LEFT OUTER JOIN (
SELECT
value,
realm_id
from
analytics_realmcount
WHERE
property = '1day_actives::day'
AND end_time = %(one_day_actives_end_time)s
) as dau_table ON realm.id = dau_table.realm_id
LEFT OUTER JOIN (
SELECT
value,
realm_id
from
analytics_realmcount
WHERE
property = 'active_users_audit:is_bot:day'
AND subgroup = 'false'
AND end_time = %(active_users_audit_end_time)s
) as user_count_table ON realm.id = user_count_table.realm_id
LEFT OUTER JOIN (
SELECT
value,
realm_id
from
analytics_realmcount
WHERE
property = 'active_users_audit:is_bot:day'
AND subgroup = 'true'
AND end_time = %(active_users_audit_end_time)s
) as bot_count_table ON realm.id = bot_count_table.realm_id
WHERE
_14day_active_humans IS NOT NULL
or realm.plan_type = 3
ORDER BY
dau_count DESC,
string_id ASC
"""
)
cursor = connection.cursor()
cursor.execute(
query,
{
"realm_active_humans_end_time": COUNT_STATS[
"realm_active_humans::day"
].last_successful_fill(),
"seven_day_actives_end_time": COUNT_STATS["7day_actives::day"].last_successful_fill(),
"one_day_actives_end_time": COUNT_STATS["1day_actives::day"].last_successful_fill(),
"active_users_audit_end_time": COUNT_STATS[
"active_users_audit:is_bot:day"
].last_successful_fill(),
},
)
rows = dictfetchall(cursor)
cursor.close()
# Fetch all the realm administrator users
realm_admins: Dict[str, List[str]] = defaultdict(list)
for up in UserProfile.objects.select_related("realm").filter(
role=UserProfile.ROLE_REALM_ADMINISTRATOR,
is_active=True,
):
realm_admins[up.realm.string_id].append(up.delivery_email)
for row in rows:
row["date_created_day"] = row["date_created"].strftime("%Y-%m-%d")
row["plan_type_string"] = get_plan_name(row["plan_type"])
row["age_days"] = int((now - row["date_created"]).total_seconds() / 86400)
row["is_new"] = row["age_days"] < 12 * 7
row["realm_admin_email"] = ", ".join(realm_admins[row["string_id"]])
# get messages sent per day
counts = get_realm_day_counts()
for row in rows:
try:
row["history"] = counts[row["string_id"]]["cnts"]
except Exception:
row["history"] = ""
# estimate annual subscription revenue
total_amount = 0
if settings.BILLING_ENABLED:
from corporate.lib.stripe import estimate_annual_recurring_revenue_by_realm
estimated_arrs = estimate_annual_recurring_revenue_by_realm()
for row in rows:
if row["string_id"] in estimated_arrs:
row["amount"] = estimated_arrs[row["string_id"]]
total_amount += sum(estimated_arrs.values())
# augment data with realm_minutes
total_hours = 0.0
for row in rows:
string_id = row["string_id"]
minutes = realm_minutes.get(string_id, 0.0)
hours = minutes / 60.0
total_hours += hours
row["hours"] = str(int(hours))
try:
row["hours_per_user"] = "{:.1f}".format(hours / row["dau_count"])
except Exception:
pass
# formatting
for row in rows:
row["stats_link"] = realm_stats_link(row["string_id"])
row["string_id"] = realm_activity_link(row["string_id"])
# Count active sites
def meets_goal(row: Dict[str, int]) -> bool:
return row["dau_count"] >= 5
num_active_sites = len(list(filter(meets_goal, rows)))
# create totals
total_dau_count = 0
total_user_profile_count = 0
total_bot_count = 0
total_wau_count = 0
for row in rows:
total_dau_count += int(row["dau_count"])
total_user_profile_count += int(row["user_profile_count"])
total_bot_count += int(row["bot_count"])
total_wau_count += int(row["wau_count"])
total_row = dict(
string_id="Total",
plan_type_string="",
amount=total_amount,
stats_link="",
date_created_day="",
realm_admin_email="",
dau_count=total_dau_count,
user_profile_count=total_user_profile_count,
bot_count=total_bot_count,
hours=int(total_hours),
wau_count=total_wau_count,
)
rows.insert(0, total_row)
content = loader.render_to_string(
"analytics/realm_summary_table.html",
dict(rows=rows, num_active_sites=num_active_sites, utctime=now.strftime("%Y-%m-%d %H:%MZ")),
)
return content
def user_activity_intervals() -> Tuple[mark_safe, Dict[str, float]]:
day_end = timestamp_to_datetime(time.time())
day_start = day_end - timedelta(hours=24)
output = "Per-user online duration for the last 24 hours:\n"
total_duration = timedelta(0)
all_intervals = (
UserActivityInterval.objects.filter(
end__gte=day_start,
start__lte=day_end,
)
.select_related(
"user_profile",
"user_profile__realm",
)
.only(
"start",
"end",
"user_profile__delivery_email",
"user_profile__realm__string_id",
)
.order_by(
"user_profile__realm__string_id",
"user_profile__delivery_email",
)
)
by_string_id = lambda row: row.user_profile.realm.string_id
by_email = lambda row: row.user_profile.delivery_email
realm_minutes = {}
for string_id, realm_intervals in itertools.groupby(all_intervals, by_string_id):
realm_duration = timedelta(0)
output += f"<hr>{string_id}\n"
for email, intervals in itertools.groupby(realm_intervals, by_email):
duration = timedelta(0)
for interval in intervals:
start = max(day_start, interval.start)
end = min(day_end, interval.end)
duration += end - start
total_duration += duration
realm_duration += duration
output += f" {email:<37}{duration}\n"
realm_minutes[string_id] = realm_duration.total_seconds() / 60
output += f"\nTotal duration: {total_duration}\n"
output += f"\nTotal duration in minutes: {total_duration.total_seconds() / 60.}\n"
output += f"Total duration amortized to a month: {total_duration.total_seconds() * 30. / 60.}"
content = mark_safe("<pre>" + output + "</pre>")
return content, realm_minutes
def sent_messages_report(realm: str) -> str:
title = "Recently sent messages for " + realm
cols = [
"Date",
"Humans",
"Bots",
]
query = SQL(
"""
select
series.day::date,
humans.cnt,
bots.cnt
from (
select generate_series(
(now()::date - interval '2 week'),
now()::date,
interval '1 day'
) as day
) as series
left join (
select
date_sent::date date_sent,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
(not up.is_bot)
and
date_sent > now() - interval '2 week'
group by
date_sent::date
order by
date_sent::date
) humans on
series.day = humans.date_sent
left join (
select
date_sent::date date_sent,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
up.is_bot
and
date_sent > now() - interval '2 week'
group by
date_sent::date
order by
date_sent::date
) bots on
series.day = bots.date_sent
"""
)
cursor = connection.cursor()
cursor.execute(query, [realm, realm])
rows = cursor.fetchall()
cursor.close()
return make_table(title, cols, rows)
def ad_hoc_queries() -> List[Dict[str, str]]:
def get_page(
query: Composable, cols: Sequence[str], title: str, totals_columns: Sequence[int] = []
) -> Dict[str, str]:
cursor = connection.cursor()
cursor.execute(query)
rows = cursor.fetchall()
rows = list(map(list, rows))
cursor.close()
def fix_rows(
i: int, fixup_func: Union[Callable[[Realm], mark_safe], Callable[[datetime], str]]
) -> None:
for row in rows:
row[i] = fixup_func(row[i])
total_row = []
for i, col in enumerate(cols):
if col == "Realm":
fix_rows(i, realm_activity_link)
elif col in ["Last time", "Last visit"]:
fix_rows(i, format_date_for_activity_reports)
elif col == "Hostname":
for row in rows:
row[i] = remote_installation_stats_link(row[0], row[i])
if len(totals_columns) > 0:
if i == 0:
total_row.append("Total")
elif i in totals_columns:
total_row.append(str(sum(row[i] for row in rows if row[i] is not None)))
else:
total_row.append("")
if len(totals_columns) > 0:
rows.insert(0, total_row)
content = make_table(title, cols, rows)
return dict(
content=content,
title=title,
)
pages = []
###
for mobile_type in ["Android", "ZulipiOS"]:
title = f"{mobile_type} usage"
query = SQL(
"""
select
realm.string_id,
up.id user_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like {mobile_type}
group by string_id, up.id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, up.id, client.name
"""
).format(
mobile_type=Literal(mobile_type),
)
cols = [
"Realm",
"User id",
"Name",
"Hits",
"Last time",
]
pages.append(get_page(query, cols, title))
###
title = "Desktop users"
query = SQL(
"""
select
realm.string_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like 'desktop%%'
group by string_id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, client.name
"""
)
cols = [
"Realm",
"Client",
"Hits",
"Last time",
]
pages.append(get_page(query, cols, title))
###
title = "Integrations by realm"
query = SQL(
"""
select
realm.string_id,
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by string_id, client_name
having max(last_visit) > now() - interval '2 week'
order by string_id, client_name
"""
)
cols = [
"Realm",
"Client",
"Hits",
"Last time",
]
pages.append(get_page(query, cols, title))
###
title = "Integrations by client"
query = SQL(
"""
select
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
realm.string_id,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by client_name, string_id
having max(last_visit) > now() - interval '2 week'
order by client_name, string_id
"""
)
cols = [
"Client",
"Realm",
"Hits",
"Last time",
]
pages.append(get_page(query, cols, title))
title = "Remote Zulip servers"
query = SQL(
"""
with icount as (
select
server_id,
max(value) as max_value,
max(end_time) as max_end_time
from zilencer_remoteinstallationcount
where
property='active_users:is_bot:day'
and subgroup='false'
group by server_id
),
remote_push_devices as (
select server_id, count(distinct(user_id)) as push_user_count from zilencer_remotepushdevicetoken
group by server_id
)
select
rserver.id,
rserver.hostname,
rserver.contact_email,
max_value,
push_user_count,
max_end_time
from zilencer_remotezulipserver rserver
left join icount on icount.server_id = rserver.id
left join remote_push_devices on remote_push_devices.server_id = rserver.id
order by max_value DESC NULLS LAST, push_user_count DESC NULLS LAST
"""
)
cols = [
"ID",
"Hostname",
"Contact email",
"Analytics users",
"Mobile users",
"Last update time",
]
pages.append(get_page(query, cols, title, totals_columns=[3, 4]))
return pages
@require_server_admin
@has_request_variables
def get_activity(request: HttpRequest) -> HttpResponse:
duration_content, realm_minutes = user_activity_intervals()
counts_content: str = realm_summary_table(realm_minutes)
data = [
("Counts", counts_content),
("Durations", duration_content),
]
for page in ad_hoc_queries():
data.append((page["title"], page["content"]))
title = "Activity"
return render(
request,
"analytics/activity.html",
context=dict(data=data, title=title, is_home=True),
)
def get_confirmations(
types: List[int], object_ids: List[int], hostname: Optional[str] = None
) -> List[Dict[str, Any]]:
lowest_datetime = timezone_now() - timedelta(days=30)
confirmations = Confirmation.objects.filter(
type__in=types, object_id__in=object_ids, date_sent__gte=lowest_datetime
)
confirmation_dicts = []
for confirmation in confirmations:
realm = confirmation.realm
content_object = confirmation.content_object
type = confirmation.type
days_to_activate = _properties[type].validity_in_days
expiry_date = confirmation.date_sent + timedelta(days=days_to_activate)
if hasattr(content_object, "status"):
if content_object.status == STATUS_ACTIVE:
link_status = "Link has been clicked"
else:
link_status = "Link has never been clicked"
else:
link_status = ""
if timezone_now() < expiry_date:
expires_in = timesince(confirmation.date_sent, expiry_date)
else:
expires_in = "Expired"
url = confirmation_url(confirmation.confirmation_key, realm, type)
confirmation_dicts.append(
{
"object": confirmation.content_object,
"url": url,
"type": type,
"link_status": link_status,
"expires_in": expires_in,
}
)
return confirmation_dicts
@require_server_admin
def support(request: HttpRequest) -> HttpResponse:
context: Dict[str, Any] = {}
if "success_message" in request.session:
context["success_message"] = request.session["success_message"]
del request.session["success_message"]
if settings.BILLING_ENABLED and request.method == "POST":
# We check that request.POST only has two keys in it: The
# realm_id and a field to change.
keys = set(request.POST.keys())
if "csrfmiddlewaretoken" in keys:
keys.remove("csrfmiddlewaretoken")
if len(keys) != 2:
return json_error(_("Invalid parameters"))
realm_id = request.POST.get("realm_id")
realm = Realm.objects.get(id=realm_id)
if request.POST.get("plan_type", None) is not None:
new_plan_type = int(request.POST.get("plan_type"))
current_plan_type = realm.plan_type
do_change_plan_type(realm, new_plan_type)
msg = f"Plan type of {realm.string_id} changed from {get_plan_name(current_plan_type)} to {get_plan_name(new_plan_type)} "
context["success_message"] = msg
elif request.POST.get("discount", None) is not None:
new_discount = Decimal(request.POST.get("discount"))
current_discount = get_discount_for_realm(realm) or 0
attach_discount_to_realm(realm, new_discount)
context[
"success_message"
] = f"Discount of {realm.string_id} changed to {new_discount}% from {current_discount}%."
elif request.POST.get("new_subdomain", None) is not None:
new_subdomain = request.POST.get("new_subdomain")
old_subdomain = realm.string_id
try:
check_subdomain_available(new_subdomain)
except ValidationError as error:
context["error_message"] = error.message
else:
do_change_realm_subdomain(realm, new_subdomain)
request.session[
"success_message"
] = f"Subdomain changed from {old_subdomain} to {new_subdomain}"
return HttpResponseRedirect(
reverse("support") + "?" + urlencode({"q": new_subdomain})
)
elif request.POST.get("status", None) is not None:
status = request.POST.get("status")
if status == "active":
do_send_realm_reactivation_email(realm)
context[
"success_message"
] = f"Realm reactivation email sent to admins of {realm.string_id}."
elif status == "deactivated":
do_deactivate_realm(realm, request.user)
context["success_message"] = f"{realm.string_id} deactivated."
elif request.POST.get("billing_method", None) is not None:
billing_method = request.POST.get("billing_method")
if billing_method == "send_invoice":
update_billing_method_of_current_plan(realm, charge_automatically=False)
context[
"success_message"
] = f"Billing method of {realm.string_id} updated to pay by invoice."
elif billing_method == "charge_automatically":
update_billing_method_of_current_plan(realm, charge_automatically=True)
context[
"success_message"
] = f"Billing method of {realm.string_id} updated to charge automatically."
elif request.POST.get("sponsorship_pending", None) is not None:
sponsorship_pending = request.POST.get("sponsorship_pending")
if sponsorship_pending == "true":
update_sponsorship_status(realm, True)
context["success_message"] = f"{realm.string_id} marked as pending sponsorship."
elif sponsorship_pending == "false":
update_sponsorship_status(realm, False)
context["success_message"] = f"{realm.string_id} is no longer pending sponsorship."
elif request.POST.get("approve_sponsorship") is not None:
if request.POST.get("approve_sponsorship") == "approve_sponsorship":
approve_sponsorship(realm)
context["success_message"] = f"Sponsorship approved for {realm.string_id}"
elif request.POST.get("downgrade_method", None) is not None:
downgrade_method = request.POST.get("downgrade_method")
if downgrade_method == "downgrade_at_billing_cycle_end":
downgrade_at_the_end_of_billing_cycle(realm)
context[
"success_message"
] = f"{realm.string_id} marked for downgrade at the end of billing cycle"
elif downgrade_method == "downgrade_now_without_additional_licenses":
downgrade_now_without_creating_additional_invoices(realm)
context[
"success_message"
] = f"{realm.string_id} downgraded without creating additional invoices"
elif downgrade_method == "downgrade_now_void_open_invoices":
downgrade_now_without_creating_additional_invoices(realm)
voided_invoices_count = void_all_open_invoices(realm)
context[
"success_message"
] = f"{realm.string_id} downgraded and voided {voided_invoices_count} open invoices"
elif request.POST.get("scrub_realm", None) is not None:
if request.POST.get("scrub_realm") == "scrub_realm":
do_scrub_realm(realm, acting_user=request.user)
context["success_message"] = f"{realm.string_id} scrubbed."
query = request.GET.get("q", None)
if query:
key_words = get_invitee_emails_set(query)
users = set(UserProfile.objects.filter(delivery_email__in=key_words))
realms = set(Realm.objects.filter(string_id__in=key_words))
for key_word in key_words:
try:
URLValidator()(key_word)
parse_result = urllib.parse.urlparse(key_word)
hostname = parse_result.hostname
assert hostname is not None
if parse_result.port:
hostname = f"{hostname}:{parse_result.port}"
subdomain = get_subdomain_from_hostname(hostname)
try:
realms.add(get_realm(subdomain))
except Realm.DoesNotExist:
pass
except ValidationError:
users.update(UserProfile.objects.filter(full_name__iexact=key_word))
for realm in realms:
realm.customer = get_customer_by_realm(realm)
current_plan = get_current_plan_by_realm(realm)
if current_plan is not None:
new_plan, last_ledger_entry = make_end_of_cycle_updates_if_needed(
current_plan, timezone_now()
)
if last_ledger_entry is not None:
if new_plan is not None:
realm.current_plan = new_plan
else:
realm.current_plan = current_plan
realm.current_plan.licenses = last_ledger_entry.licenses
realm.current_plan.licenses_used = get_latest_seat_count(realm)
# full_names can have , in them
users.update(UserProfile.objects.filter(full_name__iexact=query))
context["users"] = users
context["realms"] = realms
confirmations: List[Dict[str, Any]] = []
preregistration_users = PreregistrationUser.objects.filter(email__in=key_words)
confirmations += get_confirmations(
[Confirmation.USER_REGISTRATION, Confirmation.INVITATION, Confirmation.REALM_CREATION],
preregistration_users,
hostname=request.get_host(),
)
multiuse_invites = MultiuseInvite.objects.filter(realm__in=realms)
confirmations += get_confirmations([Confirmation.MULTIUSE_INVITE], multiuse_invites)
confirmations += get_confirmations(
[Confirmation.REALM_REACTIVATION], [realm.id for realm in realms]
)
context["confirmations"] = confirmations
def realm_admin_emails(realm: Realm) -> str:
return ", ".join(
realm.get_human_admin_users()
.order_by("delivery_email")
.values_list("delivery_email", flat=True)
)
context["realm_admin_emails"] = realm_admin_emails
context["get_discount_for_realm"] = get_discount_for_realm
context["realm_icon_url"] = realm_icon_url
context["Confirmation"] = Confirmation
return render(request, "analytics/support.html", context=context)
def get_user_activity_records_for_realm(realm: str, is_bot: bool) -> QuerySet:
fields = [
"user_profile__full_name",
"user_profile__delivery_email",
"query",
"client__name",
"count",
"last_visit",
]
records = UserActivity.objects.filter(
user_profile__realm__string_id=realm,
user_profile__is_active=True,
user_profile__is_bot=is_bot,
)
records = records.order_by("user_profile__delivery_email", "-last_visit")
records = records.select_related("user_profile", "client").only(*fields)
return records
def get_user_activity_records_for_email(email: str) -> List[QuerySet]:
fields = [
"user_profile__full_name",
"query",
"client__name",
"count",
"last_visit",
]
records = UserActivity.objects.filter(
user_profile__delivery_email=email,
)
records = records.order_by("-last_visit")
records = records.select_related("user_profile", "client").only(*fields)
return records
def raw_user_activity_table(records: List[QuerySet]) -> str:
cols = [
"query",
"client",
"count",
"last_visit",
]
def row(record: QuerySet) -> List[Any]:
return [
record.query,
record.client.name,
record.count,
format_date_for_activity_reports(record.last_visit),
]
rows = list(map(row, records))
title = "Raw data"
return make_table(title, cols, rows)
def get_user_activity_summary(records: List[QuerySet]) -> Dict[str, Dict[str, Any]]:
    #: `Any` used above should be `Union[int, datetime]`.
    #: However, the current version of `Union` does not work inside other functions.
    #: We could use something like
    #: `Union[Dict[str, Dict[str, int]], Dict[str, Dict[str, datetime]]]`,
    #: but that long `Union` would then have to be carried through all the inner functions.
summary: Dict[str, Dict[str, Any]] = {}
def update(action: str, record: QuerySet) -> None:
if action not in summary:
summary[action] = dict(
count=record.count,
last_visit=record.last_visit,
)
else:
summary[action]["count"] += record.count
summary[action]["last_visit"] = max(
summary[action]["last_visit"],
record.last_visit,
)
if records:
summary["name"] = records[0].user_profile.full_name
for record in records:
client = record.client.name
query = record.query
update("use", record)
if client == "API":
m = re.match("/api/.*/external/(.*)", query)
if m:
client = m.group(1)
update(client, record)
if client.startswith("desktop"):
update("desktop", record)
if client == "website":
update("website", record)
if ("send_message" in query) or re.search("/api/.*/external/.*", query):
update("send", record)
if query in [
"/json/update_pointer",
"/json/users/me/pointer",
"/api/v1/update_pointer",
"update_pointer_backend",
]:
update("pointer", record)
update(client, record)
return summary
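# Illustrative shape of the summary returned above (names and counts are hypothetical):
#     {'name': 'Full Name',
#      'use': {'count': 42, 'last_visit': <datetime of most recent request>},
#      'send': {'count': 7, 'last_visit': <datetime>},
#      'website': {'count': 30, 'last_visit': <datetime>},
#      ...}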
def format_date_for_activity_reports(date: Optional[datetime]) -> str:
if date:
return date.astimezone(eastern_tz).strftime("%Y-%m-%d %H:%M")
else:
return ""
def user_activity_link(email: str) -> mark_safe:
url = reverse(get_user_activity, kwargs=dict(email=email))
email_link = f'<a href="{url}">{email}</a>'
return mark_safe(email_link)
def realm_activity_link(realm_str: str) -> mark_safe:
url = reverse(get_realm_activity, kwargs=dict(realm_str=realm_str))
realm_link = f'<a href="{url}">{realm_str}</a>'
return mark_safe(realm_link)
def realm_stats_link(realm_str: str) -> mark_safe:
url = reverse(stats_for_realm, kwargs=dict(realm_str=realm_str))
stats_link = f'<a href="{url}"><i class="fa fa-pie-chart"></i>{realm_str}</a>'
return mark_safe(stats_link)
def remote_installation_stats_link(server_id: int, hostname: str) -> mark_safe:
url = reverse(stats_for_remote_installation, kwargs=dict(remote_server_id=server_id))
stats_link = f'<a href="{url}"><i class="fa fa-pie-chart"></i>{hostname}</a>'
return mark_safe(stats_link)
def realm_client_table(user_summaries: Dict[str, Dict[str, Dict[str, Any]]]) -> str:
exclude_keys = [
"internal",
"name",
"use",
"send",
"pointer",
"website",
"desktop",
]
rows = []
for email, user_summary in user_summaries.items():
email_link = user_activity_link(email)
name = user_summary["name"]
for k, v in user_summary.items():
if k in exclude_keys:
continue
client = k
count = v["count"]
last_visit = v["last_visit"]
row = [
format_date_for_activity_reports(last_visit),
client,
name,
email_link,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
"Last visit",
"Client",
"Name",
"Email",
"Count",
]
title = "Clients"
return make_table(title, cols, rows)
def user_activity_summary_table(user_summary: Dict[str, Dict[str, Any]]) -> str:
rows = []
for k, v in user_summary.items():
if k == "name":
continue
client = k
count = v["count"]
last_visit = v["last_visit"]
row = [
format_date_for_activity_reports(last_visit),
client,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
"last_visit",
"client",
"count",
]
title = "User activity"
return make_table(title, cols, rows)
def realm_user_summary_table(
all_records: List[QuerySet], admin_emails: Set[str]
) -> Tuple[Dict[str, Dict[str, Any]], str]:
user_records = {}
def by_email(record: QuerySet) -> str:
return record.user_profile.delivery_email
for email, records in itertools.groupby(all_records, by_email):
user_records[email] = get_user_activity_summary(list(records))
def get_last_visit(user_summary: Dict[str, Dict[str, datetime]], k: str) -> Optional[datetime]:
if k in user_summary:
return user_summary[k]["last_visit"]
else:
return None
def get_count(user_summary: Dict[str, Dict[str, str]], k: str) -> str:
if k in user_summary:
return user_summary[k]["count"]
else:
return ""
def is_recent(val: Optional[datetime]) -> bool:
age = timezone_now() - val
return age.total_seconds() < 5 * 60
rows = []
for email, user_summary in user_records.items():
email_link = user_activity_link(email)
sent_count = get_count(user_summary, "send")
cells = [user_summary["name"], email_link, sent_count]
row_class = ""
for field in ["use", "send", "pointer", "desktop", "ZulipiOS", "Android"]:
visit = get_last_visit(user_summary, field)
if field == "use":
if visit and is_recent(visit):
row_class += " recently_active"
if email in admin_emails:
row_class += " admin"
val = format_date_for_activity_reports(visit)
cells.append(val)
row = dict(cells=cells, row_class=row_class)
rows.append(row)
def by_used_time(row: Dict[str, Any]) -> str:
return row["cells"][3]
rows = sorted(rows, key=by_used_time, reverse=True)
cols = [
"Name",
"Email",
"Total sent",
"Heard from",
"Message sent",
"Pointer motion",
"Desktop",
"ZulipiOS",
"Android",
]
title = "Summary"
content = make_table(title, cols, rows, has_row_class=True)
return user_records, content
@require_server_admin
def get_realm_activity(request: HttpRequest, realm_str: str) -> HttpResponse:
data: List[Tuple[str, str]] = []
all_user_records: Dict[str, Any] = {}
try:
admins = Realm.objects.get(string_id=realm_str).get_human_admin_users()
except Realm.DoesNotExist:
return HttpResponseNotFound(f"Realm {realm_str} does not exist")
admin_emails = {admin.delivery_email for admin in admins}
for is_bot, page_title in [(False, "Humans"), (True, "Bots")]:
all_records = list(get_user_activity_records_for_realm(realm_str, is_bot))
user_records, content = realm_user_summary_table(all_records, admin_emails)
all_user_records.update(user_records)
data += [(page_title, content)]
page_title = "Clients"
content = realm_client_table(all_user_records)
data += [(page_title, content)]
page_title = "History"
content = sent_messages_report(realm_str)
data += [(page_title, content)]
title = realm_str
return render(
request,
"analytics/activity.html",
context=dict(data=data, realm_link=None, title=title),
)
@require_server_admin
def get_user_activity(request: HttpRequest, email: str) -> HttpResponse:
records = get_user_activity_records_for_email(email)
data: List[Tuple[str, str]] = []
user_summary = get_user_activity_summary(records)
content = user_activity_summary_table(user_summary)
data += [("Summary", content)]
content = raw_user_activity_table(records)
data += [("Info", content)]
title = email
return render(
request,
"analytics/activity.html",
context=dict(data=data, title=title),
)
| 33.818999 | 134 | 0.611778 | ["Apache-2.0"] | ArunSankarKs/zulip | analytics/views.py | 60,164 | Python |
# Postgres' `clone_schema` adapted to work with schema names containing
# capital letters or `-`
# Source: IdanDavidi, https://stackoverflow.com/a/48732283/6412017
from django.db import connection, transaction, ProgrammingError
CLONE_SCHEMA_FUNCTION = """
-- Function: clone_schema(text, text)
-- DROP FUNCTION clone_schema(text, text);
CREATE OR REPLACE FUNCTION clone_schema(
source_schema text,
dest_schema text,
include_recs boolean)
RETURNS void AS
$BODY$
-- This function will clone all sequences, tables, data, views & functions from any existing schema to a new one
-- SAMPLE CALL:
-- SELECT clone_schema('public', 'new_schema', TRUE);
DECLARE
src_oid oid;
tbl_oid oid;
func_oid oid;
object text;
buffer text;
srctbl text;
default_ text;
column_ text;
qry text;
dest_qry text;
v_def text;
seqval bigint;
sq_last_value bigint;
sq_max_value bigint;
sq_start_value bigint;
sq_increment_by bigint;
sq_min_value bigint;
sq_cache_value bigint;
sq_log_cnt bigint;
sq_is_called boolean;
sq_is_cycled boolean;
sq_cycled char(10);
BEGIN
-- Check that source_schema exists
SELECT oid INTO src_oid
FROM pg_namespace
WHERE nspname = source_schema;
IF NOT FOUND
THEN
RAISE EXCEPTION 'source schema % does not exist!', source_schema;
RETURN ;
END IF;
-- Check that dest_schema does not yet exist
PERFORM nspname
FROM pg_namespace
WHERE nspname = dest_schema;
IF FOUND
THEN
RAISE EXCEPTION 'dest schema % already exists!', dest_schema;
RETURN ;
END IF;
EXECUTE 'CREATE SCHEMA "' || dest_schema || '"';
-- Create sequences
-- TODO: Find a way to make this sequence's owner the correct table.
FOR object IN
SELECT sequence_name::text
FROM information_schema.sequences
WHERE sequence_schema = source_schema
LOOP
EXECUTE 'CREATE SEQUENCE "' || dest_schema || '".' || quote_ident(object);
srctbl := '"' || source_schema || '".' || quote_ident(object);
EXECUTE 'SELECT last_value, max_value, start_value, increment_by, min_value, cache_value, log_cnt, is_cycled, is_called
FROM "' || source_schema || '".' || quote_ident(object) || ';'
INTO sq_last_value, sq_max_value, sq_start_value, sq_increment_by, sq_min_value, sq_cache_value, sq_log_cnt, sq_is_cycled, sq_is_called ;
IF sq_is_cycled
THEN
sq_cycled := 'CYCLE';
ELSE
sq_cycled := 'NO CYCLE';
END IF;
EXECUTE 'ALTER SEQUENCE "' || dest_schema || '".' || quote_ident(object)
|| ' INCREMENT BY ' || sq_increment_by
|| ' MINVALUE ' || sq_min_value
|| ' MAXVALUE ' || sq_max_value
|| ' START WITH ' || sq_start_value
|| ' RESTART ' || sq_min_value
|| ' CACHE ' || sq_cache_value
|| sq_cycled || ' ;' ;
buffer := '"' || dest_schema || '".' || quote_ident(object);
IF include_recs
THEN
EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ;
ELSE
EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_start_value || ', ' || sq_is_called || ');' ;
END IF;
END LOOP;
-- Create tables
FOR object IN
SELECT TABLE_NAME::text
FROM information_schema.tables
WHERE table_schema = source_schema
AND table_type = 'BASE TABLE'
LOOP
buffer := '"' || dest_schema || '".' || quote_ident(object);
EXECUTE 'CREATE TABLE ' || buffer || ' (LIKE "' || source_schema || '".' || quote_ident(object)
|| ' INCLUDING ALL)';
IF include_recs
THEN
-- Insert records from source table
EXECUTE 'INSERT INTO ' || buffer || ' SELECT * FROM "' || source_schema || '".' || quote_ident(object) || ';';
END IF;
FOR column_, default_ IN
SELECT column_name::text,
REPLACE(column_default::text, source_schema, dest_schema)
FROM information_schema.COLUMNS
WHERE table_schema = dest_schema
AND TABLE_NAME = object
AND column_default LIKE 'nextval(%"' || source_schema || '"%::regclass)'
LOOP
EXECUTE 'ALTER TABLE ' || buffer || ' ALTER COLUMN ' || column_ || ' SET DEFAULT ' || default_;
END LOOP;
END LOOP;
-- add FK constraint
FOR qry IN
SELECT 'ALTER TABLE "' || dest_schema || '".' || quote_ident(rn.relname)
|| ' ADD CONSTRAINT ' || quote_ident(ct.conname) || ' ' || pg_get_constraintdef(ct.oid) || ';'
FROM pg_constraint ct
JOIN pg_class rn ON rn.oid = ct.conrelid
WHERE connamespace = src_oid
AND rn.relkind = 'r'
AND ct.contype = 'f'
LOOP
EXECUTE qry;
END LOOP;
-- Create views
FOR object IN
SELECT table_name::text,
view_definition
FROM information_schema.views
WHERE table_schema = source_schema
LOOP
buffer := '"' || dest_schema || '".' || quote_ident(object);
SELECT view_definition INTO v_def
FROM information_schema.views
WHERE table_schema = source_schema
AND table_name = quote_ident(object);
EXECUTE 'CREATE OR REPLACE VIEW ' || buffer || ' AS ' || v_def || ';' ;
END LOOP;
-- Create functions
FOR func_oid IN
SELECT oid
FROM pg_proc
WHERE pronamespace = src_oid
LOOP
SELECT pg_get_functiondef(func_oid) INTO qry;
SELECT replace(qry, source_schema, dest_schema) INTO dest_qry;
EXECUTE dest_qry;
END LOOP;
RETURN;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;
ALTER FUNCTION clone_schema(text, text, boolean)
OWNER TO postgres;
"""
class CloneSchema(object):
def _create_clone_schema_function(self):
"""
Creates a postgres function `clone_schema` that copies a schema and its
contents. Will replace any existing `clone_schema` functions owned by the
`postgres` superuser.
"""
cursor = connection.cursor()
cursor.execute(CLONE_SCHEMA_FUNCTION)
cursor.close()
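    # Hypothetical usage sketch for clone_schema() below (the schema names are
    # illustrative only, not part of this module):
    #     CloneSchema().clone_schema('public', 'tenant_copy')
    # which ultimately executes:
    #     SELECT clone_schema('public', 'tenant_copy', TRUE)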
def clone_schema(self, base_schema_name, new_schema_name):
"""
        Creates a new schema `new_schema_name` as a clone of the existing schema
        `base_schema_name`.
"""
connection.set_schema_to_public()
cursor = connection.cursor()
# check if the clone_schema function already exists in the db
try:
cursor.execute("SELECT 'clone_schema'::regproc")
except ProgrammingError:
self._create_clone_schema_function()
transaction.commit()
sql = 'SELECT clone_schema(%(base_schema)s, %(new_schema)s, TRUE)'
cursor.execute(
sql,
{'base_schema': base_schema_name, 'new_schema': new_schema_name}
)
        cursor.close()
| 29.892704 | 152 | 0.618378 | ["MIT"] | geekashu/django-tenants | django_tenants/clone.py | 6,965 | Python |
# coding: utf-8
"""
Titan API v1
# Introduction The Intel 471 API is organized around the principles of REST. Our API lets you gather results from our platform with anything that can send a HTTP request, including cURL and modern internet browsers. Access to this API requires an API token which is managed from your account settings. Intel 471 reserves the right to add fields to our API however we will provide backwards compatibility and older version support so that it will be possible to choose exact versions that provide a response with an older structure. This documentation tracks all API versions and it is possible to compare this version which has changes highlighted. Please consider not storing information provided by API locally as we constantly improving our data set and want you to have the most updated information. # Authentication Authenticate to the Intel 471 API by providing your API key in the request. Your API key carries many privileges so please do not expose them on public web resources. Authentication to the API occurs by providing your email address as the login and API key as password in the authorization header via HTTP Basic Auth. Your API key can be found in the [API](https://portal.intel471.com/api) section on the portal. # Accessing API ## Via internet browser Just open url: `https://api.intel471.com/v1/reports` Browser will ask for credentials, provide your email as login and API key as password. ## Via curl command line utility Type in terminal the following command: ``` curl -u <YOU EMAIL>:<YOUR API KEY> https://api.intel471.com/v1/reports ``` ## CURL usage examples This section covers some Watchers API requests. ### List watcher groups: Type in terminal the following command: *curl -u \"YOUR EMAIL\":\"YOUR API KEY\" https://api.intel471.com/v1/watcherGroups* ### Create watcher group: To create watcher group you need to pass a json body to request. Passing json body possible in two ways: #### Write json to request *curl -d'{\"name\": \"group_name\", \"description\": \"Description\"}' -X POST -u \"YOUR EMAIL\":\"YOUR API KEY\" https://api.intel471.com/v1/watcherGroups* #### Write json to file and call it *curl -d\"@json_file_name\" -X POST -u \"YOUR EMAIL\":\"YOUR API KEY\" https://api.intel471.com/v1/watcherGroups* ### Create free text search watcher: *curl -d'{\"type\": \"search\", \"freeTextPattern\": \"text to search\", \"notificationChannel\": \"website\"}' -X POST -u \"YOUR EMAIL\":\"YOUR API KEY\" https://api.intel471.com/v1/watcherGroups/\"GROUP UID\"/watchers* ### Create specific search watcher: *curl -d'{\"type\": \"search\", \"patterns\":[ { \"types\": \"Actor\" , \"pattern\": \"swisman\" } ], \"notificationChannel\": \"website\" }' -X POST -u \"YOUR EMAIL\":\"YOUR API KEY\" https://api.intel471.com/v1/watcherGroups/\"GROUP UID\"/watchers* ## Via Python Execute the following script: ``` import urllib2, base64 username = \"<YOU EMAIL>\" apikey = \"<YOUR API KEY>\" request = urllib2.Request(\"https://api.intel471.com/v1/reports\") base64string = base64.encodestring('%s:%s' % (username, apikey)).replace('\\n', '') request.add_header(\"Authorization\", \"Basic %s\" % base64string) result = urllib2.urlopen(request) response_in_json = result.read() print response_in_json ``` # API integration best practice with your application When accessing our API from your application don't do AJAX calls directly from web browser to https://api.intel471.com/. We do not allow CORS requests from browser due to potential security issues. 
Instead we suggest you look to establish a kind of a server side proxy in your application which will pass requests to our API. For example: you can send a request from browser javascript to your server side, for instance to url `/apiproxy/actors?actor=hacker` which will be internally passed to `https://api.intel471.com/v1/actors?actor=hacker` (with authentication headers added) and response will be sent back to the browser. # Versioning support We are consistently improving our API and occasionally bring in changes to the API based on customer feedback. The current API version can be seen in the drop down boxes for each version. We are providing API backwards compatibility when possible. All requests are prefixed with the major version number, for example `/v1`: ``` https://api.intel471.com/v1/reports ``` Different major versions are not compatible and imply significant response structure changes. Minor versions differences might include extra fields in response or provide new request parameter support. To stick to the specific version, just add the following extra parameter to the request, for example: `?v=1.2.0`. If you specify a not existing version, it will be brought down to the nearest existing one. For example, parameter `?v=1.5.4` will call API of version 1.3.0 — the latest available; `?v=1.2.9` will awake version 1.2.0 and so on. Omitting the version parameter from your request means you will always use the latest version of the API. We highly recommend you always add the version parameter to be safe on API updates and code your integration in a way to accept possible future extra fields added to the response object. ``` https://api.intel471.com/v1/tags?prettyPrint - will return response for the latest API version (v.1.1.0) https://api.intel471.com/v1/tags?prettyPrint&v=1.1.0 - absolutely the same request with the version explicitly specified https://api.intel471.com/v1/reports?prettyPrint&v=1.0.0 - will return response compatible with the older version ``` # noqa: E501
The version of the OpenAPI document: 1.18.0
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from titan_client.configuration import Configuration
class CredentialSetSchemaData(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'breach_date': 'int',
'collection_date': 'int',
'description': 'str',
'disclosure_date': 'int',
'external_sources': 'list[CredentialSetSchemaDataExternalSources]',
'internal_sources': 'list[CredentialSetSchemaDataInternalSources]',
'name': 'str',
'record_count': 'int',
'victims': 'list[CredentialSetSchemaDataVictims]'
}
attribute_map = {
'breach_date': 'breach_date',
'collection_date': 'collection_date',
'description': 'description',
'disclosure_date': 'disclosure_date',
'external_sources': 'external_sources',
'internal_sources': 'internal_sources',
'name': 'name',
'record_count': 'record_count',
'victims': 'victims'
}
def __init__(self, breach_date=None, collection_date=None, description=None, disclosure_date=None, external_sources=None, internal_sources=None, name=None, record_count=None, victims=None, local_vars_configuration=None): # noqa: E501
"""CredentialSetSchemaData - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._breach_date = None
self._collection_date = None
self._description = None
self._disclosure_date = None
self._external_sources = None
self._internal_sources = None
self._name = None
self._record_count = None
self._victims = None
self.discriminator = None
if breach_date is not None:
self.breach_date = breach_date
if collection_date is not None:
self.collection_date = collection_date
if description is not None:
self.description = description
if disclosure_date is not None:
self.disclosure_date = disclosure_date
if external_sources is not None:
self.external_sources = external_sources
if internal_sources is not None:
self.internal_sources = internal_sources
if name is not None:
self.name = name
if record_count is not None:
self.record_count = record_count
if victims is not None:
self.victims = victims
@property
def breach_date(self):
"""Gets the breach_date of this CredentialSetSchemaData. # noqa: E501
Date of breach. # noqa: E501
:return: The breach_date of this CredentialSetSchemaData. # noqa: E501
:rtype: int
"""
return self._breach_date
@breach_date.setter
def breach_date(self, breach_date):
"""Sets the breach_date of this CredentialSetSchemaData.
Date of breach. # noqa: E501
:param breach_date: The breach_date of this CredentialSetSchemaData. # noqa: E501
:type breach_date: int
"""
self._breach_date = breach_date
@property
def collection_date(self):
"""Gets the collection_date of this CredentialSetSchemaData. # noqa: E501
Date of collection. # noqa: E501
:return: The collection_date of this CredentialSetSchemaData. # noqa: E501
:rtype: int
"""
return self._collection_date
@collection_date.setter
def collection_date(self, collection_date):
"""Sets the collection_date of this CredentialSetSchemaData.
Date of collection. # noqa: E501
:param collection_date: The collection_date of this CredentialSetSchemaData. # noqa: E501
:type collection_date: int
"""
self._collection_date = collection_date
@property
def description(self):
"""Gets the description of this CredentialSetSchemaData. # noqa: E501
Description of the credential set. # noqa: E501
:return: The description of this CredentialSetSchemaData. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this CredentialSetSchemaData.
Description of the credential set. # noqa: E501
:param description: The description of this CredentialSetSchemaData. # noqa: E501
:type description: str
"""
self._description = description
@property
def disclosure_date(self):
"""Gets the disclosure_date of this CredentialSetSchemaData. # noqa: E501
Date of disclosure. # noqa: E501
:return: The disclosure_date of this CredentialSetSchemaData. # noqa: E501
:rtype: int
"""
return self._disclosure_date
@disclosure_date.setter
def disclosure_date(self, disclosure_date):
"""Sets the disclosure_date of this CredentialSetSchemaData.
Date of disclosure. # noqa: E501
:param disclosure_date: The disclosure_date of this CredentialSetSchemaData. # noqa: E501
:type disclosure_date: int
"""
self._disclosure_date = disclosure_date
@property
def external_sources(self):
"""Gets the external_sources of this CredentialSetSchemaData. # noqa: E501
List of external sources. # noqa: E501
:return: The external_sources of this CredentialSetSchemaData. # noqa: E501
:rtype: list[CredentialSetSchemaDataExternalSources]
"""
return self._external_sources
@external_sources.setter
def external_sources(self, external_sources):
"""Sets the external_sources of this CredentialSetSchemaData.
List of external sources. # noqa: E501
:param external_sources: The external_sources of this CredentialSetSchemaData. # noqa: E501
:type external_sources: list[CredentialSetSchemaDataExternalSources]
"""
self._external_sources = external_sources
@property
def internal_sources(self):
"""Gets the internal_sources of this CredentialSetSchemaData. # noqa: E501
List of internal sources. # noqa: E501
:return: The internal_sources of this CredentialSetSchemaData. # noqa: E501
:rtype: list[CredentialSetSchemaDataInternalSources]
"""
return self._internal_sources
@internal_sources.setter
def internal_sources(self, internal_sources):
"""Sets the internal_sources of this CredentialSetSchemaData.
List of internal sources. # noqa: E501
:param internal_sources: The internal_sources of this CredentialSetSchemaData. # noqa: E501
:type internal_sources: list[CredentialSetSchemaDataInternalSources]
"""
self._internal_sources = internal_sources
@property
def name(self):
"""Gets the name of this CredentialSetSchemaData. # noqa: E501
Name of the credential set. # noqa: E501
:return: The name of this CredentialSetSchemaData. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this CredentialSetSchemaData.
Name of the credential set. # noqa: E501
:param name: The name of this CredentialSetSchemaData. # noqa: E501
:type name: str
"""
self._name = name
@property
def record_count(self):
"""Gets the record_count of this CredentialSetSchemaData. # noqa: E501
Number of records. # noqa: E501
:return: The record_count of this CredentialSetSchemaData. # noqa: E501
:rtype: int
"""
return self._record_count
@record_count.setter
def record_count(self, record_count):
"""Sets the record_count of this CredentialSetSchemaData.
Number of records. # noqa: E501
:param record_count: The record_count of this CredentialSetSchemaData. # noqa: E501
:type record_count: int
"""
self._record_count = record_count
@property
def victims(self):
"""Gets the victims of this CredentialSetSchemaData. # noqa: E501
List of purported victims. # noqa: E501
:return: The victims of this CredentialSetSchemaData. # noqa: E501
:rtype: list[CredentialSetSchemaDataVictims]
"""
return self._victims
@victims.setter
def victims(self, victims):
"""Sets the victims of this CredentialSetSchemaData.
List of purported victims. # noqa: E501
:param victims: The victims of this CredentialSetSchemaData. # noqa: E501
:type victims: list[CredentialSetSchemaDataVictims]
"""
self._victims = victims
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CredentialSetSchemaData):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, CredentialSetSchemaData):
return True
return self.to_dict() != other.to_dict()
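# Minimal usage sketch (the field values below are illustrative, not real data):
#     data = CredentialSetSchemaData(name="example-breach", record_count=1000)
#     data.to_dict()  # plain dict of the model fields; unset fields come back as None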
| 46.877095 | 5,526 | 0.673877 | ["MIT"] | intel471/titan-client-python | titan_client/models/credential_set_schema_data.py | 16,784 | Python |
#!/bin/python3
# Copyright (C) 2020 Matheus Fernandes Bigolin <[email protected]>
# SPDX-License-Identifier: MIT
"""Day Thirteen, Shuttle Search."""
from sys import argv
from re import findall
from utils import open_file, arrange, usage_and_exit, product
def solve1(buses, est):
"""Get the earliest bus from the <buses> according to the <est>imate
time. """
arrival = [bus - est%bus for bus in buses]
earliest = min(arrival)
return min(arrival)*buses[arrival.index(earliest)]
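# Worked example for solve1 (illustrative values): with est = 939 and
# buses = [7, 13, 59, 31, 19] the waits are [6, 10, 5, 22, 11]; the shortest
# wait is 5 minutes on bus 59, so solve1 returns 5 * 59 = 295.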
def solve2(buses, depart):
    """Find the smallest timestamp such that each of the <buses> departs at the
    offset given by its index-matched entry in <depart>.
    Here I used the Chinese Remainder Theorem, a result well known to
    anyone who does competitive programming or discrete mathematics. """
# Desired residue class for each bus.
mods = [(b - d) % b for b, d in zip(buses, depart)]
# Cross multiplication of the elements in the sequence.
cross_mul = [product(buses)//b for b in buses]
return sum([c*pow(c, -1, b)*m for b, c, m
in zip(buses, cross_mul, mods)]) % product(buses)
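# Small worked example for solve2 (hypothetical input, not the real puzzle data):
# for buses = [7, 13] with depart = [0, 1] we need t % 7 == 0 and t % 13 == 12,
# so that bus 13 leaves one minute after bus 7. The CRT combination is t = 77
# (77 = 11*7 and 78 = 6*13), and solve2([7, 13], [0, 1]) returns 77.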
if __name__ == "__main__":
usage_and_exit(len(argv) != 2)
input_file = arrange(open_file(argv[1]))
bus_data = [int(b) for b in findall(r"\d+", input_file[1])]
estimate = int(input_file[0])
depart_data = [i for i,d in enumerate(findall(r"\w+", input_file[1]))
if d != "x"]
print(solve1(bus_data, estimate))
print(solve2(bus_data, depart_data))
| 28.277778 | 74 | 0.664047 | ["MIT"] | mfrdbigolin/AoC2020 | src/day13.py | 1,527 | Python |
"""Plot power curves for turbine 1 with data filters
"""
# import libraries
import pandas as pd
import numpy as np
import itertools
import matplotlib.pyplot as plt
# import data
df = pd.read_csv('data/SCADA_downtime_merged.csv', skip_blank_lines=True)
# list of turbines to plot (just 1)
list1 = [1]
# list of categories to plot
list2 = list(df['TurbineCategory_id'].unique())
# remove NaN from list
list2 = [g for g in list2 if g >= 0]
# round from float to integer
list2 = [a.astype(np.int64) for a in list2]
# sort categories in ascending order
list2 = sorted(list2, key=int)
# categories to remove from plot
list2 = [b for b in list2 if b not in (1, 12, 13, 14, 15, 17, 21, 22)]
list3 = list(itertools.product(list1, list2))
# filter only data for turbine x and plot
for (x, y) in list3:
df2x = df[(df['turbine_id'] == x)].copy()
# sort values by timestamp in descending order
df2x = df2x.sort_values(by='timestamp', ascending=False)
# copying fault to new column (mins)
# (fault when turbine category id is y)
def f(c):
if c['TurbineCategory_id'] == y:
return 0
else:
return 1
df2x['mins'] = df2x.apply(f, axis=1)
# reset index
df2x.reset_index(drop=True, inplace=True)
    # initialise the first cell: keep 0 if it is a fault row, otherwise use a
    # large sentinel value
    if df2x.loc[0, 'mins'] == 0:
        df2x.at[0, 'mins'] = 0
    else:
        df2x.at[0, 'mins'] = 999999999
# using previous value's row to evaluate time
for i, e in enumerate(df2x['mins']):
if e == 1:
df2x.at[i, 'mins'] = df2x.at[i-1, 'mins'] + 10
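    # In effect each 10-minute record is labelled with the minutes remaining until
    # the next category-y fault: a fault row keeps 0, the record just before it in
    # time gets 10, the one before that 20, and so on (illustrative values).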
# sort in ascending order
df2x = df2x.sort_values(by='timestamp')
# reset index
df2x.reset_index(drop=True, inplace=True)
# convert to hours and round to nearest hour
df2x['hours'] = df2x['mins'].astype(np.int64)
df2x['hours'] = df2x['hours']/60
df2x['hours'] = round(df2x['hours']).astype(np.int64)
# > 48 hours - label as normal (9999)
def f1(c):
if c['hours'] > 48:
return 9999
else:
return c['hours']
df2x['hours'] = df2x.apply(f1, axis=1)
# filter out curtailment - curtailed when turbine is pitching
# outside 0 deg <= normal <= 3.5 deg
def f2(c):
if 0 <= c['pitch'] <= 3.5 or c['hours'] != 9999 or (
(c['pitch'] > 3.5 or c['pitch'] < 0) and
(c['ap_av'] <= (.1 * df2x['ap_av'].max()) or
c['ap_av'] >= (.9 * df2x['ap_av'].max()))):
return 'normal'
else:
return 'curtailed'
df2x['curtailment'] = df2x.apply(f2, axis=1)
# filter unusual readings, i.e., for normal operation, power <= 0 in
# operating wind speeds, power > 100 before cut-in, runtime < 600
def f3(c):
if c['hours'] == 9999 and (
(3 < c['ws_av'] < 25 and (
c['ap_av'] <= 0 or c['runtime'] < 600 or
c['EnvironmentalCategory_id'] > 1 or
c['GridCategory_id'] > 1 or
c['InfrastructureCategory_id'] > 1 or
c['AvailabilityCategory_id'] == 2 or
12 <= c['TurbineCategory_id'] <= 15 or
21 <= c['TurbineCategory_id'] <= 22)) or
(c['ws_av'] < 3 and c['ap_av'] > 100)):
return 'unusual'
else:
return 'normal'
df2x['unusual'] = df2x.apply(f3, axis=1)
# filter data for plots
# normal w/ curtailment (all data)
df3 = df2x[df2x.hours == 9999]
# before fault
df4 = df2x[df2x.hours != 9999]
df4 = df4[df4.hours != 0]
# fault
df5 = df2x[df2x.hours == 0]
# normal w/o curtailment
df6 = df3[df3.curtailment == 'normal']
# normal w/o curtailment and unusual readings
df7 = df6[df6.unusual == 'normal']
# get x and y coordinates
# normal w/ curtailment
x1 = df3['ws_av']
y1 = df3['ap_av']
# before fault
x2 = df4['ws_av']
y2 = df4['ap_av']
# faulty
x3 = df5['ws_av']
y3 = df5['ap_av']
# normal w/o curtailment
x4 = df6['ws_av']
y4 = df6['ap_av']
# normal w/o curtailment and unusual readings
x5 = df7['ws_av']
y5 = df7['ap_av']
fig = plt.figure(figsize=(18.5, 4.5), dpi=1500)
ax1 = fig.add_subplot(131)
ax1.scatter(x1, y1, c='#098A63', label='normal', marker='.')
ax1.scatter(x2, y2, c='#3F2B78', label='before fault', marker='.')
ax1.scatter(x3, y3, c='c', label='faulty', marker='.')
ax1.legend()
plt.xlabel('Wind speed (m/s)')
plt.ylabel('Average active power (kW)')
plt.title('all data points')
ax2 = fig.add_subplot(132)
ax2.scatter(x4, y4, c='#098A63', marker='.')
ax2.scatter(x2, y2, c='#3F2B78', marker='.')
ax2.scatter(x3, y3, c='c', marker='.')
plt.xlabel('Wind speed (m/s)')
plt.ylabel('Average active power (kW)')
plt.title('w/o curtailment')
ax3 = fig.add_subplot(133)
ax3.scatter(x5, y5, c='#098A63', marker='.')
ax3.scatter(x2, y2, c='#3F2B78', marker='.')
ax3.scatter(x3, y3, c='c', marker='.')
plt.xlabel('Wind speed (m/s)')
plt.ylabel('Average active power (kW)')
plt.title('w/o curtailment and anomalies')
fig.suptitle(
'Power curves for turbine %s' % x + ' with turbine category %s' % y)
plt.tight_layout()
plt.subplots_adjust(top=0.88)
plt.show()
| 32.381818 | 76 | 0.576642 | ["MIT"] | nmstreethran/WindTurbineClassification | scripts/plot/powercurves-filter-T1.py | 5,343 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 28 15:05:36 2018
@author: louismueller
"""
# import
import numpy as np
#%%
def PlotOverview(oData):
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Overview Plots
# Find interesting stuff: takeoff, landing, excitations etc.
time_s = oData['time_s']
# Airspeed
vIas_mps = oData['vIas_mps']
vGps_mps = np.linalg.norm(oData['vGps_L_mps'], 2, axis=0)
vB_mps = np.linalg.norm(oData['vB_L_mps'], 2, axis=0)
fig0, ax0 = plt.subplots()
ax0.plot(time_s, oData['refV_mps'], label='ref')
ax0.plot(time_s, vIas_mps, label='airspeed')
ax0.plot(time_s, vGps_mps, '.', label='Gps')
ax0.plot(time_s, vB_mps, label='Ekf')
ax0.grid()
ax0.set_xlabel('Time (s)')
ax0.set_ylabel('Airspeed (m/s)')
ax0.set_title('Air Data Airspeed')
ax0.legend()
# Altitude
altBaro_m = oData['altBaro_m']
altGps_m = oData['rGps_D_ddm'][2]
altB_m = oData['rB_D_ddm'][2]
fig1, ax1 = plt.subplots()
ax1.plot(time_s, oData['refH_m'], label='ref')
ax1.plot(time_s, altBaro_m, label='Baro')
ax1.plot(time_s, altGps_m, '.', label='GPS')
ax1.plot(time_s, altB_m - altB_m, label='Ekf')
ax1.grid()
ax1.set_xlabel('Time (s)')
ax1.set_ylabel('Altitude (m)')
ax1.set_title('Altitude')
ax1.legend()
# X and Y Position
latGps_deg = oData['rGps_D_ddm'][0]
lonGps_deg = oData['rGps_D_ddm'][1]
latB_deg = oData['rB_D_ddm'][0]
lonB_deg = oData['rB_D_ddm'][1]
fig2, ax2 = plt.subplots()
ax2.plot(lonGps_deg, latGps_deg, '.', label='GPS')
ax2.plot(lonB_deg, latB_deg, label='Ekf')
ax2.grid()
ax2.axis('equal')
ax2.set_xlabel('Longitude (deg)')
ax2.set_ylabel('Latitude (deg)')
ax2.set_title('Latitude and Longitude')
ax2.legend()
# Voltage
pwrFmu_V = oData['pwrFmu_V']
fig3, ax3 = plt.subplots()
ax3.plot(time_s, pwrFmu_V)
ax3.set_xlabel('Time (s)')
ax3.set_ylabel('Avionics Voltage (V)')
ax3.set_title('Power')
ax3.grid()
# 3D Position
fig4 = plt.figure()
ax4 = fig4.gca(projection='3d', proj_type = 'ortho')
ax4.plot(lonGps_deg, latGps_deg, altGps_m, '.', label='GPS')
ax4.plot(lonB_deg, latB_deg, altB_m, label='Ekf')
ax4.axis('equal')
ax4.grid()
ax4.set_xlabel('Longitude (deg)')
ax4.set_ylabel('Latitude (deg)')
ax4.set_title('Flight Path')
ax4.legend()
plt.show()
return 1
#%% Find Excitation Times based on 'exciteEngage'
def FindExcite(oData):
# returns list of tuples:
#(testID, [timeMin_us, timeMax_us])
# Create array that is the testID value when exciteEngage is True and -1 everywhere else
iTestExcite = np.where(oData['exciteEngage'], oData['testID'], -1 * np.ones_like(oData['testID']))
# Find where the index changes
iRange = np.where(iTestExcite[:-1] != iTestExcite[1:])[0]
iStartList = iRange[0::2]
iEndList = iRange[1::2]
excList = []
for iExc in range(0,len(iStartList)):
iStart = iStartList[iExc]
iEnd = iEndList[iExc]
timeRange_us = [int(oData['time_us'][iStart]), int(oData['time_us'][iEnd])]
testID = oData['testID'][iStart]
exc = (testID, timeRange_us)
excList.append(exc)
return excList
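# Illustrative return value of FindExcite (numbers are hypothetical):
#     [(3, [1200000000, 1230000000]), (5, [1500000000, 1540000000])]
# i.e. one (testID, [timeStart_us, timeEnd_us]) tuple per detected excitation.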
#%%
def TestPointOut(excList, testPointList):
testList = []
for iExc, exc in enumerate(excList):
iTestID = exc[0]
testPoint = testPointList[iTestID]
testPoint['time_us'] = excList[iExc][1]
testList.append(testPoint)
return testList
#%% Segment oData by condition
import copy
def SliceDict(oData, iCond):
oDataSeg = {}
lenCond = len(iCond)
for k, v in oData.items():
if isinstance(v, dict):
oDataSeg[k] = {}
oDataSeg[k] = SliceDict(v, iCond)
else:
if v.shape[-1] >= lenCond:
oDataSeg[k] = np.copy(v[...,iCond])
else:
oDataSeg[k] = np.copy(v)
return oDataSeg
#
def Segment(oData, cond):
# cond = (condName, [min, max])
# if cond is a list, will return a list of oData segments
# Example: cond = ('time_s', [60, 61])
# Recursive call if cond is a list of conditions
if type(cond) is list:
oDataSeg = []
for c in cond:
seg = Segment(oData, c)
oDataSeg.append(seg)
return oDataSeg
# Slice into Segments
condName = cond[0]
condRange = cond[1]
if len(cond) > 2:
condDesc = cond[2]
else:
condDesc = ''
# Bool that matches the condition
iCond = (oData[condName] >= condRange[0]) & (oData[condName] <= condRange[1])
# Slice, with full copy, into segmented oData. SliceDict will handle recursive calls
oDataSeg = copy.deepcopy(SliceDict(oData, iCond))
oDataSeg['Desc'] = condDesc
return oDataSeg
#
def Decimate(oData, skip):
    # Recursive call if skip is a list of decimation factors
if type(skip) is list:
oDataSeg = []
for s in skip:
            seg = Decimate(oData, s)
oDataSeg.append(seg)
return oDataSeg
# Bool that matches the condition
iCond = range(0, len(oData['time_s']), skip)
# Slice, with full copy, into segmented oData. SliceDict will handle recursive calls
oDataSeg = copy.deepcopy(SliceDict(oData, iCond))
# oDataSeg['Desc'] = condDesc
return oDataSeg
| 28.020101 | 102 | 0.601506 | ["MIT"] | UASLab/OpenFlightAnalysis | Core/OpenData.py | 5,576 | Python |
import sys, os
imgdir = sys.argv[1]
outfile = sys.argv[2]
flag = int(sys.argv[3])
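# Hypothetical invocation (paths are placeholders):
#     python gen_view_html.py ./frames office1.html 1
# flag=1 builds the <img> tags from the office1_<idx>.jpg naming pattern,
# flag=0 uses the file names found in imgdir as-is.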
fn = open(outfile, 'w')
fn.write("<html>\n")
fn.write("<body>\n")
namelist = os.listdir(imgdir)
namelist = sorted(namelist)
for idx, name in enumerate(namelist):
#if idx > 10:
# break
if flag:
imname = "office1_%d.jpg"%idx
impath = os.path.join(imgdir, imname)
else:
impath = os.path.join(imgdir, name)
#impath = os.path.abspath(impath)
#fn.write("<img src=\"%s\" width=\"640\" height=\"320\">\n"%impath)
fn.write("<img src=\"%s\" width=\"960\" height=\"540\">\n"%impath)
#print(impath)
fn.write("</body>\n")
fn.write("</html>")
fn.close()
| 26.230769 | 71 | 0.595308 | ["Apache-2.0"] | NicoleWang/retina_tiny | gen_view_html.py | 682 | Python |
# coding=utf-8
# Copyright 2021 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract decoder and MSE decoder.
"""
import gin
import tensorflow.compat.v1 as tf
from tensorflow.contrib import slim
@gin.configurable
class MSEDecoder(object):
"""Default MSE decoder."""
def __call__(self, params, output_size):
self._predictions = slim.fully_connected(
params, output_size, activation_fn=None, scope='pose')
return self._predictions
def loss(self, labels):
return tf.losses.mean_squared_error(labels=labels.action,
predictions=self._predictions)
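# Minimal usage sketch (tensor names and the output size are illustrative assumptions):
#     decoder = MSEDecoder()
#     predictions = decoder(params=features, output_size=7)  # single dense layer, no activation
#     loss = decoder.loss(labels)  # labels.action must match the prediction shape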
| 30.972973 | 74 | 0.729494 | ["Apache-2.0"] | StanislavParovoy/tensor2robot | research/vrgripper/mse_decoder.py | 1,146 | Python |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mods.ui'
#
# Created: Sun Oct 12 21:39:36 2014
# by: PyQt5 UI code generator 5.3.2
#
# WARNING! All changes made in this file will be lost!
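# (A file like this is typically regenerated from the .ui source with something
#  like `pyuic5 mods.ui -o ui_mods.py`; the command is shown for illustration only.)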
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ModsDialog(object):
def setupUi(self, ModsDialog):
ModsDialog.setObjectName("ModsDialog")
ModsDialog.resize(500, 500)
ModsDialog.setModal(True)
self.dialogGridLayout = QtWidgets.QGridLayout(ModsDialog)
self.dialogGridLayout.setObjectName("dialogGridLayout")
self.custom_button_box = QtWidgets.QDialogButtonBox(ModsDialog)
self.custom_button_box.setStandardButtons(QtWidgets.QDialogButtonBox.Apply | QtWidgets.QDialogButtonBox.Cancel)
self.custom_button_box.setCenterButtons(True)
self.custom_button_box.setObjectName("custom_button_box")
self.dialogGridLayout.addWidget(self.custom_button_box, 1, 0, 1, 1)
self.formLayout_2 = QtWidgets.QFormLayout()
self.formLayout_2.setFieldGrowthPolicy(QtWidgets.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_2.setObjectName("formLayout_2")
self.nameLabel = QtWidgets.QLabel(ModsDialog)
self.nameLabel.setObjectName("nameLabel")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.nameLabel)
self.nameComboBox = QtWidgets.QComboBox(ModsDialog)
self.nameComboBox.setEditable(True)
self.nameComboBox.setObjectName("nameComboBox")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.nameComboBox)
self.colorLabel = QtWidgets.QLabel(ModsDialog)
self.colorLabel.setObjectName("colorLabel")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.colorLabel)
self.colorLineEdit = QtWidgets.QLineEdit(ModsDialog)
self.colorLineEdit.setObjectName("colorLineEdit")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.colorLineEdit)
self.sequence5Label = QtWidgets.QLabel(ModsDialog)
self.sequence5Label.setObjectName("sequence5Label")
self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.sequence5Label)
self.sequence5LineEdit = QtWidgets.QLineEdit(ModsDialog)
self.sequence5LineEdit.setObjectName("sequence5LineEdit")
self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.sequence5LineEdit)
self.sequence3Label = QtWidgets.QLabel(ModsDialog)
self.sequence3Label.setObjectName("sequence3Label")
self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.sequence3Label)
self.sequence3LineEdit = QtWidgets.QLineEdit(ModsDialog)
self.sequence3LineEdit.setObjectName("sequence3LineEdit")
self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.sequence3LineEdit)
self.sequenceInternalLabel = QtWidgets.QLabel(ModsDialog)
self.sequenceInternalLabel.setObjectName("sequenceInternalLabel")
self.formLayout_2.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.sequenceInternalLabel)
self.sequenceInternalLineEdit = QtWidgets.QLineEdit(ModsDialog)
self.sequenceInternalLineEdit.setObjectName("sequenceInternalLineEdit")
self.formLayout_2.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.sequenceInternalLineEdit)
self.noteLabel = QtWidgets.QLabel(ModsDialog)
self.noteLabel.setObjectName("noteLabel")
self.formLayout_2.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.noteLabel)
self.noteTextEdit = QtWidgets.QTextEdit(ModsDialog)
self.noteTextEdit.setObjectName("noteTextEdit")
self.formLayout_2.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.noteTextEdit)
self.dialogGridLayout.addLayout(self.formLayout_2, 0, 0, 1, 1)
self.retranslateUi(ModsDialog)
self.custom_button_box.rejected.connect(ModsDialog.reject)
self.custom_button_box.clicked['QAbstractButton*'].connect(ModsDialog.accept)
QtCore.QMetaObject.connectSlotsByName(ModsDialog)
def retranslateUi(self, ModsDialog):
_translate = QtCore.QCoreApplication.translate
ModsDialog.setWindowTitle(_translate("ModsDialog", "Choose a Modification"))
self.nameLabel.setText(_translate("ModsDialog", "Name"))
self.colorLabel.setText(_translate("ModsDialog", "color"))
self.sequence5Label.setText(_translate("ModsDialog", "sequence 5\'"))
self.sequence3Label.setText(_translate("ModsDialog", "sequence 3\'"))
self.sequenceInternalLabel.setText(_translate("ModsDialog", "sequence internal"))
self.noteLabel.setText(_translate("ModsDialog", "Note"))
| 58.493827 | 119 | 0.748417 | ["BSD-3-Clause"] | cadnano/cadnano2.5 | cadnano/gui/dialogs/ui_mods.py | 4,738 | Python |
#!/usr/bin/env python
#
# ---- VGRID ----
# A Class for Incremental Gridding
#
# Val Schmidt
# Center for Coastal and Ocean Mapping
# Univeristy of New Hampshire
# Copyright, 2018-2019
# All Rights Reserved
import numpy as np
from line_profiler import LineProfiler
import sys
import scipy.spatial as spatial
from matplotlib import pyplot as plt
import numba
class vgrid():
''' A class for gridding of x,y,z data.
See vgrid.add() for details on usage.
'''
def __init__(self, cs=1.0, cinf=1.0, type='mean'):
self.cs = cs # cell size
self.cinf = cinf # cell influence
self.type = type # grid type
self.xx = None # x (Easting) coordinates of grid
        self.yy = None # y (Northing) coordinates of grid
self.zw = None # Sum of the product of the gridded values and their weights for the grid cell.
self.ww = None # Sum of weights
self.nn = None # Number of points contributing to grid cell
self.varw = None # Sum of the square of the difference of the gridded values and the estimated mean, times their weights.
        self.Z = None # Sequential estimator of depth for scalar (CUBE) or platelet methods.
self.CZ = None
### Utility variables used internally. ###
# New values to incorporate into grid.
self._x = None
self._y = None
self._z = None
self._w = None
self._II = None # Indices of values to add for node under consideration.
# (These are not used as class variables currently.)
        self._idx = None # row grid node index for node under consideration.
        self._jdx = None # column grid node index for node under consideration.
self._I = None # table of index lists for data to add for each grid node
def zz(self):
''' Calculate the z values for the grid.'''
return self.zw / self.ww
def mean_wholegrid(self):
''' Calculate mean values for the whole grid.'''
# Fancy list comprehension to execute a double loop concisely.
[self.mean(idx, jdx)
for idx in range(self.yy.size)
for jdx in range(self.xx.size)
if self._I[idx][jdx] is not None]
def median_wholegrid(self):
''' Calculate median values for the whole grid.'''
# Fancy list comprehension to execute a double loop concisely.
[self.median(idx, jdx)
for idx in range(self.yy.size)
for jdx in range(self.xx.size)
if self._I[idx][jdx] is not None]
def mean(self, idx, jdx):
'''Mean gridding algorithm.
        vgrid implements incremental gridding where possible.
To do this, the sum of the product of the weights and z values are
retained in addition to the sum of the weights. Then method zz()
calculates the quotient of the two to obtain the actual weighted
mean z values. Note that when all weights are one, (or if w is set to
1 for shorthand), a standard mean is calculated.
        Variance is calculated in a similar way. In this case the sum of
w*(z_i - mu)^2 is calculated and stored for each grid node, where
z_i is the value to be gridded and mu is the mean of the grid node
calculated thus far. Then this sum is divided by the sum of the
weights to get the final estimated variance. As the mean of the grid
node approaches the true mean, this value should approach the true
variance.
'''
self._II = self._I[idx][jdx]
# Non-weighted gridding.
if self._w.size == 1:
self.zw[idx, jdx] = np.nansum(np.concatenate((self._z[self._II], [self.zw[idx, jdx]])))
self.ww[idx, jdx] = self.nn[idx, jdx]
self.varw[idx, jdx] = np.nansum(np.concatenate((np.power( (self._z[self._II] - self.zw[idx,jdx]/self.nn[idx,jdx]), 2),[self.varw[idx, jdx]])))
else:
# Weighted gridding. Sum of value times the weight divided by the
# sum of the weights.
# The strategy taken here is to retain the sum of the values times the weights, and also
# the sum of the weights. Then when the weighted mean is requested the calling function
# divides the these two values. This strategy allows incremental addition of data to the grid.
#
# The coding strategy below is to append the new points to the existing point in a list
# and then call nansum to add them up.
#
# Q: Note: A dot-product might be quicker, but there is no dot-product that will produce a
# non-nan result if one of the values is nan, which is desired here.
self.zw[idx, jdx] = np.nansum(np.append(self.zw[idx, jdx], self._z[self._II] * self._w[self._II]))
self.ww[idx, jdx] = np.nansum(np.append(self.ww[idx, jdx], self._w[self._II]))
self.varw[idx, jdx] = np.nansum(np.append(np.power( (self._z[self._II] - self.zw[idx,jdx]/self.ww[idx,jdx]),2)
, self.varw[idx, jdx] ))
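    # Small numeric illustration of the running sums above (hypothetical values,
    # weighted branch): gridding z = [2, 4] with w = [1, 3] gives zw = 2*1 + 4*3 = 14
    # and ww = 4, so zz() = 3.5; a later add() of z = 6 with w = 1 updates the same
    # node to zw = 20, ww = 5, i.e. zz() = 4.0 without revisiting the earlier points.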
def var(self):
''' Calculate the variance'''
return self.varw/self.ww
def std(self):
'''Calculate the standard deviation'''
return np.sqrt(self.var())
def meanwithoutlierrejection(self):
''' TO DO: Calculate the mean, rejecting values that exceed 3-sigma
from existing estimate.'''
pass
def median(self, idx, jdx):
''' Calculate the median value in each grid cell.
        The method used here to provide a "running median" is, for each add(),
        to average the existing value with the median of the new points. This
        method works reasonably well, but can produce inferior results if a
        single add() contains only outliers and there are insufficient
        additional adds to constrain it.'''
self.zw[idx, jdx] = np.nanmean(
np.append(self.zw[idx, jdx], np.nanmedian(self._z[self._II])))
self.ww[idx, jdx] = 1
self.varw[idx, jdx] = np.nansum(np.append(
np.power((self._z[self._II] - self.zw[idx, jdx]/self.ww[idx, jdx]),
2),
self.varw[idx, jdx]))
pass
def gridsizesanitycheck(self, M):
'''Check to see if the grid size is going to be REALLY large. '''
if M.__len__() > 1e4:
return False
else:
return True
def create_new_grid(self):
''' Create a new empty grid.'''
self.xx = np.arange(min(self._x), max(self._x)+self.cs, self.cs)
self.yy = np.arange(min(self._y), max(self._y)+self.cs, self.cs)
if not (self.gridsizesanitycheck(self.xx) and
self.gridsizesanitycheck(self.yy)):
print('Grid size is too large.')
return
# Initialize grid.
self.zw = np.empty((self.yy.size, self.xx.size))
self.zw.fill(np.nan)
self.nn = np.copy(self.zw)
self.ww = np.copy(self.zw)
self.varw = np.copy(self.zw)
def expand_grid(self):
minx = min(self._x)
miny = min(self._y)
maxx = max(self._x)
maxy = max(self._y)
if minx < self.xx[0]:
dx = np.arange(minx, self.xx[0] - self.cs, self.cs)
self.xx = np.concatenate((dx,self.xx))
# Create new space
tmp = np.empty((self.yy.size,dx.size))
tmp.fill(np.nan)
# Tack it on.
self.zw = np.concatenate((np.copy(tmp), self.zw), axis=1)
self.nn = np.concatenate((np.copy(tmp), self.nn), axis=1)
self.ww = np.concatenate((np.copy(tmp), self.ww), axis=1)
self.varw = np.concatenate((np.copy(tmp), self.varw), axis=1)
# FIX: Support depth/platelet estimates here, tbd
if maxx > self.xx[-1]:
dx = np.arange(self.xx[-1]+self.cs,maxx,self.cs)
self.xx = np.concatenate((self.xx,dx))
# Create new space
tmp = np.empty((self.yy.size,dx.size))
tmp.fill(np.nan)
# Tack it on.
self.zw = np.concatenate((self.zw,np.copy(tmp)),axis=1)
self.nn = np.concatenate((self.nn,np.copy(tmp)),axis=1)
self.ww = np.concatenate((self.ww,np.copy(tmp)),axis=1)
self.varw = np.concatenate((self.varw,np.copy(tmp)),axis=1)
if miny < self.yy[0]:
dy = np.arange(miny,self.yy[0]-self.cs,self.cs)
self.yy = np.concatenate((dy,self.yy))
tmp = np.empty((dy.size,self.xx.size))
tmp.fill(np.nan)
self.zw = np.concatenate((np.copy(tmp), self.zw),axis=0)
self.nn = np.concatenate((np.copy(tmp), self.nn),axis=0)
self.ww = np.concatenate((np.copy(tmp), self.ww),axis=0)
self.varw = np.concatenate((np.copy(tmp), self.varw),axis=0)
if maxy > self.yy[-1]:
dy = np.arange(self.yy[-1] + self.cs,maxy, self.cs)
self.yy = np.concatenate((self.yy, dy))
tmp = np.empty((dy.size, self.xx.size))
tmp.fill(np.nan)
self.zw = np.concatenate((self.zw, np.copy(tmp)), axis=0)
self.nn = np.concatenate((self.nn, np.copy(tmp)), axis=0)
self.ww = np.concatenate((self.ww, np.copy(tmp)), axis=0)
self.varw = np.concatenate((self.varw, np.copy(tmp)), axis=0)
def add(self, x, y, z, w):
''' An incremental gridding function
Arguments:
x: x-coordinates
        y: y-coordinates
z: z-scalar values to grid
w: w-weight applied to each point (size of x or 1 for no weighting)
When 'type' = Nlowerthan or Ngreaterthan, w is the threshold value
When 'type' = distance weighted mean, distance = R^w
cs: grid cell size
cinf: cell influence
type: type of grid (see below)
Output:
g.xx: vector of grid cell x coordinates.
        g.yy: vector of grid cell y coordinates.
        g.zz: 2D matrix of gridded values times their weights.
g.nn: 2D matrix containing the number of points in each grid cell.
g.ww: sum of weights of items in the grid cell
        Grid types:
        mean:
            Average of the values. When w != 1, the mean is calculated by
            multiplying each value in the cell by its weight divided by the
            sum of the weights in that cell.
        median:
            Calculates the median value for each grid cell.
        mode:
            Calculates the mode of the values for each grid cell.
        shoalest:
            Calculates the minimum value for each grid cell.
        deepest:
            Calculates the maximum value for each grid cell.
        stddev:
            Calculates the standard deviation of the values in each grid cell.
        stderr:
            Calculates the standard error of the values in each grid cell
            (stddev/N, where stddev is the standard deviation and N is the
            number of points falling in the cell).
        dwm:
            Calculates the distance weighted mean, where each value in the
            cell is inversely weighted by the square of its distance to the
            cell node.
        Nlowerthan:
            Calculates the number of points in the grid cell lower than some
            value, w.
        Ngreaterthan:
            Calculates the number of points greater than some value, w.
        To Do:
        - Rewrite the mean function as a matrix operation to simplify the
          propagation of uncertainty calculation. This might be made more
          general, such that you pass a list of values, their uncertainties
          and weighting factors and get back a mean and propagated
          uncertainty. This would allow relatively simple incorporation of
          things like range weighting, footprint weighting, Gaussian
          weighting, etc.
        - Add uncertainty to the z input and propagate it through the
          calculations.
        - Add uncertainty to the x and y inputs and propagate it through the
          calculations (more difficult).
        - Rewrite as a C mex function.
        Val Schmidt
        CCOM/JHC
        2018, 2019
        '''
# Force everything to match.
if np.isscalar(x) or np.isscalar(y) or np.isscalar(z):
print('X, Y, or Z is scalar - must be numpy array.')
sys.exit()
self._x = x.ravel()
self._y = y.ravel()
self._z = z.ravel()
if not np.isscalar(w):
self._w = w.ravel()
else:
self._w = np.array(w)
# Weight cannot be zero.
if self._w.size != 1:
if sum(self._w == 0):
print("Found zero weights. Weights cannot be zero.")
print("Setting to 1e-20.")
self._w[self._w == 0] = 1e-20
# Set up new grid, or extend the existing grid if necessary.
if self.zw is None:
self.create_new_grid()
else:
self.expand_grid()
grows = self.yy.size
gcols = self.xx.size
doindices = 0
#self.sort_data()
self.sort_data_kdtree()
# Peform the gridding calculations.
if self.type == 'dwm':
# Calculate distance between points and grid node for distance-weighted mean.
            # In this case, w is the exponent.
#R = ((self.xx[jdx] - self._x(self.II))**2 +
# (self.yy[jdx]-self._y[self.II])**2)**(self._w/2.0)
print("Not yet supported.")
if self.type == 'mean':
self.mean_wholegrid()
if self.type == "median":
self.median_wholegrid()
def sort_data_kdtree(self):
''' A sorting of the data into grid cells using KDtrees.'''
tree = spatial.cKDTree(list(zip(self._x.ravel(), self._y.ravel())), leafsize=1e7)
xxx,yyy = np.meshgrid(self.xx,self.yy)
indexes = tree.query_ball_point(np.vstack((xxx.ravel(), yyy.ravel())).T,
r=self.cinf,
p=2,
n_jobs=-1).reshape(xxx.shape)
self._I = indexes
def sort_data(self):
''' Determine which data contributes to each grid node.
The list of indices is populated in self._I[n][m], where n and m
indicate the grid node.'''
        # Initialize the 2D list of index lists.
self._I = [x[:] for x in
[[None] * self.xx.size] * self.yy.size]
cinf2 = self.cinf**2
# Go through the rows of the grid..
for idx in np.arange(0, self.yy.size, dtype='uint32'):
'''
We need to search through all the data efficiently to determine
indices for points that will contribute to a grid node. Those that
contribute are ones that fall within the "cell influence" (cinf).
        These are the ones that meet the criterion:
            sqrt( (x-xo)^2 + (y-yo)^2 ) < cinf
        Squaring both sides gives
            (x-xo)^2 + (y-yo)^2 < cinf^2
        This can never be true when either term of the lhs is >= cinf^2,
        so we reduce the search by doing these tests piecemeal. '''
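            # For example, with cinf = 2 (so cinf2 = 4), a point offset 1.5 in y
            # passes this row test (1.5**2 = 2.25 < 4) and is kept for a node
            # only if its x offset satisfies (x - xo)**2 < 4 - 2.25 = 1.75,
            # i.e. |x - xo| < ~1.32.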
# Here we find the y data values within cinf of the grid node
ddy = (self._y - self.yy[idx])**2
yidx = np.flatnonzero(ddy < cinf2)
# If there are none, then don't bother with further calculations.
if yidx.size == 0:
continue
# Then go through each cell of that row, and look for x - values
# that also are in the cell. But first pre-calculate a vector of
# terms that will be needed for every evaluation.
xtest = cinf2 - ddy[yidx]
for jdx in np.arange(0, self.xx.size, dtype='uint32'):
xidx = np.flatnonzero((self._x[yidx] - self.xx[jdx])**2
< xtest)
# If there are none of these then there is nothing to do.
if xidx.size == 0:
continue
# Retain the list of indices contributing to the node.
self._I[idx][jdx] = yidx[xidx]
# Keep running count of the number of values.
self.nn[idx,jdx] = np.nansum(np.append(self.nn[idx, jdx],
xidx.size))
def numba_add(self, x, y, z, w, chnksize=100000):
"""
An attempt at running self.add with numba. Key here is to chunk the points so that the numba compiled function
_numba_add runs multiple times, where the first run is slow as it compiles. _numba_add is not within the class,
as classes aren't supported. There is this new thing numba.jitclass, but it appears to still be experimental.
On my test dataset containing about 4.5 million soundings, I got the following results:
- existing add = 55.8 seconds
- numba_add (chunksize, time) = (100, 55.2), (1000, 21.2), (10000, 17.9), (100000, 16.6), (150000, 16.2),
(200000, 15.7), (1000000, 18.0)
"""
# Force everything to match.
if np.isscalar(x) or np.isscalar(y) or np.isscalar(z):
print('X, Y, or Z is scalar - must be numpy array.')
sys.exit()
self._x = x.ravel()
self._y = y.ravel()
self._z = z.ravel()
if not np.isscalar(w):
self._w = w.ravel()
else:
self._w = np.array(w)
# Weight cannot be zero.
if self._w.size != 1:
if sum(self._w == 0):
print("Found zero weights. Weights cannot be zero.")
print("Setting to 1e-20.")
self._w[self._w == 0] = 1e-20
# Set up new grid, or extend the existing grid if necessary.
if self.zw is None:
self.create_new_grid()
else:
self.expand_grid()
ptlen = len(self._x)
chnks = [[i * chnksize, min((i + 1) * chnksize, ptlen)] for i in range(int(ptlen / chnksize) + 1)]
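        # For example, ptlen = 250000 with chnksize = 100000 gives
        # chnks = [[0, 100000], [100000, 200000], [200000, 250000]], so
        # _numba_add compiles on the first chunk and reuses the compiled code
        # for the remaining chunks.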
for chnk in chnks:
chnk_idx = slice(chnk[0], chnk[1])
if self._w.size != 1:
chunk_w = self._w[chnk_idx]
else:
chunk_w = self._w
self.zw, self.ww, self.varw, self.nn = _numba_add(self.xx, self.yy, self.nn, self.cinf, self._x[chnk_idx],
self._y[chnk_idx], self._z[chnk_idx], chunk_w,
self.type, self.zw, self.varw, self.ww)
def rotate(self):
pass
    def pcolor(self, *args):
        plt.pcolor(self.xx, self.yy, self.zz(), *args)
#plt.colorbar()
plt.ion()
plt.show()
#plt.draw()
plt.pause(0.001)
@numba.jit(nopython=True, nogil=True, parallel=True)
def _numba_add(xx, yy, nn, cinf, x, y, z, w, typ, zw, varw, ww):
"""
numba jit compiled add function
- Numba compiles this function, ensure that no classes/functions are within unless they are also numba-ized
- Numba.prange forces numba to parallelize, generates exception when parallelism fails, helping you figure out
what needs to be fixed. Otherwise parallel=True can fail silently
- nopython=True, this function operates entirely outside of the python interpreter
- nogil=True, will not use the python GIL (this might be redundant with nopython)
"""
grows = yy.size
gcols = xx.size
doindices = 0
cinf2 = cinf ** 2
# Go through the rows of the grid..
for idx in numba.prange(grows):
# Here we find the y data values within cinf of the grid node
ddy = (y - yy[idx]) ** 2
yidx = np.flatnonzero(ddy < cinf2)
# If there are none, then don't bother with further calculations.
if yidx.size == 0:
continue
# Then go through each cell of that row, and look for x - values that also are in the cell.
# But first pre-calculate a vector of terms that will be needed for every evaluation.
xtest = cinf2 - ddy[yidx]
for jdx in numba.prange(gcols):
xidx = np.flatnonzero((x[yidx] - xx[jdx]) ** 2 < xtest)
# If there are none of these then there is nothing to do to the grid node.
if xidx.size == 0:
continue
# Set the indices of the values to be add to this grid node.
II = yidx[xidx]
if typ == 'dwm':
# Calculate distance between points and grid node for distance-weighted mean.
                # In this case, w is the exponent.
if w.size != 1:
R = ((xx[jdx] - x[II]) ** 2 + (yy[idx] - y[II]) ** 2) ** (w[II] / 2.0)
else:
R = ((xx[jdx] - x[II]) ** 2 + (yy[idx] - y[II]) ** 2) ** (w / 2.0)
if not doindices:
nn[idx, jdx] = np.nansum(np.array([nn[idx, jdx], xidx.size]))
else:
nn[idx, jdx] = idx * (gcols - 1) + jdx
if w.size != 1:
chunk_w = w[II]
else:
chunk_w = w
if typ == 'mean':
zw[idx, jdx], ww[idx, jdx], varw[idx, jdx] = _numba_mean_by_cell(zw[idx, jdx], ww[idx, jdx],
varw[idx, jdx], nn[idx, jdx], z[II],
chunk_w)
elif typ == "median":
zw[idx, jdx], ww[idx, jdx], varw[idx, jdx] = _numba_median_by_cell(zw[idx, jdx], ww[idx, jdx],
varw[idx, jdx], z[II])
return zw, ww, varw, nn
@numba.jit(nopython=True)
def _numba_mean_by_cell(zw_cell, ww_cell, varw_cell, nn_cell, z, w):
# Non-weighted gridding.
if w.size == 1:
zw = np.nansum(np.concatenate((z, np.array([zw_cell]))))
ww = nn_cell
varw = np.nansum(np.concatenate((((z - zw / nn_cell) ** 2), np.array([varw_cell]))))
else:
# Weighted gridding. Sum of value times the weight divided by the
# sum of the weights.
# The strategy taken here is to retain the sum of the values times the weights, and also
# the sum of the weights. Then when the weighted mean is requested the calling function
        # divides these two values. This strategy allows incremental addition of data to the grid.
#
# The coding strategy below is to append the new points to the existing point in a list
# and then call nansum to add them up.
#
# Q: Note: A dot-product might be quicker, but there is no dot-product that will produce a
# non-nan result if one of the values is nan, which is desired here.
zw = np.nansum(np.append(zw_cell, z * w))
ww = np.nansum(np.append(ww_cell, w))
varw = np.nansum(np.append(((z - zw_cell / ww_cell) ** 2), varw_cell))
return zw, ww, varw
@numba.jit(nopython=True)
def _numba_median_by_cell(zw_cell, ww_cell, varw_cell, z):
''' Calculate the median value in each grid cell.
    The method used here to provide a "running median" is, for each add(),
    to calculate the average of the existing value with the median of the
    new points. This method works reasonably well, but can produce
    inferior results if a single add() contains only outliers and there
    are insufficient additional adds to constrain it.'''
zw = np.nanmean(np.append(zw_cell, np.nanmedian(z)))
ww = 1
    # Squared deviation from the running mean, matching the class implementation.
    varw = np.nansum(np.append(((z - zw_cell / ww_cell) ** 2), varw_cell))
return zw, ww, varw
if __name__=='__main__':
profileON = True
def gridTest(N = 2, ProfileON = False):
''' Method to test gridding.'''
print("N=%d" % N)
# Generate data.
x = np.random.random((N,1))*100
y = np.random.random((N,1))*100
z = np.exp( np.sqrt((x-50.)**2 + (y-50.)**2)/50)
# Generate grid.
G = vgrid(1,1,'mean')
if profileON:
print("Profiling on.")
lp = LineProfiler()
GAddProfiled = lp(G.add)
lp.add_function(G.mean)
GAddProfiled(x,y,z,1)
return (G,lp)
else:
G.add(x,y,z,1)
return G
## Gridding test script:
for N in [1000, 5000, 10000, 20000, 50000, 100000, 1000000]:
if profileON:
GG,LP = gridTest(N,True)
LP.print_stats()
else:
GG = gridTest(N,False)
# Plot test.
GG.pcolor()
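    # A minimal extra sketch of the weighted path (illustrative only): with all
    # weights equal to 1 the result should match the unweighted mean, since the
    # grid stores sum(z*w) and sum(w).
    Nw = 5000
    xd = np.random.random((Nw, 1)) * 100
    yd = np.random.random((Nw, 1)) * 100
    zd = np.exp(np.sqrt((xd - 50.)**2 + (yd - 50.)**2) / 50)
    Gw = vgrid(1, 1, 'mean')
    Gw.add(xd, yd, zd, np.ones_like(zd))
    print('Weighted-add demo: grid is %d x %d cells' % (Gw.yy.size, Gw.xx.size))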
| 40.2208 | 154 | 0.559153 | [
"BSD-2-Clause"
] | valschmidt/vgrid | VGRID/vgrid.py | 25,138 | Python |
import pandas as pd
import io
import dropbox
import streamlit as st
TOKEN = st.secrets["TOKEN"]
dbx = dropbox.Dropbox(TOKEN)
def read_dbx_file(file):
print('Getting latest file')
_, f = dbx.files_download(file)
with io.BytesIO(f.content) as stream:
df = pd.read_csv(stream, index_col=0)
return df
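# Minimal usage sketch (the Dropbox path below is a hypothetical example):
# df = read_dbx_file('/reports/latest.csv')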
| 22.466667 | 46 | 0.673591 | [
"MIT"
] | greenrock21/test | dbox_aux.py | 337 | Python |
# -*- coding: utf-8 -*-
import tensorflow as tf
INPUT_NODE = 784
OUTPUT_NODE = 10
# Image edge length: 28 (images are 28 x 28 pixels)
IMAGE_SIZE = 28
# Grayscale images, so a single channel
NUM_CHANNELS = 1
NUM_LABELS = 10
CONV1_DEEP = 32
# Filter (kernel) size
CONV1_SIZE = 5
CONV2_DEEP = 64
CONV2_SIZE = 5
# Number of nodes in the fully connected layer
FC_SIZE = 512
# def get_weight_variable(shape, regularizer):
# weights = tf.get_variable(
# "weight", shape,
# initializer=tf.truncated_normal_initializer(stddev=0.1))
# if regularizer != None:
#         tf.add_to_collection("losses", regularizer(weights))  # this is a custom collection, not managed automatically
# return weights
# def inference(input_tensor, regularizer):
# with tf.variable_scope("layer1"):
# weights = get_weight_variable(
#             [INPUT_NODE, LAYER1_NODE], regularizer)  # note: if this line is run more than once, set reuse=True
# biases = tf.get_variable(
# "biases", [LAYER1_NODE],
# initializer=tf.constant_initializer(0.0))
# layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)
# with tf.variable_scope("layer2"):
# weights = get_weight_variable(
#             [LAYER1_NODE, OUTPUT_NODE], regularizer)  # note: if this line is run more than once, set reuse=True
# biases = tf.get_variable(
# "biases", [OUTPUT_NODE],
# initializer=tf.constant_initializer(0.0))
# layer2 = tf.matmul(layer1, weights) + biases
# return layer2
def inference(input_tensor, train, regularizer):
with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable(  # similar to tf.Variable()
"weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP], # x, y, prev-depth, depth
initializer=tf.truncated_normal_initializer(stddev=0.1)
)
conv1_biases = tf.get_variable(
"bias", [CONV1_DEEP], initializer=tf.constant_initializer(0.0)
)
        # Filter: 5x5 kernel, depth 32, stride 1, zero ('SAME') padding
conv1 = tf.nn.conv2d(
input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME'
)
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
# https://www.jianshu.com/p/cff8678de15a
    # Max pooling layer:
    with tf.name_scope('layer2-pool1'):
        # Filter: 2x2 window, stride 2, zero padding
pool1 = tf.nn.max_pool(
relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME'
)
with tf.variable_scope('layer3-conv2'):
conv2_weights = tf.get_variable(
"weight", [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
initializer=tf.truncated_normal_initializer(stddev=0.1)
)
conv2_biases = tf.get_variable(
"bias", [CONV2_DEEP], initializer=tf.constant_initializer(0.0)
)
conv2 = tf.nn.conv2d(
pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME'
)
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
with tf.name_scope('layer4-pool2'):
pool2 = tf.nn.max_pool(
relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME'
)
    # as_list() returns the shape as a plain Python list
    pool_shape = pool2.get_shape().as_list()
    # pool_shape[0] is the number of examples in the batch; the rest is 7*7*64
nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
# reshaped = tf.reshape(pool2, [pool_shape[0], nodes])
reshaped = tf.reshape(pool2, [-1, nodes])
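    # Shape bookkeeping for the default MNIST sizes: a 28x28x1 input stays
    # 28x28 (depth 32) through conv1 ('SAME'), is halved to 14x14 by pool1,
    # stays 14x14 (depth 64) through conv2, and is halved to 7x7 by pool2,
    # so nodes = 7 * 7 * 64 = 3136 inputs to the first fully connected layer.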
with tf.variable_scope('layer5-fc1'):
fc1_weights = tf.get_variable(
"weight", [nodes, FC_SIZE],
initializer=tf.truncated_normal_initializer(stddev=0.1)
)
if regularizer != None:
tf.add_to_collection('losses', regularizer(fc1_weights))
fc1_biases = tf.get_variable(
"bias", [FC_SIZE],
initializer=tf.constant_initializer(0.1)
)
fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
if train:
fc1 = tf.nn.dropout(fc1, 0.5)
with tf.variable_scope('layer6-fc2'):
fc2_weights = tf.get_variable(
"weight", [FC_SIZE, NUM_LABELS],
initializer=tf.truncated_normal_initializer(stddev=0.1)
)
if regularizer != None:
tf.add_to_collection('losses', regularizer(fc2_weights))
fc2_biases = tf.get_variable(
"bias", [NUM_LABELS],
initializer=tf.constant_initializer(0.1)
)
logit = tf.matmul(fc1, fc2_weights) + fc2_biases
return logit
| 32.477612 | 100 | 0.610064 | [
"MIT"
] | Banyc/TensorFlow_practice | mnist/LeNet-5/mnist_inference.py | 4,570 | Python |
##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## | 42.846154 | 75 | 0.746858 | [
"Apache-2.0"
] | AnthonyTruchet/cylon | python/pycylon/pycylon/common/__init__.py | 557 | Python |
from setuptools import setup
setup(name='gaedevhelper',
version='0.1.0.dev',
description='A lovely command-line helper for developing GAE applications',
url='https://github.com/devjoe/gae-dev-helper',
author='devjoe',
author_email='[email protected]',
license='MIT',
packages=['gaedevhelper'],
install_requires=[
'Click',
'Pygments',
'subprocess32',
'daemonize',
],
tests_require=[
'pytest',
'pytest-mock'
],
entry_points='''
[console_scripts]
gaedh=gaedevhelper.gae:gae
''',
keywords=['GAE', 'Google App Engine', 'dev_appserver', 'pygments', 'remote_api', 'gae_dev_helper'],
zip_safe=False)
| 28.555556 | 105 | 0.574578 | [
"MIT"
] | devjoe/gae-dev-helper | setup.py | 771 | Python |
diaria = 90
taxa = 12
carro = input('você deseja alugar um carro? ').upper()
if carro == 'SIM':
    dias = int(input('quantos dias você deseja ficar com o carro? '))
    km = int(input('quantos km tem sua viagem? '))
    # Average km per day; up to 100 km/day is covered by the daily rate and
    # the excess is charged at 'taxa' per km (assumed intent of the original).
    mediadia = km // dias
    valor100 = (mediadia - 100) * taxa + (diaria * dias)
    if mediadia <= 100:
        print('o valor a ser pago é {}'.format(dias * diaria))
    else:
        print('o valor a ser pago é {}'.format(valor100))
| 31.727273 | 65 | 0.670487 | [
"MIT"
] | MiguelTeixeiraUFPB/PythonM2 | algoritmos/PythonM2/locadora.py | 352 | Python |
"""Support for interface with an Samsung TV."""
import asyncio
from datetime import timedelta
import logging
import socket
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
MEDIA_TYPE_CHANNEL, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE,
SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_STEP)
from homeassistant.const import (
CONF_HOST, CONF_MAC, CONF_NAME, CONF_PORT, CONF_TIMEOUT, STATE_OFF,
STATE_ON)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Samsung TV Remote'
DEFAULT_PORT = 55000
DEFAULT_TIMEOUT = 1
KEY_PRESS_TIMEOUT = 1.2
KNOWN_DEVICES_KEY = 'samsungtv_known_devices'
SOURCES = {
'TV': 'KEY_TV',
'HDMI': 'KEY_HDMI',
}
SUPPORT_SAMSUNGTV = SUPPORT_PAUSE | SUPPORT_VOLUME_STEP | \
SUPPORT_VOLUME_MUTE | SUPPORT_PREVIOUS_TRACK | SUPPORT_SELECT_SOURCE | \
SUPPORT_NEXT_TRACK | SUPPORT_TURN_OFF | SUPPORT_PLAY | SUPPORT_PLAY_MEDIA
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_MAC): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Samsung TV platform."""
known_devices = hass.data.get(KNOWN_DEVICES_KEY)
if known_devices is None:
known_devices = set()
hass.data[KNOWN_DEVICES_KEY] = known_devices
uuid = None
# Is this a manual configuration?
if config.get(CONF_HOST) is not None:
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
mac = config.get(CONF_MAC)
timeout = config.get(CONF_TIMEOUT)
elif discovery_info is not None:
tv_name = discovery_info.get('name')
model = discovery_info.get('model_name')
host = discovery_info.get('host')
name = "{} ({})".format(tv_name, model)
port = DEFAULT_PORT
timeout = DEFAULT_TIMEOUT
mac = None
udn = discovery_info.get('udn')
if udn and udn.startswith('uuid:'):
uuid = udn[len('uuid:'):]
else:
_LOGGER.warning("Cannot determine device")
return
# Only add a device once, so discovered devices do not override manual
# config.
ip_addr = socket.gethostbyname(host)
if ip_addr not in known_devices:
known_devices.add(ip_addr)
add_entities([SamsungTVDevice(host, port, name, timeout, mac, uuid)])
_LOGGER.info("Samsung TV %s:%d added as '%s'", host, port, name)
else:
_LOGGER.info("Ignoring duplicate Samsung TV %s:%d", host, port)
class SamsungTVDevice(MediaPlayerDevice):
"""Representation of a Samsung TV."""
def __init__(self, host, port, name, timeout, mac, uuid):
"""Initialize the Samsung device."""
from samsungctl import exceptions
from samsungctl import Remote
import wakeonlan
# Save a reference to the imported classes
self._exceptions_class = exceptions
self._remote_class = Remote
self._name = name
self._mac = mac
self._uuid = uuid
self._wol = wakeonlan
# Assume that the TV is not muted
self._muted = False
# Assume that the TV is in Play mode
self._playing = True
self._state = None
self._remote = None
# Mark the end of a shutdown command (need to wait 15 seconds before
# sending the next command to avoid turning the TV back ON).
self._end_of_power_off = None
# Generate a configuration for the Samsung library
self._config = {
'name': 'HomeAssistant',
'description': name,
'id': 'ha.component.samsung',
'port': port,
'host': host,
'timeout': timeout,
}
if self._config['port'] in (8001, 8002):
self._config['method'] = 'websocket'
else:
self._config['method'] = 'legacy'
def update(self):
"""Update state of device."""
self.send_key("KEY")
def get_remote(self):
"""Create or return a remote control instance."""
if self._remote is None:
# We need to create a new instance to reconnect.
self._remote = self._remote_class(self._config)
return self._remote
def send_key(self, key):
"""Send a key to the tv and handles exceptions."""
if self._power_off_in_progress() \
and key not in ('KEY_POWER', 'KEY_POWEROFF'):
_LOGGER.info("TV is powering off, not sending command: %s", key)
return
try:
# recreate connection if connection was dead
retry_count = 1
for _ in range(retry_count + 1):
try:
self.get_remote().control(key)
break
except (self._exceptions_class.ConnectionClosed,
BrokenPipeError):
                    # BrokenPipe can occur when commands are sent too fast
self._remote = None
self._state = STATE_ON
except (self._exceptions_class.UnhandledResponse,
self._exceptions_class.AccessDenied):
# We got a response so it's on.
self._state = STATE_ON
self._remote = None
_LOGGER.debug("Failed sending command %s", key, exc_info=True)
return
except OSError:
self._state = STATE_OFF
self._remote = None
if self._power_off_in_progress():
self._state = STATE_OFF
def _power_off_in_progress(self):
return self._end_of_power_off is not None and \
self._end_of_power_off > dt_util.utcnow()
@property
def unique_id(self) -> str:
"""Return the unique ID of the device."""
return self._uuid
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def source_list(self):
"""List of available input sources."""
return list(SOURCES)
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self._mac:
return SUPPORT_SAMSUNGTV | SUPPORT_TURN_ON
return SUPPORT_SAMSUNGTV
def turn_off(self):
"""Turn off media player."""
self._end_of_power_off = dt_util.utcnow() + timedelta(seconds=15)
if self._config['method'] == 'websocket':
self.send_key('KEY_POWER')
else:
self.send_key('KEY_POWEROFF')
# Force closing of remote session to provide instant UI feedback
try:
self.get_remote().close()
self._remote = None
except OSError:
_LOGGER.debug("Could not establish connection.")
def volume_up(self):
"""Volume up the media player."""
self.send_key('KEY_VOLUP')
def volume_down(self):
"""Volume down media player."""
self.send_key('KEY_VOLDOWN')
def mute_volume(self, mute):
"""Send mute command."""
self.send_key('KEY_MUTE')
def media_play_pause(self):
"""Simulate play pause media player."""
if self._playing:
self.media_pause()
else:
self.media_play()
def media_play(self):
"""Send play command."""
self._playing = True
self.send_key('KEY_PLAY')
def media_pause(self):
"""Send media pause command to media player."""
self._playing = False
self.send_key('KEY_PAUSE')
def media_next_track(self):
"""Send next track command."""
self.send_key('KEY_FF')
def media_previous_track(self):
"""Send the previous track command."""
self.send_key('KEY_REWIND')
async def async_play_media(self, media_type, media_id, **kwargs):
"""Support changing a channel."""
if media_type != MEDIA_TYPE_CHANNEL:
_LOGGER.error('Unsupported media type')
return
# media_id should only be a channel number
try:
cv.positive_int(media_id)
except vol.Invalid:
_LOGGER.error('Media ID must be positive integer')
return
for digit in media_id:
await self.hass.async_add_job(self.send_key, 'KEY_' + digit)
await asyncio.sleep(KEY_PRESS_TIMEOUT, self.hass.loop)
await self.hass.async_add_job(self.send_key, 'KEY_ENTER')
def turn_on(self):
"""Turn the media player on."""
if self._mac:
self._wol.send_magic_packet(self._mac)
else:
self.send_key('KEY_POWERON')
async def async_select_source(self, source):
"""Select input source."""
if source not in SOURCES:
_LOGGER.error('Unsupported source')
return
await self.hass.async_add_job(self.send_key, SOURCES[source])
| 33.079038 | 77 | 0.622689 | [
"Apache-2.0"
] | MagicalTrev89/home-assistant | homeassistant/components/samsungtv/media_player.py | 9,626 | Python |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'everpro.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.5 | 73 | 0.682616 | [
"Apache-2.0"
] | Ascensiony/EverPro-Intelligence-APIs | backend/everpro/manage.py | 627 | Python |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a Container VM with the provided Container manifest."""
from container_helper import GenerateManifest
COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/'
def GlobalComputeUrl(project, collection, name):
return ''.join([COMPUTE_URL_BASE, 'projects/', project,
'/global/', collection, '/', name])
def ZonalComputeUrl(project, zone, collection, name):
return ''.join([COMPUTE_URL_BASE, 'projects/', project,
'/zones/', zone, '/', collection, '/', name])
def GenerateConfig(context):
"""Generate configuration."""
base_name = context.env['name']
# Properties for the container-based instance.
instance = {
'zone': context.properties['zone'],
'machineType': ZonalComputeUrl(context.env['project'],
context.properties['zone'],
'machineTypes',
'f1-micro'),
'metadata': {
'items': [{
'key': 'gce-container-declaration',
'value': GenerateManifest(context)
}]
},
'disks': [{
'deviceName': 'boot',
'type': 'PERSISTENT',
'autoDelete': True,
'boot': True,
'initializeParams': {
'diskName': base_name + '-disk',
'sourceImage': GlobalComputeUrl('cos-cloud',
'images',
context.properties[
'containerImage'])
},
}],
'networkInterfaces': [{
'accessConfigs': [{
'name': 'external-nat',
'type': 'ONE_TO_ONE_NAT'
}],
'network': GlobalComputeUrl(context.env['project'],
'networks',
'default')
}],
'serviceAccounts': [{
'email': 'default',
'scopes': ['https://www.googleapis.com/auth/logging.write']
}]
}
# Resources to return.
resources = {
'resources': [{
'name': base_name,
'type': 'compute.v1.instance',
'properties': instance
}]
}
return resources | 33.056818 | 74 | 0.53111 | [
"MIT"
] | AlexBulankou/dm-logbook-sample | templates/container_vm.py | 2,909 | Python |
"""Data handling by server or instance."""
import json
import urllib3
class DataHandler(object):
"""Handle data."""
def __init__(self, server_port=None):
"""Initialize.
:param server_port: Int. local port.
"""
self.server_port = server_port
self.logged_requests = {}
self.analysis = {
'total_requests': 0, 'domains': set(), 'duration': 0
}
def _delete(self):
http = urllib3.PoolManager()
resp = http.request(
'DELETE',
'http://localhost:{}/'.format(self.server_port)
)
if resp.status != 200:
raise Exception('Monitor Requests server error: {}.'.format(
resp.status
))
def _get(self):
http = urllib3.PoolManager()
resp = http.request(
'GET',
'http://localhost:{}/'.format(self.server_port)
)
if resp.status != 200:
raise Exception('Monitor Requests server error: {}.'.format(
resp.status
))
return json.loads(resp.data)
def _post(self, data):
http = urllib3.PoolManager()
resp = http.request(
'POST',
'http://localhost:{}/'.format(self.server_port),
headers={'Content-Type': 'application/json'},
body=json.dumps(data)
)
if resp.status != 200:
raise Exception('Monitor Requests server error: {}.'.format(
resp.status
))
def delete(self):
"""Delete data from server if applicable."""
if not self.server_port:
return
self._delete()
def log(self, url, domain, method, response, tb_list, duration):
"""Log request, store traceback/response data and update counts."""
if self.server_port:
self._post({
'url': url,
'domain': domain,
'method': method,
'response_content': str(response.content),
'response_status_code': response.status_code,
'duration': duration,
'traceback_list': tb_list
})
else:
if url not in self.logged_requests:
self.logged_requests[url] = {
'count': 0,
'methods': set(),
'tracebacks': set(),
'responses': set()
}
self.logged_requests[url]['count'] += 1
self.logged_requests[url]['methods'].add(method)
self.logged_requests[url]['tracebacks'].add(tuple(tb_list))
self.logged_requests[url]['responses'].add((
response.status_code,
response.content,
))
self.analysis['duration'] += duration
self.analysis['total_requests'] += 1
self.analysis['domains'].add(domain)
def retrieve(self):
"""Retrieve data from server or instance."""
if not self.server_port:
return self.logged_requests, self.analysis
data = self._get()
return data.get('logged_requests'), data.get('analysis')
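# Minimal usage sketch (port 9003 below is an arbitrary example):
# handler = DataHandler(server_port=9003)
# logged_requests, analysis = handler.retrieve()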
| 32.393939 | 75 | 0.518553 | [
"BSD-3-Clause"
] | danpozmanter/monitor_requests | monitor_requests/data.py | 3,207 | Python |
from django.db import models
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser, Group
)
from phonenumber_field.modelfields import PhoneNumberField
# For the signal
from django.dispatch import receiver
from django.urls import reverse
from django.core.mail import send_mail
from django_rest_passwordreset.signals import reset_password_token_created
@receiver(reset_password_token_created)
def password_reset_token_created(sender, instance, reset_password_token, *args, **kwargs):
email_plaintext_message = "{}?token={}".format(
reverse('password_reset:reset-password-request'), reset_password_token.key)
send_mail(
# title:
"Password Reset for {title}".format(title="Some website title"),
# message:
email_plaintext_message,
# from:
"[email protected]",
# to:
[reset_password_token.user.email]
)
class CustomUserManager(BaseUserManager):
def create_user(self, email, password=None):
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password=None):
user = self.create_user(
email,
password=password,
)
user.is_admin = True
user.is_staff = True
user.save(using=self._db)
return user
class User(AbstractBaseUser):
email = models.EmailField(
verbose_name='email address',
max_length=255,
unique=True,
)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
createdAt = models.DateTimeField(auto_now_add=True)
updatedAt = models.DateTimeField(auto_now=True)
USERNAME_FIELD = 'email'
objects = CustomUserManager()
class Meta:
        ordering = ['-createdAt']
verbose_name_plural = "Users"
def __str__(self):
return self.email
def has_perm(self, perm, obj=None):
"Does the user have a specific permission?"
# Simplest possible answer: Yes, always
return True
def has_module_perms(self, app_label):
"Does the user have permissions to view the app `app_label`?"
# Simplest possible answer: Yes, always
return True
class UserProfile(models.Model):
GENDER = (
('M', "Male"),
('F', "Female"),
)
user = models.OneToOneField(
User,
on_delete=models.CASCADE,
primary_key=True,
)
firstName = models.CharField(max_length=100)
lastName = models.CharField(max_length=100)
phone = PhoneNumberField(null=False, blank=False, unique=True)
createdAt = models.DateTimeField(auto_now_add=True)
updatedAt = models.DateTimeField(auto_now=True)
class Meta:
        ordering = ['-createdAt']
verbose_name_plural = "UserProfiles"
def userEmail(self):
email = self.user.email
return email
def fullName(self):
return f'{self.firstName} {self.lastName}'
def __str__(self):
return self.fullName()
| 28.579832 | 91 | 0.638048 | [
"MIT"
] | PavelescuVictor/DentalApplication | dentalapp-backend/dentalapp/userauth/models.py | 3,401 | Python |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
from tensorflow.python.kernel_tests import gradient_checker as gc
class MatMulTest(tf.test.TestCase):
def _testCpuMatmul(self, x, y, transpose_x=False, transpose_y=False):
x_mat = np.matrix(x).T if transpose_x else np.matrix(x)
y_mat = np.matrix(y).T if transpose_y else np.matrix(y)
np_ans = x_mat * y_mat
with self.test_session(use_gpu=False):
tf_ans = tf.matmul(x, y, transpose_x, transpose_y).eval()
self.assertAllClose(np_ans, tf_ans)
self.assertAllEqual(np_ans.shape, tf_ans.shape)
def _testGpuMatmul(self, x, y, transpose_x=False, transpose_y=False):
x_mat = np.matrix(x).T if transpose_x else np.matrix(x)
y_mat = np.matrix(y).T if transpose_y else np.matrix(y)
np_ans = x_mat * y_mat
with self.test_session(use_gpu=True):
tf_ans = tf.matmul(x, y, transpose_x, transpose_y).eval()
self.assertAllClose(np_ans, tf_ans)
self.assertAllEqual(np_ans.shape, tf_ans.shape)
def _randMatrix(self, rows, cols, dtype):
if dtype is np.complex64:
real = self._randMatrix(rows, cols, np.float32)
imag = self._randMatrix(rows, cols, np.float32)
return real + np.complex(0, 1) * imag
else:
return np.random.uniform(low=1.0, high=100.0, size=rows * cols).reshape(
[rows, cols]).astype(dtype)
# Basic test:
# [ [1],
# [2],
# [3], * [1, 2]
# [4] ]
def testFloatBasic(self):
x = np.arange(1., 5.).reshape([4, 1]).astype(np.float32)
y = np.arange(1., 3.).reshape([1, 2]).astype(np.float32)
self._testCpuMatmul(x, y)
self._testGpuMatmul(x, y)
def testDoubleBasic(self):
x = np.arange(1., 5.).reshape([4, 1]).astype(np.float64)
y = np.arange(1., 3.).reshape([1, 2]).astype(np.float64)
self._testCpuMatmul(x, y)
def testInt32Basic(self):
x = np.arange(1., 5.).reshape([4, 1]).astype(np.int32)
y = np.arange(1., 3.).reshape([1, 2]).astype(np.int32)
self._testCpuMatmul(x, y)
def testSComplexBasic(self):
x = np.arange(1., 5.).reshape([4, 1]).astype(np.complex64)
y = np.arange(1., 3.).reshape([1, 2]).astype(np.complex64)
self._testCpuMatmul(x, y)
# Tests testing random sized matrices.
def testFloatRandom(self):
for _ in range(10):
n, k, m = np.random.randint(1, 100, size=3)
x = self._randMatrix(n, k, np.float32)
y = self._randMatrix(k, m, np.float32)
self._testCpuMatmul(x, y)
self._testGpuMatmul(x, y)
def testDoubleRandom(self):
for _ in range(10):
n, k, m = np.random.randint(1, 100, size=3)
x = self._randMatrix(n, k, np.float64)
y = self._randMatrix(k, m, np.float64)
self._testCpuMatmul(x, y)
def testInt32Random(self):
for _ in range(10):
n, k, m = np.random.randint(1, 100, size=3)
x = self._randMatrix(n, k, np.int32)
y = self._randMatrix(k, m, np.int32)
self._testCpuMatmul(x, y)
def testSComplexRandom(self):
for _ in range(10):
n, k, m = np.random.randint(1, 100, size=3)
x = self._randMatrix(n, k, np.complex64)
y = self._randMatrix(k, m, np.complex64)
self._testCpuMatmul(x, y)
# Test the cases that transpose the matrices before multiplying.
# NOTE(keveman): The cases where only one of the inputs is
# transposed are covered by tf.matmul's gradient function.
def testFloatRandomTransposeBoth(self):
for _ in range(10):
n, k, m = np.random.randint(1, 100, size=3)
x = self._randMatrix(k, n, np.float32)
y = self._randMatrix(m, k, np.float32)
self._testCpuMatmul(x, y, True, True)
self._testGpuMatmul(x, y, True, True)
  def testDoubleRandomTransposeBoth(self):
for _ in range(10):
n, k, m = np.random.randint(1, 100, size=3)
x = self._randMatrix(k, n, np.float64)
y = self._randMatrix(m, k, np.float64)
self._testCpuMatmul(x, y, True, True)
def testMatMul_OutEmpty_A(self):
n, k, m = 0, 8, 3
x = self._randMatrix(n, k, np.float32)
y = self._randMatrix(k, m, np.float32)
self._testCpuMatmul(x, y)
self._testGpuMatmul(x, y)
def testMatMul_OutEmpty_B(self):
n, k, m = 3, 8, 0
x = self._randMatrix(n, k, np.float32)
y = self._randMatrix(k, m, np.float32)
self._testCpuMatmul(x, y)
self._testGpuMatmul(x, y)
def testMatMul_Inputs_Empty(self):
n, k, m = 3, 0, 4
x = self._randMatrix(n, k, np.float32)
y = self._randMatrix(k, m, np.float32)
self._testCpuMatmul(x, y)
self._testGpuMatmul(x, y)
# TODO(zhifengc): Figures out how to test matmul gradients on GPU.
class MatMulGradientTest(tf.test.TestCase):
def testGradientInput0(self):
with self.test_session(use_gpu=False):
x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2],
dtype=tf.float64, name="x")
y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
shape=[2, 4], dtype=tf.float64, name="y")
m = tf.matmul(x, y, name="matmul")
err = gc.ComputeGradientError(x, [3, 2], m, [3, 4])
print("matmul input0 gradient err = ", err)
self.assertLess(err, 1e-10)
def testGradientInput1(self):
with self.test_session(use_gpu=False):
x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2],
dtype=tf.float64, name="x")
y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
shape=[2, 4], dtype=tf.float64, name="y")
m = tf.matmul(x, y, name="matmul")
err = gc.ComputeGradientError(y, [2, 4], m, [3, 4])
print("matmul input1 gradient err = ", err)
self.assertLess(err, 1e-10)
def _VerifyInput0(self, transpose_a, transpose_b):
shape_x = [3, 2]
shape_y = [2, 4]
if transpose_a:
shape_x = list(reversed(shape_x))
if transpose_b:
shape_y = list(reversed(shape_y))
with self.test_session(use_gpu=False):
x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=shape_x,
dtype=tf.float64, name="x")
y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
shape=shape_y, dtype=tf.float64, name="y")
m = tf.matmul(x, y, transpose_a, transpose_b, name="matmul")
err = gc.ComputeGradientError(x, shape_x, m, [3, 4])
print("matmul input0 gradient err = ", err)
self.assertLess(err, 1e-10)
def testGradientInput0WithTranspose(self):
self._VerifyInput0(transpose_a=True, transpose_b=False)
self._VerifyInput0(transpose_a=False, transpose_b=True)
self._VerifyInput0(transpose_a=True, transpose_b=True)
def _VerifyInput1(self, transpose_a, transpose_b):
shape_x = [3, 2]
shape_y = [2, 4]
if transpose_a:
shape_x = list(reversed(shape_x))
if transpose_b:
shape_y = list(reversed(shape_y))
with self.test_session(use_gpu=False):
x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=shape_x,
dtype=tf.float64, name="x")
y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
shape=shape_y, dtype=tf.float64, name="y")
m = tf.matmul(x, y, transpose_a, transpose_b, name="matmul")
err = gc.ComputeGradientError(y, shape_y, m, [3, 4])
print("matmul input1 gradient err = ", err)
self.assertLess(err, 1e-10)
def testGradientInput1WithTranspose(self):
self._VerifyInput1(transpose_a=True, transpose_b=False)
self._VerifyInput1(transpose_a=False, transpose_b=True)
self._VerifyInput1(transpose_a=True, transpose_b=True)
if __name__ == "__main__":
tf.test.main()
| 37.132743 | 80 | 0.63632 | [
"Apache-2.0"
] | DougFirErickson/tensorflow | tensorflow/python/kernel_tests/matmul_op_test.py | 8,392 | Python |
#!/usr/bin/env python
# Copyright 2016 Vijayaditya Peddinti.
# 2016 Vimal Manohar
# Apache 2.0.
""" This script is similar to steps/nnet3/train_dnn.py but trains a
raw neural network instead of an acoustic model.
"""
from __future__ import print_function
from __future__ import division
import argparse
import logging
import pprint
import os
import sys
import traceback
sys.path.insert(0, 'steps')
import libs.nnet3.train.common as common_train_lib
import libs.common as common_lib
import libs.nnet3.train.frame_level_objf as train_lib
import libs.nnet3.report.log_parse as nnet3_log_parse
logger = logging.getLogger('libs')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s [%(pathname)s:%(lineno)s - "
"%(funcName)s - %(levelname)s ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info('Starting raw DNN trainer (train_raw_dnn.py)')
def get_args():
""" Get args from stdin.
The common options are defined in the object
libs.nnet3.train.common.CommonParser.parser.
See steps/libs/nnet3/train/common.py
"""
parser = argparse.ArgumentParser(
description="""Trains a feed forward raw DNN (without transition model)
using frame-level objectives like cross-entropy and mean-squared-error.
DNNs include simple DNNs, TDNNs and CNNs.""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve',
parents=[common_train_lib.CommonParser(include_chunk_context=False).parser])
# egs extraction options
parser.add_argument("--egs.frames-per-eg", type=int, dest='frames_per_eg',
default=8,
help="Number of output labels per example")
parser.add_argument("--image.augmentation-opts", type=str,
dest='image_augmentation_opts',
default=None,
help="Image augmentation options")
# trainer options
parser.add_argument("--trainer.input-model", type=str,
dest='input_model', default=None,
action=common_lib.NullstrToNoneAction,
help="""If specified, this model is used as initial
raw model (0.raw in the script) instead of initializing
the model from xconfig. Configs dir is not expected to
exist and left/right context is computed from this
model.""")
parser.add_argument("--trainer.prior-subset-size", type=int,
dest='prior_subset_size', default=20000,
help="Number of samples for computing priors")
parser.add_argument("--trainer.num-jobs-compute-prior", type=int,
dest='num_jobs_compute_prior', default=10,
help="The prior computation jobs are single "
"threaded and run on the CPU")
# Parameters for the optimization
parser.add_argument("--trainer.optimization.minibatch-size",
type=str, dest='minibatch_size', default='512',
help="""Size of the minibatch used in SGD training
(argument to nnet3-merge-egs); may be a more general
rule as accepted by the --minibatch-size option of
nnet3-merge-egs; run that program without args to see
the format.""")
parser.add_argument("--compute-average-posteriors",
type=str, action=common_lib.StrToBoolAction,
choices=["true", "false"], default=False,
help="""If true, then the average output of the
network is computed and dumped as post.final.vec""")
# General options
parser.add_argument("--nj", type=int, default=4,
help="Number of parallel jobs")
parser.add_argument("--use-dense-targets", type=str,
action=common_lib.StrToBoolAction,
default=True, choices=["true", "false"],
help="Train neural network using dense targets")
parser.add_argument("--feat-dir", type=str, required=False,
help="Directory with features used for training "
"the neural network.")
parser.add_argument("--targets-scp", type=str, required=False,
help="""Targets for training neural network.
This is a kaldi-format SCP file of target matrices.
<utterance-id> <extended-filename-of-target-matrix>.
The target matrix's column dim must match
the neural network output dim, and the
row dim must match the number of output frames
i.e. after subsampling if "--frame-subsampling-factor"
option is passed to --egs.opts.""")
parser.add_argument("--vad-egs", type=str,
action=common_lib.StrToBoolAction,
default=False, choices=["true", "false"],
help="Get nnet3 egs with vad applied on features.")
parser.add_argument("--dir", type=str, required=True,
help="Directory to store the models and "
"all other files.")
print(' '.join(sys.argv))
print(sys.argv)
args = parser.parse_args()
[args, run_opts] = process_args(args)
return [args, run_opts]
def process_args(args):
""" Process the options got from get_args()
"""
if args.frames_per_eg < 1:
raise Exception("--egs.frames-per-eg should have a minimum value of 1")
if not common_train_lib.validate_minibatch_size_str(args.minibatch_size):
raise Exception("--trainer.optimization.minibatch-size has an invalid value")
if (not os.path.exists(args.dir)):
raise Exception("Directory specified with --dir={0} "
"does not exist.".format(args.dir))
if (not os.path.exists(args.dir + "/configs") and
(args.input_model is None or not os.path.exists(args.input_model))):
raise Exception("Either --trainer.input-model option should be supplied, "
"and exist; or the {0}/configs directory should exist."
"{0}/configs is the output of make_configs.py"
"".format(args.dir))
# set the options corresponding to args.use_gpu
run_opts = common_train_lib.RunOpts()
if args.use_gpu in ["true", "false"]:
args.use_gpu = ("yes" if args.use_gpu == "true" else "no")
if args.use_gpu in ["yes", "wait"]:
if not common_lib.check_if_cuda_compiled():
logger.warning(
"""You are running with one thread but you have not compiled
for CUDA. You may be running a setup optimized for GPUs.
If you have GPUs and have nvcc installed, go to src/ and do
./configure; make""")
run_opts.train_queue_opt = "--gpu 1"
run_opts.parallel_train_opts = "--use-gpu={}".format(args.use_gpu)
run_opts.combine_gpu_opt = "--use-gpu={}".format(args.use_gpu)
run_opts.combine_queue_opt = "--gpu 1"
run_opts.prior_gpu_opt = "--use-gpu={}".format(args.use_gpu)
run_opts.prior_queue_opt = "--gpu 1"
else:
logger.warning("Without using a GPU this will be very slow. "
"nnet3 does not yet support multiple threads.")
run_opts.train_queue_opt = ""
run_opts.parallel_train_opts = "--use-gpu=no"
run_opts.combine_gpu_opt = "--use-gpu=no"
run_opts.combine_queue_opt = ""
run_opts.prior_gpu_opt = "--use-gpu=no"
run_opts.prior_queue_opt = ""
run_opts.command = args.command
run_opts.egs_command = (args.egs_command
if args.egs_command is not None else
args.command)
run_opts.num_jobs_compute_prior = args.num_jobs_compute_prior
return [args, run_opts]
def train(args, run_opts):
""" The main function for training.
Args:
args: a Namespace object with the required parameters
obtained from the function process_args()
run_opts: RunOpts object obtained from the process_args()
"""
arg_string = pprint.pformat(vars(args))
logger.info("Arguments for the experiment\n{0}".format(arg_string))
# Set some variables.
# note, feat_dim gets set to 0 if args.feat_dir is unset (None).
feat_dim = common_lib.get_feat_dim(args.feat_dir)
ivector_dim = common_lib.get_ivector_dim(args.online_ivector_dir)
ivector_id = common_lib.get_ivector_extractor_id(args.online_ivector_dir)
config_dir = '{0}/configs'.format(args.dir)
var_file = '{0}/vars'.format(config_dir)
if args.input_model is None:
config_dir = '{0}/configs'.format(args.dir)
var_file = '{0}/vars'.format(config_dir)
variables = common_train_lib.parse_generic_config_vars_file(var_file)
else:
# If args.input_model is specified, the model left and right contexts
# are computed using input_model.
variables = common_train_lib.get_input_model_info(args.input_model)
# Set some variables.
try:
model_left_context = variables['model_left_context']
model_right_context = variables['model_right_context']
except KeyError as e:
raise Exception("KeyError {0}: Variables need to be defined in "
"{1}".format(str(e), '{0}/configs'.format(args.dir)))
left_context = model_left_context
right_context = model_right_context
# Initialize as "raw" nnet, prior to training the LDA-like preconditioning
# matrix. This first config just does any initial splicing that we do;
# we do this as it's a convenient way to get the stats for the 'lda-like'
# transform.
if (args.stage <= -4) and os.path.exists(args.dir+"/configs/init.config") and \
(args.input_model is None):
logger.info("Initializing the network for computing the LDA stats")
common_lib.execute_command(
"""{command} {dir}/log/nnet_init.log \
nnet3-init --srand=-2 {dir}/configs/init.config \
{dir}/init.raw""".format(command=run_opts.command,
dir=args.dir))
default_egs_dir = '{0}/egs'.format(args.dir)
if (args.stage <= -3) and args.egs_dir is None:
if args.targets_scp is None or args.feat_dir is None:
raise Exception("If you don't supply the --egs-dir option, the "
"--targets-scp and --feat-dir options are required.")
logger.info("Generating egs")
if args.use_dense_targets:
target_type = "dense"
try:
num_targets = int(variables['num_targets'])
if (common_lib.get_feat_dim_from_scp(args.targets_scp)
!= num_targets):
raise Exception("Mismatch between num-targets provided to "
"script vs configs")
except KeyError as e:
num_targets = -1
else:
target_type = "sparse"
try:
num_targets = int(variables['num_targets'])
except KeyError as e:
raise Exception("KeyError {0}: Variables need to be defined "
"in {1}".format(
str(e), '{0}/configs'.format(args.dir)))
train_lib.raw_model.generate_egs_using_targets(
data=args.feat_dir, targets_scp=args.targets_scp,
vad_egs=args.vad_egs,
egs_dir=default_egs_dir,
left_context=left_context, right_context=right_context,
run_opts=run_opts,
frames_per_eg_str=str(args.frames_per_eg),
srand=args.srand,
egs_opts=args.egs_opts,
cmvn_opts=args.cmvn_opts,
online_ivector_dir=args.online_ivector_dir,
samples_per_iter=args.samples_per_iter,
stage=args.egs_stage,
target_type=target_type,
num_targets=num_targets)
if args.egs_dir is None:
egs_dir = default_egs_dir
else:
egs_dir = args.egs_dir
[egs_left_context, egs_right_context,
frames_per_eg_str, num_archives] = (
common_train_lib.verify_egs_dir(egs_dir, feat_dim,
ivector_dim, ivector_id,
left_context, right_context))
assert str(args.frames_per_eg) == frames_per_eg_str
if args.num_jobs_final > num_archives:
raise Exception('num_jobs_final cannot exceed the number of archives '
'in the egs directory')
# copy the properties of the egs to dir for
# use during decoding
common_train_lib.copy_egs_properties_to_exp_dir(egs_dir, args.dir)
if args.stage <= -2 and os.path.exists(args.dir+"/configs/init.config") and \
(args.input_model is None):
logger.info('Computing the preconditioning matrix for input features')
train_lib.common.compute_preconditioning_matrix(
args.dir, egs_dir, num_archives, run_opts,
max_lda_jobs=args.max_lda_jobs,
rand_prune=args.rand_prune)
if args.stage <= -2:
logger.info("Computing initial vector for FixedScaleComponent before"
" softmax, using priors^{prior_scale} and rescaling to"
" average 1".format(
prior_scale=args.presoftmax_prior_scale_power))
# total num of frames per target already prepared
counts_path = os.path.dirname(args.targets_scp) + '/target_counts'
common_train_lib.compute_presoftmax_prior_scale_targets(
args.dir, counts_path,
presoftmax_prior_scale_power=args.presoftmax_prior_scale_power)
if args.stage <= -1:
logger.info("Preparing the initial network.")
common_train_lib.prepare_initial_network(args.dir, run_opts, args.srand, args.input_model)
# set num_iters so that as close as possible, we process the data
# $num_epochs times, i.e. $num_iters*$avg_num_jobs) ==
# $num_epochs*$num_archives, where
# avg_num_jobs=(num_jobs_initial+num_jobs_final)/2.
num_archives_expanded = num_archives * args.frames_per_eg
num_archives_to_process = int(args.num_epochs * num_archives_expanded)
num_archives_processed = 0
num_iters = int((num_archives_to_process * 2) / (args.num_jobs_initial + args.num_jobs_final))
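    # For example: with num-epochs=3, 120 expanded archives, num-jobs-initial=2
    # and num-jobs-final=4, we must process 3 * 120 = 360 archives, so
    # num_iters = 360 * 2 / (2 + 4) = 120 iterations.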
# If do_final_combination is True, compute the set of models_to_combine.
# Otherwise, models_to_combine will be none.
if args.do_final_combination:
models_to_combine = common_train_lib.get_model_combine_iters(
num_iters, args.num_epochs,
num_archives_expanded, args.max_models_combine,
args.num_jobs_final)
else:
models_to_combine = None
if os.path.exists('{0}/valid_diagnostic.scp'.format(egs_dir)):
if os.path.exists('{0}/valid_diagnostic.egs'.format(egs_dir)):
raise Exception('both {0}/valid_diagnostic.egs and '
'{0}/valid_diagnostic.scp exist.'
'This script expects only one of them to exist.'
''.format(egs_dir))
use_multitask_egs = True
else:
if not os.path.exists('{0}/valid_diagnostic.egs'.format(egs_dir)):
raise Exception('neither {0}/valid_diagnostic.egs nor '
'{0}/valid_diagnostic.scp exist.'
'This script expects one of them.'
''.format(egs_dir))
use_multitask_egs = False
logger.info("Training will run for {0} epochs = "
"{1} iterations".format(args.num_epochs, num_iters))
for iter in range(num_iters):
if (args.exit_stage is not None) and (iter == args.exit_stage):
logger.info("Exiting early due to --exit-stage {0}".format(iter))
return
current_num_jobs = common_train_lib.get_current_num_jobs(
iter, num_iters,
args.num_jobs_initial, args.num_jobs_step, args.num_jobs_final)
if args.stage <= iter:
lrate = common_train_lib.get_learning_rate(iter, current_num_jobs,
num_iters,
num_archives_processed,
num_archives_to_process,
args.initial_effective_lrate,
args.final_effective_lrate)
shrinkage_value = 1.0 - (args.proportional_shrink * lrate)
if shrinkage_value <= 0.5:
raise Exception("proportional-shrink={0} is too large, it gives "
"shrink-value={1}".format(args.proportional_shrink,
shrinkage_value))
percent = num_archives_processed * 100.0 / num_archives_to_process
epoch = (num_archives_processed * args.num_epochs
/ num_archives_to_process)
shrink_info_str = ''
if shrinkage_value != 1.0:
shrink_info_str = 'shrink: {0:0.5f}'.format(shrinkage_value)
logger.info("Iter: {0}/{1} Jobs: {2} "
"Epoch: {3:0.2f}/{4:0.1f} ({5:0.1f}% complete) "
"lr: {6:0.6f} {7}".format(iter, num_iters - 1,
current_num_jobs,
epoch, args.num_epochs,
percent,
lrate, shrink_info_str))
train_lib.common.train_one_iteration(
dir=args.dir,
iter=iter,
srand=args.srand,
egs_dir=egs_dir,
num_jobs=current_num_jobs,
num_archives_processed=num_archives_processed,
num_archives=num_archives,
learning_rate=lrate,
dropout_edit_string=common_train_lib.get_dropout_edit_string(
args.dropout_schedule,
float(num_archives_processed) / num_archives_to_process,
iter),
train_opts=' '.join(args.train_opts),
minibatch_size_str=args.minibatch_size,
frames_per_eg=args.frames_per_eg,
momentum=args.momentum,
max_param_change=args.max_param_change,
shrinkage_value=shrinkage_value,
shuffle_buffer_size=args.shuffle_buffer_size,
run_opts=run_opts,
get_raw_nnet_from_am=False,
image_augmentation_opts=args.image_augmentation_opts,
use_multitask_egs=use_multitask_egs,
backstitch_training_scale=args.backstitch_training_scale,
backstitch_training_interval=args.backstitch_training_interval)
if args.cleanup:
            # clean up everything but the last 2 models, under certain
# conditions
common_train_lib.remove_model(
args.dir, iter-2, num_iters, models_to_combine,
args.preserve_model_interval,
get_raw_nnet_from_am=False)
if args.email is not None:
reporting_iter_interval = num_iters * args.reporting_interval
if iter % reporting_iter_interval == 0:
                # let's do some reporting
[report, times, data] = (
nnet3_log_parse.generate_acc_logprob_report(args.dir))
message = report
subject = ("Update : Expt {dir} : "
"Iter {iter}".format(dir=args.dir, iter=iter))
common_lib.send_mail(message, subject, args.email)
num_archives_processed = num_archives_processed + current_num_jobs
if args.stage <= num_iters:
if args.do_final_combination:
logger.info("Doing final combination to produce final.raw")
train_lib.common.combine_models(
dir=args.dir, num_iters=num_iters,
models_to_combine=models_to_combine, egs_dir=egs_dir,
minibatch_size_str=args.minibatch_size, run_opts=run_opts,
get_raw_nnet_from_am=False,
max_objective_evaluations=args.max_objective_evaluations,
use_multitask_egs=use_multitask_egs)
else:
common_lib.force_symlink("{0}.raw".format(num_iters),
"{0}/final.raw".format(args.dir))
if args.compute_average_posteriors and args.stage <= num_iters + 1:
logger.info("Getting average posterior for output-node 'output'.")
train_lib.common.compute_average_posterior(
dir=args.dir, iter='final', egs_dir=egs_dir,
num_archives=num_archives,
prior_subset_size=args.prior_subset_size, run_opts=run_opts,
get_raw_nnet_from_am=False)
if args.cleanup:
logger.info("Cleaning up the experiment directory "
"{0}".format(args.dir))
remove_egs = args.remove_egs
if args.egs_dir is not None:
# this egs_dir was not created by this experiment so we will not
# delete it
remove_egs = False
common_train_lib.clean_nnet_dir(
nnet_dir=args.dir, num_iters=num_iters, egs_dir=egs_dir,
preserve_model_interval=args.preserve_model_interval,
remove_egs=remove_egs,
get_raw_nnet_from_am=False)
# do some reporting
outputs_list = common_train_lib.get_outputs_list("{0}/final.raw".format(
args.dir), get_raw_nnet_from_am=False)
if 'output' in outputs_list:
[report, times, data] = nnet3_log_parse.generate_acc_logprob_report(
args.dir)
if args.email is not None:
common_lib.send_mail(report, "Update : Expt {0} : "
"complete".format(args.dir),
args.email)
with open("{dir}/accuracy.{output_name}.report".format(dir=args.dir,
output_name="output"),
"w") as f:
f.write(report)
common_lib.execute_command("steps/info/nnet3_dir_info.pl "
"{0}".format(args.dir))
def main():
[args, run_opts] = get_args()
try:
train(args, run_opts)
common_lib.wait_for_background_commands()
except BaseException as e:
# look for BaseException so we catch KeyboardInterrupt, which is
# what we get when a background thread dies.
if args.email is not None:
message = ("Training session for experiment {dir} "
"died due to an error.".format(dir=args.dir))
common_lib.send_mail(message, message, args.email)
if not isinstance(e, KeyboardInterrupt):
traceback.print_exc()
sys.exit(1)
if __name__ == "__main__":
main()
| 44.631086 | 98 | 0.593253 | ["Apache-2.0"] | iezhanqingran/kaldi | egs/wsj/s5/steps/nnet3/train_raw_dnn.py | 23,833 | Python |
#!/usr/bin/env python
# Script for grading performance.
# Performance is graded by comparing the student's best wall-clock time
# (not speedup) after running the code in 64, 128, and 240 thread
# configurations for bfs, kbfs, pagerank, and graph decomp against the
# reference solution.
import re
import subprocess
import sys
GRAPHS = [
"/home/15-418/asst3_graphs/soc-pokec_30m.graph",
"/home/15-418/asst3_graphs/soc-livejournal1_68m.graph",
"/home/15-418/asst3_graphs/com-orkut_117m.graph",
"/home/15-418/asst3_graphs/rmat_200m.graph"
]
# runGraph returns the student's score and total possible score for running 3
# algorithms on the given graph.
def runGraph(paraGraph, g):
args = [
paraGraph,
"grade",
g,
"-r" # Don't run ref
]
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
while True:
line = proc.stdout.readline()
if line != '':
            line = line.strip()
# Print the line so the user can see the detailed timing breakdown.
print line
matchObj = re.match(r'Total Grade: ([\d\.]*)\/([\d\.]*)$', line, re.M)
if matchObj:
return float(matchObj.group(1)), float(matchObj.group(2))
else:
break
return -1, -1
def main():
if len(sys.argv) != 2:
print "Usage: ./grade_peformance.py <path to paraGraph>"
sys.exit(1)
paraGraph = sys.argv[1]
score = 0
possibleScore = 0
for g in GRAPHS:
print "Timing " + g
graphScore, pScore = runGraph(paraGraph, g)
if graphScore < 0:
sys.stderr.write("Error parsing total grade for graph " + g + "\n")
score += graphScore
possibleScore += pScore
print ""
print "**************************************************"
print "Final Performance Score: %f/%f" % (score, possibleScore)
print "**************************************************"
if __name__ == "__main__":
main()
| 27.014493 | 77 | 0.621781 | ["MIT"] | eric-haibin-lin/15418-asst3 | jobs/grade_performance.py | 1,864 | Python |
"""
Test module for Strava API reader base module
"""
import os
import json
import pytest
from pandas import DataFrame, Timedelta, Timestamp
from runpandas import read_strava
from runpandas import types
from stravalib.protocol import ApiV3
from stravalib.client import Client
from stravalib.model import Stream
pytestmark = pytest.mark.stable
class MockResponse:
def __init__(self, json_file):
with open(json_file) as json_handler:
self.json_data = json.load(json_handler)
def json(self):
return self.json_data
def mock_get_activity_streams(streams_file):
"""
    @TODO: I needed to mock the behavior of `stravalib.client.get_activity_streams`;
    passing a JSON file isn't the best way to mock the request from Strava.
"""
stream_mock = MockResponse(streams_file).json()
entities = {}
for key, value in stream_mock.items():
value["type"] = key
stream = Stream.deserialize(value)
entities[stream.type] = stream
return entities
@pytest.fixture
def dirpath(datapath):
return datapath("io", "data")
@pytest.fixture
def strava_activity(dirpath, mocker):
activity_json = os.path.join(dirpath, "strava", "activity.json")
streams_json = os.path.join(dirpath, "strava", "streams.json")
mocker.patch.object(ApiV3, "get", return_value=MockResponse(activity_json).json())
mocker.patch.object(
Client,
"get_activity_streams",
return_value=mock_get_activity_streams(streams_json),
)
# we don't use access token here, since we will mock the stravalib json response
activity = read_strava(
activity_id=4437021783,
access_token=None,
refresh_token=None,
to_df=False,
)
return activity
@pytest.fixture
def strava_dataframe(dirpath, mocker):
activity_json = os.path.join(dirpath, "strava", "activity.json")
streams_json = os.path.join(dirpath, "strava", "streams.json")
mocker.patch.object(ApiV3, "get", return_value=MockResponse(activity_json).json())
mocker.patch.object(
Client,
"get_activity_streams",
return_value=mock_get_activity_streams(streams_json),
)
# we don't use access token here, since we will mock the stravalib json response
activity = read_strava(
activity_id=4437021783,
access_token=None,
refresh_token=None,
to_df=True,
)
return activity
def test_read_strava_basic_dataframe(dirpath, mocker):
activity_json = os.path.join(dirpath, "strava", "activity.json")
streams_json = os.path.join(dirpath, "strava", "streams.json")
mocker.patch.object(ApiV3, "get", return_value=MockResponse(activity_json).json())
mocker.patch.object(
Client,
"get_activity_streams",
return_value=mock_get_activity_streams(streams_json),
)
# we don't use access token here, since we will mock the stravalib json response
activity = read_strava(
activity_id=4437021783,
access_token=None,
refresh_token=None,
to_df=True,
)
assert isinstance(activity, DataFrame)
included_data = set(
[
"latitude",
"longitude",
"altitude",
"distance",
"velocity_smooth",
"heartrate",
"cadence",
"moving",
"grade_smooth",
]
)
assert included_data <= set(activity.columns.to_list())
assert activity.size == 15723
def test_read_strava_activity(dirpath, mocker):
activity_json = os.path.join(dirpath, "strava", "activity.json")
streams_json = os.path.join(dirpath, "strava", "streams.json")
mocker.patch.object(ApiV3, "get", return_value=MockResponse(activity_json).json())
mocker.patch.object(
Client,
"get_activity_streams",
return_value=mock_get_activity_streams(streams_json),
)
# we don't use access token here, since we will mock the stravalib json response
activity = read_strava(
activity_id=4437021783,
access_token=None,
refresh_token=None,
to_df=False,
)
assert isinstance(activity, types.Activity)
included_data = set(
[
"alt",
"cad",
"dist",
"hr",
"lon",
"lat",
"moving",
"velocity_smooth",
"grade_smooth",
]
)
assert included_data <= set(activity.columns.to_list())
assert activity.size == 15723
test_data = [
(pytest.lazy_fixture("strava_activity"), "alt", 0, 6.4),
(pytest.lazy_fixture("strava_activity"), "alt", -1, 6.6),
(pytest.lazy_fixture("strava_activity"), "cad", 0, 79),
(pytest.lazy_fixture("strava_activity"), "cad", -1, 86),
(pytest.lazy_fixture("strava_activity"), "dist", 0, 0.0),
(pytest.lazy_fixture("strava_activity"), "dist", -1, 12019.7),
(pytest.lazy_fixture("strava_activity"), "hr", 0, 111),
(pytest.lazy_fixture("strava_activity"), "hr", -1, 160),
(pytest.lazy_fixture("strava_activity"), "lat", 0, -8.016994),
(pytest.lazy_fixture("strava_activity"), "lon", 0, -34.847439),
(pytest.lazy_fixture("strava_activity"), "lat", -1, -8.016821),
(pytest.lazy_fixture("strava_activity"), "lon", -1, -34.84716),
(pytest.lazy_fixture("strava_activity"), "moving", 0, False),
(pytest.lazy_fixture("strava_activity"), "moving", -1, True),
(pytest.lazy_fixture("strava_activity"), "velocity_smooth", 0, 0.0),
(pytest.lazy_fixture("strava_activity"), "velocity_smooth", -1, 3.2),
(pytest.lazy_fixture("strava_activity"), "grade_smooth", 0, 1.1),
(pytest.lazy_fixture("strava_activity"), "grade_smooth", -1, -0.6),
(pytest.lazy_fixture("strava_dataframe"), "altitude", 0, 6.4),
(pytest.lazy_fixture("strava_dataframe"), "altitude", -1, 6.6),
(pytest.lazy_fixture("strava_dataframe"), "cadence", 0, 79),
(pytest.lazy_fixture("strava_dataframe"), "cadence", -1, 86),
(pytest.lazy_fixture("strava_dataframe"), "distance", 0, 0.0),
(pytest.lazy_fixture("strava_dataframe"), "distance", -1, 12019.7),
(pytest.lazy_fixture("strava_dataframe"), "heartrate", 0, 111),
(pytest.lazy_fixture("strava_dataframe"), "heartrate", -1, 160),
(pytest.lazy_fixture("strava_dataframe"), "latitude", 0, -8.016994),
(pytest.lazy_fixture("strava_dataframe"), "longitude", 0, -34.847439),
(pytest.lazy_fixture("strava_dataframe"), "latitude", -1, -8.016821),
(pytest.lazy_fixture("strava_dataframe"), "longitude", -1, -34.84716),
(pytest.lazy_fixture("strava_dataframe"), "moving", 0, False),
(pytest.lazy_fixture("strava_dataframe"), "moving", -1, True),
(pytest.lazy_fixture("strava_dataframe"), "velocity_smooth", 0, 0.0),
(pytest.lazy_fixture("strava_dataframe"), "velocity_smooth", -1, 3.2),
(pytest.lazy_fixture("strava_dataframe"), "grade_smooth", 0, 1.1),
(pytest.lazy_fixture("strava_dataframe"), "grade_smooth", -1, -0.6),
]
@pytest.mark.parametrize("activity,column,index,expected", test_data)
def test_strava_values(activity, column, index, expected):
assert activity[column].iloc[index] == expected
assert activity.index[-1] == Timedelta("0 days 01:25:45")
if isinstance(activity, types.Activity):
assert activity.start == Timestamp("2020-12-06 06:36:27")
| 34.933649 | 90 | 0.66124 | ["MIT"] | bitner/runpandas | runpandas/tests/test_strava_parser.py | 7,371 | Python |
from flask import g
from app.comm.CompositeOperate import CompositeOperate
from app.comm.SqlExecute import SqlExecute
from app.module_config import table_module_map
class CommentController(CompositeOperate):
def __init__(self, module):
super(CommentController, self).__init__(module)
def after_deal_get(self):
comments = g.result.get("data")
        # Fetch the user's like records
user_id = g.flask_httpauth_user.get('id', None) if g.flask_httpauth_user else None
        # Like records (comment id -> like status)
comment_licks_dict = dict()
if user_id is not None:
sql_query = table_module_map['bloglikelog'].sql_query_default
sql_query = f'{sql_query} where bll_userid={user_id}'
user_likes = SqlExecute.query_sql_data(sql_query)
comment_licks_dict = {like['bll_blogcommentid']:like['bll_status'] for like in user_likes}
        # All root comments; add a flag for whether the user liked each one
new_comments = []
for comment in comments:
comment['is_like'] = comment_licks_dict.get(comment['id']) or 0
if not comment['bc_commentupid']:
new_comments.append(comment)
# new_comments = [comment for comment in comments if not comment['bc_commentupid']]
for comment in new_comments:
            # Fetch the replies to each comment
comment['sub'] = [sub for sub in comments if sub['bc_commentupid']==comment['id']]
g.result['data'] = new_comments
def before_deal_post(self):
g.json_data["data"]["bc_createuid"] = g.flask_httpauth_user.get('id')
| 39.076923 | 102 | 0.664042 | ["MIT"] | szhu9903/flask-react-blog | blog_server/app/api/general/CommentController.py | 1,600 | Python |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs related to blockchainstate.
Test the following RPCs:
- getblockchaininfo
- gettxoutsetinfo
- getdifficulty
- getbestblockhash
- getblockhash
- getblockheader
- getchaintxstats
- getnetworkhashps
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
import http.client
import subprocess
import sys
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises,
assert_raises_rpc_error,
assert_is_hex_string,
assert_is_hash_string,
)
from test_framework.blocktools import (
create_block,
create_coinbase,
)
from test_framework.messages import (
msg_block,
)
from test_framework.mininode import (
P2PInterface,
network_thread_start,
)
class BlockchainTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.stderr = sys.stdout
self.extra_args = [['-stopatheight=207', '-prune=1', '-txindex=0']]
def run_test(self):
# Have to prepare the chain manually here.
# txindex=1 by default in Xazab which is incompatible with pruning.
self.set_genesis_mocktime()
for i in range(200):
self.bump_mocktime(156)
self.nodes[0].generate(1)
# Actual tests
self._test_getblockchaininfo()
self._test_getchaintxstats()
self._test_gettxoutsetinfo()
self._test_getblockheader()
self._test_getdifficulty()
self._test_getnetworkhashps()
self._test_stopatheight()
self._test_waitforblockheight()
assert self.nodes[0].verifychain(4, 0)
def _test_getblockchaininfo(self):
self.log.info("Test getblockchaininfo")
keys = [
'bestblockhash',
'bip9_softforks',
'blocks',
'chain',
'chainwork',
'difficulty',
'headers',
'initialblockdownload',
'mediantime',
'pruned',
'size_on_disk',
'softforks',
'verificationprogress',
'warnings',
]
res = self.nodes[0].getblockchaininfo()
# result should have these additional pruning keys if manual pruning is enabled
assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning'] + keys))
# size_on_disk should be > 0
assert_greater_than(res['size_on_disk'], 0)
# pruneheight should be greater or equal to 0
assert_greater_than_or_equal(res['pruneheight'], 0)
# check other pruning fields given that prune=1
assert res['pruned']
assert not res['automatic_pruning']
self.restart_node(0, ['-stopatheight=207', '-txindex=0'])
res = self.nodes[0].getblockchaininfo()
# should have exact keys
assert_equal(sorted(res.keys()), keys)
self.restart_node(0, ['-stopatheight=207', '-prune=550', '-txindex=0'])
res = self.nodes[0].getblockchaininfo()
# result should have these additional pruning keys if prune=550
assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning', 'prune_target_size'] + keys))
# check related fields
assert res['pruned']
assert_equal(res['pruneheight'], 0)
assert res['automatic_pruning']
assert_equal(res['prune_target_size'], 576716800)
assert_greater_than(res['size_on_disk'], 0)
def _test_getchaintxstats(self):
self.log.info("Test getchaintxstats")
# Test `getchaintxstats` invalid extra parameters
assert_raises_rpc_error(-1, 'getchaintxstats', self.nodes[0].getchaintxstats, 0, '', 0)
# Test `getchaintxstats` invalid `nblocks`
assert_raises_rpc_error(-1, "JSON value is not an integer as expected", self.nodes[0].getchaintxstats, '')
assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, -1)
assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, self.nodes[0].getblockcount())
# Test `getchaintxstats` invalid `blockhash`
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].getchaintxstats, blockhash=0)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getchaintxstats, blockhash='0')
blockhash = self.nodes[0].getblockhash(200)
self.nodes[0].invalidateblock(blockhash)
assert_raises_rpc_error(-8, "Block is not in main chain", self.nodes[0].getchaintxstats, blockhash=blockhash)
self.nodes[0].reconsiderblock(blockhash)
chaintxstats = self.nodes[0].getchaintxstats(1)
# 200 txs plus genesis tx
assert_equal(chaintxstats['txcount'], 201)
# tx rate should be 1 per ~2.6 minutes (156 seconds), or 1/156
# we have to round because of binary math
assert_equal(round(chaintxstats['txrate'] * 156, 10), Decimal(1))
b1_hash = self.nodes[0].getblockhash(1)
b1 = self.nodes[0].getblock(b1_hash)
b200_hash = self.nodes[0].getblockhash(200)
b200 = self.nodes[0].getblock(b200_hash)
time_diff = b200['mediantime'] - b1['mediantime']
chaintxstats = self.nodes[0].getchaintxstats()
assert_equal(chaintxstats['time'], b200['time'])
assert_equal(chaintxstats['txcount'], 201)
assert_equal(chaintxstats['window_final_block_hash'], b200_hash)
assert_equal(chaintxstats['window_block_count'], 199)
assert_equal(chaintxstats['window_tx_count'], 199)
assert_equal(chaintxstats['window_interval'], time_diff)
assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(199))
chaintxstats = self.nodes[0].getchaintxstats(blockhash=b1_hash)
assert_equal(chaintxstats['time'], b1['time'])
assert_equal(chaintxstats['txcount'], 2)
assert_equal(chaintxstats['window_final_block_hash'], b1_hash)
assert_equal(chaintxstats['window_block_count'], 0)
assert('window_tx_count' not in chaintxstats)
assert('window_interval' not in chaintxstats)
assert('txrate' not in chaintxstats)
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('98214.28571450'))
assert_equal(res['transactions'], 200)
assert_equal(res['height'], 200)
assert_equal(res['txouts'], 200)
        assert_equal(res['bogosize'], 17000)
size = res['disk_size']
assert size > 6400
assert size < 64000
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() works for blockchain with just the genesis block")
b1hash = node.getblockhash(1)
node.invalidateblock(b1hash)
res2 = node.gettxoutsetinfo()
assert_equal(res2['transactions'], 0)
assert_equal(res2['total_amount'], Decimal('0'))
assert_equal(res2['height'], 0)
assert_equal(res2['txouts'], 0)
        assert_equal(res2['bogosize'], 0)
assert_equal(res2['bestblock'], node.getblockhash(0))
assert_equal(len(res2['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block")
node.reconsiderblock(b1hash)
res3 = node.gettxoutsetinfo()
assert_equal(res['total_amount'], res3['total_amount'])
assert_equal(res['transactions'], res3['transactions'])
assert_equal(res['height'], res3['height'])
assert_equal(res['txouts'], res3['txouts'])
assert_equal(res['bogosize'], res3['bogosize'])
assert_equal(res['bestblock'], res3['bestblock'])
assert_equal(res['hash_serialized_2'], res3['hash_serialized_2'])
def _test_getblockheader(self):
node = self.nodes[0]
assert_raises_rpc_error(-5, "Block not found", node.getblockheader, "nonsense")
besthash = node.getbestblockhash()
secondbesthash = node.getblockhash(199)
header = node.getblockheader(besthash)
assert_equal(header['hash'], besthash)
assert_equal(header['height'], 200)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
assert_equal(header['nTx'], 1)
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
assert isinstance(header['mediantime'], int)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
assert isinstance(int(header['versionHex'], 16), int)
assert isinstance(header['difficulty'], Decimal)
def _test_getdifficulty(self):
difficulty = self.nodes[0].getdifficulty()
# 1 hash in 2 should be valid, so difficulty should be 1/2**31
# binary => decimal => binary math is why we do this check
assert abs(difficulty * 2**31 - 1) < 0.0001
def _test_getnetworkhashps(self):
hashes_per_second = self.nodes[0].getnetworkhashps()
# This should be 2 hashes every 2.6 minutes (156 seconds) or 1/78
assert abs(hashes_per_second * 78 - 1) < 0.0001
def _test_stopatheight(self):
assert_equal(self.nodes[0].getblockcount(), 200)
self.nodes[0].generate(6)
assert_equal(self.nodes[0].getblockcount(), 206)
self.log.debug('Node should not stop at this height')
assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3))
try:
self.nodes[0].generate(1)
except (ConnectionError, http.client.BadStatusLine):
pass # The node already shut down before response
self.log.debug('Node should stop at this height...')
self.nodes[0].wait_until_stopped()
self.start_node(0, ['-txindex=0'])
assert_equal(self.nodes[0].getblockcount(), 207)
def _test_waitforblockheight(self):
self.log.info("Test waitforblockheight")
node = self.nodes[0]
# Start a P2P connection since we'll need to create some blocks.
node.add_p2p_connection(P2PInterface())
network_thread_start()
node.p2p.wait_for_verack()
current_height = node.getblock(node.getbestblockhash())['height']
# Create a fork somewhere below our current height, invalidate the tip
# of that fork, and then ensure that waitforblockheight still
# works as expected.
#
# (Previously this was broken based on setting
# `rpc/blockchain.cpp:latestblock` incorrectly.)
#
b20hash = node.getblockhash(20)
b20 = node.getblock(b20hash)
def solve_and_send_block(prevhash, height, time):
b = create_block(prevhash, create_coinbase(height), time)
b.solve()
node.p2p.send_message(msg_block(b))
node.p2p.sync_with_ping()
return b
b21f = solve_and_send_block(int(b20hash, 16), 21, b20['time'] + 1)
b22f = solve_and_send_block(b21f.sha256, 22, b21f.nTime + 1)
node.invalidateblock(b22f.hash)
def assert_waitforheight(height, timeout=2):
assert_equal(
node.waitforblockheight(height, timeout)['height'],
current_height)
assert_waitforheight(0)
assert_waitforheight(current_height - 1)
assert_waitforheight(current_height)
assert_waitforheight(current_height + 1)
if __name__ == '__main__':
BlockchainTest().main()
| 39.286624 | 168 | 0.657669 | ["MIT"] | nunumichael/xazab | test/functional/rpc_blockchain.py | 12,336 | Python |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.plugins import DeepSpeedStrategy
from pytorch_lightning.utilities.deepspeed import convert_zero_checkpoint_to_fp32_state_dict
from tests.helpers.boring_model import BoringModel
from tests.helpers.runif import RunIf
@RunIf(min_gpus=2, deepspeed=True, standalone=True)
def test_deepspeed_collate_checkpoint(tmpdir):
"""Test to ensure that with DeepSpeed Stage 3 we can collate the sharded checkpoints into a single file."""
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir, strategy=DeepSpeedStrategy(stage=3), gpus=2, fast_dev_run=True, precision=16
)
trainer.fit(model)
checkpoint_path = os.path.join(tmpdir, "model.pt")
checkpoint_path = trainer.strategy.broadcast(checkpoint_path)
trainer.save_checkpoint(checkpoint_path)
trainer.strategy.barrier()
if trainer.is_global_zero:
# ensure function call works
output_path = os.path.join(tmpdir, "single_model.pt")
convert_zero_checkpoint_to_fp32_state_dict(checkpoint_path, output_path)
_assert_checkpoint_equal(model, output_path)
def _assert_checkpoint_equal(model, output_path):
assert os.path.exists(output_path)
single_output = torch.load(output_path)
state_dict = model.state_dict()
for orig_param, saved_model_param in zip(state_dict.values(), single_output["state_dict"].values()):
if model.dtype == torch.half:
# moved model to float32 for comparison with single fp32 saved weights
saved_model_param = saved_model_param.half()
assert torch.equal(orig_param.cpu(), saved_model_param)
| 42.773585 | 111 | 0.763564 | ["Apache-2.0"] | Borda/pytorch-lightning | tests/utilities/test_deepspeed_collate_checkpoint.py | 2,267 | Python |
"""meiduo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from . import views
from django.urls import re_path
urlpatterns = [
re_path(r'^usernames/(?P<username>\w{5,20})/count/$',views.UsernameCountView.as_view()),
re_path(r'^mobiles/(?P<mobile>1[3-9]\d{9})/count/$',views.MobileCountView.as_view()),
re_path(r'^register/$',views.RegisterView.as_view()),
re_path(r'^login/$',views.LoginView.as_view()),
re_path(r'^logout/$',views.LogoutView.as_view()),
re_path(r'^info/$',views.UserInfoView.as_view()),
re_path(r'^emails/$', views.EmailView.as_view()),
re_path(r'^emails/verification/$', views.VerifyEmailView.as_view()),
re_path(r'^addresses/create/$', views.CreateAddressView.as_view()),
re_path(r'^addresses/$', views.AddressView.as_view()),
re_path(r'^addresses/(?P<address_id>\d+)/$', views.UpdateDestroyAddressView.as_view()),
re_path(r'^addresses/(?P<address_id>\d+)/default/$', views.DefaultAddressView.as_view()),
re_path(r'^addresses/(?P<address_id>\d+)/title/$', views.UpdateTitleAddressView.as_view()),
re_path(r'^password/$', views.ChangePasswordView.as_view()),
re_path(r'^browse_histories/$', views.UserBrowseHistory.as_view()),
]
| 49.416667 | 95 | 0.699831 | ["MIT"] | physili/django_test | meiduo/meiduo/apps/users/urls.py | 1,779 | Python |
compression_methods = ['']
_blosc_methods = ['blosc-blosclz', 'blosc-lz4']
try:
import blosc
HAVE_BLOSC = True
compression_methods.extend(_blosc_methods)
except ImportError:
HAVE_BLOSC = False
def compress(data, method, *args, **kwds):
if method == '':
return data
_check_method(method)
if method.startswith('blosc-'):
kwds['cname'] = method[6:]
data = blosc.compress(data, *args, **kwds)
else:
raise ValueError("Unknown compression method '%s'" % method)
return data
def decompress(data, method, *args, **kwds):
if method == '':
return data
_check_method(method)
if method.startswith('blosc-'):
return blosc.decompress(data)
else:
raise ValueError("Unknown compression method '%s'" % method)
def _check_method(method):
if method not in compression_methods:
if method in _blosc_methods:
raise ValueError("Cannot use %s compression; blosc package is not importable." % method)
else:
raise ValueError('Unknown compression method "%s"' % method)
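if __name__ == '__main__':
    # Minimal round-trip sketch (added for illustration; not part of the original
    # module). It prefers a blosc codec when the optional blosc package imports,
    # and otherwise falls back to the pass-through '' method.
    demo_method = 'blosc-lz4' if HAVE_BLOSC else ''
    payload = b'0123456789' * 1000
    packed = compress(payload, demo_method)
    assert decompress(packed, demo_method) == payload
    print('round trip ok with method %r' % demo_method)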
| 25.409091 | 100 | 0.634168 | ["BSD-3-Clause"] | Cocococo11/pyacq | pyacq/core/stream/compression.py | 1,118 | Python |
import pygame
# TODO: make these configurable
c_UP = pygame.K_UP
c_DOWN = pygame.K_DOWN
c_LEFT = pygame.K_LEFT
c_RIGHT = pygame.K_RIGHT
c_PREV = pygame.K_LEFTBRACKET
c_NEXT = pygame.K_RIGHTBRACKET
c_START = pygame.K_RETURN
c_1 = pygame.K_1
c_2 = pygame.K_2
c_3 = pygame.K_3
c_4 = pygame.K_4
c_5 = pygame.K_5
c_6 = pygame.K_6
c_7 = pygame.K_7
c_8 = pygame.K_8
c_9 = pygame.K_9
c_0 = pygame.K_0
c_POINT = pygame.K_PERIOD
c_DEL = pygame.K_BACKSPACE
c_X = pygame.K_a
c_A = pygame.K_x
c_B = pygame.K_z
c_Y = pygame.K_LSHIFT
c_L = pygame.K_q
c_R = pygame.K_w
def isDown(code):
return pygame.key.get_pressed()[code]
def isUp(code):
return not isDown(code)
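# Usage sketch (comment added for illustration; not part of the original module):
# inside a running pygame loop, call pygame.event.pump() each frame so the key
# state is refreshed, then e.g. isDown(c_START) reports whether the key bound to
# "start" (Return by default) is currently held.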
| 17.025641 | 41 | 0.740964 | ["MIT"] | byackley/critter | ceControl.py | 664 | Python |
import argparse
import cv2
import numpy as np
from onnxruntime.quantization import quantize_static, CalibrationDataReader, QuantType
from onnxruntime.quantization.calibrate import CalibrationMethod
from onnxruntime.quantization.quant_utils import QuantFormat
from dataset import pre_process_vgg
def parse_args():
parser = argparse.ArgumentParser(description="ONNXRuntime quantization tool")
parser.add_argument("--input", "-i", type=str)
parser.add_argument("--output", "-o", type=str)
parser.add_argument("--dataset", "-d", type=str)
parser.add_argument("--entropy-calibration", default=False, action="store_true")
return parser.parse_args()
# https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/python/tools/quantization/notebooks/imagenet_v2/mobilenet.ipynb
def preprocess_image(image_path, height, width, channels=3):
image = cv2.imread(image_path)
image_data = pre_process_vgg(image, dims=[height, width, channels], need_transpose=True)
image_data = np.expand_dims(image_data, axis=0)
return image_data
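# Note added for clarity (not in the original): pre_process_vgg with
# need_transpose=True returns a CHW image, so each array produced above has
# shape (1, channels, height, width) after the expand_dims call.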
def preprocess_func(images_folder, height, width, size_limit=0):
unconcatenated_batch_data = []
import pathlib
image_filepathes = [str(path) for path in pathlib.Path(images_folder).glob("*.JPEG")]
for image_filepath in image_filepathes:
# image_filepath = images_folder + '/' + image_name
image_data = preprocess_image(image_filepath, height, width)
unconcatenated_batch_data.append(image_data)
batch_data = np.concatenate(np.expand_dims(unconcatenated_batch_data, axis=0), axis=0)
return batch_data
image_height = 224
image_width = 224
class ResNetDataReader(CalibrationDataReader):
def __init__(self, calibration_image_folder):
self.image_folder = calibration_image_folder
self.preprocess_flag = True
self.enum_data_dicts = []
self.datasize = 0
def get_next(self):
if self.preprocess_flag:
self.preprocess_flag = False
nhwc_data_list = preprocess_func(
self.image_folder, image_height, image_width, size_limit=0
)
self.datasize = len(nhwc_data_list)
self.enum_data_dicts = iter(
[{"input_tensor:0": nhwc_data} for nhwc_data in nhwc_data_list]
)
return next(self.enum_data_dicts, None)
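# Note added for clarity (not in the original): quantize_static() drains the
# reader above by calling get_next() until it returns None; each returned dict
# maps the model input name ("input_tensor:0" here) to one calibration batch.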
if __name__ == "__main__":
args = parse_args()
dr = ResNetDataReader(args.dataset)
if args.entropy_calibration:
method = CalibrationMethod.Entropy
else:
method = CalibrationMethod.MinMax
quantize_static(
args.input,
args.output,
dr,
quant_format=QuantFormat.QDQ,
per_channel=True,
calibrate_method=method,
)
| 32.988095 | 130 | 0.709491 | ["Apache-2.0"] | ctuning/inference_results_v1.1 | closed/FuriosaAI/code/quantization/mlperf_evaluation/python/ort_quantization.py | 2,771 | Python |
# -*- coding: utf-8 -*-
import logging
from dbaas_dbmonitor.provider import DBMonitorProvider
from workflow.steps.util.base import BaseInstanceStep
LOG = logging.getLogger(__name__)
class DBMonitorStep(BaseInstanceStep):
def __init__(self, instance):
super(DBMonitorStep, self).__init__(instance)
self.provider = DBMonitorProvider()
def do(self):
raise NotImplementedError
def undo(self):
pass
class DisableMonitoring(DBMonitorStep):
def __unicode__(self):
return "Disabling DB Monitor..."
def do(self):
self.provider.disabled_dbmonitor_monitoring_instance(self.instance)
class EnableMonitoring(DBMonitorStep):
def __unicode__(self):
return "Enabling DB Monitor..."
def do(self):
self.provider.enabled_dbmonitor_monitoring_instance(self.instance)
class CreateMonitoring(DBMonitorStep):
def __unicode__(self):
return "Creating DB Monitor..."
def do(self):
instance_number = self.instance.databaseinfra.last_vm_created
self.provider.create_dbmonitor_instance_monitoring(
self.instance, instance_number
)
def undo(self):
DisableMonitoring(self.instance).do()
class DisableInfraMonitoring(DBMonitorStep):
def __unicode__(self):
return "Disabling DB Monitor..."
def do(self):
self.provider.remove_dbmonitor_monitoring(self.infra)
class CreateInfraMonitoring(DBMonitorStep):
def __unicode__(self):
return "Creating DB Monitor..."
def do(self):
if self.instance == self.infra.instances.all()[0]:
if not self.provider.get_dbmonitor_databaseinfra(self.infra):
self.provider.create_dbmonitor_monitoring(self.infra)
def undo(self):
if self.instance == self.infra.instances.all()[0]:
DisableInfraMonitoring(self.instance).do()
class UpdateInfraVersion(DBMonitorStep):
def __unicode__(self):
return "Update version on DB Monitor..."
@property
def is_valid(self):
if ((self.upgrade or self.upgrade_patch) and
self.instance == self.infra.instances.all()[0]):
return True
return False
@property
def target_version(self):
if self.upgrade:
return self.upgrade.target_plan.engine.full_inicial_version
elif self.upgrade_patch:
return self.upgrade_patch.target_patch_full_version
@property
def source_version(self):
if self.upgrade:
return self.upgrade.source_plan.engine.full_inicial_version
elif self.upgrade_patch:
return self.upgrade_patch.source_patch_full_version
def do(self):
if self.is_valid:
self.provider.update_dbmonitor_database_version(
self.infra, self.target_version)
def undo(self):
if self.is_valid:
self.provider.update_dbmonitor_database_version(
self.infra, self.source_version)
class UpdateInfraCloudDatabaseMigrate(DBMonitorStep):
def __unicode__(self):
return "Update info about cloud on DBMonitor..."
def do(self):
self.provider.update_database_cloud(
self.infra, self.environment.cloud.name)
class UpdateInfraOrganizationName(DBMonitorStep):
def __unicode__(self):
return "Update info about organization on DBMonitor..."
def __init__(self, instance, organization_name=None):
super(UpdateInfraOrganizationName, self).__init__(instance)
self.organization_name = organization_name
@property
def is_valid(self):
if self.organization_name:
return True
return self.instance == self.infra.instances.first()
def do(self):
if not self.is_valid:
return
self.provider.update_database_organization(
self.infra, self.organization_name)
| 27.194444 | 75 | 0.677732 | ["BSD-3-Clause"] | AyushBherwani1998/database-as-a-service | dbaas/workflow/steps/util/db_monitor.py | 3,916 | Python |
import os
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Optional
from yarl import URL
@dataclass(frozen=True)
class ServerConfig:
host: str = "0.0.0.0"
port: int = 8080
name: str = "Docker Registry"
@dataclass(frozen=True)
class AuthConfig:
server_endpoint_url: Optional[URL]
service_token: str = field(repr=False)
class UpstreamType(str, Enum):
BASIC = "basic"
OAUTH = "oauth"
AWS_ECR = "aws_ecr"
@dataclass(frozen=True)
class UpstreamRegistryConfig:
endpoint_url: URL
project: str
type: UpstreamType = UpstreamType.OAUTH
basic_username: str = field(repr=False, default="")
basic_password: str = field(repr=False, default="")
# TODO: should be derived from the WWW-Authenticate header instead
token_endpoint_url: URL = URL()
token_service: str = ""
token_endpoint_username: str = field(repr=False, default="")
token_endpoint_password: str = field(repr=False, default="")
token_registry_catalog_scope: str = "registry:catalog:*"
token_repository_scope_actions: str = "*"
sock_connect_timeout_s: Optional[float] = 30.0
sock_read_timeout_s: Optional[float] = 30.0
# https://github.com/docker/distribution/blob/dcfe05ce6cff995f419f8df37b59987257ffb8c1/registry/handlers/catalog.go#L16
max_catalog_entries: int = 100
@property
def is_basic(self) -> bool:
return self.type == UpstreamType.BASIC
@property
def is_oauth(self) -> bool:
return self.type == UpstreamType.OAUTH
@dataclass(frozen=True)
class ZipkinConfig:
url: URL
app_name: str = "platform-registry"
sample_rate: float = 0
@dataclass(frozen=True)
class SentryConfig:
dsn: URL
cluster_name: str
app_name: str = "platform-registry"
sample_rate: float = 0
@dataclass(frozen=True)
class Config:
server: ServerConfig
upstream_registry: UpstreamRegistryConfig
auth: AuthConfig
cluster_name: str
zipkin: Optional[ZipkinConfig] = None
sentry: Optional[SentryConfig] = None
class EnvironConfigFactory:
def __init__(self, environ: Optional[dict[str, str]] = None) -> None:
self._environ = environ or os.environ
def _get_url(self, name: str) -> Optional[URL]:
value = self._environ[name]
if value == "-":
return None
else:
return URL(value)
def create_server(self) -> ServerConfig:
port = int(self._environ.get("NP_REGISTRY_API_PORT", ServerConfig.port))
return ServerConfig(port=port)
def create_upstream_registry(self) -> UpstreamRegistryConfig:
endpoint_url = URL(self._environ["NP_REGISTRY_UPSTREAM_URL"])
project = self._environ["NP_REGISTRY_UPSTREAM_PROJECT"]
max_catalog_entries = int(
self._environ.get(
"NP_REGISTRY_UPSTREAM_MAX_CATALOG_ENTRIES",
UpstreamRegistryConfig.max_catalog_entries,
)
)
upstream_type = UpstreamType(
self._environ.get("NP_REGISTRY_UPSTREAM_TYPE", UpstreamType.OAUTH.value)
)
upstream: dict[str, Any] = dict(
endpoint_url=endpoint_url,
project=project,
max_catalog_entries=max_catalog_entries,
type=upstream_type,
)
if upstream_type == UpstreamType.OAUTH:
upstream.update(
dict(
token_endpoint_url=URL(
self._environ["NP_REGISTRY_UPSTREAM_TOKEN_URL"]
),
token_service=self._environ["NP_REGISTRY_UPSTREAM_TOKEN_SERVICE"],
token_endpoint_username=self._environ[
"NP_REGISTRY_UPSTREAM_TOKEN_USERNAME"
],
token_endpoint_password=self._environ[
"NP_REGISTRY_UPSTREAM_TOKEN_PASSWORD"
],
)
)
if "NP_REGISTRY_UPSTREAM_TOKEN_REGISTRY_SCOPE" in self._environ:
upstream["token_registry_catalog_scope"] = self._environ[
"NP_REGISTRY_UPSTREAM_TOKEN_REGISTRY_SCOPE"
]
if "NP_REGISTRY_UPSTREAM_TOKEN_REPO_SCOPE_ACTIONS" in self._environ:
upstream["token_repository_scope_actions"] = self._environ[
"NP_REGISTRY_UPSTREAM_TOKEN_REPO_SCOPE_ACTIONS"
]
if upstream_type == UpstreamType.BASIC:
basic_username = self._environ.get("NP_REGISTRY_UPSTREAM_BASIC_USERNAME")
if basic_username is not None:
upstream["basic_username"] = basic_username
basic_password = self._environ.get("NP_REGISTRY_UPSTREAM_BASIC_PASSWORD")
if basic_password is not None:
upstream["basic_password"] = basic_password
return UpstreamRegistryConfig(**upstream)
def create_auth(self) -> AuthConfig:
url = self._get_url("NP_REGISTRY_AUTH_URL")
token = self._environ["NP_REGISTRY_AUTH_TOKEN"]
return AuthConfig(server_endpoint_url=url, service_token=token)
def create_zipkin(self) -> Optional[ZipkinConfig]:
if "NP_ZIPKIN_URL" not in self._environ:
return None
url = URL(self._environ["NP_ZIPKIN_URL"])
app_name = self._environ.get("NP_ZIPKIN_APP_NAME", ZipkinConfig.app_name)
sample_rate = float(
self._environ.get("NP_ZIPKIN_SAMPLE_RATE", ZipkinConfig.sample_rate)
)
return ZipkinConfig(url=url, app_name=app_name, sample_rate=sample_rate)
def create_sentry(self) -> Optional[SentryConfig]:
if "NP_SENTRY_DSN" not in self._environ:
return None
return SentryConfig(
dsn=URL(self._environ["NP_SENTRY_DSN"]),
cluster_name=self._environ["NP_SENTRY_CLUSTER_NAME"],
app_name=self._environ.get("NP_SENTRY_APP_NAME", SentryConfig.app_name),
sample_rate=float(
self._environ.get("NP_SENTRY_SAMPLE_RATE", SentryConfig.sample_rate)
),
)
def create(self) -> Config:
server_config = self.create_server()
upstream_registry_config = self.create_upstream_registry()
auth_config = self.create_auth()
zipkin_config = self.create_zipkin()
sentry_config = self.create_sentry()
cluster_name = self._environ["NP_CLUSTER_NAME"]
assert cluster_name
return Config(
server=server_config,
upstream_registry=upstream_registry_config,
auth=auth_config,
cluster_name=cluster_name,
zipkin=zipkin_config,
sentry=sentry_config,
)
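if __name__ == "__main__":
    # Hypothetical usage sketch (added for illustration; not part of the original
    # module). It builds a Config from an explicit mapping instead of os.environ;
    # the endpoint values below are placeholders, not real services.
    example_environ = {
        "NP_REGISTRY_UPSTREAM_URL": "https://registry.example.com",
        "NP_REGISTRY_UPSTREAM_PROJECT": "example-project",
        "NP_REGISTRY_UPSTREAM_TYPE": "basic",
        "NP_REGISTRY_AUTH_URL": "-",  # "-" is mapped to None by _get_url()
        "NP_REGISTRY_AUTH_TOKEN": "example-token",
        "NP_CLUSTER_NAME": "example-cluster",
    }
    example_config = EnvironConfigFactory(environ=example_environ).create()
    print(example_config.upstream_registry.is_basic)  # True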
| 34.096447 | 123 | 0.647611 | ["Apache-2.0"] | neuro-inc/platform-registry-api | platform_registry_api/config.py | 6,717 | Python |
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class WriteActionsMissLearnedInfo(Base):
"""The WriteActionsMissLearnedInfo class encapsulates a system managed writeActionsMissLearnedInfo node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the WriteActionsMissLearnedInfo property from a parent instance.
The internal properties list will be empty when the property is accessed and is populated from the server by using the find method.
"""
_SDM_NAME = 'writeActionsMissLearnedInfo'
def __init__(self, parent):
super(WriteActionsMissLearnedInfo, self).__init__(parent)
@property
def ExperimenterData(self):
"""NOT DEFINED
Returns:
str
"""
return self._get_attribute('experimenterData')
@property
def ExperimenterDataLength(self):
"""NOT DEFINED
Returns:
number
"""
return self._get_attribute('experimenterDataLength')
@property
def ExperimenterId(self):
"""NOT DEFINED
Returns:
number
"""
return self._get_attribute('experimenterId')
@property
def NextTableIds(self):
"""NOT DEFINED
Returns:
str
"""
return self._get_attribute('nextTableIds')
@property
def Property(self):
"""NOT DEFINED
Returns:
str
"""
return self._get_attribute('property')
@property
def SupportedField(self):
"""NOT DEFINED
Returns:
str
"""
return self._get_attribute('supportedField')
def find(self, ExperimenterData=None, ExperimenterDataLength=None, ExperimenterId=None, NextTableIds=None, Property=None, SupportedField=None):
"""Finds and retrieves writeActionsMissLearnedInfo data from the server.
All named parameters support regex and can be used to selectively retrieve writeActionsMissLearnedInfo data from the server.
By default the find method takes no parameters and will retrieve all writeActionsMissLearnedInfo data from the server.
Args:
ExperimenterData (str): NOT DEFINED
ExperimenterDataLength (number): NOT DEFINED
ExperimenterId (number): NOT DEFINED
NextTableIds (str): NOT DEFINED
Property (str): NOT DEFINED
SupportedField (str): NOT DEFINED
Returns:
self: This instance with matching writeActionsMissLearnedInfo data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of writeActionsMissLearnedInfo data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the writeActionsMissLearnedInfo data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
| 32.296875 | 145 | 0.745525 | ["MIT"] | kakkotetsu/IxNetwork | RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/openflow/writeactionsmisslearnedinfo.py | 4,134 | Python |
# -*- coding: utf-8 -*-
''' Global variables '''
# ##################################################################################
# MG ILLUMINATION #
# First Crazy Debroussailleur : jDepoortere #
# Author : cPOTTIER #
# Date : 12-05-2016 #
# ##################################################################################
# Required modules
import sys
import os
# InK modules
import graphs
import nomen
import ink.proto
import ink.query
import ink.io
import nask.sdk
import nask.sdk.casting
import nask.sdk.shots as shots
import nask.sdk.hit
import proj.pipe.ink.graphs as prodgraphs
from subprocess import Popen, PIPE
# Maya modules
import sip
# Optionals modules
import re
import shutil
import time
import datetime
from datetime import datetime
import subprocess
import glob
import json
import shutil
import string
import subprocess
import collections
from collections import OrderedDict
# QT modules
from PyQt4 import QtGui
from PyQt4 import QtGui, QtCore, Qt, QtOpenGL
from PyQt4.QtCore import QThread
# qt module for InK
try:
if 'sandboxQt' in sys.modules:
del(sys.modules["sandboxQt"])
import sandboxQt
else:
import sandboxQt
except:
pass
# Globals
CONNECT_USER_INFOS = ink.io.ConnectUserInfo()
CONNECT_USER0 = CONNECT_USER_INFOS[0]
CONNECT_USER1 = CONNECT_USER_INFOS[1] # TODO: ask why
PROJECT = CONNECT_USER_INFOS[2].upper() # cf Nomen.GetFilm()
projectLower = PROJECT.lower()
USER = CONNECT_USER_INFOS[1]
MAIL_HOSTNAME = 'HOSTNAME.illum-mg.fr'
MAIL_USER = USER+'@illum-mg.fr'
LOCALPATH = '/u/'+projectLower+'/Users/'+USER+'/Presets/Graphs/'
# Useful Classes
if str(USER) == 'cpottier': # for dev
if '__InK__classes_forDev' in sys.modules:
del(sys.modules["__InK__classes_forDev"])
import __InK__classes_forDev
from __InK__classes_forDev import __PIPEIN_GRAPH__
else:
import __InK__classes_forDev
from __InK__classes_forDev import __PIPEIN_GRAPH__
else:
if '__InK__classes' in sys.modules:
del(sys.modules["__InK__classes"])
import __InK__classes
from __InK__classes import __PIPEIN_GRAPH__
else:
import __InK__classes
from __InK__classes import __PIPEIN_GRAPH__
print sys.modules
| 27.043478 | 84 | 0.618167 | ["MIT"] | vincseize/MG | __InK__connect.py | 2,488 | Python |
import re
import discord
from discord.ext import commands
from discord.ext.commands import clean_content
from Util import Configuration, GearbotLogging, Permissioncheckers, Translator, Utils
INVITE_MATCHER = re.compile(r"(?:https?:\/\/)?(?:www\.)?(?:discord\.(?:gg|io|me|li)|discordapp\.com\/invite)\/([\w|\d|-]+)", flags=re.IGNORECASE)
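# Illustrative matches (comment added for clarity; the examples are made up):
#   INVITE_MATCHER.findall("join us at https://discord.gg/AbC123")  -> ["AbC123"]
#   INVITE_MATCHER.findall("see discordapp.com/invite/xyz-789")     -> ["xyz-789"]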
async def censor(ctx, code, server_name):
try:
await ctx.message.delete()
clean_message = await clean_content().convert(ctx, ctx.message.content)
clean_name = Utils.clean_user(ctx.message.author)
await GearbotLogging.log_to(ctx.guild.id, "CENSOR",
f":no_entry_sign: {Translator.translate('censored_invite', ctx.guild.id, user=clean_name, code=code, message=clean_message, server_name=server_name)}")
except discord.NotFound:
pass # we failed? guess we lost the race
class Censor:
def __init__(self, bot):
self.bot: commands.Bot = bot
async def on_message(self, message: discord.Message):
if not hasattr(message.channel, "guild") or message.channel.guild is None:
return
ctx: commands.Context = await self.bot.get_context(message)
guild = message.guild
is_mod = Permissioncheckers.get_user_lvl(ctx) >= 2
if message.author == guild.me or is_mod or message.author.id in Configuration.get_var(guild.id, "IGNORED_USERS"):
return
guilds = Configuration.get_var(message.guild.id, "INVITE_WHITELIST")
        if len(guilds) != 0:
codes = INVITE_MATCHER.findall(message.content)
for code in codes:
try:
invite:discord.Invite = await self.bot.get_invite(code)
except discord.NotFound:
pass
except KeyError:
await censor(ctx, code, "DM group")
else:
if invite.guild is None or (not invite.guild.id in guilds and invite.guild.id != guild.id):
await censor(ctx, code, invite.guild.name)
def setup(bot):
bot.add_cog(Censor(bot)) | 41.230769 | 197 | 0.629198 | [
"MIT"
] | Gh0stlyy/GearBot | GearBot/Cogs/Censor.py | 2,144 | Python |
from .base import MixinBase
from .cool_nameable import CoolNameable
from .filterable import Filterable
from .hashable import Hashable
from .identifiable import Identifiable
from .labelable import Labelable
from .loggable import Loggable
from .pathable import Pathable
from .serialisable import Serialisable
from .taggable import Taggable
__all__ = (
"CoolNameable",
"Filterable",
"Hashable",
"Identifiable",
"Labelable",
"Loggable",
"MixinBase",
"Pathable",
"Serialisable",
"Taggable",
)
| 21.2 | 39 | 0.737736 | ["MIT"] | octue/octue-sdk-python | octue/mixins/__init__.py | 530 | Python |
from typing import Optional
import click
from opta.layer import Layer
@click.command(hidden=True)
@click.option("-c", "--config", default="opta.yml", help="Opta config file.")
@click.option(
"-e",
"--env",
default=None,
help="The env to use when loading the config file",
show_default=True,
)
def validate(config: str, env: Optional[str]) -> None:
Layer.load_from_yaml(config, env)
| 21.578947 | 77 | 0.678049 | ["Apache-2.0"] | the-MuffinMan/opta | opta/commands/validate.py | 410 | Python |
#!/usr/bin/env python
"""
python list_bundles.py prod b6dc9b93-929a-45d0-beb2-5cf8e64872fe
python list_bundles.py staging 3b41f062-621c-46ca-abad-bce09427934d
"""
import argparse
import json
import logging
import sys
from ingest.api.ingestapi import IngestApi
logging.getLogger('ingest').setLevel(logging.DEBUG)
format = ' %(asctime)s - %(name)s - %(levelname)s in %(filename)s:' \
'%(lineno)s %(funcName)s(): %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.WARNING, format=format)
class BundleManifest:
def __init__(self, resource):
self._object = resource
@property
def fqid(self):
uuid = self._object.get('bundleUuid')
version = self._object.get('bundleVersion')
return f'{uuid}.{version}'
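    # Illustrative fqid (comment added for clarity; the values are made up):
    # "b6dc9b93-929a-45d0-beb2-5cf8e64872fe.2019-05-01T120000.000000Z"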
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generates a bundle fqid list given a project uuid')
parser.add_argument('env', choices=['dev', 'integration', 'staging', 'prod'], help='environment')
parser.add_argument('project_uuid', metavar='project-uuid', type=str, help='Project uuid')
parser.add_argument('--filename', type=str, help='Output filename')
args = parser.parse_args()
project_uuid = args.project_uuid
filename = args.filename or f'{args.project_uuid}.json'
env = args.env
infix = f'.{env}' if env != 'prod' else ''
url = f'https://api.ingest{infix}.data.humancellatlas.org'
ingest_api = IngestApi(url)
project = ingest_api.get_project_by_uuid(project_uuid)
bundle_manifests = ingest_api.get_related_entities("bundleManifests", project, "bundleManifests")
bundle_fqids = [BundleManifest(obj).fqid for obj in bundle_manifests]
with open(filename, 'w') as outfile:
json.dump(bundle_fqids, outfile, indent=4)
print(f'Total bundle count: {len(bundle_fqids)}')
print(f'Saved into file: {filename}')
| 33.732143 | 101 | 0.704606 | ["Apache-2.0"] | HumanCellAtlas/ingest-common | list_bundles.py | 1,889 | Python |
# -*- coding: utf-8 -*-
"""
Code to take template spectra, used for RV fitting, and pass them through 4FS to resample them to 4MOST's resolution.
It then further resamples each arm onto a fixed logarithmic stride.
"""
import argparse
import hashlib
import logging
import numpy as np
import os
from os import path as os_path
from fourgp_fourfs import FourFS
from fourgp_degrade.resample import SpectrumResampler
from fourgp_degrade import SpectrumProperties
from fourgp_speclib import SpectrumLibrarySqlite
def command_line_interface(root_path):
"""
A simple command-line interface for running a tool to resample a library of template spectra onto fixed
logarithmic rasters representing each of the 4MOST arms.
We use the python argparse module to build the interface, and return the inputs supplied by the user.
:param root_path:
The root path of this 4GP installation; the directory where we can find 4FS.
:return:
An object containing the arguments supplied by the user.
"""
# Read input parameters
parser = argparse.ArgumentParser(description=__doc__.strip())
parser.add_argument('--templates-in',
required=False,
default='turbospec_rv_templates',
dest='templates_in',
help="Library of spectra to use as templates for RV code")
parser.add_argument('--workspace', dest='workspace', default="",
help="Directory where we expect to find spectrum libraries")
parser.add_argument('--templates-out',
required=False,
default="rv_templates_resampled",
dest="templates_out",
help="Library into which to place resampled templates for RV code")
parser.add_argument('--binary-path',
required=False,
default=root_path,
dest="binary_path",
help="Specify a directory where 4FS binary package is installed")
args = parser.parse_args()
# Set up logger
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s:%(filename)s:%(message)s',
datefmt='%d/%m/%Y %H:%M:%S')
logger = logging.getLogger(__name__)
logger.info("Resampling template spectra")
return args
def logarithmic_raster(lambda_min, lambda_max, lambda_step):
"""
Create a logarithmic raster with a fixed logarithmic stride, based on a starting wavelength, finishing wavelength,
and a mean wavelength step.
:param lambda_min:
Smallest wavelength in raster.
:param lambda_max:
Largest wavelength in raster.
:param lambda_step:
The approximate pixel size in the raster.
:return:
A numpy array containing a wavelength raster with fixed logarithmic stride.
"""
return np.exp(np.arange(
np.log(lambda_min),
np.log(lambda_max),
np.log(1 + lambda_step / lambda_min)
))
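# Worked example (comment added for clarity; the numbers are illustrative):
# logarithmic_raster(6000, 6003, 1) returns points lambda_n = 6000 * (1 + 1/6000)**n,
# i.e. roughly [6000.0, 6001.0, 6002.0]; the pixel width grows in proportion to
# wavelength while the logarithmic stride stays fixed.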
def resample_templates(args, logger):
"""
Resample a spectrum library of templates onto a fixed logarithmic stride, representing each of the 4MOST arms in
turn. We use 4FS to down-sample the templates to the resolution of 4MOST observations, and automatically detect
the list of arms contained within each 4FS mock observation. We then resample the 4FS output onto a new raster
with fixed logarithmic stride.
:param args:
Object containing arguments supplied by the used, for example the name of the spectrum libraries we use for
input and output. The required fields are defined by the user interface above.
:param logger:
A python logging object.
:return:
None.
"""
# Set path to workspace where we expect to find libraries of spectra
workspace = args.workspace if args.workspace else os_path.join(args.our_path, "../../../workspace")
# Open input template spectra
spectra = SpectrumLibrarySqlite.open_and_search(
library_spec=args.templates_in,
workspace=workspace,
extra_constraints={"continuum_normalised": 0}
)
templates_library, templates_library_items, templates_spectra_constraints = \
[spectra[i] for i in ("library", "items", "constraints")]
# Create new SpectrumLibrary to hold the resampled output templates
library_path = os_path.join(workspace, args.templates_out)
output_library = SpectrumLibrarySqlite(path=library_path, create=True)
# Instantiate 4FS wrapper
etc_wrapper = FourFS(
path_to_4fs=os_path.join(args.binary_path, "OpSys/ETC"),
snr_list=[250.],
magnitude=13,
snr_per_pixel=True
)
for input_spectrum_id in templates_library_items:
logger.info("Working on <{}>".format(input_spectrum_id['filename']))
# Open Spectrum data from disk
input_spectrum_array = templates_library.open(ids=input_spectrum_id['specId'])
# Load template spectrum (flux normalised)
template_flux_normalised = input_spectrum_array.extract_item(0)
# Look up the unique ID of the star we've just loaded
# Newer spectrum libraries have a uid field which is guaranteed unique; for older spectrum libraries use
# Starname instead.
# Work out which field we're using (uid or Starname)
spectrum_matching_field = 'uid' if 'uid' in template_flux_normalised.metadata else 'Starname'
# Look up the unique ID of this object
object_name = template_flux_normalised.metadata[spectrum_matching_field]
# Search for the continuum-normalised version of this same object (which will share the same uid / name)
search_criteria = {
spectrum_matching_field: object_name,
'continuum_normalised': 1
}
continuum_normalised_spectrum_id = templates_library.search(**search_criteria)
# Check that continuum-normalised spectrum exists and is unique
assert len(continuum_normalised_spectrum_id) == 1, "Could not find continuum-normalised spectrum."
# Load the continuum-normalised version
template_continuum_normalised_arr = templates_library.open(
ids=continuum_normalised_spectrum_id[0]['specId']
)
# Turn the SpectrumArray we got back into a single Spectrum
template_continuum_normalised = template_continuum_normalised_arr.extract_item(0)
# Now create a mock observation of this template using 4FS
logger.info("Passing template through 4FS")
mock_observed_template = etc_wrapper.process_spectra(
spectra_list=((template_flux_normalised, template_continuum_normalised),)
)
# Loop over LRS and HRS
for mode in mock_observed_template:
# Loop over the spectra we simulated (there was only one!)
for index in mock_observed_template[mode]:
# Loop over the various SNRs we simulated (there was only one!)
for snr in mock_observed_template[mode][index]:
# Create a unique ID for this arm's data
unique_id = hashlib.md5(os.urandom(32)).hexdigest()[:16]
# Import the flux- and continuum-normalised spectra separately, but give them the same ID
for spectrum_type in mock_observed_template[mode][index][snr]:
# Extract continuum-normalised mock observation
logger.info("Resampling {} spectrum".format(mode))
mock_observed = mock_observed_template[mode][index][snr][spectrum_type]
# Replace errors which are nans with a large value
mock_observed.value_errors[np.isnan(mock_observed.value_errors)] = 1000.
# Check for NaN values in spectrum itself
if not np.all(np.isfinite(mock_observed.values)):
print("Warning: NaN values in template <{}>".format(template_flux_normalised.metadata['Starname']))
mock_observed.value_errors[np.isnan(mock_observed.values)] = 1000.
mock_observed.values[np.isnan(mock_observed.values)] = 1.
# Resample template onto a logarithmic raster of fixed step
resampler = SpectrumResampler(mock_observed)
# Construct the raster for each wavelength arm
wavelength_arms = SpectrumProperties(mock_observed.wavelengths).wavelength_arms()
# Resample 4FS output for each arm onto a fixed logarithmic stride
for arm_count, arm in enumerate(wavelength_arms["wavelength_arms"]):
arm_raster, mean_pixel_width = arm
name = "{}_{}".format(mode, arm_count)
arm_info = {
"lambda_min": arm_raster[0],
"lambda_max": arm_raster[-1],
"lambda_step": mean_pixel_width
}
arm_raster = logarithmic_raster(lambda_min=arm_info['lambda_min'],
lambda_max=arm_info['lambda_max'],
lambda_step=arm_info['lambda_step']
)
# Resample 4FS output onto a fixed logarithmic step
mock_observed_arm = resampler.onto_raster(arm_raster)
# Save it into output spectrum library
output_library.insert(spectra=mock_observed_arm,
filenames=input_spectrum_id['filename'],
metadata_list={
"uid": unique_id,
"template_id": object_name,
"mode": mode,
"arm_name": "{}_{}".format(mode,arm_count),
"lambda_min": arm_raster[0],
"lambda_max": arm_raster[-1],
"lambda_step": mean_pixel_width
})
| 45.387234 | 127 | 0.603975 | [
"MIT"
] | dcf21/4most-4gp | src/pythonModules/fourgp_rv/fourgp_rv/templates_resample.py | 10,666 | Python |
from django.shortcuts import render, HttpResponseRedirect, get_object_or_404
from cartapp.models import Cart
from mainapp.models import Product
from django.contrib.auth.decorators import login_required
@login_required
def view(request):
return render(request, 'cartapp/cart.html', context = {
'cart': Cart.objects.filter(user=request.user)
})
@login_required
def add(request, product_id):
product = get_object_or_404(Product, pk=product_id)
cart_items = Cart.objects.filter(user=request.user, product=product)
if cart_items:
cart = cart_items.first()
else:
cart = Cart(user=request.user, product=product)
cart.quantity += 1
cart.save()
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
@login_required
def remove(request, cart_item_id):
cart = get_object_or_404( Cart, pk=cart_item_id )
cart.delete()
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
@login_required
def edit(request, cart_item_id, quantity):
quantity = quantity
cart_item = Cart.objects.get(pk=cart_item_id)
if quantity > 0:
cart_item.quantity = quantity
cart_item.save()
else:
cart_item.delete()
return render(request, 'cartapp/include/inc_cart_edit.html') | 27.3125 | 76 | 0.704805 | [
"MIT"
] | A1eksAwP/GB-Internet-Store | geekshop/cartapp/views.py | 1,311 | Python |
#!/usr/bin/env python
import sys
import os
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../lib')))
import init
import config
import misc
from kzcashd import KZCashDaemon
from models import Superblock, Proposal, GovernanceObject, Watchdog
from models import VoteSignals, VoteOutcomes, Transient
import socket
from misc import printdbg
import time
from bitcoinrpc.authproxy import JSONRPCException
import signal
import atexit
import random
from scheduler import Scheduler
import argparse
# sync kzcashd gobject list with our local relational DB backend
def perform_kzcashd_object_sync(kzcashd):
GovernanceObject.sync(kzcashd)
# delete old watchdog objects, create new when necessary
def watchdog_check(kzcashd):
printdbg("in watchdog_check")
# delete expired watchdogs
for wd in Watchdog.expired(kzcashd):
printdbg("\tFound expired watchdog [%s], voting to delete" % wd.object_hash)
wd.vote(kzcashd, VoteSignals.delete, VoteOutcomes.yes)
# now, get all the active ones...
active_wd = Watchdog.active(kzcashd)
active_count = active_wd.count()
# none exist, submit a new one to the network
if 0 == active_count:
# create/submit one
printdbg("\tNo watchdogs exist... submitting new one.")
wd = Watchdog(created_at=int(time.time()))
wd.submit(kzcashd)
else:
wd_list = sorted(active_wd, key=lambda wd: wd.object_hash)
# highest hash wins
winner = wd_list.pop()
printdbg("\tFound winning watchdog [%s], voting VALID" % winner.object_hash)
winner.vote(kzcashd, VoteSignals.valid, VoteOutcomes.yes)
# if remaining Watchdogs exist in the list, vote delete
for wd in wd_list:
printdbg("\tFound losing watchdog [%s], voting DELETE" % wd.object_hash)
wd.vote(kzcashd, VoteSignals.delete, VoteOutcomes.yes)
printdbg("leaving watchdog_check")
def attempt_superblock_creation(kzcashd):
import kzcashlib
if not kzcashd.is_masternode():
print("We are not a Masternode... can't submit superblocks!")
return
# query votes for this specific ebh... if we have voted for this specific
# ebh, then it's voted on. since we track votes this is all done using joins
# against the votes table
#
# has this masternode voted on *any* superblocks at the given event_block_height?
# have we voted FUNDING=YES for a superblock for this specific event_block_height?
event_block_height = kzcashd.next_superblock_height()
if Superblock.is_voted_funding(event_block_height):
# printdbg("ALREADY VOTED! 'til next time!")
# vote down any new SBs because we've already chosen a winner
for sb in Superblock.at_height(event_block_height):
if not sb.voted_on(signal=VoteSignals.funding):
sb.vote(kzcashd, VoteSignals.funding, VoteOutcomes.no)
# now return, we're done
return
if not kzcashd.is_govobj_maturity_phase():
printdbg("Not in maturity phase yet -- will not attempt Superblock")
return
proposals = Proposal.approved_and_ranked(proposal_quorum=kzcashd.governance_quorum(), next_superblock_max_budget=kzcashd.next_superblock_max_budget())
budget_max = kzcashd.get_superblock_budget_allocation(event_block_height)
sb_epoch_time = kzcashd.block_height_to_epoch(event_block_height)
sb = kzcashlib.create_superblock(proposals, event_block_height, budget_max, sb_epoch_time)
if not sb:
printdbg("No superblock created, sorry. Returning.")
return
# find the deterministic SB w/highest object_hash in the DB
dbrec = Superblock.find_highest_deterministic(sb.hex_hash())
if dbrec:
dbrec.vote(kzcashd, VoteSignals.funding, VoteOutcomes.yes)
# any other blocks which match the sb_hash are duplicates, delete them
for sb in Superblock.select().where(Superblock.sb_hash == sb.hex_hash()):
if not sb.voted_on(signal=VoteSignals.funding):
sb.vote(kzcashd, VoteSignals.delete, VoteOutcomes.yes)
printdbg("VOTED FUNDING FOR SB! We're done here 'til next superblock cycle.")
return
else:
printdbg("The correct superblock wasn't found on the network...")
# if we are the elected masternode...
if (kzcashd.we_are_the_winner()):
printdbg("we are the winner! Submit SB to network")
sb.submit(kzcashd)
def check_object_validity(kzcashd):
# vote (in)valid objects
for gov_class in [Proposal, Superblock]:
for obj in gov_class.select():
obj.vote_validity(kzcashd)
def is_kzcashd_port_open(kzcashd):
# test socket open before beginning, display instructive message to MN
# operators if it's not
port_open = False
try:
info = kzcashd.rpc_command('getgovernanceinfo')
port_open = True
except (socket.error, JSONRPCException) as e:
print("%s" % e)
return port_open
def main():
kzcashd = KZCashDaemon.from_kzcash_conf(config.kzcash_conf)
options = process_args()
# check kzcashd connectivity
if not is_kzcashd_port_open(kzcashd):
print("Cannot connect to kzcashd. Please ensure kzcashd is running and the JSONRPC port is open to Sentinel.")
return
# check kzcashd sync
if not kzcashd.is_synced():
print("kzcashd not synced with network! Awaiting full sync before running Sentinel.")
return
# ensure valid masternode
if not kzcashd.is_masternode():
print("Invalid Masternode Status, cannot continue.")
return
# register a handler if SENTINEL_DEBUG is set
if os.environ.get('SENTINEL_DEBUG', None):
import logging
logger = logging.getLogger('peewee')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
if options.bypass:
# bypassing scheduler, remove the scheduled event
printdbg("--bypass-schedule option used, clearing schedule")
Scheduler.clear_schedule()
if not Scheduler.is_run_time():
printdbg("Not yet time for an object sync/vote, moving on.")
return
if not options.bypass:
# delay to account for cron minute sync
Scheduler.delay()
# running now, so remove the scheduled event
Scheduler.clear_schedule()
# ========================================================================
# general flow:
# ========================================================================
#
# load "gobject list" rpc command data, sync objects into internal database
perform_kzcashd_object_sync(kzcashd)
# delete old watchdog objects, create a new if necessary
watchdog_check(kzcashd)
# auto vote network objects as valid/invalid
# check_object_validity(kzcashd)
# create a Superblock if necessary
attempt_superblock_creation(kzcashd)
# schedule the next run
Scheduler.schedule_next_run()
def signal_handler(signum, frame):
print("Got a signal [%d], cleaning up..." % (signum))
Transient.delete('SENTINEL_RUNNING')
sys.exit(1)
def cleanup():
Transient.delete(mutex_key)
def process_args():
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--bypass-scheduler',
action='store_true',
help='Bypass scheduler and sync/vote immediately',
dest='bypass')
args = parser.parse_args()
return args
if __name__ == '__main__':
atexit.register(cleanup)
signal.signal(signal.SIGINT, signal_handler)
# ensure another instance of Sentinel is not currently running
mutex_key = 'SENTINEL_RUNNING'
# assume that all processes expire after 'timeout_seconds' seconds
timeout_seconds = 90
is_running = Transient.get(mutex_key)
if is_running:
printdbg("An instance of Sentinel is already running -- aborting.")
sys.exit(1)
else:
Transient.set(mutex_key, misc.now(), timeout_seconds)
# locked to this instance -- perform main logic here
main()
Transient.delete(mutex_key)
| 33.040486 | 154 | 0.680799 | [
"MIT"
] | kzcashteam/sentinel | bin/sentinel.py | 8,161 | Python |
"""
"""
import numpy as np
from ..stellar_ages import _get_lg_age_bin_edges, _get_lgt_birth, T_BIRTH_MIN
from ..stellar_ages import _get_sfh_tables, _get_age_weights_from_tables
from ..sfh_model import DEFAULT_MAH_PARAMS, DEFAULT_MS_PARAMS, DEFAULT_Q_PARAMS
from ..utils import _jax_get_dt_array
FSPS_LG_AGES = np.arange(5.5, 10.2, 0.05) # log10 ages in years
def linear_sfr(t_gyr):
return t_gyr * 1e9
def linear_smh(t0, t_gyr):
return 1e9 * 0.5 * (t_gyr ** 2 - t0 ** 2)
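# The two helpers above define the toy star-formation history used by these
# tests: SFR(t) = 1e9 * t, so integrating from t0 to t gives the stellar mass
#   M*(t) = 1e9 * (t**2 - t0**2) / 2
# which is exactly what linear_smh() returns.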
def test_age_bin_edges_have_correct_array_shape():
lgt_ages = np.linspace(5.5, 10.5, 50)
lgt_age_bins = _get_lg_age_bin_edges(lgt_ages)
assert lgt_age_bins.size == lgt_ages.size + 1
def test_age_weights_are_mathematically_sensible():
t_obs = 11.0
mah_params = np.array(list(DEFAULT_MAH_PARAMS.values()))
ms_params = np.array(list(DEFAULT_MS_PARAMS.values()))
q_params = np.array(list(DEFAULT_Q_PARAMS.values()))
res = _get_sfh_tables(mah_params, ms_params, q_params)
t_table, lgt_table, dt_table, sfh_table, logsm_table = res
lgt_ages = np.linspace(5.5, 10.5, 50) - 9.0
lgt_age_bin_edges = _get_lg_age_bin_edges(lgt_ages)
lgt_birth_bin_edges = _get_lgt_birth(t_obs, lgt_age_bin_edges)
age_weights = _get_age_weights_from_tables(
lgt_birth_bin_edges, lgt_table, logsm_table
)
assert age_weights.shape == lgt_ages.shape
assert np.allclose(age_weights.sum(), 1.0)
def test_age_weights_agree_with_analytical_calculation_of_constant_sfr_weights():
constant_sfr = 1.0 * 1e9 # Msun/Gyr
# Analytically calculate age distributions for constant SFR (independent of t_obs)
log_ages_gyr = FSPS_LG_AGES - 9
ages_gyr = 10 ** log_ages_gyr
dt_ages = _jax_get_dt_array(ages_gyr)
mstar_age_bins = dt_ages * constant_sfr
correct_weights = mstar_age_bins / mstar_age_bins.sum()
# Calculate age distributions with DSPS
t_obs = 16.0
t_table = np.linspace(T_BIRTH_MIN, t_obs, 50_000)
lgt_table = np.log10(t_table)
mstar_table = constant_sfr * t_table
logsm_table = np.log10(mstar_table)
lgt_age_bin_edges = _get_lg_age_bin_edges(log_ages_gyr)
lgt_birth_bin_edges = _get_lgt_birth(t_obs, lgt_age_bin_edges)
dsps_age_weights = _get_age_weights_from_tables(
lgt_birth_bin_edges, lgt_table, logsm_table
)
assert np.allclose(dsps_age_weights, correct_weights, atol=0.01)
def test_age_weights_agree_with_analytical_calculation_of_linear_sfr_weights():
t_obs = 16.0
# Analytically calculate age distributions for SFR(t) = t
log_ages_gyr = FSPS_LG_AGES - 9
lgt_age_bin_edges = _get_lg_age_bin_edges(log_ages_gyr)
t_age_bin_edges_gyr = 10 ** lgt_age_bin_edges
t_births_bin_edges = t_obs - t_age_bin_edges_gyr
mstar_at_age_bins = linear_smh(T_BIRTH_MIN, t_births_bin_edges)
dmstar_ages = -np.diff(mstar_at_age_bins)
correct_weights = dmstar_ages / dmstar_ages.sum()
# Calculate age distributions with DSPS
t_table = np.linspace(T_BIRTH_MIN, t_obs, 50_000)
lgt_table = np.log10(t_table)
logsm_table = np.log10(linear_smh(T_BIRTH_MIN, t_table[1:]))
lgt_birth_bin_edges = _get_lgt_birth(t_obs, lgt_age_bin_edges)
dsps_age_weights = _get_age_weights_from_tables(
lgt_birth_bin_edges, lgt_table[1:], logsm_table
)
assert np.allclose(dsps_age_weights, correct_weights, atol=0.001)
| 35.893617 | 86 | 0.753705 | [
"BSD-3-Clause"
] | ArgonneCPAC/dsps | dsps/tests/test_stellar_ages.py | 3,374 | Python |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 09 21:40:30 2014
@author: yyu
"""
import JBCWebAutomation as jbc
today=jbc.GetToday()
# to override the date: today='140305' in 'yymmdd' format
###################################################################
print 'Start job for date ' + today +'...'
jbc.ConvertSermonDOC2PDF(today, 1) # 0 for English
ftp = jbc.JBCFTPConnection()
jbc.uploadJBCWeb(ftp, today, 1, 'pdf')
mp3today='20'+today[0:2]+'_'+today[2:6]
jbc.uploadJBCWeb(ftp, mp3today, 1, 'mp3')
jbc.backup_podcast_xml(ftp, today)
print 'Finished job.'
ftp.close()
raw_input("Press Enter to continue...")
| 27.652174 | 68 | 0.599057 | [
"Apache-2.0"
] | yingweiy/jbc_task_automation | JBCMain-Chinese.py | 636 | Python |
from django.db.models.fields.json import JSONField
__all__ = ("CurrencyAmountField", "JSONField")
class CurrencyAmountField(JSONField):
pass
| 16.222222 | 50 | 0.787671 | [
"MIT"
] | dj-stripe/dj-paypal | src/djpaypal/fields.py | 146 | Python |
import numpy as np
from skmultiflow.drift_detection.base_drift_detector import BaseDriftDetector
class ADWIN(BaseDriftDetector):
""" Adaptive Windowing method for concept drift detection.
Parameters
----------
delta : float (default=0.002)
The delta parameter for the ADWIN algorithm.
Notes
-----
ADWIN [1]_ (ADaptive WINdowing) is an adaptive sliding window algorithm
for detecting change, and keeping updated statistics about a data stream.
ADWIN allows algorithms not adapted for drifting data, to be resistant
to this phenomenon.
The general idea is to keep statistics from a window of variable size while
detecting concept drift.
The algorithm will decide the size of the window by cutting the statistics'
window at different points and analysing the average of some statistic over
these two windows. If the absolute value of the difference between the two
averages surpasses a pre-defined threshold, change is detected at that point
and all data before that time is discarded.
References
----------
.. [1] Bifet, Albert, and Ricard Gavalda. "Learning from time-changing data with adaptive
windowing."
In Proceedings of the 2007 SIAM international conference on data mining, pp. 443-448.
Society for Industrial and Applied Mathematics, 2007.
Examples
--------
>>> # Imports
>>> import numpy as np
>>> from skmultiflow.drift_detection.adwin import ADWIN
>>> adwin = ADWIN()
>>> # Simulating a data stream as a normal distribution of 1's and 0's
>>> data_stream = np.random.randint(2, size=2000)
>>> # Changing the data concept from index 999 to 2000
>>> for i in range(999, 2000):
... data_stream[i] = np.random.randint(4, high=8)
>>> # Adding stream elements to ADWIN and verifying if drift occurred
>>> for i in range(2000):
... adwin.add_element(data_stream[i])
... if adwin.detected_change():
... print('Change detected in data: ' + str(data_stream[i]) + ' - at index: ' + str(i))
"""
MAX_BUCKETS = 5
def __init__(self, delta=.002):
super().__init__()
# default values affected by init_bucket()
self.delta = delta
self.last_bucket_row = 0
self.list_row_bucket = None
self._total = 0
self._variance = 0
self._width = 0
self.bucket_number = 0
self.__init_buckets()
# other default values
self.mint_min_window_longitude = 10
self.mdbl_delta = .002
self.mint_time = 0
self.mdbl_width = 0
self.detect = 0
self._n_detections = 0
self.detect_twice = 0
self.mint_clock = 32
self.bln_bucket_deleted = False
self.bucket_num_max = 0
self.mint_min_window_length = 5
super().reset()
def reset(self):
""" Reset detectors
Resets statistics and adwin's window.
Returns
-------
ADWIN
self
"""
self.__init__(delta=self.delta)
def get_change(self):
""" Get drift
Returns
-------
bool
Whether or not a drift occurred
"""
return self.bln_bucket_deleted
def reset_change(self):
self.bln_bucket_deleted = False
def set_clock(self, clock):
self.mint_clock = clock
def detected_warning_zone(self):
return False
@property
def _bucket_used_bucket(self):
return self.bucket_num_max
@property
def width(self):
return self._width
@property
def n_detections(self):
return self._n_detections
@property
def total(self):
return self._total
@property
def variance(self):
return self._variance / self._width
@property
def estimation(self):
if self._width == 0:
return 0
return self._total / self._width
@estimation.setter
def estimation(self, value):
pass
@property
def width_t(self):
return self.mdbl_width
def __init_buckets(self):
""" Initialize the bucket's List and statistics
Set all statistics to 0 and create a new bucket List.
"""
self.list_row_bucket = List()
self.last_bucket_row = 0
self._total = 0
self._variance = 0
self._width = 0
self.bucket_number = 0
def add_element(self, value):
""" Add a new element to the sample window.
Apart from adding the element value to the window, by inserting it in
the correct bucket, it will also update the relevant statistics, in
this case the total sum of all values, the window width and the total
variance.
Parameters
----------
value: int or float (a numeric value)
Notes
-----
The value parameter can be any numeric value relevant to the analysis
of concept change. For the learners in this framework we are using
either 0's or 1's, that are interpreted as follows:
        0: Means the learner's prediction was wrong
        1: Means the learner's prediction was correct
This function should be used at every new sample analysed.
"""
if self.in_concept_change:
self.reset()
self._width += 1
self.__insert_element_bucket(0, value, self.list_row_bucket.first)
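        # Welford-style running update of the window variance: with the new
        # width n = self._width and self._total still holding the sum of the
        # previous n-1 samples, the sum of squared deviations grows by
        # (n - 1) / n * (value - previous_mean) ** 2.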
incremental_variance = 0
if self._width > 1:
incremental_variance = (self._width - 1) * \
(value - self._total / (self._width - 1)) * \
(value - self._total / (self._width - 1)) / self._width
self._variance += incremental_variance
self._total += value
self.__compress_buckets()
def __insert_element_bucket(self, variance, value, node):
node.insert_bucket(value, variance)
self.bucket_number += 1
if self.bucket_number > self.bucket_num_max:
self.bucket_num_max = self.bucket_number
@staticmethod
def bucket_size(row):
return np.power(2, row)
def delete_element(self):
""" Delete an Item from the bucket list.
Deletes the last Item and updates relevant statistics kept by ADWIN.
Returns
-------
int
The bucket size from the updated bucket
"""
node = self.list_row_bucket.last
n1 = self.bucket_size(self.last_bucket_row)
self._width -= n1
self._total -= node.get_total(0)
u1 = node.get_total(0) / n1
incremental_variance = node.get_variance(0) + n1 * self._width * (
u1 - self._total / self._width) * (u1 - self._total / self._width) / (
n1 + self._width)
self._variance -= incremental_variance
node.remove_bucket()
self.bucket_number -= 1
if node.bucket_size_row == 0:
self.list_row_bucket.remove_from_tail()
self.last_bucket_row -= 1
return n1
def __compress_buckets(self):
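        # The window is stored as an exponential histogram: row i holds buckets
        # that each summarise 2**i samples.  When a row exceeds MAX_BUCKETS its
        # two oldest buckets are merged into one bucket of the next row, and the
        # cross term n1 * n2 * (u1 - u2)**2 / (n1 + n2) -- the usual correction
        # when pooling two sub-samples -- is added to the merged variance.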
cursor = self.list_row_bucket.first
i = 0
while cursor is not None:
k = cursor.bucket_size_row
if k == self.MAX_BUCKETS + 1:
next_node = cursor.get_next_item()
if next_node is None:
self.list_row_bucket.add_to_tail()
next_node = cursor.get_next_item()
self.last_bucket_row += 1
n1 = self.bucket_size(i)
n2 = self.bucket_size(i)
u1 = cursor.get_total(0) / n1
u2 = cursor.get_total(1) / n2
incremental_variance = n1 * n2 * ((u1 - u2) * (u1 - u2)) / (n1 + n2)
next_node.insert_bucket(
cursor.get_total(0) + cursor.get_total(1),
cursor.get_variance(1) + incremental_variance)
self.bucket_number += 1
cursor.compress_bucket_row(2)
if next_node.bucket_size_row <= self.MAX_BUCKETS:
break
else:
break
cursor = cursor.get_next_item()
i += 1
def detected_change(self):
""" Detects concept change in a drifting data stream.
The ADWIN algorithm is described in Bifet and Gavaldà's 'Learning from
Time-Changing Data with Adaptive Windowing'. The general idea is to keep
statistics from a window of variable size while detecting concept drift.
This function is responsible for analysing different cutting points in
the sliding window, to verify if there is a significant change in concept.
Returns
-------
bln_change : bool
Whether change was detected or not
Notes
-----
If change was detected, one should verify the new window size, by reading
the width property.
"""
bln_change = False
bln_exit = False
bln_bucket_deleted = False
self.mint_time += 1
n0 = 0
if (self.mint_time % self.mint_clock == 0) and (
self.width > self.mint_min_window_longitude):
bln_reduce_width = True
while bln_reduce_width:
bln_reduce_width = not bln_reduce_width
bln_exit = False
n0 = 0
n1 = self._width
u0 = 0
u1 = self.total
v0 = 0
v1 = self._variance
n2 = 0
u2 = 0
cursor = self.list_row_bucket.last
i = self.last_bucket_row
while (not bln_exit) and (cursor is not None):
for k in range(cursor.bucket_size_row):
n2 = self.bucket_size(i)
u2 = cursor.get_total(k)
if n0 > 0:
v0 += cursor.get_variance(k) + 1. * n0 * n2 * \
(u0 / n0 - u2 / n2) * (u0 / n0 - u2 / n2) / (n0 + n2)
if n1 > 0:
v1 -= cursor.get_variance(k) + 1. * n1 * n2 * \
(u1 / n1 - u2 / n2) * (u1 / n1 - u2 / n2) / (n1 + n2)
n0 += self.bucket_size(i)
n1 -= self.bucket_size(i)
u0 += cursor.get_total(k)
u1 -= cursor.get_total(k)
if (i == 0) and (k == cursor.bucket_size_row - 1):
bln_exit = True
break
abs_value = 1. * ((u0 / n0) - (u1 / n1))
if (n1 >= self.mint_min_window_length) \
and (n0 >= self.mint_min_window_length) \
and (
self.__bln_cut_expression(n0, n1, u0, u1, v0, v1, abs_value,
self.delta)):
bln_bucket_deleted = True # noqa: F841
self.detect = self.mint_time
if self.detect == 0:
self.detect = self.mint_time
elif self.detect_twice == 0:
self.detect_twice = self.mint_time
bln_reduce_width = True
bln_change = True
if self.width > 0:
n0 -= self.delete_element()
bln_exit = True
break
cursor = cursor.get_previous()
i -= 1
self.mdbl_width += self.width
if bln_change:
self._n_detections += 1
self.in_concept_change = bln_change
return bln_change
def __bln_cut_expression(self, n0, n1, u0, u1, v0, v1, abs_value, delta):
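        # Cut condition from the ADWIN paper: split the window into sub-windows
        # of sizes n0 and n1 with means u0/n0 and u1/n1, and flag a change when
        # |mean0 - mean1| exceeds
        #     eps_cut = sqrt(2 * m * V * dd) + (2/3) * dd * m
        # where m  = 1/(n0 - min_len + 1) + 1/(n1 - min_len + 1),
        #       dd = ln(2 * ln(n) / delta), and V is the whole-window variance.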
n = self.width
dd = np.log(2 * np.log(n) / delta)
v = self.variance
m = (1. / (n0 - self.mint_min_window_length + 1)) + \
(1. / (n1 - self.mint_min_window_length + 1))
epsilon = np.sqrt(2 * m * v * dd) + 1. * 2 / 3 * dd * m
return np.absolute(abs_value) > epsilon
class List(object):
""" A linked list object for ADWIN algorithm.
Used for storing ADWIN's bucket list. Is composed of Item objects.
Acts as a linked list, where each element points to its predecessor
and successor.
"""
def __init__(self):
super().__init__()
self._count = None
self._first = None
self._last = None
self.reset()
self.add_to_head()
def reset(self):
self._count = 0
self._first = None
self._last = None
def add_to_head(self):
self._first = Item(self._first, None)
if self._last is None:
self._last = self._first
def remove_from_head(self):
self._first = self._first.get_next_item()
if self._first is not None:
self._first.set_previous(None)
else:
self._last = None
self._count -= 1
def add_to_tail(self):
self._last = Item(None, self._last)
if self._first is None:
self._first = self._last
self._count += 1
def remove_from_tail(self):
self._last = self._last.get_previous()
if self._last is not None:
self._last.set_next_item(None)
else:
self._first = None
self._count -= 1
@property
def first(self):
return self._first
@property
def last(self):
return self._last
@property
def size(self):
return self._count
class Item(object):
""" Item to be used by the List object.
The Item object, alongside the List object, are the two main data
structures used for storing the relevant statistics for the ADWIN
algorithm for change detection.
Parameters
----------
next_item: Item object
Reference to the next Item in the List
previous_item: Item object
Reference to the previous Item in the List
"""
def __init__(self, next_item=None, previous_item=None):
super().__init__()
self.next = next_item
self.previous = previous_item
if next_item is not None:
next_item.previous = self
if previous_item is not None:
previous_item.set_next_item(self)
self.bucket_size_row = None
self.max_buckets = ADWIN.MAX_BUCKETS
self.bucket_total = np.zeros(self.max_buckets + 1, dtype=float)
self.bucket_variance = np.zeros(self.max_buckets + 1, dtype=float)
self.reset()
def reset(self):
""" Reset the algorithm's statistics and window
Returns
-------
ADWIN
self
"""
self.bucket_size_row = 0
for i in range(ADWIN.MAX_BUCKETS + 1):
self.__clear_buckets(i)
return self
def __clear_buckets(self, index):
self.set_total(0, index)
self.set_variance(0, index)
def insert_bucket(self, value, variance):
new_item = self.bucket_size_row
self.bucket_size_row += 1
self.set_total(value, new_item)
self.set_variance(variance, new_item)
def remove_bucket(self):
self.compress_bucket_row(1)
def compress_bucket_row(self, num_deleted=1):
for i in range(num_deleted, ADWIN.MAX_BUCKETS + 1):
self.bucket_total[i - num_deleted] = self.bucket_total[i]
self.bucket_variance[i - num_deleted] = self.bucket_variance[i]
for i in range(1, num_deleted + 1):
self.__clear_buckets(ADWIN.MAX_BUCKETS - i + 1)
self.bucket_size_row -= num_deleted
def get_next_item(self):
return self.next
def set_next_item(self, next_item):
self.next = next_item
def get_previous(self):
return self.previous
def set_previous(self, previous):
self.previous = previous
def get_total(self, index):
return self.bucket_total[index]
def get_variance(self, index):
return self.bucket_variance[index]
def set_total(self, value, index):
self.bucket_total[index] = value
def set_variance(self, value, index):
self.bucket_variance[index] = value
| 31.131579 | 99 | 0.561949 | [
"BSD-3-Clause"
] | denisesato/scikit-multiflow | src/skmultiflow/drift_detection/adwin.py | 16,563 | Python |
from django.contrib import admin
from django.db import transaction
from zinc.models import Policy, PolicyMember
class PolicyMemberInline(admin.TabularInline):
readonly_fields = ('ip_enabled',)
model = PolicyMember
extra = 1
verbose_name = 'member'
verbose_name_plural = 'members'
def ip_enabled(self, obj):
return obj.ip.enabled
ip_enabled.boolean = True
@admin.register(Policy)
class PolicyAdmin(admin.ModelAdmin):
fields = ('name', 'routing', 'ttl')
readonly_fields = ()
list_display = ('__str__', 'routing', 'regions', 'status')
list_filter = ('routing', 'members__region')
inlines = (PolicyMemberInline,)
exclude = ('members',)
def get_queryset(self, request):
qs = super(PolicyAdmin, self).get_queryset(request)
qs = qs.prefetch_related('members')
return qs
def regions(self, obj):
# get_queryset prefetches related policy members so iterating over
# objects is ok because we are iterating over already fetched data
return ', '.join(sorted({m.region for m in obj.members.all()}))
@transaction.atomic
def save_model(self, request, obj, form, change):
rv = super().save_model(request, obj, form, change)
obj.change_trigger(form.changed_data)
return rv
def status(self, obj):
warnings = []
if obj.routing == 'latency':
members_by_region = {}
for member in obj.members.all():
members_by_region.setdefault(member.region, []).append(member)
if len(members_by_region) <= 1:
warnings.append('✖ Latency routed policy should span multiple regions!')
for region, members in members_by_region.items():
if len([m for m in members if m.weight > 0]) == 0:
warnings.append(
'✖ All members of region {} have weight zero!'.format(region))
elif obj.routing == 'weighted':
active_members = [m for m in obj.members.all() if m.weight > 0]
if len(active_members) == 0:
warnings.append('✖ All members have weight zero!')
if warnings:
return '<span style="color: red">{}</red>'.format("<br>".join(warnings))
else:
return "✔ ok"
status.allow_tags = True
status.short_description = 'Status'
| 36.530303 | 95 | 0.61883 | [
"Apache-2.0"
] | PressLabs/zinc | zinc/admin/policy.py | 2,411 | Python |
import os
import sys
import getopt
import re
import time
from datetime import datetime
use_cpu_utilization = True
url = "http://localhost:8086/status"
min_containers = 1
max_containers = 6
peer = "peer"
order = "order"
check_interval = 5
up_threshold = 85 if use_cpu_utilization else 20
down_threshold = 60 if use_cpu_utilization else -1
log_cpu_path = "fabric.csv"
def printSetting():
print("Min containers: \t%d" % min_containers)
print("Max containers: \t%d" % max_containers)
print("Check interval: \t%d seconds" % check_interval)
if use_cpu_utilization:
print("Up threshold: \t> %.2f%% cpu utilization" % up_threshold)
print("Down threshold: \t< %.2f%% cpu utilization" % down_threshold)
else:
print("Up threshold: \t> %d waiting requests" % int(up_threshold))
print("Down threshold: \t< %d waiting requests" % int(down_threshold))
def printUsage():
print(
"""
Usage: %s [options]
-h or --help: show help info
-l url or --link=url: the status url of nginx
-m min_containers or --min=min_containers: the min number of containers
-M max_containers or --max=max_containers: the max number of containers
-t target_container or --target=target_container: the target container
-i check_interval or --interval=check_interval: the checking interval
-u up_threshold or --up=up_threshold: the threshold for scaling up
-d down_threshold or --down = down_threshold: the threshold for scaling down
"""
% (sys.argv[0],))
def check_cpu_utilization(log_file):
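    # Parses the table printed by `sudo docker stats --no-stream`.  The
    # whitespace split below assumes the default column layout
    #   CONTAINER_ID  NAME  CPU%  MEM_USAGE / LIMIT  MEM%  NET_I/O  BLOCK_I/O  PIDS
    # so ss[1] is the container name, ss[2] the CPU percentage and ss[6] the
    # memory percentage; only containers matching the peer/order name patterns
    # are included in the averages.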
pattern = re.compile(r".*%s.*" % peer)
pattern1 = re.compile(r".*%s.*" % order)
cpus = []
mems = []
with os.popen("sudo docker stats --no-stream") as f:
for s in f.readlines():
ss = s.split()
if len(ss) >= 3 and (pattern.match(ss[1]) or pattern1.match(ss[1])):
cu = float(ss[2].replace("%", ""))
cpus.append(cu)
name = ss[1]
mem = float(ss[6].replace("%", ""))
mems.append(mem)
print("INFO: container %s: cpu %.2f%%, mem %.2f%%" % (ss[1], cu, mem))
num = len(cpus)
avg_cpu = sum(cpus) / num if num > 0 else -1
avg_mem = sum(mems) / num if num > 0 else -1
log_file.write("%s,%d,%.2f,%.2f,%s\n" % (datetime.now().strftime("%H:%M:%S"),
num, avg_cpu, avg_mem,
",".join("%.2f,%.2f" % (cpus[i], mems[i]) for i in range(num))))
log_file.flush()
return avg_cpu
try:
opts, args = getopt.getopt(sys.argv[1:],
"hl:m:M:t:i:u:d:",
["help", "link=", "min=", "max=", "target=", "interval=", "up=", "down="]
)
except getopt.GetoptError:
print("Error: Invalid arguments!")
sys.exit(-1)
for cmd, arg in opts:
if cmd in ("-h", "--help"):
printSetting()
print("")
printUsage()
sys.exit(0)
elif cmd in ("-l", "--link"):
url = arg
elif cmd in ("-m", "--min"):
min_containers = max(1, int(arg))
elif cmd in ("-M", "--max"):
max_containers = int(arg)
elif cmd in ("-t", "--target"):
        target_container = arg
elif cmd in ("-u", "--up"):
up_threshold = float(arg)
elif cmd in ("-d", "--down"):
down_threshold = float(arg)
elif cmd in ("-i", "--interval"):
check_interval = int(arg)
printSetting()
print("")
log_file = open(log_cpu_path, "w+")
log_file.write("Time,Num,AvgCPU,AvgMEM,C1CPU,CIMEM,...\n")
while True:
start_time = time.time()
print("INFO:\tStart checking ...")
if use_cpu_utilization:
avg_cu = check_cpu_utilization(log_file)
print("avg cpu .. ", avg_cu)
end_time = time.time()
sleep_time = check_interval - (end_time - start_time)
print("INFO:\tFinish checking. Sleeping %.2f seconds ...\n" % sleep_time)
if sleep_time > 0:
time.sleep(sleep_time)
log_file.close()
| 32.59375 | 109 | 0.56256 | [
"Apache-2.0"
] | wejdeneHaouari/blockbench | scripts/fabric-cpu.py | 4,172 | Python |
from django.shortcuts import render
from django.http import HttpResponse
from rango.models import Category, Page
def index(request):
category_list = Category.objects.order_by('-likes')[:5]
context_dict = {}
context_dict['boldmessage'] = 'Crunchy, creamy, cookie, candy, cupcake!'
context_dict['categories'] = category_list
# Render the response and send it back!
return render(request, 'rango/index.html', context=context_dict)
def about(request):
context_dict = {'boldmessage': 'This tutorial has been put together by Decklin Johnston'}
return render(request, 'rango/about.html', context=context_dict)
def show_category(request, category_name_slug):
context_dict = {}
try:
category = Category.objects.get(slug=category_name_slug)
pages = Page.objects.filter(category=category)
context_dict['pages'] = pages
context_dict['category'] = category
except Category.DoesNotExist:
context_dict['pages'] = None
context_dict['category'] = None
return render(request, 'rango/category.html', context=context_dict) | 34.375 | 93 | 0.715455 | [
"Apache-2.0"
] | Recol/Django-Tango | main/files/rango/views.py | 1,100 | Python |
import numpy as np
from math import cos, sin, atan2
from errors_exceptions import OpenRAVEException
from openravepy import quatFromAxisAngle, matrixFromPose, poseFromMatrix, \
axisAngleFromRotationMatrix, KinBody, GeometryType, RaveCreateRobot, \
RaveCreateKinBody, TriMesh, Environment, DOFAffine, IkParameterization, IkParameterizationType, \
IkFilterOptions, matrixFromAxisAngle, quatFromRotationMatrix
from core.util_classes.robots import Robot, PR2, Baxter, Washer
from core.util_classes.items import Item, Box, Can, BlueCan, RedCan, Circle, BlueCircle, RedCircle, GreenCircle, Obstacle, Wall, Table, Basket
WALL_THICKNESS = 1
class OpenRAVEBody(object):
def __init__(self, env, name, geom):
assert env is not None
self.name = name
self._env = env
self._geom = geom
if env.GetKinBody(name) == None and env.GetRobot(name) == None:
if isinstance(geom, Robot):
self._add_robot(geom)
elif isinstance(geom, Item):
self._add_item(geom)
else:
raise OpenRAVEException("Geometry not supported for %s for OpenRAVEBody"%geom)
elif env.GetKinBody(name) != None:
self.env_body = env.GetKinBody(name)
else:
self.env_body = env.GetRobot(name)
self.set_transparency(0.5)
def delete(self):
self._env.Remove(self.env_body)
def set_transparency(self, transparency):
for link in self.env_body.GetLinks():
for geom in link.GetGeometries():
geom.SetTransparency(transparency)
def _add_robot(self, geom):
self.env_body = self._env.ReadRobotXMLFile(geom.shape)
self.env_body.SetName(self.name)
self._env.Add(self.env_body)
geom.setup(self.env_body)
def _add_item(self, geom):
try:
fun_name = "self._add_{}".format(geom._type)
eval(fun_name)(geom)
except:
self._add_obj(geom)
def _add_circle(self, geom):
color = [1,0,0]
if hasattr(geom, "color") and geom.color == 'blue':
color = [0, 0, 1]
elif hasattr(geom, "color") and geom.color == 'green':
color = [0, 1, 0]
elif hasattr(geom, "color") and geom.color == 'red':
color = [1, 0, 0]
self.env_body = OpenRAVEBody.create_cylinder(self._env, self.name, np.eye(4),
[geom.radius, 2], color)
self._env.AddKinBody(self.env_body)
def _add_can(self, geom):
color = [1,0,0]
if hasattr(geom, "color") and geom.color == 'blue':
color = [0, 0, 1]
elif hasattr(geom, "color") and geom.color == 'green':
color = [0, 1, 0]
elif hasattr(geom, "color") and geom.color == 'red':
color = [1, 0, 0]
self.env_body = OpenRAVEBody.create_cylinder(self._env, self.name, np.eye(4),
[geom.radius, geom.height], color)
self._env.AddKinBody(self.env_body)
def _add_obstacle(self, geom):
obstacles = np.matrix('-0.576036866359447, 0.918128654970760, 1;\
-0.806451612903226,-1.07017543859649, 1;\
1.01843317972350,-0.988304093567252, 1;\
0.640552995391705,0.906432748538011, 1;\
-0.576036866359447, 0.918128654970760, -1;\
-0.806451612903226,-1.07017543859649, -1;\
1.01843317972350,-0.988304093567252, -1;\
0.640552995391705,0.906432748538011, -1')
body = RaveCreateKinBody(self._env, '')
vertices = np.array(obstacles)
indices = np.array([[0, 1, 2], [2, 3, 0], [4, 5, 6], [6, 7, 4], [0, 4, 5],
[0, 1, 5], [1, 2, 5], [5, 6, 2], [2, 3, 6], [6, 7, 3],
[0, 3, 7], [0, 4, 7]])
body.InitFromTrimesh(trimesh=TriMesh(vertices, indices), draw=True)
body.SetName(self.name)
for link in body.GetLinks():
for geom in link.GetGeometries():
geom.SetDiffuseColor((.9, .9, .9))
self.env_body = body
self._env.AddKinBody(body)
def _add_box(self, geom):
infobox = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, geom.dim, [0.5, 0.2, 0.1])
self.env_body = RaveCreateKinBody(self._env,'')
self.env_body.InitFromGeometries([infobox])
self.env_body.SetName(self.name)
self._env.Add(self.env_body)
def _add_sphere(self, geom):
infobox = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Sphere, [geom.radius], [0, 0, 1])
self.env_body = RaveCreateKinBody(self._env,'')
self.env_body.InitFromGeometries([infobox])
self.env_body.SetName(self.name)
self._env.Add(self.env_body)
def _add_wall(self, geom):
self.env_body = OpenRAVEBody.create_wall(self._env, geom.wall_type)
self.env_body.SetName(self.name)
self._env.Add(self.env_body)
def _add_obj(self, geom):
self.env_body = self._env.ReadKinBodyXMLFile(geom.shape)
self.env_body.SetName(self.name)
self._env.Add(self.env_body)
def _add_table(self, geom):
self.env_body = OpenRAVEBody.create_table(self._env, geom)
self.env_body.SetName(self.name)
self._env.Add(self.env_body)
def _add_basket(self, geom):
self.env_body = self._env.ReadKinBodyXMLFile(geom.shape)
self.env_body.SetName(self.name)
self._env.Add(self.env_body)
def set_pose(self, base_pose, rotation = [0, 0, 0]):
trans = None
if np.any(np.isnan(base_pose)) or np.any(np.isnan(rotation)):
return
if isinstance(self._geom, Robot) and not isinstance(self._geom, Washer):
trans = OpenRAVEBody.base_pose_to_mat(base_pose)
elif len(base_pose) == 2:
trans = OpenRAVEBody.base_pose_2D_to_mat(base_pose)
else:
trans = OpenRAVEBody.transform_from_obj_pose(base_pose, rotation)
self.env_body.SetTransform(trans)
def set_dof(self, dof_value_map):
"""
dof_value_map: A dict that maps robot attribute name to a list of corresponding values
"""
# make sure only sets dof for robot
# assert isinstance(self._geom, Robot)
if not isinstance(self._geom, Robot): return
# Get current dof value for each joint
dof_val = self.env_body.GetActiveDOFValues()
for k, v in dof_value_map.items():
if k not in self._geom.dof_map or np.any(np.isnan(v)): continue
inds = self._geom.dof_map[k]
try:
dof_val[inds] = v
except IndexError:
print(('\n\n\nBad index in set dof:', inds, k, v, self._geom, '\n\n\n'))
# Set new DOF value to the robot
self.env_body.SetActiveDOFValues(dof_val)
def _set_active_dof_inds(self, inds = None):
"""
Set active dof index to the one we are interested
This function is implemented to simplify jacobian calculation in the CollisionPredicate
inds: Optional list of index specifying dof index we are interested in
"""
robot = self.env_body
if inds == None:
dof_inds = np.ndarray(0, dtype=np.int)
if robot.GetJoint("torso_lift_joint") != None:
dof_inds = np.r_[dof_inds, robot.GetJoint("torso_lift_joint").GetDOFIndex()]
dof_inds = np.r_[dof_inds, robot.GetManipulator("leftarm").GetArmIndices()]
dof_inds = np.r_[dof_inds, robot.GetManipulator("leftarm").GetGripperIndices()]
dof_inds = np.r_[dof_inds, robot.GetManipulator("rightarm").GetArmIndices()]
dof_inds = np.r_[dof_inds, robot.GetManipulator("rightarm").GetGripperIndices()]
robot.SetActiveDOFs(
dof_inds,
DOFAffine.X + DOFAffine.Y + DOFAffine.RotationAxis,
[0, 0, 1])
else:
robot.SetActiveDOFs(inds)
@staticmethod
def create_cylinder(env, body_name, t, dims, color=[0, 1, 1]):
infocylinder = OpenRAVEBody.create_body_info(GeometryType.Cylinder, dims, color)
if type(env) != Environment:
# import ipdb; ipdb.set_trace()
print("Environment object is not valid")
cylinder = RaveCreateKinBody(env, '')
cylinder.InitFromGeometries([infocylinder])
cylinder.SetName(body_name)
cylinder.SetTransform(t)
return cylinder
@staticmethod
def create_box(env, name, transform, dims, color=[0,0,1]):
infobox = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, dims, color, 0, True)
box = RaveCreateKinBody(env,'')
box.InitFromGeometries([infobox])
box.SetName(name)
box.SetTransform(transform)
return box
@staticmethod
def create_sphere(env, name, transform, dims, color=[0,0,1]):
infobox = OpenRAVEBody.create_body_info(GeometryType.Sphere, dims, color)
sphere = RaveCreateKinBody(env,'')
sphere.InitFromGeometries([infobox])
sphere.SetName(name)
sphere.SetTransform(transform)
return sphere
@staticmethod
def create_body_info(body_type, dims, color, transparency = 0.8, visible = True):
infobox = KinBody.Link.GeometryInfo()
infobox._type = body_type
infobox._vGeomData = dims
infobox._bVisible = True
infobox._fTransparency = transparency
infobox._vDiffuseColor = color
return infobox
@staticmethod
def create_wall(env, wall_type):
component_type = KinBody.Link.GeomType.Box
wall_color = [0.5, 0.2, 0.1]
box_infos = []
if wall_type == 'closet':
wall_endpoints = [[-6.0,-8.0],[-6.0,4.0],[1.9,4.0],[1.9,8.0],[5.0,8.0],[5.0,4.0],[13.0,4.0],[13.0,-8.0],[-6.0,-8.0]]
else:
            raise NotImplementedError('Unsupported wall type: {}'.format(wall_type))
for i, (start, end) in enumerate(zip(wall_endpoints[0:-1], wall_endpoints[1:])):
dim_x, dim_y = 0, 0
thickness = WALL_THICKNESS
if start[0] == end[0]:
ind_same, ind_diff = 0, 1
length = abs(start[ind_diff] - end[ind_diff])
dim_x, dim_y = thickness, length/2 + thickness
elif start[1] == end[1]:
ind_same, ind_diff = 1, 0
length = abs(start[ind_diff] - end[ind_diff])
dim_x, dim_y = length/2 + thickness, thickness
else:
                raise NotImplementedError('Can only create axis-aligned walls')
transform = np.eye(4)
transform[ind_same, 3] = start[ind_same]
if start[ind_diff] < end[ind_diff]:
transform[ind_diff, 3] = start[ind_diff] + length/2
else:
transform[ind_diff, 3] = end[ind_diff] + length/2
dims = [dim_x, dim_y, 1]
box_info = OpenRAVEBody.create_body_info(component_type, dims, wall_color)
box_info._t = transform
box_infos.append(box_info)
wall = RaveCreateKinBody(env, '')
wall.InitFromGeometries(box_infos)
return wall
@staticmethod
def get_wall_dims(wall_type='closet'):
wall_endpoints = [[-6.0,-8.0],[-6.0,4.0],[1.9,4.0],[1.9,8.0],[5.0,8.0],[5.0,4.0],[13.0,4.0],[13.0,-8.0],[-6.0,-8.0]]
dims = []
for i, (start, end) in enumerate(zip(wall_endpoints[0:-1], wall_endpoints[1:])):
dim_x, dim_y = 0, 0
thickness = WALL_THICKNESS
if start[0] == end[0]:
ind_same, ind_diff = 0, 1
length = abs(start[ind_diff] - end[ind_diff])
dim_x, dim_y = thickness, length/2 + thickness
elif start[1] == end[1]:
ind_same, ind_diff = 1, 0
length = abs(start[ind_diff] - end[ind_diff])
dim_x, dim_y = length/2 + thickness, thickness
else:
                raise NotImplementedError('Can only create axis-aligned walls')
transform = np.eye(4)
transform[ind_same, 3] = start[ind_same]
if start[ind_diff] < end[ind_diff]:
transform[ind_diff, 3] = start[ind_diff] + length/2
else:
transform[ind_diff, 3] = end[ind_diff] + length/2
dims.append(([dim_x, dim_y, 1], transform))
return dims
@staticmethod
def create_basket_col(env):
long_info1 = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, [.3,.15,.015], [0, 0.75, 1])
long_info2 = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, [.3,.15,.015], [0, 0.75, 1])
short_info1 = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, [.015,.15,.2], [0, 0.75, 1])
short_info2 = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, [.015,.15,.2], [0, 0.75, 1])
bottom_info = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, [.3,.015,.2], [0, 0.75, 1])
long_info1._t = OpenRAVEBody.transform_from_obj_pose([0,-0.118,0.208],[0,0,0.055])
long_info2._t = OpenRAVEBody.transform_from_obj_pose([0,-0.118,-0.208],[0,0,-0.055])
short_info1._t = OpenRAVEBody.transform_from_obj_pose([0.309,-0.118,0],[-0.055,0,0])
short_info2._t = OpenRAVEBody.transform_from_obj_pose([-0.309,-0.118,0],[0.055,0,0])
bottom_info._t = OpenRAVEBody.transform_from_obj_pose([0,-0.25,0],[0,0,0])
basket = RaveCreateRobot(env, '')
basket.InitFromGeometries([long_info1, long_info2, short_info1, short_info2, bottom_info])
return basket
@staticmethod
def create_table(env, geom):
thickness = geom.thickness
leg_height = geom.leg_height
back = geom.back
dim1, dim2 = geom.table_dim
legdim1, legdim2 = geom.leg_dim
table_color = [0.5, 0.2, 0.1]
component_type = KinBody.Link.GeomType.Box
tabletop = OpenRAVEBody.create_body_info(component_type, [dim1/2, dim2/2, thickness/2], table_color)
leg1 = OpenRAVEBody.create_body_info(component_type, [legdim1/2, legdim2/2, leg_height/2], table_color)
leg1._t[0, 3] = dim1/2 - legdim1/2
leg1._t[1, 3] = dim2/2 - legdim2/2
leg1._t[2, 3] = -leg_height/2 - thickness/2
leg2 = OpenRAVEBody.create_body_info(component_type, [legdim1/2, legdim2/2, leg_height/2], table_color)
leg2._t[0, 3] = dim1/2 - legdim1/2
leg2._t[1, 3] = -dim2/2 + legdim2/2
leg2._t[2, 3] = -leg_height/2 - thickness/2
leg3 = OpenRAVEBody.create_body_info(component_type, [legdim1/2, legdim2/2, leg_height/2], table_color)
leg3._t[0, 3] = -dim1/2 + legdim1/2
leg3._t[1, 3] = dim2/2 - legdim2/2
leg3._t[2, 3] = -leg_height/2 - thickness/2
leg4 = OpenRAVEBody.create_body_info(component_type, [legdim1/2, legdim2/2, leg_height/2], table_color)
leg4._t[0, 3] = -dim1/2 + legdim1/2
leg4._t[1, 3] = -dim2/2 + legdim2/2
leg4._t[2, 3] = -leg_height/2 - thickness/2
if back:
back_plate = OpenRAVEBody.create_body_info(component_type, [legdim1/10, dim2/2, leg_height-thickness/2], table_color)
back_plate._t[0, 3] = dim1/2 - legdim1/10
back_plate._t[1, 3] = 0
back_plate._t[2, 3] = -leg_height/2 - thickness/4
table = RaveCreateRobot(env, '')
if not back:
table.InitFromGeometries([tabletop, leg1, leg2, leg3, leg4])
else:
table.InitFromGeometries([tabletop, leg1, leg2, leg3, leg4, back_plate])
return table
@staticmethod
def base_pose_2D_to_mat(pose):
# x, y = pose
assert len(pose) == 2
x = pose[0]
y = pose[1]
rot = 0
q = quatFromAxisAngle((0, 0, rot)).tolist()
pos = [x, y, 0]
# pos = np.vstack((x,y,np.zeros(1)))
matrix = matrixFromPose(q + pos)
return matrix
@staticmethod
def base_pose_3D_to_mat(pose):
# x, y, z = pose
assert len(pose) == 3
x = pose[0]
y = pose[1]
z = pose[2]
rot = 0
q = quatFromAxisAngle((0, 0, rot)).tolist()
pos = [x, y, z]
# pos = np.vstack((x,y,np.zeros(1)))
matrix = matrixFromPose(q + pos)
return matrix
@staticmethod
def mat_to_base_pose_2D(mat):
pose = poseFromMatrix(mat)
x = pose[4]
y = pose[5]
return np.array([x,y])
@staticmethod
def base_pose_to_mat(pose):
# x, y, rot = pose
assert len(pose) == 3
x = pose[0]
y = pose[1]
rot = pose[2]
q = quatFromAxisAngle((0, 0, rot)).tolist()
pos = [x, y, 0]
# pos = np.vstack((x,y,np.zeros(1)))
matrix = matrixFromPose(q + pos)
return matrix
@staticmethod
def angle_pose_to_mat(pose):
assert len(pose) == 1
        q = quatFromAxisAngle((0, 0, pose[0])).tolist()
        pos = [0, 0, 0]  # rotation-only pose, placed at the origin
        matrix = matrixFromPose(q + pos)
return matrix
@staticmethod
def mat_to_base_pose(mat):
pose = poseFromMatrix(mat)
x = pose[4]
y = pose[5]
rot = axisAngleFromRotationMatrix(mat)[2]
return np.array([x,y,rot])
@staticmethod
def obj_pose_from_transform(transform):
trans = transform[:3,3]
rot_matrix = transform[:3,:3]
yaw, pitch, roll = OpenRAVEBody._ypr_from_rot_matrix(rot_matrix)
# ipdb.set_trace()
return np.array((trans[0], trans[1], trans[2], yaw, pitch, roll))
@staticmethod
def transform_from_obj_pose(pose, rotation = np.array([0,0,0])):
x, y, z = pose
alpha, beta, gamma = rotation
Rz, Ry, Rx = OpenRAVEBody._axis_rot_matrices(pose, rotation)
rot_mat = np.dot(Rz, np.dot(Ry, Rx))
matrix = np.eye(4)
matrix[:3,:3] = rot_mat
matrix[:3,3] = [x,y,z]
return matrix
@staticmethod
def _axis_rot_matrices(pose, rotation):
x, y, z = pose
alpha, beta, gamma = rotation
Rz_2d = np.array([[cos(alpha), -sin(alpha)], [sin(alpha), cos(alpha)]])
Ry_2d = np.array([[cos(beta), sin(beta)], [-sin(beta), cos(beta)]])
Rx_2d = np.array([[cos(gamma), -sin(gamma)], [sin(gamma), cos(gamma)]])
I = np.eye(3)
Rz = I.copy()
Rz[:2,:2] = Rz_2d
Ry = I.copy()
Ry[[[0],[2]],[0,2]] = Ry_2d
Rx = I.copy()
Rx[1:3,1:3] = Rx_2d
# ipdb.set_trace()
return Rz, Ry, Rx
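    # Convention shared by these helpers: transform_from_obj_pose() composes
    # R = Rz(alpha) . Ry(beta) . Rx(gamma) (yaw about z, pitch about y, roll
    # about x), and _ypr_from_rot_matrix() below recovers those same
    # (yaw, pitch, roll) angles from such a matrix.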
@staticmethod
def _ypr_from_rot_matrix(r):
# alpha
yaw = atan2(r[1,0], r[0,0])
# beta
pitch = atan2(-r[2,0],np.sqrt(r[2,1]**2+r[2,2]**2))
# gamma
roll = atan2(r[2,1], r[2,2])
# ipdb.set_trace()
return (yaw, pitch, roll)
@staticmethod
def get_ik_transform(pos, rot, right_arm = True):
trans = OpenRAVEBody.transform_from_obj_pose(pos, rot)
# Openravepy flip the rotation axis by 90 degree, thus we need to change it back
if right_arm:
rot_mat = matrixFromAxisAngle([0, np.pi/2, 0])
else:
rot_mat = matrixFromAxisAngle([0, -np.pi/2, 0])
trans_mat = trans[:3, :3].dot(rot_mat[:3, :3])
trans[:3, :3] = trans_mat
return trans
def get_ik_arm_pose(self, pos, rot):
# assert isinstance(self._geom, PR2)
solutions = self.get_ik_from_pose(pos, rot, 'rightarm_torso')
return solutions
def get_ik_from_pose(self, pos, rot, manip_name, use6d=True):
trans = OpenRAVEBody.get_ik_transform(pos, rot)
solutions = self.get_ik_solutions(manip_name, trans, use6d)
return solutions
def get_ik_solutions(self, manip_name, trans, use6d=True):
manip = self.env_body.GetManipulator(manip_name)
if use6d:
iktype = IkParameterizationType.Transform6D
else:
iktype = IkParameterizationType.Translation3D
solutions = manip.FindIKSolutions(IkParameterization(trans, iktype),IkFilterOptions.CheckEnvCollisions)
return solutions
def get_close_ik_solution(self, manip_name, trans, dof_map=None):
if dof_map is not None:
self.set_dof(dof_map)
manip = self.env_body.GetManipulator(manip_name)
iktype = IkParameterizationType.Transform6D
ik_param = IkParameterization(trans, iktype)
solution = manip.FindIKSolution(ik_param, IkFilterOptions.IgnoreSelfCollisions)
return solution
def fwd_kinematics(self, manip_name, dof_map=None, mat_result=False):
if dof_map is not None:
self.set_dof(dof_map)
trans = self.env_body.GetLink(manip_name).GetTransform()
if mat_result:
return trans
pos = trans[:3, 3]
quat = quatFromRotationMatrix(trans[:3, :3])
return {'pos': pos, 'quat': quat}
def param_fwd_kinematics(self, param, manip_names, t, mat_result=False):
if not isinstance(self._geom, Robot): return
attrs = list(param._attr_types.keys())
dof_val = self.env_body.GetActiveDOFValues()
for attr in attrs:
if attr not in self._geom.dof_map: continue
val = getattr(param, attr)[:, t]
if np.any(np.isnan(val)): continue
inds = self._geom.dof_map[attr]
dof_val[inds] = val
self.env_body.SetActiveDOFValues(dof_val)
result = {}
for manip_name in manip_names:
result[manip_name] = self.fwd_kinematics(manip_name, mat_result=mat_result)
return result
| 40.00369 | 142 | 0.595102 | [
"MIT"
] | Algorithmic-Alignment-Lab/OpenTAMP | opentamp/src/core/util_classes/no_openrave_body.py | 21,682 | Python |
# coding: utf-8
"""
Account API
The <b>Account API</b> gives sellers the ability to configure their eBay seller accounts, including the seller's policies (the Fulfillment Policy, Payment Policy, and Return Policy), opt in and out of eBay seller programs, configure sales tax tables, and get account information. <br><br>For details on the availability of the methods in this API, see <a href=\"/api-docs/sell/account/overview.html#requirements\">Account API requirements and restrictions</a>. # noqa: E501
OpenAPI spec version: v1.6.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RateTableResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'rate_tables': 'list[RateTable]'
}
attribute_map = {
'rate_tables': 'rateTables'
}
def __init__(self, rate_tables=None): # noqa: E501
"""RateTableResponse - a model defined in Swagger""" # noqa: E501
self._rate_tables = None
self.discriminator = None
if rate_tables is not None:
self.rate_tables = rate_tables
@property
def rate_tables(self):
"""Gets the rate_tables of this RateTableResponse. # noqa: E501
A list of elements that provide information on the seller-defined shipping rate tables. # noqa: E501
:return: The rate_tables of this RateTableResponse. # noqa: E501
:rtype: list[RateTable]
"""
return self._rate_tables
@rate_tables.setter
def rate_tables(self, rate_tables):
"""Sets the rate_tables of this RateTableResponse.
A list of elements that provide information on the seller-defined shipping rate tables. # noqa: E501
:param rate_tables: The rate_tables of this RateTableResponse. # noqa: E501
:type: list[RateTable]
"""
self._rate_tables = rate_tables
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RateTableResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RateTableResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 33.734513 | 479 | 0.602833 | [
"MIT"
] | craiga/ebay_rest | src/ebay_rest/api/sell_account/models/rate_table_response.py | 3,812 | Python |
from setuptools import setup
import json
import glob
def readme():
with open('README.rst') as f:
return f.read()
def get_version():
with open('version.json') as json_file:
data = json.load(json_file)
if 'dev' in data:
return "{}.{}.{}-dev{}".format( data['major'], data['minor'], data['patch'], data['dev'])
return "{}.{}.{}".format( data['major'], data['minor'], data['patch'])
def get_requirements():
file_handle = open('requirements.txt', 'r')
data = file_handle.read()
file_handle.close()
return data.split("\n")
def scripts(directory='bin/*') -> []:
print(glob.glob( directory ))
return list(glob.glob( directory ))
setup(name='ecc',
version= get_version(),
description='Elastic Compute Cluster',
url='https://github.com/usegalaxy-no/ecc/',
author='Kim Brugger',
author_email='[email protected]',
license='MIT',
packages=['ecc'],
classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6'
],
      install_requires=get_requirements(),
scripts=scripts(),
)
| 24.276596 | 97 | 0.595092 | ["MIT"] | olafsarnow/ecc | setup.py | 1,141 | Python |
import numpy as np
import sys, os, re, gzip, struct
import random
import h5py
import copy
from keras import backend as K
from keras.utils import Sequence
import keras.utils
import tensorflow as tf
import multi_utils
import mat_utils
class NbestFixedDataGenerator(Sequence):
def __init__(self, file, key_file, batch_size=64, feat_dim=40, n_labels=1024,
procs=10, extras1=10, extras2=10, num_extras1=1, nbest=100, mode='train', shuffle=False,
mod=1):
self.file=file
self.batch_size=batch_size
self.feat_dim=feat_dim
self.n_labels=n_labels
self.procs=procs
self.extras1=extras1
self.extras2=extras2
self.num_extras1=num_extras1
self.nbest=nbest
self.shuffle=shuffle
self.keys=[]
self.sorted_keys=[]
self.mode=mode
        self.mod=mod
self.h5fd = h5py.File(self.file, 'r')
self.n_samples = len(self.h5fd.keys())
if key_file is not None:
with open(key_file, 'r') as f:
for line in f:
self.sorted_keys.append(line.strip())
for key in self.h5fd.keys():
self.keys.append(key)
if len(self.sorted_keys) > 0:
self.keys = self.sorted_keys
def __len__(self):
        return int(np.ceil(self.n_samples / self.batch_size))
def __getitem__(self, index, return_keys=False):
list_keys_temp = [self.keys[k] for k in range(index*self.batch_size,
min( (index+1)*self.batch_size,
len(self.keys) ) )]
# [input_sequences, label_sequences, inputs_lengths, labels_length]
if self.mode == 'train':
x, mask, y = self.__data_generation(list_keys_temp)
if return_keys == True:
return x, mask, y, list_keys_temp
else:
return x, mask, y
else:
x, mask = self.__data_generation(list_keys_temp)
if return_keys == True:
return x, mask, list_keys_temp
else:
return x, mask
def on_epoch_end(self):
if self.shuffle == True:
random.shuffle(self.keys)
def __data_generation(self, list_keys_temp):
max_num_blocks=0
max_num_frames=0
for i, key in enumerate(list_keys_temp):
mat = self.h5fd[key+'/data'][()]
mat = mat_utils.pad_mat(mat, self.mod)
[ex_blocks,ex_frames] = multi_utils.expected_num_blocks(mat,
self.procs,
self.extras1,
self.extras2,
self.num_extras1)
if ex_blocks > max_num_blocks:
max_num_blocks = ex_blocks
if ex_frames > max_num_frames:
max_num_frames = ex_frames
input_mat=np.zeros((len(list_keys_temp), max_num_blocks,
self.procs+max(self.extras1, self.extras2), self.feat_dim))
input_mask=np.zeros((len(list_keys_temp), max_num_blocks,
self.procs+max(self.extras1, self.extras2), 1))
        if self.mode == 'train':
            numer_labels=np.zeros((len(list_keys_temp), max_num_blocks,
                                   self.procs+max(self.extras1, self.extras2), self.n_labels+1))
            numer_lmscores = np.zeros((len(list_keys_temp), 1))
            denom_labels=np.zeros((len(list_keys_temp), self.nbest, max_num_blocks,
                                   self.procs+max(self.extras1, self.extras2), self.n_labels+1))
            denom_lmscores = np.zeros((len(list_keys_temp), self.nbest, 1))
for i, key in enumerate(list_keys_temp):
mat = self.h5fd[key+'/data'][()]
[ex_blocks, ex_frames] = multi_utils.expected_num_blocks(mat,
self.procs,
self.extras1,
self.extras2,
self.num_extras1)
blocked_mat, mask , _ = multi_utils.split_utt(mat, self.procs, self.extras1,
self.extras2,
self.num_extras1,
ex_blocks,
self.feat_dim, max_num_blocks)
input_mat[i,:,:,:] = np.expand_dims(blocked_mat, axis=0)
input_mask[i,:,:,:] = np.expand_dims(mask, axis=0)
            if self.mode == 'train':
                # label is a list of strings starting from 0
                numer = self.h5fd[key+'/1best'][()]
                numer_label_seq = multi_utils.str2dict(numer)
                numer_lmscores[i,0] = self.h5fd[key+'/1best_scores'][()]
                denom = self.h5fd[key+'/nbest'][()]
                denom_label_seq = multi_utils.str2nbest(denom)
                denom_lmscores[i, :, 0] = self.h5fd[key+'/nbest_scores'][()]
                # w/o padding for convenience
                # splitting labels
                # (blocks, frames, feats)
                numer_blocked_labels = multi_utils.split_post_label(numer_label_seq, self.procs, self.extras1,
                                                                    self.extras2, self.num_extras1, ex_blocks,
                                                                    self.n_labels+1, max_num_blocks)
                # expand dimensions along with batch dim.
                numer_labels[i,:,:,:] = np.expand_dims(numer_blocked_labels, axis=0)
                # (nbest, blocks, time, feats)
                denom_blocked_labels = multi_utils.split_nbest_label(denom_label_seq, self.procs, self.extras1,
                                                                     self.extras2, self.num_extras1, ex_blocks,
                                                                     self.n_labels+1, max_num_blocks)
                denom_labels[i,:,:,:,:] = np.expand_dims(denom_blocked_labels, axis=0)
        # transpose batch and block axes for outer loop in training
        input_mat = input_mat.transpose((1,0,2,3))
        input_mask = input_mask.transpose((1,0,2,3))
        if self.mode == 'train':
            # transpose batch dim. <-> block dim.
            numer_labels = numer_labels.transpose((1,0,2,3)) # (batch, blocks, time, feats) -> (blocks, batch, time, feats)
            denom_labels = denom_labels.transpose((2,1,0,3,4)) # (batch, nbest, blocks, time, feats) -> (nbest, blocks, batch, time, feats)
if self.mode == 'train':
return input_mat, input_mask, [numer_labels, numer_lmscores, denom_labels, denom_lmscores]
else:
return input_mat, input_mask
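# Hedged usage sketch (not part of the original module). The file name and the
# layout of the HDF5 data are assumptions for illustration only:
#
#   gen = NbestFixedDataGenerator(file='features.h5', key_file=None,
#                                 batch_size=32, feat_dim=40, n_labels=1024,
#                                 mode='train', shuffle=True)
#   x, mask, y = gen[0]  # first mini-batch: inputs, masks and label targets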
| 46.411392 | 137 | 0.505387 | ["Apache-2.0"] | akio-kobayashi/lc_lstm | nbest_multi_fixed_generator.py | 7,333 | Python |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import re
import sys
try:
    from urllib.request import urlretrieve  # Python 3
except ImportError:
    from urllib import urlretrieve  # Python 2 fallback
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py
from torchreid.utils.iotools import mkdir_if_missing, write_json, read_json
from .bases import BaseVideoDataset
class DukeMTMCVidReID(BaseVideoDataset):
"""
DukeMTMCVidReID
Reference:
Wu et al. Exploit the Unknown Gradually: One-Shot Video-Based Person
Re-Identification by Stepwise Learning. CVPR 2018.
URL: https://github.com/Yu-Wu/DukeMTMC-VideoReID
Dataset statistics:
# identities: 702 (train) + 702 (test)
# tracklets: 2196 (train) + 2636 (test)
"""
dataset_dir = 'dukemtmc-vidreid'
def __init__(self, root='data', min_seq_len=0, verbose=True, **kwargs):
self.dataset_dir = osp.join(root, self.dataset_dir)
self.dataset_url = 'http://vision.cs.duke.edu/DukeMTMC/data/misc/DukeMTMC-VideoReID.zip'
self.train_dir = osp.join(self.dataset_dir, 'DukeMTMC-VideoReID/train')
self.query_dir = osp.join(self.dataset_dir, 'DukeMTMC-VideoReID/query')
self.gallery_dir = osp.join(self.dataset_dir, 'DukeMTMC-VideoReID/gallery')
self.split_train_json_path = osp.join(self.dataset_dir, 'split_train.json')
self.split_query_json_path = osp.join(self.dataset_dir, 'split_query.json')
self.split_gallery_json_path = osp.join(self.dataset_dir, 'split_gallery.json')
self.min_seq_len = min_seq_len
self._download_data()
self._check_before_run()
print("Note: if root path is changed, the previously generated json files need to be re-generated (so delete them first)")
train = self._process_dir(self.train_dir, self.split_train_json_path, relabel=True)
query = self._process_dir(self.query_dir, self.split_query_json_path, relabel=False)
gallery = self._process_dir(self.gallery_dir, self.split_gallery_json_path, relabel=False)
if verbose:
print("=> DukeMTMC-VideoReID loaded")
self.print_dataset_statistics(train, query, gallery)
self.train = train
self.query = query
self.gallery = gallery
self.num_train_pids, _, self.num_train_cams = self.get_videodata_info(self.train)
self.num_query_pids, _, self.num_query_cams = self.get_videodata_info(self.query)
self.num_gallery_pids, _, self.num_gallery_cams = self.get_videodata_info(self.gallery)
def _download_data(self):
if osp.exists(self.dataset_dir):
print("This dataset has been downloaded.")
return
print("Creating directory {}".format(self.dataset_dir))
mkdir_if_missing(self.dataset_dir)
fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))
print("Downloading DukeMTMC-VideoReID dataset")
        urlretrieve(self.dataset_url, fpath)
print("Extracting files")
zip_ref = zipfile.ZipFile(fpath, 'r')
zip_ref.extractall(self.dataset_dir)
zip_ref.close()
def _check_before_run(self):
"""Check if all files are available before going deeper"""
if not osp.exists(self.dataset_dir):
raise RuntimeError("'{}' is not available".format(self.dataset_dir))
if not osp.exists(self.train_dir):
raise RuntimeError("'{}' is not available".format(self.train_dir))
if not osp.exists(self.query_dir):
raise RuntimeError("'{}' is not available".format(self.query_dir))
if not osp.exists(self.gallery_dir):
raise RuntimeError("'{}' is not available".format(self.gallery_dir))
def _process_dir(self, dir_path, json_path, relabel):
if osp.exists(json_path):
print("=> {} generated before, awesome!".format(json_path))
split = read_json(json_path)
return split['tracklets']
print("=> Automatically generating split (might take a while for the first time, have a coffe)")
pdirs = glob.glob(osp.join(dir_path, '*')) # avoid .DS_Store
print("Processing '{}' with {} person identities".format(dir_path, len(pdirs)))
pid_container = set()
for pdir in pdirs:
pid = int(osp.basename(pdir))
pid_container.add(pid)
pid2label = {pid:label for label, pid in enumerate(pid_container)}
tracklets = []
for pdir in pdirs:
pid = int(osp.basename(pdir))
if relabel: pid = pid2label[pid]
tdirs = glob.glob(osp.join(pdir, '*'))
for tdir in tdirs:
raw_img_paths = glob.glob(osp.join(tdir, '*.jpg'))
num_imgs = len(raw_img_paths)
if num_imgs < self.min_seq_len:
continue
img_paths = []
for img_idx in range(num_imgs):
# some tracklet starts from 0002 instead of 0001
img_idx_name = 'F' + str(img_idx+1).zfill(4)
res = glob.glob(osp.join(tdir, '*' + img_idx_name + '*.jpg'))
if len(res) == 0:
print("Warn: index name {} in {} is missing, jump to next".format(img_idx_name, tdir))
continue
img_paths.append(res[0])
img_name = osp.basename(img_paths[0])
if img_name.find('_') == -1:
# old naming format: 0001C6F0099X30823.jpg
camid = int(img_name[5]) - 1
else:
# new naming format: 0001_C6_F0099_X30823.jpg
camid = int(img_name[6]) - 1
img_paths = tuple(img_paths)
tracklets.append((img_paths, pid, camid))
print("Saving split to {}".format(json_path))
split_dict = {
'tracklets': tracklets,
}
write_json(split_dict, json_path)
        return tracklets
| 40.61745 | 130 | 0.632022 | ["MIT"] | ArronHZG/ABD-Net | torchreid/datasets/dukemtmcvidreid.py | 6,052 | Python |
# Copyright (c) 2019 Eric Steinberger
import numpy as np
from PokerRL.rl.base_cls.workers.WorkerBase import WorkerBase
class EvaluatorMasterBase(WorkerBase):
"""
Baseclass to all Evaluators. An Evaluator is an algorithm to evaluate an agent's performance in a certain metric.
"""
def __init__(self, t_prof, eval_env_bldr, chief_handle, evaluator_name, log_conf_interval=False):
"""
Args:
t_prof (TrainingProfile)
chief_handle (class instance or ray ActorHandle)
evaluator_name (str): Name of the evaluator
"""
super().__init__(t_prof=t_prof)
self._eval_env_bldr = eval_env_bldr
self._chief_handle = chief_handle
self._is_multi_stack = len(self._t_prof.eval_stack_sizes) > 1
self._log_conf_interval = log_conf_interval
self._evaluator_name = evaluator_name
self._exp_name_total, self._exp_names_conf = self._create_experiments(self_name=evaluator_name)
if self._is_multi_stack:
self._exp_name_multi_stack = {
eval_mode:
self._ray.get(
self._ray.remote(self._chief_handle.create_experiment,
self._t_prof.name
+ " " + eval_mode
+ "Multi_Stack"
+ ": " + evaluator_name
+ " Averaged Total"))
for eval_mode in self._t_prof.eval_modes_of_algo
}
if self._log_conf_interval:
self._exp_names_multi_stack_conf = {
eval_mode:
self._ray.get(
[
self._ray.remote(self._chief_handle.create_experiment,
self._t_prof.name
+ " " + eval_mode
+ ": " + evaluator_name
+ " Conf_" + bound_end)
for bound_end in ["lower95", "upper95"]
]
)
for eval_mode in self._t_prof.eval_modes_of_algo
}
@property
def is_multi_stack(self):
"""
Whether the agent is evaluated in games that start with different stack sizes each time.
"""
return self._is_multi_stack
def evaluate(self, iter_nr):
""" Evaluate an agent and send the results as logs to the Chief. """
raise NotImplementedError
def update_weights(self):
""" Update the local weights on the master, for instance by calling .pull_current_strat_from_chief() """
raise NotImplementedError
def pull_current_strat_from_chief(self):
"""
Pulls and Returns weights or any other changing algorithm info of any format from the Chief.
"""
return self._ray.get(self._ray.remote(self._chief_handle.pull_current_eval_strategy,
self._evaluator_name
))
def _create_experiments(self, self_name, ):
"""
Registers a new experiment either for each player and their average or just for their average.
"""
if self._log_conf_interval:
exp_names_conf = {
eval_mode:
[
self._ray.get(
[
self._ray.remote(self._chief_handle.create_experiment,
self._t_prof.name
+ " " + eval_mode
+ "_stack_" + str(stack_size[0])
+ ": " + self_name
+ " Conf_" + bound_end)
for bound_end in ["lower95", "upper95"]
]
)
for stack_size in self._t_prof.eval_stack_sizes
]
for eval_mode in self._t_prof.eval_modes_of_algo
}
else:
exp_names_conf = None
exp_name_total = {
eval_mode:
[
self._ray.get(
self._ray.remote(self._chief_handle.create_experiment,
self._t_prof.name
+ " " + eval_mode
+ "_stack_" + str(stack_size[0])
+ ": " + self_name
+ " Total"))
for stack_size in self._t_prof.eval_stack_sizes
]
for eval_mode in self._t_prof.eval_modes_of_algo
}
return exp_name_total, exp_names_conf
def _get_95confidence(self, scores):
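        # Returns the sample mean and the half-width of its 95% confidence
        # interval (1.96 * standard error, where standard error = std / sqrt(n)).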
mean = np.mean(scores).item()
std = np.std(scores).item()
_d = 1.96 * std / np.sqrt(scores.shape[0])
return float(mean), float(_d)
def _log_results(self, agent_mode, stack_size_idx, iter_nr, score, upper_conf95=None, lower_conf95=None):
"""
Log evaluation results by sending these results to the Chief, who will later send them to the Crayon log server.
Args:
agent_mode: Evaluation mode of the agent whose performance is logged
stack_size_idx: If evaluating multiple starting stack sizes, this is an index describing which one
this data is from.
iter_nr: Algorithm Iteration of this data
score: Score in this evaluation (e.g. exploitability)
"""
graph_name = "Evaluation/" + self._eval_env_bldr.env_cls.WIN_METRIC
self._ray.remote(self._chief_handle.add_scalar,
self._exp_name_total[agent_mode][stack_size_idx], graph_name, iter_nr, score)
if self._log_conf_interval:
assert upper_conf95 is not None
assert lower_conf95 is not None
self._ray.remote(self._chief_handle.add_scalar,
self._exp_names_conf[agent_mode][stack_size_idx][0], graph_name, iter_nr, lower_conf95)
self._ray.remote(self._chief_handle.add_scalar,
self._exp_names_conf[agent_mode][stack_size_idx][1], graph_name, iter_nr, upper_conf95)
def _log_multi_stack(self, agent_mode, iter_nr, score_total, upper_conf95=None, lower_conf95=None):
"""
Additional logging for multistack evaluations
"""
graph_name = "Evaluation/" + self._eval_env_bldr.env_cls.WIN_METRIC
self._ray.remote(self._chief_handle.add_scalar,
self._exp_name_multi_stack[agent_mode], graph_name, iter_nr, score_total)
if self._log_conf_interval:
assert upper_conf95 is not None
assert lower_conf95 is not None
self._ray.remote(self._chief_handle.add_scalar,
self._exp_names_multi_stack_conf[agent_mode][0], graph_name, iter_nr, lower_conf95)
self._ray.remote(self._chief_handle.add_scalar,
self._exp_names_multi_stack_conf[agent_mode][1], graph_name, iter_nr, upper_conf95)
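# A minimal, hedged sketch (not part of PokerRL) of how a concrete evaluator
# could subclass EvaluatorMasterBase: override evaluate() and update_weights()
# and reuse the logging helpers above. The class name, the placeholder scores,
# and the single stack_size_idx are hypothetical and only illustrate the flow.
class _ExampleEvaluator(EvaluatorMasterBase):
    def update_weights(self):
        # Pull the latest strategy from the Chief before evaluating.
        self._latest_strategy = self.pull_current_strat_from_chief()
    def evaluate(self, iter_nr):
        # Placeholder scores; a real evaluator would obtain these from
        # evaluation games played with the current strategy.
        scores = np.random.normal(loc=0.0, scale=1.0, size=128)
        mean, conf95 = self._get_95confidence(scores)
        for agent_mode in self._t_prof.eval_modes_of_algo:
            self._log_results(agent_mode=agent_mode, stack_size_idx=0, iter_nr=iter_nr,
                              score=mean, upper_conf95=mean + conf95, lower_conf95=mean - conf95)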
| 43.873563 | 120 | 0.521352 | ["MIT"] | EricSteinberger/DREAM | PokerRL/eval/_/EvaluatorMasterBase.py | 7,634 | Python |
import pytest
from pytest_httpx import HTTPXMock
from coinpaprika_async import client as async_client, ResponseObject
client = async_client.Client()
@pytest.mark.asyncio
async def test_mock_async_price_conv(httpx_mock: HTTPXMock):
params = {"base_currency_id": "btc-bitcoin", "quote_currency_id": "usd-us-dollars", "amount": 1337}
json = {
"base_currency_id": "btc-bitcoin",
"base_currency_name": "Bitcoin",
"base_price_last_updated": "2022-01-16T23:46:14Z",
"quote_currency_id": "xmr-monero",
"quote_currency_name": "Monero",
"quote_price_last_updated": "2022-01-16T23:46:14Z",
"amount": 12.2,
"price": 2336.6037613108747,
}
httpx_mock.add_response(json=json)
response = await client.price_converter(params=params)
assert response.status_code == 200
assert response.data is not None
assert response.data != {}
@pytest.mark.asyncio
async def test_failed_api_call(httpx_mock: HTTPXMock):
json_obj = {"error": "id not found"}
httpx_mock.add_response(json=json_obj, status_code=404)
response: ResponseObject = await client.coin("eth")
assert response.status_code == 404
| 26.555556 | 103 | 0.700418 | ["MIT"] | DroidZed/coinpaprika-async-client | test/test_client.py | 1,195 | Python |
# setup.py file
from setuptools import find_packages, setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="my-lambdata-dspt5", # the name that you will install via pip
version="1.0",
author="Devvin Kraatz",
author_email="[email protected] ",
description="Made as an example while taking Lambda School's Data Science Course, come join it's highly recommended!",
long_description=long_description,
long_description_content_type="text/markdown",
# required if using a md file for long desc
# license="MIT",
url="https://github.com/YOUR_USERNAME/YOUR_REPO_NAME",
# keywords="",
packages=find_packages() # ["my_lambdata"]
)
| 32.227273 | 122 | 0.700987 | ["MIT"] | jiobu1/DS-Unit-3-Sprint-1-Software-Engineering | module2-oop-code-style-and-reviews/lambdata_dspt5/setup.py | 709 | Python |
#! /usr/local/bin/python
# pylint: disable=invalid-name
# pylint: disable=missing-docstring
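# The expected layout of table.txt is inferred from the parsing below and is
# not documented here: pipe-separated fields with the EPSG code in field 0 and
# a WKT definition (possibly followed by proj4 "+units=" text) in field 3.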
with open("table.txt", "r") as f:
print "projections = ("
lines = f.readlines()
units_list = []
for line in lines:
lineparts = line.rstrip().split("|")
epsg = lineparts[0]
wkt = lineparts[3]
name = wkt.split("\"")[1]
location = line.find("+units=")
if location == -1:
unit_index = line.find("UNIT[")
unit_code = line[unit_index:].rstrip().split("\"")[1]
units_list.append(unit_code)
else:
unit_code = line[location:].rstrip().split(" ")[0].split("=")[1]
units_list.append(unit_code)
if unit_code == "m":
unit = "Meter"
unit_factor = 1
elif unit_code == "ft":
unit = "International Foot"
unit_factor = 0.3048
elif unit_code == "us-ft":
unit = "US Survey Foot"
unit_factor = 0.3048006096012192
elif unit_code == "grad":
unit = "Gradian"
unit_factor = 0.01470796326794897
elif unit_code == "degree":
unit = "Degree"
unit_factor = 0.0174532925199433
else:
unit = "Unknown"
unit_factor = 0
print "{"
print "wkt = \"" + wkt.replace("\"", "\\\"") + "\";"
print "name = \"" + name + "\";"
print "unit = \"" + unit + "\";"
print "unit_factor = " + str(unit_factor) + ";"
print "epsg = " + str(epsg) + ";"
print "},"
print ")"
| 29.481481 | 76 | 0.491834 | ["MIT"] | mbasanta/projective-detective | projection-scraping/parseSRS.py | 1,592 | Python |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import math
from ccxt.base.errors import ExchangeError
class coingi(Exchange):
def describe(self):
return self.deep_extend(super(coingi, self).describe(), {
'id': 'coingi',
'name': 'Coingi',
'rateLimit': 1000,
'countries': ['PA', 'BG', 'CN', 'US'], # Panama, Bulgaria, China, US
'has': {
'CORS': False,
'fetchTickers': True,
},
'urls': {
'referral': 'https://www.coingi.com/?r=XTPPMC',
'logo': 'https://user-images.githubusercontent.com/1294454/28619707-5c9232a8-7212-11e7-86d6-98fe5d15cc6e.jpg',
'api': {
'www': 'https://coingi.com',
'current': 'https://api.coingi.com',
'user': 'https://api.coingi.com',
},
'www': 'https://coingi.com',
'doc': 'https://coingi.docs.apiary.io',
},
'api': {
'www': {
'get': [
'',
],
},
'current': {
'get': [
'order-book/{pair}/{askCount}/{bidCount}/{depth}',
'transactions/{pair}/{maxCount}',
'24hour-rolling-aggregation',
],
},
'user': {
'post': [
'balance',
'add-order',
'cancel-order',
'orders',
'transactions',
'create-crypto-withdrawal',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'taker': 0.2 / 100,
'maker': 0.2 / 100,
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {
'BTC': 0.001,
'LTC': 0.01,
'DOGE': 2,
'PPC': 0.02,
'VTC': 0.2,
'NMC': 2,
'DASH': 0.002,
'USD': 10,
'EUR': 10,
},
'deposit': {
'BTC': 0,
'LTC': 0,
'DOGE': 0,
'PPC': 0,
'VTC': 0,
'NMC': 0,
'DASH': 0,
'USD': 5,
'EUR': 1,
},
},
},
})
def fetch_markets(self, params={}):
response = self.wwwGet(params)
parts = response.split('do=currencyPairSelector-selectCurrencyPair" class="active">')
currencyParts = parts[1].split('<div class="currency-pair-label">')
result = []
for i in range(1, len(currencyParts)):
currencyPart = currencyParts[i]
idParts = currencyPart.split('</div>')
id = idParts[0]
id = id.replace('/', '-')
id = id.lower()
baseId, quoteId = id.split('-')
base = baseId.upper()
quote = quoteId.upper()
base = self.safe_currency_code(base)
quote = self.safe_currency_code(quote)
symbol = base + '/' + quote
precision = {
'amount': 8,
'price': 8,
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': id,
'active': True,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision['amount']),
'max': math.pow(10, precision['amount']),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
'cost': {
'min': 0,
'max': None,
},
},
})
return result
def fetch_balance(self, params={}):
self.load_markets()
lowercaseCurrencies = []
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
lowercaseCurrencies.append(currency.lower())
request = {
'currencies': ','.join(lowercaseCurrencies),
}
response = self.userPostBalance(self.extend(request, params))
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance['currency'], 'name')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_float(balance, 'available')
blocked = self.safe_float(balance, 'blocked')
inOrders = self.safe_float(balance, 'inOrders')
withdrawing = self.safe_float(balance, 'withdrawing')
account['used'] = self.sum(blocked, inOrders, withdrawing)
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=512, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'depth': 32, # maximum number of depth range steps 1-32
'askCount': limit, # maximum returned number of asks 1-512
'bidCount': limit, # maximum returned number of bids 1-512
}
orderbook = self.currentGetOrderBookPairAskCountBidCountDepth(self.extend(request, params))
return self.parse_order_book(orderbook, None, 'bids', 'asks', 'price', 'baseAmount')
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market is not None:
symbol = market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'highestBid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'lowestAsk'),
'askVolume': None,
'vwap': None,
'open': None,
'close': None,
'last': None,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'baseVolume'),
'quoteVolume': self.safe_float(ticker, 'counterVolume'),
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.currentGet24hourRollingAggregation(params)
result = {}
for t in range(0, len(response)):
ticker = response[t]
base = ticker['currencyPair']['base'].upper()
quote = ticker['currencyPair']['counter'].upper()
symbol = base + '/' + quote
market = None
if symbol in self.markets:
market = self.markets[symbol]
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_ticker(self, symbol, params={}):
self.load_markets()
tickers = self.fetch_tickers(None, params)
if symbol in tickers:
return tickers[symbol]
raise ExchangeError(self.id + ' return did not contain ' + symbol)
def parse_trade(self, trade, market=None):
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'amount')
cost = None
if price is not None:
if amount is not None:
cost = price * amount
timestamp = self.safe_integer(trade, 'timestamp')
id = self.safe_string(trade, 'id')
marketId = self.safe_string(trade, 'currencyPair')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = None
if market is not None:
symbol = market['symbol']
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': None, # type
'order': None,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'maxCount': 128,
}
response = self.currentGetTransactionsPairMaxCount(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
request = {
'currencyPair': self.market_id(symbol),
'volume': amount,
'price': price,
'orderType': 0 if (side == 'buy') else 1,
}
response = self.userPostAddOrder(self.extend(request, params))
return {
'info': response,
'id': response['result'],
}
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'orderId': id,
}
return self.userPostCancelOrder(self.extend(request, params))
def sign(self, path, api='current', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api]
if api != 'www':
url += '/' + api + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'current':
if query:
url += '?' + self.urlencode(query)
elif api == 'user':
self.check_required_credentials()
nonce = self.nonce()
request = self.extend({
'token': self.apiKey,
'nonce': nonce,
}, query)
auth = str(nonce) + '$' + self.apiKey
request['signature'] = self.hmac(self.encode(auth), self.encode(self.secret))
body = self.json(request)
headers = {
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='current', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
if not isinstance(response, basestring):
if 'errors' in response:
raise ExchangeError(self.id + ' ' + self.json(response))
return response
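# Hedged usage sketch (not part of the generated exchange class); typical ccxt
# usage looks roughly like this, assuming the symbol exists on the exchange:
#
#   import ccxt
#   exchange = ccxt.coingi()
#   print(exchange.fetch_tickers())
#   print(exchange.fetch_order_book('BTC/USD'))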
| 36.740061 | 126 | 0.462377 | ["MIT"] | 5747018167/ccxt | python/ccxt/coingi.py | 12,014 | Python |
# -*- coding: utf-8 -*-
import json
import copy
import types
import inspect
import re
import traceback
import datetime
import markdown2
import semver
import functools
import platform
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# Constants
VERSION = "1.0.3"
BREAKINGVERSIONS = ["0.2.9-beta"]
WIN = platform.system() == "Windows"
MAC = platform.system() == "Darwin"
LINUX = platform.system() == "Linux"
# Response types (success, error, ...)
SUCCESS = "success"
ERROR = "error"
UNKNOWNFONT = "unknownFont"
INSUFFICIENTPERMISSION = "insufficientPermission"
SEATALLOWANCEREACHED = "seatAllowanceReached"
EXPIRED = "fontExpired"
UNKNOWNINSTALLATION = "unknownInstallation"
NOFONTSAVAILABLE = "noFontsAvailable"
TEMPORARILYUNAVAILABLE = "temporarilyUnavailable"
VALIDTYPEWORLDUSERACCOUNTREQUIRED = "validTypeWorldUserAccountRequired"
REVEALEDUSERIDENTITYREQUIRED = "revealedUserIdentityRequired"
LOGINREQUIRED = "loginRequired"
PROTOCOLS = ["typeworld"]
RESPONSES = {
SUCCESS: "The request has been processed successfully.",
ERROR: "There request produced an error. You may add a custom error message in the `errorMessage` field.",
UNKNOWNFONT: "No font could be identified for the given `fontID`.",
EXPIRED: "This font installation has expired.",
INSUFFICIENTPERMISSION: (
"The Type.World user account credentials "
"couldn’t be confirmed by the publisher (which are checked with the "
"central server) and therefore access to the subscription is denied."
),
SEATALLOWANCEREACHED: (
"The user has exhausted their seat allowances for "
"this font. The app may take them to the publisher’s website as "
"defined in ::LicenseUsage.upgradeURL:: to upgrade their font license."
),
UNKNOWNINSTALLATION: (
"This font installation (combination of app instance and user "
"credentials) is unknown. The response with this error message is "
"crucial to remote de-authorization of app instances. When a user "
"de-authorizes an entire app instance’s worth of font installations, "
"such as when a computer got bricked and re-installed or is lost, the "
"success of the remote de-authorization process is judged by either "
"`success` responses (app actually had this font installed and its "
"deletion has been recorded) or `unknownInstallation` responses "
"(app didn’t have this font installed). All other reponses count as "
"errors in the remote de-authorization process."
),
NOFONTSAVAILABLE: "This subscription exists but carries no fonts at the moment.",
TEMPORARILYUNAVAILABLE: "The service is temporarily unavailable but should work again later on.",
VALIDTYPEWORLDUSERACCOUNTREQUIRED: (
"The access to this subscription requires a valid Type.World user account connected to an app."
),
REVEALEDUSERIDENTITYREQUIRED: (
"The access to this subscription requires a valid Type.World user "
"account and that the user agrees to having their identity "
"(name and email address) submitted to the publisher upon font "
"installation (closed workgroups only)."
),
LOGINREQUIRED: (
"The access to this subscription requires that the user logs into "
"the publisher’s website again to authenticate themselves. "
"Normally, this happens after a subscription’s secret key has been "
"invalidated. The user will be taken to the publisher’s website "
"defined at ::EndpointResponse.loginURL::. After successful login, "
"a button should be presented to the user to reconnect to the same "
"subscription that they are trying to access. To identify the "
"subscription, the link that the user will be taken to will carry a "
"`subscriptionID` parameter with the subscriptionID as defined in "
"the subscription’s URL."
),
}
# Commands
ENDPOINTCOMMAND = {
"keyword": "endpoint",
"currentVersion": VERSION,
"responseTypes": [SUCCESS, ERROR],
"acceptableMimeTypes": ["application/json"],
}
INSTALLABLEFONTSCOMMAND = {
"keyword": "installableFonts",
"currentVersion": VERSION,
"responseTypes": [
SUCCESS,
ERROR,
NOFONTSAVAILABLE,
INSUFFICIENTPERMISSION,
TEMPORARILYUNAVAILABLE,
VALIDTYPEWORLDUSERACCOUNTREQUIRED,
],
"acceptableMimeTypes": ["application/json"],
}
INSTALLFONTSCOMMAND = {
"keyword": "installFonts",
"currentVersion": VERSION,
"responseTypes": [
SUCCESS,
ERROR,
INSUFFICIENTPERMISSION,
TEMPORARILYUNAVAILABLE,
VALIDTYPEWORLDUSERACCOUNTREQUIRED,
LOGINREQUIRED,
REVEALEDUSERIDENTITYREQUIRED,
],
"acceptableMimeTypes": ["application/json"],
}
UNINSTALLFONTSCOMMAND = {
"keyword": "uninstallFonts",
"currentVersion": VERSION,
"responseTypes": [
SUCCESS,
ERROR,
INSUFFICIENTPERMISSION,
TEMPORARILYUNAVAILABLE,
VALIDTYPEWORLDUSERACCOUNTREQUIRED,
LOGINREQUIRED,
],
"acceptableMimeTypes": ["application/json"],
}
INSTALLFONTASSETCOMMAND = {
"responseTypes": [
SUCCESS,
ERROR,
UNKNOWNFONT,
INSUFFICIENTPERMISSION,
TEMPORARILYUNAVAILABLE,
VALIDTYPEWORLDUSERACCOUNTREQUIRED,
LOGINREQUIRED,
REVEALEDUSERIDENTITYREQUIRED,
SEATALLOWANCEREACHED,
EXPIRED,
],
}
UNINSTALLFONTASSETCOMMAND = {
"responseTypes": [
SUCCESS,
ERROR,
UNKNOWNFONT,
INSUFFICIENTPERMISSION,
TEMPORARILYUNAVAILABLE,
VALIDTYPEWORLDUSERACCOUNTREQUIRED,
LOGINREQUIRED,
UNKNOWNINSTALLATION,
],
}
COMMANDS = [
ENDPOINTCOMMAND,
INSTALLABLEFONTSCOMMAND,
INSTALLFONTSCOMMAND,
UNINSTALLFONTSCOMMAND,
]
FONTPURPOSES = {
"desktop": {
"acceptableMimeTypes": [
"font/collection",
"font/otf",
"font/sfnt",
"font/ttf",
],
},
"web": {"acceptableMimeTypes": ["application/zip"]},
"app": {"acceptableMimeTypes": ["application/zip"]},
}
# https://tools.ietf.org/html/rfc8081
MIMETYPES = {
"font/sfnt": {"fileExtensions": ["otf", "ttf"]},
"font/ttf": {"fileExtensions": ["ttf"]},
"font/otf": {"fileExtensions": ["otf"]},
"font/collection": {"fileExtensions": ["ttc"]},
"font/woff": {"fileExtensions": ["woff"]},
"font/woff2": {"fileExtensions": ["woff2"]},
}
# Compile list of file extensions
FILEEXTENSIONS = []
for mimeType in list(MIMETYPES.keys()):
FILEEXTENSIONS = list(set(FILEEXTENSIONS) | set(MIMETYPES[mimeType]["fileExtensions"]))
FILEEXTENSIONNAMES = {
"otf": "OpenType",
"ttf": "TrueType",
"ttc": "TrueType collection",
"woff": "WOFF",
"woff2": "WOFF2",
}
MIMETYPEFORFONTTYPE = {
"otf": "font/otf",
"ttf": "font/ttf",
"ttc": "font/collection",
"woff": "font/woff",
"woff2": "font/woff2",
}
FONTENCODINGS = ["base64"]
OPENSOURCELICENSES = [
"0BSD",
"AAL",
"Abstyles",
"Adobe-2006",
"Adobe-Glyph",
"ADSL",
"AFL-1.1",
"AFL-1.2",
"AFL-2.0",
"AFL-2.1",
"AFL-3.0",
"Afmparse",
"AGPL-1.0",
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"Aladdin",
"AMDPLPA",
"AML",
"AMPAS",
"ANTLR-PD",
"Apache-1.0",
"Apache-1.1",
"Apache-2.0",
"APAFML",
"APL-1.0",
"APSL-1.0",
"APSL-1.1",
"APSL-1.2",
"APSL-2.0",
"Artistic-1.0-cl8",
"Artistic-1.0-Perl",
"Artistic-1.0",
"Artistic-2.0",
"Bahyph",
"Barr",
"Beerware",
"BitTorrent-1.0",
"BitTorrent-1.1",
"Borceux",
"BSD-1-Clause",
"BSD-2-Clause-FreeBSD",
"BSD-2-Clause-NetBSD",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause-Attribution",
"BSD-3-Clause-Clear",
"BSD-3-Clause-LBNL",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-No-Nuclear-License",
"BSD-3-Clause-No-Nuclear-Warranty",
"BSD-3-Clause",
"BSD-4-Clause-UC",
"BSD-4-Clause",
"BSD-Protection",
"BSD-Source-Code",
"BSL-1.0",
"bzip2-1.0.5",
"bzip2-1.0.6",
"Caldera",
"CATOSL-1.1",
"CC-BY-1.0",
"CC-BY-2.0",
"CC-BY-2.5",
"CC-BY-3.0",
"CC-BY-4.0",
"CC-BY-NC-1.0",
"CC-BY-NC-2.0",
"CC-BY-NC-2.5",
"CC-BY-NC-3.0",
"CC-BY-NC-4.0",
"CC-BY-NC-ND-1.0",
"CC-BY-NC-ND-2.0",
"CC-BY-NC-ND-2.5",
"CC-BY-NC-ND-3.0",
"CC-BY-NC-ND-4.0",
"CC-BY-NC-SA-1.0",
"CC-BY-NC-SA-2.0",
"CC-BY-NC-SA-2.5",
"CC-BY-NC-SA-3.0",
"CC-BY-NC-SA-4.0",
"CC-BY-ND-1.0",
"CC-BY-ND-2.0",
"CC-BY-ND-2.5",
"CC-BY-ND-3.0",
"CC-BY-ND-4.0",
"CC-BY-SA-1.0",
"CC-BY-SA-2.0",
"CC-BY-SA-2.5",
"CC-BY-SA-3.0",
"CC-BY-SA-4.0",
"CC0-1.0",
"CDDL-1.0",
"CDDL-1.1",
"CDLA-Permissive-1.0",
"CDLA-Sharing-1.0",
"CECILL-1.0",
"CECILL-1.1",
"CECILL-2.0",
"CECILL-2.1",
"CECILL-B",
"CECILL-C",
"ClArtistic",
"CNRI-Jython",
"CNRI-Python-GPL-Compatible",
"CNRI-Python",
"Condor-1.1",
"CPAL-1.0",
"CPL-1.0",
"CPOL-1.02",
"Crossword",
"CrystalStacker",
"CUA-OPL-1.0",
"Cube",
"curl",
"D-FSL-1.0",
"diffmark",
"DOC",
"Dotseqn",
"DSDP",
"dvipdfm",
"ECL-1.0",
"ECL-2.0",
"EFL-1.0",
"EFL-2.0",
"eGenix",
"Entessa",
"EPL-1.0",
"EPL-2.0",
"ErlPL-1.1",
"EUDatagrid",
"EUPL-1.0",
"EUPL-1.1",
"EUPL-1.2",
"Eurosym",
"Fair",
"Frameworx-1.0",
"FreeImage",
"FSFAP",
"FSFUL",
"FSFULLR",
"FTL",
"GFDL-1.1-only",
"GFDL-1.1-or-later",
"GFDL-1.2-only",
"GFDL-1.2-or-later",
"GFDL-1.3-only",
"GFDL-1.3-or-later",
"Giftware",
"GL2PS",
"Glide",
"Glulxe",
"gnuplot",
"GPL-1.0-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"GPL-2.0-or-later",
"GPL-3.0-only",
"GPL-3.0-or-later",
"gSOAP-1.3b",
"HaskellReport",
"HPND",
"IBM-pibs",
"ICU",
"IJG",
"ImageMagick",
"iMatix",
"Imlib2",
"Info-ZIP",
"Intel-ACPI",
"Intel",
"Interbase-1.0",
"IPA",
"IPL-1.0",
"ISC",
"JasPer-2.0",
"JSON",
"LAL-1.2",
"LAL-1.3",
"Latex2e",
"Leptonica",
"LGPL-2.0-only",
"LGPL-2.0-or-later",
"LGPL-2.1-only",
"LGPL-2.1-or-later",
"LGPL-3.0-only",
"LGPL-3.0-or-later",
"LGPLLR",
"Libpng",
"libtiff",
"LiLiQ-P-1.1",
"LiLiQ-R-1.1",
"LiLiQ-Rplus-1.1",
"LPL-1.0",
"LPL-1.02",
"LPPL-1.0",
"LPPL-1.1",
"LPPL-1.2",
"LPPL-1.3a",
"LPPL-1.3c",
"MakeIndex",
"MirOS",
"MIT-advertising",
"MIT-CMU",
"MIT-enna",
"MIT-feh",
"MIT",
"MITNFA",
"Motosoto",
"mpich2",
"MPL-1.0",
"MPL-1.1",
"MPL-2.0-no-copyleft-exception",
"MPL-2.0",
"MS-PL",
"MS-RL",
"MTLL",
"Multics",
"Mup",
"NASA-1.3",
"Naumen",
"NBPL-1.0",
"NCSA",
"Net-SNMP",
"NetCDF",
"Newsletr",
"NGPL",
"NLOD-1.0",
"NLPL",
"Nokia",
"NOSL",
"Noweb",
"NPL-1.0",
"NPL-1.1",
"NPOSL-3.0",
"NRL",
"NTP",
"OCCT-PL",
"OCLC-2.0",
"ODbL-1.0",
"OFL-1.0",
"OFL-1.1",
"OGTSL",
"OLDAP-1.1",
"OLDAP-1.2",
"OLDAP-1.3",
"OLDAP-1.4",
"OLDAP-2.0.1",
"OLDAP-2.0",
"OLDAP-2.1",
"OLDAP-2.2.1",
"OLDAP-2.2.2",
"OLDAP-2.2",
"OLDAP-2.3",
"OLDAP-2.4",
"OLDAP-2.5",
"OLDAP-2.6",
"OLDAP-2.7",
"OLDAP-2.8",
"OML",
"OpenSSL",
"OPL-1.0",
"OSET-PL-2.1",
"OSL-1.0",
"OSL-1.1",
"OSL-2.0",
"OSL-2.1",
"OSL-3.0",
"PDDL-1.0",
"PHP-3.0",
"PHP-3.01",
"Plexus",
"PostgreSQL",
"psfrag",
"psutils",
"Python-2.0",
"Qhull",
"QPL-1.0",
"Rdisc",
"RHeCos-1.1",
"RPL-1.1",
"RPL-1.5",
"RPSL-1.0",
"RSA-MD",
"RSCPL",
"Ruby",
"SAX-PD",
"Saxpath",
"SCEA",
"Sendmail",
"SGI-B-1.0",
"SGI-B-1.1",
"SGI-B-2.0",
"SimPL-2.0",
"SISSL-1.2",
"SISSL",
"Sleepycat",
"SMLNJ",
"SMPPL",
"SNIA",
"Spencer-86",
"Spencer-94",
"Spencer-99",
"SPL-1.0",
"SugarCRM-1.1.3",
"SWL",
"TCL",
"TCP-wrappers",
"TMate",
"TORQUE-1.1",
"TOSL",
"Unicode-DFS-2015",
"Unicode-DFS-2016",
"Unicode-TOU",
"Unlicense",
"UPL-1.0",
"Vim",
"VOSTROM",
"VSL-1.0",
"W3C-19980720",
"W3C-20150513",
"W3C",
"Watcom-1.0",
"Wsuipa",
"WTFPL",
"X11",
"Xerox",
"XFree86-1.1",
"xinetd",
"Xnet",
"xpp",
"XSkat",
"YPL-1.0",
"YPL-1.1",
"Zed",
"Zend-2.0",
"Zimbra-1.3",
"Zimbra-1.4",
"zlib-acknowledgement",
"Zlib",
"ZPL-1.1",
"ZPL-2.0",
"ZPL-2.1",
]
FONTSTATUSES = ["prerelease", "trial", "stable"]
PUBLISHERTYPES = ["free", "retail", "custom", "undefined"]
PUBLICPUBLISHERTYPES = ["free", "retail", "custom"]
PUBLISHERSIDEAPPANDUSERCREDENTIALSTATUSES = ["active", "deleted", "revoked"]
DEFAULT = "__default__"
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# Helper methods
def makeSemVer(version):
"""Turn simple float number (0.1) into semver-compatible number
for comparison by adding .0(s): (0.1.0)"""
# Make string
version = str(version)
if version.count(".") < 2:
# Strip leading zeros
version = ".".join(map(str, list(map(int, version.split(".")))))
# Add .0(s)
version = version + (2 - version.count(".")) * ".0"
return version
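# Illustrative examples: makeSemVer(0.1) -> "0.1.0", makeSemVer("2.0") -> "2.0.0";
# a value that already contains two dots, such as "2.0.0-rc.1", is returned unchanged.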
def ResponsesDocu(responses):
text = "\n\n"
for response in responses:
text += "`%s`: %s\n\n" % (response, RESPONSES[response])
return text
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# Basic Data Types
class DataType(object):
initialData = None
dataType = None
def __init__(self):
self.value = copy.copy(self.initialData)
if issubclass(self.__class__, (MultiLanguageText, MultiLanguageTextProxy)):
self.value = self.dataType()
def __repr__(self):
if issubclass(self.__class__, Proxy):
return "<%s>" % (self.dataType.__name__)
else:
return "<%s '%s'>" % (self.__class__.__name__, self.get())
def valid(self):
if not self.value:
return True
if type(self.value) == self.dataType:
return True
else:
return "Wrong data type. Is %s, should be: %s." % (
type(self.value),
self.dataType,
)
def get(self):
return self.value
def put(self, value):
self.value = self.shapeValue(value)
if issubclass(self.value.__class__, (DictBasedObject, ListProxy, Proxy, DataType)):
object.__setattr__(self.value, "_parent", self)
valid = self.valid()
if valid is not True and valid is not None:
raise ValueError(valid)
def shapeValue(self, value):
return value
def isEmpty(self):
return self.value is None or self.value == [] or self.value == ""
def isSet(self):
return not self.isEmpty()
def formatHint(self):
return None
def exampleData(self):
return None
class BooleanDataType(DataType):
dataType = bool
class IntegerDataType(DataType):
dataType = int
def shapeValue(self, value):
return int(value)
class FloatDataType(DataType):
dataType = float
def shapeValue(self, value):
return float(value)
class StringDataType(DataType):
dataType = str
def shapeValue(self, value):
return str(value)
class DictionaryDataType(DataType):
dataType = dict
def shapeValue(self, value):
return dict(value)
class FontDataType(StringDataType):
pass
class FontEncodingDataType(StringDataType):
def valid(self):
if not self.value:
return True
if self.value not in FONTENCODINGS:
return "Encoding '%s' is unknown. Known are: %s" % (
self.value,
FONTENCODINGS,
)
return True
class VersionDataType(StringDataType):
dataType = str
def valid(self):
if not self.value:
return True
# Append .0 for semver comparison
try:
value = makeSemVer(self.value)
except ValueError:
return False
try:
semver.VersionInfo.parse(value)
except ValueError as e:
return str(e)
return True
def formatHint(self):
return (
"Simple float number (1 or 1.01) or semantic versioning "
"(2.0.0-rc.1) as per [semver.org](https://semver.org)"
)
class TimestampDataType(IntegerDataType):
pass
class DateDataType(StringDataType):
def valid(self):
if not self.value:
return True
try:
datetime.datetime.strptime(self.value, "%Y-%m-%d")
return True
except ValueError:
return traceback.format_exc().splitlines()[-1]
def formatHint(self):
return "YYYY-MM-DD"
class WebURLDataType(StringDataType):
def valid(self):
if not self.value:
return True
if not self.value.startswith("http://") and not self.value.startswith("https://"):
return "Needs to start with http:// or https://"
else:
return True
# # TODO: This is a stump. Expand.
# class TypeWorldURLDataType(StringDataType):
# def valid(self):
# if not self.value:
# return True
# if not self.value.startswith("http://") and not self.value.startswith(
# "https://"
# ):
# return "Needs to start with http:// or https://"
# else:
# return True
# def formatHint(self):
# return (
# "Type.World Subscription URL as per "
# "[Developer Docs](https://type.world/developer#the-subscription-url)"
# )
class TelephoneDataType(StringDataType):
def valid(self):
if not self.value:
return True
text = "Needs to start with + and contain only numbers 0-9"
match = re.match(r"(\+[0-9]+)", self.value)
if match:
match = self.value.replace(match.group(), "")
if match:
return text
else:
return text
return True
def formatHint(self):
return "+1234567890"
class WebResourceURLDataType(WebURLDataType):
def formatHint(self):
return (
"This resource may get downloaded and cached on the client "
"computer. To ensure up-to-date resources, append a unique ID "
"to the URL such as a timestamp of when the resources changed on your "
"server, e.g. "
"https://awesomefonts.com/xyz/regular/specimen.pdf?t=1548239062. "
"Don’t use the current time for a timestamp, as this will mean constant "
"reloading the resource when it actually hasn’t changed. Instead use "
"the resource’s server-side change timestamp."
)
class EmailDataType(StringDataType):
def valid(self):
if not self.value:
return True
if (
"@" in self.value
and "." in self.value
and self.value.find(".", self.value.find("@")) > 0
and self.value.count("@") == 1
and self.value.find("..") == -1
):
return True
else:
return "Not a valid email format: %s" % self.value
class HexColorDataType(StringDataType):
def valid(self):
if not self.value:
return True
if (len(self.value) == 3 or len(self.value) == 6) and re.match("^[A-Fa-f0-9]*$", self.value):
return True
else:
return "Not a valid hex color of format RRGGBB (like FF0000 for red): %s" % self.value
def formatHint(self):
return "Hex RRGGBB (without leading #)"
class ListProxy(DataType):
initialData = []
includeEmpty = False
# Data type of each list member
# Here commented out to enforce explicit setting of data type
# for each Proxy
# dataType = str
def __repr__(self):
if self.value:
return "%s" % ([x.get() for x in self.value])
else:
return "[]"
def __getitem__(self, i):
return self.value[i].get()
def __setitem__(self, i, value):
if issubclass(value.__class__, (DictBasedObject, Proxy, ListProxy, DataType)):
object.__setattr__(value, "_parent", self)
self.value[i].put(value)
object.__setattr__(self.value[i], "_parent", self)
def __delitem__(self, i):
del self.value[i]
def __iter__(self):
for element in self.value:
yield element.get()
def __len__(self):
return len(self.value)
def index(self, item):
return [x.get() for x in self.value].index(item)
def get(self):
return self
def put(self, values):
if not type(values) in (list, tuple):
raise ValueError("Wrong data type. Is %s, should be: %s." % (type(values), list))
self.value = []
for value in values:
self.append(value)
def append(self, value):
newData = self.dataType()
newData.put(value)
self.value.append(newData)
if issubclass(newData.__class__, (DictBasedObject, Proxy, ListProxy, DataType)):
object.__setattr__(newData, "_parent", self)
def extend(self, values):
for value in values:
self.append(value)
def remove(self, removeValue):
for i, value in enumerate(self.value):
if self[i] == removeValue:
del self[i]
def isEmpty(self):
if self.includeEmpty:
return False
else:
return not bool(self.value)
# def valid(self):
# if self.value:
# for data in self.value:
# valid = data.valid()
# return valid
# return True
class DictBasedObject(object):
_structure = {}
_deprecatedKeys = []
_possible_keys = []
_dataType_for_possible_keys = None
def __copy__(self):
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
def __deepcopy__(self, memo):
cls = self.__class__
obj = cls()
obj.loadJSON(self.dumpJSON())
return obj
def sameContent(self, other):
# return self.difference(other) == {}
return json.dumps(self.dumpDict(validate=False), sort_keys=True) == json.dumps(
other.dumpDict(validate=False), sort_keys=True
)
# def difference(self, other):
# d1 = self.dumpDict(validate=False)
# d2 = other.dumpDict(validate=False)
# from deepdiff import DeepDiff
# r2 = DeepDiff(d1, d2, ignore_order=True)
# return r2
def nonListProxyBasedKeys(self):
_list = []
for keyword in self._structure.keys():
if ListProxy not in inspect.getmro(self._structure[keyword][0]):
_list.append(keyword)
_list.extend(self._deprecatedKeys)
return _list
def linkDocuText(self, text):
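        # Turns '::Class.attribute::'-style cross references into Markdown links,
        # e.g. '::MultiLanguageText.getText()::' becomes
        # '[MultiLanguageText.getText()](#user-content-class-multilanguagetext-method-gettext)'.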
def my_replace(match):
match = match.group()
match = match[2:-2]
if "." in match:
className, attributeName = match.split(".")
if "()" in attributeName:
attributeName = attributeName[:-2]
match = "[%s.%s()](#user-content-class-%s-method-%s)" % (
className,
attributeName,
className.lower(),
attributeName.lower(),
)
else:
match = "[%s.%s](#user-content-class-%s-attribute-%s)" % (
className,
attributeName,
className.lower(),
attributeName.lower(),
)
else:
className = match
match = "[%s](#user-content-class-%s)" % (
className,
className.lower(),
)
return match
try:
text = re.sub(r"::.+?::", my_replace, text)
except Exception:
pass
return text or ""
def typeDescription(self, class_):
if issubclass(class_, ListProxy):
return "List of %s objects" % self.typeDescription(class_.dataType)
elif class_.dataType in (
dict,
list,
tuple,
str,
bytes,
set,
frozenset,
bool,
int,
float,
):
return class_.dataType.__name__.capitalize()
elif "" in ("%s" % class_.dataType):
return self.linkDocuText("::%s::" % class_.dataType.__name__)
# Seems unused
# elif 'typeworld.api.' in ("%s" % class_.dataType):
# return self.linkDocuText('::%s::' % class_.dataType.__name__)
# else:
# return class_.dataType.__name__.title()
def additionalDocu(self):
doc = ""
if hasattr(self, "sample"):
doc += f"""*Example JSON data:*
```json
{self.sample().dumpJSON(strict = False)}
```
"""
return doc
def docu(self):
classes = []
# Define string
docstring = ""
head = ""
attributes = ""
methods = ""
attributesList = []
methodsList = []
head += '<div id="class-%s"></div>\n\n' % self.__class__.__name__.lower()
head += "# _class_ %s()\n\n" % self.__class__.__name__
head += self.linkDocuText(inspect.getdoc(self))
head += "\n\n"
additionalDocu = self.additionalDocu()
if additionalDocu:
head += additionalDocu + "\n\n"
# attributes
attributes += "## Attributes\n\n"
for key in sorted(self._structure.keys()):
attributesList.append(key)
attributes += '<div id="class-%s-attribute-%s"></div>\n\n' % (
self.__class__.__name__.lower(),
key,
)
attributes += "### %s\n\n" % key
# Description
if self._structure[key][3]:
attributes += self.linkDocuText(self._structure[key][3]) + "\n\n"
attributes += "__Required:__ %s" % self._structure[key][1] + "<br />\n"
attributes += "__Type:__ %s" % self.typeDescription(self._structure[key][0]) + "<br />\n"
# Format Hint
hint = self._structure[key][0]().formatHint()
if hint:
attributes += "__Format:__ %s" % hint + "<br />\n"
if self._structure[key][2] is not None:
attributes += "__Default value:__ %s" % self._structure[key][2] + "\n\n"
# Example Data
example = self._structure[key][0]().exampleData()
if example:
attributes += "Example:\n"
attributes += "```json\n"
attributes += json.dumps(example, indent=4)
attributes += "\n```\n"
method_list = [
func
for func in dir(self)
if callable(getattr(self, func)) and not func.startswith("__") and inspect.getdoc(getattr(self, func))
]
if method_list:
methods += "## Methods\n\n"
for methodName in method_list:
methodsList.append(methodName)
methods += '<div id="class-%s-method-%s"></div>\n\n' % (
self.__class__.__name__.lower(),
methodName.lower(),
)
args = inspect.getfullargspec(getattr(self, methodName))
if args.args != ["self"]:
argList = []
if args.args and args.defaults:
startPoint = len(args.args) - len(args.defaults)
for i, defaultValue in enumerate(args.defaults):
argList.append("%s = %s" % (args.args[i + startPoint], defaultValue))
methods += "#### %s(%s)\n\n" % (
methodName,
", ".join(argList),
)
else:
methods += "#### %s()\n\n" % methodName
methods += self.linkDocuText(inspect.getdoc(getattr(self, methodName))) + "\n\n"
# Compile
docstring += head
# TOC
if attributesList:
docstring += "### Attributes\n\n"
for attribute in attributesList:
docstring += "[%s](#class-%s-attribute-%s)<br />" % (
attribute,
self.__class__.__name__.lower(),
attribute.lower(),
)
docstring += "\n\n"
if methodsList:
docstring += "### Methods\n\n"
for methodName in methodsList:
docstring += "[%s()](#class-%s-method-%s)<br />" % (
methodName,
self.__class__.__name__.lower(),
methodName.lower(),
)
docstring += "\n\n"
if attributesList:
docstring += attributes
docstring += "\n\n"
if methodsList:
docstring += methods
docstring += "\n\n"
# Add data
classes.append([self.__class__.__name__, docstring])
# Recurse
for key in list(self._structure.keys()):
if issubclass(self._structure[key][0], Proxy):
o = self._structure[key][0].dataType()
classes.extend(o.docu())
if issubclass(self._structure[key][0], ListProxy):
o = self._structure[key][0].dataType.dataType()
if hasattr(o, "docu"):
classes.extend(o.docu())
return classes
def __init__(self, json=None, dict=None):
super(DictBasedObject, self).__init__()
object.__setattr__(self, "_content", {})
object.__setattr__(
self,
"_allowedKeys",
set(self._structure.keys()) | set(self._possible_keys),
)
# Fill default values
for key in self._structure:
# Set default values
if self._structure[key][2] is not None:
setattr(self, key, self._structure[key][2])
if json:
self.loadJSON(json)
elif dict:
self.loadDict(dict)
def initAttr(self, key):
if key not in self._content:
if key in list(object.__getattribute__(self, "_structure").keys()):
self._content[key] = object.__getattribute__(self, "_structure")[key][0]()
elif key in self._possible_keys:
self._content[key] = self._dataType_for_possible_keys()
self._content[key]._parent = self
def __getattr__(self, key):
if key in self._allowedKeys:
self.initAttr(key)
return self._content[key].get()
else:
return object.__getattribute__(self, key)
def __setattr__(self, key, value):
if key in self._allowedKeys:
self.initAttr(key)
if issubclass(value.__class__, (DictBasedObject, ListProxy, Proxy, DataType)):
object.__setattr__(value, "_parent", self)
self.__dict__["_content"][key].put(value)
else:
object.__setattr__(self, key, value)
def set(self, key, value):
self.__setattr__(key, value)
def get(self, key):
return self.__getattr__(key)
def validate(self, strict=True):
information = []
warnings = []
critical = []
def extendWithKey(values, key=None, sourceObject=None):
# Remove duplicates
seen = set()
seen_add = seen.add
values = [x for x in values if not (x in seen or seen_add(x))]
# values = list(set(values))
_list = []
for value in values:
if sourceObject and key:
_list.append("%s.%s --> %s --> %s" % (self, key, sourceObject, value))
elif key:
_list.append("%s.%s --> %s" % (self, key, value))
else:
_list.append("%s --> %s" % (self, value))
return _list
# Check if required fields are filled
for key in list(self._structure.keys()):
self.initAttr(key)
if self.discardThisKey(key) is False:
if strict and self._structure[key][1] and self._content[key].isEmpty():
critical.append("%s.%s is a required attribute, but empty" % (self, key))
else:
# recurse
if issubclass(self._content[key].__class__, (Proxy)):
if self._content[key].isEmpty() is False:
(newInformation, newWarnings, newCritical,) = self._content[
key
].value.validate(strict=strict)
information.extend(extendWithKey(newInformation, key))
warnings.extend(extendWithKey(newWarnings, key))
critical.extend(extendWithKey(newCritical, key))
# Check custom messages:
if hasattr(self._content[key].value, "customValidation") and isinstance(
self._content[key].value.customValidation,
types.MethodType,
):
(
newInformation,
newWarnings,
newCritical,
) = self._content[key].value.customValidation()
information.extend(extendWithKey(newInformation, key, self._content[key]))
warnings.extend(extendWithKey(newWarnings, key, self._content[key]))
critical.extend(extendWithKey(newCritical, key, self._content[key]))
# recurse
if issubclass(self._content[key].__class__, (ListProxy)):
if self._content[key].isEmpty() is False:
for item in self._content[key]:
if hasattr(item, "validate") and isinstance(item.validate, types.MethodType):
(
newInformation,
newWarnings,
newCritical,
) = item.validate(strict=strict)
information.extend(extendWithKey(newInformation, key))
warnings.extend(extendWithKey(newWarnings, key))
critical.extend(extendWithKey(newCritical, key))
# Check custom messages:
if hasattr(item, "customValidation") and isinstance(
item.customValidation, types.MethodType
):
(
newInformation,
newWarnings,
newCritical,
) = item.customValidation()
information.extend(extendWithKey(newInformation, key, item))
warnings.extend(extendWithKey(newWarnings, key, item))
critical.extend(extendWithKey(newCritical, key, item))
# Check custom messages:
if (
issubclass(self.__class__, BaseResponse)
and hasattr(self, "customValidation")
and isinstance(self.customValidation, types.MethodType)
):
newInformation, newWarnings, newCritical = self.customValidation()
information.extend(extendWithKey(newInformation))
warnings.extend(extendWithKey(newWarnings))
critical.extend(extendWithKey(newCritical))
return information, warnings, critical
def discardThisKey(self, key):
return False
def dumpDict(self, strict=True, validate=True):
d = {}
# Auto-validate
if validate:
information, warnings, critical = self.validate(strict=strict)
if critical:
raise ValueError(critical[0])
for key in list(self._content.keys()):
if self.discardThisKey(key) is False:
attr = getattr(self, key)
if (
# required
(key in self._structure and self._structure[key][1])
# don't know
or attr
# is set
or (hasattr(attr, "isSet") and attr.isSet())
):
if hasattr(attr, "dumpDict"):
d[key] = attr.dumpDict(strict=strict, validate=validate)
elif issubclass(attr.__class__, (ListProxy)):
d[key] = list(attr)
if len(d[key]) > 0 and hasattr(d[key][0], "dumpDict"):
d[key] = [x.dumpDict(strict=strict, validate=validate) for x in d[key]]
else:
d[key] = attr
return d
def loadDict(self, d):
for key in d:
if key in self._allowedKeys:
if key in self._structure:
if issubclass(self._structure[key][0], (Proxy)):
try:
exec(
"self.%s = typeworld.api.%s()"
% (
key,
self._structure[key][0].dataType.__name__,
)
)
except Exception:
exec(
"self.%s = %s()"
% (
key,
self._structure[key][0].dataType.__name__,
)
)
exec("self.%s.loadDict(d[key])" % (key))
elif issubclass(self._structure[key][0], (ListProxy)):
_list = self.__getattr__(key)
_list.value = []
# allow empty
# if self._structure[key][0].includeEmpty:
# _list.value = []
for item in d[key]:
o = self._structure[key][0].dataType.dataType()
if hasattr(o, "loadDict"):
o.loadDict(item)
_list.append(o)
else:
_list.append(item)
exec("self._content[key] = _list")
else:
self.set(key, d[key])
def dumpJSON(self, strict=True, validate=False):
return json.dumps(self.dumpDict(strict=strict, validate=validate), indent=4, sort_keys=True)
def loadJSON(self, j):
self.loadDict(json.loads(j))
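# The dumpDict()/loadDict() and dumpJSON()/loadJSON() pairs above serialize an
# object tree and rebuild it losslessly. A hypothetical round trip (the class
# name is just one of the response classes defined further below):
#
#     response = InstallableFontsResponse()
#     response.response = 'success'
#     json_string = response.dumpJSON()
#     copy = InstallableFontsResponse()
#     copy.loadJSON(json_string)
#     # copy now holds the same content as response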
class Proxy(DataType):
pass
class ResponseCommandDataType(StringDataType):
def formatHint(self):
return (
"To ensure the proper function of the entire Type.World protocol, "
"your API endpoint *must* return the proper responses as per "
"[this flow chart](https://type.world/documentation/Type.World%20"
"Request%20Flow%20Chart.pdf). "
"In addition to ensure functionality, this enables the response "
"messages displayed to the user to be translated into all the "
"possible languages on our side."
)
class MultiLanguageText(DictBasedObject):
"""\
Multi-language text. Attributes are language keys as per
[https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes]
The GUI app will then calculate the language data to be displayed using
::MultiLanguageText.getText():: with a prioritized list of languages that
the user can understand. They may be pulled from the operating system’s
language preferences.
These classes are already instantiated wherever they are used, and can be
addressed directly via the language attributes:
```python
api.name.en = u'Font Publisher XYZ'
api.name.de = u'Schriftenhaus XYZ'
```
If you are loading language information from an external source, you may use
the `.set()` method to enter data:
```python
# Simulating external data source
for languageCode, text in (
('en', u'Font Publisher XYZ'),
('de', u'Schriftenhaus XYZ'),
):
    api.name.set(languageCode, text)
```
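To read the text back for display, hand `.getText()` a prioritized list of
language codes (hypothetical values):
```python
# Returns the German text if set, otherwise falls back to English,
# otherwise to any language that happens to be set
text = api.name.getText(['de', 'en'])
```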
Neither HTML nor Markdown code is permitted in `MultiLanguageText`.
"""
_possible_keys = [
"ab",
"aa",
"af",
"ak",
"sq",
"am",
"ar",
"an",
"hy",
"as",
"av",
"ae",
"ay",
"az",
"bm",
"ba",
"eu",
"be",
"bn",
"bh",
"bi",
"bs",
"br",
"bg",
"my",
"ca",
"ch",
"ce",
"ny",
"zh",
"cv",
"kw",
"co",
"cr",
"hr",
"cs",
"da",
"dv",
"nl",
"dz",
"en",
"eo",
"et",
"ee",
"fo",
"fj",
"fi",
"fr",
"ff",
"gl",
"ka",
"de",
"el",
"gn",
"gu",
"ht",
"ha",
"he",
"hz",
"hi",
"ho",
"hu",
"ia",
"id",
"ie",
"ga",
"ig",
"ik",
"io",
"is",
"it",
"iu",
"ja",
"jv",
"kl",
"kn",
"kr",
"ks",
"kk",
"km",
"ki",
"rw",
"ky",
"kv",
"kg",
"ko",
"ku",
"kj",
"la",
"lb",
"lg",
"li",
"ln",
"lo",
"lt",
"lu",
"lv",
"gv",
"mk",
"mg",
"ms",
"ml",
"mt",
"mi",
"mr",
"mh",
"mn",
"na",
"nv",
"nd",
"ne",
"ng",
"nb",
"nn",
"no",
"ii",
"nr",
"oc",
"oj",
"cu",
"om",
"or",
"os",
"pa",
"pi",
"fa",
"pl",
"ps",
"pt",
"qu",
"rm",
"rn",
"ro",
"ru",
"sa",
"sc",
"sd",
"se",
"sm",
"sg",
"sr",
"gd",
"sn",
"si",
"sk",
"sl",
"so",
"st",
"es",
"su",
"sw",
"ss",
"sv",
"ta",
"te",
"tg",
"th",
"ti",
"bo",
"tk",
"tl",
"tn",
"to",
"tr",
"ts",
"tt",
"tw",
"ty",
"ug",
"uk",
"ur",
"uz",
"ve",
"vi",
"vo",
"wa",
"cy",
"wo",
"fy",
"xh",
"yi",
"yo",
"za",
"zu",
]
_dataType_for_possible_keys = StringDataType
_length = 100
_markdownAllowed = False
# def __repr__(self):
# return '<MultiLanguageText>'
def __str__(self):
return str(self.getText())
def __bool__(self):
return self.isSet()
def sample(self):
o = self.__class__()
o.en = "Text in English"
o.de = "Text auf Deutsch"
return o
def getTextAndLocale(self, locale=["en"]):
"""Like getText(), but additionally returns the language of whatever
text was found first."""
if type(locale) == str:
if self.get(locale):
return self.get(locale), locale
elif type(locale) in (list, tuple):
for key in locale:
if self.get(key):
return self.get(key), key
# try english
if self.get("en"):
return self.get("en"), "en"
# try anything
for key in self._possible_keys:
if self.get(key):
return self.get(key), key
return None, None
def getText(self, locale=["en"]):
"""Returns the text in the first language found from the specified
list of languages. If that language can’t be found, we’ll try English
as a standard. If that can’t be found either, return the first language
you can find."""
text, locale = self.getTextAndLocale(locale)
return text
def customValidation(self):
information, warnings, critical = [], [], []
if self.isEmpty():
critical.append("Needs to contain at least one language field")
# Check for text length
for langId in self._possible_keys:
if self.get(langId):
string = self.get(langId)
if len(string) > self._length:
critical.append(
"Language entry '%s' is too long. Allowed are %s characters." % (langId, self._length)
)
if re.findall(r"(<.+?>)", string):
if self._markdownAllowed:
critical.append(
(
"String contains HTML code, which is not "
"allowed. You may use Markdown for text "
"formatting. String: " + string
)
)
else:
critical.append("String contains HTML code, which is not allowed. String: " + string)
if not self._markdownAllowed and string and "<p>" + string + "</p>\n" != markdown2.markdown(string):
critical.append("String contains Markdown code, which is not allowed.")
return information, warnings, critical
def isSet(self):
for langId in self._possible_keys:
if langId in self._content and self.get(langId) not in (None, ""):
return True
return False
def isEmpty(self):
return not self.isSet()
def loadDict(self, d):
for key in d:
self.set(key, d[key])
def MultiLanguageText_Parent(self):
if hasattr(self, "_parent") and hasattr(self._parent, "_parent"):
return self._parent._parent
MultiLanguageText.parent = property(lambda self: MultiLanguageText_Parent(self))
class MultiLanguageTextProxy(Proxy):
dataType = MultiLanguageText
def isEmpty(self):
return self.value.isEmpty()
def formatHint(self):
text = "Maximum allowed characters: %s." % self.dataType._length
if self.dataType._markdownAllowed:
text += " Markdown code is permitted for text formatting."
return text
class MultiLanguageTextListProxy(ListProxy):
dataType = MultiLanguageTextProxy
###############################################################################
class MultiLanguageLongText(MultiLanguageText):
"""\
Multi-language text. Attributes are language keys as per
[https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes]
The GUI app will then calculate the language data to be displayed using
::MultiLanguageText.getText():: with a prioritized list of languages that
the user can understand. They may be pulled from the operating system’s
language preferences.
These classes are already instantiated wherever they are used, and can be
addressed directly via the language attributes:
```python
api.name.en = u'Font Publisher XYZ'
api.name.de = u'Schriftenhaus XYZ'
```
If you are loading language information from an external source, you may use
the `.set()` method to enter data:
```python
# Simulating external data source
for languageCode, text in (
('en', u'Font Publisher XYZ'),
('de', u'Schriftenhaus XYZ'),
):
    api.name.set(languageCode, text)
```
Markdown is permitted in `MultiLanguageLongText`.
Line breaks need to be escaped as `\n` characters.
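A hypothetical example of a Markdown-formatted description:
```python
api.description.en = u'A **short** introduction. See [our website](https://example.com) for details.'
```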
"""
_length = 3000
_markdownAllowed = True
class MultiLanguageLongTextProxy(MultiLanguageTextProxy):
dataType = MultiLanguageLongText
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# Top-Level Data Types
# class LanguageSupportDataType(DictionaryDataType):
# def valid(self):
# if not self.value:
# return True
# for script in self.value:
# if not len(script) == 4 or not script.islower():
# return "Script tag '%s' needs to be a four-letter lowercase tag." % (
# script
# )
# for language in self.value[script]:
# if not len(language) == 3 or not language.isupper():
# return (
# "Language tag '%s' needs to be a " "three-letter uppercase"
# ) % (language)
# return True
class OpenTypeFeatureDataType(StringDataType):
def valid(self):
if not self.value:
return True
if not len(self.value) == 4 or not self.value.islower():
return "OpenType feature tag '%s' needs to be a four-letter lowercase tag." % (self.value)
return True
class OpenTypeFeatureListProxy(ListProxy):
dataType = OpenTypeFeatureDataType
class OpenSourceLicenseIdentifierDataType(StringDataType):
def valid(self):
if not self.value:
return True
if self.value in OPENSOURCELICENSES:
return True
else:
return "Unknown license identifier: '%s'. See https://spdx.org/licenses/" % (self.value)
class SupportedAPICommandsDataType(StringDataType):
commands = [x["keyword"] for x in COMMANDS]
def valid(self):
if not self.value:
return True
if self.value in self.commands:
return True
else:
return "Unknown API command: '%s'. Possible: %s" % (
self.value,
self.commands,
)
class SupportedAPICommandsListProxy(ListProxy):
dataType = SupportedAPICommandsDataType
class SupportedPublisherTypeDataType(StringDataType):
types = PUBLISHERTYPES
def valid(self):
if not self.value:
return True
if self.value in self.types:
return True
else:
return "Unknown publisher type: '%s'. Possible: %s" % (
self.value,
self.types,
)
class SupportedPublisherTypeListProxy(ListProxy):
dataType = SupportedPublisherTypeDataType
class FontPurposeDataType(StringDataType):
def valid(self):
if not self.value:
return True
if self.value in list(FONTPURPOSES.keys()):
return True
else:
return "Unknown font type: '%s'. Possible: %s" % (
self.value,
list(FONTPURPOSES.keys()),
)
class FontMimeType(StringDataType):
def valid(self):
if not self.value:
return True
if self.value in list(FONTPURPOSES["desktop"]["acceptableMimeTypes"]):
return True
else:
return "Unknown font MIME Type: '%s'. Possible: %s" % (
self.value,
list(FONTPURPOSES["desktop"]["acceptableMimeTypes"]),
)
class FontStatusDataType(StringDataType):
statuses = FONTSTATUSES
def valid(self):
if not self.value:
return True
if self.value in self.statuses:
return True
else:
return "Unknown Font Status: '%s'. Possible: %s" % (
self.value,
self.statuses,
)
class FontExtensionDataType(StringDataType):
def valid(self):
if not self.value:
return True
found = False
for mimeType in list(MIMETYPES.keys()):
if self.value in MIMETYPES[mimeType]["fileExtensions"]:
found = True
break
if found:
return True
else:
return "Unknown font extension: '%s'. Possible: %s" % (
self.value,
FILEEXTENSIONS,
)
###############################################################################
# LicenseDefinition
class LicenseDefinition(DictBasedObject):
# key: [data type, required, default value, description]
_structure = {
"keyword": [
StringDataType,
True,
None,
"Machine-readable keyword under which the license will be referenced from the individual fonts.",
],
"name": [
MultiLanguageTextProxy,
True,
None,
"Human-readable name of font license",
],
"URL": [
WebURLDataType,
True,
None,
"URL where the font license text can be viewed online",
],
}
def __repr__(self):
return "<LicenseDefinition '%s'>" % (self.name or self.keyword or "undefined")
def sample(self):
o = self.__class__()
o.keyword = "awesomefontsEULA"
o.name.en = "Awesome Fonts End User License Agreement"
o.name.de = "Awesome Fonts Endnutzerlizenzvereinbarung"
o.URL = "https://awesomefonts.com/eula.html"
return o
def LicenseDefinition_Parent(self):
if hasattr(self, "_parent") and hasattr(self._parent, "_parent") and hasattr(self._parent._parent, "_parent"):
return self._parent._parent._parent
LicenseDefinition.parent = property(lambda self: LicenseDefinition_Parent(self))
class LicenseDefinitionProxy(Proxy):
dataType = LicenseDefinition
class LicenseDefinitionListProxy(ListProxy):
dataType = LicenseDefinitionProxy
###############################################################################
# FontPackage
class FontPackage(DictBasedObject):
"""\
`FontPackages` are groups of fonts that serve a certain purpose
to the user.
They can be defined at ::InstallableFontsResponse.packages::,
::Foundry.packages::, ::Family.packages::
and are referenced by their keywords in ::Font.packageKeywords::.
On a font family level, defined at ::Family.packages::, a typical example
for defining a `FontPackage` would be the so called **Office Fonts**.
While they are technically identical to other OpenType fonts, they normally
have a slightly different set of glyphs and OpenType features.
Linking them to a `FontPackage` allows the UI to display them clearly as a
separate set of fonts that serve a different purpose than the
regular fonts.
On a subscription-wide level, defined at
::InstallableFontsResponse.packages::, a `FontPackage` could represent a
curated collection of fonts of various foundries and families, for example
**Script Fonts** or **Brush Fonts** or **Corporate Fonts**.
Each font may be part of several `FontPackages`.
For the time being, only family-level FontPackages are supported in the UI.
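A minimal sketch of defining a family-level package and referencing it from a
font (all identifiers are hypothetical; `family` and `font` are assumed to be
existing ::Family:: and ::Font:: objects):
```python
package = FontPackage()
package.keyword = 'officefonts'
package.name.en = 'Office Fonts'
family.packages.append(package)
font.packageKeywords.append('officefonts')
```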
"""
# key: [data type, required, default value, description]
_structure = {
"keyword": [
StringDataType,
True,
None,
"Keyword of font packages. This keyword must be referenced in "
"::Font.packageKeywords:: and must be unique to this subscription.",
],
"name": [MultiLanguageTextProxy, True, None, "Name of package"],
"description": [MultiLanguageLongTextProxy, False, None, "Description"],
}
def __repr__(self):
return "<FontPackage '%s'>" % (self.keyword or "undefined")
def sample(self):
o = self.__class__()
o.keyword = "officefonts"
o.name.en = "Office Fonts"
o.name.de = "Office-Schriften"
o.description.en = "These fonts are produced specifically to be used in Office applications."
o.description.de = "Diese Schriftdateien sind für die Benutzung in Office-Applikationen vorgesehen."
return o
def getFonts(self, filterByFontFormat=[], variableFont=None):
"""
Calculate list of fonts of this package by applying filters for
font.format and font.variableFont (possibly more in the future)
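For example (hypothetical call), to fetch only the static OTF fonts of a package:
```python
fonts = package.getFonts(filterByFontFormat=['otf'], variableFont=False)
```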
"""
def passedFilter(font):
# font.format filter
passed1 = not filterByFontFormat or (filterByFontFormat and font.format in filterByFontFormat)
# font.variableFont filter
passed2 = variableFont is None or (variableFont is not None and font.variableFont == variableFont)
return passed1 and passed2
return [x for x in self.fonts if passedFilter(x)]
def getFormats(self):
formats = []
if hasattr(self, "fonts"):
for font in self.fonts:
if font.format not in formats:
formats.append(font.format)
return formats
class FontPackageProxy(Proxy):
dataType = FontPackage
class FontPackageListProxy(ListProxy):
dataType = FontPackageProxy
class FontPackageReferencesListProxy(ListProxy):
dataType = StringDataType
###############################################################################
# LicenseUsage
class LicenseUsage(DictBasedObject):
# key: [data type, required, default value, description]
_structure = {
"keyword": [
StringDataType,
True,
None,
"Keyword reference of font’s license. This license must be specified in ::Foundry.licenses::",
],
"seatsAllowed": [
IntegerDataType,
False,
None,
"In case of desktop font (see ::Font.purpose::), number of installations permitted by the user’s license.",
],
"seatsInstalled": [
IntegerDataType,
False,
None,
"In case of desktop font (see ::Font.purpose::), number of "
"installations recorded by the API endpoint. This value will "
"need to be supplied dynamically by the API endpoint through "
"tracking all font installations through the `anonymousAppID` "
"parameter of the '%s' and '%s' commands. Please note that the "
"Type.World client app is currently not designed to reject "
"installations of the fonts when the limits are exceeded. "
"Instead it is the responsibility of the API endpoint to "
"reject font installations through the '%s' command when the "
"limits are exceeded. In that case the user will be presented "
"with one or more license upgrade links."
% (
INSTALLFONTSCOMMAND["keyword"],
UNINSTALLFONTSCOMMAND["keyword"],
INSTALLFONTSCOMMAND["keyword"],
),
],
"allowanceDescription": [
MultiLanguageTextProxy,
False,
None,
"In case of non-desktop font (see ::Font.purpose::), custom "
"string for web fonts or app fonts reminding the user of the "
"license’s limits, e.g. '100.000 page views/month'",
],
"upgradeURL": [
WebURLDataType,
False,
None,
"URL the user can be sent to to upgrade the license of the "
"font, for instance at the foundry’s online shop. If "
"possible, this link should be user-specific and guide "
"him/her as far into the upgrade process as possible.",
],
"dateAddedForUser": [
DateDataType,
False,
None,
"Date that the user has purchased this font or the font has "
"become available to the user otherwise (like a new font "
"within a foundry’s beta font repository). Will be used in "
"the UI to signal which fonts have become newly available "
"in addition to previously available fonts. This is not to "
"be confused with the ::Version.releaseDate::, although they "
"could be identical.",
],
}
def sample(self):
o = self.__class__()
o.keyword = "awesomefontsEULA"
o.seatsAllowed = 5
o.seatsInstalled = 2
o.upgradeURL = "https://awesomefonts.com/shop/upgradelicense/083487263904356"
return o
def __repr__(self):
return "<LicenseUsage '%s'>" % (self.keyword or "undefined")
def customValidation(self):
information, warnings, critical = [], [], []
# Checking for existing license
if self.keyword and not self.getLicense():
critical.append(
"Has license '%s', but %s has no matching license." % (self.keyword, self.parent.parent.parent)
)
return information, warnings, critical
def getLicense(self):
"""\
Returns the ::License:: object that this font references.
"""
return self.parent.parent.parent.getLicenseByKeyword(self.keyword)
def LicenseUsage_Parent(self):
if hasattr(self, "_parent") and hasattr(self._parent, "_parent") and hasattr(self._parent._parent, "_parent"):
return self._parent._parent._parent
LicenseUsage.parent = property(lambda self: LicenseUsage_Parent(self))
class LicenseUsageProxy(Proxy):
dataType = LicenseUsage
class LicenseUsageListProxy(ListProxy):
dataType = LicenseUsageProxy
#######################################################################################
# Designer
class Designer(DictBasedObject):
# key: [data type, required, default value, description]
_structure = {
"keyword": [
StringDataType,
True,
None,
"Machine-readable keyword under which the designer will be referenced "
"from the individual fonts or font families",
],
"name": [
MultiLanguageTextProxy,
True,
None,
"Human-readable name of designer",
],
"websiteURL": [WebURLDataType, False, None, "Designer’s web site"],
"description": [
MultiLanguageLongTextProxy,
False,
None,
"Description of designer",
],
}
def sample(self):
o = self.__class__()
o.keyword = "johndoe"
o.name.en = "John Doe"
o.websiteURL = "https://johndoe.com"
return o
def __repr__(self):
return "<Designer '%s'>" % (self.name.getText() or self.keyword or "undefined")
def Designer_Parent(self):
if hasattr(self, "_parent") and hasattr(self._parent, "_parent") and hasattr(self._parent._parent, "_parent"):
return self._parent._parent._parent
Designer.parent = property(lambda self: Designer_Parent(self))
class DesignerProxy(Proxy):
dataType = Designer
class DesignersListProxy(ListProxy):
dataType = DesignerProxy
class DesignersReferencesListProxy(ListProxy):
dataType = StringDataType
########################################################################################
# Font Family Version
class Version(DictBasedObject):
# key: [data type, required, default value, description]
_structure = {
"number": [
VersionDataType,
True,
None,
"Font version number. This can be a simple float number (1.002) or a "
"semver version string (see https://semver.org). For comparison, "
"single-dot version numbers (or even integers) are appended with "
"another .0 (1.0 to 1.0.0), then compared using the Python `semver` "
"module.",
],
"description": [
MultiLanguageLongTextProxy,
False,
None,
"Description of font version",
],
"releaseDate": [DateDataType, False, None, "Font version’s release date."],
}
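# A worked example of the comparison rule described above (hypothetical
# numbers): "1.2" is padded to "1.2.0" before comparison, so "1.2" sorts
# before "1.10.0" because 2 < 10 under semver ordering, unlike a plain
# string comparison.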
def sample(self):
o = self.__class__()
o.number = "1.2"
o.description.en = "Added capital SZ and Turkish Lira sign"
o.description.de = "Versal-SZ und türkisches Lira-Zeichen hinzugefügt"
o.releaseDate = "2020-05-21"
return o
def __repr__(self):
return "<Version %s (%s)>" % (
self.number if self.number else "None",
"font-specific" if self.isFontSpecific() else "family-specific",
)
def isFontSpecific(self):
"""\
Returns True if this version is defined at the font level.
Returns False if this version is defined at the family level.
"""
return issubclass(self.parent.__class__, Font)
def Version_Parent(self):
if hasattr(self, "_parent") and hasattr(self._parent, "_parent") and hasattr(self._parent._parent, "_parent"):
return self._parent._parent._parent
Version.parent = property(lambda self: Version_Parent(self))
class VersionProxy(Proxy):
dataType = Version
class VersionListProxy(ListProxy):
dataType = VersionProxy
########################################################################################
# Fonts
class BillboardListProxy(ListProxy):
dataType = WebResourceURLDataType
class Font(DictBasedObject):
# key: [data type, required, default value, description]
_structure = {
"name": [
MultiLanguageTextProxy,
True,
None,
"Human-readable name of font. This may include any additions that you "
"find useful to communicate to your users.",
],
"uniqueID": [
StringDataType,
True,
None,
"A machine-readable string that uniquely identifies this font within "
"the publisher. It will be used to ask for un/installation of the "
"font from the server in the `installFonts` and `uninstallFonts` "
"commands. Also, it will be used for the file name of the font on "
"disk, together with the version string and the file extension. "
"Together, they must not be longer than 220 characters and must "
"not contain the following characters: / ? < > \\ : * | ^ \n**Note:** This "
"ID **must not** include the font's version number, as then it would "
"be treated as a different font. Please also read the section on "
"[versioning](#versioning) above.\n"
"If you offer different font formats of the same font (TTF and OTF), "
"this should be reflected in the *uniqueID* as well.\n"
"Example:\n"
"`MyFoundry_MyFamily_MyFont-Regular_TTF`\n"
"`MyFoundry_MyFamily_MyFont-Regular_OTF`\n"
"`MyFoundry_MyFamily_MyFont-Regular_TTFVAR`\n",
],
"postScriptName": [
StringDataType,
True,
None,
"Complete PostScript name of font",
],
"packageKeywords": [
FontPackageReferencesListProxy,
False,
None,
"List of references to ::FontPackage:: objects by their keyword",
],
"versions": [
VersionListProxy,
False,
None,
"List of ::Version:: objects. These are font-specific versions; they "
"may exist only for this font. You may define additional versions at "
"the family object under ::Family.versions::, which are then expected "
"to be available for the entire family. However, either the fonts or "
"the font family *must* carry version information and the validator "
"will complain when they don't.\n\nPlease also read the section on "
"[versioning](#versioning) above.",
],
"designerKeywords": [
DesignersReferencesListProxy,
False,
None,
"List of keywords referencing designers. These are defined at "
"::InstallableFontsResponse.designers::. This attribute overrides the "
"designer definitions at the family level at ::Family.designerKeywords::.",
],
"free": [BooleanDataType, False, None, "Font is freeware. For UI signaling"],
"billboardURLs": [
BillboardListProxy,
False,
None,
"List of URLs pointing at images to show for this typeface. "
"We suggest to use square dimensions and uncompressed SVG "
"images because they scale to all sizes smoothly, "
"but ultimately any size or HTML-compatible image type "
"is possible.",
],
"status": [
FontStatusDataType,
True,
"stable",
"Font status. For UI signaling. Possible values are: %s" % FONTSTATUSES,
],
"variableFont": [
BooleanDataType,
False,
False,
"Font is an OpenType Variable Font. For UI signaling",
],
"purpose": [
FontPurposeDataType,
True,
None,
"Technical purpose of font. This influences how the app handles the "
"font. For instance, it will only install desktop fonts on the system, "
"and make other font types available though folders. Possible: %s" % (list(FONTPURPOSES.keys())),
],
"format": [
FontExtensionDataType,
False,
None,
"Font file format. Required value in case of `desktop` font (see ::Font.purpose::). Possible: %s"
% FILEEXTENSIONS,
],
"protected": [
BooleanDataType,
False,
False,
"Indication that font is (most likely) commercial and requires "
"a certain amount of special treatment over a free font: "
"1) The API Endpoint requires a valid subscriptionID to be used "
"for authentication. 2) The API Endpoint may limit the downloads "
"of fonts. "
"3) Most importantly, "
"the `uninstallFonts` command needs to be called on the "
"API Endpoint when the font gets uninstalled. "
"This may also be used for fonts that are free to download, but whose "
"installations are to be monitored or limited anyway.",
],
"dateFirstPublished": [
DateDataType,
False,
None,
"Human readable date of the initial release of the font. May also be "
"defined family-wide at ::Family.dateFirstPublished::.",
],
"usedLicenses": [
LicenseUsageListProxy,
True,
None,
"List of ::LicenseUsage:: objects. These licenses represent the "
"different ways in which a user has access to this font. At least one "
"used license must be defined here, because a user needs to know under "
"which legal circumstances he/she is using the font. Several used "
"licenses may be defined for a single font in case a customer owns "
"several licenses that cover the same font. For instance, a customer "
"could have purchased a font license standalone, but also as part of "
"the foundry’s entire catalogue. It’s important to keep these separate "
"in order to provide the user with separate upgrade links where he/she "
"needs to choose which of several owned licenses needs to be upgraded. "
"Therefore, in case of a commercial retail foundry, used licenses "
"correlate to a user’s purchase history.",
],
"pdfURL": [
WebResourceURLDataType,
False,
None,
"URL of PDF file with type specimen and/or instructions for this "
"particular font. (See also ::Family.pdfURL::)",
],
"expiry": [
TimestampDataType,
False,
None,
"Unix timestamp of font’s expiry. The font will be deleted on that "
"moment. This could be set either upon initial installation of a trial "
"font, or also before initial installation as a general expiry moment.",
],
"expiryDuration": [
IntegerDataType,
False,
None,
"Minutes for which the user will be able to use the font after initial "
"installation. This attribute is used only as a visual hint in the UI "
"and should be set for trial fonts that expire a certain period after "
"initial installation, such as 60 minutes. If the font is a trial font "
"limited to a certain usage period after initial installation, it must "
"also be marked as ::Font.protected::, with no ::Font.expiry:: "
"timestamp set at first (because the expiry depends on the moment of "
"initial installation). On initial font installation by the user, the "
"publisher’s server needs to record that moment’s time, and from there "
"onwards serve the subscription with ::Font.expiry:: attribute set in "
"the future. Because the font is marked as ::Font.protected::, the app "
"will update the subscription directly after font installation, upon "
"when it will learn of the newly added ::Font.expiry:: attribute. "
"Please note that you *have* to set ::Font.expiry:: after initial "
"installation yourself. The Type.World app will not follow up on its "
"own on installed fonts just with the ::Font.expiryDuration:: "
"attribute, which is used only for display.",
],
"features": [
OpenTypeFeatureListProxy,
False,
None,
"List of supported OpenType features as per "
"https://docs.microsoft.com/en-us/typography/opentype/spec/featuretags",
],
# "languageSupport": [
# LanguageSupportDataType,
# False,
# None,
# "Dictionary of suppported languages as script/language combinations",
# ],
}
def __repr__(self):
return "<Font '%s'>" % (self.postScriptName or self.name.getText() or "undefined")
def sample(self):
o = self.__class__()
o.name.en = "Bold"
o.name.de = "Fette"
o.uniqueID = "AwesomeFonts-AwesomeFamily-Bold"
o.postScriptName = "AwesomeFamily-Bold"
o.purpose = "desktop"
return o
def filename(self, version):
"""\
Returns the recommended font file name to be used to store the font on disk.
It is composed of the font’s uniqueID, its version string and the file
extension. Together, they must not exceed 220 characters.
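For example (hypothetical values), a font with the uniqueID
'AwesomeFonts-AwesomeFamily-Bold', format 'otf' and version '1.2':
```python
font.filename('1.2')  # 'AwesomeFonts-AwesomeFamily-Bold_1.2.otf'
```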
"""
if not type(version) in (str, int, float):
raise ValueError("Supplied version must be str or int or float")
if self.format:
return "%s_%s.%s" % (self.uniqueID, version, self.format)
else:
return "%s_%s" % (self.uniqueID, version)
def hasVersionInformation(self):
return self.versions or self.parent.versions
def customValidation(self):
information, warnings, critical = [], [], []
# Checking font type/extension
if self.purpose == "desktop" and not self.format:
critical.append("Is a desktop font (see .purpose), but has no .format value.")
# Checking version information
if not self.hasVersionInformation():
critical.append(
"Has no version information, and neither has its family %s. "
"Either one needs to carry version information." % (self.parent)
)
# Checking for designers
for designerKeyword in self.designerKeywords:
if not self.parent.parent.parent.getDesignerByKeyword(designerKeyword):
critical.append(
"Has designer '%s', but %s.designers has no matching designer."
% (designerKeyword, self.parent.parent.parent)
)
# Checking uniqueID for file name contradictions:
forbidden = "/?<>\\:*|^,;"
for char in forbidden:
if self.uniqueID.count(char) > 0:
critical.append(
".uniqueID must not contain the character '%s' because it will "
"be used for the font’s file name on disk." % char
)
for version in self.getVersions():
filename = self.filename(version.number)
if len(filename) > 220:
critical.append("The suggested file name is longer than 220 characters: %s" % filename)
return information, warnings, critical
def getBillboardURLs(self):
"""\
Returns a list of billboard URLs compiled from ::Font.billboardURLs::
and ::Family.billboardURLs::, giving the font-level definitions priority
over family-level definitions.
"""
return self.billboardURLs or self.parent.billboardURLs
def getVersions(self):
"""\
Returns list of ::Version:: objects.
This is the final list based on the version information in this font object as
well as in its parent ::Family:: object. Please read the section about
[versioning](#versioning) above.
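For example (hypothetical), to list every version number available for a font,
combined from font-level and family-level definitions:
```python
for version in font.getVersions():
    print(version.number)
```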
"""
if not self.hasVersionInformation():
raise ValueError(
"%s has no version information, and neither has its family %s. "
"Either one needs to carry version information." % (self, self.parent)
)
def compare(a, b):
return semver.VersionInfo.parse(makeSemVer(a.number)).compare(makeSemVer(b.number))
# return semver.compare(makeSemVer(a.number), makeSemVer(b.number))
versions = []
haveVersionNumbers = []
for version in self.versions:
versions.append(version)
haveVersionNumbers.append(makeSemVer(version.number))
for version in self.parent.versions:
if makeSemVer(version.number) not in haveVersionNumbers:
versions.append(version)
haveVersionNumbers.append(makeSemVer(version.number))
versions = sorted(versions, key=functools.cmp_to_key(compare))
return versions
def getDesigners(self):
"""\
Returns a list of ::Designer:: objects that this font references.
These are the combination of family-level designers and font-level designers.
The same logic as for versioning applies.
Please read the section about [versioning](#versioning) above.
"""
if not hasattr(self, "_designers"):
self._designers = []
# Family level designers
if self.parent.designerKeywords:
for designerKeyword in self.parent.designerKeywords:
self._designers.append(self.parent.parent.parent.getDesignerByKeyword(designerKeyword))
# Font level designers
if self.designerKeywords:
for designerKeyword in self.designerKeywords:
self._designers.append(self.parent.parent.parent.getDesignerByKeyword(designerKeyword))
return self._designers
def getPackageKeywords(self):
if self.packageKeywords:
return list(set(self.packageKeywords))
else:
return [DEFAULT]
def Font_Parent(self):
if hasattr(self, "_parent") and hasattr(self._parent, "_parent") and hasattr(self._parent._parent, "_parent"):
return self._parent._parent._parent
Font.parent = property(lambda self: Font_Parent(self))
class FontProxy(Proxy):
dataType = Font
class FontListProxy(ListProxy):
dataType = FontProxy
# Font Family
class Family(DictBasedObject):
# key: [data type, required, default value, description]
_structure = {
"uniqueID": [
StringDataType,
True,
None,
"A string that uniquely identifies this family within the publisher.",
],
"name": [
MultiLanguageTextProxy,
True,
None,
"Human-readable name of font family. This may include any additions "
"that you find useful to communicate to your users.",
],
"description": [
MultiLanguageLongTextProxy,
False,
None,
"Description of font family",
],
"billboardURLs": [
BillboardListProxy,
False,
None,
"List of URLs pointing at images to show for this typeface. "
"We suggest to use square dimensions and uncompressed SVG "
"images because they scale to all sizes smoothly, "
"but ultimately any size or HTML-compatible image type "
"is possible.",
],
"designerKeywords": [
DesignersReferencesListProxy,
False,
None,
"List of keywords referencing designers. These are defined at "
"::InstallableFontsResponse.designers::. In case designers differ "
"between fonts within the same family, they can also be defined at the "
"font level at ::Font.designerKeywords::. The font-level references take "
"precedence over the family-level references.",
],
"packages": [
FontPackageListProxy,
False,
None,
"Family-wide list of ::FontPackage:: objects. These will be "
"referenced by their keyword in ::Font.packageKeywords::",
],
"sourceURL": [
WebURLDataType,
False,
None,
"URL pointing to the source of a font project, such as a GitHub repository",
],
"issueTrackerURL": [
WebURLDataType,
False,
None,
"URL pointing to an issue tracker system, where users can debate "
"about a typeface’s design or technicalities",
],
"galleryURL": [
WebURLDataType,
False,
None,
"URL pointing to a web site that shows real world examples of the "
"fonts in use or other types of galleries.",
],
"versions": [
VersionListProxy,
False,
None,
"List of ::Version:: objects. Versions specified here are expected to "
"be available for all fonts in the family, which is probably most "
"common and efficient. You may define additional font-specific "
"versions at the ::Font:: object. You may also rely entirely on "
"font-specific versions and leave this field here empty. However, "
"either the fonts or the font family *must* carry version information "
"and the validator will complain when they don’t.\n\nPlease also read "
"the section on [versioning](#versioning) above.",
],
"fonts": [
FontListProxy,
True,
[],
"List of ::Font:: objects. The order will be displayed unchanged in "
"the UI, so it’s in your responsibility to order them correctly.",
],
"dateFirstPublished": [
DateDataType,
False,
None,
"Human readable date of the initial release of the family. May be "
"overridden on font level at ::Font.dateFirstPublished::.",
],
"pdfURL": [
WebResourceURLDataType,
False,
None,
"URL of PDF file with type specimen and/or instructions for entire "
"family. May be overridden on font level at ::Font.pdfURL::.",
],
}
def sample(self):
o = self.__class__()
o.name.en = "Awesome Family"
o.description.en = "Nice big fat face with smooth corners"
o.description.de = "Fette Groteske mit runden Ecken"
o.uniqueID = "AwesomeFonts-AwesomeFamily"
return o
def __repr__(self):
return "<Family '%s'>" % (self.name.getText() or "undefined")
def customValidation(self):
information, warnings, critical = [], [], []
# Checking for designers
for designerKeyword in self.designerKeywords:
if not self.parent.parent.getDesignerByKeyword(designerKeyword):
critical.append(
"Has designer '%s', but %s.designers has no matching designer."
% (designerKeyword, self.parent.parent)
)
return information, warnings, critical
def getDesigners(self):
if not hasattr(self, "_designers"):
self._designers = []
for designerKeyword in self.designerKeywords:
self._designers.append(self.parent.parent.getDesignerByKeyword(designerKeyword))
return self._designers
def getAllDesigners(self):
"""\
Returns a list of ::Designer:: objects that represent all of the designers
referenced both at the family level as well as with all the family’s fonts,
in case the fonts carry specific designers. This could be used to give a
one-glance overview of all designers involved.
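For example (hypothetical), to print every designer involved in a family:
```python
for designer in family.getAllDesigners():
    print(designer.name.getText())
```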
"""
if not hasattr(self, "_allDesigners"):
self._allDesigners = []
self._allDesignersKeywords = []
for designerKeyword in self.designerKeywords:
self._allDesigners.append(self.parent.parent.getDesignerByKeyword(designerKeyword))
self._allDesignersKeywords.append(designerKeyword)
for font in self.fonts:
for designerKeyword in font.designerKeywords:
if designerKeyword not in self._allDesignersKeywords:
self._allDesigners.append(self.parent.parent.getDesignerByKeyword(designerKeyword))
self._allDesignersKeywords.append(designerKeyword)
return self._allDesigners
def getPackages(self, filterByFontPurpose=[]):
packageKeywords = []
packages = []
packageByKeyword = {}
def passedFilter(font):
# Apply font.purpose filter
return not filterByFontPurpose or font.purpose in filterByFontPurpose
# Collect list of unique package keyword references in family's fonts
for font in self.fonts:
if passedFilter(font):
for keyword in font.getPackageKeywords():
if keyword not in packageKeywords:
packageKeywords.append(keyword)
# Prepend a DEFAULT package
if DEFAULT in packageKeywords:
defaultPackage = FontPackage()
defaultPackage.keyword = DEFAULT
defaultPackage.name.en = DEFAULT
packages.append(defaultPackage)
packageByKeyword[DEFAULT] = defaultPackage
# Build list of FontPackage objects
for package in self.packages:
if package.keyword in packageKeywords:
packages.append(package)
packageByKeyword[package.keyword] = package
# Attach fonts attribute to each package
for package in packages:
package.fonts = []
# Attach fonts to packages
for font in self.fonts:
if passedFilter(font):
for keyword in font.getPackageKeywords():
packageByKeyword[keyword].fonts.append(font)
return packages
def Family_Parent(self):
if hasattr(self, "_parent") and hasattr(self._parent, "_parent") and hasattr(self._parent._parent, "_parent"):
return self._parent._parent._parent
Family.parent = property(lambda self: Family_Parent(self))
class FamilyProxy(Proxy):
dataType = Family
class FamiliesListProxy(ListProxy):
dataType = FamilyProxy
########################################################################################
# Web Links
class WebURLListProxy(ListProxy):
dataType = WebURLDataType
########################################################################################
# Font Foundry
class StylingDataType(DictionaryDataType):
def exampleData(self):
return {
"light": {
"headerColor": "219BD3",
"headerTextColor": "000000",
"headerLinkColor": "145F7F",
"backgroundColor": "FFFFFF",
"textColor": "000000",
"linkColor": "F7AD22",
"selectionColor": "F7AD22",
"selectionTextColor": "000000",
"buttonColor": "197AA3",
"buttonTextColor": "FFFFFF",
"informationViewBackgroundColor": "F2F2F2",
"informationViewTextColor": "000000",
"informationViewLinkColor": "1D89B8",
"informationViewButtonColor": "197AA3",
"informationViewButtonTextColor": "FFFFFF",
"logoURL": "https://awesomefoundry.com/logo-lighttheme.svg",
},
"dark": {
"headerColor": "156486",
"headerTextColor": "000000",
"headerLinkColor": "53B9E4",
"backgroundColor": "262626",
"textColor": "999999",
"linkColor": "C07F07",
"selectionColor": "9A6606",
"selectionTextColor": "000000",
"buttonColor": "22A4DC",
"buttonTextColor": "000000",
"informationViewBackgroundColor": "1A1A1A",
"informationViewTextColor": "999999",
"informationViewLinkColor": "53B9E4",
"informationViewButtonColor": "22A4DC",
"informationViewButtonTextColor": "000000",
"logoURL": "https://awesomefoundry.com/logo-darktheme.svg",
},
}
class Foundry(DictBasedObject):
# key: [data type, required, default value, description]
_structure = {
"uniqueID": [
StringDataType,
True,
None,
"A string that uniquely identifies this foundry within the publisher.",
],
"name": [MultiLanguageTextProxy, True, None, "Name of foundry"],
"description": [
MultiLanguageLongTextProxy,
False,
None,
"Description of foundry",
],
"styling": [
StylingDataType,
False,
{"light": {}, "dark": {}},
"Dictionary of styling values, for light and dark theme. See example "
"below. If you want to style your foundry here, please start with the "
"light theme. You may omit the dark theme.",
],
"email": [
EmailDataType,
False,
None,
"General email address for this foundry.",
],
"websiteURL": [WebURLDataType, False, None, "Website for this foundry"],
"telephone": [
TelephoneDataType,
False,
None,
"Telephone number for this foundry",
],
"socialURLs": [
WebURLListProxy,
False,
None,
"List of web URLs pointing to social media channels",
],
"supportEmail": [
EmailDataType,
False,
None,
"Support email address for this foundry.",
],
"supportURL": [
WebURLDataType,
False,
None,
"Support website for this foundry, such as a chat room, forum, online service desk.",
],
"supportTelephone": [
TelephoneDataType,
False,
None,
"Support telephone number for this foundry.",
],
# data
"licenses": [
LicenseDefinitionListProxy,
True,
[],
"List of ::LicenseDefinition:: objects under which the fonts in this "
"response are issued. For space efficiency, these licenses are defined "
"at the foundry object and will be referenced in each font by their "
"keyword. Keywords need to be unique for this foundry and may repeat "
"across foundries.",
],
"families": [FamiliesListProxy, True, [], "List of ::Family:: objects."],
"packages": [
FontPackageListProxy,
False,
None,
"Foundry-wide list of ::FontPackage:: objects. These will be "
"referenced by their keyword in ::Font.packageKeywords::",
],
}
_stylingColorAttributes = (
"headerColor",
"headerTextColor",
"headerLinkColor",
"backgroundColor",
"textColor",
"linkColor",
"selectionColor",
"selectionTextColor",
"buttonColor",
"buttonTextColor",
"informationViewBackgroundColor",
"informationViewTextColor",
"informationViewLinkColor",
"informationViewButtonColor",
"informationViewButtonTextColor",
)
def sample(self):
o = self.__class__()
o.name.en = "Awesome Fonts"
o.name.de = "Geile Schriften"
o.websiteURL = "https://awesomefonts.com"
o.uniqueID = "AwesomeFonts"
return o
def __repr__(self):
return "<Foundry '%s'>" % (self.name.getText() or "undefined")
def getLicenseByKeyword(self, keyword):
if not hasattr(self, "_licensesDict"):
self._licensesDict = {}
for license in self.licenses:
self._licensesDict[license.keyword] = license
if keyword in self._licensesDict:
return self._licensesDict[keyword]
def customValidation(self):
information, warnings, critical = [], [], []
themes = ["light", "dark"]
if self.styling:
for theme in self.styling:
if theme not in themes:
critical.append("Styling keyword '%s' is unknown. Known are %s." % (theme, themes))
for colorKey in self._stylingColorAttributes:
if colorKey in self.styling[theme]:
c = HexColorDataType()
c.value = self.styling[theme][colorKey]
valid = c.valid()
if valid is not True:
critical.append(".styling color attribute '%s': %s" % (colorKey, valid))
if "logoURL" in self.styling[theme]:
logo = WebURLDataType()
logo.value = self.styling[theme]["logoURL"]
valid = logo.valid()
if valid is not True:
critical.append(".styling 'logoURL' attribute: %s" % (valid))
return information, warnings, critical
def Foundry_Parent(self):
if hasattr(self, "_parent") and hasattr(self._parent, "_parent") and hasattr(self._parent._parent, "_parent"):
return self._parent._parent._parent
Foundry.parent = property(lambda self: Foundry_Parent(self))
class FoundryProxy(Proxy):
dataType = Foundry
class FoundryListProxy(ListProxy):
dataType = FoundryProxy
class CommercialAppsAllowedProxy(Proxy):
dataType = str
class CommercialAppsAllowedListProxy(ListProxy):
dataType = CommercialAppsAllowedProxy
includeEmpty = True
########################################################################################
# Base Response
class BaseResponse(DictBasedObject):
def __repr__(self):
return "<%s>" % self.__class__.__name__
def customValidation(self):
information, warnings, critical = [], [], []
if hasattr(self, "response") and self.response == ERROR and self.errorMessage.isEmpty():
critical.append(f".response is '{ERROR}', but .errorMessage is missing.")
return information, warnings, critical
########################################################################################
# Available Fonts
class InstallableFontsResponseType(ResponseCommandDataType):
def valid(self):
if not self.value:
return True
if self.value in INSTALLABLEFONTSCOMMAND["responseTypes"]:
return True
else:
return "Unknown response type: '%s'. Possible: %s" % (
self.value,
INSTALLABLEFONTSCOMMAND["responseTypes"],
)
class InstallableFontsResponse(BaseResponse):
"""\
This is the response expected to be returned when the API is invoked using the
`?commands=installableFonts` parameter, and contains metadata about which fonts
are available to install for a user.
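A minimal sketch of assembling such a response on the server side (all values
are hypothetical, and validation is skipped for brevity):
```python
response = InstallableFontsResponse()
response.response = 'success'
response.name.en = 'Commercial Fonts'
response.foundries.append(Foundry().sample())
print(response.dumpJSON())
```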
"""
_command = INSTALLABLEFONTSCOMMAND
# key: [data type, required, default value, description]
_structure = {
# Root
"response": [
InstallableFontsResponseType,
True,
None,
"Type of response: %s" % (ResponsesDocu(INSTALLABLEFONTSCOMMAND["responseTypes"])),
],
"errorMessage": [
MultiLanguageTextProxy,
False,
None,
"Description of error in case of ::InstallableFontsResponse.response:: being 'custom'.",
],
# Response-specific
"designers": [
DesignersListProxy,
False,
None,
"List of ::Designer:: objects, referenced in the fonts or font "
"families by the keyword. These are defined at the root of the "
"response for space efficiency, as one designer can be involved in "
"the design of several typefaces across several foundries.",
],
"foundries": [
FoundryListProxy,
True,
[],
"List of ::Foundry:: objects; foundries that this distributor "
"supports. In most cases this will be only one, as many foundries "
"are their own distributors.",
],
"packages": [
FontPackageListProxy,
False,
None,
"Publisher-wide list of ::FontPackage:: objects. These will be "
"referenced by their keyword in ::Font.packageKeywords::",
],
"name": [
MultiLanguageTextProxy,
False,
None,
"A name of this response and its contents. This is needed to manage "
"subscriptions in the UI. For instance 'Free Fonts' for all free and "
"non-restricted fonts, or 'Commercial Fonts' for all those fonts that "
"the user has commercially licensed, so their access is restricted. "
"In case of a free font website that offers individual subscriptions "
"for each typeface, this description could be the name of the typeface.",
],
"userName": [
MultiLanguageTextProxy,
False,
None,
"The name of the user who these fonts are licensed to.",
],
"userEmail": [
EmailDataType,
False,
None,
"The email address of the user who these fonts are licensed to.",
],
"userIsVerified": [
BooleanDataType,
False,
False,
"This user is known to the publisher. The initial implication for this is to not display the Terms of"
" Service and Privacy banner to this user, as they have already agreed to the terms on the publisher’s"
" website. Only new users (invitees) will be presented with the banner.",
],
"prefersRevealedUserIdentity": [
BooleanDataType,
True,
False,
"Indicates that the publisher prefers to have the user reveal his/her "
"identity to the publisher when installing fonts. In the app, the user "
"will be asked via a dialog to turn the setting on, but is not "
"required to do so.",
],
}
def getFontByUniqueID(self, ID):
for foundry in self.foundries:
for family in foundry.families:
for font in family.fonts:
if font.uniqueID == ID:
return font
def getContentChanges(self, other, calculateOverallChanges=True):
comparison = {}
oldFonts = []
newFonts = []
newVersions = 0
# Accumulate old and new fonts
for foundry in self.foundries:
for family in foundry.families:
for font in family.fonts:
oldFonts.append(font.uniqueID)
for foundry in other.foundries:
for family in foundry.families:
for font in family.fonts:
newFonts.append(font.uniqueID)
# Versions
oldFont = self.getFontByUniqueID(font.uniqueID)
if oldFont and len(font.getVersions()) > len(oldFont.getVersions()):
newVersions += 1
# Added or removed fonts
addedFonts = set(newFonts) - set(oldFonts)
if addedFonts:
comparison["addedFonts"] = len(addedFonts)
comparison["overallChanges"] = True
removedFonts = set(oldFonts) - set(newFonts)
if removedFonts:
comparison["removedFonts"] = len(removedFonts)
comparison["overallChanges"] = True
if newVersions:
comparison["fontsWithAddedVersions"] = newVersions
comparison["overallChanges"] = True
# Other content changes (including the above ones)
if calculateOverallChanges:
identical = self.sameContent(other)
if not identical:
comparison["overallChanges"] = True
return comparison
def sample(self):
o = self.__class__()
o.response = "success"
return o
def getDesignerByKeyword(self, keyword):
if not hasattr(self, "_designersDict"):
self._designersDict = {}
for designer in self.designers:
self._designersDict[designer.keyword] = designer
if keyword in self._designersDict:
return self._designersDict[keyword]
def discardThisKey(self, key):
if key in ["foundries", "designers", "licenseIdentifier"] and self.response != "success":
return True
return False
def customValidation(self):
information, warnings, critical = [], [], []
if hasattr(self, "response") and self.response == ERROR and self.errorMessage.isEmpty():
critical.append(f".response is '{ERROR}', but .errorMessage is missing.")
if self.response == "success" and not self.name.getText():
warnings.append(
"The response has no .name value. It is not required, but highly "
"recommended, to describe the purpose of this subscription to the "
"user (such as 'Commercial Fonts', 'Free Fonts', etc.). This is "
"especially useful if you offer several different subscriptions "
"to the same user."
)
# Check all uniqueIDs for duplicity
foundryIDs = []
familyIDs = []
fontIDs = []
for foundry in self.foundries:
foundryIDs.append(foundry.uniqueID)
for family in foundry.families:
familyIDs.append(family.uniqueID)
for font in family.fonts:
fontIDs.append(font.uniqueID)
import collections
duplicateFoundryIDs = [item for item, count in list(collections.Counter(foundryIDs).items()) if count > 1]
if duplicateFoundryIDs:
critical.append("Duplicate unique foundry IDs: %s" % duplicateFoundryIDs)
duplicateFamilyIDs = [item for item, count in list(collections.Counter(familyIDs).items()) if count > 1]
if duplicateFamilyIDs:
critical.append("Duplicate unique family IDs: %s" % duplicateFamilyIDs)
duplicateFontIDs = [item for item, count in list(collections.Counter(fontIDs).items()) if count > 1]
if duplicateFontIDs:
critical.append("Duplicate unique font IDs: %s" % duplicateFontIDs)
newInformation, newWarnings, newCritical = super().customValidation()
if newInformation:
information.extend(newInformation)
if newWarnings:
warnings.extend(newWarnings)
if newCritical:
critical.extend(newCritical)
return information, warnings, critical
########################################################################################
# InstallFonts
class InstallFontAssetResponseType(ResponseCommandDataType):
def valid(self):
if not self.value:
return True
if self.value in INSTALLFONTASSETCOMMAND["responseTypes"]:
return True
else:
return "Unknown response type: '%s'. Possible: %s" % (
self.value,
INSTALLFONTASSETCOMMAND["responseTypes"],
)
class InstallFontAsset(BaseResponse):
"""\
This is the response expected to be returned when the API is invoked using the
`?commands=installFonts` parameter.
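A hypothetical sketch of one asset, serving the font either via a URL or as
inline data (the two are mutually exclusive):
```python
asset = InstallFontAsset()
asset.response = 'success'
asset.uniqueID = 'AwesomeFonts-AwesomeFamily-Bold'
asset.version = '1.1'
asset.mimeType = 'font/otf'
asset.dataURL = 'https://awesomefonts.com/fonts/AwesomeFamily-Bold.otf'
# ...or instead set .data and .encoding for inline binary data
```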
"""
# key: [data type, required, default value, description]
_structure = {
# Root
"response": [
InstallFontAssetResponseType,
True,
None,
"Type of response: %s" % (ResponsesDocu(INSTALLFONTASSETCOMMAND["responseTypes"])),
],
"errorMessage": [
MultiLanguageTextProxy,
False,
None,
"Description of error in case of custom response type",
],
"uniqueID": [
StringDataType,
True,
None,
"A machine-readable string that uniquely identifies this font within "
"the subscription. Must match the requested fonts.",
],
"version": [
VersionDataType,
True,
None,
"Font version. Must match the requested fonts.",
],
"mimeType": [
FontMimeType,
False,
None,
"MIME Type of data. For desktop fonts, these are %s." % FONTPURPOSES["desktop"]["acceptableMimeTypes"],
],
"dataURL": [
WebURLDataType,
False,
None,
"HTTP link of font file resource. ::InstallFontAsset.data:: and "
"::InstallFontAsset.dataURL:: are mutually exclusive; only one can be "
"specified. The HTTP resource must be served under the correct "
"MIME type specified in ::InstallFontAsset.mimeType:: and is expected "
"to be in raw binary encoding; ::InstallFontAsset.encoding:: "
"is not regarded.",
],
"data": [
FontDataType,
False,
None,
"Binary data as a string encoded as one of the following supported "
"encodings: ::InstallFontResponse.encoding::. "
"::InstallFontAsset.data:: and ::InstallFontAsset.dataURL:: are "
"mutually exclusive; only one can be specified.",
],
"encoding": [
FontEncodingDataType,
False,
None,
"Encoding type for font data in ::InstallFontResponse.data::. Currently supported: %s" % (FONTENCODINGS),
],
}
def sample(self):
o = self.__class__()
o.response = "success"
o.uniqueID = "AwesomeFonts-AwesomeFamily-Bold"
o.mimeType = "font/otf"
o.data = "emplNXpqdGpoNXdqdHp3enRq..."
o.encoding = "base64"
o.version = "1.1"
return o
def customValidation(self):
information, warnings, critical = [], [], []
if self.response == "success" and (not self.data and not self.dataURL):
critical.append(".response is set to success, but neither .data nor .dataURL are set.")
if self.data and not self.encoding:
critical.append(".data is set, but .encoding is missing")
if self.data and not self.mimeType:
critical.append(".data is set, but .mimeType is missing")
if self.dataURL and not self.mimeType:
critical.append(".dataURL is set, but .mimeType is missing")
if self.dataURL and self.data:
critical.append("Either .dataURL or .data can be defined, not both")
if self.response == ERROR and self.errorMessage.isEmpty():
critical.append(".response is '%s', but .errorMessage is missing." % (ERROR))
newInformation, newWarnings, newCritical = super().customValidation()
if newInformation:
information.extend(newInformation)
if newWarnings:
warnings.extend(newWarnings)
if newCritical:
critical.extend(newCritical)
return information, warnings, critical
class InstallFontResponseType(ResponseCommandDataType):
def valid(self):
if not self.value:
return True
if self.value in INSTALLFONTSCOMMAND["responseTypes"]:
return True
else:
return "Unknown response type: '%s'. Possible: %s" % (
self.value,
INSTALLFONTSCOMMAND["responseTypes"],
)
class InstallFontAssetProxy(Proxy):
dataType = InstallFontAsset
class InstallFontAssetListProxy(ListProxy):
dataType = InstallFontAssetProxy
class InstallFontsResponse(BaseResponse):
"""\
This is the response expected to be returned when the API is invoked using the
`?commands=installFonts` parameter, and contains the requested binary fonts
attached as ::InstallFontAsset:: objects.
"""
_command = INSTALLFONTSCOMMAND
# key: [data type, required, default value, description]
_structure = {
# Root
"response": [
InstallFontResponseType,
True,
None,
"Type of response: %s" % (ResponsesDocu(INSTALLFONTSCOMMAND["responseTypes"])),
],
"errorMessage": [
MultiLanguageTextProxy,
False,
None,
"Description of error in case of custom response type",
],
"assets": [
InstallFontAssetListProxy,
False,
None,
"List of ::InstallFontAsset:: objects.",
],
}
def sample(self):
o = self.__class__()
o.response = "success"
o.assets = [InstallFontAsset().sample()]
return o
########################################################################################
# Uninstall Fonts
class UninstallFontAssedResponseType(ResponseCommandDataType):
def valid(self):
if not self.value:
return True
if self.value in UNINSTALLFONTASSETCOMMAND["responseTypes"]:
return True
else:
return "Unknown response type: '%s'. Possible: %s" % (
self.value,
UNINSTALLFONTASSETCOMMAND["responseTypes"],
)
class UninstallFontAsset(BaseResponse):
"""\
This is the response expected to be returned when the API is invoked using the
`?commands=uninstallFonts` parameter.
"""
# key: [data type, required, default value, description]
_structure = {
# Root
"response": [
UninstallFontAssedResponseType,
True,
None,
"Type of response: %s" % (ResponsesDocu(UNINSTALLFONTASSETCOMMAND["responseTypes"])),
],
"errorMessage": [
MultiLanguageTextProxy,
False,
None,
"Description of error in case of custom response type",
],
"uniqueID": [
StringDataType,
True,
None,
"A machine-readable string that uniquely identifies this font within "
"the subscription. Must match the requested fonts.",
],
# Response-specific
}
def sample(self):
o = self.__class__()
o.response = "success"
o.uniqueID = "AwesomeFonts-AwesomeFamily-Bold"
return o
class UninstallFontResponseType(ResponseCommandDataType):
def valid(self):
if not self.value:
return True
if self.value in UNINSTALLFONTSCOMMAND["responseTypes"]:
return True
else:
return "Unknown response type: '%s'. Possible: %s" % (
self.value,
UNINSTALLFONTSCOMMAND["responseTypes"],
)
class UninstallFontAssetProxy(Proxy):
dataType = UninstallFontAsset
class UninstallFontAssetListProxy(ListProxy):
dataType = UninstallFontAssetProxy
class UninstallFontsResponse(BaseResponse):
"""\
This is the response expected to be returned when the API is invoked using the
`?commands=uninstallFonts` parameter, and contains empty responses as
::UninstallFontAsset:: objects.
While empty of data, these asset objects are still necessary because each font
uninstallation request may return a different response, to which the GUI app needs
    to respond accordingly.
"""
_command = UNINSTALLFONTSCOMMAND
# key: [data type, required, default value, description]
_structure = {
# Root
"response": [
UninstallFontResponseType,
True,
None,
"Type of response: %s" % (ResponsesDocu(UNINSTALLFONTSCOMMAND["responseTypes"])),
],
"errorMessage": [
MultiLanguageTextProxy,
False,
None,
"Description of error in case of custom response type",
],
"assets": [
UninstallFontAssetListProxy,
False,
None,
"List of ::UninstallFontAsset:: objects.",
],
}
def sample(self):
o = self.__class__()
o.response = "success"
o.assets = [UninstallFontAsset().sample()]
return o
########################################################################################
class EndpointResponse(BaseResponse):
"""\
This is the response expected to be returned when the API is invoked using the
`?commands=endpoint` parameter.
This response contains some mandatory information about the API endpoint such as its
name and admin email, the copyright license under which the API endpoint issues its
data, and whether or not this endpoint can be publicized about.
"""
_command = ENDPOINTCOMMAND
# key: [data type, required, default value, description]
_structure = {
"canonicalURL": [
WebURLDataType,
True,
None,
(
"Same as the API Endpoint URL, bare of IDs and other parameters. "
"Used for grouping of subscriptions. It is expected that this URL "
"will not change. When it does, it will be treated as a different "
"publisher.<br />"
"The *API Endpoint URL* must begin with the *Canonical URL* "
"(if you indeed choose the two to be different) or otherwise "
"subscriptions could impersonate another publisher by displaying "
"their name and using their Canonical URL. In other words, "
"both must be located on the same server."
# TODO: Actually implement the above security feature
),
],
"adminEmail": [
EmailDataType,
True,
None,
"API endpoint Administrator. This email needs to be reachable for "
"various information around the Type.World protocol as well as "
"technical problems.",
],
"licenseIdentifier": [
OpenSourceLicenseIdentifierDataType,
True,
"CC-BY-NC-ND-4.0",
"Machine-readable identifier of license under which the API Endpoint "
"publishes its (metda)data, "
"as per [https://spdx.org/licenses/](). This license will not "
"be presented to the user. Instead, the software client that accesses "
"your API Endpoint needs to be aware of "
"the license and proceed only if allowed, otherwise decline the usage "
"of this API endpoint. In other words, the non-commercial "
"`CC-BY-NC-ND-4.0` license that is the default here forbids commercial "
"software from accessing your API Endpoint unless they have a separate "
"legal agreememt with you.",
],
"publisherTypes": [
SupportedPublisherTypeListProxy,
True,
[],
f"List of publisher business types: {PUBLISHERTYPES}. "
"In case ::EndpointResponse.public:: is set to `True`, only the "
f"following types are allowed: {PUBLICPUBLISHERTYPES}",
],
"supportedCommands": [
SupportedAPICommandsListProxy,
True,
None,
"List of commands this API endpoint supports: %s" % [x["keyword"] for x in COMMANDS],
],
"name": [
MultiLanguageTextProxy,
True,
None,
"Human-readable name of API endpoint",
],
"public": [
BooleanDataType,
True,
False,
"API endpoint is meant to be publicly visible and its existence may be publicized within the project",
],
"sendsLiveNotifications": [
BooleanDataType,
True,
False,
"API endpoint is sending live notifications through the central server,"
" namely through the `updateSubscription` command. "
"The app won’t start listening to live notifications unless a "
"subscription holds this setting. ",
],
"allowedCommercialApps": [
CommercialAppsAllowedListProxy,
True,
["world.type.app"],
"Machine-readable list of commercial apps that are allowed to "
"access this API Endpoint in case "
"::EndpointResponse.licenseIdentifier:: carries a non-commercial "
"copyright license such as the default `CC-BY-NC-ND-4.0`. "
"A reverse-domain notation for the app ID is recommended "
"but not required. "
"Note: As the originator of the entire technology, the Type.World App "
"is on this list by default, even though it is a commercial app. "
"This is for backwards-compatibility for endpoints that don’t "
"carry this attribute yet but are expected to allow access by "
"Type.World. If you don’t want the Type.World to access "
"your API Endpoint, you may explicitly unset this attribute to an "
"empty list: `endpoint.allowedCommercialApps = []`",
],
"logoURL": [
WebResourceURLDataType,
False,
None,
"URL of logo of API endpoint, for publication. Specifications to follow.",
],
"backgroundColor": [
HexColorDataType,
False,
None,
"Publisher’s preferred background color. This is meant to go as a "
"background color to the logo at ::APIRoot.logoURL::",
],
"websiteURL": [
WebURLDataType,
False,
None,
"URL of human-visitable website of API endpoint, for publication",
],
"privacyPolicyURL": [
WebURLDataType,
True,
"https://type.world/legal/default/PrivacyPolicy.html",
"URL of human-readable Privacy Policy of API endpoint. This will be "
"displayed to the user for consent when adding a subscription. "
"The default URL points to a document edited by Type.World that you "
"can use (at your own risk) instead of having to write your own.",
],
"termsOfServiceURL": [
WebURLDataType,
True,
"https://type.world/legal/default/TermsOfService.html",
"URL of human-readable Terms of Service Agreement of API endpoint. "
"This will be displayed to the user for consent when adding a "
"subscription. The default URL points to a document edited by "
"Type.World that you can use (at your own risk) instead of having to "
"write your own.",
],
"loginURL": [
WebURLDataType,
False,
None,
"URL for user to log in to publisher’s account in case a validation "
"is required. This normally work in combination with the "
"`loginRequired` response.",
],
}
def sample(self):
o = self.__class__()
o.canonicalURL = "https://awesomefonts.com/api/"
o.adminEmail = "[email protected]"
o.supportedCommands = [
"endpoint",
"installableFonts",
"installFonts",
"uninstallFonts",
]
o.name.en = "Awesome Fonts"
o.name.de = "Geile Schriften"
o.privacyPolicyURL = "https://awesomefonts.com/privacypolicy.html"
o.termsOfServiceURL = "https://awesomefonts.com/termsofservice.html"
o.public = True
return o
def customValidation(self):
information, warnings, critical = [], [], []
if self.canonicalURL and not self.canonicalURL.startswith("https://"):
warnings.append(".canonicalURL is not using SSL (https://). Consider using SSL to protect your data.")
if self.public:
for publisherType in self.publisherTypes:
if publisherType not in PUBLICPUBLISHERTYPES:
critical.append(
"When EndpointResponse.public is set to True, then only a "
"restricted set of types is allowed for "
f"EndpointResponse.publisherTypes: {PUBLICPUBLISHERTYPES}. "
f"You have '{publisherType}'"
)
return information, warnings, critical
########################################################################################
# Root Response
class EndpointResponseProxy(Proxy):
dataType = EndpointResponse
class InstallableFontsResponseProxy(Proxy):
dataType = InstallableFontsResponse
class InstallFontsResponseProxy(Proxy):
dataType = InstallFontsResponse
class UninstallFontsResponseProxy(Proxy):
dataType = UninstallFontsResponse
class RootResponse(BaseResponse):
"""\
This is the root object for each response, and contains one or more individual
response objects as requested in the `commands` parameter of API endpoint calls.
This exists to speed up processes by reducing server calls. For instance,
    installing a protected font and afterwards asking for a refreshed
    `installableFonts` command requires two separate calls to the publisher’s API
    endpoint, which in turn needs to verify the requester’s identity with the central
type.world server. By requesting `installFonts,installableFonts` commands in one go,
a lot of time is saved.
"""
# key: [data type, required, default value, description]
_structure = {
# Root
"endpoint": [
EndpointResponseProxy,
False,
None,
"::EndpointResponse:: object.",
],
"installableFonts": [
InstallableFontsResponseProxy,
False,
None,
"::InstallableFontsResponse:: object.",
],
"installFonts": [
InstallFontsResponseProxy,
False,
None,
"::InstallFontsResponse:: object.",
],
"uninstallFonts": [
UninstallFontsResponseProxy,
False,
None,
"::UninstallFontsResponse:: object.",
],
"version": [
VersionDataType,
True,
INSTALLFONTSCOMMAND["currentVersion"],
"Version of '%s' response" % INSTALLFONTSCOMMAND["keyword"],
],
}
def sample(self):
o = self.__class__()
o.endpoint = EndpointResponse().sample()
o.installableFonts = InstallableFontsResponse().sample()
return o
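# Hedged usage sketch (an addition, not part of the original module): assembling a
# combined response as described in the RootResponse docstring. The dumpJSON() and
# validate() calls are assumed from the shared object model; adjust them if the
# actual base-class API differs.
#
#   root = RootResponse()
#   root.installFonts = InstallFontsResponse().sample()
#   root.installableFonts = InstallableFontsResponse().sample()
#   information, warnings, critical = root.validate()
#   if not critical:
#       payload = root.dumpJSON()  # JSON string to return to the requesting app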
| 31.305015 | 119 | 0.542243 | ["Apache-2.0"] | typeWorld/api | Lib/typeworld/api/__init__.py | 128,690 | Python
import json
import datetime
import traceback
import re
from base64 import b64encode
from ast import literal_eval
from flask import Blueprint, render_template, render_template_string, make_response, url_for, current_app, request, redirect, jsonify, abort, flash, session
from flask_login import login_required, current_user
from ..decorators import operator_role_required, admin_role_required, history_access_required
from ..models.user import User
from ..models.account import Account
from ..models.account_user import AccountUser
from ..models.role import Role
from ..models.server import Server
from ..models.setting import Setting
from ..models.history import History
from ..models.domain import Domain
from ..models.domain_user import DomainUser
from ..models.record import Record
from ..models.domain_template import DomainTemplate
from ..models.domain_template_record import DomainTemplateRecord
from ..models.api_key import ApiKey
from ..models.base import db
from ..lib.schema import ApiPlainKeySchema
from ..lib.errors import ApiKeyCreateFail
apikey_plain_schema = ApiPlainKeySchema(many=True)
admin_bp = Blueprint('admin',
__name__,
template_folder='templates',
url_prefix='/admin')
"""
changeSet is a list of tuples, in the following format
(old_state, new_state, change_type)
old_state: dictionary with "disabled" and "content" keys. {"disabled" : False, "content" : "1.1.1.1" }
new_state: similarly
change_type: "addition" or "deletion" or "status" for status change or "unchanged" for no change
Note: A change in "content" is considered a deletion and recreation of the same record,
holding the new content value.
"""
def get_record_changes(del_rrest, add_rrest):
changeSet = []
delSet = del_rrest['records'] if 'records' in del_rrest else []
addSet = add_rrest['records'] if 'records' in add_rrest else []
for d in delSet: # get the deletions and status changes
exists = False
for a in addSet:
if d['content'] == a['content']:
exists = True
if d['disabled'] != a['disabled']:
changeSet.append( ({"disabled":d['disabled'],"content":d['content']},
{"disabled":a['disabled'],"content":a['content']},
"status") )
break
if not exists: # deletion
changeSet.append( ({"disabled":d['disabled'],"content":d['content']},
None,
"deletion") )
for a in addSet: # get the additions
exists = False
for d in delSet:
if d['content'] == a['content']:
exists = True
# already checked for status change
break
if not exists:
changeSet.append( (None, {"disabled":a['disabled'], "content":a['content']}, "addition") )
continue
for a in addSet: # get the unchanged
exists = False
for c in changeSet:
if c[1] != None and c[1]["content"] == a['content']:
exists = True
break
if not exists:
changeSet.append( ( {"disabled":a['disabled'], "content":a['content']}, {"disabled":a['disabled'], "content":a['content']}, "unchanged") )
return changeSet
# out_changes is a list of HistoryRecordEntry objects in which we will append the new changes
# a HistoryRecordEntry represents a pair of add_rrest and del_rrest
def extract_changelogs_from_a_history_entry(out_changes, history_entry, change_num, record_name=None, record_type=None):
if history_entry.detail is None:
return
if "add_rrests" in history_entry.detail:
detail_dict = json.loads(history_entry.detail)
else: # not a record entry
return
add_rrests = detail_dict['add_rrests']
del_rrests = detail_dict['del_rrests']
for add_rrest in add_rrests:
exists = False
for del_rrest in del_rrests:
if del_rrest['name'] == add_rrest['name'] and del_rrest['type'] == add_rrest['type']:
exists = True
if change_num not in out_changes:
out_changes[change_num] = []
out_changes[change_num].append(HistoryRecordEntry(history_entry, del_rrest, add_rrest, "*"))
break
if not exists: # this is a new record
if change_num not in out_changes:
out_changes[change_num] = []
            out_changes[change_num].append(HistoryRecordEntry(history_entry, [], add_rrest, "+")) # (history_entry, del_rrest, add_rrest, change_type)
for del_rrest in del_rrests:
exists = False
for add_rrest in add_rrests:
if del_rrest['name'] == add_rrest['name'] and del_rrest['type'] == add_rrest['type']:
exists = True # no need to add in the out_changes set
break
if not exists: # this is a deletion
if change_num not in out_changes:
out_changes[change_num] = []
out_changes[change_num].append(HistoryRecordEntry(history_entry, del_rrest, [], "-"))
# only used for changelog per record
if record_name != None and record_type != None: # then get only the records with the specific (record_name, record_type) tuple
if change_num in out_changes:
changes_i = out_changes[change_num]
else:
return
for hre in changes_i: # for each history record entry in changes_i
if 'type' in hre.add_rrest and hre.add_rrest['name'] == record_name and hre.add_rrest['type'] == record_type:
continue
elif 'type' in hre.del_rrest and hre.del_rrest['name'] == record_name and hre.del_rrest['type'] == record_type:
continue
else:
out_changes[change_num].remove(hre)
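# Hedged illustration (assumed History.detail shape): for a history entry whose
# detail JSON contains 'add_rrests' and 'del_rrests',
#
#   out_changes = {}
#   extract_changelogs_from_a_history_entry(out_changes, history_entry, 0)
#
# leaves out_changes as {0: [HistoryRecordEntry, ...]}, one entry per (name, type)
# pair that was added ("+"), removed ("-") or edited ("*").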
# records with same (name,type) are considered as a single HistoryRecordEntry
# history_entry is of type History - used to extract created_by and created_on
# add_rrest is a dictionary of replace
# del_rrest is a dictionary of remove
class HistoryRecordEntry:
def __init__(self, history_entry, del_rrest, add_rrest, change_type):
# search the add_rrest index into the add_rrest set for the key (name, type)
self.history_entry = history_entry
self.add_rrest = add_rrest
self.del_rrest = del_rrest
self.change_type = change_type # "*": edit or unchanged, "+" new tuple(name,type), "-" deleted (name,type) tuple
self.changed_fields = [] # contains a subset of : [ttl, name, type]
self.changeSet = [] # all changes for the records of this add_rrest-del_rrest pair
if change_type == "+": # addition
self.changed_fields.append("name")
self.changed_fields.append("type")
self.changed_fields.append("ttl")
self.changeSet = get_record_changes(del_rrest, add_rrest)
elif change_type == "-": # removal
self.changed_fields.append("name")
self.changed_fields.append("type")
self.changed_fields.append("ttl")
self.changeSet = get_record_changes(del_rrest, add_rrest)
elif change_type == "*": # edit of unchanged
if add_rrest['ttl'] != del_rrest['ttl']:
self.changed_fields.append("ttl")
self.changeSet = get_record_changes(del_rrest, add_rrest)
def toDict(self):
return {
"add_rrest" : self.add_rrest,
"del_rrest" : self.del_rrest,
"changed_fields" : self.changed_fields,
"created_on" : self.history_entry.created_on,
"created_by" : self.history_entry.created_by,
"change_type" : self.change_type,
"changeSet" : self.changeSet
}
def __eq__(self, obj2): # used for removal of objects from a list
return True if obj2.toDict() == self.toDict() else False
@admin_bp.before_request
def before_request():
# Manage session timeout
session.permanent = True
# current_app.permanent_session_lifetime = datetime.timedelta(
# minutes=int(Setting().get('session_timeout')))
current_app.permanent_session_lifetime = datetime.timedelta(
minutes=int(Setting().get('session_timeout')))
session.modified = True
@admin_bp.route('/pdns', methods=['GET'])
@login_required
@operator_role_required
def pdns_stats():
if not Setting().get('pdns_api_url') or not Setting().get(
'pdns_api_key') or not Setting().get('pdns_version'):
return redirect(url_for('admin.setting_pdns'))
domains = Domain.query.all()
users = User.query.all()
server = Server(server_id='localhost')
configs = server.get_config()
statistics = server.get_statistic()
history_number = History.query.count()
if statistics:
uptime = list([
uptime for uptime in statistics if uptime['name'] == 'uptime'
])[0]['value']
else:
uptime = 0
return render_template('admin_pdns_stats.html',
domains=domains,
users=users,
configs=configs,
statistics=statistics,
uptime=uptime,
history_number=history_number)
@admin_bp.route('/user/edit/<user_username>', methods=['GET', 'POST'])
@admin_bp.route('/user/edit', methods=['GET', 'POST'])
@login_required
@operator_role_required
def edit_user(user_username=None):
if user_username:
user = User.query.filter(User.username == user_username).first()
create = False
if not user:
return render_template('errors/404.html'), 404
if user.role.name == 'Administrator' and current_user.role.name != 'Administrator':
return render_template('errors/401.html'), 401
else:
user = None
create = True
if request.method == 'GET':
return render_template('admin_edit_user.html',
user=user,
create=create)
elif request.method == 'POST':
fdata = request.form
if create:
user_username = fdata.get('username', '').strip()
user = User(username=user_username,
plain_text_password=fdata.get('password', ''),
firstname=fdata.get('firstname', '').strip(),
lastname=fdata.get('lastname', '').strip(),
email=fdata.get('email', '').strip(),
reload_info=False)
if create:
if not fdata.get('password', ''):
return render_template('admin_edit_user.html',
user=user,
create=create,
blank_password=True)
result = user.create_local_user()
history = History(msg='Created user {0}'.format(user.username),
created_by=current_user.username)
else:
result = user.update_local_user()
history = History(msg='Updated user {0}'.format(user.username),
created_by=current_user.username)
if result['status']:
history.add()
return redirect(url_for('admin.manage_user'))
return render_template('admin_edit_user.html',
user=user,
create=create,
error=result['msg'])
@admin_bp.route('/key/edit/<key_id>', methods=['GET', 'POST'])
@admin_bp.route('/key/edit', methods=['GET', 'POST'])
@login_required
@operator_role_required
def edit_key(key_id=None):
domains = Domain.query.all()
accounts = Account.query.all()
roles = Role.query.all()
apikey = None
create = True
plain_key = None
if key_id:
apikey = ApiKey.query.filter(ApiKey.id == key_id).first()
create = False
if not apikey:
return render_template('errors/404.html'), 404
if request.method == 'GET':
return render_template('admin_edit_key.html',
key=apikey,
domains=domains,
accounts=accounts,
roles=roles,
create=create)
if request.method == 'POST':
fdata = request.form
description = fdata['description']
role = fdata.getlist('key_role')[0]
domain_list = fdata.getlist('key_multi_domain')
account_list = fdata.getlist('key_multi_account')
# Create new apikey
if create:
if role == "User":
domain_obj_list = Domain.query.filter(Domain.name.in_(domain_list)).all()
account_obj_list = Account.query.filter(Account.name.in_(account_list)).all()
else:
account_obj_list, domain_obj_list = [], []
apikey = ApiKey(desc=description,
role_name=role,
domains=domain_obj_list,
accounts=account_obj_list)
try:
apikey.create()
except Exception as e:
current_app.logger.error('Error: {0}'.format(e))
raise ApiKeyCreateFail(message='Api key create failed')
plain_key = apikey_plain_schema.dump([apikey])[0]["plain_key"]
plain_key = b64encode(plain_key.encode('utf-8')).decode('utf-8')
history_message = "Created API key {0}".format(apikey.id)
# Update existing apikey
else:
try:
if role != "User":
domain_list, account_list = [], []
apikey.update(role,description,domain_list, account_list)
history_message = "Updated API key {0}".format(apikey.id)
except Exception as e:
current_app.logger.error('Error: {0}'.format(e))
history = History(msg=history_message,
detail = json.dumps({
'key': apikey.id,
'role': apikey.role.name,
'description': apikey.description,
'domains': [domain.name for domain in apikey.domains],
'accounts': [a.name for a in apikey.accounts]
}),
created_by=current_user.username)
history.add()
return render_template('admin_edit_key.html',
key=apikey,
domains=domains,
accounts=accounts,
roles=roles,
create=create,
plain_key=plain_key)
@admin_bp.route('/manage-keys', methods=['GET', 'POST'])
@login_required
@operator_role_required
def manage_keys():
if request.method == 'GET':
try:
apikeys = ApiKey.query.all()
except Exception as e:
current_app.logger.error('Error: {0}'.format(e))
abort(500)
return render_template('admin_manage_keys.html',
keys=apikeys)
elif request.method == 'POST':
jdata = request.json
if jdata['action'] == 'delete_key':
apikey = ApiKey.query.get(jdata['data'])
try:
history_apikey_id = apikey.id
history_apikey_role = apikey.role.name
history_apikey_description = apikey.description
history_apikey_domains = [ domain.name for domain in apikey.domains]
apikey.delete()
except Exception as e:
current_app.logger.error('Error: {0}'.format(e))
current_app.logger.info('Delete API key {0}'.format(apikey.id))
history = History(msg='Delete API key {0}'.format(apikey.id),
detail = json.dumps({
'key': history_apikey_id,
'role': history_apikey_role,
'description': history_apikey_description,
'domains': history_apikey_domains
}),
created_by=current_user.username)
history.add()
return make_response(
jsonify({
'status': 'ok',
'msg': 'Key has been removed.'
}), 200)
@admin_bp.route('/manage-user', methods=['GET', 'POST'])
@login_required
@operator_role_required
def manage_user():
if request.method == 'GET':
roles = Role.query.all()
users = User.query.order_by(User.username).all()
return render_template('admin_manage_user.html',
users=users,
roles=roles)
if request.method == 'POST':
#
        # post data should be in format
# {'action': 'delete_user', 'data': 'username'}
#
try:
jdata = request.json
data = jdata['data']
if jdata['action'] == 'user_otp_disable':
user = User(username=data)
result = user.update_profile(enable_otp=False)
if result:
history = History(
msg='Two factor authentication disabled for user {0}'.
format(data),
created_by=current_user.username)
history.add()
return make_response(
jsonify({
'status':
'ok',
'msg':
'Two factor authentication has been disabled for user.'
}), 200)
else:
return make_response(
jsonify({
'status':
'error',
'msg':
'Cannot disable two factor authentication for user.'
}), 500)
elif jdata['action'] == 'delete_user':
user = User(username=data)
if user.username == current_user.username:
return make_response(
jsonify({
'status': 'error',
'msg': 'You cannot delete yourself.'
}), 400)
# Remove account associations first
user_accounts = Account.query.join(AccountUser).join(
User).filter(AccountUser.user_id == user.id,
AccountUser.account_id == Account.id).all()
for uc in user_accounts:
uc.revoke_privileges_by_id(user.id)
# Then delete the user
result = user.delete()
if result:
history = History(msg='Delete user {0}'.format(data),
created_by=current_user.username)
history.add()
return make_response(
jsonify({
'status': 'ok',
'msg': 'User has been removed.'
}), 200)
else:
return make_response(
jsonify({
'status': 'error',
'msg': 'Cannot remove user.'
}), 500)
elif jdata['action'] == 'revoke_user_privileges':
user = User(username=data)
result = user.revoke_privilege()
if result:
history = History(
msg='Revoke {0} user privileges'.format(data),
created_by=current_user.username)
history.add()
return make_response(
jsonify({
'status': 'ok',
'msg': 'Revoked user privileges.'
}), 200)
else:
return make_response(
jsonify({
'status': 'error',
'msg': 'Cannot revoke user privilege.'
}), 500)
elif jdata['action'] == 'update_user_role':
username = data['username']
role_name = data['role_name']
if username == current_user.username:
return make_response(
jsonify({
'status': 'error',
                            'msg': 'You cannot change your own role.'
}), 400)
user = User.query.filter(User.username == username).first()
if not user:
return make_response(
jsonify({
'status': 'error',
'msg': 'User does not exist.'
}), 404)
if user.role.name == 'Administrator' and current_user.role.name != 'Administrator':
return make_response(
jsonify({
'status':
'error',
'msg':
                            'You do not have permission to change the role of an Administrator user.'
}), 400)
if role_name == 'Administrator' and current_user.role.name != 'Administrator':
return make_response(
jsonify({
'status':
'error',
'msg':
'You do not have permission to promote a user to Administrator role.'
}), 400)
user = User(username=username)
result = user.set_role(role_name)
if result['status']:
history = History(
msg='Change user role of {0} to {1}'.format(
username, role_name),
created_by=current_user.username)
history.add()
return make_response(
jsonify({
'status': 'ok',
'msg': 'Changed user role successfully.'
}), 200)
else:
return make_response(
jsonify({
'status':
'error',
'msg':
'Cannot change user role. {0}'.format(
result['msg'])
}), 500)
else:
return make_response(
jsonify({
'status': 'error',
'msg': 'Action not supported.'
}), 400)
except Exception as e:
current_app.logger.error(
'Cannot update user. Error: {0}'.format(e))
current_app.logger.debug(traceback.format_exc())
return make_response(
jsonify({
'status':
'error',
'msg':
'There is something wrong, please contact Administrator.'
}), 400)
@admin_bp.route('/account/edit/<account_name>', methods=['GET', 'POST'])
@admin_bp.route('/account/edit', methods=['GET', 'POST'])
@login_required
@operator_role_required
def edit_account(account_name=None):
users = User.query.all()
if request.method == 'GET':
if account_name is None:
return render_template('admin_edit_account.html',
account_user_ids=[],
users=users,
create=1)
else:
account = Account.query.filter(
Account.name == account_name).first()
account_user_ids = account.get_user()
return render_template('admin_edit_account.html',
account=account,
account_user_ids=account_user_ids,
users=users,
create=0)
if request.method == 'POST':
fdata = request.form
new_user_list = request.form.getlist('account_multi_user')
# on POST, synthesize account and account_user_ids from form data
if not account_name:
account_name = fdata['accountname']
account = Account(name=account_name,
description=fdata['accountdescription'],
contact=fdata['accountcontact'],
mail=fdata['accountmail'])
account_user_ids = []
for username in new_user_list:
userid = User(username=username).get_user_info_by_username().id
account_user_ids.append(userid)
create = int(fdata['create'])
if create:
# account __init__ sanitizes and lowercases the name, so to manage expectations
# we let the user reenter the name until it's not empty and it's valid (ignoring the case)
if account.name == "" or account.name != account_name.lower():
return render_template('admin_edit_account.html',
account=account,
account_user_ids=account_user_ids,
users=users,
create=create,
invalid_accountname=True)
if Account.query.filter(Account.name == account.name).first():
return render_template('admin_edit_account.html',
account=account,
account_user_ids=account_user_ids,
users=users,
create=create,
duplicate_accountname=True)
result = account.create_account()
history = History(msg='Create account {0}'.format(account.name),
created_by=current_user.username)
else:
result = account.update_account()
history = History(msg='Update account {0}'.format(account.name),
created_by=current_user.username)
if result['status']:
account.grant_privileges(new_user_list)
history.add()
return redirect(url_for('admin.manage_account'))
return render_template('admin_edit_account.html',
account=account,
account_user_ids=account_user_ids,
users=users,
create=create,
error=result['msg'])
@admin_bp.route('/manage-account', methods=['GET', 'POST'])
@login_required
@operator_role_required
def manage_account():
if request.method == 'GET':
accounts = Account.query.order_by(Account.name).all()
for account in accounts:
account.user_num = AccountUser.query.filter(
AccountUser.account_id == account.id).count()
return render_template('admin_manage_account.html', accounts=accounts)
if request.method == 'POST':
#
        # post data should be in format
# {'action': 'delete_account', 'data': 'accountname'}
#
try:
jdata = request.json
data = jdata['data']
if jdata['action'] == 'delete_account':
account = Account.query.filter(Account.name == data).first()
if not account:
return make_response(
jsonify({
'status': 'error',
'msg': 'Account not found.'
}), 404)
# Remove account association from domains first
for domain in account.domains:
Domain(name=domain.name).assoc_account(None)
# Then delete the account
result = account.delete_account()
if result:
history = History(msg='Delete account {0}'.format(data),
created_by=current_user.username)
history.add()
return make_response(
jsonify({
'status': 'ok',
'msg': 'Account has been removed.'
}), 200)
else:
return make_response(
jsonify({
'status': 'error',
'msg': 'Cannot remove account.'
}), 500)
else:
return make_response(
jsonify({
'status': 'error',
'msg': 'Action not supported.'
}), 400)
except Exception as e:
current_app.logger.error(
'Cannot update account. Error: {0}'.format(e))
current_app.logger.debug(traceback.format_exc())
return make_response(
jsonify({
'status':
'error',
'msg':
'There is something wrong, please contact Administrator.'
}), 400)
class DetailedHistory():
def __init__(self, history, change_set):
self.history = history
self.detailed_msg = ""
self.change_set = change_set
if not history.detail:
self.detailed_msg = ""
return
detail_dict = json.loads(history.detail)
if 'domain_type' in detail_dict and 'account_id' in detail_dict: # this is a domain creation
self.detailed_msg = render_template_string("""
<table class="table table-bordered table-striped">
<tr><td>Domain type:</td><td>{{ domaintype }}</td></tr>
<tr><td>Account:</td><td>{{ account }}</td></tr>
</table>
""",
domaintype=detail_dict['domain_type'],
account=Account.get_name_by_id(self=None, account_id=detail_dict['account_id']) if detail_dict['account_id'] != "0" else "None")
elif 'authenticator' in detail_dict: # this is a user authentication
self.detailed_msg = render_template_string("""
<table class="table table-bordered table-striped" style="width:565px;">
<thead>
<tr>
<th colspan="3" style="background: rgba({{ background_rgba }});">
<p style="color:white;">User {{ username }} authentication {{ auth_result }}</p>
</th>
</tr>
</thead>
<tbody>
<tr>
<td>Authenticator Type:</td>
<td colspan="2">{{ authenticator }}</td>
</tr>
<tr>
<td>IP Address</td>
<td colspan="2">{{ ip_address }}</td>
</tr>
</tbody>
</table>
""",
background_rgba="68,157,68" if detail_dict['success'] == 1 else "201,48,44",
username=detail_dict['username'],
auth_result="success" if detail_dict['success'] == 1 else "failure",
authenticator=detail_dict['authenticator'],
ip_address=detail_dict['ip_address'])
elif 'add_rrests' in detail_dict: # this is a domain record change
# changes_set = []
self.detailed_msg = ""
# extract_changelogs_from_a_history_entry(changes_set, history, 0)
elif 'name' in detail_dict and 'template' in history.msg: # template creation / deletion
self.detailed_msg = render_template_string("""
<table class="table table-bordered table-striped">
<tr><td>Template name:</td><td>{{ template_name }}</td></tr>
<tr><td>Description:</td><td>{{ description }}</td></tr>
</table>
""",
template_name=DetailedHistory.get_key_val(detail_dict, "name"),
description=DetailedHistory.get_key_val(detail_dict, "description"))
elif 'Change domain' in history.msg and 'access control' in history.msg: # added or removed a user from a domain
users_with_access = DetailedHistory.get_key_val(detail_dict, "user_has_access")
self.detailed_msg = render_template_string("""
<table class="table table-bordered table-striped">
<tr><td>Users with access to this domain</td><td>{{ users_with_access }}</td></tr>
<tr><td>Number of users:</td><td>{{ users_with_access | length }}</td><tr>
</table>
""",
users_with_access=users_with_access)
elif 'Created API key' in history.msg or 'Updated API key' in history.msg:
self.detailed_msg = render_template_string("""
<table class="table table-bordered table-striped">
<tr><td>Key: </td><td>{{ keyname }}</td></tr>
<tr><td>Role:</td><td>{{ rolename }}</td></tr>
<tr><td>Description:</td><td>{{ description }}</td></tr>
<tr><td>Accessible domains with this API key:</td><td>{{ linked_domains }}</td></tr>
<tr><td>Accessible accounts with this API key:</td><td>{{ linked_accounts }}</td></tr>
</table>
""",
keyname=DetailedHistory.get_key_val(detail_dict, "key"),
rolename=DetailedHistory.get_key_val(detail_dict, "role"),
description=DetailedHistory.get_key_val(detail_dict, "description"),
linked_domains=DetailedHistory.get_key_val(detail_dict, "domains" if "domains" in detail_dict else "domain_acl"),
linked_accounts=DetailedHistory.get_key_val(detail_dict, "accounts"))
elif 'Delete API key' in history.msg:
self.detailed_msg = render_template_string("""
<table class="table table-bordered table-striped">
<tr><td>Key: </td><td>{{ keyname }}</td></tr>
<tr><td>Role:</td><td>{{ rolename }}</td></tr>
<tr><td>Description:</td><td>{{ description }}</td></tr>
<tr><td>Accessible domains with this API key:</td><td>{{ linked_domains }}</td></tr>
</table>
""",
keyname=DetailedHistory.get_key_val(detail_dict, "key"),
rolename=DetailedHistory.get_key_val(detail_dict, "role"),
description=DetailedHistory.get_key_val(detail_dict, "description"),
linked_domains=DetailedHistory.get_key_val(detail_dict, "domains"))
elif 'Update type for domain' in history.msg:
self.detailed_msg = render_template_string("""
<table class="table table-bordered table-striped">
<tr><td>Domain: </td><td>{{ domain }}</td></tr>
<tr><td>Domain type:</td><td>{{ domain_type }}</td></tr>
<tr><td>Masters:</td><td>{{ masters }}</td></tr>
</table>
""",
domain=DetailedHistory.get_key_val(detail_dict, "domain"),
domain_type=DetailedHistory.get_key_val(detail_dict, "type"),
masters=DetailedHistory.get_key_val(detail_dict, "masters"))
elif 'reverse' in history.msg:
self.detailed_msg = render_template_string("""
<table class="table table-bordered table-striped">
<tr><td>Domain Type: </td><td>{{ domain_type }}</td></tr>
<tr><td>Domain Master IPs:</td><td>{{ domain_master_ips }}</td></tr>
</table>
""",
domain_type=DetailedHistory.get_key_val(detail_dict, "domain_type"),
domain_master_ips=DetailedHistory.get_key_val(detail_dict, "domain_master_ips"))
elif DetailedHistory.get_key_val(detail_dict, 'msg') and DetailedHistory.get_key_val(detail_dict, 'status'):
self.detailed_msg = render_template_string('''
<table class="table table-bordered table-striped">
<tr><td>Status: </td><td>{{ history_status }}</td></tr>
<tr><td>Message:</td><td>{{ history_msg }}</td></tr>
</table>
''',
history_status=DetailedHistory.get_key_val(detail_dict, 'status'),
history_msg=DetailedHistory.get_key_val(detail_dict, 'msg'))
    # look up the key as given; fall back to the title-cased key for rows written by older versions
@staticmethod
def get_key_val(_dict, key):
return str(_dict.get(key, _dict.get(key.title(), '')))
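    # For example, both of the following return 'Create API key 1', regardless of
    # which casing the database row used:
    #   DetailedHistory.get_key_val({'msg': 'Create API key 1'}, 'msg')
    #   DetailedHistory.get_key_val({'Msg': 'Create API key 1'}, 'msg')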
# convert a list of History objects into DetailedHistory objects
def convert_histories(histories):
changes_set = dict()
detailedHistories = []
j = 0
for i in range(len(histories)):
if histories[i].detail and ('add_rrests' in histories[i].detail or 'del_rrests' in histories[i].detail):
extract_changelogs_from_a_history_entry(changes_set, histories[i], j)
if j in changes_set:
detailedHistories.append(DetailedHistory(histories[i], changes_set[j]))
else: # no changes were found
detailedHistories.append(DetailedHistory(histories[i], None))
j += 1
else:
detailedHistories.append(DetailedHistory(histories[i], None))
return detailedHistories
@admin_bp.route('/history', methods=['GET', 'POST'])
@login_required
@history_access_required
def history():
if request.method == 'POST':
if current_user.role.name != 'Administrator':
return make_response(
jsonify({
'status': 'error',
'msg': 'You do not have permission to remove history.'
}), 401)
h = History()
result = h.remove_all()
if result:
history = History(msg='Remove all histories',
created_by=current_user.username)
history.add()
return make_response(
jsonify({
'status': 'ok',
                    'msg': 'All histories have been removed.'
}), 200)
else:
return make_response(
jsonify({
'status': 'error',
                    'msg': 'Cannot remove histories.'
}), 500)
if request.method == 'GET':
doms = accounts = users = ""
if current_user.role.name in [ 'Administrator', 'Operator']:
all_domain_names = Domain.query.all()
all_account_names = Account.query.all()
all_user_names = User.query.all()
for d in all_domain_names:
doms += d.name + " "
for acc in all_account_names:
accounts += acc.name + " "
for usr in all_user_names:
users += usr.username + " "
else: # special autocomplete for users
all_domain_names = db.session.query(Domain) \
.outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \
.outerjoin(Account, Domain.account_id == Account.id) \
.outerjoin(AccountUser, Account.id == AccountUser.account_id) \
.filter(
db.or_(
DomainUser.user_id == current_user.id,
AccountUser.user_id == current_user.id
)).all()
all_account_names = db.session.query(Account) \
.outerjoin(Domain, Domain.account_id == Account.id) \
.outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \
.outerjoin(AccountUser, Account.id == AccountUser.account_id) \
.filter(
db.or_(
DomainUser.user_id == current_user.id,
AccountUser.user_id == current_user.id
)).all()
all_user_names = []
for a in all_account_names:
temp = db.session.query(User) \
.join(AccountUser, AccountUser.user_id == User.id) \
.outerjoin(Account, Account.id == AccountUser.account_id) \
.filter(
db.or_(
Account.id == a.id,
AccountUser.account_id == a.id
)
) \
.all()
for u in temp:
if u in all_user_names:
continue
all_user_names.append(u)
for d in all_domain_names:
doms += d.name + " "
for a in all_account_names:
accounts += a.name + " "
for u in all_user_names:
users += u.username + " "
return render_template('admin_history.html', all_domain_names=doms, all_account_names=accounts, all_usernames=users)
# local_offset is the offset, in minutes, of UTC relative to the local time (UTC minus local)
# offset must be an int
# returns the date converted to local time, with sub-second precision dropped
def from_utc_to_local(local_offset, timeframe):
offset = str(local_offset *(-1))
date_split = str(timeframe).split(".")[0]
date_converted = datetime.datetime.strptime(date_split, '%Y-%m-%d %H:%M:%S') + datetime.timedelta(minutes=int(offset))
return date_converted
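# For example, with local_offset = -120 (UTC minus local time for a client in UTC+2):
#   from_utc_to_local(-120, '2021-06-01 10:00:00.123456')
# drops the microseconds and returns datetime.datetime(2021, 6, 1, 12, 0).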
@admin_bp.route('/history_table', methods=['GET', 'POST'])
@login_required
@history_access_required
def history_table(): # ajax call data
if request.method == 'POST':
if current_user.role.name != 'Administrator':
return make_response(
jsonify({
'status': 'error',
'msg': 'You do not have permission to remove history.'
}), 401)
h = History()
result = h.remove_all()
if result:
history = History(msg='Remove all histories',
created_by=current_user.username)
history.add()
return make_response(
jsonify({
'status': 'ok',
                    'msg': 'All histories have been removed.'
}), 200)
else:
return make_response(
jsonify({
'status': 'error',
                    'msg': 'Cannot remove histories.'
}), 500)
detailedHistories = []
lim = int(Setting().get('max_history_records')) # max num of records
if request.method == 'GET':
if current_user.role.name in [ 'Administrator', 'Operator' ]:
base_query = History.query
else:
# if the user isn't an administrator or operator,
# allow_user_view_history must be enabled to get here,
# so include history for the domains for the user
base_query = db.session.query(History) \
.join(Domain, History.domain_id == Domain.id) \
.outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \
.outerjoin(Account, Domain.account_id == Account.id) \
.outerjoin(AccountUser, Account.id == AccountUser.account_id) \
.filter(
db.or_(
DomainUser.user_id == current_user.id,
AccountUser.user_id == current_user.id
))
domain_name = request.args.get('domain_name_filter') if request.args.get('domain_name_filter') != None \
and len(request.args.get('domain_name_filter')) != 0 else None
account_name = request.args.get('account_name_filter') if request.args.get('account_name_filter') != None \
and len(request.args.get('account_name_filter')) != 0 else None
user_name = request.args.get('auth_name_filter') if request.args.get('auth_name_filter') != None \
and len(request.args.get('auth_name_filter')) != 0 else None
min_date = request.args.get('min') if request.args.get('min') != None and len( request.args.get('min')) != 0 else None
if min_date != None: # get 1 day earlier, to check for timezone errors
min_date = str(datetime.datetime.strptime(min_date, '%Y-%m-%d') - datetime.timedelta(days=1))
max_date = request.args.get('max') if request.args.get('max') != None and len( request.args.get('max')) != 0 else None
if max_date != None: # get 1 day later, to check for timezone errors
max_date = str(datetime.datetime.strptime(max_date, '%Y-%m-%d') + datetime.timedelta(days=1))
tzoffset = request.args.get('tzoffset') if request.args.get('tzoffset') != None and len(request.args.get('tzoffset')) != 0 else None
changed_by = request.args.get('user_name_filter') if request.args.get('user_name_filter') != None \
and len(request.args.get('user_name_filter')) != 0 else None
"""
Auth methods: LOCAL, Github OAuth, Azure OAuth, SAML, OIDC OAuth, Google OAuth
"""
auth_methods = []
if (request.args.get('auth_local_only_checkbox') is None \
and request.args.get('auth_oauth_only_checkbox') is None \
and request.args.get('auth_saml_only_checkbox') is None and request.args.get('auth_all_checkbox') is None):
auth_methods = []
if request.args.get('auth_all_checkbox') == "on":
auth_methods.append("")
if request.args.get('auth_local_only_checkbox') == "on":
auth_methods.append("LOCAL")
if request.args.get('auth_oauth_only_checkbox') == "on":
auth_methods.append("OAuth")
if request.args.get('auth_saml_only_checkbox') == "on":
auth_methods.append("SAML")
if request.args.get('domain_changelog_only_checkbox') != None:
changelog_only = True if request.args.get('domain_changelog_only_checkbox') == "on" else False
else:
changelog_only = False
# users cannot search for authentication
if user_name != None and current_user.role.name not in [ 'Administrator', 'Operator']:
histories = []
elif domain_name != None:
if not changelog_only:
histories = base_query \
.filter(
db.and_(
db.or_(
History.msg.like("%domain "+ domain_name) if domain_name != "*" else History.msg.like("%domain%"),
History.msg.like("%domain "+ domain_name + " access control") if domain_name != "*" else History.msg.like("%domain%access control")
),
History.created_on <= max_date if max_date != None else True,
History.created_on >= min_date if min_date != None else True,
History.created_by == changed_by if changed_by != None else True
)
).order_by(History.created_on.desc()).limit(lim).all()
else:
# search for records changes only
histories = base_query \
.filter(
db.and_(
History.msg.like("Apply record changes to domain " + domain_name) if domain_name != "*" \
else History.msg.like("Apply record changes to domain%"),
History.created_on <= max_date if max_date != None else True,
History.created_on >= min_date if min_date != None else True,
History.created_by == changed_by if changed_by != None else True
)
).order_by(History.created_on.desc()) \
.limit(lim).all()
elif account_name != None:
if current_user.role.name in ['Administrator', 'Operator']:
histories = base_query \
.join(Domain, History.domain_id == Domain.id) \
.outerjoin(Account, Domain.account_id == Account.id) \
.filter(
db.and_(
Account.id == Domain.account_id,
account_name == Account.name if account_name != "*" else True,
History.created_on <= max_date if max_date != None else True,
History.created_on >= min_date if min_date != None else True,
History.created_by == changed_by if changed_by != None else True
)
).order_by(History.created_on.desc()) \
.limit(lim).all()
else:
histories = base_query \
.filter(
db.and_(
Account.id == Domain.account_id,
account_name == Account.name if account_name != "*" else True,
History.created_on <= max_date if max_date != None else True,
History.created_on >= min_date if min_date != None else True,
History.created_by == changed_by if changed_by != None else True
)
).order_by(History.created_on.desc()) \
.limit(lim).all()
elif user_name != None and current_user.role.name in [ 'Administrator', 'Operator']: # only admins can see the user login-logouts
histories = History.query \
.filter(
db.and_(
db.or_(
History.msg.like("User "+ user_name + " authentication%") if user_name != "*" and user_name != None else History.msg.like("%authentication%"),
History.msg.like("User "+ user_name + " was not authorized%") if user_name != "*" and user_name != None else History.msg.like("User%was not authorized%")
),
History.created_on <= max_date if max_date != None else True,
History.created_on >= min_date if min_date != None else True,
History.created_by == changed_by if changed_by != None else True
)
) \
.order_by(History.created_on.desc()).limit(lim).all()
temp = []
for h in histories:
for method in auth_methods:
if method in h.detail:
temp.append(h)
break
histories = temp
elif (changed_by != None or max_date != None) and current_user.role.name in [ 'Administrator', 'Operator'] : # select changed by and date filters only
histories = History.query \
.filter(
db.and_(
History.created_on <= max_date if max_date != None else True,
History.created_on >= min_date if min_date != None else True,
History.created_by == changed_by if changed_by != None else True
)
) \
.order_by(History.created_on.desc()).limit(lim).all()
        elif (changed_by != None or max_date != None): # special filtering for regular users, who do not have access to login logs
histories = base_query \
.filter(
db.and_(
History.created_on <= max_date if max_date != None else True,
History.created_on >= min_date if min_date != None else True,
History.created_by == changed_by if changed_by != None else True
)
) \
.order_by(History.created_on.desc()).limit(lim).all()
elif max_date != None: # if changed by == null and only date is applied
histories = base_query.filter(
db.and_(
History.created_on <= max_date if max_date != None else True,
History.created_on >= min_date if min_date != None else True,
)
).order_by(History.created_on.desc()).limit(lim).all()
else: # default view
if current_user.role.name in [ 'Administrator', 'Operator']:
histories = History.query.order_by(History.created_on.desc()).limit(lim).all()
else:
histories = db.session.query(History) \
.join(Domain, History.domain_id == Domain.id) \
.outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \
.outerjoin(Account, Domain.account_id == Account.id) \
.outerjoin(AccountUser, Account.id == AccountUser.account_id) \
.order_by(History.created_on.desc()) \
.filter(
db.or_(
DomainUser.user_id == current_user.id,
AccountUser.user_id == current_user.id
)).limit(lim).all()
detailedHistories = convert_histories(histories)
# Remove dates from previous or next day that were brought over
if tzoffset != None:
if min_date != None:
min_date_split = min_date.split()[0]
if max_date != None:
max_date_split = max_date.split()[0]
for i, history_rec in enumerate(detailedHistories):
local_date = str(from_utc_to_local(int(tzoffset), history_rec.history.created_on).date())
if (min_date != None and local_date == min_date_split) or (max_date != None and local_date == max_date_split):
detailedHistories[i] = None
# Remove elements previously flagged as None
detailedHistories = [h for h in detailedHistories if h is not None]
return render_template('admin_history_table.html', histories=detailedHistories, len_histories=len(detailedHistories), lim=lim)
@admin_bp.route('/setting/basic', methods=['GET'])
@login_required
@operator_role_required
def setting_basic():
if request.method == 'GET':
settings = [
'maintenance', 'fullscreen_layout', 'record_helper',
'login_ldap_first', 'default_record_table_size',
'default_domain_table_size', 'auto_ptr', 'record_quick_edit',
'pretty_ipv6_ptr', 'dnssec_admins_only',
'allow_user_create_domain', 'allow_user_remove_domain', 'allow_user_view_history', 'bg_domain_updates', 'site_name',
'session_timeout', 'warn_session_timeout', 'ttl_options',
'pdns_api_timeout', 'verify_ssl_connections', 'verify_user_email',
'delete_sso_accounts', 'otp_field_enabled', 'custom_css', 'enable_api_rr_history', 'max_history_records', 'otp_force'
]
return render_template('admin_setting_basic.html', settings=settings)
@admin_bp.route('/setting/basic/<path:setting>/edit', methods=['POST'])
@login_required
@operator_role_required
def setting_basic_edit(setting):
jdata = request.json
new_value = jdata['value']
result = Setting().set(setting, new_value)
if (result):
return make_response(
jsonify({
'status': 'ok',
'msg': 'Toggled setting successfully.'
}), 200)
else:
return make_response(
jsonify({
'status': 'error',
'msg': 'Unable to toggle setting.'
}), 500)
@admin_bp.route('/setting/basic/<path:setting>/toggle', methods=['POST'])
@login_required
@operator_role_required
def setting_basic_toggle(setting):
result = Setting().toggle(setting)
if (result):
return make_response(
jsonify({
'status': 'ok',
'msg': 'Toggled setting successfully.'
}), 200)
else:
return make_response(
jsonify({
'status': 'error',
'msg': 'Unable to toggle setting.'
}), 500)
@admin_bp.route('/setting/pdns', methods=['GET', 'POST'])
@login_required
@admin_role_required
def setting_pdns():
if request.method == 'GET':
pdns_api_url = Setting().get('pdns_api_url')
pdns_api_key = Setting().get('pdns_api_key')
pdns_version = Setting().get('pdns_version')
return render_template('admin_setting_pdns.html',
pdns_api_url=pdns_api_url,
pdns_api_key=pdns_api_key,
pdns_version=pdns_version)
elif request.method == 'POST':
pdns_api_url = request.form.get('pdns_api_url')
pdns_api_key = request.form.get('pdns_api_key')
pdns_version = request.form.get('pdns_version')
Setting().set('pdns_api_url', pdns_api_url)
Setting().set('pdns_api_key', pdns_api_key)
Setting().set('pdns_version', pdns_version)
return render_template('admin_setting_pdns.html',
pdns_api_url=pdns_api_url,
pdns_api_key=pdns_api_key,
pdns_version=pdns_version)
@admin_bp.route('/setting/dns-records', methods=['GET', 'POST'])
@login_required
@operator_role_required
def setting_records():
if request.method == 'GET':
_fr = Setting().get('forward_records_allow_edit')
_rr = Setting().get('reverse_records_allow_edit')
f_records = literal_eval(_fr) if isinstance(_fr, str) else _fr
r_records = literal_eval(_rr) if isinstance(_rr, str) else _rr
return render_template('admin_setting_records.html',
f_records=f_records,
r_records=r_records)
elif request.method == 'POST':
fr = {}
rr = {}
records = Setting().defaults['forward_records_allow_edit']
for r in records:
fr[r] = True if request.form.get('fr_{0}'.format(
r.lower())) else False
rr[r] = True if request.form.get('rr_{0}'.format(
r.lower())) else False
Setting().set('forward_records_allow_edit', str(fr))
Setting().set('reverse_records_allow_edit', str(rr))
return redirect(url_for('admin.setting_records'))
def has_an_auth_method(local_db_enabled=None,
ldap_enabled=None,
google_oauth_enabled=None,
github_oauth_enabled=None,
oidc_oauth_enabled=None,
azure_oauth_enabled=None):
if local_db_enabled is None:
local_db_enabled = Setting().get('local_db_enabled')
if ldap_enabled is None:
ldap_enabled = Setting().get('ldap_enabled')
if google_oauth_enabled is None:
google_oauth_enabled = Setting().get('google_oauth_enabled')
if github_oauth_enabled is None:
github_oauth_enabled = Setting().get('github_oauth_enabled')
if oidc_oauth_enabled is None:
oidc_oauth_enabled = Setting().get('oidc_oauth_enabled')
if azure_oauth_enabled is None:
azure_oauth_enabled = Setting().get('azure_oauth_enabled')
return local_db_enabled or ldap_enabled or google_oauth_enabled or github_oauth_enabled or oidc_oauth_enabled or azure_oauth_enabled
@admin_bp.route('/setting/authentication', methods=['GET', 'POST'])
@login_required
@admin_role_required
def setting_authentication():
if request.method == 'GET':
return render_template('admin_setting_authentication.html')
elif request.method == 'POST':
conf_type = request.form.get('config_tab')
result = None
if conf_type == 'general':
local_db_enabled = True if request.form.get(
'local_db_enabled') else False
signup_enabled = True if request.form.get(
'signup_enabled', ) else False
if not has_an_auth_method(local_db_enabled=local_db_enabled):
result = {
'status':
False,
'msg':
'Must have at least one authentication method enabled.'
}
else:
Setting().set('local_db_enabled', local_db_enabled)
Setting().set('signup_enabled', signup_enabled)
result = {'status': True, 'msg': 'Saved successfully'}
elif conf_type == 'ldap':
ldap_enabled = True if request.form.get('ldap_enabled') else False
if not has_an_auth_method(ldap_enabled=ldap_enabled):
result = {
'status':
False,
'msg':
'Must have at least one authentication method enabled.'
}
else:
Setting().set('ldap_enabled', ldap_enabled)
Setting().set('ldap_type', request.form.get('ldap_type'))
Setting().set('ldap_uri', request.form.get('ldap_uri'))
Setting().set('ldap_base_dn', request.form.get('ldap_base_dn'))
Setting().set('ldap_admin_username',
request.form.get('ldap_admin_username'))
Setting().set('ldap_admin_password',
request.form.get('ldap_admin_password'))
Setting().set('ldap_filter_basic',
request.form.get('ldap_filter_basic'))
Setting().set('ldap_filter_group',
request.form.get('ldap_filter_group'))
Setting().set('ldap_filter_username',
request.form.get('ldap_filter_username'))
Setting().set('ldap_filter_groupname',
request.form.get('ldap_filter_groupname'))
Setting().set(
'ldap_sg_enabled', True
if request.form.get('ldap_sg_enabled') == 'ON' else False)
Setting().set('ldap_admin_group',
request.form.get('ldap_admin_group'))
Setting().set('ldap_operator_group',
request.form.get('ldap_operator_group'))
Setting().set('ldap_user_group',
request.form.get('ldap_user_group'))
Setting().set('ldap_domain', request.form.get('ldap_domain'))
Setting().set(
'autoprovisioning', True
if request.form.get('autoprovisioning') == 'ON' else False)
Setting().set('autoprovisioning_attribute',
request.form.get('autoprovisioning_attribute'))
if request.form.get('autoprovisioning')=='ON':
if validateURN(request.form.get('urn_value')):
Setting().set('urn_value',
request.form.get('urn_value'))
else:
return render_template('admin_setting_authentication.html',
error="Invalid urn")
else:
Setting().set('urn_value',
request.form.get('urn_value'))
Setting().set('purge', True
if request.form.get('purge') == 'ON' else False)
result = {'status': True, 'msg': 'Saved successfully'}
elif conf_type == 'google':
google_oauth_enabled = True if request.form.get(
'google_oauth_enabled') else False
if not has_an_auth_method(google_oauth_enabled=google_oauth_enabled):
result = {
'status':
False,
'msg':
'Must have at least one authentication method enabled.'
}
else:
Setting().set('google_oauth_enabled', google_oauth_enabled)
Setting().set('google_oauth_client_id',
request.form.get('google_oauth_client_id'))
Setting().set('google_oauth_client_secret',
request.form.get('google_oauth_client_secret'))
Setting().set('google_token_url',
request.form.get('google_token_url'))
Setting().set('google_oauth_scope',
request.form.get('google_oauth_scope'))
Setting().set('google_authorize_url',
request.form.get('google_authorize_url'))
Setting().set('google_base_url',
request.form.get('google_base_url'))
result = {
'status': True,
'msg':
'Saved successfully. Please reload PDA to take effect.'
}
elif conf_type == 'github':
github_oauth_enabled = True if request.form.get(
'github_oauth_enabled') else False
if not has_an_auth_method(github_oauth_enabled=github_oauth_enabled):
result = {
'status':
False,
'msg':
'Must have at least one authentication method enabled.'
}
else:
Setting().set('github_oauth_enabled', github_oauth_enabled)
Setting().set('github_oauth_key',
request.form.get('github_oauth_key'))
Setting().set('github_oauth_secret',
request.form.get('github_oauth_secret'))
Setting().set('github_oauth_scope',
request.form.get('github_oauth_scope'))
Setting().set('github_oauth_api_url',
request.form.get('github_oauth_api_url'))
Setting().set('github_oauth_token_url',
request.form.get('github_oauth_token_url'))
Setting().set('github_oauth_authorize_url',
request.form.get('github_oauth_authorize_url'))
result = {
'status': True,
'msg':
'Saved successfully. Please reload PDA to take effect.'
}
elif conf_type == 'azure':
azure_oauth_enabled = True if request.form.get(
'azure_oauth_enabled') else False
if not has_an_auth_method(azure_oauth_enabled=azure_oauth_enabled):
result = {
'status':
False,
'msg':
'Must have at least one authentication method enabled.'
}
else:
Setting().set('azure_oauth_enabled', azure_oauth_enabled)
Setting().set('azure_oauth_key',
request.form.get('azure_oauth_key'))
Setting().set('azure_oauth_secret',
request.form.get('azure_oauth_secret'))
Setting().set('azure_oauth_scope',
request.form.get('azure_oauth_scope'))
Setting().set('azure_oauth_api_url',
request.form.get('azure_oauth_api_url'))
Setting().set('azure_oauth_token_url',
request.form.get('azure_oauth_token_url'))
Setting().set('azure_oauth_authorize_url',
request.form.get('azure_oauth_authorize_url'))
Setting().set(
'azure_sg_enabled', True
if request.form.get('azure_sg_enabled') == 'ON' else False)
Setting().set('azure_admin_group',
request.form.get('azure_admin_group'))
Setting().set('azure_operator_group',
request.form.get('azure_operator_group'))
Setting().set('azure_user_group',
request.form.get('azure_user_group'))
Setting().set(
'azure_group_accounts_enabled', True
if request.form.get('azure_group_accounts_enabled') == 'ON' else False)
Setting().set('azure_group_accounts_name',
request.form.get('azure_group_accounts_name'))
Setting().set('azure_group_accounts_name_re',
request.form.get('azure_group_accounts_name_re'))
Setting().set('azure_group_accounts_description',
request.form.get('azure_group_accounts_description'))
Setting().set('azure_group_accounts_description_re',
request.form.get('azure_group_accounts_description_re'))
result = {
'status': True,
'msg':
'Saved successfully. Please reload PDA to take effect.'
}
elif conf_type == 'oidc':
oidc_oauth_enabled = True if request.form.get(
'oidc_oauth_enabled') else False
if not has_an_auth_method(oidc_oauth_enabled=oidc_oauth_enabled):
result = {
'status':
False,
'msg':
'Must have at least one authentication method enabled.'
}
else:
Setting().set(
'oidc_oauth_enabled',
True if request.form.get('oidc_oauth_enabled') else False)
Setting().set('oidc_oauth_key',
request.form.get('oidc_oauth_key'))
Setting().set('oidc_oauth_secret',
request.form.get('oidc_oauth_secret'))
Setting().set('oidc_oauth_scope',
request.form.get('oidc_oauth_scope'))
Setting().set('oidc_oauth_api_url',
request.form.get('oidc_oauth_api_url'))
Setting().set('oidc_oauth_token_url',
request.form.get('oidc_oauth_token_url'))
Setting().set('oidc_oauth_authorize_url',
request.form.get('oidc_oauth_authorize_url'))
Setting().set('oidc_oauth_logout_url',
request.form.get('oidc_oauth_logout_url'))
Setting().set('oidc_oauth_username',
request.form.get('oidc_oauth_username'))
Setting().set('oidc_oauth_firstname',
request.form.get('oidc_oauth_firstname'))
Setting().set('oidc_oauth_last_name',
request.form.get('oidc_oauth_last_name'))
Setting().set('oidc_oauth_email',
request.form.get('oidc_oauth_email'))
Setting().set('oidc_oauth_account_name_property',
request.form.get('oidc_oauth_account_name_property'))
Setting().set('oidc_oauth_account_description_property',
request.form.get('oidc_oauth_account_description_property'))
result = {
'status': True,
'msg':
'Saved successfully. Please reload PDA to take effect.'
}
else:
return abort(400)
return render_template('admin_setting_authentication.html',
result=result)
@admin_bp.route('/templates', methods=['GET', 'POST'])
@admin_bp.route('/templates/list', methods=['GET', 'POST'])
@login_required
@operator_role_required
def templates():
templates = DomainTemplate.query.all()
return render_template('template.html', templates=templates)
@admin_bp.route('/template/create', methods=['GET', 'POST'])
@login_required
@operator_role_required
def create_template():
if request.method == 'GET':
return render_template('template_add.html')
if request.method == 'POST':
try:
name = request.form.getlist('name')[0]
description = request.form.getlist('description')[0]
            if ' ' in name or not name:
flash("Please correct your input", 'error')
return redirect(url_for('admin.create_template'))
if DomainTemplate.query.filter(
DomainTemplate.name == name).first():
flash(
"A template with the name {0} already exists!".format(
name), 'error')
return redirect(url_for('admin.create_template'))
t = DomainTemplate(name=name, description=description)
result = t.create()
if result['status'] == 'ok':
history = History(msg='Add domain template {0}'.format(name),
detail = json.dumps({
'name': name,
'description': description
}),
created_by=current_user.username)
history.add()
return redirect(url_for('admin.templates'))
else:
flash(result['msg'], 'error')
return redirect(url_for('admin.create_template'))
except Exception as e:
current_app.logger.error(
'Cannot create domain template. Error: {0}'.format(e))
current_app.logger.debug(traceback.format_exc())
abort(500)
@admin_bp.route('/template/create-from-zone', methods=['POST'])
@login_required
@operator_role_required
def create_template_from_zone():
try:
jdata = request.json
name = jdata['name']
description = jdata['description']
domain_name = jdata['domain']
        if ' ' in name or not name:
return make_response(
jsonify({
'status': 'error',
'msg': 'Please correct template name'
}), 400)
if DomainTemplate.query.filter(DomainTemplate.name == name).first():
return make_response(
jsonify({
'status':
'error',
'msg':
'A template with the name {0} already exists!'.format(name)
}), 409)
t = DomainTemplate(name=name, description=description)
result = t.create()
if result['status'] == 'ok':
history = History(msg='Add domain template {0}'.format(name),
detail = json.dumps({
'name': name,
'description': description
}),
created_by=current_user.username)
history.add()
            # After creating the domain template in the local DB,
            # we add the zone's records to it as template records.
records = []
domain = Domain.query.filter(Domain.name == domain_name).first()
if domain:
# Query zone's rrsets from PowerDNS API
rrsets = Record().get_rrsets(domain.name)
if rrsets:
for r in rrsets:
name = '@' if r['name'] == domain_name + '.' else r[
'name'].replace('.{}.'.format(domain_name), '')
for record in r['records']:
t_record = DomainTemplateRecord(
name=name,
type=r['type'],
status=False if record['disabled'] else True,
ttl=r['ttl'],
data=record['content'])
records.append(t_record)
result = t.replace_records(records)
if result['status'] == 'ok':
return make_response(
jsonify({
'status': 'ok',
'msg': result['msg']
}), 200)
else:
                    # Revert the domain template (remove it)
                    # if we cannot add records.
                    t.delete_template()
return make_response(
jsonify({
'status': 'error',
'msg': result['msg']
}), 500)
else:
return make_response(
jsonify({
'status': 'error',
'msg': result['msg']
}), 500)
except Exception as e:
current_app.logger.error(
'Cannot create template from zone. Error: {0}'.format(e))
current_app.logger.debug(traceback.format_exc())
return make_response(
jsonify({
'status': 'error',
'msg': 'Error when applying new changes'
}), 500)
@admin_bp.route('/template/<path:template>/edit', methods=['GET'])
@login_required
@operator_role_required
def edit_template(template):
try:
t = DomainTemplate.query.filter(
DomainTemplate.name == template).first()
records_allow_to_edit = Setting().get_records_allow_to_edit()
quick_edit = Setting().get('record_quick_edit')
ttl_options = Setting().get_ttl_options()
if t is not None:
records = []
for jr in t.records:
if jr.type in records_allow_to_edit:
record = DomainTemplateRecord(
name=jr.name,
type=jr.type,
status='Active' if jr.status else 'Disabled',
ttl=jr.ttl,
data=jr.data,
comment=jr.comment if jr.comment else '')
records.append(record)
return render_template('template_edit.html',
template=t.name,
records=records,
editable_records=records_allow_to_edit,
quick_edit=quick_edit,
ttl_options=ttl_options)
except Exception as e:
current_app.logger.error(
'Cannot open domain template page. DETAIL: {0}'.format(e))
current_app.logger.debug(traceback.format_exc())
abort(500)
return redirect(url_for('admin.templates'))
@admin_bp.route('/template/<path:template>/apply',
methods=['POST'],
strict_slashes=False)
@login_required
def apply_records(template):
try:
jdata = request.json
records = []
for j in jdata['records']:
name = '@' if j['record_name'] in ['@', ''] else j['record_name']
type = j['record_type']
data = j['record_data']
comment = j['record_comment']
status = 0 if j['record_status'] == 'Disabled' else 1
ttl = int(j['record_ttl']) if j['record_ttl'] else 3600
dtr = DomainTemplateRecord(name=name,
type=type,
data=data,
comment=comment,
status=status,
ttl=ttl)
records.append(dtr)
t = DomainTemplate.query.filter(
DomainTemplate.name == template).first()
result = t.replace_records(records)
if result['status'] == 'ok':
jdata.pop('_csrf_token',
None) # don't store csrf token in the history.
history = History(
msg='Apply domain template record changes to domain template {0}'
.format(template),
detail = json.dumps(jdata),
created_by=current_user.username)
history.add()
return make_response(jsonify(result), 200)
else:
return make_response(jsonify(result), 400)
except Exception as e:
current_app.logger.error(
'Cannot apply record changes to the template. Error: {0}'.format(
e))
current_app.logger.debug(traceback.format_exc())
return make_response(
jsonify({
'status': 'error',
'msg': 'Error when applying new changes'
}), 500)
@admin_bp.route('/template/<path:template>/delete', methods=['POST'])
@login_required
@operator_role_required
def delete_template(template):
try:
t = DomainTemplate.query.filter(
DomainTemplate.name == template).first()
if t is not None:
result = t.delete_template()
if result['status'] == 'ok':
history = History(
msg='Deleted domain template {0}'.format(template),
detail = json.dumps({'name': template}),
created_by=current_user.username)
history.add()
return redirect(url_for('admin.templates'))
else:
flash(result['msg'], 'error')
return redirect(url_for('admin.templates'))
except Exception as e:
current_app.logger.error(
'Cannot delete template. Error: {0}'.format(e))
current_app.logger.debug(traceback.format_exc())
abort(500)
return redirect(url_for('admin.templates'))
@admin_bp.route('/global-search', methods=['GET'])
@login_required
@operator_role_required
def global_search():
if request.method == 'GET':
domains = []
records = []
comments = []
query = request.args.get('q')
if query:
server = Server(server_id='localhost')
results = server.global_search(object_type='all', query=query)
# Format the search result
for result in results:
if result['object_type'] == 'zone':
# Remove the dot at the end of string
result['name'] = result['name'][:-1]
domains.append(result)
elif result['object_type'] == 'record':
# Remove the dot at the end of string
result['name'] = result['name'][:-1]
result['zone_id'] = result['zone_id'][:-1]
records.append(result)
elif result['object_type'] == 'comment':
# Get the actual record name, exclude the domain part
result['name'] = result['name'].replace(result['zone_id'], '')
if result['name']:
result['name'] = result['name'][:-1]
else:
result['name'] = '@'
# Remove the dot at the end of string
result['zone_id'] = result['zone_id'][:-1]
comments.append(result)
else:
pass
return render_template('admin_global_search.html', domains=domains, records=records, comments=comments)
def validateURN(value):
NID_PATTERN = re.compile(r'^[0-9a-z][0-9a-z-]{1,31}$', flags=re.IGNORECASE)
NSS_PCHAR = '[a-z0-9-._~]|%[a-f0-9]{2}|[!$&\'()*+,;=]|:|@'
NSS_PATTERN = re.compile(fr'^({NSS_PCHAR})({NSS_PCHAR}|/|\?)*$', re.IGNORECASE)
    prefix = value.split(':')
    if len(prefix) < 3:
        current_app.logger.warning("URN prefix is too short")
        return False
    urn = prefix[0]
    nid = prefix[1]
    nss = value.replace(urn + ":" + nid + ":", "")
    if urn.lower() != "urn":
        current_app.logger.warning(urn + ' is not a valid URN scheme')
        return False
    if not re.match(NID_PATTERN, nid.lower()):
        current_app.logger.warning(nid + ' contains invalid characters')
        return False
    if not re.match(NSS_PATTERN, nss):
        current_app.logger.warning(nss + ' contains invalid characters')
        return False
return True
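# Illustrative checks for validateURN (added for clarity; the values are made up).
# Inputs must have the form "urn:<nid>:<nss>":
#   validateURN("urn:example:a123,z456")  -> True
#   validateURN("urn:-bad-nid:foo")       -> False  (NID may not start with '-')
#   validateURN("isbn:0451450523")        -> False  (missing the "urn" scheme)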
| 42.04381 | 161 | 0.543026 | ["MIT"] | CrazyForks/PowerDNS-Admin | powerdnsadmin/routes/admin.py | 82,532 | Python |
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
class Actor(nn.Module):
"""For advanced usage (how to customize the network), please refer to
:ref:`build_the_network`.
"""
def __init__(self, preprocess_net, action_shape, hidden_layer_size=128):
super().__init__()
self.preprocess = preprocess_net
self.last = nn.Linear(hidden_layer_size, np.prod(action_shape))
def forward(self, s, state=None, info={}):
r"""s -> Q(s, \*)"""
logits, h = self.preprocess(s, state)
logits = F.softmax(self.last(logits), dim=-1)
return logits, h
class Critic(nn.Module):
"""For advanced usage (how to customize the network), please refer to
:ref:`build_the_network`.
"""
def __init__(self, preprocess_net, hidden_layer_size=128):
super().__init__()
self.preprocess = preprocess_net
self.last = nn.Linear(hidden_layer_size, 1)
def forward(self, s, **kwargs):
"""s -> V(s)"""
logits, h = self.preprocess(s, state=kwargs.get('state', None))
logits = self.last(logits)
return logits
class DQN(nn.Module):
"""For advanced usage (how to customize the network), please refer to
:ref:`build_the_network`.
"""
def __init__(self, h, w, action_shape, device='cpu'):
super(DQN, self).__init__()
self.device = device
self.conv1 = nn.Conv2d(4, 16, kernel_size=5, stride=2)
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
self.bn3 = nn.BatchNorm2d(32)
def conv2d_size_out(size, kernel_size=5, stride=2):
return (size - (kernel_size - 1) - 1) // stride + 1
convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))
convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))
linear_input_size = convw * convh * 32
self.fc = nn.Linear(linear_input_size, 512)
self.head = nn.Linear(512, action_shape)
def forward(self, x, state=None, info={}):
r"""x -> Q(x, \*)"""
if not isinstance(x, torch.Tensor):
x = torch.tensor(x, device=self.device, dtype=torch.float32)
x = x.permute(0, 3, 1, 2)
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = self.fc(x.reshape(x.size(0), -1))
return self.head(x), state
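# Illustrative usage sketch (added for clarity; not part of the original module).
# Any network returning a (features, state) pair can serve as preprocess_net; the
# tiny MLP below is an assumption made only so the example is self-contained.
if __name__ == "__main__":
    class _MLP(nn.Module):
        def __init__(self, state_dim, hidden_layer_size=128):
            super().__init__()
            self.model = nn.Sequential(
                nn.Linear(state_dim, hidden_layer_size), nn.ReLU())
        def forward(self, s, state=None, info={}):
            if not isinstance(s, torch.Tensor):
                s = torch.tensor(s, dtype=torch.float32)
            return self.model(s), state
    preprocess = _MLP(state_dim=8)
    actor = Actor(preprocess, action_shape=4)   # softmax policy over 4 actions
    critic = Critic(preprocess)                 # scalar state value V(s)
    obs = np.random.rand(2, 8)
    probs, _ = actor(obs)                       # shape (2, 4), rows sum to 1
    values = critic(obs)                        # shape (2, 1)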
| 33.881579 | 76 | 0.612816 | ["MIT"] | FightingSrain/tianshou | tianshou/utils/net/discrete.py | 2,575 | Python |
import datetime
import app
class Order:
def __init__(self, orderId, custId, orderDate, orderStatus, shipDate, creditCardNumber,
street, city, state, zipCode, emailAddress):
self.orderId = orderId
self.custId = custId
self.orderDate = orderDate
self.orderStatus = orderStatus
self.shipDate = shipDate
self.creditCardNumber = creditCardNumber
self.street = street
self.city = city
self.state = state
self.zipCode = zipCode
self.emailAddress = emailAddress
def toString(self):
return "[orderId: " + self.orderId + ", custId: " + self.custId + ", orderDate: " + self.orderDate.strftime(app.DATE_FORMAT) +\
", orderStatus: " + self.orderStatus + ", shipDate: " + self.shipDate.strftime(app.DATE_FORMAT) + ", creditCardNumber: " +\
self.creditCardNumber + ", street: " + self.street + ", city: " + self.city + ", state: " + self.state +\
", zipCode: " + self.zipCode + ", emailAddress: " + self.emailAddress + "]"
| 43.08 | 138 | 0.609099 | ["Apache-2.0"] | ShiftLeftSecurity/tarpit-python | app/model/Order.py | 1,077 | Python |
"""Fixer that changes raw_input(...) into input(...)."""
# Author: Andre Roberge
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixRawInput(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< name='raw_input' trailer< '(' [any] ')' > any* >
"""
def transform(self, node, results):
name = results["name"]
name.replace(Name(u"input", prefix=name.prefix))
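# Example of the rewrite this fixer performs when run through 2to3:
#   raw_input("Enter a value: ")   ->   input("Enter a value: ")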
| 26.222222 | 70 | 0.582627 | ["BSD-2-Clause"] | 8bit-Dude/8bit-Unity | utils/py27/Lib/lib2to3/fixes/fix_raw_input.py | 472 | Python |
"""
This module is the commandline interface of bowl.
Created on 14 March 2014
@author: Charlie Lewis
"""
import argparse
from bowl.cli_opts import add
from bowl.cli_opts import connect
from bowl.cli_opts import delete
from bowl.cli_opts import disconnect
from bowl.cli_opts import grant
from bowl.cli_opts import hosts
from bowl.cli_opts import image_import
from bowl.cli_opts import images
from bowl.cli_opts import info
from bowl.cli_opts import kill
from bowl.cli_opts import link
from bowl.cli_opts import list
from bowl.cli_opts import login
from bowl.cli_opts import logout
from bowl.cli_opts import logs
from bowl.cli_opts import new
from bowl.cli_opts import remove
from bowl.cli_opts import repositories
from bowl.cli_opts import revoke
from bowl.cli_opts import services
from bowl.cli_opts import snapshot
from bowl.cli_opts import snapshots
from bowl.cli_opts import start
from bowl.cli_opts import stop
from bowl.cli_opts import subtract
from bowl.cli_opts import test
from bowl.cli_opts import unlink
from bowl.cli_opts import update
from bowl.cli_opts import version
class cli(object):
"""
This class is responsible for all commandline operations.
"""
def parse_args(self):
default_metadata_path = "~/.bowl"
default_path = "/bowl"
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(title='bowl commands')
# add
parse_add = subparsers.add_parser('add',
help='add a service')
parse_add.add_argument('OS',
help='specify operating system for service')
parse_add.add_argument('VERSION',
help='specify version of operating system')
parse_add.add_argument('TYPE',
help='specify type of service (databases, environment, services, tools)')
parse_add.add_argument('NAME',
help='specify name of service')
parse_add.add_argument('JSON',
help='JSON object or path to JSON object that contains associated metadata')
parse_add.add_argument('PATH',
help='path that contains the Dockerfile')
parse_add.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
# !! TODO use default, and restructure if/else in add
parse_add.add_argument('--repository', '-r',
help='specify repository to add service to, use localhost by default')
parse_add.set_defaults(func=add.add.main)
# connect
parse_connect = subparsers.add_parser('connect',
help='connect to a docker host')
parse_connect.add_argument('DOCKER_HOST',
help='specify docker host to connect to (hostname or IP)')
parse_connect.add_argument('-e',
action='store_true',
help='use environment variables to establish connection details')
parse_connect.add_argument('--port', '-p',
default="2375",
help='specify docker host port to connect to')
parse_connect.add_argument('--sock', '-s',
default=None,
help='specify docker host socket to connect to, i.e. unix://var/run/docker.sock')
parse_connect.add_argument('--tlsverify',
action='store_true',
help='verify the server certificate for TLS')
parse_connect.add_argument('--tlscert',
default=None,
help='/path/to/client-cert.pem for TLS')
parse_connect.add_argument('--tlskey',
default=None,
help='/path/to/client-key.pem for TLS')
parse_connect.add_argument('--tlscacert',
default=None,
help='/path/to/ca.pem for TLS')
parse_connect.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_connect.set_defaults(func=connect.connect.main)
# delete
parse_delete = subparsers.add_parser('delete',
help='delete an image')
parse_delete.add_argument('IMAGE_NAME',
help='specify name of image to delete')
parse_delete.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_delete.set_defaults(func=delete.delete.main)
# disconnect
parse_disconnect = subparsers.add_parser('disconnect',
help='disconnect from a docker host')
parse_disconnect.add_argument('DOCKER_HOST',
help='specify docker host to disconnect from')
parse_disconnect.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_disconnect.set_defaults(func=disconnect.disconnect.main)
# grant
parse_grant = subparsers.add_parser('grant',
help='grant access to container for a user')
parse_grant.add_argument('USER',
help='specify user to grant access')
parse_grant.add_argument('--container', '-c',
default="all",
help='specify container to add access to for the specified user, default all')
parse_grant.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_grant.add_argument('-z',
action='store_true',
help='do not print any output')
parse_grant.set_defaults(func=grant.grant.main)
# hosts
parse_hosts = subparsers.add_parser('hosts',
help='list hosts that are registered')
parse_hosts.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_hosts.add_argument('-z',
action='store_true',
help='do not print any output')
parse_hosts.set_defaults(func=hosts.hosts.main)
# images
parse_images = subparsers.add_parser('images',
help='list images')
parse_images.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_images.add_argument('-z',
action='store_true',
help='do not print any output')
parse_images.set_defaults(func=images.images.main)
# import
parse_import = subparsers.add_parser('import',
help='import an image')
parse_import.add_argument('IMAGE_NAME',
help='specify name of image to import')
parse_import.add_argument('DOCKER_HOST',
help='specify Docker host of image to import')
parse_import.add_argument('-d', '--description',
help='description of image to import')
parse_import.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_import.add_argument('-u', '--uuid',
help='uuid of image to import')
# use non-standard naming scheme to not conflict with python's import
parse_import.set_defaults(func=image_import.image_import.main)
# info
parse_info = subparsers.add_parser('info',
help='display system-wide information')
parse_info.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_info.add_argument('-z',
action='store_true',
help='do not print any output')
parse_info.set_defaults(func=info.info.main)
# kill
parse_kill = subparsers.add_parser('kill',
help='kill running container')
parse_kill.add_argument('CONTAINER',
help='specify container to kill')
parse_kill.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_kill.add_argument('-z',
action='store_true',
help='do not print any output')
parse_kill.set_defaults(func=kill.kill.main)
# link
parse_link = subparsers.add_parser('link',
help='link to a service repository host')
parse_link.add_argument('SERVICE_HOST',
help='specify service repository host to connect to')
parse_link.add_argument('NAME',
help='specify a name for the repository')
parse_link.add_argument('--path',
default=default_metadata_path,
help='specify path where services live, default '+default_metadata_path)
parse_link.add_argument('--port', '-p',
default='8080',
help='specify port that service host is running on, default=8080')
parse_link.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_link.add_argument('-z',
action='store_true',
help='do not print any output')
parse_link.set_defaults(func=link.link.main)
# list
parse_list = subparsers.add_parser('list',
help='list containers running')
parse_list.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_list.add_argument('-z',
action='store_true',
help='do not print any output')
parse_list.set_defaults(func=list.list.main)
# login
parse_login = subparsers.add_parser('login',
help='login with credentials')
parse_login.add_argument('-e', '--email',
help='email address')
parse_login.add_argument('-u', '--username',
help='username')
parse_login.add_argument('PASSWORD',
help='password')
parse_login.set_defaults(func=login.login.main)
# logout
parse_logout = subparsers.add_parser('logout',
help='logout')
parse_logout.set_defaults(func=logout.logout.main)
# logs
parse_logs = subparsers.add_parser('logs',
help='container logs')
parse_logs.add_argument('CONTAINER',
help='specify container to get logs from')
parse_logs.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_logs.add_argument('-z',
action='store_true',
help='do not print any output')
parse_logs.set_defaults(func=logs.logs.main)
# new
parse_new = subparsers.add_parser('new',
help='new container')
parse_new.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_new.add_argument('--toggle_default',
action='store_true',
help='toggle using default services, uses them by default')
parse_new.add_argument('--no_curses', '-n',
action='store_true',
help='do not use curses')
parse_new.add_argument('--service', '-s',
action='append',
help='add a service to the container, can be used more than once, only used with no_curses')
parse_new.add_argument('--image', '-i',
help='specify an image, only used with no_curses')
parse_new.add_argument('--host',
action='append',
help='add a host to run the container one, can be used more than once, only used with no_curses')
parse_new.add_argument('--command', '-c',
action='store_true',
help='override command at runtime of container, only used with no_curses')
parse_new.add_argument('--entrypoint', '-e',
action='store_true',
help='override entrypoint at runtime of container, only used with no_curses')
parse_new.add_argument('--volume',
action='store_true',
help='add volumes at runtime of container, only used with no_curses')
parse_new.add_argument('--volume_from',
action='store_true',
help='add volumes from other containers at runtime of container, only used with no_curses')
parse_new.add_argument('--port', '-p',
action='store_true',
help='set ports at runtime of container, only used with no_curses')
parse_new.add_argument('--link', '-l',
action='store_true',
help='add links to containers at runtime of container, only used with no_curses')
parse_new.add_argument('--name',
action='store_true',
help='set ports at runtime of container, only used with no_curses')
parse_new.add_argument('--unique', '-u',
action='store_true',
help='set different runtime parameters for each container, only used with no_curses')
parse_new.add_argument('--user',
action='store_true',
help='add a user at runtime of container, only used with no_curses')
parse_new.set_defaults(func=new.new.main)
# remove
parse_remove = subparsers.add_parser('rm',
help='remove a container')
parse_remove.add_argument('CONTAINER',
help='specify container to remove')
parse_remove.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_remove.add_argument('-z',
action='store_true',
help='do not print any output')
parse_remove.set_defaults(func=remove.remove.main)
# repositories
parse_repositories = subparsers.add_parser('repositories',
help='list repositories that are registered')
parse_repositories.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_repositories.add_argument('-z',
action='store_true',
help='do not print any output')
parse_repositories.set_defaults(func=repositories.repositories.main)
# revoke
parse_revoke = subparsers.add_parser('revoke',
help='revoke access to container for a user')
parse_revoke.add_argument('USER',
help='specify user to revoke access')
parse_revoke.add_argument('--container', '-c',
default="all",
help='specify container to remove access to for the specified user, default all')
parse_revoke.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_revoke.add_argument('-z',
action='store_true',
help='do not print any output')
parse_revoke.set_defaults(func=revoke.revoke.main)
# services
parse_services = subparsers.add_parser('services',
help='list services')
parse_services.add_argument('-j', '--json',
action='store_true',
help='print complete JSON object for each service')
parse_services.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_services.add_argument('-q', '--quiet',
action='store_true',
help='print only the name, will ignore -j if also supplied')
parse_services.add_argument('-z',
action='store_true',
help='do not print any output')
parse_services.set_defaults(func=services.services.main)
# snapshot
parse_snapshot = subparsers.add_parser('snapshot',
help='snapshot running container')
parse_snapshot.add_argument('CONTAINER',
help='specify container to snapshot')
parse_snapshot.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_snapshot.add_argument('-z',
action='store_true',
help='do not print any output')
parse_snapshot.set_defaults(func=snapshot.snapshot.main)
# snapshots
parse_snapshots = subparsers.add_parser('snapshots',
help='list snapshots')
parse_snapshots.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_snapshots.add_argument('-z',
action='store_true',
help='do not print any output')
parse_snapshots.set_defaults(func=snapshots.snapshots.main)
# start
parse_start = subparsers.add_parser('start',
help='start the api/repository service server')
parse_start.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_start.add_argument('-z',
action='store_true',
help='do not print any output')
parse_start.set_defaults(func=start.start.main)
# stop
parse_stop = subparsers.add_parser('stop',
help='stop the api/repository service server')
parse_stop.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_stop.add_argument('-z',
action='store_true',
help='do not print any output')
parse_stop.set_defaults(func=stop.stop.main)
# subtract
parse_subtract = subparsers.add_parser('subtract',
help='subtract a service')
parse_subtract.add_argument('OS',
help='specify operating system for service')
parse_subtract.add_argument('VERSION',
help='specify version of operating system')
parse_subtract.add_argument('TYPE',
help='specify type of service (database, environment, service, tool)')
parse_subtract.add_argument('NAME',
help='specify name of service')
parse_subtract.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
# !! TODO use default, and restructure if/else in subtract
parse_subtract.add_argument('--repository', '-r',
help='specify repository to subtract service from, use localhost by default')
parse_subtract.set_defaults(func=subtract.subtract.main)
# test
parse_test = subparsers.add_parser('test',
help='run tests')
parse_test.add_argument('-c',
action='store_true',
help='send to coveralls')
parse_test.add_argument('-f',
action='store_true',
help='do not run tests')
parse_test.add_argument('--path', '-p',
default=default_path,
help='path to test, default '+default_path)
parse_test.set_defaults(func=test.test.main)
# unlink
parse_unlink = subparsers.add_parser('unlink',
help='unlink a service repository')
parse_unlink.add_argument('NAME',
help='specify name of service repository to disconnect from')
parse_unlink.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
parse_unlink.set_defaults(func=unlink.unlink.main)
# update
parse_update = subparsers.add_parser('update',
help='update service repository hosts')
parse_update.add_argument('--metadata_path', '-m',
default=default_metadata_path,
help='metadata path, default '+default_metadata_path)
        parse_update.add_argument('-r', '--repository',
help='specify service repository host to get updates from')
parse_update.add_argument('-z',
action='store_true',
help='do not print any output')
parse_update.set_defaults(func=update.update.main)
# version
parse_version = subparsers.add_parser('version',
help='show version')
parse_version.add_argument('-z',
action='store_true',
help='do not print any output')
parse_version.set_defaults(func=version.version.main)
args = parser.parse_args()
        if getattr(args, 'func', None):
args.func(args)
def main():
cli().parse_args()
if __name__ == "__main__": # pragma: no cover
main()
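# Example invocations (illustrative; assumes the package installs a `bowl`
# console entry point that calls main()):
#   bowl connect 192.168.1.10 --port 2375
#   bowl images -m ~/.bowl
#   bowl new --no_curses -i <image> -s <service> --host <docker-host>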
| 52.581633 | 128 | 0.51799 | ["Apache-2.0"] | cglewis/bowl | bowl/cli.py | 25,765 | Python |
from flask import render_template, Blueprint, request, current_app
from flask_sqlalchemy import SQLAlchemy
# from QA_api import get_traintest_images
import QA_api
from QA_config import config
from QA_db import Image, Project, Job, Roi, get_latest_modelid, get_imagetable
html = Blueprint("html", __name__, static_folder="static", template_folder="templates")
db = SQLAlchemy()
@html.route('/favicon.ico')
def favicon():
return html.send_static_file("favicon.ico")
@html.route('/')
def index():
projects = db.session.query(Project.name, Project.date, Project.iteration, Project.description, Project.id,
Project.images,
db.func.count(Roi.id).label('nROIs'),
(db.func.count(Roi.id) - db.func.ifnull(db.func.sum(Roi.testingROI), 0))
.label('nTrainingROIs'), db.func.count(db.func.distinct(Image.id)).label('nImages'),
db.func.ifnull(db.func.sum(db.func.distinct(Image.nobjects)), 0).label('nObjects')) \
.outerjoin(Image, Image.projId == Project.id) \
.outerjoin(Roi, Roi.imageId == Image.id).group_by(Project.id).all()
return render_template("index.html", projects=projects)
@html.route('/<project_name>', methods=['GET'])
@html.route('/<project_name>/images', methods=['GET'])
def get_imagelist(project_name):
# Get the image list for the project
project = Project.query.filter_by(name=project_name).first()
if not project:
return render_template("error.html")
images = get_imagetable(project)
return render_template("images.html", project=project, images=images)
@html.route('/<project_name>/images/images-main', methods=['GET'])
def images_main(project_name):
# Get the image list for the project
project = Project.query.filter_by(name=project_name).first()
if not project:
return render_template("error.html")
else:
return render_template("images-main.js", project=project)
@html.route('/<project_name>/dataset/<type>', methods=['GET'])
def display_sample_images(project_name, type):
project = Project.query.filter_by(name=project_name).first()
if not project:
return render_template("error.html")
imglist = QA_api.get_traintest_images(project_name, type)
return render_template("sampleimages.html", project_name=project_name, imglist=imglist)
@html.route("/<project_name>/embed", methods=['GET'])
def plotembed(project_name):
current_app.logger.info('Plotting patch embedding:')
project = Project.query.filter_by(name=project_name).first()
if not project:
current_app.logger.error('No project found.')
return render_template("error.html")
latest_modelid = get_latest_modelid(project_name)
selected_modelid = request.args.get('modelid', default=latest_modelid, type=int)
if selected_modelid > latest_modelid or selected_modelid < 0:
error_message = f"Your selected View Embed Model ID is {selected_modelid}. A valid Model ID ranges from 0 to {latest_modelid}."
current_app.logger.error(error_message)
return render_template("embed.html", project_name=project_name, data="None",
project_iteration=project.iteration, current_modelId=selected_modelid,
error_message=error_message)
return render_template("embed.html", project_name=project_name, project_iteration=project.iteration,
current_modelId=selected_modelid)
@html.route("/<project_name>/embed/embed-main.js", methods=['GET']) # --- should not need this function
def embed_main(project_name):
# Get the image list for the project
project = Project.query.filter_by(name=project_name).first()
if not project:
return render_template("error.html")
return render_template("embed-main.js", project_name=project_name)
@html.route('/<project_name>/<image_name>/annotation', methods=['GET'])
def annotation(project_name, image_name):
project = Project.query.filter_by(name=project_name).first()
if not project:
return render_template("error.html")
# Method 1
# image = Image.query.filter_by(projId=project.id, name=image_name).first()
# image.nROIs = Roi.query.filter_by(imageId=image.id).count()
# image.nTrainingROIs = Roi.query.filter_by(imageId=image.id, testingROI=0).count()
# Method 2 (corresponding sql code)
# SELECT image.id, count(roi.id)
# FROM image
# JOIN roi
# ON roi.imageId = image.id
# WHERE image.id = 1
# GROUP BY image.id
image = db.session.query(Image.id, Image.projId, Image.name, Image.path, Image.height, Image.width, Image.date,
Image.rois, Image.make_patches_time, Image.nobjects,
db.func.count(Roi.id).label('nROIs'),
(db.func.count(Roi.id) - db.func.ifnull(db.func.sum(Roi.testingROI), 0))
.label('nTrainingROIs')). \
outerjoin(Roi, Roi.imageId == Image.id). \
filter(Image.name == image_name).filter(Image.projId == project.id).group_by(Image.id).first()
x = request.args.get('startX', "#")
y = request.args.get('startY', "#")
defaultCropSize = config.getint('common', 'patchsize', fallback=256)
return render_template("annotation.html", project=project, image=image, startX=x, startY=y,
defaultCropSize=defaultCropSize)
# For templates which just use the project and image name:
def rendered_project_image(template_name, project_name, image_name):
project = Project.query.filter_by(name=project_name).first()
image = Image.query.filter_by(projId=project.id, name=image_name).first()
defaultCropSize = config.getint('common', 'patchsize', fallback=256)
return render_template(template_name, project=project, image=image, defaultCropSize=defaultCropSize)
@html.route('/<project_name>/<image_name>/annotation-main.js', methods=['GET'])
def annotation_main(project_name, image_name):
return rendered_project_image('annotation-main.js', project_name, image_name)
@html.route('/<project_name>/<image_name>/annotation-tool.js', methods=['GET'])
def annotation_tool(project_name, image_name):
return rendered_project_image('annotation-tool.js', project_name, image_name)
@html.route('/<project_name>/<image_name>/annotation-utils.js', methods=['GET'])
def annotation_utils(project_name, image_name):
return rendered_project_image('annotation-utils.js', project_name, image_name)
@html.route("/jobs", methods=['GET'])
@html.route("/<project_name>/jobs", methods=['GET'])
def renderprojectjob(project_name=None):
if (project_name):
proj = Project.query.filter_by(name=project_name).first()
if not proj:
return render_template("error.html")
jobs = proj.jobs
else:
jobs = Job.query.all()
return render_template('jobs.html', jobs=jobs)
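# Wiring sketch (added for clarity; the real application factory lives elsewhere
# in the project, so the names below are assumptions):
#
#     from flask import Flask
#     app = Flask(__name__)
#     db.init_app(app)
#     app.register_blueprint(html)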
| 41.070175 | 135 | 0.686601 | ["BSD-3-Clause-Clear"] | cacof1/QuickAnnotator | QA_html.py | 7,023 | Python |
# -*- coding: utf-8 -*-
from django.core.cache import get_cache
from django.utils.functional import cached_property
from jinja2 import BytecodeCache as _BytecodeCache
class BytecodeCache(_BytecodeCache):
"""
A bytecode cache for Jinja2 that uses Django's caching framework.
"""
def __init__(self, cache_name):
self._cache_name = cache_name
@cached_property
def backend(self):
return get_cache(self._cache_name)
def load_bytecode(self, bucket):
key = 'jinja2_%s' % str(bucket.key)
bytecode = self.backend.get(key)
if bytecode:
bucket.bytecode_from_string(bytecode)
def dump_bytecode(self, bucket):
key = 'jinja2_%s' % str(bucket.key)
self.backend.set(key, bucket.bytecode_to_string())
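# Usage sketch (added for clarity; assumes a cache alias such as "jinja2" is
# defined in settings.CACHES). Handing the cache to a Jinja2 Environment lets
# compiled template bytecode survive process restarts:
#
#     from jinja2 import Environment
#     env = Environment(bytecode_cache=BytecodeCache("jinja2"))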
| 27.275862 | 69 | 0.680152 | ["BSD-3-Clause"] | centum/django-jinja | django_jinja/cache.py | 791 | Python |
from django.contrib import admin
from .transitions import TransactionLog, TransitionLog, EvaluationLog
# Register your models here.
admin.site.register(TransactionLog)
admin.site.register(TransitionLog)
admin.site.register(EvaluationLog) | 29.875 | 69 | 0.845188 | [
"MIT"
] | tira-io/tira | tira-application/src/tira/admin.py | 239 | Python |
from unittest import TestCase
from pyVHDLParser.Blocks.List import PortList
from pyVHDLParser.Blocks.List.PortList import PortListInterfaceSignalBlock
from pyVHDLParser.Token import WordToken, StartOfDocumentToken, SpaceToken, CharacterToken, EndOfDocumentToken
from pyVHDLParser.Blocks import StartOfDocumentBlock, EndOfDocumentBlock
from pyVHDLParser.Blocks.Common import WhitespaceBlock
from pyVHDLParser.Blocks.Structural import Entity
from tests.unit.Common import Initializer, ExpectedDataMixin, LinkingTests, TokenLinking, TokenSequence, BlockSequence, ExpectedTokenStream, ExpectedBlockStream
if __name__ == "__main__":
print("ERROR: you called a testcase declaration file as an executable module.")
print("Use: 'python -m unitest <testcase module>'")
exit(1)
def setUpModule():
i = Initializer()
class SimplePortList_OneLine_SinglePort(TestCase, ExpectedDataMixin, LinkingTests, TokenSequence, BlockSequence):
code = "entity e is port (port1 : bit); end;"
tokenStream = ExpectedTokenStream(
[ (StartOfDocumentToken, None),
(WordToken, "entity"),
(SpaceToken, " "),
(WordToken, "e"),
(SpaceToken, " "),
(WordToken, "is"),
(SpaceToken, " "),
(WordToken, "port"),
(SpaceToken, " "),
(CharacterToken, "("),
(WordToken, "port1"),
(SpaceToken, " "),
(CharacterToken, ":"),
(SpaceToken, " "),
(WordToken, "bit"),
(CharacterToken, ")"),
(CharacterToken, ";"),
(SpaceToken, " "),
(WordToken, "end"),
(CharacterToken, ";"),
(EndOfDocumentToken, None)
]
)
blockStream = ExpectedBlockStream(
[ (StartOfDocumentBlock, None), #
(Entity.NameBlock, "entity e is"), # entity e is
(WhitespaceBlock, " "), #
(PortList.OpenBlock, "port ("), # port (
(PortListInterfaceSignalBlock, "port1 : bit"), # port1 : bit
(PortList.CloseBlock, ");"), # );
(WhitespaceBlock, " "), #
(Entity.EndBlock, "end;"), # end;
(EndOfDocumentBlock, None) #
]
)
class SimplePortList_OneLine_DoublePort(TestCase, ExpectedDataMixin, TokenLinking, TokenSequence, BlockSequence):
code = "entity e is port (port1 : bit; port2 : boolean ); end;"
tokenStream = ExpectedTokenStream(
[ (StartOfDocumentToken, None),
(WordToken, "entity"),
(SpaceToken, " "),
(WordToken, "e"),
(SpaceToken, " "),
(WordToken, "is"),
(SpaceToken, " "),
(WordToken, "port"),
(SpaceToken, " "),
(CharacterToken, "("),
(WordToken, "port1"),
(SpaceToken, " "),
(CharacterToken, ":"),
(SpaceToken, " "),
(WordToken, "bit"),
(CharacterToken, ";"),
(SpaceToken, " "),
(WordToken, "port2"),
(SpaceToken, " "),
(CharacterToken, ":"),
(SpaceToken, " "),
(WordToken, "boolean"),
(SpaceToken, " "),
(CharacterToken, ")"),
(CharacterToken, ";"),
(SpaceToken, " "),
(WordToken, "end"),
(CharacterToken, ";"),
(EndOfDocumentToken, None)
]
)
blockStream = ExpectedBlockStream(
[ (StartOfDocumentBlock, None), #
(Entity.NameBlock, "entity e is"), # entity e is
(WhitespaceBlock, " "), #
(PortList.OpenBlock, "port ("), # port (
(PortListInterfaceSignalBlock, "port1 : bit"), # port1 : bit
(PortList.DelimiterBlock, ";"), # ;
(PortListInterfaceSignalBlock, "port2 : boolean "), # port2 : boolean
(PortList.CloseBlock, ");"), # );
(WhitespaceBlock, " "), #
(Entity.EndBlock, "end;"), # end;
(EndOfDocumentBlock, None) #
]
)
| 39.412844 | 177 | 0.51676 | ["Apache-2.0"] | JosephAbbey/pyVHDLParser | tests/unit/SimpleBlockSequences/PortList.py | 4,296 | Python |
# Future
from __future__ import division, print_function, unicode_literals
# Standard Library
import time
# Third Party
import pytest
import ratelimit
# DocumentCloud
from documentcloud.constants import RATE_LIMIT
from documentcloud.exceptions import APIError, CredentialsFailedError
# pylint: disable=protected-access
def test_set_tokens_credentials(client):
"""Test setting the tokens using credentials"""
client.refresh_token = None
del client.session.headers["Authorization"]
client._set_tokens()
assert client.refresh_token
assert "Authorization" in client.session.headers
def test_set_tokens_refresh(client):
"""Test setting the tokens using refresh token"""
# first set tokens sets, refresh token, second one uses it
client.refresh_token = None
del client.session.headers["Authorization"]
client._set_tokens()
client._set_tokens()
assert client.refresh_token
assert "Authorization" in client.session.headers
def test_set_tokens_none(public_client):
"""Test setting the tokens with no credentials"""
public_client._set_tokens()
assert public_client.refresh_token is None
assert "Authorization" not in public_client.session.headers
def test_get_tokens(client):
"""Test getting access and refresh tokens using valid credentials"""
access, refresh = client._get_tokens(client.username, client.password)
assert access
assert refresh
def test_get_tokens_bad_credentials(client):
"""Test getting access and refresh tokens using invalid credentials"""
with pytest.raises(CredentialsFailedError):
client._get_tokens(client.username, "foo")
def test_refresh_tokens(client):
"""Test refreshing the tokens"""
access, refresh = client._refresh_tokens(client.refresh_token)
assert access
assert refresh
def test_user_id(client):
assert client.user_id
def test_user_id_public(public_client):
# pylint: disable=pointless-statement
with pytest.raises(APIError, match=r"404"):
public_client.user_id
def test_bad_attr(client):
with pytest.raises(AttributeError):
assert client.foo
def test_rate_limit(rate_client):
with pytest.raises(ratelimit.RateLimitException):
for _ in range(RATE_LIMIT * 2):
rate_client.users.get("me")
@pytest.mark.short
@pytest.mark.vcr(cassette_library_dir="tests/cassettes/short_fixtures")
def test_expired_access_token(short_client, record_mode):
# get fresh tokens
short_client._set_tokens()
old_refresh_token = short_client.refresh_token
# wait for the access token to expire
if record_mode == "all":
time.sleep(3)
# make a request
assert short_client.users.get("me")
# check the refresh token was updated
assert old_refresh_token != short_client.refresh_token
@pytest.mark.short
@pytest.mark.vcr(cassette_library_dir="tests/cassettes/short_fixtures")
def test_expired_refresh_token(short_client, record_mode):
# get fresh tokens
short_client._set_tokens()
old_refresh_token = short_client.refresh_token
# wait for the access and refresh tokens to expire
if record_mode == "all":
time.sleep(6)
# make a request
assert short_client.users.get("me")
# check the refresh token was updated
assert old_refresh_token != short_client.refresh_token
| 29.377193 | 74 | 0.750672 | ["MIT"] | MuckRock/python-documentcloud | tests/test_client.py | 3,349 | Python |
# -*- coding: utf-8 -*-
"""
Esendex sms gateway backend. (http://www.esendex.es/)
Configuration example.
~~~~~~~~~~~~~~~~~~~~~~
Modify your settings.py::
ESENDEX_USERNAME = 'yourusername'
ESENDEX_PASSWORD = 'mysecretpassword'
ESENDEX_ACCOUNT = 'account-key-provided-by-esendex'
ESENDEX_SANDBOX = False # True if yo like test first
INSTALLED_APPS += ['sendsms']
Usage::
from sendsms.message import SmsMessage
message = SmsMessage(
body = 'my 160 chars sms',
from_phone = '111111111',
to = ['222222222']
)
message.send()
"""
from django.conf import settings
import requests
from .base import BaseSmsBackend
ESENDEX_API_URL = "https://www.esendex.com/secure/messenger/formpost/SendSMS.aspx"
ESENDEX_USERNAME = getattr(settings, "ESENDEX_USERNAME", "")
ESENDEX_PASSWORD = getattr(settings, "ESENDEX_PASSWORD", "")
ESENDEX_ACCOUNT = getattr(settings, "ESENDEX_ACCOUNT", "")
ESENDEX_SANDBOX = getattr(settings, "ESENDEX_SANDBOX", False)
class SmsBackend(BaseSmsBackend):
"""
SMS Backend for esendex.es provider.
    The "get_xxxx" methods exist to make subclassing easier. If, in a private
    project, the access credentials are dynamic and stored in the database, a
    child class can override the "get_xxxx" methods to return the stored values.
"""
def get_username(self):
return ESENDEX_USERNAME
def get_password(self):
return ESENDEX_PASSWORD
def get_account(self):
return ESENDEX_ACCOUNT
def _parse_response(self, response):
"""
        Parse the raw HTTP response into a Python
dictionary object.
:param str response: http response
:returns: response dict
:rtype: dict
"""
response_dict = {}
for line in response.splitlines():
            key, value = line.split("=", 1)
response_dict[key] = value
return response_dict
def _send(self, message):
"""
Private method to send one message.
:param SmsMessage message: SmsMessage class instance.
:returns: True if message is sent else False
:rtype: bool
"""
params = {
"EsendexUsername": self.get_username(),
"EsendexPassword": self.get_password(),
"EsendexAccount": self.get_account(),
"EsendexOriginator": message.from_phone,
"EsendexRecipient": ",".join(message.to),
"EsendexBody": message.body,
"EsendexPlainText": "1",
}
if ESENDEX_SANDBOX:
params["EsendexTest"] = "1"
response = requests.post(ESENDEX_API_URL, params)
if response.status_code != 200:
if not self.fail_silently:
raise Exception("Bad status code")
else:
return False
if not response.content.startswith(b"Result"):
if not self.fail_silently:
raise Exception("Bad result")
else:
return False
response = self._parse_response(response.content.decode("utf8"))
if ESENDEX_SANDBOX and response["Result"] == "Test":
return True
else:
if response["Result"].startswith("OK"):
return True
else:
if not self.fail_silently:
raise Exception("Bad result")
return False
def send_messages(self, messages):
"""
Send messages.
:param list messages: List of SmsMessage instances.
:returns: number of messages sended successful.
:rtype: int
"""
counter = 0
for message in messages:
res = self._send(message)
if res:
counter += 1
return counter
| 28.117647 | 83 | 0.600941 | ["MIT"] | codesankalp/django-sendsms | sendsms/backends/esendex.py | 3,824 | Python |
import torch
class FGM():
def __init__(self, model):
self.model = model
self.backup = {}
def attack(self, epsilon=1e-6, emb_name='embed'):
        # emb_name: set this to the name of the embedding parameter in your model
for name, param in self.model.named_parameters():
if param.requires_grad and emb_name in name:
self.backup[name] = param.data.clone()
norm = torch.norm(param.grad)
if norm != 0 and not torch.isnan(norm):
r_at = epsilon * param.grad / norm
param.data.add_(r_at)
def restore(self, emb_name='embed'):
        # emb_name: set this to the name of the embedding parameter in your model
for name, param in self.model.named_parameters():
if param.requires_grad and emb_name in name:
assert name in self.backup
param.data = self.backup[name]
self.backup = {}
class PGD():
def __init__(self, model):
self.model = model
self.emb_backup = {}
self.grad_backup = {}
def attack(self, epsilon=1e-6, alpha=0.3, emb_name='embed', is_first_attack=False):
        # emb_name: set this to the name of the embedding parameter in your model
for name, param in self.model.named_parameters():
if param.requires_grad and emb_name in name:
# print (name)
if is_first_attack:
self.emb_backup[name] = param.data.clone()
norm = torch.norm(param.grad)
if norm != 0 and not torch.isnan(norm):
r_at = alpha * param.grad / norm
param.data.add_(r_at)
param.data = self.project(name, param.data, epsilon)
def restore(self, emb_name='embed'):
        # emb_name: set this to the name of the embedding parameter in your model
for name, param in self.model.named_parameters():
if param.requires_grad and emb_name in name:
assert name in self.emb_backup
param.data = self.emb_backup[name]
self.emb_backup = {}
def project(self, param_name, param_data, epsilon):
r = param_data - self.emb_backup[param_name]
if torch.norm(r) > epsilon:
r = epsilon * r / torch.norm(r)
return self.emb_backup[param_name] + r
def backup_grad(self):
for name, param in self.model.named_parameters():
if param.requires_grad:
self.grad_backup[name] = param.grad.clone()
def restore_grad(self):
for name, param in self.model.named_parameters():
if param.requires_grad:
param.grad = self.grad_backup[name]
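# Typical training-loop usage (sketch added for clarity; `model`, `batch`,
# `compute_loss`, `optimizer` and `K` are placeholders, not part of this file):
#
#     fgm = FGM(model)
#     loss = compute_loss(model, batch)
#     loss.backward()                        # gradients on the clean batch
#     fgm.attack()                           # perturb embedding weights in place
#     compute_loss(model, batch).backward()  # accumulate adversarial gradients
#     fgm.restore()                          # put the original embeddings back
#     optimizer.step(); optimizer.zero_grad()
#
#     pgd = PGD(model)
#     loss = compute_loss(model, batch)
#     loss.backward()
#     pgd.backup_grad()
#     for t in range(K):
#         pgd.attack(is_first_attack=(t == 0))
#         if t != K - 1:
#             model.zero_grad()
#         else:
#             pgd.restore_grad()
#         compute_loss(model, batch).backward()
#     pgd.restore()
#     optimizer.step(); optimizer.zero_grad()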
| 36.56338 | 87 | 0.580123 | ["Apache-2.0"] | ksboy/adversarial_attack | modules.py | 2,716 | Python |
import unittest
import numpy as np
import numpy.testing as npt
import wisdem.drivetrainse.gearbox as gb
class TestGearbox(unittest.TestCase):
def setUp(self):
self.inputs = {}
self.outputs = {}
self.discrete_inputs = {}
self.discrete_outputs = {}
# 5MW inputs
self.discrete_inputs["gear_configuration"] = "eep"
self.discrete_inputs["shaft_factor"] = "normal"
self.discrete_inputs["planet_numbers"] = [3, 3, 0]
self.inputs["gear_ratio"] = 97.0
self.inputs["rotor_diameter"] = 126.0
self.inputs["rated_torque"] = 3946e3
self.inputs["machine_rating"] = 5e3
self.myobj = gb.Gearbox(direct_drive=False)
def testDirectDrive(self):
self.myobj = gb.Gearbox(direct_drive=True)
self.myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_equal(self.outputs["stage_ratios"], 0.0)
self.assertEqual(self.outputs["gearbox_mass"], 0.0)
npt.assert_equal(self.outputs["gearbox_I"], 0.0)
self.assertEqual(self.outputs["L_gearbox"], 0.0)
self.assertEqual(self.outputs["D_gearbox"], 0.0)
def testEEP(self):
self.myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
print("eep", self.outputs["stage_ratios"], self.outputs["gearbox_mass"])
self.assertAlmostEqual(np.prod(self.outputs["stage_ratios"]), self.inputs["gear_ratio"], 1)
# self.assertEqual(self.outputs['gearbox_mass'], 0.0)
npt.assert_equal(
self.outputs["gearbox_I"][0], 0.5 * self.outputs["gearbox_mass"] * 0.25 * self.outputs["D_gearbox"] ** 2
)
npt.assert_almost_equal(
self.outputs["gearbox_I"][1:],
self.outputs["gearbox_mass"]
* (0.75 * self.outputs["D_gearbox"] ** 2 + self.outputs["L_gearbox"] ** 2)
/ 12.0,
)
self.assertEqual(self.outputs["L_gearbox"], 0.012 * 126.0)
self.assertEqual(self.outputs["D_gearbox"], 0.75 * 0.015 * 126.0)
def testEEP3(self):
self.discrete_inputs["gear_configuration"] = "eep_3"
self.myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
print("eep3", self.outputs["stage_ratios"], self.outputs["gearbox_mass"])
self.assertAlmostEqual(np.prod(self.outputs["stage_ratios"]), self.inputs["gear_ratio"], 1)
self.assertEqual(self.outputs["stage_ratios"][-1], 3.0)
# self.assertEqual(self.outputs['gearbox_mass'], 0.0)
npt.assert_equal(
self.outputs["gearbox_I"][0], 0.5 * self.outputs["gearbox_mass"] * 0.25 * self.outputs["D_gearbox"] ** 2
)
npt.assert_almost_equal(
self.outputs["gearbox_I"][1:],
self.outputs["gearbox_mass"]
* (0.75 * self.outputs["D_gearbox"] ** 2 + self.outputs["L_gearbox"] ** 2)
/ 12.0,
)
self.assertEqual(self.outputs["L_gearbox"], 0.012 * 126.0)
self.assertEqual(self.outputs["D_gearbox"], 0.75 * 0.015 * 126.0)
def testEEP2(self):
self.discrete_inputs["gear_configuration"] = "eep_2"
self.myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
print("eep2", self.outputs["stage_ratios"], self.outputs["gearbox_mass"])
self.assertAlmostEqual(np.prod(self.outputs["stage_ratios"]), self.inputs["gear_ratio"], 1)
self.assertEqual(self.outputs["stage_ratios"][-1], 2.0)
# self.assertEqual(self.outputs['gearbox_mass'], 0.0)
npt.assert_equal(
self.outputs["gearbox_I"][0], 0.5 * self.outputs["gearbox_mass"] * 0.25 * self.outputs["D_gearbox"] ** 2
)
npt.assert_almost_equal(
self.outputs["gearbox_I"][1:],
self.outputs["gearbox_mass"]
* (0.75 * self.outputs["D_gearbox"] ** 2 + self.outputs["L_gearbox"] ** 2)
/ 12.0,
)
self.assertEqual(self.outputs["L_gearbox"], 0.012 * 126.0)
self.assertEqual(self.outputs["D_gearbox"], 0.75 * 0.015 * 126.0)
def testEEP_planet4_1(self):
self.discrete_inputs["gear_configuration"] = "eep"
self.discrete_inputs["planet_numbers"] = [4, 3, 0]
self.myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
print("eep_4-1", self.outputs["stage_ratios"], self.outputs["gearbox_mass"])
self.assertAlmostEqual(np.prod(self.outputs["stage_ratios"]), self.inputs["gear_ratio"], 1)
# self.assertEqual(self.outputs['gearbox_mass'], 0.0)
npt.assert_equal(
self.outputs["gearbox_I"][0], 0.5 * self.outputs["gearbox_mass"] * 0.25 * self.outputs["D_gearbox"] ** 2
)
npt.assert_almost_equal(
self.outputs["gearbox_I"][1:],
self.outputs["gearbox_mass"]
* (0.75 * self.outputs["D_gearbox"] ** 2 + self.outputs["L_gearbox"] ** 2)
/ 12.0,
)
self.assertEqual(self.outputs["L_gearbox"], 0.012 * 126.0)
self.assertEqual(self.outputs["D_gearbox"], 0.75 * 0.015 * 126.0)
def testEEP_planet4_2(self):
self.discrete_inputs["gear_configuration"] = "eep"
self.discrete_inputs["planet_numbers"] = [3, 4, 0]
self.myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
print("eep_4-2", self.outputs["stage_ratios"], self.outputs["gearbox_mass"])
self.assertAlmostEqual(np.prod(self.outputs["stage_ratios"]), self.inputs["gear_ratio"], 1)
# self.assertEqual(self.outputs['gearbox_mass'], 0.0)
npt.assert_equal(
self.outputs["gearbox_I"][0], 0.5 * self.outputs["gearbox_mass"] * 0.25 * self.outputs["D_gearbox"] ** 2
)
npt.assert_almost_equal(
self.outputs["gearbox_I"][1:],
self.outputs["gearbox_mass"]
* (0.75 * self.outputs["D_gearbox"] ** 2 + self.outputs["L_gearbox"] ** 2)
/ 12.0,
)
self.assertEqual(self.outputs["L_gearbox"], 0.012 * 126.0)
self.assertEqual(self.outputs["D_gearbox"], 0.75 * 0.015 * 126.0)
def testEPP(self):
self.discrete_inputs["gear_configuration"] = "epp"
self.myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
print("epp", self.outputs["stage_ratios"], self.outputs["gearbox_mass"])
self.assertAlmostEqual(np.prod(self.outputs["stage_ratios"]), self.inputs["gear_ratio"], 1)
# self.assertEqual(self.outputs['gearbox_mass'], 0.0)
npt.assert_equal(
self.outputs["gearbox_I"][0], 0.5 * self.outputs["gearbox_mass"] * 0.25 * self.outputs["D_gearbox"] ** 2
)
npt.assert_almost_equal(
self.outputs["gearbox_I"][1:],
self.outputs["gearbox_mass"]
* (0.75 * self.outputs["D_gearbox"] ** 2 + self.outputs["L_gearbox"] ** 2)
/ 12.0,
)
self.assertEqual(self.outputs["L_gearbox"], 0.012 * 126.0)
self.assertEqual(self.outputs["D_gearbox"], 0.75 * 0.015 * 126.0)
def testLargeMachine(self):
self.inputs["gear_ratio"] = 200.0
self.inputs["rotor_diameter"] = 200.0
self.inputs["rotor_torque"] = 10e3
self.myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
print("large", self.outputs["stage_ratios"], self.outputs["gearbox_mass"])
self.assertAlmostEqual(np.prod(self.outputs["stage_ratios"]), self.inputs["gear_ratio"], 1)
# self.assertEqual(self.outputs['gearbox_mass'], 0.0)
npt.assert_equal(
self.outputs["gearbox_I"][0], 0.5 * self.outputs["gearbox_mass"] * 0.25 * self.outputs["D_gearbox"] ** 2
)
npt.assert_almost_equal(
self.outputs["gearbox_I"][1:],
self.outputs["gearbox_mass"]
* (0.75 * self.outputs["D_gearbox"] ** 2 + self.outputs["L_gearbox"] ** 2)
/ 12.0,
)
self.assertEqual(self.outputs["L_gearbox"], 0.012 * 200.0)
self.assertEqual(self.outputs["D_gearbox"], 0.75 * 0.015 * 200.0)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestGearbox))
return suite
if __name__ == "__main__":
result = unittest.TextTestRunner().run(suite())
if result.wasSuccessful():
exit(0)
else:
exit(1)
| 44.893617 | 116 | 0.619076 | ["Apache-2.0"] | AthulKrishnaSundarrajan/WEIS | WISDEM/wisdem/test/test_drivetrainse/test_gearbox.py | 8,440 | Python
# client-pypeln-pl.task.py
from aiohttp import ClientSession, TCPConnector
import asyncio
import sys
import pypeln as pl
limit = 1000
urls = ("http://localhost:8080/{}".format(i) for i in range(int(sys.argv[1])))
async def fetch(url, session):
async with session.get(url) as response:
return await response.read()
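# pl.task.each runs `fetch` concurrently over every URL: on_start creates the shared
# aiohttp session that is passed into each call, on_done closes it when the pipeline
# finishes, and run=True blocks until all requests have completed.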
pl.task.each(
fetch,
urls,
workers=limit,
on_start=lambda: dict(session=ClientSession(connector=TCPConnector(limit=None))),
on_done=lambda session: session.close(),
run=True,
)
| 20.538462 | 85 | 0.702247 | ["MIT"] | Davidnet/pypeln | benchmarks/100_million_downloads/client-pypeln-io.py | 534 | Python
from __future__ import annotations
import time
import disnake
from disnake.ext import commands
from PIL import Image
from main import tracked_templates
from utils.arguments_parser import MyParser
from utils.pxls.template_manager import (
Combo,
layer,
)
from utils.setup import stats
from utils.discord_utils import image_to_file, CreateTemplateView
class Layer(commands.Cog):
def __init__(self, bot: commands.Bot) -> None:
self.bot: commands.Bot = bot
@commands.slash_command(name="layer")
async def _layer(
self,
inter: disnake.AppCmdInter,
templates: str,
):
"""Layer several templates.
Parameters
----------
templates: List of templates (URL or name) separated by a space (last goes above).
"""
await inter.response.defer()
        # Split the space-separated input into individual template names/URLs
template_uris = templates.split(" ")
await self.layer(inter, template_uris)
@commands.command(
name="layer",
description="Layer several templates.",
usage="<templates>",
help="""
- `<templates>`: List of templates (URL or name) separated by a space (last goes above).
""",
)
async def p_layer(self, ctx, *args):
parser = MyParser(add_help=False)
parser.add_argument("templates", nargs="+")
try:
parsed_args, _ = parser.parse_known_args(args)
template_uris = parsed_args.templates
except ValueError as e:
return await ctx.send(f"❌ {e}")
async with ctx.typing():
await self.layer(ctx, template_uris)
@staticmethod
async def layer(ctx, template_uris):
try:
templates = await tracked_templates.get_templates(template_uris)
except ValueError as e:
return await ctx.send(f"❌ {e}")
start = time.time()
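        # Merge all templates into one palettized array; templates later in the list
        # are drawn on top of earlier ones.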
ox, oy, palettized_array = layer(templates)
if palettized_array.size == 0:
return await ctx.send("❌ No placeable pixels in the layered template.")
img = Image.fromarray(stats.palettize_array(palettized_array))
end = time.time()
embed = disnake.Embed(color=0x66C5CC, title="Layered")
embed.set_footer(text=f"Layered in {round((end-start),3)}s")
file = await image_to_file(img, "layered.png", embed)
# Use the combo object here because it doesn't generate a placeable mask
template = Combo(None, palettized_array, ox, oy, None, None, None)
view = CreateTemplateView(ctx, template)
m = await ctx.send(file=file, embed=embed, view=view)
# save the URL of the image sent to use it to generate templates later
if isinstance(ctx, disnake.AppCmdInter):
m = await ctx.original_message()
view.template_image_url = m.embeds[0].image.url
view.message = m
def setup(bot: commands.Bot):
bot.add_cog(Layer(bot))
| 32.549451 | 100 | 0.635719 | ["MIT"] | GrayTurtles/Clueless | src/cogs/pxls_template/layer.py | 2,968 | Python
from __future__ import absolute_import
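# Delete every MetadataType and MetadataSet; the models are imported lazily inside the
# function, so importing this module does not pull them in at load time.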
def cleanup():
from .models import MetadataType, MetadataSet
MetadataType.objects.all().delete()
MetadataSet.objects.all().delete()
| 20.666667 | 49 | 0.747312 | ["Apache-2.0"] | Dave360-crypto/mayan-edms | mayan/apps/metadata/cleanup.py | 186 | Python
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2010
"cuda"
import os
from waflib import Task
from waflib.TaskGen import extension
from waflib.Tools import ccroot, c_preproc
from waflib.Configure import conf
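# Task that compiles .cu/.cuda sources with nvcc; header dependencies are scanned with
# the standard C preprocessor scanner.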
class cuda(Task.Task):
run_str = '${NVCC} ${CUDAFLAGS} ${CXXFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CXX_SRC_F}${SRC} ${CXX_TGT_F}${TGT}'
color = 'GREEN'
ext_in = ['.h']
vars = ['CCDEPS']
scan = c_preproc.scan
shell = False
@extension('.cu', '.cuda')
def c_hook(self, node):
return self.create_compiled_task('cuda', node)
def configure(conf):
conf.find_program('nvcc', var='NVCC')
conf.find_cuda_libs()
@conf
def find_cuda_libs(self):
"""
find the cuda include and library folders
use ctx.program(source='main.c', target='app', use='CUDA CUDART')
"""
if not self.env.NVCC:
self.fatal('check for nvcc first')
d = self.root.find_node(self.env.NVCC[0]).parent.parent
node = d.find_node('include')
_includes = node and node.abspath() or ''
_libpath = []
for x in ('lib64', 'lib'):
try:
_libpath.append(d.find_node(x).abspath())
except:
pass
# this should not raise any error
self.check_cxx(header='cuda.h', lib='cuda', libpath=_libpath, includes=_includes)
self.check_cxx(header='cuda.h', lib='cudart', libpath=_libpath, includes=_includes)
| 24.232143 | 162 | 0.699337 | ["MIT"] | eanswer/LearningToFly | Firmware/ardupilot/modules/waf/playground/cuda/cuda.py | 1,357 | Python
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django import template
from floreal import models as m
register = template.Library()
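# Template filters for formatting prices, weights and e-mail addresses, plus helpers
# for querying subgroup and order state.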
@register.filter
def price(f):
return u"%.02f€" % f
@register.filter
def price_nocurrency(f):
return u"%.02f" % f
@register.filter
def weight(w):
    if w >= 1:
        return u"%.2gkg" % w
    else:
        return u"%dg" % (w * 1000)
@register.filter
def email(u):
return '"%s %s" <%s>' % (u.first_name, u.last_name, u.email)
@register.filter
def unit_multiple(unit):
if unit[0].isdigit():
return u"×"+unit
else:
return u" "+unit
@register.filter
def subgroup_state(sg, dv):
x = dv.subgroupstatefordelivery_set.filter(delivery=dv, subgroup=sg)
return x[0].state if x else m.SubgroupStateForDelivery.DEFAULT
@register.filter
def subgroup_has_purchases(sg, dv):
return m.Purchase.objects.filter(product__delivery_id=dv,
user__in=m.Subgroup.objects.get(pk=sg).users.all()).exists()
@register.filter
def order(dv, u):
return m.Order(u, dv)
| 19.555556 | 97 | 0.650568 | ["MIT"] | roco/circuit-court | floreal/templatetags/floreal_filters.py | 1,059 | Python
# Copyright 2011 Justin Santa Barbara
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import errno
import hashlib
import os
import os.path
import shutil
import tempfile
import jinja2
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import netutils
import six
from ironic.common import exception
from ironic.common import utils
from ironic.tests import base
CONF = cfg.CONF
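# Tests for the helper functions in ironic.common.utils.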
class BareMetalUtilsTestCase(base.TestCase):
def test_create_link(self):
with mock.patch.object(os, "symlink", autospec=True) as symlink_mock:
symlink_mock.return_value = None
utils.create_link_without_raise("/fake/source", "/fake/link")
symlink_mock.assert_called_once_with("/fake/source", "/fake/link")
def test_create_link_EEXIST(self):
with mock.patch.object(os, "symlink", autospec=True) as symlink_mock:
symlink_mock.side_effect = OSError(errno.EEXIST)
utils.create_link_without_raise("/fake/source", "/fake/link")
symlink_mock.assert_called_once_with("/fake/source", "/fake/link")
class ExecuteTestCase(base.TestCase):
@mock.patch.object(processutils, 'execute', autospec=True)
@mock.patch.object(os.environ, 'copy', return_value={}, autospec=True)
def test_execute_use_standard_locale_no_env_variables(self, env_mock,
execute_mock):
utils.execute('foo', use_standard_locale=True)
execute_mock.assert_called_once_with('foo',
env_variables={'LC_ALL': 'C'})
@mock.patch.object(processutils, 'execute', autospec=True)
def test_execute_use_standard_locale_with_env_variables(self,
execute_mock):
utils.execute('foo', use_standard_locale=True,
env_variables={'foo': 'bar'})
execute_mock.assert_called_once_with('foo',
env_variables={'LC_ALL': 'C',
'foo': 'bar'})
@mock.patch.object(processutils, 'execute', autospec=True)
def test_execute_not_use_standard_locale(self, execute_mock):
utils.execute('foo', use_standard_locale=False,
env_variables={'foo': 'bar'})
execute_mock.assert_called_once_with('foo',
env_variables={'foo': 'bar'})
def test_execute_get_root_helper(self):
with mock.patch.object(
processutils, 'execute', autospec=True) as execute_mock:
helper = utils._get_root_helper()
utils.execute('foo', run_as_root=True)
execute_mock.assert_called_once_with('foo', run_as_root=True,
root_helper=helper)
def test_execute_without_root_helper(self):
with mock.patch.object(
processutils, 'execute', autospec=True) as execute_mock:
utils.execute('foo', run_as_root=False)
execute_mock.assert_called_once_with('foo', run_as_root=False)
class GenericUtilsTestCase(base.TestCase):
@mock.patch.object(utils, 'hashlib', autospec=True)
def test__get_hash_object(self, hashlib_mock):
algorithms_available = ('md5', 'sha1', 'sha224',
'sha256', 'sha384', 'sha512')
hashlib_mock.algorithms_guaranteed = algorithms_available
hashlib_mock.algorithms = algorithms_available
# | WHEN |
utils._get_hash_object('md5')
utils._get_hash_object('sha1')
utils._get_hash_object('sha224')
utils._get_hash_object('sha256')
utils._get_hash_object('sha384')
utils._get_hash_object('sha512')
# | THEN |
calls = [mock.call.md5(), mock.call.sha1(), mock.call.sha224(),
mock.call.sha256(), mock.call.sha384(), mock.call.sha512()]
hashlib_mock.assert_has_calls(calls)
def test__get_hash_object_throws_for_invalid_or_unsupported_hash_name(
self):
# | WHEN | & | THEN |
self.assertRaises(exception.InvalidParameterValue,
utils._get_hash_object,
'hickory-dickory-dock')
def test_hash_file_for_md5(self):
# | GIVEN |
data = b'Mary had a little lamb, its fleece as white as snow'
file_like_object = six.BytesIO(data)
expected = hashlib.md5(data).hexdigest()
# | WHEN |
actual = utils.hash_file(file_like_object) # using default, 'md5'
# | THEN |
self.assertEqual(expected, actual)
def test_hash_file_for_md5_not_binary(self):
# | GIVEN |
data = u'Mary had a little lamb, its fleece as white as sno\u0449'
file_like_object = six.StringIO(data)
expected = hashlib.md5(data.encode('utf-8')).hexdigest()
# | WHEN |
actual = utils.hash_file(file_like_object) # using default, 'md5'
# | THEN |
self.assertEqual(expected, actual)
def test_hash_file_for_sha1(self):
# | GIVEN |
data = b'Mary had a little lamb, its fleece as white as snow'
file_like_object = six.BytesIO(data)
expected = hashlib.sha1(data).hexdigest()
# | WHEN |
actual = utils.hash_file(file_like_object, 'sha1')
# | THEN |
self.assertEqual(expected, actual)
def test_hash_file_for_sha512(self):
# | GIVEN |
data = b'Mary had a little lamb, its fleece as white as snow'
file_like_object = six.BytesIO(data)
expected = hashlib.sha512(data).hexdigest()
# | WHEN |
actual = utils.hash_file(file_like_object, 'sha512')
# | THEN |
self.assertEqual(expected, actual)
def test_hash_file_throws_for_invalid_or_unsupported_hash(self):
# | GIVEN |
data = b'Mary had a little lamb, its fleece as white as snow'
file_like_object = six.BytesIO(data)
# | WHEN | & | THEN |
self.assertRaises(exception.InvalidParameterValue, utils.hash_file,
file_like_object, 'hickory-dickory-dock')
def test_file_has_content_equal(self):
data = b'Mary had a little lamb, its fleece as white as snow'
ref = data
with mock.patch('ironic.common.utils.open',
mock.mock_open(read_data=data)) as mopen:
self.assertTrue(utils.file_has_content('foo', ref))
mopen.assert_called_once_with('foo', 'rb')
def test_file_has_content_equal_not_binary(self):
data = u'Mary had a little lamb, its fleece as white as sno\u0449'
ref = data
with mock.patch('ironic.common.utils.open',
mock.mock_open(read_data=data)) as mopen:
self.assertTrue(utils.file_has_content('foo', ref))
mopen.assert_called_once_with('foo', 'rb')
def test_file_has_content_differ(self):
data = b'Mary had a little lamb, its fleece as white as snow'
ref = data + b'!'
with mock.patch('ironic.common.utils.open',
mock.mock_open(read_data=data)) as mopen:
self.assertFalse(utils.file_has_content('foo', ref))
mopen.assert_called_once_with('foo', 'rb')
def test_is_valid_datapath_id(self):
self.assertTrue(utils.is_valid_datapath_id("525400cf2d319fdf"))
self.assertTrue(utils.is_valid_datapath_id("525400CF2D319FDF"))
self.assertFalse(utils.is_valid_datapath_id("52"))
self.assertFalse(utils.is_valid_datapath_id("52:54:00:cf:2d:31"))
self.assertFalse(utils.is_valid_datapath_id("notadatapathid00"))
self.assertFalse(utils.is_valid_datapath_id("5525400CF2D319FDF"))
def test_is_hostname_safe(self):
self.assertTrue(utils.is_hostname_safe('spam'))
self.assertFalse(utils.is_hostname_safe('spAm'))
self.assertFalse(utils.is_hostname_safe('SPAM'))
self.assertFalse(utils.is_hostname_safe('-spam'))
self.assertFalse(utils.is_hostname_safe('spam-'))
self.assertTrue(utils.is_hostname_safe('spam-eggs'))
self.assertFalse(utils.is_hostname_safe('spam_eggs'))
self.assertFalse(utils.is_hostname_safe('spam eggs'))
self.assertTrue(utils.is_hostname_safe('spam.eggs'))
self.assertTrue(utils.is_hostname_safe('9spam'))
self.assertTrue(utils.is_hostname_safe('spam7'))
self.assertTrue(utils.is_hostname_safe('br34kf4st'))
self.assertFalse(utils.is_hostname_safe('$pam'))
self.assertFalse(utils.is_hostname_safe('egg$'))
self.assertFalse(utils.is_hostname_safe('spam#eggs'))
self.assertFalse(utils.is_hostname_safe(' eggs'))
self.assertFalse(utils.is_hostname_safe('spam '))
self.assertTrue(utils.is_hostname_safe('s'))
self.assertTrue(utils.is_hostname_safe('s' * 63))
self.assertFalse(utils.is_hostname_safe('s' * 64))
self.assertFalse(utils.is_hostname_safe(''))
self.assertFalse(utils.is_hostname_safe(None))
# Need to ensure a binary response for success or fail
self.assertIsNotNone(utils.is_hostname_safe('spam'))
self.assertIsNotNone(utils.is_hostname_safe('-spam'))
self.assertTrue(utils.is_hostname_safe('www.rackspace.com'))
self.assertTrue(utils.is_hostname_safe('www.rackspace.com.'))
self.assertTrue(utils.is_hostname_safe('http._sctp.www.example.com'))
self.assertTrue(utils.is_hostname_safe('mail.pets_r_us.net'))
self.assertTrue(utils.is_hostname_safe('mail-server-15.my_host.org'))
self.assertFalse(utils.is_hostname_safe('www.nothere.com_'))
self.assertFalse(utils.is_hostname_safe('www.nothere_.com'))
self.assertFalse(utils.is_hostname_safe('www..nothere.com'))
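        # Four 63-character labels joined by dots total 255 characters, the longest
        # hostname accepted; one more character, or a single oversized label, fails.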
long_str = 'a' * 63 + '.' + 'b' * 63 + '.' + 'c' * 63 + '.' + 'd' * 63
self.assertTrue(utils.is_hostname_safe(long_str))
self.assertFalse(utils.is_hostname_safe(long_str + '.'))
self.assertFalse(utils.is_hostname_safe('a' * 255))
def test_is_valid_logical_name(self):
valid = (
'spam', 'spAm', 'SPAM', 'spam-eggs', 'spam.eggs', 'spam_eggs',
'spam~eggs', '9spam', 'spam7', '~spam', '.spam', '.~-_', '~',
'br34kf4st', 's', 's' * 63, 's' * 255)
invalid = (
' ', 'spam eggs', '$pam', 'egg$', 'spam#eggs',
' eggs', 'spam ', '', None, 'spam%20')
for hostname in valid:
result = utils.is_valid_logical_name(hostname)
# Need to ensure a binary response for success. assertTrue
# is too generous, and would pass this test if, for
# instance, a regex Match object were returned.
self.assertIs(result, True,
"%s is unexpectedly invalid" % hostname)
for hostname in invalid:
result = utils.is_valid_logical_name(hostname)
# Need to ensure a binary response for
# success. assertFalse is too generous and would pass this
# test if None were returned.
self.assertIs(result, False,
"%s is unexpectedly valid" % hostname)
def test_validate_and_normalize_mac(self):
mac = 'AA:BB:CC:DD:EE:FF'
with mock.patch.object(netutils, 'is_valid_mac',
autospec=True) as m_mock:
m_mock.return_value = True
self.assertEqual(mac.lower(),
utils.validate_and_normalize_mac(mac))
def test_validate_and_normalize_datapath_id(self):
datapath_id = 'AA:BB:CC:DD:EE:FF'
with mock.patch.object(utils, 'is_valid_datapath_id',
autospec=True) as m_mock:
m_mock.return_value = True
self.assertEqual(datapath_id.lower(),
utils.validate_and_normalize_datapath_id(
datapath_id))
def test_validate_and_normalize_mac_invalid_format(self):
with mock.patch.object(netutils, 'is_valid_mac',
autospec=True) as m_mock:
m_mock.return_value = False
self.assertRaises(exception.InvalidMAC,
utils.validate_and_normalize_mac, 'invalid-mac')
def test_safe_rstrip(self):
value = '/test/'
rstripped_value = '/test'
not_rstripped = '/'
self.assertEqual(rstripped_value, utils.safe_rstrip(value, '/'))
self.assertEqual(not_rstripped, utils.safe_rstrip(not_rstripped, '/'))
def test_safe_rstrip_not_raises_exceptions(self):
# Supplying an integer should normally raise an exception because it
# does not save the rstrip() method.
value = 10
# In the case of raising an exception safe_rstrip() should return the
# original value.
self.assertEqual(value, utils.safe_rstrip(value))
@mock.patch.object(os.path, 'getmtime', return_value=1439465889.4964755,
autospec=True)
def test_unix_file_modification_datetime(self, mtime_mock):
expected = datetime.datetime(2015, 8, 13, 11, 38, 9, 496475)
self.assertEqual(expected,
utils.unix_file_modification_datetime('foo'))
mtime_mock.assert_called_once_with('foo')
def test_is_valid_no_proxy(self):
# Valid values for 'no_proxy'
valid_no_proxy = [
('a' * 63 + '.' + '0' * 63 + '.c.' + 'd' * 61 + '.' + 'e' * 61),
('A' * 63 + '.' + '0' * 63 + '.C.' + 'D' * 61 + '.' + 'E' * 61),
('.' + 'a' * 62 + '.' + '0' * 62 + '.c.' + 'd' * 61 + '.'
+ 'e' * 61),
',,example.com:3128,',
'192.168.1.1', # IP should be valid
]
# Test each one individually, so if failure easier to determine which
# one failed.
for no_proxy in valid_no_proxy:
self.assertTrue(
utils.is_valid_no_proxy(no_proxy),
msg="'no_proxy' value should be valid: {}".format(no_proxy))
# Test valid when joined together
self.assertTrue(utils.is_valid_no_proxy(','.join(valid_no_proxy)))
# Test valid when joined together with whitespace
self.assertTrue(utils.is_valid_no_proxy(' , '.join(valid_no_proxy)))
# empty string should also be valid
self.assertTrue(utils.is_valid_no_proxy(''))
# Invalid values for 'no_proxy'
invalid_no_proxy = [
('A' * 64 + '.' + '0' * 63 + '.C.' + 'D' * 61 + '.'
+ 'E' * 61), # too long (> 253)
('a' * 100),
'a..com',
('.' + 'a' * 63 + '.' + '0' * 62 + '.c.' + 'd' * 61 + '.'
+ 'e' * 61), # too long (> 251 after deleting .)
('*.' + 'a' * 60 + '.' + '0' * 60 + '.c.' + 'd' * 61 + '.'
+ 'e' * 61), # starts with *.
'c.-a.com',
'c.a-.com',
]
for no_proxy in invalid_no_proxy:
self.assertFalse(
utils.is_valid_no_proxy(no_proxy),
msg="'no_proxy' value should be invalid: {}".format(no_proxy))
@mock.patch.object(utils, 'LOG', autospec=True)
def test_warn_about_deprecated_extra_vif_port_id(self, mock_log):
# Set variable to default value
utils.warn_deprecated_extra_vif_port_id = False
utils.warn_about_deprecated_extra_vif_port_id()
utils.warn_about_deprecated_extra_vif_port_id()
self.assertEqual(1, mock_log.warning.call_count)
self.assertIn("extra['vif_port_id'] is deprecated and will not",
mock_log.warning.call_args[0][0])
class TempFilesTestCase(base.TestCase):
def test_tempdir(self):
dirname = None
with utils.tempdir() as tempdir:
self.assertTrue(os.path.isdir(tempdir))
dirname = tempdir
self.assertFalse(os.path.exists(dirname))
@mock.patch.object(shutil, 'rmtree', autospec=True)
@mock.patch.object(tempfile, 'mkdtemp', autospec=True)
def test_tempdir_mocked(self, mkdtemp_mock, rmtree_mock):
self.config(tempdir='abc')
mkdtemp_mock.return_value = 'temp-dir'
kwargs = {'dir': 'b'}
with utils.tempdir(**kwargs) as tempdir:
self.assertEqual('temp-dir', tempdir)
tempdir_created = tempdir
mkdtemp_mock.assert_called_once_with(**kwargs)
rmtree_mock.assert_called_once_with(tempdir_created)
@mock.patch.object(utils, 'LOG', autospec=True)
@mock.patch.object(shutil, 'rmtree', autospec=True)
@mock.patch.object(tempfile, 'mkdtemp', autospec=True)
def test_tempdir_mocked_error_on_rmtree(self, mkdtemp_mock, rmtree_mock,
log_mock):
self.config(tempdir='abc')
mkdtemp_mock.return_value = 'temp-dir'
rmtree_mock.side_effect = OSError
with utils.tempdir() as tempdir:
self.assertEqual('temp-dir', tempdir)
tempdir_created = tempdir
rmtree_mock.assert_called_once_with(tempdir_created)
self.assertTrue(log_mock.error.called)
@mock.patch.object(os.path, 'exists', autospec=True)
@mock.patch.object(utils, '_check_dir_writable', autospec=True)
@mock.patch.object(utils, '_check_dir_free_space', autospec=True)
def test_check_dir_with_pass_in(self, mock_free_space, mock_dir_writable,
mock_exists):
mock_exists.return_value = True
# test passing in a directory and size
utils.check_dir(directory_to_check='/fake/path', required_space=5)
mock_exists.assert_called_once_with('/fake/path')
mock_dir_writable.assert_called_once_with('/fake/path')
mock_free_space.assert_called_once_with('/fake/path', 5)
@mock.patch.object(utils, '_check_dir_writable', autospec=True)
@mock.patch.object(utils, '_check_dir_free_space', autospec=True)
def test_check_dir_no_dir(self, mock_free_space, mock_dir_writable):
self.config(tempdir='/fake/path')
# NOTE(dtantsur): self.config uses os.path.exists, so we cannot mock
# on the method level.
with mock.patch.object(os.path, 'exists',
autospec=True) as mock_exists:
mock_exists.return_value = False
self.assertRaises(exception.PathNotFound, utils.check_dir)
mock_exists.assert_called_once_with(CONF.tempdir)
self.assertFalse(mock_free_space.called)
self.assertFalse(mock_dir_writable.called)
@mock.patch.object(utils, '_check_dir_writable', autospec=True)
@mock.patch.object(utils, '_check_dir_free_space', autospec=True)
def test_check_dir_ok(self, mock_free_space, mock_dir_writable):
self.config(tempdir='/fake/path')
# NOTE(dtantsur): self.config uses os.path.exists, so we cannot mock
# on the method level.
with mock.patch.object(os.path, 'exists',
autospec=True) as mock_exists:
mock_exists.return_value = True
utils.check_dir()
mock_exists.assert_called_once_with(CONF.tempdir)
mock_dir_writable.assert_called_once_with(CONF.tempdir)
mock_free_space.assert_called_once_with(CONF.tempdir, 1)
@mock.patch.object(os, 'access', autospec=True)
def test__check_dir_writable_ok(self, mock_access):
mock_access.return_value = True
self.assertIsNone(utils._check_dir_writable("/fake/path"))
mock_access.assert_called_once_with("/fake/path", os.W_OK)
@mock.patch.object(os, 'access', autospec=True)
def test__check_dir_writable_not_writable(self, mock_access):
mock_access.return_value = False
self.assertRaises(exception.DirectoryNotWritable,
utils._check_dir_writable, "/fake/path")
mock_access.assert_called_once_with("/fake/path", os.W_OK)
@mock.patch.object(os, 'statvfs', autospec=True)
def test__check_dir_free_space_ok(self, mock_stat):
statvfs_mock_return = mock.MagicMock()
statvfs_mock_return.f_bsize = 5
statvfs_mock_return.f_frsize = 0
statvfs_mock_return.f_blocks = 0
statvfs_mock_return.f_bfree = 0
statvfs_mock_return.f_bavail = 1024 * 1024
statvfs_mock_return.f_files = 0
statvfs_mock_return.f_ffree = 0
statvfs_mock_return.f_favail = 0
statvfs_mock_return.f_flag = 0
statvfs_mock_return.f_namemax = 0
mock_stat.return_value = statvfs_mock_return
utils._check_dir_free_space("/fake/path")
mock_stat.assert_called_once_with("/fake/path")
@mock.patch.object(os, 'statvfs', autospec=True)
def test_check_dir_free_space_raises(self, mock_stat):
statvfs_mock_return = mock.MagicMock()
statvfs_mock_return.f_bsize = 1
statvfs_mock_return.f_frsize = 0
statvfs_mock_return.f_blocks = 0
statvfs_mock_return.f_bfree = 0
statvfs_mock_return.f_bavail = 1024
statvfs_mock_return.f_files = 0
statvfs_mock_return.f_ffree = 0
statvfs_mock_return.f_favail = 0
statvfs_mock_return.f_flag = 0
statvfs_mock_return.f_namemax = 0
mock_stat.return_value = statvfs_mock_return
self.assertRaises(exception.InsufficientDiskSpace,
utils._check_dir_free_space, "/fake/path")
mock_stat.assert_called_once_with("/fake/path")
class GetUpdatedCapabilitiesTestCase(base.TestCase):
def test_get_updated_capabilities(self):
capabilities = {'ilo_firmware_version': 'xyz'}
cap_string = 'ilo_firmware_version:xyz'
cap_returned = utils.get_updated_capabilities(None, capabilities)
self.assertEqual(cap_string, cap_returned)
self.assertIsInstance(cap_returned, str)
def test_get_updated_capabilities_multiple_keys(self):
capabilities = {'ilo_firmware_version': 'xyz',
'foo': 'bar', 'somekey': 'value'}
cap_string = 'ilo_firmware_version:xyz,foo:bar,somekey:value'
cap_returned = utils.get_updated_capabilities(None, capabilities)
set1 = set(cap_string.split(','))
set2 = set(cap_returned.split(','))
self.assertEqual(set1, set2)
self.assertIsInstance(cap_returned, str)
def test_get_updated_capabilities_invalid_capabilities(self):
capabilities = 'ilo_firmware_version'
self.assertRaises(ValueError,
utils.get_updated_capabilities,
capabilities, {})
def test_get_updated_capabilities_capabilities_not_dict(self):
capabilities = ['ilo_firmware_version:xyz', 'foo:bar']
self.assertRaises(ValueError,
utils.get_updated_capabilities,
None, capabilities)
def test_get_updated_capabilities_add_to_existing_capabilities(self):
new_capabilities = {'BootMode': 'uefi'}
expected_capabilities = 'BootMode:uefi,foo:bar'
cap_returned = utils.get_updated_capabilities('foo:bar',
new_capabilities)
set1 = set(expected_capabilities.split(','))
set2 = set(cap_returned.split(','))
self.assertEqual(set1, set2)
self.assertIsInstance(cap_returned, str)
def test_get_updated_capabilities_replace_to_existing_capabilities(self):
new_capabilities = {'BootMode': 'bios'}
expected_capabilities = 'BootMode:bios'
cap_returned = utils.get_updated_capabilities('BootMode:uefi',
new_capabilities)
set1 = set(expected_capabilities.split(','))
set2 = set(cap_returned.split(','))
self.assertEqual(set1, set2)
self.assertIsInstance(cap_returned, str)
def test_validate_network_port(self):
port = utils.validate_network_port('0', 'message')
self.assertEqual(0, port)
port = utils.validate_network_port('65535')
self.assertEqual(65535, port)
def test_validate_network_port_fail(self):
self.assertRaisesRegex(exception.InvalidParameterValue,
'Port "65536" is not a valid port.',
utils.validate_network_port,
'65536')
self.assertRaisesRegex(exception.InvalidParameterValue,
'fake_port "-1" is not a valid port.',
utils.validate_network_port,
'-1',
'fake_port')
self.assertRaisesRegex(exception.InvalidParameterValue,
'Port "invalid" is not a valid port.',
utils.validate_network_port,
'invalid')
class JinjaTemplatingTestCase(base.TestCase):
def setUp(self):
super(JinjaTemplatingTestCase, self).setUp()
self.template = '{{ foo }} {{ bar }}'
self.params = {'foo': 'spam', 'bar': 'ham'}
self.expected = 'spam ham'
def test_render_string(self):
self.assertEqual(self.expected,
utils.render_template(self.template,
self.params,
is_file=False))
@mock.patch('ironic.common.utils.jinja2.FileSystemLoader', autospec=True)
def test_render_file(self, jinja_fsl_mock):
path = '/path/to/template.j2'
jinja_fsl_mock.return_value = jinja2.DictLoader(
{'template.j2': self.template})
self.assertEqual(self.expected,
utils.render_template(path,
self.params))
jinja_fsl_mock.assert_called_once_with('/path/to')
| 44.681208 | 78 | 0.624221 | ["Apache-2.0"] | jovial/ironic | ironic/tests/unit/common/test_utils.py | 26,630 | Python